From 33756ac8999eed4b4be1c9a88cc54327d4fbf94e Mon Sep 17 00:00:00 2001
From: Yang Liu
Date: Thu, 13 Jun 2019 21:58:52 -0400
Subject: [PATCH] Initial submission for starlingx pytest framework.

Include:
- util modules, such as table_parser, ssh/localhost clients, cli module,
  exception, logger, etc. Util modules are mostly used by keywords.
- keywords modules. These are helper functions that are used directly by
  test functions.
- platform (with platform or platform_sanity marker) and stx-openstack
  (with sanity, sx_sanity, cpe_sanity, or storage_sanity marker) sanity
  testcases
- pytest config conftest, and test fixture modules
- test config file template/example

Required packages:
- python3.4 or python3.5
- pytest >=3.10,<4.0
- pexpect
- requests
- pyyaml
- selenium (firefox, ffmpeg, pyvirtualdisplay, Xvfb or Xephyr or Xvnc)

Limitations:
- Anything that requires copying from Test File Server will not work until
  a public share is configured to share test files. Tests skipped for now.

Co-Authored-By: Maria Yousaf
Co-Authored-By: Marvin Huang
Co-Authored-By: Yosief Gebremariam
Co-Authored-By: Paul Warner
Co-Authored-By: Xueguang Ma
Co-Authored-By: Charles Chen
Co-Authored-By: Daniel Graziano
Co-Authored-By: Jordan Li
Co-Authored-By: Nimalini Rasa
Co-Authored-By: Senthil Mukundakumar
Co-Authored-By: Anuejyan Manokeran
Co-Authored-By: Peng Peng
Co-Authored-By: Chris Winnicki
Co-Authored-By: Joe Vimar
Co-Authored-By: Alex Kozyrev
Co-Authored-By: Jack Ding
Co-Authored-By: Ming Lei
Co-Authored-By: Ankit Jain
Co-Authored-By: Eric Barrett
Co-Authored-By: William Jia
Co-Authored-By: Joseph Richard
Co-Authored-By: Aldo Mcfarlane

Story: 2005892
Task: 33750

Signed-off-by: Yang Liu
Change-Id: I7a88a47e09733d39f024144530f5abb9aee8cad2
---
 README.rst                                    |   26 +-
 automated-pytest-suite/README.rst             |   76 +
 .../{testcases/remove.txt => __init__.py}     |    0
 automated-pytest-suite/conftest.py            |  693 ++
 automated-pytest-suite/consts/__init__.py     |    0
 automated-pytest-suite/consts/auth.py         |  348 +
 automated-pytest-suite/consts/cli_errs.py     |  192 +
 automated-pytest-suite/consts/filepaths.py    |   55 +
 automated-pytest-suite/consts/horizon.py      |    8 +
 automated-pytest-suite/consts/lab.py          |  162 +
 automated-pytest-suite/consts/proj_vars.py    |   87 +
 automated-pytest-suite/consts/reasons.py      |   41 +
 automated-pytest-suite/consts/stx.py          |  681 ++
 automated-pytest-suite/consts/timeout.py      |  160 +
 .../consts/ubuntu_if_config.sh                |   10 +
 automated-pytest-suite/keywords/__init__.py   |    0
 .../keywords/ceilometer_helper.py             |   67 +
 .../keywords/check_helper.py                  |  635 ++
 .../keywords/cinder_helper.py                 | 1756 +++++
 automated-pytest-suite/keywords/common.py     |  787 +++
 .../keywords/container_helper.py              |  853 +++
 .../keywords/glance_helper.py                 | 1146 ++++
 .../keywords/gnocchi_helper.py                |  165 +
 .../keywords/heat_helper.py                   |  398 ++
 .../keywords/horizon_helper.py                |   45 +
 .../keywords/host_helper.py                   | 4831 +++++++++++++
 .../keywords/html_helper.py                   |  198 +
 .../keywords/keystone_helper.py               |  540 ++
 .../keywords/kube_helper.py                   | 1117 +++
 .../keywords/network_helper.py                | 5696 ++++++++++++++++
 .../keywords/nova_helper.py                   | 1310 ++++
 automated-pytest-suite/keywords/pm_helper.py  | 1152 ++++
 .../keywords/security_helper.py               | 1113 +++
 .../keywords/storage_helper.py                | 1677 +++++
 .../keywords/system_helper.py                 | 3620 ++++++++++
 automated-pytest-suite/keywords/vm_helper.py  | 5988 +++++++++++++++++
 automated-pytest-suite/pytest.ini             |   21 +
 automated-pytest-suite/requirements.txt       |    6 +
 automated-pytest-suite/setups.py              |  750 +++
 automated-pytest-suite/stx-test_template.conf |  137 +
automated-pytest-suite/testcases/__init__.py | 0 automated-pytest-suite/testcases/conftest.py | 72 + .../testcases/functional/__init__.py | 0 .../functional/ceilometer/__init__.py | 0 .../functional/ceilometer/conftest.py | 3 + .../ceilometer/test_ceilometer_statistics.py | 102 + .../testcases/functional/common/__init__.py | 0 .../testcases/functional/common/conftest.py | 3 + .../common/test_host_connections.py | 66 + .../functional/common/test_system_health.py | 58 + .../testcases/functional/conftest.py | 5 + .../functional/fault_mgmt/__init__.py | 0 .../functional/fault_mgmt/conftest.py | 3 + .../fault_mgmt/test_fm_on_host_operation.py | 110 + .../testcases/functional/horizon/__init__.py | 0 .../testcases/functional/horizon/conftest.py | 1 + .../functional/horizon/test_hosts.py | 322 + .../functional/horizon/test_instances.py | 86 + .../testcases/functional/mtc/__init__.py | 0 .../testcases/functional/mtc/conftest.py | 3 + .../testcases/functional/mtc/test_evacuate.py | 146 + .../mtc/test_host_operations_negative.py | 31 + .../functional/mtc/test_lock_unlock_host.py | 93 + .../mtc/test_services_persists_over_reboot.py | 85 + .../testcases/functional/mtc/test_swact.py | 123 + .../functional/mtc/test_ungraceful_reboot.py | 45 + .../functional/networking/__init__.py | 0 .../functional/networking/conftest.py | 3 + .../functional/networking/test_dvr.py | 203 + .../networking/test_multiple_ports.py | 538 ++ .../functional/networking/test_ping_vms.py | 117 + .../networking/test_vm_meta_data_retrieval.py | 45 + .../testcases/functional/nova/__init__.py | 0 .../testcases/functional/nova/conftest.py | 3 + .../functional/nova/test_config_drive.py | 131 + .../functional/nova/test_cpu_policy.py | 185 + .../functional/nova/test_cpu_thread.py | 437 ++ .../functional/nova/test_evacuate_vms.py | 318 + .../functional/nova/test_lock_with_vms.py | 183 + .../functional/nova/test_mempage_size.py | 501 ++ .../functional/nova/test_migrate_vms.py | 412 ++ .../functional/nova/test_nova_actions.py | 91 + .../functional/nova/test_resize_vm.py | 508 ++ .../functional/nova/test_vm_recovery.py | 105 + .../testcases/functional/security/__init__.py | 0 .../testcases/functional/security/test_ima.py | 412 ++ .../security/test_kernel_modules.py | 71 + .../testcases/functional/storage/__init__.py | 0 .../functional/storage/ceph/test_ceph.py | 115 + .../testcases/functional/storage/conftest.py | 3 + .../storage/test_storage_vm_migration.py | 521 ++ .../functional/z_containers/__init__.py | 0 .../z_containers/test_custom_containers.py | 389 ++ .../z_containers/test_kube_edgex_services.py | 117 + .../z_containers/test_kube_system_services.py | 93 + .../z_containers/test_openstack_services.py | 292 + .../testcases/rest/rest_test_helper.py | 41 + .../testcases/rest/test_GET_adversarial.py | 98 + .../rest/test_GET_good_authentication.py | 67 + .../rest/test_GET_ihosts_host_id_valid.py | 72 + .../testcases/rest/test_GET_ports_valid.py | 535 ++ .../testcases/rest/test_bad_authentication.py | 349 + .../testcases/rest/test_rest_fm.py | 28 + .../testcases/rest/test_rest_gnocchi.py | 30 + .../testcases/system_config/__init__.py | 0 .../testcases/system_config/conftest.py | 6 + .../test_config_host_storage_backing.py | 194 + .../system_config/test_system_cleanup.py | 19 + .../testfixtures/__init__.py | 0 .../testfixtures/config_host.py | 101 + .../testfixtures/fixture_resources.py | 184 + .../testfixtures/horizon.py | 87 + .../testfixtures/pre_checks_and_configs.py | 173 + .../testfixtures/recover_hosts.py | 143 + 
.../testfixtures/resource_create.py | 172 + .../testfixtures/resource_mgmt.py | 267 + .../testfixtures/verify_fixtures.py | 226 + automated-pytest-suite/utils/__init__.py | 0 automated-pytest-suite/utils/cli.py | 327 + .../utils/clients/__init__.py | 0 automated-pytest-suite/utils/clients/local.py | 299 + automated-pytest-suite/utils/clients/ssh.py | 1688 +++++ .../utils/clients/telnet.py | 549 ++ automated-pytest-suite/utils/exceptions.py | 261 + .../utils/guest_scripts/dpdk_pktgen.sh | 49 + .../utils/guest_scripts/kpktgen.sh | 81 + .../utils/guest_scripts/scripts.py | 111 + .../guest_scripts/tis_automation_init.sh | 604 ++ .../utils/horizon/__init__.py | 0 .../utils/horizon/basewebobject.py | 169 + .../utils/horizon/helper.py | 93 + .../utils/horizon/pages/__init__.py | 0 .../utils/horizon/pages/admin/__init__.py | 0 .../horizon/pages/admin/compute/__init__.py | 0 .../pages/admin/compute/flavorspage.py | 174 + .../pages/admin/compute/hostaggregatespage.py | 93 + .../pages/admin/compute/hypervisorspage.py | 81 + .../horizon/pages/admin/compute/imagespage.py | 6 + .../pages/admin/compute/instancespage.py | 6 + .../pages/admin/compute/servergroupspage.py | 63 + .../pages/admin/fault_management/__init__.py | 0 .../fault_management/activealarmspage.py | 24 + .../admin/fault_management/eventspage.py | 24 + .../fault_management/eventssuppressionpage.py | 49 + .../horizon/pages/admin/network/__init__.py | 0 .../pages/admin/network/floatingipspage.py | 25 + .../pages/admin/network/networkspage.py | 173 + .../pages/admin/network/routerspage.py | 6 + .../utils/horizon/pages/admin/overviewpage.py | 7 + .../horizon/pages/admin/platform/__init__.py | 0 .../pages/admin/platform/hostinventorypage.py | 472 ++ .../platform/providernetworkoverviewpage.py | 13 + .../pages/admin/platform/providernetworks.py | 123 + .../platform/providernetworkstopology.py | 87 + .../admin/platform/softwaremanagementpage.py | 100 + .../admin/platform/storageoverviewpage.py | 83 + .../admin/platform/systemconfigurationpage.py | 494 ++ .../horizon/pages/admin/system/__init__.py | 0 .../pages/admin/system/defaultspage.py | 158 + .../admin/system/metadatadefinitionspage.py | 128 + .../admin/system/systeminformationpage.py | 87 + .../horizon/pages/admin/volume/__init__.py | 0 .../pages/admin/volume/volumesnapshotspage.py | 1 + .../horizon/pages/admin/volume/volumespage.py | 1 + .../pages/admin/volume/volumetypespage.py | 171 + .../utils/horizon/pages/basepage.py | 55 + .../utils/horizon/pages/identity/__init__.py | 0 .../horizon/pages/identity/groupspage.py | 82 + .../horizon/pages/identity/projectspage.py | 226 + .../utils/horizon/pages/identity/rolespage.py | 86 + .../utils/horizon/pages/identity/userspage.py | 105 + .../utils/horizon/pages/loginpage.py | 44 + .../utils/horizon/pages/pageobject.py | 88 + .../utils/horizon/pages/project/__init__.py | 0 .../horizon/pages/project/apiaccesspage.py | 75 + .../horizon/pages/project/compute/__init__.py | 0 .../pages/project/compute/imagespage.py | 278 + .../pages/project/compute/instancespage.py | 406 ++ .../pages/project/compute/keypairspage.py | 94 + .../pages/project/compute/overviewpage.py | 48 + .../pages/project/compute/servergroupspage.py | 81 + .../horizon/pages/project/network/__init__.py | 0 .../pages/project/network/floatingipspage.py | 103 + .../pages/project/network/managerulespage.py | 63 + .../project/network/networkoverviewpage.py | 18 + .../pages/project/network/networkspage.py | 177 + .../project/network/routerinterfacespage.py | 76 + 
.../project/network/routeroverviewpage.py | 28 + .../pages/project/network/routerspage.py | 133 + .../project/network/securitygroupspage.py | 64 + .../pages/project/orchestration/__init__.py | 0 .../orchestration/resourcestypespage.py | 0 .../pages/project/orchestration/stackspage.py | 0 .../orchestration/templateversionspage.py | 0 .../horizon/pages/project/volumes/__init__.py | 0 .../pages/project/volumes/shotspage.py | 143 + .../pages/project/volumes/volumespage.py | 363 + .../utils/horizon/pages/settings/__init__.py | 0 .../pages/settings/changepasswordpage.py | 53 + .../pages/settings/usersettingspage.py | 71 + .../utils/horizon/regions/__init__.py | 0 .../utils/horizon/regions/bars.py | 52 + .../utils/horizon/regions/baseregion.py | 68 + .../utils/horizon/regions/exceptions.py | 22 + .../utils/horizon/regions/forms.py | 588 ++ .../utils/horizon/regions/menus.py | 236 + .../utils/horizon/regions/messages.py | 45 + .../utils/horizon/regions/tables.py | 368 + .../utils/horizon/video_recorder.py | 78 + automated-pytest-suite/utils/local_host.py | 27 + automated-pytest-suite/utils/multi_thread.py | 455 ++ automated-pytest-suite/utils/parse_log.py | 206 + automated-pytest-suite/utils/rest.py | 197 + automated-pytest-suite/utils/table_parser.py | 1262 ++++ automated-pytest-suite/utils/tis_log.py | 168 + 215 files changed, 60098 insertions(+), 3 deletions(-) create mode 100644 automated-pytest-suite/README.rst rename automated-pytest-suite/{testcases/remove.txt => __init__.py} (100%) create mode 100644 automated-pytest-suite/conftest.py create mode 100644 automated-pytest-suite/consts/__init__.py create mode 100755 automated-pytest-suite/consts/auth.py create mode 100644 automated-pytest-suite/consts/cli_errs.py create mode 100755 automated-pytest-suite/consts/filepaths.py create mode 100644 automated-pytest-suite/consts/horizon.py create mode 100755 automated-pytest-suite/consts/lab.py create mode 100644 automated-pytest-suite/consts/proj_vars.py create mode 100644 automated-pytest-suite/consts/reasons.py create mode 100755 automated-pytest-suite/consts/stx.py create mode 100644 automated-pytest-suite/consts/timeout.py create mode 100644 automated-pytest-suite/consts/ubuntu_if_config.sh create mode 100644 automated-pytest-suite/keywords/__init__.py create mode 100644 automated-pytest-suite/keywords/ceilometer_helper.py create mode 100644 automated-pytest-suite/keywords/check_helper.py create mode 100644 automated-pytest-suite/keywords/cinder_helper.py create mode 100644 automated-pytest-suite/keywords/common.py create mode 100644 automated-pytest-suite/keywords/container_helper.py create mode 100644 automated-pytest-suite/keywords/glance_helper.py create mode 100644 automated-pytest-suite/keywords/gnocchi_helper.py create mode 100644 automated-pytest-suite/keywords/heat_helper.py create mode 100644 automated-pytest-suite/keywords/horizon_helper.py create mode 100755 automated-pytest-suite/keywords/host_helper.py create mode 100644 automated-pytest-suite/keywords/html_helper.py create mode 100644 automated-pytest-suite/keywords/keystone_helper.py create mode 100644 automated-pytest-suite/keywords/kube_helper.py create mode 100755 automated-pytest-suite/keywords/network_helper.py create mode 100755 automated-pytest-suite/keywords/nova_helper.py create mode 100644 automated-pytest-suite/keywords/pm_helper.py create mode 100644 automated-pytest-suite/keywords/security_helper.py create mode 100644 automated-pytest-suite/keywords/storage_helper.py create mode 100644 
automated-pytest-suite/keywords/system_helper.py create mode 100755 automated-pytest-suite/keywords/vm_helper.py create mode 100644 automated-pytest-suite/pytest.ini create mode 100644 automated-pytest-suite/requirements.txt create mode 100644 automated-pytest-suite/setups.py create mode 100644 automated-pytest-suite/stx-test_template.conf create mode 100755 automated-pytest-suite/testcases/__init__.py create mode 100755 automated-pytest-suite/testcases/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/ceilometer/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/ceilometer/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/ceilometer/test_ceilometer_statistics.py create mode 100755 automated-pytest-suite/testcases/functional/common/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/common/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/common/test_host_connections.py create mode 100755 automated-pytest-suite/testcases/functional/common/test_system_health.py create mode 100755 automated-pytest-suite/testcases/functional/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/fault_mgmt/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/fault_mgmt/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/fault_mgmt/test_fm_on_host_operation.py create mode 100644 automated-pytest-suite/testcases/functional/horizon/__init__.py create mode 100644 automated-pytest-suite/testcases/functional/horizon/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/horizon/test_hosts.py create mode 100755 automated-pytest-suite/testcases/functional/horizon/test_instances.py create mode 100755 automated-pytest-suite/testcases/functional/mtc/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/mtc/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/mtc/test_evacuate.py create mode 100755 automated-pytest-suite/testcases/functional/mtc/test_host_operations_negative.py create mode 100755 automated-pytest-suite/testcases/functional/mtc/test_lock_unlock_host.py create mode 100755 automated-pytest-suite/testcases/functional/mtc/test_services_persists_over_reboot.py create mode 100755 automated-pytest-suite/testcases/functional/mtc/test_swact.py create mode 100644 automated-pytest-suite/testcases/functional/mtc/test_ungraceful_reboot.py create mode 100755 automated-pytest-suite/testcases/functional/networking/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/networking/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/networking/test_dvr.py create mode 100755 automated-pytest-suite/testcases/functional/networking/test_multiple_ports.py create mode 100755 automated-pytest-suite/testcases/functional/networking/test_ping_vms.py create mode 100755 automated-pytest-suite/testcases/functional/networking/test_vm_meta_data_retrieval.py create mode 100755 automated-pytest-suite/testcases/functional/nova/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/nova/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_config_drive.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_cpu_policy.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_cpu_thread.py 
create mode 100755 automated-pytest-suite/testcases/functional/nova/test_evacuate_vms.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_lock_with_vms.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_mempage_size.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_migrate_vms.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_nova_actions.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_resize_vm.py create mode 100755 automated-pytest-suite/testcases/functional/nova/test_vm_recovery.py create mode 100755 automated-pytest-suite/testcases/functional/security/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/security/test_ima.py create mode 100755 automated-pytest-suite/testcases/functional/security/test_kernel_modules.py create mode 100755 automated-pytest-suite/testcases/functional/storage/__init__.py create mode 100755 automated-pytest-suite/testcases/functional/storage/ceph/test_ceph.py create mode 100755 automated-pytest-suite/testcases/functional/storage/conftest.py create mode 100755 automated-pytest-suite/testcases/functional/storage/test_storage_vm_migration.py create mode 100644 automated-pytest-suite/testcases/functional/z_containers/__init__.py create mode 100644 automated-pytest-suite/testcases/functional/z_containers/test_custom_containers.py create mode 100644 automated-pytest-suite/testcases/functional/z_containers/test_kube_edgex_services.py create mode 100644 automated-pytest-suite/testcases/functional/z_containers/test_kube_system_services.py create mode 100644 automated-pytest-suite/testcases/functional/z_containers/test_openstack_services.py create mode 100644 automated-pytest-suite/testcases/rest/rest_test_helper.py create mode 100755 automated-pytest-suite/testcases/rest/test_GET_adversarial.py create mode 100755 automated-pytest-suite/testcases/rest/test_GET_good_authentication.py create mode 100755 automated-pytest-suite/testcases/rest/test_GET_ihosts_host_id_valid.py create mode 100755 automated-pytest-suite/testcases/rest/test_GET_ports_valid.py create mode 100755 automated-pytest-suite/testcases/rest/test_bad_authentication.py create mode 100644 automated-pytest-suite/testcases/rest/test_rest_fm.py create mode 100755 automated-pytest-suite/testcases/rest/test_rest_gnocchi.py create mode 100755 automated-pytest-suite/testcases/system_config/__init__.py create mode 100755 automated-pytest-suite/testcases/system_config/conftest.py create mode 100755 automated-pytest-suite/testcases/system_config/test_config_host_storage_backing.py create mode 100755 automated-pytest-suite/testcases/system_config/test_system_cleanup.py create mode 100644 automated-pytest-suite/testfixtures/__init__.py create mode 100644 automated-pytest-suite/testfixtures/config_host.py create mode 100755 automated-pytest-suite/testfixtures/fixture_resources.py create mode 100644 automated-pytest-suite/testfixtures/horizon.py create mode 100755 automated-pytest-suite/testfixtures/pre_checks_and_configs.py create mode 100644 automated-pytest-suite/testfixtures/recover_hosts.py create mode 100644 automated-pytest-suite/testfixtures/resource_create.py create mode 100755 automated-pytest-suite/testfixtures/resource_mgmt.py create mode 100644 automated-pytest-suite/testfixtures/verify_fixtures.py create mode 100644 automated-pytest-suite/utils/__init__.py create mode 100644 automated-pytest-suite/utils/cli.py create mode 100644 
automated-pytest-suite/utils/clients/__init__.py create mode 100644 automated-pytest-suite/utils/clients/local.py create mode 100644 automated-pytest-suite/utils/clients/ssh.py create mode 100644 automated-pytest-suite/utils/clients/telnet.py create mode 100644 automated-pytest-suite/utils/exceptions.py create mode 100644 automated-pytest-suite/utils/guest_scripts/dpdk_pktgen.sh create mode 100644 automated-pytest-suite/utils/guest_scripts/kpktgen.sh create mode 100644 automated-pytest-suite/utils/guest_scripts/scripts.py create mode 100644 automated-pytest-suite/utils/guest_scripts/tis_automation_init.sh create mode 100644 automated-pytest-suite/utils/horizon/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/basewebobject.py create mode 100644 automated-pytest-suite/utils/horizon/helper.py create mode 100644 automated-pytest-suite/utils/horizon/pages/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/compute/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/compute/flavorspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/compute/hostaggregatespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/compute/hypervisorspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/compute/imagespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/compute/instancespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/compute/servergroupspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/fault_management/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/fault_management/activealarmspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventssuppressionpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/network/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/network/floatingipspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/network/networkspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/network/routerspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/overviewpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/hostinventorypage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkoverviewpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworks.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkstopology.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/softwaremanagementpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/storageoverviewpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/platform/systemconfigurationpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/system/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/system/defaultspage.py create mode 100644 
automated-pytest-suite/utils/horizon/pages/admin/system/metadatadefinitionspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/system/systeminformationpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/volume/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/volume/volumesnapshotspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/volume/volumespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/admin/volume/volumetypespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/basepage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/identity/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/identity/groupspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/identity/projectspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/identity/rolespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/identity/userspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/loginpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/pageobject.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/apiaccesspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/compute/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/compute/imagespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/compute/instancespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/compute/keypairspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/compute/overviewpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/compute/servergroupspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/floatingipspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/managerulespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/networkoverviewpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/networkspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/routerinterfacespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/routeroverviewpage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/routerspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/network/securitygroupspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/orchestration/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/orchestration/resourcestypespage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/orchestration/stackspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/orchestration/templateversionspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/volumes/__init__.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/volumes/shotspage.py create mode 100644 automated-pytest-suite/utils/horizon/pages/project/volumes/volumespage.py create mode 100644 
automated-pytest-suite/utils/horizon/pages/settings/__init__.py
 create mode 100644 automated-pytest-suite/utils/horizon/pages/settings/changepasswordpage.py
 create mode 100644 automated-pytest-suite/utils/horizon/pages/settings/usersettingspage.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/__init__.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/bars.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/baseregion.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/exceptions.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/forms.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/menus.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/messages.py
 create mode 100644 automated-pytest-suite/utils/horizon/regions/tables.py
 create mode 100644 automated-pytest-suite/utils/horizon/video_recorder.py
 create mode 100644 automated-pytest-suite/utils/local_host.py
 create mode 100644 automated-pytest-suite/utils/multi_thread.py
 create mode 100644 automated-pytest-suite/utils/parse_log.py
 create mode 100644 automated-pytest-suite/utils/rest.py
 create mode 100644 automated-pytest-suite/utils/table_parser.py
 create mode 100644 automated-pytest-suite/utils/tis_log.py

diff --git a/README.rst b/README.rst
index ef56575..95582b4 100644
--- a/README.rst
+++ b/README.rst
@@ -1,5 +1,25 @@
-==========
+========
 stx-test
-==========
+========
 
-StarlingX Test
+StarlingX Test repository for manual and automated test cases.
+
+
+Contribute
+----------
+
+- Clone the repo
+- Gerrit hook needs to be added for code review purposes.
+
+.. code-block:: bash
+
+  # Generate an ssh key if needed
+  ssh-keygen -t rsa -C ""
+  ssh-add $private_keyfile_path
+
+  # add ssh key to settings https://review.opendev.org/#/q/project:starlingx/test
+  cd
+  git remote add gerrit ssh://@review.opendev.org/starlingx/test.git
+  git review -s
+
+- When you are ready, create your commit with a detailed commit message, and submit it for review.
\ No newline at end of file
diff --git a/automated-pytest-suite/README.rst b/automated-pytest-suite/README.rst
new file mode 100644
index 0000000..d8363b5
--- /dev/null
+++ b/automated-pytest-suite/README.rst
@@ -0,0 +1,76 @@
+====================================
+StarlingX Integration Test Framework
+====================================
+
+The project contains integration test cases that can be executed on an
+installed and configured StarlingX system.
+
+Supported test cases:
+
+- CLI tests over SSH connection to StarlingX system via OAM floating IP
+- Platform RestAPI test cases via external endpoints
+- Horizon test cases
+
+
+Packages Required
+-----------------
+- python >='3.4.3,<3.7'
+- pytest>='3.1.0,<4.0'
+- pexpect
+- pyyaml
+- requests (used by RestAPI test cases only)
+- selenium (used by Horizon test cases only)
+- Firefox (used by Horizon test cases only)
+- pyvirtualdisplay (used by Horizon test cases only)
+- ffmpeg (used by Horizon test cases only)
+- Xvfb or Xephyr or Xvnc (used by pyvirtualdisplay for Horizon test cases only)
+
+
+Setup Test Tool
+---------------
+This is an off-box test tool that needs to be set up once on a Linux server
+that can reach the StarlingX system under test (such as SSH to STX
+system, send/receive RestAPI requests, open Horizon page).
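+
+A minimal setup sketch is shown below, assuming a Linux server with Python 3
+and pip available; the clone URL, paths and package versions are examples
+based on the package list above and may need to be adjusted for your
+environment. The individual setup steps are listed after the sketch.
+
+.. code-block:: bash
+
+  # Install the required Python packages (versions per Packages Required above)
+  pip3 install 'pytest>=3.1.0,<4.0' pexpect pyyaml requests selenium pyvirtualdisplay
+
+  # Clone the stx-test repo and make the test suite importable
+  git clone https://opendev.org/starlingx/test.git ~/stx-test
+  export PYTHONPATH=${PYTHONPATH}:~/stx-test/automated-pytest-suite
+
+  # Verify the StarlingX system under test is reachable over SSH
+  ssh sysadmin@<oam_floating_ip> hostname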
+
+- Install above packages
+- Clone stx-test repo
+- Add absolute path for automated-pytest-suite to PYTHONPATH environment variable
+
+Execute Test Cases
+------------------
+Precondition: STX system under test should be installed and configured.
+
+- | Customized config can be provided via --testcase-config .
+  | Config template can be found at ${project_root}/stx-test_template.conf.
+- Test cases can be selected by specifying pytest markers via -m
+- | If stx-openstack is not deployed, a platform-specific marker should be specified,
+  | e.g., -m "platform_sanity or platform"
+- | Automation logs will be created in the ${HOME}/AUTOMATION_LOGS directory by default.
+  | Log directory can also be specified with the --resultlog=${LOG_DIR} command-line option
+- Examples:
+
+.. code-block:: bash
+
+  export project_root=
+
+  # Include $project_root to PYTHONPATH if not already done
+  export PYTHONPATH=${PYTHONPATH}:${project_root}
+
+  cd $project_root
+
+  # Example 1: Run all platform_sanity test cases under testcases/
+  pytest -m platform_sanity --testcase-config=~/my_config.conf testcases/
+
+  # Example 2: Run platform_sanity or sanity (requires stx-openstack) test cases,
+  # on a StarlingX virtual box system that is already saved in consts/lab.py
+  # and save automation logs to /tmp/AUTOMATION_LOGS
+  pytest --resultlog=/tmp/ -m sanity --lab=vbox --natbox=localhost testcases/
+
+  # Example 3: List (not execute) the test cases with "migrate" in the name
+  pytest --collect-only -k "migrate" --lab= testcases/
+
+
+Contribute
+----------
+
+- To contribute, use python3.4 for development in order to avoid producing
+  code that is incompatible with python3.4.
diff --git a/automated-pytest-suite/testcases/remove.txt b/automated-pytest-suite/__init__.py
similarity index 100%
rename from automated-pytest-suite/testcases/remove.txt
rename to automated-pytest-suite/__init__.py
diff --git a/automated-pytest-suite/conftest.py b/automated-pytest-suite/conftest.py
new file mode 100644
index 0000000..89a6454
--- /dev/null
+++ b/automated-pytest-suite/conftest.py
@@ -0,0 +1,693 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import logging
+import os
+from time import strftime, gmtime
+# import threading  # Used for formatting logger
+
+
+import pytest  # Don't remove.
Used in eval + +import setups +from consts.proj_vars import ProjVar +from utils.tis_log import LOG +from utils import parse_log + +tc_start_time = None +tc_end_time = None +has_fail = False +repeat_count = -1 +stress_count = -1 +count = -1 +no_teardown = False +tracebacks = [] +region = None +test_count = 0 +console_log = True + +################################ +# Process and log test results # +################################ + + +class MakeReport: + nodeid = None + instances = {} + + def __init__(self, item): + MakeReport.nodeid = item.nodeid + self.test_pass = None + self.test_results = {} + MakeReport.instances[item.nodeid] = self + + def update_results(self, call, report): + if report.failed: + global has_fail + has_fail = True + msg = "***Failure at test {}: {}".format(call.when, call.excinfo) + print(msg) + LOG.debug(msg + "\n***Details: {}".format(report.longrepr)) + global tracebacks + tracebacks.append(str(report.longrepr)) + self.test_results[call.when] = ['Failed', call.excinfo] + elif report.skipped: + sep = 'Skipped: ' + skipreason_list = str(call.excinfo).split(sep=sep)[1:] + skipreason_str = sep.join(skipreason_list) + self.test_results[call.when] = ['Skipped', skipreason_str] + elif report.passed: + self.test_results[call.when] = ['Passed', ''] + + def get_results(self): + return self.test_results + + @classmethod + def get_report(cls, item): + if item.nodeid == cls.nodeid: + return cls.instances[cls.nodeid] + else: + return cls(item) + + +class TestRes: + PASSNUM = 0 + FAILNUM = 0 + SKIPNUM = 0 + TOTALNUM = 0 + + +def _write_results(res_in_tests, test_name): + global tc_start_time + with open(ProjVar.get_var("TCLIST_PATH"), mode='a') as f: + f.write('\n{}\t{}\t{}'.format(res_in_tests, tc_start_time, test_name)) + global test_count + test_count += 1 + # reset tc_start and end time for next test case + tc_start_time = None + + +def pytest_runtest_makereport(item, call, __multicall__): + report = __multicall__.execute() + my_rep = MakeReport.get_report(item) + my_rep.update_results(call, report) + + test_name = item.nodeid.replace('::()::', + '::') # .replace('testcases/', '') + res_in_tests = '' + res = my_rep.get_results() + + # Write final result to test_results.log + if report.when == 'teardown': + res_in_log = 'Test Passed' + fail_at = [] + for key, val in res.items(): + if val[0] == 'Failed': + fail_at.append('test ' + key) + elif val[0] == 'Skipped': + res_in_log = 'Test Skipped\nReason: {}'.format(val[1]) + res_in_tests = 'SKIP' + break + if fail_at: + fail_at = ', '.join(fail_at) + res_in_log = 'Test Failed at {}'.format(fail_at) + + # Log test result + testcase_log(msg=res_in_log, nodeid=test_name, log_type='tc_res') + + if 'Test Passed' in res_in_log: + res_in_tests = 'PASS' + elif 'Test Failed' in res_in_log: + res_in_tests = 'FAIL' + if ProjVar.get_var('PING_FAILURE'): + setups.add_ping_failure(test_name=test_name) + + if not res_in_tests: + res_in_tests = 'UNKNOWN' + + # count testcases by status + TestRes.TOTALNUM += 1 + if res_in_tests == 'PASS': + TestRes.PASSNUM += 1 + elif res_in_tests == 'FAIL': + TestRes.FAILNUM += 1 + elif res_in_tests == 'SKIP': + TestRes.SKIPNUM += 1 + + _write_results(res_in_tests=res_in_tests, test_name=test_name) + + if repeat_count > 0: + for key, val in res.items(): + if val[0] == 'Failed': + global tc_end_time + tc_end_time = strftime("%Y%m%d %H:%M:%S", gmtime()) + _write_results(res_in_tests='FAIL', test_name=test_name) + TestRes.FAILNUM += 1 + if ProjVar.get_var('PING_FAILURE'): + setups.add_ping_failure(test_name=test_name) 
+ + try: + parse_log.parse_test_steps(ProjVar.get_var('LOG_DIR')) + except Exception as e: + LOG.warning( + "Unable to parse test steps. \nDetails: {}".format( + e.__str__())) + + pytest.exit( + "Skip rest of the iterations upon stress test failure") + + if no_teardown and report.when == 'call': + for key, val in res.items(): + if val[0] == 'Skipped': + break + else: + pytest.exit("No teardown and skip rest of the tests if any") + + return report + + +def pytest_runtest_setup(item): + global tc_start_time + # tc_start_time = setups.get_tis_timestamp(con_ssh) + tc_start_time = strftime("%Y%m%d %H:%M:%S", gmtime()) + print('') + message = "Setup started:" + testcase_log(message, item.nodeid, log_type='tc_setup') + # set test name for ping vm failure + test_name = 'test_{}'.format( + item.nodeid.rsplit('::test_', 1)[-1].replace('/', '_')) + ProjVar.set_var(TEST_NAME=test_name) + ProjVar.set_var(PING_FAILURE=False) + + +def pytest_runtest_call(item): + separator = \ + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + message = "Test steps started:" + testcase_log(message, item.nodeid, separator=separator, log_type='tc_start') + + +def pytest_runtest_teardown(item): + print('') + message = 'Teardown started:' + testcase_log(message, item.nodeid, log_type='tc_teardown') + + +def testcase_log(msg, nodeid, separator=None, log_type=None): + if separator is None: + separator = '-----------' + + print_msg = separator + '\n' + msg + logging_msg = '\n{}{} {}'.format(separator, msg, nodeid) + if console_log: + print(print_msg) + if log_type == 'tc_res': + global tc_end_time + tc_end_time = strftime("%Y%m%d %H:%M:%S", gmtime()) + LOG.tc_result(msg=msg, tc_name=nodeid) + elif log_type == 'tc_start': + LOG.tc_func_start(nodeid) + elif log_type == 'tc_setup': + LOG.tc_setup_start(nodeid) + elif log_type == 'tc_teardown': + LOG.tc_teardown_start(nodeid) + else: + LOG.debug(logging_msg) + + +######################## +# Command line options # +######################## +@pytest.mark.tryfirst +def pytest_configure(config): + config.addinivalue_line("markers", + "features(feature_name1, feature_name2, " + "...): mark impacted feature(s) for a test case.") + config.addinivalue_line("markers", + "priorities(, cpe_sanity, p2, ...): mark " + "priorities for a test case.") + config.addinivalue_line("markers", + "known_issue(LP-xxxx): mark known issue with " + "LP ID or description if no LP needed.") + + if config.getoption('help'): + return + + # Common reporting params + collect_all = config.getoption('collectall') + always_collect = config.getoption('alwayscollect') + session_log_dir = config.getoption('sessiondir') + resultlog = config.getoption('resultlog') + + # Test case params on installed system + testcase_config = config.getoption('testcase_config') + lab_arg = config.getoption('lab') + natbox_arg = config.getoption('natbox') + tenant_arg = config.getoption('tenant') + horizon_visible = config.getoption('horizon_visible') + is_vbox = config.getoption('is_vbox') + + global repeat_count + repeat_count = config.getoption('repeat') + global stress_count + stress_count = config.getoption('stress') + global count + if repeat_count > 0: + count = repeat_count + elif stress_count > 0: + count = stress_count + + global no_teardown + no_teardown = config.getoption('noteardown') + if repeat_count > 0 or no_teardown: + ProjVar.set_var(NO_TEARDOWN=True) + + collect_netinfo = config.getoption('netinfo') + + # Determine lab value. 
+ lab = natbox = None + if lab_arg: + lab = setups.get_lab_dict(lab_arg) + if natbox_arg: + natbox = setups.get_natbox_dict(natbox_arg) + + lab, natbox = setups.setup_testcase_config(testcase_config, lab=lab, + natbox=natbox) + tenant = tenant_arg.upper() if tenant_arg else 'TENANT1' + + # Log collection params + collect_all = True if collect_all else False + always_collect = True if always_collect else False + + # If floating ip cannot be reached, whether to try to ping/ssh + # controller-0 unit IP, etc. + if collect_netinfo: + ProjVar.set_var(COLLECT_SYS_NET_INFO=True) + + horizon_visible = True if horizon_visible else False + + if session_log_dir: + log_dir = session_log_dir + else: + # compute directory for all logs based on resultlog arg, lab, + # and timestamp on local machine + resultlog = resultlog if resultlog else os.path.expanduser("~") + if '/AUTOMATION_LOGS' in resultlog: + resultlog = resultlog.split(sep='/AUTOMATION_LOGS')[0] + resultlog = os.path.join(resultlog, 'AUTOMATION_LOGS') + lab_name = lab['short_name'] + time_stamp = strftime('%Y%m%d%H%M') + log_dir = '{}/{}/{}'.format(resultlog, lab_name, time_stamp) + os.makedirs(log_dir, exist_ok=True) + + # set global constants, which will be used for the entire test session, etc + ProjVar.init_vars(lab=lab, natbox=natbox, logdir=log_dir, tenant=tenant, + collect_all=collect_all, + always_collect=always_collect, + horizon_visible=horizon_visible) + + if lab.get('central_region'): + ProjVar.set_var(IS_DC=True, + PRIMARY_SUBCLOUD=config.getoption('subcloud')) + + if is_vbox: + ProjVar.set_var(IS_VBOX=True) + + config_logger(log_dir, console=console_log) + + # set resultlog save location + config.option.resultlog = ProjVar.get_var("PYTESTLOG_PATH") + + # Repeat test params + file_or_dir = config.getoption('file_or_dir') + origin_file_dir = list(file_or_dir) + if count > 1: + print("Repeat following tests {} times: {}".format(count, file_or_dir)) + del file_or_dir[:] + for f_or_d in origin_file_dir: + for i in range(count): + file_or_dir.append(f_or_d) + + +def pytest_addoption(parser): + testconf_help = "Absolute path for testcase config file. Template can be " \ + "found at automated-pytest-suite/stx-test_template.conf" + lab_help = "STX system to connect to. Valid value: 1) short_name or name " \ + "of an existing dict entry in consts.Labs; Or 2) OAM floating " \ + "ip of the STX system under test" + tenant_help = "Default tenant to use when unspecified. Valid values: " \ + "tenant1, tenant2, or admin" + natbox_help = "NatBox IP or name. If automated tests are executed from " \ + "NatBox, --natbox=localhost can be used. " \ + "If username/password are required to SSH to NatBox, " \ + "please specify them in test config file." + vbox_help = "Specify if StarlingX system is installed in virtual " \ + "environment." + collect_all_help = "Run collect all on STX system at the end of test " \ + "session if any test fails." + logdir_help = "Directory to store test session logs. If this is " \ + "specified, then --resultlog will be ignored." + stress_help = "Number of iterations to run specified testcase(s). 
Abort " \ + "rest of the test session on first failure" + count_help = "Repeat tests x times - NO stop on failure" + horizon_visible_help = "Display horizon on screen" + no_console_log = 'Print minimal console logs' + + # Test session options on installed and configured STX system: + parser.addoption('--testcase-config', action='store', + metavar='testcase_config', default=None, + help=testconf_help) + parser.addoption('--lab', action='store', metavar='lab', default=None, + help=lab_help) + parser.addoption('--tenant', action='store', metavar='tenantname', + default=None, help=tenant_help) + parser.addoption('--natbox', action='store', metavar='natbox', default=None, + help=natbox_help) + parser.addoption('--vm', '--vbox', action='store_true', dest='is_vbox', + help=vbox_help) + + # Debugging/Log collection options: + parser.addoption('--sessiondir', '--session_dir', '--session-dir', + action='store', dest='sessiondir', + metavar='sessiondir', default=None, help=logdir_help) + parser.addoption('--collectall', '--collect_all', '--collect-all', + dest='collectall', action='store_true', + help=collect_all_help) + parser.addoption('--alwayscollect', '--always-collect', '--always_collect', + dest='alwayscollect', + action='store_true', help=collect_all_help) + parser.addoption('--repeat', action='store', metavar='repeat', type=int, + default=-1, help=stress_help) + parser.addoption('--stress', metavar='stress', action='store', type=int, + default=-1, help=count_help) + parser.addoption('--no-teardown', '--no_teardown', '--noteardown', + dest='noteardown', action='store_true') + parser.addoption('--netinfo', '--net-info', dest='netinfo', + action='store_true', + help="Collect system networking info if scp keyfile fails") + parser.addoption('--horizon-visible', '--horizon_visible', + action='store_true', dest='horizon_visible', + help=horizon_visible_help) + parser.addoption('--noconsolelog', '--noconsole', '--no-console-log', + '--no_console_log', '--no-console', + '--no_console', action='store_true', dest='noconsolelog', + help=no_console_log) + + +def config_logger(log_dir, console=True): + # logger for log saved in file + file_name = log_dir + '/TIS_AUTOMATION.log' + logging.Formatter.converter = gmtime + log_format = '[%(asctime)s] %(lineno)-5d%(levelname)-5s %(threadName)-8s ' \ + '%(module)s.%(funcName)-8s:: %(message)s' + tis_formatter = logging.Formatter(log_format) + LOG.setLevel(logging.NOTSET) + + tmp_path = os.path.join(os.path.expanduser('~'), '.tmp_log') + # clear the tmp log with best effort so it wont keep growing + try: + os.remove(tmp_path) + except: + pass + logging.basicConfig(level=logging.NOTSET, format=log_format, + filename=tmp_path, filemode='w') + + # file handler: + file_handler = logging.FileHandler(file_name) + file_handler.setFormatter(tis_formatter) + file_handler.setLevel(logging.DEBUG) + LOG.addHandler(file_handler) + + # logger for stream output + console_level = logging.INFO if console else logging.CRITICAL + stream_hdler = logging.StreamHandler() + stream_hdler.setFormatter(tis_formatter) + stream_hdler.setLevel(console_level) + LOG.addHandler(stream_hdler) + + print("LOG DIR: {}".format(log_dir)) + + +def pytest_unconfigure(config): + # collect all if needed + if config.getoption('help'): + return + + try: + natbox_ssh = ProjVar.get_var('NATBOX_SSH') + natbox_ssh.close() + except: + pass + + version_and_patch = '' + try: + version_and_patch = setups.get_version_and_patch_info() + except Exception as e: + LOG.debug(e) + pass + log_dir = 
ProjVar.get_var('LOG_DIR') + if not log_dir: + try: + from utils.clients.ssh import ControllerClient + ssh_list = ControllerClient.get_active_controllers(fail_ok=True) + for con_ssh_ in ssh_list: + con_ssh_.close() + except: + pass + return + + log_dir = ProjVar.get_var('LOG_DIR') + if not log_dir: + try: + from utils.clients.ssh import ControllerClient + ssh_list = ControllerClient.get_active_controllers(fail_ok=True) + for con_ssh_ in ssh_list: + con_ssh_.close() + except: + pass + return + + try: + tc_res_path = log_dir + '/test_results.log' + build_info = ProjVar.get_var('BUILD_INFO') + build_id = build_info.get('BUILD_ID', '') + build_job = build_info.get('JOB', '') + build_server = build_info.get('BUILD_HOST', '') + system_config = ProjVar.get_var('SYS_TYPE') + session_str = '' + total_exec = TestRes.PASSNUM + TestRes.FAILNUM + # pass_rate = fail_rate = '0' + if total_exec > 0: + pass_rate = "{}%".format( + round(TestRes.PASSNUM * 100 / total_exec, 2)) + fail_rate = "{}%".format( + round(TestRes.FAILNUM * 100 / total_exec, 2)) + with open(tc_res_path, mode='a') as f: + # Append general info to result log + f.write('\n\nLab: {}\n' + 'Build ID: {}\n' + 'Job: {}\n' + 'Build Server: {}\n' + 'System Type: {}\n' + 'Automation LOGs DIR: {}\n' + 'Ends at: {}\n' + '{}' # test session id and tag + '{}'.format(ProjVar.get_var('LAB_NAME'), build_id, + build_job, build_server, system_config, + ProjVar.get_var('LOG_DIR'), tc_end_time, + session_str, version_and_patch)) + # Add result summary to beginning of the file + f.write( + '\nSummary:\nPassed: {} ({})\nFailed: {} ({})\nTotal ' + 'Executed: {}\n'. + format(TestRes.PASSNUM, pass_rate, TestRes.FAILNUM, + fail_rate, total_exec)) + if TestRes.SKIPNUM > 0: + f.write('------------\nSkipped: {}'.format(TestRes.SKIPNUM)) + + LOG.info("Test Results saved to: {}".format(tc_res_path)) + with open(tc_res_path, 'r') as fin: + print(fin.read()) + except Exception as e: + LOG.exception( + "Failed to add session summary to test_results.py. " + "\nDetails: {}".format(e.__str__())) + # Below needs con_ssh to be initialized + try: + from utils.clients.ssh import ControllerClient + con_ssh = ControllerClient.get_active_controller() + except: + LOG.warning("No con_ssh found") + return + + try: + parse_log.parse_test_steps(ProjVar.get_var('LOG_DIR')) + except Exception as e: + LOG.warning( + "Unable to parse test steps. \nDetails: {}".format(e.__str__())) + + if test_count > 0 and (ProjVar.get_var('ALWAYS_COLLECT') or ( + has_fail and ProjVar.get_var('COLLECT_ALL'))): + # Collect tis logs if collect all required upon test(s) failure + # Failure on collect all would not change the result of the last test + # case. + try: + setups.collect_tis_logs(con_ssh) + except Exception as e: + LOG.warning("'collect all' failed. 
{}".format(e.__str__())) + + ssh_list = ControllerClient.get_active_controllers(fail_ok=True, + current_thread_only=True) + for con_ssh_ in ssh_list: + try: + con_ssh_.close() + except: + pass + + +def pytest_collection_modifyitems(items): + # print("Collection modify") + move_to_last = [] + absolute_last = [] + + for item in items: + # re-order tests: + trylast_marker = item.get_closest_marker('trylast') + abslast_marker = item.get_closest_marker('abslast') + + if abslast_marker: + absolute_last.append(item) + elif trylast_marker: + move_to_last.append(item) + + priority_marker = item.get_closest_marker('priorities') + if priority_marker is not None: + priorities = priority_marker.args + for priority in priorities: + item.add_marker(eval("pytest.mark.{}".format(priority))) + + feature_marker = item.get_closest_marker('features') + if feature_marker is not None: + features = feature_marker.args + for feature in features: + item.add_marker(eval("pytest.mark.{}".format(feature))) + + # known issue marker + known_issue_mark = item.get_closest_marker('known_issue') + if known_issue_mark is not None: + issue = known_issue_mark.args[0] + msg = "{} has a workaround due to {}".format(item.nodeid, issue) + print(msg) + LOG.debug(msg=msg) + item.add_marker(eval("pytest.mark.known_issue")) + + # add dc maker to all tests start with test_dc_xxx + dc_maker = item.get_marker('dc') + if not dc_maker and 'test_dc_' in item.nodeid: + item.add_marker(pytest.mark.dc) + + # add trylast tests to the end + for item in move_to_last: + items.remove(item) + items.append(item) + + for i in absolute_last: + items.remove(i) + items.append(i) + + +def pytest_generate_tests(metafunc): + # Prefix 'remote_cli' to test names so they are reported as a different + # testcase + if ProjVar.get_var('REMOTE_CLI'): + metafunc.parametrize('prefix_remote_cli', ['remote_cli']) + + +############################################################## +# Manipulating fixture orders based on following pytest rules +# session > module > class > function +# autouse > non-autouse +# alphabetic after full-filling above criteria +# +# Orders we want on fixtures of same scope: +# check_alarms > delete_resources > config_host +############################################################# + +@pytest.fixture(scope='session') +def check_alarms(): + LOG.debug("Empty check alarms") + return + + +@pytest.fixture(scope='session') +def config_host_class(): + LOG.debug("Empty config host class") + return + + +@pytest.fixture(scope='session') +def config_host_module(): + LOG.debug("Empty config host module") + + +@pytest.fixture(autouse=True) +def a1_fixture(check_alarms): + return + + +@pytest.fixture(scope='module', autouse=True) +def c1_fixture(config_host_module): + return + + +@pytest.fixture(scope='class', autouse=True) +def c2_fixture(config_host_class): + return + + +@pytest.fixture(scope='session', autouse=True) +def prefix_remote_cli(): + return + + +def __params_gen(index): + return 'iter{}'.format(index) + + +@pytest.fixture(scope='session') +def global_setup(): + os.makedirs(ProjVar.get_var('TEMP_DIR'), exist_ok=True) + os.makedirs(ProjVar.get_var('PING_FAILURE_DIR'), exist_ok=True) + os.makedirs(ProjVar.get_var('GUEST_LOGS_DIR'), exist_ok=True) + + if region: + setups.set_region(region=region) + + +##################################### +# End of fixture order manipulation # +##################################### + + +def pytest_sessionfinish(): + if ProjVar.get_var('TELNET_THREADS'): + threads, end_event = ProjVar.get_var('TELNET_THREADS') + 
end_event.set() + for thread in threads: + thread.join() + + if repeat_count > 0 and has_fail: + # _thread.interrupt_main() + print('Printing traceback: \n' + '\n'.join(tracebacks)) + pytest.exit("\n========== Test failed - " + "Test session aborted without teardown to leave the " + "system in state ==========") + + if no_teardown: + pytest.exit( + "\n========== Test session stopped without teardown after first " + "test executed ==========") diff --git a/automated-pytest-suite/consts/__init__.py b/automated-pytest-suite/consts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/consts/auth.py b/automated-pytest-suite/consts/auth.py new file mode 100755 index 0000000..668a6aa --- /dev/null +++ b/automated-pytest-suite/consts/auth.py @@ -0,0 +1,348 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +class Tenant: + __PASSWORD = 'St8rlingX*' + __REGION = 'RegionOne' + __URL_PLATFORM = 'http://192.168.204.2:5000/v3/' + __URL_CONTAINERS = 'http://keystone.openstack.svc.cluster.local/v3' + __DC_MAP = {'SystemController': {'region': 'SystemController', + 'auth_url': __URL_PLATFORM}, + 'RegionOne': {'region': 'RegionOne', + 'auth_url': __URL_PLATFORM}} + + # Platform openstack user - admin + __ADMIN_PLATFORM = { + 'user': 'admin', + 'password': __PASSWORD, + 'tenant': 'admin', + 'domain': 'Default', + 'platform': True, + } + + # Containerized openstack users - admin, and two test users/tenants + __ADMIN = { + 'user': 'admin', + 'password': __PASSWORD, + 'tenant': 'admin', + 'domain': 'Default' + } + + __TENANT1 = { + 'user': 'tenant1', + 'password': __PASSWORD, + 'tenant': 'tenant1', + 'domain': 'Default', + 'nova_keypair': 'keypair-tenant1' + } + + __TENANT2 = { + 'user': 'tenant2', + 'password': __PASSWORD, + 'tenant': 'tenant2', + 'domain': 'Default', + 'nova_keypair': 'keypair-tenant2' + } + + __tenants = { + 'ADMIN_PLATFORM': __ADMIN_PLATFORM, + 'ADMIN': __ADMIN, + 'TENANT1': __TENANT1, + 'TENANT2': __TENANT2} + + @classmethod + def add_dc_region(cls, region_info): + cls.__DC_MAP.update(region_info) + + @classmethod + def set_platform_url(cls, url, central_region=False): + """ + Set auth_url for platform keystone + Args: + url (str): + central_region (bool) + """ + if central_region: + cls.__DC_MAP.get('SystemController')['auth_url'] = url + cls.__DC_MAP.get('RegionOne')['auth_url'] = url + else: + cls.__URL_PLATFORM = url + + @classmethod + def set_region(cls, region): + """ + Set default region for all tenants + Args: + region (str): e.g., SystemController, subcloud-2 + + """ + cls.__REGION = region + + @classmethod + def add(cls, tenantname, dictname=None, username=None, password=None, + region=None, auth_url=None, domain='Default'): + tenant_dict = dict(tenant=tenantname) + tenant_dict['user'] = username if username else tenantname + tenant_dict['password'] = password if password else cls.__PASSWORD + tenant_dict['domain'] = domain + if region: + tenant_dict['region'] = region + if auth_url: + tenant_dict['auth_url'] = auth_url + + dictname = dictname.upper() if dictname else tenantname.upper().\ + replace('-', '_') + cls.__tenants[dictname] = tenant_dict + return tenant_dict + + __primary = 'TENANT1' + + @classmethod + def get(cls, tenant_dictname, dc_region=None): + """ + Get tenant auth dict that can be passed to auth_info in cli cmd + Args: + tenant_dictname (str): e.g., tenant1, TENANT2, system_controller + dc_region (None|str): key for dc_region added via add_dc_region. 
+ Used to update auth_url and region + e.g., SystemController, RegionOne, subcloud-2 + + Returns (dict): mutable dictionary. If changed, DC map or tenant dict + will update as well. + + """ + tenant_dictname = tenant_dictname.upper().replace('-', '_') + tenant_dict = cls.__tenants.get(tenant_dictname) + if dc_region: + region_dict = cls.__DC_MAP.get(dc_region, None) + if not region_dict: + raise ValueError( + 'Distributed cloud region {} is not added to ' + 'DC_MAP yet. DC_MAP: {}'.format(dc_region, cls.__DC_MAP)) + tenant_dict.update({'region': region_dict['region']}) + else: + tenant_dict.pop('region', None) + + return tenant_dict + + @classmethod + def get_region_and_url(cls, platform=False, dc_region=None): + auth_region_and_url = { + 'auth_url': + cls.__URL_PLATFORM if platform else cls.__URL_CONTAINERS, + 'region': cls.__REGION + } + + if dc_region: + region_dict = cls.__DC_MAP.get(dc_region, None) + if not region_dict: + raise ValueError( + 'Distributed cloud region {} is not added to DC_MAP yet. ' + 'DC_MAP: {}'.format(dc_region, cls.__DC_MAP)) + auth_region_and_url['region'] = region_dict.get('region') + if platform: + auth_region_and_url['auth_url'] = region_dict.get('auth_url') + + return auth_region_and_url + + @classmethod + def set_primary(cls, tenant_dictname): + """ + should be called after _set_region and _set_url + Args: + tenant_dictname (str): Tenant dict name + + Returns: + + """ + cls.__primary = tenant_dictname.upper() + + @classmethod + def get_primary(cls): + return cls.get(tenant_dictname=cls.__primary) + + @classmethod + def get_secondary(cls): + secondary = 'TENANT1' if cls.__primary != 'TENANT1' else 'TENANT2' + return cls.get(tenant_dictname=secondary) + + @classmethod + def update(cls, tenant_dictname, username=None, password=None, tenant=None, + **kwargs): + tenant_dict = cls.get(tenant_dictname) + + if not isinstance(tenant_dict, dict): + raise ValueError("{} dictionary does not exist in " + "consts/auth.py".format(tenant_dictname)) + + if not username and not password and not tenant and not kwargs: + raise ValueError("Please specify username, password, tenant, " + "and/or domain to update for {} dict". 
+ format(tenant_dictname)) + + if username: + kwargs['user'] = username + if password: + kwargs['password'] = password + if tenant: + kwargs['tenant'] = tenant + tenant_dict.update(kwargs) + cls.__tenants[tenant_dictname] = tenant_dict + + @classmethod + def get_dc_map(cls): + return cls.__DC_MAP + + +class HostLinuxUser: + + __SYSADMIN = { + 'user': 'sysadmin', + 'password': 'St8rlingX*' + } + + @classmethod + def get_user(cls): + return cls.__SYSADMIN['user'] + + @classmethod + def get_password(cls): + return cls.__SYSADMIN['password'] + + @classmethod + def get_home(cls): + return cls.__SYSADMIN.get('home', '/home/{}'.format(cls.get_user())) + + @classmethod + def set_user(cls, username): + cls.__SYSADMIN['user'] = username + + @classmethod + def set_password(cls, password): + cls.__SYSADMIN['password'] = password + + @classmethod + def set_home(cls, home): + if home: + cls.__SYSADMIN['home'] = home + + +class Guest: + CREDS = { + 'tis-centos-guest': { + 'user': 'root', + 'password': 'root' + }, + + 'cgcs-guest': { + 'user': 'root', + 'password': 'root' + }, + + 'ubuntu': { + 'user': 'ubuntu', + 'password': None + }, + + 'centos_6': { + 'user': 'centos', + 'password': None + }, + + 'centos_7': { + 'user': 'centos', + 'password': None + }, + + # This image has some issue where it usually fails to boot + 'opensuse_13': { + 'user': 'root', + 'password': None + }, + + # OPV image has root/root enabled + 'rhel': { + 'user': 'root', + 'password': 'root' + }, + + 'cirros': { + 'user': 'cirros', + 'password': 'cubswin:)' + }, + + 'win_2012': { + 'user': 'Administrator', + 'password': 'Li69nux*' + }, + + 'win_2016': { + 'user': 'Administrator', + 'password': 'Li69nux*' + }, + + 'ge_edge': { + 'user': 'root', + 'password': 'root' + }, + + 'vxworks': { + 'user': 'root', + 'password': 'root' + }, + + } + + @classmethod + def set_user(cls, image_name, username): + cls.CREDS[image_name]['user'] = username + + @classmethod + def set_password(cls, image_name, password): + cls.CREDS[image_name]['password'] = password + + +class TestFileServer: + # Place holder for shared file server in future. + SERVER = 'server_name_or_ip_that_can_ssh_to' + USER = 'username' + PASSWORD = 'password' + HOME = 'my_home' + HOSTNAME = 'hostname' + PROMPT = r'[\[]?.*@.*\$[ ]?' + + +class CliAuth: + + __var_dict = { + 'OS_AUTH_URL': 'http://192.168.204.2:5000/v3', + 'OS_ENDPOINT_TYPE': 'internalURL', + 'CINDER_ENDPOINT_TYPE': 'internalURL', + 'OS_USER_DOMAIN_NAME': 'Default', + 'OS_PROJECT_DOMAIN_NAME': 'Default', + 'OS_IDENTITY_API_VERSION': '3', + 'OS_REGION_NAME': 'RegionOne', + 'OS_INTERFACE': 'internal', + 'HTTPS': False, + 'OS_KEYSTONE_REGION_NAME': None, + } + + @classmethod + def set_vars(cls, **kwargs): + + for key in kwargs: + cls.__var_dict[key.upper()] = kwargs[key] + + @classmethod + def get_var(cls, var_name): + var_name = var_name.upper() + valid_vars = cls.__var_dict.keys() + if var_name not in valid_vars: + raise ValueError("Invalid var_name. Valid vars: {}". + format(valid_vars)) + + return cls.__var_dict[var_name] diff --git a/automated-pytest-suite/consts/cli_errs.py b/automated-pytest-suite/consts/cli_errs.py new file mode 100644 index 0000000..1450a35 --- /dev/null +++ b/automated-pytest-suite/consts/cli_errs.py @@ -0,0 +1,192 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +class VCPUSchedulerErr: + CANNOT_SET_VCPU0 = "vcpu 0 cannot be specified" + VCPU_VAL_OUT_OF_RANGE = "vcpu value out of range" + INVALID_PRIORITY = "priority must be between 1-99" + PRIORITY_NOT_INTEGER = "priority must be an integer" + INVALID_FORMAT = "invalid format" + UNSUPPORTED_POLICY = "not a supported policy" + POLICY_MUST_SPECIFIED_LAST = "policy/priority for all vcpus must be " \ + "specified last" + MISSING_PARAMETER = "missing required parameter" + TOO_MANY_PARAMETERS = "too many parameters" + VCPU_MULTIPLE_ASSIGNMENT = "specified multiple times, specification is " \ + "ambiguous" + CPU_MODEL_UNAVAIL = "No valid host was found.*Host VCPU model.*required.*" + CPU_MODEL_CONFLICT = "Image vCPU model is not permitted to override " \ + "configuration set against the flavor" + + +class NumaErr: + GENERAL_ERR_PIKE = 'Requested instance NUMA topology cannot fit the ' \ + 'given host NUMA topology' + # NUMA_AFFINITY_MISMATCH = " not match requested NUMA: {}" + NUMA_VSWITCH_MISMATCH = 'vswitch not configured.* does not match ' \ + 'requested NUMA' + NUMA_NODE_EXCLUDED = "NUMA: {} excluded" + # UNINITIALIZED = '(NUMATopologyFilter) Uninitialized' + TWO_NUMA_ONE_VSWITCH = 'vswitch not configured' + FLV_UNDEVISIBLE = 'ERROR (Conflict): flavor vcpus not evenly divisible ' \ + 'by the specified hw:numa_nodes value' + FLV_CPU_OR_MEM_UNSPECIFIED = 'ERROR (Conflict): CPU and memory ' \ + 'allocation must be provided for all ' \ + 'NUMA nodes' + INSUFFICIENT_CORES = 'Not enough free cores to schedule the instance' + + +class MinCPUErr: + VAL_LARGER_THAN_VCPUS = "min_vcpus must be less than or equal to " \ + "the flavor vcpus value" + VAL_LESS_THAN_1 = "min_vcpus must be greater than or equal to 1" + CPU_POLICY_NOT_DEDICATED = "min_vcpus is only valid when hw:cpu_policy " \ + "is dedicated" + + +class ScaleErr: + SCALE_LIMIT_HIT = "When scaling, cannot scale beyond limits" + + +class CpuAssignment: + VSWITCH_TOO_MANY_CORES = "The vswitch function can only be assigned up to" \ + " 8 core" + TOTAL_TOO_MANY_CORES = "More total logical cores requested than present " \ + "on 'Processor {}'" + NO_VM_CORE = "There must be at least one unused core for VMs." + VSWITCH_INSUFFICIENT_CORES = "The vswitch function must have at least {} " \ + "core(s)" + + +class CPUThreadErr: + INVALID_POLICY = "invalid hw:cpu_thread_policy '{}', must be one of " \ + "prefer, isolate, require" + DEDICATED_CPU_REQUIRED_FLAVOR = 'ERROR (Conflict): hw:cpu_thread_policy ' \ + 'is only valid when hw:cpu_policy is ' \ + 'dedicated. Either unset ' \ + 'hw:cpu_thread_policy or set ' \ + 'hw:cpu_policy to dedicated.' + DEDICATED_CPU_REQUIRED_BOOT_VM = 'ERROR (BadRequest): Cannot set cpu ' \ + 'thread pinning policy in a non ' \ + 'dedicated ' \ + 'cpu pinning policy' + VCPU_NUM_UNDIVISIBLE = "(NUMATopologyFilter) Cannot use 'require' cpu " \ + "threads policy as requested #VCPUs: {}, " \ + "is not divisible by number of threads: 2" + INSUFFICIENT_CORES_FOR_ISOLATE = "{}: (NUMATopologyFilter) Cannot use " \ + "isolate cpu thread policy as requested " \ + "VCPUS: {} is greater than available " \ + "CPUs with all siblings free" + HT_HOST_UNAVAIL = "(NUMATopologyFilter) Host not useable. Requested " \ + "threads policy: '{}'; from flavor or image " \ + "is not allowed on non-hyperthreaded host" + UNSET_SHARED_VCPU = "Cannot set hw:cpu_thread_policy to {} if " \ + "hw:wrs:shared_vcpu is set. 
Either unset " \ + "hw:cpu_thread_policy, set it to prefer, or unset " \ + "hw:wrs:shared_vcpu" + UNSET_MIN_VCPUS = "Cannot set hw:cpu_thread_policy to {} if " \ + "hw:wrs:min_vcpus is set. Either unset " \ + "hw:cpu_thread_policy, set it to another policy, " \ + "or unset hw:wrs:min_vcpus" + CONFLICT_FLV_IMG = "Image property 'hw_cpu_thread_policy' is not " \ + "permitted to override CPU thread pinning policy " \ + "set against the flavor" + + +class CPUPolicyErr: + CONFLICT_FLV_IMG = "Image property 'hw_cpu_policy' is not permitted to " \ + "override CPU pinning policy set against " \ + "the flavor " + + +class SharedCPUErr: + DEDICATED_CPU_REQUIRED = "hw:wrs:shared_vcpu is only valid when " \ + "hw:cpu_policy is dedicated" + INVALID_VCPU_ID = "hw:wrs:shared_vcpu must be greater than or equal to 0" + MORE_THAN_FLAVOR = "hw:wrs:shared_vcpu value ({}) must be less than " \ + "flavor vcpus ({})" + + +class ResizeVMErr: + RESIZE_ERR = "Error resizing server" + SHARED_NOT_ENABLED = 'Shared vCPU not enabled .*, required by instance ' \ + 'cell {}' + + +class ColdMigErr: + HT_HOST_REQUIRED = "(NUMATopologyFilter) Host not useable. Requested " \ + "threads policy: '[{}, {}]'; from flavor or " \ + "image is not allowed on non-hyperthreaded host" + + +class LiveMigErr: + BLOCK_MIG_UNSUPPORTED = "is not on local storage: Block migration can " \ + "not be used with shared storage" + GENERAL_NO_HOST = "No valid host was found. There are not enough hosts " \ + "available." + BLOCK_MIG_UNSUPPORTED_LVM = 'Block live migration is not supported for ' \ + 'hosts with LVM backed storage' + LVM_PRECHECK_ERROR = 'Live migration can not be used with LVM backed ' \ + 'storage except a booted from volume VM ' \ + 'which does not have a local disk' + + +class NetworkingErr: + INVALID_VXLAN_VNI_RANGE = "exceeds 16777215" + INVALID_MULTICAST_IP_ADDRESS = "is not a valid multicast IP address." + INVALID_VXLAN_PROVISION_PORTS = "Invalid input for port" + VXLAN_TTL_RANGE_MISSING = "VXLAN time-to-live attribute missing" + VXLAN_TTL_RANGE_TOO_LARGE = "is too large - must be no larger than '255'." + VXLAN_TTL_RANGE_TOO_SMALL = "is too small - must be at least '1'." 
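+    # The strings in this class, like the rest of this module, are fragments
+    # of expected CLI error output; negative tests typically assert that the
+    # relevant fragment appears in the captured command output. A minimal,
+    # hypothetical sketch (the command and its arguments are illustrative
+    # only, not taken from an actual testcase):
+    #     output = cli.openstack('<command under test>', fail_ok=True)[1]
+    #     assert NetworkingErr.INVALID_VXLAN_VNI_RANGE in output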
+ OVERLAP_SEGMENTATION_RANGE = "segmentation id range overlaps with" + INVALID_MTU_VALUE = "requires an interface MTU value of at least" + VXLAN_MISSING_IP_ON_INTERFACE = "requires an IP address" + WRONG_IF_ADDR_MODE = "interface address mode must be 'static'" + SET_IF_ADDR_MODE_WHEN_IP_EXIST = "addresses still exist on interfac" + NULL_IP_ADDR = "Address must not be null" + NULL_NETWORK_ADDR = "Network must not be null" + NULL_GATEWAY_ADDR = "Gateway address must not be null" + NULL_HOST_PARTION_ADDR = "Host bits must not be zero" + NOT_UNICAST_ADDR = "Address must be a unicast address" + NOT_BROADCAST_ADDR = "Address cannot be the network broadcast address" + DUPLICATE_IP_ADDR = "already exists" + INVALID_IP_OR_PREFIX = "Invalid IP address and prefix" + INVALID_IP_NETWORK = "Invalid IP network" + ROUTE_GATEWAY_UNREACHABLE = "not reachable" + IP_VERSION_NOT_MATCH = "Network and gateway IP versions must match" + GATEWAY_IP_IN_SUBNET = "Gateway address must not be within destination " \ + "subnet" + NETWORK_IP_EQUAL_TO_GATEWAY = "Network and gateway IP addresses must be " \ + "different" + + +class PciAddrErr: + NONE_ZERO_DOMAIN = 'Only domain 0000 is supported' + LARGER_THAN_MAX_BUS = 'PCI bus maximum value is 8' + NONE_ZERO_FUNCTION = 'Only function 0 is supported' + RESERVED_SLOTS_BUS0 = 'Slots 0,1 are reserved for PCI bus 0' + RESERVED_SLOT_ANY_BUS = 'Slots 0 is reserved for any PCI bus' + LARGER_THAN_MAX_SLOT = 'PCI slot maximum value is 31' + BAD_FORMAT = 'Bad PCI address format' + WRONG_BUS_VAL = 'Wrong bus value for PCI address' + + +class SrvGrpErr: + EXCEEDS_GRP_SIZE = 'Action would result in server group {} exceeding the ' \ + 'group size of {}' + HOST_UNAVAIL_ANTI_AFFINITY = '(ServerGroupAntiAffinityFilter) ' \ + 'Anti-affinity server group specified, ' \ + 'but this host is already used by that group' + + +class CpuRtErr: + RT_AND_ORD_REQUIRED = 'Realtime policy needs vCPU.* mask configured with ' \ + 'at least 1 RT vCPU and 1 ordinary vCPU' + DED_CPU_POL_REQUIRED = 'Cannot set realtime policy in a non dedicated cpu' \ + ' pinning policy' + RT_MASK_SHARED_VCPU_CONFLICT = 'hw:wrs:shared_vcpu .* is not a subset of ' \ + 'non-realtime vCPUs' diff --git a/automated-pytest-suite/consts/filepaths.py b/automated-pytest-suite/consts/filepaths.py new file mode 100755 index 0000000..aa40aa1 --- /dev/null +++ b/automated-pytest-suite/consts/filepaths.py @@ -0,0 +1,55 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +class StxPath: + TIS_UBUNTU_PATH = '~/userdata/ubuntu_if_config.sh' + TIS_CENTOS_PATH = '~/userdata/centos_if_config.sh' + USERDATA = '~/userdata/' + IMAGES = '~/images/' + HEAT = '~/heat/' + BACKUPS = '/opt/backups' + CUSTOM_HEAT_TEMPLATES = '~/custom_heat_templates/' + HELM_CHARTS_DIR = '/www/pages/helm_charts/' + DOCKER_CONF = '/etc/docker-distribution/registry/config.yml' + DOCKER_REPO = '/var/lib/docker-distribution/docker/registry/v2/repositories' + + +class VMPath: + VM_IF_PATH_UBUNTU = '/etc/network/interfaces.d/' + ETH_PATH_UBUNTU = '/etc/network/interfaces.d/{}.cfg' + # Below two paths are common for CentOS, OpenSUSE, and RHEL + VM_IF_PATH_CENTOS = '/etc/sysconfig/network-scripts/' + ETH_PATH_CENTOS = '/etc/sysconfig/network-scripts/ifcfg-{}' + + # Centos paths for ipv4: + RT_TABLES = '/etc/iproute2/rt_tables' + ETH_RT_SCRIPT = '/etc/sysconfig/network-scripts/route-{}' + ETH_RULE_SCRIPT = '/etc/sysconfig/network-scripts/rule-{}' + ETH_ARP_ANNOUNCE = '/proc/sys/net/ipv4/conf/{}/arp_announce' + ETH_ARP_FILTER = '/proc/sys/net/ipv4/conf/{}/arp_filter' + + +class UserData: + ADDUSER_TO_GUEST = 'cloud_config_adduser.txt' + DPDK_USER_DATA = 'dpdk_user_data.txt' + + +class TestServerPath: + USER_DATA = '/home/svc-cgcsauto/userdata/' + TEST_SCRIPT = '/home/svc-cgcsauto/test_scripts/' + CUSTOM_HEAT_TEMPLATES = '/sandbox/custom_heat_templates/' + CUSTOM_APPS = '/sandbox/custom_apps/' + + +class PrivKeyPath: + OPT_PLATFORM = '/opt/platform/id_rsa' + SYS_HOME = '~/.ssh/id_rsa' + + +class SysLogPath: + DC_MANAGER = '/var/log/dcmanager/dcmanager.log' + DC_ORCH = '/var/log/dcorch/dcorch.log' diff --git a/automated-pytest-suite/consts/horizon.py b/automated-pytest-suite/consts/horizon.py new file mode 100644 index 0000000..1712512 --- /dev/null +++ b/automated-pytest-suite/consts/horizon.py @@ -0,0 +1,8 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +test_result = False diff --git a/automated-pytest-suite/consts/lab.py b/automated-pytest-suite/consts/lab.py new file mode 100755 index 0000000..2433353 --- /dev/null +++ b/automated-pytest-suite/consts/lab.py @@ -0,0 +1,162 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +class Labs: + # Place for existing stx systems for convenience. + # --lab can be used in cmdline specify an existing system + + EXAMPLE = { + 'short_name': 'my_server', + 'name': 'my_server.com', + 'floating ip': '10.10.10.2', + 'controller-0 ip': '10.10.10.3', + 'controller-1 ip': '10.10.10.4', + } + + +def update_lab(lab_dict_name=None, lab_name=None, floating_ip=None, **kwargs): + """ + Update/Add lab dict params for specified lab + Args: + lab_dict_name (str|None): + lab_name (str|None): lab short_name. 
This is used only if + lab_dict_name is not specified + floating_ip (str|None): + **kwargs: Some possible keys: subcloud-1, name, etc + + Returns (dict): updated lab dict + + """ + + if not lab_name and not lab_dict_name: + from consts.proj_vars import ProjVar + lab_name = ProjVar.get_var('LAB').get('short_name', None) + if not lab_name: + raise ValueError("lab_dict_name or lab_name needs to be specified") + + if floating_ip: + kwargs.update(**{'floating ip': floating_ip}) + + if not kwargs: + raise ValueError("Please specify floating_ip and/or kwargs") + + if not lab_dict_name: + attr_names = [attr for attr in dir(Labs) if not attr.startswith('__')] + lab_names = [getattr(Labs, attr).get('short_name') for attr in + attr_names] + lab_index = lab_names.index(lab_name.lower().strip()) + lab_dict_name = attr_names[lab_index] + else: + lab_dict_name = lab_dict_name.upper().replace('-', '_') + + lab_dict = getattr(Labs, lab_dict_name) + lab_dict.update(kwargs) + return lab_dict + + +def get_lab_dict(lab, key='short_name'): + """ + + Args: + lab: lab name or fip + key: unique identifier to locate a lab. Valid values: short_name, + name, floating ip + + Returns (dict|None): lab dict or None if no matching lab found + """ + __lab_attr_list = [attr for attr in dir(Labs) if not attr.startswith('__')] + __lab_list = [getattr(Labs, attr) for attr in __lab_attr_list] + __lab_list = [lab for lab in __lab_list if isinstance(lab, dict)] + + lab_info = None + for lab_ in __lab_list: + if lab.lower().replace('-', '_') == lab_.get(key).lower().replace('-', + '_'): + lab_info = lab_ + break + + return lab_info + + +def add_lab_entry(floating_ip, dict_name=None, short_name=None, name=None, + **kwargs): + """ + Add a new lab dictionary to Labs class + Args: + floating_ip (str): floating ip of a lab to be added + dict_name: name of the entry, such as 'PV0' + short_name: short name of the TiS system, such as ip_1_4 + name: name of the STX system, such as 'yow-cgcs-pv-0' + **kwargs: other information of the lab such as controllers' ips, etc + + Returns: + dict: lab dict added to Labs class + + """ + for attr in dir(Labs): + lab = getattr(Labs, attr) + if isinstance(lab, dict): + if lab['floating ip'] == floating_ip: + raise ValueError( + "Entry for {} already exists in Labs class!".format( + floating_ip)) + + if dict_name and dict_name in dir(Labs): + raise ValueError( + "Entry for {} already exists in Labs class!".format(dict_name)) + + if not short_name: + short_name = floating_ip + + if not name: + name = floating_ip + + if not dict_name: + dict_name = floating_ip + + lab_dict = {'name': name, + 'short_name': short_name, + 'floating ip': floating_ip, + } + + lab_dict.update(kwargs) + setattr(Labs, dict_name, lab_dict) + return lab_dict + + +class NatBoxes: + # Place for existing NatBox that are already configured + NAT_BOX_HW_EXAMPLE = { + 'name': 'nat_hw', + 'ip': '10.10.10.10', + 'user': 'natbox_user', + 'password': 'natbox_password' + } + + # Following example when localhost is configured as natbox, and test cases + # are also ran from same localhost + NAT_BOX_VBOX_EXAMPLE = { + 'name': 'localhost', + 'ip': 'localhost', + 'user': None, + 'password': None, + } + + @staticmethod + def add_natbox(ip, user=None, password=None, prompt=None): + user = user if user else 'svc-cgcsauto' + password = password if password else ')OKM0okm' + + nat_dict = {'ip': ip, + 'name': ip, + 'user': user, + 'password': password, + } + if prompt: + nat_dict['prompt'] = prompt + setattr(NatBoxes, 'NAT_NEW', nat_dict) + return 
nat_dict diff --git a/automated-pytest-suite/consts/proj_vars.py b/automated-pytest-suite/consts/proj_vars.py new file mode 100644 index 0000000..3479dfb --- /dev/null +++ b/automated-pytest-suite/consts/proj_vars.py @@ -0,0 +1,87 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +# Please DO NOT import any modules + + +class ProjVar: + __var_dict = {'BUILD_PATH': None, + 'LOG_DIR': None, + 'SOURCE_OPENRC': False, + 'SW_VERSION': [], + 'PATCH': None, + 'SESSION_ID': None, + 'CGCS_DB': True, + 'IS_SIMPLEX': False, + 'KEYSTONE_DEBUG': False, + 'TEST_NAME': None, + 'PING_FAILURE': False, + 'LAB': None, + 'ALWAYS_COLLECT': False, + 'REGION': 'RegionOne', + 'COLLECT_TELNET': False, + 'TELNET_THREADS': None, + 'SYS_TYPE': None, + 'COLLECT_SYS_NET_INFO': False, + 'IS_VBOX': False, + 'RELEASE': 'R6', + 'REMOTE_CLI': False, + 'USER_FILE_DIR': '~/', + 'NO_TEARDOWN': False, + 'VSWITCH_TYPE': None, + 'IS_DC': False, + 'PRIMARY_SUBCLOUD': None, + 'BUILD_INFO': {}, + 'TEMP_DIR': '', + 'INSTANCE_BACKING': {}, + 'OPENSTACK_DEPLOYED': None, + 'DEFAULT_INSTANCE_BACKING': None, + 'STX_KEYFILE_PATH': '~/.ssh/id_rsa' + } + + @classmethod + def init_vars(cls, lab, natbox, logdir, tenant, collect_all, always_collect, + horizon_visible): + + labname = lab['short_name'] + + cls.__var_dict.update(**{ + 'NATBOX_KEYFILE_PATH': '~/priv_keys/keyfile_{}.pem'.format(labname), + 'STX_KEYFILE_SYS_HOME': '~/keyfile_{}.pem'.format(labname), + 'LOG_DIR': logdir, + 'TCLIST_PATH': logdir + '/test_results.log', + 'PYTESTLOG_PATH': logdir + '/pytestlog.log', + 'LAB_NAME': lab['short_name'], + 'TEMP_DIR': logdir + '/tmp_files/', + 'PING_FAILURE_DIR': logdir + '/ping_failures/', + 'GUEST_LOGS_DIR': logdir + '/guest_logs/', + 'PRIMARY_TENANT': tenant, + 'LAB': lab, + 'NATBOX': natbox, + 'COLLECT_ALL': collect_all, + 'ALWAYS_COLLECT': always_collect, + 'HORIZON_VISIBLE': horizon_visible + }) + + @classmethod + def set_var(cls, append=False, **kwargs): + for key, val in kwargs.items(): + if append: + cls.__var_dict[key.upper()].append(val) + else: + cls.__var_dict[key.upper()] = val + + @classmethod + def get_var(cls, var_name): + var_name = var_name.upper() + valid_vars = cls.__var_dict.keys() + if var_name not in valid_vars: + raise ValueError( + "Invalid var_name: {}. Valid vars: {}".format(var_name, + valid_vars)) + + return cls.__var_dict[var_name] diff --git a/automated-pytest-suite/consts/reasons.py b/automated-pytest-suite/consts/reasons.py new file mode 100644 index 0000000..4547936 --- /dev/null +++ b/automated-pytest-suite/consts/reasons.py @@ -0,0 +1,41 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +class SkipStorageSpace: + SMALL_CINDER_VOLUMES_POOL = "Cinder Volumes Pool is less than 30G" + INSUFFICIENT_IMG_CONV = 'Insufficient image-conversion space to convert ' \ + '{} image to raw format' + + +class SkipStorageBacking: + LESS_THAN_TWO_HOSTS_WITH_BACKING = "Less than two hosts with {} instance " \ + "storage backing exist on system" + NO_HOST_WITH_BACKING = "No host with {} instance storage backing exists " \ + "on system" + + +class SkipHypervisor: + LESS_THAN_TWO_HYPERVISORS = "Less than two hypervisors available" + + +class SkipHyperthreading: + LESS_THAN_TWO_HT_HOSTS = "Less than two hyperthreaded hosts available" + MORE_THAN_ONE_HT_HOSTS = "More than one hyperthreaded hosts available" + + +class SkipHostIf: + PCI_IF_UNAVAIL = "SRIOV or PCI-passthrough interface unavailable" + PCIPT_IF_UNAVAIL = "PCI-passthrough interface unavailable" + SRIOV_IF_UNAVAIL = "SRIOV interface unavailable" + MGMT_INFRA_UNAVAIL = 'traffic control class is not defined in this lab' + + +class SkipSysType: + SMALL_FOOTPRINT = "Skip for small footprint lab" + LESS_THAN_TWO_CONTROLLERS = "Less than two controllers on system" + SIMPLEX_SYSTEM = 'Not applicable to Simplex system' + SIMPLEX_ONLY = 'Only applicable to Simplex system' diff --git a/automated-pytest-suite/consts/stx.py b/automated-pytest-suite/consts/stx.py new file mode 100755 index 0000000..f1fa652 --- /dev/null +++ b/automated-pytest-suite/consts/stx.py @@ -0,0 +1,681 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from consts.proj_vars import ProjVar + +# output of date. such as: Tue Mar 1 18:20:29 UTC 2016 +DATE_OUTPUT = r'[0-2]\d:[0-5]\d:[0-5]\d\s[A-Z]{3,}\s\d{4}$' + +EXT_IP = '8.8.8.8' + +# such as in string '5 packets transmitted, 0 received, 100% packet loss, +# time 4031ms', number 100 will be found +PING_LOSS_RATE = r'\, (\d{1,3})\% packet loss\,' + +# vshell ping loss rate pattern. 3 packets transmitted, 0 received, 0 total, +# 100.00%% loss +VSHELL_PING_LOSS_RATE = r'\, (\d{1,3}).\d{1,2}[%]% loss' + +# Matches 8-4-4-4-12 hexadecimal digits. Lower case only +UUID = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}' + +# Match name and uuid. 
+# Such as: 'ubuntu_14 (a764c205-eb82-4f18-bda6-6c8434223eb5)' +NAME_UUID = r'(.*) \((' + UUID + r')\)' + +# Message to indicate boot from volume from nova show +BOOT_FROM_VOLUME = 'Attempt to boot from volume - no image supplied' + +METADATA_SERVER = '169.254.169.254' + +# Heat template path +HEAT_PATH = 'heat/hot/simple/' +HEAT_SCENARIO_PATH = 'heat/hot/scenarios/' +HEAT_FLAVORS = ['small_ded', 'small_float'] +HEAT_CUSTOM_TEMPLATES = 'custom_heat_templates' + +# special NIC patterns +MELLANOX_DEVICE = 'MT27500|MT27710' +MELLANOX4 = 'MT.*ConnectX-4' + +PLATFORM_AFFINE_INCOMPLETE = '/etc/platform/.task_affining_incomplete' +PLATFORM_CONF_PATH = '/etc/platform/platform.conf' + +SUBCLOUD_PATTERN = 'subcloud' + +PLATFORM_NET_TYPES = ('mgmt', 'oam', 'infra', 'pxeboot') + +TIMEZONES = [ + "Asia/Hong_Kong", # UTC+8 + "America/Los_Angeles", # UTC-8, DST:UTC-7 + "Canada/Eastern", # UTC-5, DST:UTC-4 + "Canada/Central", # UTC-6, DST:UTC-5 + # "Europe/London", # UTC, DST:UTC+1 + "Europe/Berlin", # UTC+1, DST:UTC+2 + "UTC" +] + +STORAGE_AGGREGATE = { + # 'local_lvm' : 'local_storage_lvm_hosts', + 'local_image': 'local_storage_image_hosts', + 'remote': 'remote_storage_hosts', +} + + +class NtpPool: + NTP_POOL_1 = '2.pool.ntp.org,1.pool.ntp.org,0.pool.ntp.org' + NTP_POOL_2 = '1.pool.ntp.org,2.pool.ntp.org,2.pool.ntp.org' + NTP_POOL_3 = '3.ca.pool.ntp.org,2.ca.pool.ntp.org,1.ca.pool.ntp.org' + NTP_POOL_TOO_LONG = '3.ca.pool.ntp.org,2.ca.pool.ntp.org,' \ + '1.ca.pool.ntp.org,1.com,2.com,3.com' + NTP_NAME_TOO_LONG = 'garbage_' * 30 + + +class GuestImages: + TMP_IMG_DIR = '/opt/backups' + DEFAULT = { + 'image_dir': '{}/images'.format(ProjVar.get_var('USER_FILE_DIR')), + 'image_dir_file_server': '/sandbox/images', + 'guest': 'tis-centos-guest' + } + TIS_GUEST_PATTERN = 'cgcs-guest|tis-centos-guest' + GUESTS_NO_RM = ['ubuntu_14', 'tis-centos-guest', 'cgcs-guest'] + # Image files name and size from TestFileServer + # : , , + # , , + IMAGE_FILES = { + 'ubuntu_14': ( + 'ubuntu-14.04-server-cloudimg-amd64-disk1.img', 3, + 'ubuntu_14.qcow2', 'qcow2', 'bare'), + 'ubuntu_12': ( + 'ubuntu-12.04-server-cloudimg-amd64-disk1.img', 8, + 'ubuntu_12.qcow2', 'qcow2', 'bare'), + 'ubuntu_16': ( + 'ubuntu-16.04-xenial-server-cloudimg-amd64-disk1.img', 8, + 'ubuntu_16.qcow2', 'qcow2', 'bare'), + 'centos_6': ( + 'CentOS-6.8-x86_64-GenericCloud-1608.qcow2', 8, + 'centos_6.qcow2', 'qcow2', 'bare'), + 'centos_7': ( + 'CentOS-7-x86_64-GenericCloud.qcow2', 8, + 'centos_7.qcow2', 'qcow2', 'bare'), + 'rhel_6': ( + 'rhel-6.5-x86_64.qcow2', 11, 'rhel_6.qcow2', 'qcow2', 'bare'), + 'rhel_7': ( + 'rhel-7.2-x86_64.qcow2', 11, 'rhel_7.qcow2', 'qcow2', 'bare'), + 'opensuse_11': ( + 'openSUSE-11.3-x86_64.qcow2', 11, + 'opensuse_11.qcow2', 'qcow2', 'bare'), + 'opensuse_12': ( + 'openSUSE-12.3-x86_64.qcow2', 21, + 'opensuse_12.qcow2', 'qcow2', 'bare'), + 'opensuse_13': ( + 'openSUSE-13.2-OpenStack-Guest.x86_64-0.0.10-Build2.94.qcow2', 16, + 'opensuse_13.qcow2', 'qcow2', 'bare'), + 'win_2012': ( + 'win2012r2_cygwin_compressed.qcow2', 13, + 'win2012r2.qcow2', 'qcow2', 'bare'), + 'win_2016': ( + 'win2016_cygwin_compressed.qcow2', 29, + 'win2016.qcow2', 'qcow2', 'bare'), + 'ge_edge': ( + 'edgeOS.hddirect.qcow2', 5, + 'ge_edge.qcow2', 'qcow2', 'bare'), + 'cgcs-guest': ( + 'cgcs-guest.img', 1, 'cgcs-guest.img', 'raw', 'bare'), + 'vxworks': ( + 'vxworks-tis.img', 1, 'vxworks.img', 'raw', 'bare'), + 'tis-centos-guest': ( + None, 2, 'tis-centos-guest.img', 'raw', 'bare'), + 'tis-centos-guest-rt': ( + None, 2, 'tis-centos-guest-rt.img', 'raw', 'bare'), + 
'tis-centos-guest-qcow2': ( + None, 2, 'tis-centos-guest.qcow2', 'qcow2', 'bare'), + 'centos_gpu': ( + 'centos-67-cloud-gpu.img', 8, + 'centos_6_gpu.qcow2', 'qcow2', 'bare'), + 'debian-8-m-agent': ( + 'debian-8-m-agent.qcow2', 1.8, + 'debian-8-m-agent.qcow2', 'qcow2', 'bare'), + 'trusty_uefi': ( + 'trusty-server-cloudimg-amd64-uefi1.img', 2.2, + 'trusty-uefi.qcow2', 'qcow2', 'bare'), + 'uefi_shell': ( + 'uefi_shell.iso', 2, 'uefi_shell.iso', 'raw', 'bare'), + } + + +class Networks: + INFRA_NETWORK_CIDR = "192.168.205.0/24" + IPV4_IP = r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}' + + __NEUTRON_NET_NAME_PATTERN = { + 'mgmt': r'tenant\d-mgmt-net', + 'data': r'tenant\d-net', + 'internal': 'internal', + 'external': 'external', + } + __NEUTRON_NET_IP_PATTERN = { + 'data': r'172.\d{1,3}.\d{1,3}.\d{1,3}', + 'mgmt': r'192.168.\d{3}\.\d{1,3}|192.168.[8|9]\d\.\d{1,3}', + 'internal': r'10.\d{1,3}.\d{1,3}.\d{1,3}', + 'external': r'192.168.\d\.\d{1,3}|192.168.[1-5]\d\.\d{1,3}|10.10.\d{' + r'1,3}\.\d{1,3}' + } + + @classmethod + def get_nenutron_net_patterns(cls, net_type='mgmt'): + return cls.__NEUTRON_NET_NAME_PATTERN.get( + net_type), cls.__NEUTRON_NET_IP_PATTERN.get(net_type) + + @classmethod + def set_neutron_net_patterns(cls, net_type, net_name_pattern=None, + net_ip_pattern=None): + if net_type not in cls.__NEUTRON_NET_NAME_PATTERN: + raise ValueError("Unknown net_type {}. Select from: {}".format( + net_type, list(cls.__NEUTRON_NET_NAME_PATTERN.keys()))) + + if net_name_pattern is not None: + cls.__NEUTRON_NET_NAME_PATTERN[net_type] = net_name_pattern + if net_ip_pattern is not None: + cls.__NEUTRON_NET_IP_PATTERN[net_type] = net_ip_pattern + + +class SystemType: + CPE = 'All-in-one' + STANDARD = 'Standard' + + +class StorageAggregate: + LOCAL_LVM = 'local_storage_lvm_hosts' + LOCAL_IMAGE = 'local_storage_image_hosts' + REMOTE = 'remote_storage_hosts' + + +class VMStatus: + # under http://docs.openstack.org/developer/nova/vmstates.html + ACTIVE = 'ACTIVE' + BUILD = 'BUILDING' + REBUILD = 'REBUILD' + VERIFY_RESIZE = 'VERIFY_RESIZE' + RESIZE = 'RESIZED' + ERROR = 'ERROR' + SUSPENDED = 'SUSPENDED' + PAUSED = 'PAUSED' + NO_STATE = 'NO STATE' + HARD_REBOOT = 'HARD REBOOT' + SOFT_REBOOT = 'REBOOT' + STOPPED = "SHUTOFF" + MIGRATING = 'MIGRATING' + + +class ImageStatus: + QUEUED = 'queued' + ACTIVE = 'active' + SAVING = 'saving' + + +class HostAdminState: + UNLOCKED = 'unlocked' + LOCKED = 'locked' + + +class HostOperState: + ENABLED = 'enabled' + DISABLED = 'disabled' + + +class HostAvailState: + DEGRADED = 'degraded' + OFFLINE = 'offline' + ONLINE = 'online' + AVAILABLE = 'available' + FAILED = 'failed' + POWER_OFF = 'power-off' + + +class HostTask: + BOOTING = 'Booting' + REBOOTING = 'Rebooting' + POWERING_ON = 'Powering-on' + POWER_CYCLE = 'Critical Event Power-Cycle' + POWER_DOWN = 'Critical Event Power-Down' + + +class Prompt: + CONTROLLER_0 = r'.*controller\-0[:| ].*\$' + CONTROLLER_1 = r'.*controller\-1[:| ].*\$' + CONTROLLER_PROMPT = r'.*controller\-[01][:| ].*\$ ' + + VXWORKS_PROMPT = '-> ' + + ADMIN_PROMPT = r'\[.*@controller\-[01].*\(keystone_admin\)\]\$' + TENANT1_PROMPT = r'\[.*@controller\-[01] .*\(keystone_tenant1\)\]\$ ' + TENANT2_PROMPT = r'\[.*@controller\-[01] .*\(keystone_tenant2\)\]\$ ' + TENANT_PROMPT = r'\[.*@controller\-[01] .*\(keystone_{}\)\]\$ ' # + # general prompt. 
Need to fill in tenant name + REMOTE_CLI_PROMPT = r'\(keystone_{}\)\]\$ ' # remote cli prompt + + COMPUTE_PROMPT = r'.*compute\-([0-9]){1,}\:~\$' + STORAGE_PROMPT = r'.*storage\-([0-9]){1,}\:~\$' + PASSWORD_PROMPT = r'.*assword\:[ ]?$|assword for .*:[ ]?$' + LOGIN_PROMPT = "ogin:" + SUDO_PASSWORD_PROMPT = 'Password: ' + BUILD_SERVER_PROMPT_BASE = r'{}@{}\:~.*' + TEST_SERVER_PROMPT_BASE = r'\[{}@.*\]\$ ' + # TIS_NODE_PROMPT_BASE = r'{}\:~\$ ' + TIS_NODE_PROMPT_BASE = r'{}[: ]?~.*$' + ADD_HOST = r'.*\(yes/no\).*' + ROOT_PROMPT = '.*root@.*' + Y_N_PROMPT = r'.*\(y/n\)\?.*' + YES_N_PROMPT = r'.*\[yes/N\]\: ?' + CONFIRM_PROMPT = '.*confirm: ?' + + +class NovaCLIOutput: + VM_ACTION_ACCEPTED = "Request to {} server (.*) has been accepted." + VM_START_ACCEPTED = "Request to start server (.*) has been accepted." + VM_STOP_ACCEPTED = "Request to stop server (.*) has been accepted." + VM_DELETE_REJECTED_NOT_EXIST = "No server with a name or ID of '(.*)' " \ + "exists." + VM_DELETE_ACCEPTED = "Request to delete server (.*) has been accepted." + VM_BOOT_REJECT_MEM_PAGE_SIZE_FORBIDDEN = "Page size .* forbidden against .*" + SRV_GRP_DEL_REJ_NOT_EXIST = "Delete for server group (.*) failed" + SRV_GRP_DEL_SUCC = "Server group (.*) has been successfully deleted." + + +class FlavorSpec: + CPU_POLICY = 'hw:cpu_policy' + VCPU_MODEL = 'hw:cpu_model' + SHARED_VCPU = 'hw:wrs:shared_vcpu' + CPU_THREAD_POLICY = 'hw:cpu_thread_policy' + VCPU_SCHEDULER = 'hw:wrs:vcpu:scheduler' + MIN_VCPUS = "hw:wrs:min_vcpus" + STORAGE_BACKING = 'aggregate_instance_extra_specs:stx_storage' + DISK_READ_BYTES = 'quota:disk_read_bytes_sec' + DISK_READ_IOPS = 'quota:disk_read_iops_sec' + DISK_WRITE_BYTES = 'quota:disk_write_bytes_sec' + DISK_WRITE_IOPS = 'quota:disk_write_iops_sec' + DISK_TOTAL_BYTES = 'quota:disk_total_bytes_sec' + DISK_TOTAL_IOPS = 'quota:disk_total_iops_sec' + NUMA_NODES = 'hw:numa_nodes' + NUMA_0 = 'hw:numa_node.0' + NUMA_1 = 'hw:numa_node.1' + NUMA0_CPUS = 'hw:numa_cpus.0' + NUMA1_CPUS = 'hw:numa_cpus.1' + NUMA0_MEM = 'hw:numa_mem.0' + NUMA1_MEM = 'hw:numa_mem.1' + VSWITCH_NUMA_AFFINITY = 'hw:wrs:vswitch_numa_affinity' + MEM_PAGE_SIZE = 'hw:mem_page_size' + AUTO_RECOVERY = 'sw:wrs:auto_recovery' + GUEST_HEARTBEAT = 'sw:wrs:guest:heartbeat' + SRV_GRP_MSG = "sw:wrs:srv_grp_messaging" + NIC_ISOLATION = "hw:wrs:nic_isolation" + PCI_NUMA_AFFINITY = "hw:pci_numa_affinity_policy" + PCI_PASSTHROUGH_ALIAS = "pci_passthrough:alias" + PCI_IRQ_AFFINITY_MASK = "hw:pci_irq_affinity_mask" + CPU_REALTIME = 'hw:cpu_realtime' + CPU_REALTIME_MASK = 'hw:cpu_realtime_mask' + HPET_TIMER = 'sw:wrs:guest:hpet' + NESTED_VMX = 'hw:wrs:nested_vmx' + NUMA0_CACHE_CPUS = 'hw:cache_vcpus.0' + NUMA1_CACHE_CPUS = 'hw:cache_vcpus.1' + NUMA0_L3_CACHE = 'hw:cache_l3.0' + NUMA1_L3_CACHE = 'hw:cache_l3.1' + LIVE_MIG_TIME_OUT = 'hw:wrs:live_migration_timeout' + + +class ImageMetadata: + MEM_PAGE_SIZE = 'hw_mem_page_size' + AUTO_RECOVERY = 'sw_wrs_auto_recovery' + VIF_MODEL = 'hw_vif_model' + CPU_THREAD_POLICY = 'hw_cpu_thread_policy' + CPU_POLICY = 'hw_cpu_policy' + CPU_RT_MASK = 'hw_cpu_realtime_mask' + CPU_RT = 'hw_cpu_realtime' + CPU_MODEL = 'hw_cpu_model' + FIRMWARE_TYPE = 'hw_firmware_type' + + +class VMMetaData: + EVACUATION_PRIORITY = 'sw:wrs:recovery_priority' + + +class InstanceTopology: + NODE = r'node:(\d),' + PGSIZE = r'pgsize:(\d{1,3}),' + VCPUS = r'vcpus:(\d{1,2}),' + PCPUS = r'pcpus:(\d{1,2}),\s' # find a string separated by ', + # ' if multiple numa nodes + CPU_POLICY = 'pol:(.*),' + SIBLINGS = 'siblings:(.*),' + THREAD_POLICY = 
'thr:(.*)$|thr:(.*),' + TOPOLOGY = r'\d{1,2}s,\d{1,2}c,\d{1,2}t' + + +class RouterStatus: + ACTIVE = 'ACTIVE' + DOWN = 'DOWN' + + +class EventLogID: + PATCH_INSTALL_FAIL = '900.002' + PATCH_IN_PROGRESS = '900.001' + CINDER_IO_CONGEST = '800.101' + STORAGE_LOR = '800.011' + STORAGE_POOLQUOTA = '800.003' + STORAGE_ALARM_COND = '800.001' + HEARTBEAT_CHECK_FAILED = '700.215' + HEARTBEAT_ENABLED = '700.211' + REBOOT_VM_COMPLETE = '700.186' + REBOOT_VM_INPROGRESS = '700.182' + REBOOT_VM_ISSUED = '700.181' # soft-reboot or hard-reboot in reason text + VM_DELETED = '700.114' + VM_DELETING = '700.110' + VM_CREATED = '700.108' + MULTI_NODE_RECOVERY = '700.016' + HEARTBEAT_DISABLED = '700.015' + VM_REBOOTING = '700.005' + VM_FAILED = '700.001' + IMA = '500.500' + SERVICE_GROUP_STATE_CHANGE = '401.001' + LOSS_OF_REDUNDANCY = '400.002' + CON_DRBD_SYNC = '400.001' + PROVIDER_NETWORK_FAILURE = '300.005' + NETWORK_AGENT_NOT_RESPOND = '300.003' + CONFIG_OUT_OF_DATE = '250.001' + INFRA_NET_FAIL = '200.009' + BMC_SENSOR_ACTION = '200.007' + STORAGE_DEGRADE = '200.006' + # 200.004 compute-0 experienced a service-affecting failure. + # Auto-recovery in progress. + # host=compute-0 critical April 7, 2017, 2:34 p.m. + HOST_RECOVERY_IN_PROGRESS = '200.004' + HOST_LOCK = '200.001' + NTP_ALARM = '100.114' + INFRA_PORT_FAIL = '100.110' + FS_THRESHOLD_EXCEEDED = '100.104' + CPU_USAGE_HIGH = '100.101' + MNFA_MODE = '200.020' + + +class NetworkingVmMapping: + VSWITCH = { + 'vif': 'avp', + 'flavor': 'medium.dpdk', + } + AVP = { + 'vif': 'avp', + 'flavor': 'small', + } + VIRTIO = { + 'vif': 'avp', + 'flavor': 'small', + } + + +class VifMapping: + VIF_MAP = {'vswitch': 'DPDKAPPS', + 'avp': 'AVPAPPS', + 'virtio': 'VIRTIOAPPS', + 'vhost': 'VHOSTAPPS', + 'sriov': 'SRIOVAPPS', + 'pcipt': 'PCIPTAPPS' + } + + +class LocalStorage: + DIR_PROFILE = 'storage_profiles' + TYPE_STORAGE_PROFILE = ['storageProfile', 'localstorageProfile'] + + +class VMNetwork: + NET_IF = r"auto {}\niface {} inet dhcp\n" + IFCFG_DHCP = """ +DEVICE={} +BOOTPROTO=dhcp +ONBOOT=yes +TYPE=Ethernet +USERCTL=yes +PEERDNS=yes +IPV6INIT={} +PERSISTENT_DHCLIENT=1 +""" + + IFCFG_STATIC = """ +DEVICE={} +BOOTPROTO=static +ONBOOT=yes +TYPE=Ethernet +USERCTL=yes +PEERDNS=yes +IPV6INIT={} +PERSISTENT_DHCLIENT=1 +IPADDR={} +""" + + +class HTTPPort: + NEUTRON_PORT = 9696 + NEUTRON_VER = "v2.0" + CEIL_PORT = 8777 + CEIL_VER = "v2" + GNOCCHI_PORT = 8041 + GNOCCHI_VER = 'v1' + SYS_PORT = 6385 + SYS_VER = "v1" + CINDER_PORT = 8776 + CINDER_VER = "v3" # v1 and v2 are also supported + GLANCE_PORT = 9292 + GLANCE_VER = "v2" + HEAT_PORT = 8004 + HEAT_VER = "v1" + HEAT_CFN_PORT = 8000 + HEAT_CFN_VER = "v1" + NOVA_PORT = 8774 + NOVA_VER = "v2.1" # v3 also supported + NOVA_EC2_PORT = 8773 + NOVA_EC2_VER = "v2" + PATCHING_PORT = 15491 + PATCHING_VER = "v1" + + +class QoSSpec: + READ_BYTES = 'read_bytes_sec' + WRITE_BYTES = 'write_bytes_sec' + TOTAL_BYTES = 'total_bytes_sec' + READ_IOPS = 'read_iops_sec' + WRITE_IOPS = 'write_iops_sec' + TOTAL_IOPS = 'total_iops_sec' + + +class DevClassID: + QAT_VF = '0b4000' + GPU = '030000' + USB = '0c0320|0c0330' + + +class MaxVmsSupported: + SX = 10 + XEON_D = 4 + DX = 10 + VBOX = 2 + + +class CpuModel: + CPU_MODELS = ( + 'Skylake-Server', 'Skylake-Client', + 'Broadwell', 'Broadwell-noTSX', + 'Haswell-noTSX-IBRS', 'Haswell', + 'IvyBridge', 'SandyBridge', + 'Westmere', 'Nehalem', 'Penryn', 'Conroe') + + +class BackendState: + CONFIGURED = 'configured' + CONFIGURING = 'configuring' + + +class BackendTask: + RECONFIG_CONTROLLER = 
'reconfig-controller' + APPLY_MANIFEST = 'applying-manifests' + + +class PartitionStatus: + READY = 'Ready' + MODIFYING = 'Modifying' + DELETING = 'Deleting' + CREATING = 'Creating' + IN_USE = 'In-Use' + + +class SysType: + AIO_DX = 'AIO-DX' + AIO_SX = 'AIO-SX' + STORAGE = 'Storage' + REGULAR = 'Regular' + MULTI_REGION = 'Multi-Region' + DISTRIBUTED_CLOUD = 'Distributed_Cloud' + + +class HeatStackStatus: + CREATE_FAILED = 'CREATE_FAILED' + CREATE_COMPLETE = 'CREATE_COMPLETE' + UPDATE_COMPLETE = 'UPDATE_COMPLETE' + UPDATE_FAILED = 'UPDATE_FAILED' + DELETE_FAILED = 'DELETE_FAILED' + + +class VimEventID: + LIVE_MIG_BEGIN = 'instance-live-migrate-begin' + LIVE_MIG_END = 'instance-live-migrated' + COLD_MIG_BEGIN = 'instance-cold-migrate-begin' + COLD_MIG_END = 'instance-cold-migrated' + COLD_MIG_CONFIRM_BEGIN = 'instance-cold-migrate-confirm-begin' + COLD_MIG_CONFIRMED = 'instance-cold-migrate-confirmed' + + +class MigStatus: + COMPLETED = 'completed' + RUNNING = 'running' + PREPARING = 'preparing' + PRE_MIG = 'pre-migrating' + POST_MIG = 'post-migrating' + + +class TrafficControl: + CLASSES = {'1:40': 'default', '1:1': 'root', '1:10': 'hiprio', + '1:20': 'storage', '1:30': 'migration', + '1:50': 'drbd'} + + RATE_PATTERN_ROOT = r'class htb 1:1 root rate (\d+)([GMK])bit ceil (\d+)(' \ + r'[GMK])bit burst \d+b cburst \d+b' + RATE_PATTERN = r'class htb (1:\d+) parent 1:1 leaf \d+: prio \d+ rate (' \ + r'\d+)([GMK])bit ceil (\d+)([GMK])bit ' \ + r'burst \d+b cburst \d+b' + + # no infra + MGMT_NO_INFRA = { + 'config': 'no infra', + 'root': (1, 1), + 'default': (0.1, 0.2), + 'hiprio': (0.1, 0.2), + 'storage': (0.5, 1), + 'migration': (0.3, 1), + 'drbd': (0.8, 1)} + + # infra must be sep + MGMT_SEP = { + 'config': 'separate mgmt', + 'root': (1, 1), + 'default': (0.1, 1), + 'hiprio': (0.1, 1)} + + # infra could be sep or over pxe + MGMT_USES_PXE = { + 'config': 'mgmt consolidated over pxeboot', + 'root': (1, 1), + 'default': (0.1, 0.2), + 'hiprio': (0.1, 0.2)} + + # infra over mgmt + MGMT_USED_BY_INFRA = { + 'config': 'infra consolidated over mgmt', + 'root': (1, 1), + 'default': (0.1, 0.2), + 'hiprio': (0.1, 0.2), + 'storage': (0.5, 1), + 'migration': (0.3, 1), + 'drbd': (0.8, 1)} + + # infra over mgmt + INFRA_USES_MGMT = { + 'config': 'infra consolidated over mgmt', + 'root': (0.99, 0.99), + 'default': (0.99 * 0.1, 0.99 * 0.2), + 'hiprio': (0.99 * 0.1, 0.99 * 0.2), + 'storage': (0.99 * 0.5, 0.99 * 1), + 'migration': (0.99 * 0.3, 0.99 * 1), + 'drbd': (0.99 * 0.8, 0.99 * 1)} + + # mgmt could be sep or over pxe + INFRA_SEP = { + 'config': 'separate infra', + 'root': (1, 1), + 'default': (0.1, 0.2), + 'hiprio': (0.1, 0.2), + 'storage': (0.5, 1), + 'migration': (0.3, 1), + 'drbd': (0.8, 1)} + + # mgmt must be over pxe + INFRA_USES_PXE = { + 'config': 'infra and mgmt consolidated over pxeboot', + 'root': (1, 1), + 'default': (0.99 * 0.1, 0.99 * 0.2), # 0.1, 0.2 is the ratio for mgmt + 'hiprio': (0.99 * 0.1, 0.99 * 0.2), # 0.1, 0.2 is the ratio for mgmt + 'storage': (0.99 * 0.5, 0.99), + 'migration': (0.99 * 0.3, 0.99), + 'drbd': (0.99 * 0.8, 0.99)} + + +class SubcloudStatus: + AVAIL_ONLINE = "online" + AVAIL_OFFLINE = "offline" + MGMT_MANAGED = "managed" + MGMT_UNMANAGED = "unmanaged" + SYNCED = 'in-sync' + UNSYNCED = 'out-of-sync' + + +class PodStatus: + RUNNING = 'Running' + COMPLETED = 'Completed' + CRASH = 'CrashLoopBackOff' + POD_INIT = 'PodInitializing' + INIT = 'Init:0/1' + PENDING = 'Pending' + + +class AppStatus: + UPLOADING = 'uploading' + UPLOADED = 'uploaded' + UPLOAD_FAILED = 'upload-failed' + 
APPLIED = 'applied' + APPLY_FAILED = 'apply-failed' + REMOVE_FAILED = 'remove-failed' + DELETE_FAILED = 'delete-failed' + + +class VSwitchType: + OVS_DPDK = 'ovs-dpdk' + AVS = 'avs' + NONE = 'none' + + +class Container: + LOCAL_DOCKER_REG = 'registry.local:9001' diff --git a/automated-pytest-suite/consts/timeout.py b/automated-pytest-suite/consts/timeout.py new file mode 100644 index 0000000..f0663bc --- /dev/null +++ b/automated-pytest-suite/consts/timeout.py @@ -0,0 +1,160 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +CLI_TIMEOUT = 600 + + +class HostTimeout: + # Host in online state after locked + ONLINE_AFTER_LOCK = 1200 + # Compute host reaches enabled/available state after system host-unlock + # returned + COMPUTE_UNLOCK = 840 + # Host reaches enabled/available state after system host-unlock returned + CONTROLLER_UNLOCK = 1360 + # Host reaches enabled/available state after sudo reboot -f from host + REBOOT = 2400 + # Active controller switched and being able to run openstack CLI after + # system host-swact returned + SWACT = 180 + # Host in locked state after system host-lock cli returned + LOCK = 900 + # Task clears in system host-show after host reaches enabled/available state + TASK_CLEAR = 600 + # Host in offline or failed state via system host-show after sudo reboot + # -f returned + FAIL_AFTER_REBOOT = 120 + # Hypervsior in enabled/up state after host in available state and task + # clears + HYPERVISOR_UP = 300 + # Web service up in sudo sm-dump after host in available state and task + # clears + WEB_SERVICE_UP = 180 + PING_TIMEOUT = 60 + TIMEOUT_BUFFER = 2 + # subfunction go enabled/available after host admin/avail states go + # enabled/available + SUBFUNC_READY = 300 + SYSTEM_RESTORE = 3600 # System restore complete + SYSTEM_BACKUP = 1800 # system backup complete + BACKUP_COPY_USB = 600 + INSTALL_CLONE = 3600 + INSTALL_CLONE_STATUS = 60 + INSTALL_CONTROLLER = 2400 + INSTALL_LOAD = 3600 + POST_INSTALL_SCRIPTS = 3600 + CONFIG_CONTROLLER_TIMEOUT = 1800 + CEPH_MON_ADD_CONFIG = 300 + NODES_STATUS_READY = 7200 + + +class InstallTimeout: + # Host reaches enabled/available state after system host-unlock returned + CONTROLLER_UNLOCK = 9000 + CONFIG_CONTROLLER_TIMEOUT = 1800 + # REBOOT = 2000 # Host reaches enabled/available state after sudo + # reboot -f from host + UPGRADE = 7200 + WIPE_DISK_TIMEOUT = 30 + SYSTEM_RESTORE = 3600 # System restore complete + SYSTEM_BACKUP = 1800 # system backup complete + BACKUP_COPY_USB = 600 + INSTALL_CLONE = 3600 + INSTALL_CLONE_STATUS = 60 + INSTALL_CONTROLLER = 2400 + INSTALL_LOAD = 3600 + POST_INSTALL_SCRIPTS = 3600 + + +class VMTimeout: + STATUS_CHANGE = 300 + STATUS_VERIFY_RESIZE = 30 + LIVE_MIGRATE_COMPLETE = 240 + COLD_MIGRATE_CONFIRM = 600 + BOOT_VM = 1800 + DELETE = 180 + VOL_ATTACH = 60 + SSH_LOGIN = 90 + AUTO_RECOVERY = 600 + REBOOT = 180 + PAUSE = 180 + IF_ADD = 30 + REBUILD = 300 + DHCP_IP_ASSIGN = 30 + DHCP_RETRY = 500 + PING_VM = 200 + + +class VolumeTimeout: + STATUS_CHANGE = 2700 # Windows guest takes a long time + DELETE = 90 + + +class SysInvTimeout: + RETENTION_PERIOD_SAVED = 30 + RETENTION_PERIOD_MODIFY = 60 + DNS_SERVERS_SAVED = 30 + DNS_MODIFY = 60 + PARTITION_CREATE = 120 + PARTITION_DELETE = 120 + PARTITION_MODIFY = 120 + + +class CMDTimeout: + HOST_CPU_MODIFY = 600 + RESOURCE_LIST = 60 + REBOOT_VM = 60 + CPU_PROFILE_APPLY = 30 + + +class ImageTimeout: + CREATE = 1800 + STATUS_CHANGE = 60 + DELETE = 120 + + +class EventLogTimeout: + HEARTBEAT_ESTABLISH = 300 + 
HEALTH_CHECK_FAIL = 60 + VM_REBOOT = 60 + NET_AGENT_NOT_RESPOND_CLEAR = 120 + + +class MTCTimeout: + KILL_PROCESS_HOST_CHANGE_STATUS = 40 + KILL_PROCESS_HOST_KEEP_STATUS = 20 + KILL_PROCESS_SWACT_NOT_START = 20 + KILL_PROCESS_SWACT_START = 40 + KILL_PROCESS_SWACT_COMPLETE = 40 + + +class CeilTimeout: + EXPIRE = 300 + + +class OrchestrationPhaseTimeout: + INITIAL = 20 + BUILD = 60 + ABORT = 7200 + APPLY = 86400 + + +class DCTimeout: + SYNC = 660 # 10 minutes + 1 + SUBCLOUD_AUDIT = 600 # 4 minutes + 1 + PATCH_AUDIT = 240 # 3 minutes + 1 + + +class MiscTimeout: + # timeout for two audits. 'sudo ntpq' got pulled every 10 minutes in + # /var/log/user.log + NTPQ_UPDATE = 1260 + + +class K8sTimeout: + APP_UPLOAD = 300 + APP_APPLY = 600 diff --git a/automated-pytest-suite/consts/ubuntu_if_config.sh b/automated-pytest-suite/consts/ubuntu_if_config.sh new file mode 100644 index 0000000..22911e4 --- /dev/null +++ b/automated-pytest-suite/consts/ubuntu_if_config.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Ubuntu cloud-init user data script to be executed after ubuntu vm +# initialization + +sudo echo -e "auto eth1\niface eth1 inet dhcp\n\nauto eth2\niface eth2 inet dhcp" >> "/etc/network/interfaces" +sudo ifup eth1 +sudo ifup eth2 + +ip addr \ No newline at end of file diff --git a/automated-pytest-suite/keywords/__init__.py b/automated-pytest-suite/keywords/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/keywords/ceilometer_helper.py b/automated-pytest-suite/keywords/ceilometer_helper.py new file mode 100644 index 0000000..b84975c --- /dev/null +++ b/automated-pytest-suite/keywords/ceilometer_helper.py @@ -0,0 +1,67 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from consts.auth import Tenant +from utils import table_parser, cli +from utils.clients.ssh import ControllerClient +from utils.tis_log import LOG + + +def get_alarms(header='alarm_id', name=None, strict=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + + Args: + header + name: + strict: + auth_info: + con_ssh: + + Returns: + + """ + + table_ = table_parser.table(cli.openstack('alarm list', + ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + if name is None: + return table_parser.get_column(table_, header) + + return table_parser.get_values(table_, header, Name=name, strict=strict) + + +def get_events(event_type, limit=None, header='message_id', con_ssh=None, + auth_info=None, **filters): + """ + + Args: + event_type: + limit + header: + con_ssh: + auth_info: + + Returns: + + """ + args = '' + if limit: + args = '--limit {}'.format(limit) + + if event_type or filters: + if event_type: + filters['event_type'] = event_type + + extra_args = ['{}={}'.format(k, v) for k, v in filters.items()] + args += ' --filter {}'.format(';'.join(extra_args)) + + table_ = table_parser.table(cli.openstack('event list', args, + ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_values(table_, header) diff --git a/automated-pytest-suite/keywords/check_helper.py b/automated-pytest-suite/keywords/check_helper.py new file mode 100644 index 0000000..932b6f1 --- /dev/null +++ b/automated-pytest-suite/keywords/check_helper.py @@ -0,0 +1,635 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +############################################################### +# Intended for check functions for test result verifications +# assert is used to fail the check +# LOG.tc_step is used log the info +# Should be called by test function directly +############################################################### +import re +import time +import copy + +from utils.tis_log import LOG +from utils.rest import Rest +from consts.auth import Tenant +from consts.stx import GuestImages, EventLogID +from keywords import host_helper, system_helper, vm_helper, common, \ + glance_helper, storage_helper + +SEP = '\n------------------------------------ ' + + +def check_topology_of_vm(vm_id, vcpus, prev_total_cpus=None, numa_num=None, + vm_host=None, cpu_pol=None, + cpu_thr_pol=None, expt_increase=None, min_vcpus=None, + current_vcpus=None, + prev_siblings=None, shared_vcpu=None, con_ssh=None, + guest=None): + """ + Check vm has the correct topology based on the number of vcpus, + cpu policy, cpu threads policy, number of numa nodes + + Check is done via vm-topology, nova host-describe, virsh vcpupin (on vm + host), nova-compute.log (on vm host), + /sys/devices/system/cpu//topology/thread_siblings_list (on vm) + + Args: + vm_id (str): + vcpus (int): number of vcpus specified in flavor + prev_total_cpus (float): such as 37.0000, 37.0625 + numa_num (int): number of numa nodes vm vcpus are on. Default is 1 if + unset in flavor. + vm_host (str): + cpu_pol (str): dedicated or shared + cpu_thr_pol (str): isolate, require, or prefer + expt_increase (int): expected total vcpu increase on vm host compared + to prev_total_cpus + min_vcpus (None|int): min vcpu flavor spec. vcpu scaling specific + current_vcpus (None|int): current number of vcpus. vcpu scaling specific + prev_siblings (list): list of siblings total. Usually used when + checking vm topology after live migration + con_ssh (SSHClient) + shared_vcpu (int): which vcpu is shared + guest (str|None): guest os. e.g., ubuntu_14. Default guest is assumed + when None. + + """ + LOG.info( + "------ Check topology of vm {} on controller, hypervisor and " + "vm".format( + vm_id)) + cpu_pol = cpu_pol if cpu_pol else 'shared' + + if vm_host is None: + vm_host = vm_helper.get_vm_host(vm_id, con_ssh=con_ssh) + + log_cores_siblings = host_helper.get_logcore_siblings(host=vm_host, + con_ssh=con_ssh) + + if prev_total_cpus is not None: + if expt_increase is None: + expt_increase = vcpus + + LOG.info( + "{}Check total vcpus for vm host is increased by {} via " + "'openstack hypervisor show'".format( + SEP, expt_increase)) + expt_used_vcpus = prev_total_cpus + expt_increase + end_time = time.time() + 70 + while time.time() < end_time: + post_hosts_cpus = host_helper.get_vcpus_for_computes( + hosts=vm_host, field='vcpus_used') + if expt_used_vcpus == post_hosts_cpus[vm_host]: + break + time.sleep(10) + else: + post_hosts_cpus = host_helper.get_vcpus_for_computes( + hosts=vm_host, field='used_now') + assert expt_used_vcpus == post_hosts_cpus[ + vm_host], "Used vcpus on host {} is not as expected. 
" \ + "Expected: {}; Actual: {}".format(vm_host, + expt_used_vcpus, + post_hosts_cpus[ + vm_host]) + + LOG.info( + "{}Check vm vcpus, pcpus on vm host via nova-compute.log and virsh " + "vcpupin".format(SEP)) + # Note: floating vm pcpus will not be checked via virsh vcpupin + vm_host_cpus, vm_siblings = _check_vm_topology_on_host( + vm_id, vcpus=vcpus, vm_host=vm_host, cpu_pol=cpu_pol, + cpu_thr_pol=cpu_thr_pol, + host_log_core_siblings=log_cores_siblings, + shared_vcpu=shared_vcpu) + + LOG.info( + "{}Check vm vcpus, siblings on vm via " + "/sys/devices/system/cpu//topology/thread_siblings_list". + format(SEP)) + check_sibling = True if shared_vcpu is None else False + _check_vm_topology_on_vm(vm_id, vcpus=vcpus, siblings_total=vm_siblings, + current_vcpus=current_vcpus, + prev_siblings=prev_siblings, guest=guest, + check_sibling=check_sibling) + + return vm_host_cpus, vm_siblings + + +def _check_vm_topology_on_host(vm_id, vcpus, vm_host, cpu_pol, cpu_thr_pol, + host_log_core_siblings=None, shared_vcpu=None, + shared_host_cpus=None): + """ + + Args: + vm_id (str): + vcpus (int): + vm_host (str): + cpu_pol (str): + cpu_thr_pol (str): + host_log_core_siblings (list|None): + shared_vcpu (int|None): + shared_host_cpus (None|list) + + Returns: None + + """ + if not host_log_core_siblings: + host_log_core_siblings = host_helper.get_logcore_siblings(host=vm_host) + + if shared_vcpu and not shared_host_cpus: + shared_cpus_ = host_helper.get_host_cpu_cores_for_function( + func='Shared', hostname=vm_host, thread=None) + shared_host_cpus = [] + for proc, shared_cores in shared_cpus_.items(): + shared_host_cpus += shared_cores + + LOG.info( + '======= Check vm topology from vm_host via: virsh vcpupin, taskset') + instance_name = vm_helper.get_vm_instance_name(vm_id) + + with host_helper.ssh_to_host(vm_host) as host_ssh: + vcpu_cpu_map = vm_helper.get_vcpu_cpu_map(host_ssh=host_ssh) + used_host_cpus = [] + vm_host_cpus = [] + vcpus_list = list(range(vcpus)) + for instance_name_, instance_map in vcpu_cpu_map.items(): + used_host_cpus += list(instance_map.values()) + if instance_name_ == instance_name: + for vcpu in vcpus_list: + vm_host_cpus.append(instance_map[vcpu]) + used_host_cpus = list(set(used_host_cpus)) + vm_siblings = None + # Check vm sibling pairs + if 'ded' in cpu_pol and cpu_thr_pol in ('isolate', 'require'): + if len(host_log_core_siblings[0]) == 1: + assert cpu_thr_pol != 'require', \ + "cpu_thread_policy 'require' must be used on a HT host" + vm_siblings = [[vcpu_] for vcpu_ in vcpus_list] + else: + vm_siblings = [] + for vcpu_index in vcpus_list: + vm_host_cpu = vm_host_cpus[vcpu_index] + for host_sibling in host_log_core_siblings: + if vm_host_cpu in host_sibling: + other_cpu = host_sibling[0] if \ + vm_host_cpu == host_sibling[1] else \ + host_sibling[1] + if cpu_thr_pol == 'require': + assert other_cpu in vm_host_cpus, \ + "'require' vm uses only 1 of the sibling " \ + "cores" + vm_siblings.append(sorted([vcpu_index, + vm_host_cpus.index( + other_cpu)])) + else: + assert other_cpu not in used_host_cpus, \ + "sibling core was not reserved for " \ + "'isolate' vm" + vm_siblings.append([vcpu_index]) + + LOG.info("{}Check vcpus for vm via sudo virsh vcpupin".format(SEP)) + vcpu_pins = host_helper.get_vcpu_pins_for_instance_via_virsh( + host_ssh=host_ssh, + instance_name=instance_name) + assert vcpus == len(vcpu_pins), \ + 'Actual vm cpus number - {} is not as expected - {} in sudo ' \ + 'virsh vcpupin'.format(len(vcpu_pins), vcpus) + + virsh_cpus_sets = [] + for vcpu_pin in vcpu_pins: + 
vcpu = int(vcpu_pin['vcpu']) + cpu_set = common.parse_cpus_list(vcpu_pin['cpuset']) + virsh_cpus_sets += cpu_set + if shared_vcpu is not None and vcpu == shared_vcpu: + assert len(cpu_set) == 1, \ + "shared vcpu is pinned to more than 1 host cpu" + assert cpu_set[0] in shared_host_cpus, \ + "shared vcpu is not pinned to shared host cpu" + + if 'ded' in cpu_pol: + assert set(vm_host_cpus) == set( + virsh_cpus_sets), "pinned cpus in virsh cpupin is not the " \ + "same as ps" + else: + assert set(vm_host_cpus) < set( + virsh_cpus_sets), "floating vm should be affined to all " \ + "available host cpus" + + LOG.info("{}Get cpu affinity list for vm via taskset -pc".format(SEP)) + ps_affined_cpus = \ + vm_helper.get_affined_cpus_for_vm(vm_id, + host_ssh=host_ssh, + vm_host=vm_host, + instance_name=instance_name) + assert set(ps_affined_cpus) == set( + virsh_cpus_sets), "Actual affined cpu in taskset is different " \ + "than virsh" + return vm_host_cpus, vm_siblings + + +def _check_vm_topology_on_vm(vm_id, vcpus, siblings_total, current_vcpus=None, + prev_siblings=None, guest=None, + check_sibling=True): + siblings_total_ = None + if siblings_total: + siblings_total_ = copy.deepcopy(siblings_total) + # Check from vm in /proc/cpuinfo and + # /sys/devices/.../cpu#/topology/thread_siblings_list + if not guest: + guest = '' + if not current_vcpus: + current_vcpus = int(vcpus) + + LOG.info( + '=== Check vm topology from within the vm via: /sys/devices/system/cpu') + actual_sibs = [] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh: + + win_expt_cores_per_sib = win_log_count_per_sibling = None + if 'win' in guest: + LOG.info( + "{}Check windows guest cores via wmic cpu get cmds".format(SEP)) + offline_cores_count = 0 + log_cores_count, win_log_count_per_sibling = \ + get_procs_and_siblings_on_windows(vm_ssh) + online_cores_count = present_cores_count = log_cores_count + else: + LOG.info( + "{}Check vm present|online|offline cores from inside vm via " + "/sys/devices/system/cpu/".format(SEP)) + present_cores, online_cores, offline_cores = \ + vm_helper.get_proc_nums_from_vm(vm_ssh) + present_cores_count = len(present_cores) + online_cores_count = len(online_cores) + offline_cores_count = len(offline_cores) + + assert vcpus == present_cores_count, \ + "Number of vcpus: {}, present cores: {}".format( + vcpus, present_cores_count) + assert current_vcpus == online_cores_count, \ + "Current vcpus for vm: {}, online cores: {}".format( + current_vcpus, online_cores_count) + + expt_total_cores = online_cores_count + offline_cores_count + assert expt_total_cores in [present_cores_count, 512], \ + "Number of present cores: {}. 
online+offline cores: {}".format( + vcpus, expt_total_cores) + + if check_sibling and siblings_total_ and online_cores_count == \ + present_cores_count: + expt_sibs_list = [[vcpu] for vcpu in + range(present_cores_count)] if not \ + siblings_total_ \ + else siblings_total_ + + expt_sibs_list = [sorted(expt_sibs_list)] + if prev_siblings: + # siblings_total may get modified here + expt_sibs_list.append(sorted(prev_siblings)) + + if 'win' in guest: + LOG.info("{}Check windows guest siblings via wmic cpu get " + "cmds".format(SEP)) + expt_cores_list = [] + for sib_list in expt_sibs_list: + win_expt_cores_per_sib = [len(vcpus) for vcpus in sib_list] + expt_cores_list.append(win_expt_cores_per_sib) + assert win_log_count_per_sibling in expt_cores_list, \ + "Expected log cores count per sibling: {}, actual: {}".\ + format(win_expt_cores_per_sib, win_log_count_per_sibling) + + else: + LOG.info( + "{}Check vm /sys/devices/system/cpu/[" + "cpu#]/topology/thread_siblings_list".format( + SEP)) + for cpu in ['cpu{}'.format(i) for i in + range(online_cores_count)]: + actual_sibs_for_cpu = \ + vm_ssh.exec_cmd( + 'cat /sys/devices/system/cpu/{}/topology/thread_' + 'siblings_list'.format(cpu), fail_ok=False)[1] + + sib_for_cpu = common.parse_cpus_list(actual_sibs_for_cpu) + if sib_for_cpu not in actual_sibs: + actual_sibs.append(sib_for_cpu) + + assert sorted( + actual_sibs) in expt_sibs_list, "Expt sib lists: {}, " \ + "actual sib list: {}". \ + format(expt_sibs_list, sorted(actual_sibs)) + + +def get_procs_and_siblings_on_windows(vm_ssh): + cmd = 'wmic cpu get {}' + + procs = [] + for param in ['NumberOfCores', 'NumberOfLogicalProcessors']: + output = vm_ssh.exec_cmd(cmd.format(param), fail_ok=False)[1].strip() + num_per_proc = [int(line.strip()) for line in output.splitlines() if + line.strip() + and not re.search('{}|x'.format(param), line)] + procs.append(num_per_proc) + procs = zip(procs[0], procs[1]) + log_procs_per_phy = [nums[0] * nums[1] for nums in procs] + total_log_procs = sum(log_procs_per_phy) + + LOG.info( + "Windows guest total logical cores: {}, logical_cores_per_phy_core: {}". + format(total_log_procs, log_procs_per_phy)) + return total_log_procs, log_procs_per_phy + + +def check_vm_vswitch_affinity(vm_id, on_vswitch_nodes=True): + vm_host, vm_numa_nodes = vm_helper.get_vm_host_and_numa_nodes(vm_id) + vswitch_cores_dict = host_helper.get_host_cpu_cores_for_function( + vm_host, func='vSwitch') + vswitch_procs = [proc for proc in vswitch_cores_dict if + vswitch_cores_dict[proc]] + if not vswitch_procs: + return + + if on_vswitch_nodes: + assert set(vm_numa_nodes) <= set( + vswitch_procs), "VM {} is on numa nodes {} instead of vswitch " \ + "numa nodes {}".format( + vm_id, vm_numa_nodes, vswitch_procs) + else: + assert not (set(vm_numa_nodes) & set( + vswitch_procs)), "VM {} is on vswitch numa node(s). 
VM numa " \ + "nodes: {}, vSwitch numa nodes: {}".format( + vm_id, vm_numa_nodes, vswitch_procs) + + +def check_fs_sufficient(guest_os, boot_source='volume'): + """ + Check if volume pool, image storage, and/or image conversion space is + sufficient to launch vm + Args: + guest_os (str): e.g., tis-centos-guest, win_2016 + boot_source (str): volume or image + + Returns (str): image id + + """ + LOG.info("Check if storage fs is sufficient to launch boot-from-{} vm " + "with {}".format(boot_source, guest_os)) + check_disk = True if 'win' in guest_os else False + cleanup = None if re.search( + 'ubuntu_14|{}'.format(GuestImages.TIS_GUEST_PATTERN), + guest_os) else 'function' + img_id = glance_helper.get_guest_image(guest_os, check_disk=check_disk, + cleanup=cleanup) + return img_id + + +def check_vm_files(vm_id, storage_backing, ephemeral, swap, vm_type, file_paths, + content, root=None, vm_action=None, + prev_host=None, post_host=None, disks=None, post_disks=None, + guest_os=None, + check_volume_root=False): + """ + Check the files on vm after specified action. This is to check the disks + in the basic nova matrix table. + Args: + vm_id (str): + storage_backing (str): local_image, local_lvm, or remote + root (int): root disk size in flavor. e.g., 2, 5 + ephemeral (int): e.g., 0, 1 + swap (int): e.g., 0, 512 + vm_type (str): image, volume, image_with_vol, vol_with_vol + file_paths (list): list of file paths to check + content (str): content of the files (assume all files have the same + content) + vm_action (str|None): live_migrate, cold_migrate, resize, evacuate, + None (expect no data loss) + prev_host (None|str): vm host prior to vm_action. This is used to + check if vm host has changed when needed. + post_host (None|str): vm host after vm_action. + disks (dict): disks that are returned from + vm_helper.get_vm_devices_via_virsh() + post_disks (dict): only used in resize case + guest_os (str|None): default guest assumed for None. 
e,g., ubuntu_16 + check_volume_root (bool): whether to check root disk size even if vm + is booted from image + + Returns: + + """ + final_disks = post_disks if post_disks else disks + final_paths = list(file_paths) + if not disks: + disks = vm_helper.get_vm_devices_via_virsh(vm_id=vm_id) + + eph_disk = disks.get('eph', {}) + if not eph_disk: + if post_disks: + eph_disk = post_disks.get('eph', {}) + swap_disk = disks.get('swap', {}) + if not swap_disk: + if post_disks: + swap_disk = post_disks.get('swap', {}) + + disk_check = 'no_loss' + if vm_action in [None, 'live_migrate']: + disk_check = 'no_loss' + elif vm_type == 'volume': + # boot-from-vol, non-live migrate actions + disk_check = 'no_loss' + if storage_backing == 'local_lvm' and (eph_disk or swap_disk): + disk_check = 'eph_swap_loss' + elif storage_backing == 'local_image' and vm_action == 'evacuate' and ( + eph_disk or swap_disk): + disk_check = 'eph_swap_loss' + elif storage_backing == 'local_image': + # local_image, boot-from-image, non-live migrate actions + disk_check = 'no_loss' + if vm_action == 'evacuate': + disk_check = 'local_loss' + elif storage_backing == 'local_lvm': + # local_lvm, boot-from-image, non-live migrate actions + disk_check = 'local_loss' + if vm_action == 'resize': + post_host = post_host if post_host else vm_helper.get_vm_host(vm_id) + if post_host == prev_host: + disk_check = 'eph_swap_loss' + + LOG.info("disk check type: {}".format(disk_check)) + loss_paths = [] + if disk_check == 'no_loss': + no_loss_paths = final_paths + else: + # If there's any loss, we must not have remote storage. And any + # ephemeral/swap disks will be local. + disks_to_check = disks.get('eph', {}) + # skip swap type checking for data loss since it's not a regular + # filesystem + # swap_disks = disks.get('swap', {}) + # disks_to_check.update(swap_disks) + + for path_ in final_paths: + # For tis-centos-guest, ephemeral disk is mounted to /mnt after + # vm launch. 
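+            # Files created directly under /mnt therefore live on the local
+            # ephemeral disk and are expected to be lost in this scenario.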
+ if str(path_).rsplit('/', 1)[0] == '/mnt': + loss_paths.append(path_) + break + + for disk in disks_to_check: + for path in final_paths: + if disk in path: + # We mount disk vdb to /mnt/vdb, so this is looking for + # vdb in the mount path + loss_paths.append(path) + break + + if disk_check == 'local_loss': + # if vm booted from image, then the root disk is also local disk + root_img = disks.get('root_img', {}) + if root_img: + LOG.info( + "Auto mount vm disks again since root disk was local with " + "data loss expected") + vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=final_disks) + file_name = final_paths[0].rsplit('/')[-1] + root_path = '/{}'.format(file_name) + loss_paths.append(root_path) + assert root_path in final_paths, \ + "root_path:{}, file_paths:{}".format(root_path, final_paths) + + no_loss_paths = list(set(final_paths) - set(loss_paths)) + + LOG.info("loss_paths: {}, no_loss_paths: {}, total_file_pahts: {}".format( + loss_paths, no_loss_paths, final_paths)) + res_files = {} + with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id, + vm_image_name=guest_os) as vm_ssh: + vm_ssh.exec_sudo_cmd('cat /etc/fstab') + vm_ssh.exec_sudo_cmd("mount | grep --color=never '/dev'") + + for file_path in loss_paths: + vm_ssh.exec_sudo_cmd('touch {}2'.format(file_path), fail_ok=False) + vm_ssh.exec_sudo_cmd('echo "{}" >> {}2'.format(content, file_path), + fail_ok=False) + + for file_path in no_loss_paths: + output = vm_ssh.exec_sudo_cmd('cat {}'.format(file_path), + fail_ok=False)[1] + res = '' if content in output else 'content mismatch' + res_files[file_path] = res + + for file, error in res_files.items(): + assert not error, "Check {} failed: {}".format(file, error) + + swap_disk = final_disks.get('swap', {}) + if swap_disk: + disk_name = list(swap_disk.keys())[0] + partition = '/dev/{}'.format(disk_name) + if disk_check != 'local_loss' and not disks.get('swap', {}): + mount_on, fs_type = storage_helper.mount_partition( + ssh_client=vm_ssh, disk=disk_name, + partition=partition, fs_type='swap') + storage_helper.auto_mount_fs(ssh_client=vm_ssh, fs=partition, + mount_on=mount_on, fs_type=fs_type) + + LOG.info("Check swap disk is on") + swap_output = vm_ssh.exec_sudo_cmd( + 'cat /proc/swaps | grep --color=never {}'.format(partition))[1] + assert swap_output, "Expect swapon for {}. Actual output: {}". \ + format(partition, vm_ssh.exec_sudo_cmd('cat /proc/swaps')[1]) + + LOG.info("Check swap disk size") + _check_disk_size(vm_ssh, disk_name=disk_name, expt_size=swap) + + eph_disk = final_disks.get('eph', {}) + if eph_disk: + LOG.info("Check ephemeral disk size") + eph_name = list(eph_disk.keys())[0] + _check_disk_size(vm_ssh, eph_name, expt_size=ephemeral * 1024) + + if root: + image_root = final_disks.get('root_img', {}) + root_name = '' + if image_root: + root_name = list(image_root.keys())[0] + elif check_volume_root: + root_name = list(final_disks.get('root_vol').keys())[0] + + if root_name: + LOG.info("Check root disk size") + _check_disk_size(vm_ssh, disk_name=root_name, + expt_size=root * 1024) + + +def _check_disk_size(vm_ssh, disk_name, expt_size): + partition = vm_ssh.exec_sudo_cmd( + 'cat /proc/partitions | grep --color=never "{}$"'.format(disk_name))[1] + actual_size = int( + int(partition.split()[-2].strip()) / 1024) if partition else 0 + expt_size = int(expt_size) + assert actual_size == expt_size, "Expected disk size: {}M. 
Actual: {}M".\ + format(expt_size, actual_size) + + +def check_alarms(before_alarms, timeout=300, + auth_info=Tenant.get('admin_platform'), con_ssh=None, + fail_ok=False): + after_alarms = system_helper.get_alarms(auth_info=auth_info, + con_ssh=con_ssh) + new_alarms = [] + check_interval = 5 + for item in after_alarms: + if item not in before_alarms: + alarm_id, entity_id = item.split('::::') + if alarm_id == EventLogID.CPU_USAGE_HIGH: + check_interval = 45 + elif alarm_id == EventLogID.NTP_ALARM: + # NTP alarm handling + LOG.info("NTP alarm found, checking ntpq stats") + host = entity_id.split('host=')[1].split('.ntp')[0] + system_helper.wait_for_ntp_sync(host=host, fail_ok=False, + auth_info=auth_info, + con_ssh=con_ssh) + continue + + new_alarms.append((alarm_id, entity_id)) + + res = True + remaining_alarms = None + if new_alarms: + LOG.info("New alarms detected. Waiting for new alarms to clear.") + res, remaining_alarms = \ + system_helper.wait_for_alarms_gone(new_alarms, + fail_ok=True, + timeout=timeout, + check_interval=check_interval, + auth_info=auth_info, + con_ssh=con_ssh) + + if not res: + msg = "New alarm(s) found and did not clear within {} seconds. " \ + "Alarm IDs and Entity IDs: {}".format(timeout, remaining_alarms) + LOG.warning(msg) + if not fail_ok: + assert res, msg + + return res, remaining_alarms + + +def check_rest_api(): + LOG.info("Check sysinv REST API") + sysinv_rest = Rest('sysinv', platform=True) + resource = '/controller_fs' + status_code, text = sysinv_rest.get(resource=resource, auth=True) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + + LOG.info("Check status_code of 200 is received") + message = "Expected status_code of 200 - received {} and message {}" + assert status_code == 200, message.format(status_code, text) diff --git a/automated-pytest-suite/keywords/cinder_helper.py b/automated-pytest-suite/keywords/cinder_helper.py new file mode 100644 index 0000000..25a10fd --- /dev/null +++ b/automated-pytest-suite/keywords/cinder_helper.py @@ -0,0 +1,1756 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import re +import os +import random +import time +import math + +from consts.auth import Tenant +from consts.stx import GuestImages, Prompt +from consts.timeout import VolumeTimeout +from keywords import common, glance_helper +from testfixtures.fixture_resources import ResourceCleanup +from utils import table_parser, cli, exceptions +from utils.clients.ssh import ControllerClient +from utils.tis_log import LOG + + +def get_any_volume(status='available', bootable=True, auth_info=None, + con_ssh=None, new_name=None, cleanup=None): + """ + Get an id of any volume that meets the criteria. Create one if none exists. 
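+
+    Note: the return value is a (rtn_code, volume_id) tuple; rtn_code is 0
+    when an existing volume is reused and 1 when a new volume was created.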
+ + Args: + status (str): + bootable (str|bool): + auth_info (dict): + con_ssh (SSHClient): + new_name (str): This is only used if no existing volume found and new + volume needs to be created + cleanup (str|None) + + Returns: + str: volume id + + """ + volumes = get_volumes(status=status, bootable=bootable, auth_info=auth_info, + con_ssh=con_ssh) + if volumes: + return 0, random.choice(volumes) + else: + return 1, create_volume(name=new_name, bootable=bootable, + auth_info=auth_info, con_ssh=con_ssh, + cleanup=cleanup)[1] + + +def get_volumes(vols=None, full_name=None, project=None, project_domain=None, + user=None, user_domain=None, all_=True, + long=True, name=None, name_strict=False, vol_type=None, + size=None, status=None, attached_vm=None, + bootable=None, field='ID', auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Return a list of volume ids based on the given criteria + + Args: + vols (list or str): + full_name + project + project_domain + user + user_domain + all_ (bool) + long (bool) + name (str): post execution table filters + name_strict (bool): + vol_type (str): + size (str): + status:(str|list|tuple) + attached_vm (str): + bootable (str|bool): true or false + field (str) + auth_info (dict): could be Tenant.get('admin'),Tenant.get('tenant1'), + Tenant.get('tenant2') + con_ssh (str): + + Returns (list): a list of volume ids based on the given criteria + """ + args_dict = { + '--long': long, + '--a': all_, + '--name': full_name, + '--project': project, + '--project-domain': project_domain, + '--user': user, + '--user-domain': user_domain, + } + args = common.parse_args(args_dict) + table_ = table_parser.table( + cli.openstack('volume list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + if name is not None: + table_ = table_parser.filter_table(table_, strict=name_strict, + **{'Name': name}) + if bootable is not None: + bootable = str(bootable).lower() + filters = { + 'ID': vols, + 'Type': vol_type, + 'Size': size, + 'Attached to': attached_vm, + 'Status': status, + 'Bootable': bootable + } + filters = {k: v for k, v in filters.items() if v is not None} + if filters: + table_ = table_parser.filter_table(table_, **filters) + + return table_parser.get_column(table_, field) + + +def get_volume_snapshot_values(vol_snapshot, fields, strict=True, con_ssh=None, + auth_info=None): + """ + Get volume snapshot values for given fields via openstack volume snapshot + show + Args: + vol_snapshot (str): + fields (list|str|tuple): + strict (bool): + con_ssh: + auth_info: + + Returns (list): values for given fields + + """ + + if isinstance(fields, str): + fields = [fields] + + table_ = table_parser.table( + cli.openstack('volume snapshot show', vol_snapshot, ssh_client=con_ssh, + auth_info=auth_info)[1]) + vals = [] + for field in fields: + vals.append( + table_parser.get_value_two_col_table(table_, field, strict=strict)) + + return vals + + +def get_volume_snapshot_list(vol_snaps=None, name=None, name_strict=False, + size=None, status=None, volume=None, + field='ID', auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Return a list of volume ids based on the given criteria + + Args: + vol_snaps (list or str): + name (str): + name_strict (bool): + size (str): + status:(str) + volume (str): + field + auth_info (dict): could be Tenant.get('admin'),Tenant.get('tenant1'), + Tenant.get('tenant2') + con_ssh (str): + + Returns (list): a list of volume snapshot ids based on the given criteria + + """ + optional_args = { + 'ID': vol_snaps, + 'Size': size, + 'Status': status, + 
'Volume': volume, + } + + criteria = {} + for key, value in optional_args.items(): + if value is not None: + criteria[key] = value + + table_ = table_parser.table( + cli.openstack('volume snapshot list --a --long', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + if name: + table_ = table_parser.filter_table(table_, strict=name_strict, + **{'Name': name}) + + return table_parser.get_values(table_, field, **criteria) + + +def get_volumes_attached_to_vms(volumes=None, vms=None, header='ID', + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Filter out the volumes that are attached to a vm. + Args: + volumes (list or str): list of volumes ids to filter out from. When + None, filter from all volumes + vms (list or str): get volumes attached to given vm(s). When None, + filter volumes attached to any vm + header (str): header of the column in the table to return + con_ssh (SSHClient): + auth_info (dict): + + Returns (list): a list of values from the column specified or [] if no + match found + + """ + table_ = table_parser.table( + cli.openstack('volume list --a', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + # Filter from given volumes if provided + if volumes is not None: + table_ = table_parser.filter_table(table_, ID=volumes) + + # Filter from given vms if provided + if vms: + table_ = table_parser.filter_table(table_, strict=False, + **{'Attached to': vms}) + # Otherwise filter out volumes attached to any vm + else: + table_ = table_parser.filter_table(table_, strict=False, regex=True, + **{'Attached to': r'.*\S.*'}) + + return table_parser.get_column(table_, header) + + +def create_volume(name=None, description=None, source_type='image', + source_id=None, vol_type=None, size=None, + avail_zone=None, properties=None, hints=None, + multi_attach=None, bootable=True, read_only=None, + consistency_group=None, fail_ok=False, auth_info=None, + con_ssh=None, + avail_timeout=VolumeTimeout.STATUS_CHANGE, guest_image=None, + cleanup=None): + """ + Create a volume with given criteria. + + Args: + name (str|None): display name of the volume + description (str|None): description of the volume + source_type (str|None): image, snapshot, volume, or None. + source_id (str|None): source volume id to create volume from + vol_type (str|None): volume type such as 'raw' + size (int|None): volume size in GBs + avail_zone (str|None): availability zone + properties (str|list|tuple|dict|None): metadata key and value pairs + '[ [ ...]]' + bootable (bool|None): When False, the source id params will be + ignored and non-bootable volume will be created + read_only (bool|None) + hints (str|list|tuple|dict|None) + multi_attach + consistency_group (str|None) + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + guest_image (str): guest image name if image_id unspecified. valid + values: cgcs-guest, ubuntu, centos_7, etc + avail_timeout (int) + cleanup (None|str): teardown level + + Returns (tuple): (return_code, volume_id or err msg) + (-1, existing_vol_id) # returns existing volume_id instead of + creating a new one. Applies when rtn_exist=True. + (0, vol_id) # Volume created successfully and in available state. + (1, ) # Create volume cli rejected with sterr + (2, vol_id) # volume created, but not in available state. + (3, vol_id]: if volume created, but not in given bootable state. + + Notes: + snapshot_id > source_vol_id > image_id if more than one source ids + are provided. 
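+
+    Examples (illustrative only, not exercised by the test suite):
+        # bootable volume from the default guest image, removed at the end
+        # of the calling test function
+        code, vol_id = create_volume(name='vol_example', cleanup='function')
+
+        # 2G empty, non-bootable volume
+        code, vol_id = create_volume(name='vol_empty', source_type=None,
+                                     size=2, bootable=False)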
+ """ + valid_cleanups = ('module', 'session', 'function', 'class', None) + if cleanup not in valid_cleanups: + raise ValueError( + "Invalid scope provided. Choose from: {}".format(valid_cleanups)) + + valid_source_types = (None, 'image', 'volume', 'source', 'snapshot') + if source_type not in valid_source_types: + raise ValueError( + "Invalid source type specified. Choose from: {}".format( + valid_source_types)) + + if source_type and not source_id: + if source_type != 'image': + raise ValueError( + "source_id has to be provided for {}".format(source_type)) + + # Get glance image id as source_id based on guest_image value + guest_image = guest_image if guest_image else GuestImages.DEFAULT[ + 'guest'] + source_id = glance_helper.get_image_id_from_name(guest_image, + strict=True, + auth_info=auth_info, + con_ssh=con_ssh) + if size is None: + size = GuestImages.IMAGE_FILES[guest_image][1] + + if size is None: + # size is required if source_type is not volume or snapshot + if not source_type: + size = 2 + elif source_type == 'image': + if guest_image: + size = GuestImages.IMAGE_FILES[guest_image][1] + else: + # check glance image size via openstack image show to + # determine the volume size + image_size = glance_helper.get_image_values(source_id, 'size', + auth_info=auth_info, + con_ssh=con_ssh)[0] + size = max(2, math.ceil(image_size / math.pow(1024, 3))) + + if not name: + if not auth_info: + auth_info = Tenant.get_primary() + name = 'vol-{}'.format(auth_info['tenant']) + existing_volumes = get_volumes(field='Name', auth_info=auth_info, + con_ssh=con_ssh) + name = common.get_unique_name(name, resource_type='volume', + existing_names=existing_volumes) + + optional_args = {'--size': size, + '--description': description, + '--type': vol_type, + '--availability-zone': avail_zone, + '--consistency-group': consistency_group, + '--property': properties, + '--hint': hints, + '--multi-attach': multi_attach, + '--bootable': True if bootable else None, + '--non-bootable': True if bootable is False else None, + '--read-only': True if read_only else None, + '--read-write': True if read_only is False else None, + } + if source_type: + source_type = 'source' if 'volume' in source_type else source_type + optional_args['--{}'.format(source_type)] = source_id + + args = '{} {}'.format(common.parse_args(optional_args, repeat_arg=True), + name) + LOG.info("Creating Volume with args: {}".format(args)) + exit_code, cmd_output = cli.openstack('volume create', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + table_ = table_parser.table(cmd_output) + volume_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup and volume_id: + ResourceCleanup.add('volume', volume_id, scope=cleanup) + + if exit_code > 0: + return 1, cmd_output + + LOG.info("Post action check started for create volume.") + if not wait_for_volume_status(volume=volume_id, status='available', + auth_info=auth_info, fail_ok=fail_ok, + timeout=avail_timeout): + LOG.warning( + "Volume {} did not reach available state within {}s after " + "creation".format( + name, avail_timeout)) + return 2, volume_id + + LOG.info("Volume is created and in available state: {}".format(volume_id)) + return 0, volume_id + + +def get_volume_show_values(volume, fields, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get values for given cinder volume via openstack volume show + Args: + volume: + fields (str|tuple|list): + con_ssh: + auth_info: + + Returns (list): + + """ + if not volume: + raise ValueError("Volume is not 
provided.") + + if isinstance(fields, str): + fields = (fields,) + + table_ = table_parser.table( + cli.openstack('volume show', volume, ssh_client=con_ssh, + auth_info=auth_info)[1]) + vals = [] + for field in fields: + field = field.lower() + val = table_parser.get_value_two_col_table(table_, field=field, + merge_lines=True) + if field == 'properties': + val = table_parser.convert_value_to_dict(val) + elif val and (field in ('attachments', 'volume_image_metadata') or + val.lower() in ('true', 'false', 'none')): + try: + LOG.info('val: {}'.format(val)) + val = eval( + val.replace('true', 'True').replace('none', 'None').replace( + 'false', 'False')) + except (NameError, SyntaxError): + pass + vals.append(val) + + return vals + + +def wait_for_volume_status(volume, status='available', + timeout=VolumeTimeout.STATUS_CHANGE, fail_ok=True, + check_interval=5, con_ssh=None, auth_info=None): + """ + + Args: + volume (str): + status (str/list): + timeout (int): + fail_ok (bool): + check_interval (int): + con_ssh (str): + auth_info (dict): + + Returns: + True if the status of the volume is same as the status(str) that was + passed into the function \n + false if timed out or otherwise + + """ + return __wait_for_vol_status(volume, is_snapshot=False, status=status, + timeout=timeout, fail_ok=fail_ok, + check_interval=check_interval, con_ssh=con_ssh, + auth_info=auth_info) + + +def wait_for_vol_snapshot_status(vol_snapshot, status='available', + timeout=VolumeTimeout.STATUS_CHANGE, + fail_ok=False, + check_interval=5, con_ssh=None, + auth_info=None): + """ + Wait for cinder volume or volume snapshot to reach given state + Args: + vol_snapshot (str): + status (str/list): + timeout (int): + fail_ok (bool): + check_interval (int): + con_ssh (str): + auth_info (dict): + + Returns: + True if the status of the volume is same as the status(str) that was + passed into the function \n + false if timed out or otherwise + + """ + return __wait_for_vol_status(vol_snapshot, is_snapshot=True, status=status, + timeout=timeout, + fail_ok=fail_ok, check_interval=check_interval, + con_ssh=con_ssh, auth_info=auth_info) + + +def __wait_for_vol_status(volume, is_snapshot=False, status='available', + timeout=VolumeTimeout.STATUS_CHANGE, + fail_ok=False, check_interval=5, con_ssh=None, + auth_info=None): + if isinstance(status, str): + status = (status,) + + vol_str = 'snapshot ' if is_snapshot else '' + LOG.info("Waiting for cinder volume {}{} status: {}".format(vol_str, volume, + status)) + end_time = time.time() + timeout + current_status = prev_status = None + + func = get_volume_snapshot_values if is_snapshot else get_volume_show_values + + while time.time() < end_time: + current_status = func( + volume, fields='status', con_ssh=con_ssh, auth_info=auth_info)[0] + if current_status in status: + LOG.info("Volume {}{} is in {} state".format(vol_str, volume, + current_status)) + return True + elif current_status == 'error': + msg = 'Volume {}{} is in error status'.format(vol_str, volume) + LOG.warning(msg) + if fail_ok: + return False + raise exceptions.VolumeError(msg) + elif current_status != prev_status: + LOG.info("Volume {}status is: {}".format(vol_str, current_status)) + prev_status = current_status + + time.sleep(check_interval) + else: + msg = "Timed out waiting for volume {}{} status to reach status: {}. " \ + "Actual status: {}". 
\ + format(vol_str, volume, status, current_status) + LOG.warning(msg) + if fail_ok: + return False + raise exceptions.TimeoutException(msg) + + +def get_vol_snapshots(status='available', volume=None, vol_id=None, name=None, + size=None, field='ID', + con_ssh=None, auth_info=None): + """ + Get one volume snapshot id that matches the given criteria. + + Args: + status (str): snapshot status. e.g., 'available', 'in use' + volume (str): Name of the volume the snapshot created from + vol_id (str): volume id the snapshot was created from + name (str): snapshot name + size (int): + field (str) + con_ssh (SSHClient): + auth_info (dict): + + Returns: + A string of snapshot id. Return None if no matching snapshot found. + + """ + table_ = table_parser.table( + cli.openstack('snapshot list', ssh_client=con_ssh, auth_info=auth_info)[ + 1]) + if size is not None: + size = str(size) + + if vol_id and not volume: + volume = get_volumes(vols=vol_id, field='Name')[0] + + possible_args = { + 'status': status, + "Volume": volume, + 'Status': status, + 'name': name, + 'Size': size + } + + args_ = {} + for key, val in possible_args.items(): + if val: + args_[key] = val + + return table_parser.get_values(table_, field, **args_) + + +def _wait_for_volumes_deleted(volumes, timeout=VolumeTimeout.DELETE, + fail_ok=True, + check_interval=3, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + check if a specific field still exist in a specified column for + cinder list + + Args: + volumes(list or str): ids of volumes + timeout (int): + fail_ok (bool): + check_interval (int): + con_ssh: + auth_info (dict): + + Returns (tuple): (result(boot), volumes_deleted(list)) + + """ + if isinstance(volumes, str): + volumes = [volumes] + + vols_to_check = list(volumes) + end_time = time.time() + timeout + while time.time() < end_time: + existing_vols = get_volumes(long=False, auth_info=auth_info, + con_ssh=con_ssh) + vols_to_check = list(set(existing_vols) & set(vols_to_check)) + if not vols_to_check: + return True, list(volumes) + + time.sleep(check_interval) + else: + if fail_ok: + return False, list(set(volumes) - set(vols_to_check)) + raise exceptions.TimeoutException( + "Timed out waiting for volume(s) to be removed from openstack " + "volume list: " + "{}.".format(vols_to_check)) + + +def delete_volumes(volumes=None, fail_ok=False, timeout=VolumeTimeout.DELETE, + check_first=True, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Delete volume(s). + + Args: + volumes (list|str): ids of the volumes to delete. If None, + all available volumes under given Tenant will be + deleted. If given Tenant is admin, available volumes for all + tenants will be deleted. + fail_ok (bool): True or False + timeout (int): CLI timeout and waiting for volumes disappear timeout + in seconds. + check_first (bool): Whether to check volumes existence before attempt + to delete + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): (rtn_code (int), msg (str)) + (-1, "No volume to delete. Do nothing.") # No volume given and no + volume exists on system for given tenant + (-1, ""None of the given volume(s) exist on system. Do nothing."") + # None of the given volume(s) exists on + system for given tenant + (0, "Volume(s) deleted successfully") # volume is successfully + deleted. + (1, ) # Delete volume cli returns stderr + (2, "Delete request(s) accepted but some volume(s) did not disappear + within seconds".) + (3, "Delete request(s) rejected and post check failed for accepted + request(s). 
\nCLI error: " + + """ + if volumes is None: + volumes = get_volumes(status=('available', 'error'), + auth_info=auth_info, con_ssh=con_ssh) + + LOG.info("Deleting volume(s): {}".format(volumes)) + + if not volumes: + msg = "No volume to delete. Do nothing." + LOG.info(msg) + return -1, msg + + if isinstance(volumes, str): + volumes = [volumes] + volumes = list(volumes) + + if check_first: + vols_to_del = get_volumes(vols=volumes, auth_info=auth_info, + con_ssh=con_ssh) + if not vols_to_del: + msg = "None of the given volume(s) exist on system. Do nothing." + LOG.info(msg) + return -1, msg + + if not vols_to_del == volumes: + LOG.info( + "Some volume(s) don't exist. Given volumes: {}. Volumes to " + "delete: {}.". + format(volumes, vols_to_del)) + else: + vols_to_del = volumes + + vols_to_del_str = ' '.join(vols_to_del) + + LOG.debug("Volumes to delete: {}".format(vols_to_del)) + exit_code, cmd_output = cli.openstack('volume delete', vols_to_del_str, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info, timeout=timeout) + + vols_to_check = [] + if exit_code == 1: + for vol in vols_to_del: + # if cinder delete on a specific volume ran successfully, then it + # has no output regarding that vol + if vol not in cmd_output: + vols_to_check.append(vol) + else: + vols_to_check = vols_to_del + + LOG.info("Waiting for volumes to be removed from cinder list: {}".format( + vols_to_check)) + all_deleted, vols_deleted = _wait_for_volumes_deleted(vols_to_check, + fail_ok=True, + con_ssh=con_ssh, + auth_info=auth_info, + timeout=timeout) + + if exit_code == 1: + if all_deleted: + if fail_ok: + return 1, cmd_output + raise exceptions.CLIRejected(cmd_output) + else: + msg = "Delete request(s) rejected and post check failed for " \ + "accepted request(s). \nCLI error: {}". 
\ + format(cmd_output) + if fail_ok: + LOG.warning(msg) + return 3, msg + raise exceptions.VolumeError(msg) + + if not all_deleted: + msg = "Delete request(s) accepted but some volume(s) did not " \ + "disappear within {} seconds".format(timeout) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.VolumeError(msg) + + LOG.info("Volume(s) are successfully deleted: {}".format(vols_to_check)) + return 0, "Volume(s) deleted successfully" + + +def delete_volume_snapshots(snapshots=None, force=False, check_first=True, + fail_ok=False, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Delete given volume snapshot via cinder snapshot-delete + + Args: + snapshots (str|list): + force (bool): + check_first (bool): + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): + (0, volume snapshot is successfully deleted) + (1, ) + (2, volume snapshot still exists in cinder qos-list + after deletion) + + """ + + if not snapshots: + snapshots_to_del = get_volume_snapshot_list(auth_info=auth_info) + else: + snapshots_to_del = [snapshots] if isinstance(snapshots, str) else list( + snapshots) + if check_first: + snapshots_to_del = list(set(snapshots_to_del) & set( + get_volume_snapshot_list(auth_info=auth_info))) + + if not snapshots_to_del: + msg = "No volume snapshot to delete or provided snapshot(s) not " \ + "exist on system" + LOG.info(msg) + return -1, msg + + args_ = '{}{}'.format('--force ' if force else '', + ' '.join(snapshots_to_del)) + code, output = cli.openstack('snapshot delete', args_, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code == 1: + return code, output + + post_vol_snap_list = get_volume_snapshot_list(auth_info=auth_info) + undeleted_snapshots = list(set(snapshots_to_del) & set(post_vol_snap_list)) + if undeleted_snapshots: + err_msg = "Volume snapshot {} still exists in cinder snapshot-list " \ + "after deletion".format(undeleted_snapshots) + if fail_ok: + LOG.warning(err_msg) + return 2, err_msg + else: + raise exceptions.CinderError(err_msg) + + succ_msg = "Volume snapshot(s) successfully deleted: {}".format( + snapshots_to_del) + LOG.info(succ_msg) + return 0, succ_msg + + +def create_volume_qos(qos_name=None, consumer=None, field='id', fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None, **specs): + """ + Create volume QoS with given name and specs + + Args: + qos_name (str): + fail_ok (bool): + consumer (str): Valid consumer of QoS specs are: ['front-end', + 'back-end', 'both'] + field (str) + auth_info (dict): + con_ssh (SSHClient): + **specs: QoS specs + format: **{: , : } + + Returns (tuple): + (0, QoS created successfully with specs: ) + (1, ) + + """ + if not qos_name: + qos_name = 'vol_qos' + + qos_name = common.get_unique_name(qos_name, + get_volume_qos_list(field='name'), + resource_type='qos') + args_dict = { + '--consumer': consumer, + '--property': specs, + } + args_ = common.parse_args(args_dict, repeat_arg=True) + + LOG.info("Creating QoS {} with args: {}".format(qos_name, args_)) + args_ += ' {}'.format(qos_name) + code, output = cli.openstack('volume qos create', args_, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, output + + qos_tab = table_parser.table(output) + qos_value = table_parser.get_value_two_col_table(qos_tab, field) + + LOG.info( + "QoS {} created successfully with specs: {}".format(qos_name, specs)) + return 0, qos_value + + +def delete_volume_qos(qos_ids, force=False, check_first=True, fail_ok=False, + 
auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Delete given list of QoS' + + Args: + qos_ids (list|str|tuple): + force (bool): + check_first (bool): + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns: + + """ + if isinstance(qos_ids, str): + qos_ids = [qos_ids] + + qos_ids_to_del = list(qos_ids) + if check_first: + existing_qos_list = get_volume_qos_list(auth_info=auth_info, + con_ssh=con_ssh) + qos_ids_to_del = list(set(existing_qos_list) & set(qos_ids)) + if not qos_ids_to_del: + msg = "None of the QoS specs {} exist in cinder qos-list. Do " \ + "nothing.".format(qos_ids) + LOG.info(msg) + return -1, msg + + rejected_list = [] + for qos in qos_ids_to_del: + args = qos if force is None else '--force {} {}'.format(force, qos) + code, output = cli.openstack('volume qos delete', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + rejected_list.append(qos) + + qos_list_to_check = list(set(qos_ids) - set(rejected_list)) + + undeleted_list = [] + if qos_list_to_check: + undeleted_list = \ + wait_for_qos_deleted(qos_ids=qos_list_to_check, fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=auth_info)[1] + + if rejected_list or undeleted_list: + reject_str = ' Deletion rejected volume QoS: {}.'.format( + rejected_list) if rejected_list else '' + undeleted_str = ' Volume QoS still exists after deletion: {}.'.format( + undeleted_list) if undeleted_list else '' + err_msg = "Some QoS's failed to delete.{}{}".format(reject_str, + undeleted_str) + LOG.warning(err_msg) + if fail_ok: + return 1, err_msg + else: + raise exceptions.CinderError(err_msg) + + succ_msg = "QoS's successfully deleted: {}".format(qos_ids) + LOG.info(succ_msg) + return 0, succ_msg + + +def wait_for_qos_deleted(qos_ids, timeout=10, check_interval=1, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Wait for given list of QoS to be gone from cinder qos-list + Args: + qos_ids (list): + timeout (int): + check_interval (int): + auth_info (dict) + fail_ok (bool): + con_ssh (SSHClient): + + Returns (tuple): + (True, []) All given QoS ids are gone from cinder qos-list + (False, [undeleted_qos_list]) Some given QoS' still exist in + cinder qos-list + + """ + LOG.info("Waiting for QoS' to be deleted from system: {}".format(qos_ids)) + if isinstance(qos_ids, str): + qos_ids = (qos_ids,) + + qos_undeleted = list(qos_ids) + end_time = time.time() + timeout + + while time.time() < end_time: + existing_qos_list = get_volume_qos_list(con_ssh=con_ssh, + auth_info=auth_info) + qos_undeleted = list(set(existing_qos_list) & set(qos_undeleted)) + + if not qos_undeleted: + msg = "QoS' all gone from 'openstack volume qos list': {}".format( + qos_ids) + LOG.info(msg) + return True, [] + + time.sleep(check_interval) + + err_msg = "Timed out waiting for QoS' to be gone from cinder qos-list: " \ + "{}".format(qos_undeleted) + LOG.warning(err_msg) + if fail_ok: + return False, qos_undeleted + else: + raise exceptions.CinderError(err_msg) + + +def create_volume_type(name=None, public=None, project=None, + project_domain=None, field='id', fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None, + **properties): + """ + Create a volume type with given name + + Args: + name (str|None): name for the volume type + public (bool|None): + project (str|None) + project_domain (str|None) + field (str): 'id' or 'name' + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): + (0, ) - volume type created successfully + (1, ) - cli rejected + (2, ) - volume type 
public flag is not as expected + + """ + + if not name: + name = 'vol_type' + name = common.get_unique_name(name, get_volume_types(field='Name')) + LOG.info("Creating volume type {}".format(name)) + + args_dict = { + '--public': True if public else None, + '--private': True if public is False else None, + '--property': properties, + '--project': project, + '--project-domain': project_domain, + } + + args_ = ' '.join((common.parse_args(args_dict, repeat_arg=True), name)) + code, output = cli.openstack('volume type create', args_, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code == 1: + return 1, output + + table_ = table_parser.table(output) + vol_type = table_parser.get_value_two_col_table(table_, field) + + LOG.info("Volume type {} is created successfully".format(vol_type)) + return 0, vol_type + + +def delete_volume_types(vol_types, check_first=True, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Delete given volume type + + Args: + vol_types (list|str|tuple): volume type ID(s) to delete + check_first (bool): + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): + (-1, None of the volume types exist in cinder qos-list. Do + nothing.) + (0, Volume types successfully deleted: ) + (1, ) + (2, Volume types delete rejected: ; volume types still in cinder + type-list after deletion: ) + + """ + + LOG.info("Delete volume types started") + if isinstance(vol_types, str): + vol_types = (vol_types,) + + vol_types_to_del = list(vol_types) + if check_first: + existing_vol_types = get_volume_types(auth_info=auth_info, + con_ssh=con_ssh) + vol_types_to_del = list(set(existing_vol_types) & set(vol_types)) + if not vol_types_to_del: + msg = "None of the volume types {} exist in cinder qos-list. 
Do " \ + "nothing.".format(vol_types) + LOG.info(msg) + return -1, msg + + args = ' '.join(vol_types_to_del) + code, output = cli.openstack('volume type delete', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 1: + return 1, output + + LOG.info("Check volume types are gone from 'openstack volume type list'") + post_vol_types = get_volume_types(auth_info=auth_info, con_ssh=con_ssh) + types_undeleted = list(set(post_vol_types) & set(vol_types_to_del)) + + if types_undeleted: + err_msg = "Volume type(s) still in exist after deletion: {}".format( + types_undeleted) + LOG.warning(err_msg) + if fail_ok: + return 2, err_msg + else: + raise exceptions.CinderError(err_msg) + + succ_msg = "Volume types successfully deleted: {}".format(vol_types) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_volume_types(long=False, ids=None, public=None, name=None, strict=True, + field='ID', con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get cinder volume types via openstack volume type list + Args: + long (bool) + ids (str|list|tuple|None): + public: + name: + strict: + field (str|list|tuple): + con_ssh: + auth_info: + + Returns (list): + + """ + args = '--long' if long else '' + table_ = table_parser.table( + cli.openstack('volume type list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + filters = {} + if ids: + filters['ID'] = ids + if public is not None: + filters['Is Public'] = public + + if filters: + table_ = table_parser.filter_table(table_, **filters) + + if name is not None: + table_ = table_parser.filter_table(table_, strict=strict, + **{'Name': name}) + + return table_parser.get_multi_values(table_, field) + + +def get_volume_qos_list(field='id', qos_id=None, name=None, consumer=None, + strict=True, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get qos list based on given filters + + Args: + field (str|list|tuple): 'id', 'name', 'associations', etc... + qos_id (list|str|None): volume qos id(s) to filter out from + name (str|None): name of the qos' to filter for + consumer (str): consumer of the qos' to filter for + strict (bool): + con_ssh: + auth_info: + + Returns (list): list of matching volume QoS' + + """ + + kwargs_raw = { + 'ID': qos_id, + 'Name': name, + 'Consumer': consumer, + } + + kwargs = {} + for key, val in kwargs_raw.items(): + if val is not None: + kwargs[key] = val + + table_ = table_parser.table( + cli.openstack('volume qos list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + return table_parser.get_multi_values(table_, field, strict=strict, **kwargs) + + +def associate_volume_qos(volume_qos, volume_type, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Associates qos spec with specified volume type. 
+ # must be an admin to perform cinder qos-associate + + Args: + volume_qos (str) + volume_type (str) + auth_info + fail_ok (bool) + con_ssh + + Returns (tuple) + + """ + args_ = '{} {}'.format(volume_qos, volume_type) + + LOG.info( + "Associate volume qos {} to type {}".format(volume_qos, volume_type)) + code, output = cli.openstack('volume qos associate', args_, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + msg = "Volume qos {} is successfully associated to volume type {}".format( + volume_qos, volume_type) + LOG.info(msg) + return 0, msg + + +def disassociate_volume_qos(volume_qos, volume_type=None, all_vol_types=False, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Disassociate a volume QoS spec from volume type(s) + + Args: + volume_qos (str): + volume_type (str|None): volume type name/id + all_vol_types (bool): + fail_ok (bool): + con_ssh: + auth_info (dict) + + Returns (tuple): + + """ + if not all_vol_types and not volume_type: + raise ValueError( + 'volume_type has to be specified unless all_vol_types=True') + + if all_vol_types: + args_ = '--all' + else: + args_ = '--volume-type {}'.format(volume_type) + + LOG.info("Disassociating volume qos {} from: {}".format(volume_qos, args_)) + args_ = '{} {}'.format(args_, volume_qos) + code, output = cli.openstack('volume qos disassociate', args_, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + msg = "Volume QoS {} is successfully disassociated".format(volume_qos) + LOG.info(msg) + return 0, msg + + +def get_qos_associations(volume_qos, qos_val='ID', con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get associated volume types for given volume qos spec + Args: + volume_qos: + qos_val: + con_ssh: + auth_info: + + Returns (list): list of volume type names + + """ + key = 'qos_id' if qos_val.lower() == 'id' else 'name' + + associations = get_volume_qos_list(field='associations', con_ssh=con_ssh, + auth_info=auth_info, + **{key: volume_qos})[0] + associations = [i.strip() for i in associations.split(sep=',')] + + LOG.info("Volume QoS {} associations: {}".format(volume_qos, associations)) + + return associations + + +def is_volumes_pool_sufficient(min_size=40): + """ + Check if cinder-volume-pool has sufficient space + Args: + min_size (int): Minimum requirement for cinder volume pool size in + Gbs. Default 30G. 
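+            Note: the check parses 'lvs' output on the active controller; if
+            no cinder-volumes-pool is found (e.g., when cinder is
+            ceph-backed), the pool is assumed to be sufficient and True is
+            returned.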
+ + Returns (bool): + + """ + con_ssh = ControllerClient.get_active_controller() + lvs_pool = con_ssh.exec_sudo_cmd( + cmd="lvs --units g | grep --color='never' cinder-volumes-pool")[1] + # Sample output: + # cinder-volumes-pool cinder-volumes twi-aotz-- + # 19.95g 64.31 33.38 + # volume-05fa416d-d37b-4d57-a6ff-ab4fe49deece cinder-volumes Vwi-a-tz-- + # 1.00g cinder-volumes-pool 64.16 + # volume-1b04fa7f-b839-4cf9-a177-e676ec6cf9b7 cinder-volumes Vwi-a-tz-- + # 1.00g cinder-volumes-pool 64.16 + if lvs_pool: + pool_size = float( + lvs_pool.splitlines()[0].strip().split()[3].strip()[:-1].split( + sep='<')[-1]) + return pool_size >= min_size + + # assume enough volumes in ceph: + return True + + +def create_volume_snapshot(name, volume=None, description=None, force=False, + properties=None, remote_sources=None, + fail_ok=False, con_ssh=None, auth_info=None): + """ + Create snapshot for an existing volume + Args: + name (str): + volume (None): + description (str|None): + force (bool): + properties (None|dict): + remote_sources (None|dict): + fail_ok (bool): + con_ssh: + auth_info: + + Returns (tuple): + + """ + arg_dict = { + 'volume': volume, + 'description': description, + 'force': force, + 'property': properties, + 'remote-source': remote_sources + } + + arg_str = common.parse_args(arg_dict, repeat_arg=True) + arg_str += ' {}'.format(name) + + vol = volume if volume else name + LOG.info('Creating snapshot for volume: {}'.format(vol)) + code, output = cli.openstack('volume snapshot create', arg_str, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + table_ = table_parser.table(output) + snap_shot_id = table_parser.get_value_two_col_table(table_, 'id') + + LOG.info( + "Volume snapshot {} created for volume {}. Wait for it to become " + "available".format( + snap_shot_id, vol)) + wait_for_vol_snapshot_status(snap_shot_id, status='available', + con_ssh=con_ssh, auth_info=auth_info) + + LOG.info("Volume snapshot {} created and READY for volume {}".format( + snap_shot_id, vol)) + return 0, snap_shot_id + + +def import_volume(cinder_volume_backup, vol_id=None, con_ssh=None, + fail_ok=False, auth_info=Tenant.get('admin'), + retries=2): + """ + Imports a cinder volume from a backup file located in /opt/backups + folder. The backup file is expected in + volume--.tgz format. 
Either volume_backup filename or vol_id + must be provided + Args: + cinder_volume_backup(str): the filename of the backup file + vol_id (str): - is the uuid of the cinder volume to be imported + con_ssh: + fail_ok: + auth_info: + retries (int) + + Returns: + + """ + + if not cinder_volume_backup and not vol_id: + raise ValueError("Volume backup file name or vol_id must be provided.") + + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + controller_prompt = Prompt.CONTROLLER_0 + \ + r'|.*controller\-0\:/opt/backups\$' + controller_prompt += r'|.*controller\-0.*backups.*\$' + LOG.info('set prompt to:{}'.format(controller_prompt)) + vol_backup = cinder_volume_backup + vol_id_ = vol_id + cd_cmd = "cd /opt/backups" + con_ssh.set_prompt(prompt=controller_prompt) + + con_ssh.exec_cmd(cd_cmd) + + if not cinder_volume_backup: + # search backup file in /opt/backups + search_str = "volume-" + vol_id_ + "*.tgz" + cmd = "cd /opt/backups; ls {}".format(search_str) + + rc, output = con_ssh.exec_cmd(cmd) + if rc == 0: + vol_backup = output.split()[0] + else: + err_msg = "volume backup file not found in /opt/backups: {}".format( + output) + LOG.error(err_msg) + if fail_ok: + return -1, err_msg + else: + raise exceptions.CinderError(err_msg) + if not vol_id_: + vol_id_ = vol_backup[7:-20] + + # according to the user documents, the first time of 'cinder import' may + # fail, in which case + # we just have to try again + for retry in range(retries if 2 <= retries <= 10 else 2): + con_ssh.set_prompt(prompt=controller_prompt) + rc, output = cli.cinder('import', vol_backup, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if rc == 1: + LOG.warn( + 'Failed to import volume for the:{} time'.format(retry + 1)) + + if wait_for_volume_status(volume=vol_id_, + status=['available', 'in-use'], + auth_info=auth_info, + con_ssh=con_ssh, fail_ok=True): + break + else: + err_msg = "Volume is imported, but not in available/in-use state." 
+ LOG.warning(err_msg) + if fail_ok: + return 2, vol_id_ + else: + raise exceptions.CinderError(err_msg) + + return 0, "Volume {} is imported successfully".format(vol_id_) + + +def delete_backups(backup_ids=None, con_ssh=None, fail_ok=False, + auth_info=None): + LOG.info('Deleting backups:{}'.format(backup_ids)) + + if backup_ids is None: + backup_ids = get_backup_ids(con_ssh=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + for backup_id in backup_ids: + LOG.info('Deleting backup:{}'.format(backup_id)) + cli.cinder('backup-delete', backup_id, fail_ok=fail_ok, + auth_info=auth_info) + + +def export_free_volume_using_cinder_backup(vol_id=None, container='cinder', + name='', con_ssh=None, fail_ok=False, + auth_info=None, + backup_file_path='/opt/backups'): + LOG.info( + 'Exporing free volume using cinder-backup, volume-id:{}'.format(vol_id)) + if not name: + name = 'free_vol_backup_' + str(vol_id)[0:2] + '_' + str(vol_id)[-5:] + + arg = '--container {} --name {} {}'.format(container, name, vol_id) + output = table_parser.table( + cli.cinder('backup-create', arg, ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info)[1]) + + backup_id = table_parser.get_value_two_col_table(output, 'id') + backup_name = table_parser.get_value_two_col_table(output, 'name') + volume_id = table_parser.get_value_two_col_table(output, 'volume_id') + + LOG.info( + 'TODO: backup_id:{}, backup_name:{}, volume_id:{}'.format(backup_id, + backup_name, + volume_id)) + + assert backup_name == name and volume_id == vol_id + + wait_for_backup_ready(backup_id) + + msg = ( + 'backup:{} reached "available" status, check if the files are ' + 'gerated'.format( + backup_id)) + LOG.info('OK,' + msg) + code, output = con_ssh.exec_sudo_cmd( + 'ls -l {}/*{}*'.format(os.path.join(backup_file_path, container), + backup_id)) + + if code != 0: + con_ssh.exec_sudo_cmd( + 'ls -l {}/*'.format(os.path.join(backup_file_path, container))) + + assert 0 == code and output, 'backup became "available", but files are ' \ + 'not generated' + + return backup_id + + +def wait_for_backup_ready(backup_id, timeout=900, interval=15, con_ssh=None, + fail_ok=False, auth_info=None): + LOG.info( + 'Waiting for backup reaches "available" status, backup-id:{}'.format( + backup_id)) + now = time.time() + end = now + timeout + + while time.time() < end: + time.sleep(interval) + status = get_cinder_backup_status(backup_id, con_ssh=con_ssh, + auth_info=auth_info) + if status == 'available': + break + else: + msg = 'backup did not reach status: "available" within {} ' \ + 'seconds'.format(timeout) + LOG.warning('Error:' + msg) + if not fail_ok: + assert False, msg + return -1 + + return 0 + + +def export_busy_volume_using_cinder_backup(vol_id=None, + container='cinder', + name='', + con_ssh=None, + fail_ok=False, + auth_info=None, + backup_file_path='/opt/backups' + ): + LOG.info('TODO: exporting in-use volume using cinder-backup, vol:{}'.format( + vol_id)) + if not name: + name = 'inuse_vol_backup_' + vol_id[-4:] + snp_id = create_volume_snapshot('snp_' + name, volume=vol_id, + con_ssh=con_ssh, + fail_ok=fail_ok, + force=True, auth_info=auth_info)[1] + arg = '--container {} --name {} --snapshot-id {} {}'.format( + container, name, snp_id, vol_id) + output = table_parser.table(cli.cinder('backup-create', arg, + fail_ok=fail_ok, + auth_info=auth_info)[1]) + + backup_id = table_parser.get_value_two_col_table(output, 'id') + backup_name = table_parser.get_value_two_col_table(output, 'name') + volume_id = table_parser.get_value_two_col_table(output, 'volume_id') 
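+    # Confirm the backup was created for the requested volume before waiting
+    # for it to reach the 'available' state.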
+ + LOG.info( + 'TODO: backup_id:{}, backup_name:{}, volume_id:{}'.format( + backup_id, backup_name, volume_id)) + + assert backup_name == name and volume_id == vol_id + + wait_for_backup_ready(backup_id) + + msg = ( + 'backup:{} reached "available" status, check if the files are ' + 'gerated'.format( + backup_id)) + LOG.info('OK,' + msg) + code, output = con_ssh.exec_sudo_cmd( + 'ls -l {}/*{}*'.format(os.path.join(backup_file_path, container), + backup_id)) + + if code != 0: + con_ssh.exec_sudo_cmd( + 'ls -l {}/*'.format(os.path.join(backup_file_path, container))) + + assert 0 == code and output, 'backup became "available", but files are ' \ + 'not generated' + + LOG.info( + 'TODO: successfully exported in-use volume using cinder-backup, ' + 'vol:{}'.format( + vol_id)) + + return backup_id + + +def export_volumes_using_cinder_backup(vol_ids=None, delete_existing=True, + con_ssh=None, fail_ok=False, + auth_info=None, + backup_file_path='/opt/backups'): + if not vol_ids: + LOG.warning('No volume IDs specified, skip the rest of test') + return 0, [] + + backup_ids = get_backup_ids(searching_status='', con_ssh=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if delete_existing and len(backup_ids) > 0: + delete_backups(con_ssh=None, fail_ok=fail_ok, auth_info=auth_info) + + code = 0 + exported_volume_ids = [] + for vol_id in vol_ids: + LOG.info('Backup volume: {}'.format(vol_id)) + volume_status = get_volume_show_values( + vol_id, 'status', con_ssh=con_ssh)[0] + if volume_status == 'available': + code = export_free_volume_using_cinder_backup( + vol_id=vol_id, + con_ssh=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info, + backup_file_path=backup_file_path) + + elif volume_status == 'in-use': + code = export_busy_volume_using_cinder_backup( + vol_id=vol_id, + con_ssh=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info, + backup_file_path=backup_file_path) + + exported_volume_ids.append(vol_id) + + LOG.info('Volumes backuped using cinder-backup:{}'.format( + exported_volume_ids)) + return code, exported_volume_ids + + +def get_backup_ids(searching_status='available', con_ssh=None, fail_ok=False, + auth_info=None): + if not auth_info: + auth_info = Tenant.get('admin') + + table_ = table_parser.table( + cli.cinder('backup-list', ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info)[1]) + + if searching_status and searching_status.strip(): + kwargs = {'Status': searching_status.strip()} + table_ = table_parser.filter_table(table_, **kwargs) + + status = table_parser.get_values(table_, 'Status') + backup_ids = table_parser.get_values(table_, 'ID') + volume_ids = table_parser.get_values(table_, 'Volume ID') + + LOG.info('status:{}'.format(status)) + LOG.info('backup_ids:{}'.format(backup_ids)) + LOG.info('volume_ids:{}'.format(volume_ids)) + LOG.info('backup-ids:{}'.format(backup_ids)) + + return backup_ids + + +def get_cinder_backup_status(backup_id, con_ssh=None, fail_ok=False, + auth_info=Tenant.get('admin')): + states = table_parser.table( + cli.cinder('backup-show', backup_id, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info)[1]) + return table_parser.get_value_two_col_table(states, 'status') + + +def export_volumes(vol_ids=None, con_ssh=None, fail_ok=False, + auth_info=Tenant.get('admin'), cinder_backup=False, + backup_file_path='/opt/backups'): + """ + Exports cinder volume to controller's /opt/backups folder. The backup + file is in + volume--.tgz format. 
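+    For 'available' volumes 'cinder export' is used; 'in-use' volumes are
+    snapshotted and exported via 'cinder snapshot-export'. When
+    cinder_backup=True, 'cinder backup-create' is used for both cases.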
+ Args: + vol_ids(list/str): the list of volume ids to be exported, if none, + all system volumes are exported + con_ssh: + fail_ok: + auth_info: + cinder_backup + backup_file_path + + Returns: + + """ + if not vol_ids: + vol_ids = get_volumes() + elif isinstance(vol_ids, str): + vol_ids = [vol_ids] + + if cinder_backup: + return export_volumes_using_cinder_backup( + vol_ids=vol_ids, + con_ssh=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info, + backup_file_path=backup_file_path) + volume_exported = [] + for vol_id in vol_ids: + + if get_volume_show_values(vol_id, 'status', con_ssh=con_ssh, + auth_info=auth_info)[0] == 'available': + # export available volume to ~/opt/backups + LOG.tc_step("export available volume {} ".format(vol_id)) + code, out = cli.cinder('export', vol_id, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, out + + # wait for volume copy to complete + if not wait_for_volume_status(vol_id, fail_ok=fail_ok, + auth_info=auth_info, con_ssh=con_ssh): + err_msg = "cinder volume failed to reach available status " \ + "after export" + LOG.warning(err_msg) + return 2, vol_id + + LOG.info( + "Exported 'Available' Volumes {} successfully ".format(vol_id)) + volume_exported.append(vol_id) + + # execute backup in-use volume command + if get_volume_show_values(vol_id, 'status', auth_info=auth_info, + con_ssh=con_ssh)[0] == 'in-use': + LOG.tc_step("export in use volume {} ".format(vol_id)) + snapshot_name = 'snapshot_' + vol_id + snap_shot_id = create_volume_snapshot(name=snapshot_name, + volume=vol_id, + con_ssh=con_ssh, + auth_info=auth_info)[1] + LOG.info( + "Volume snapshot {} created for volume {}".format(snap_shot_id, + vol_id)) + + # wait for volume copy to complete + if not wait_for_vol_snapshot_status(snap_shot_id, fail_ok=fail_ok, + auth_info=auth_info, + con_ssh=con_ssh): + err_msg = "cinder snapshot volume {} failed to reach " \ + "available status after copy".format(snap_shot_id) + LOG.warning(err_msg) + return 3, err_msg + + found_snap = get_vol_snapshots(vol_id=vol_id, auth_info=auth_info, + con_ssh=con_ssh)[0] + LOG.info( + "Matched Volume snapshot {} to volume {}".format(found_snap, + vol_id)) + if found_snap not in snap_shot_id: + err_msg = "cinder volume snapshot {} for volume {} not found " \ + "after export".format(snap_shot_id, vol_id) + LOG.warn(err_msg) + if fail_ok: + LOG.warning(err_msg) + return 4, err_msg + else: + raise exceptions.CinderError(err_msg) + + LOG.info( + "Exporting in-use Volume snapshot {} ".format(snap_shot_id)) + cli.cinder('snapshot-export', snap_shot_id, ssh_client=con_ssh, + auth_info=auth_info) + if not wait_for_vol_snapshot_status(snap_shot_id, fail_ok=fail_ok, + auth_info=auth_info, + con_ssh=con_ssh): + err_msg = "cinder snapshot volume {} failed to reach " \ + "available status after export".format(snap_shot_id) + return 5, err_msg + + # delete the snapshot after export + LOG.info( + "Deleting snapshot Volume snapshot {} after export ".format( + snap_shot_id)) + cli.cinder('snapshot-delete', snap_shot_id, ssh_client=con_ssh, + auth_info=auth_info) + + LOG.info( + "Exported 'in-use' Volumes {} successfully ".format(vol_id)) + volume_exported.append(vol_id) + + return 0, volume_exported + + +def get_lvm_usage(con_ssh): + LOG.info('Getting usage of cinder-volumes') + free, total, unit = 0, 0, 'g' + pattern = r'(\d+(\.\d+)?)([gm])' + code, output = con_ssh.exec_sudo_cmd('lvs') + if 0 != code: + LOG.warn('Failed to get usage of cinder-volumes') + else: + try: + used = 0 + for line in 
output.strip().splitlines(): + fields = line.split() + if fields[0] == 'cinder-volumes-pool': + total = re.search(pattern, fields[3], re.IGNORECASE) + unit = total.group(3) + total = float(total.group(1)) + elif fields[0].startswith('volume-'): + usage = re.search(pattern, fields[3], re.IGNORECASE) + used += float(usage.group(1)) + + free = total - used + + LOG.info('lvm usage: free:{}, used:{}, total:{}'.format(free, used, + total)) + except Exception: + LOG.info('Wrong format:{}'.format(output)) + free = total = 0 + + return free, total, unit diff --git a/automated-pytest-suite/keywords/common.py b/automated-pytest-suite/keywords/common.py new file mode 100644 index 0000000..8457463 --- /dev/null +++ b/automated-pytest-suite/keywords/common.py @@ -0,0 +1,787 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +############################################################# +# DO NOT import anything from helper modules to this module # +############################################################# + +import os +import re +import time +from contextlib import contextmanager +from datetime import datetime + +import pexpect +from pytest import skip + +from consts.auth import Tenant, TestFileServer, HostLinuxUser +from consts.stx import Prompt +from consts.proj_vars import ProjVar +from utils import exceptions +from utils.clients.ssh import ControllerClient, NATBoxClient, SSHClient, \ + get_cli_client +from utils.tis_log import LOG + + +def scp_from_test_server_to_user_file_dir(source_path, dest_dir, dest_name=None, + timeout=900, con_ssh=None, + central_region=False): + if con_ssh is None: + con_ssh = get_cli_client(central_region=central_region) + if dest_name is None: + dest_name = source_path.split(sep='/')[-1] + + if ProjVar.get_var('USER_FILE_DIR') == ProjVar.get_var('TEMP_DIR'): + LOG.info("Copy file from test server to localhost") + source_server = TestFileServer.SERVER + source_user = TestFileServer.USER + source_password = TestFileServer.PASSWORD + dest_path = dest_dir if not dest_name else os.path.join(dest_dir, + dest_name) + LOG.info('Check if file already exists on TiS') + if con_ssh.file_exists(file_path=dest_path): + LOG.info('dest path {} already exists. Return existing path'.format( + dest_path)) + return dest_path + + os.makedirs(dest_dir, exist_ok=True) + con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server, + source_path=source_path, + dest_path=dest_path, source_pswd=source_password, + timeout=timeout) + return dest_path + else: + LOG.info("Copy file from test server to active controller") + return scp_from_test_server_to_active_controller( + source_path=source_path, dest_dir=dest_dir, + dest_name=dest_name, timeout=timeout, con_ssh=con_ssh) + + +def _scp_from_remote_to_active_controller(source_server, source_path, + dest_dir, dest_name=None, + source_user=None, + source_password=None, + timeout=900, con_ssh=None, + is_dir=False): + """ + SCP file or files under a directory from remote server to TiS server + + Args: + source_path (str): remote server file path or directory path + dest_dir (str): destination directory. 
should end with '/' + dest_name (str): destination file name if not dir + timeout (int): + con_ssh: + is_dir + + Returns (str|None): destination file/dir path if scp successful else None + + """ + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + if not source_user: + source_user = TestFileServer.USER + if not source_password: + source_password = TestFileServer.PASSWORD + + if dest_name is None and not is_dir: + dest_name = source_path.split(sep='/')[-1] + + dest_path = dest_dir if not dest_name else os.path.join(dest_dir, dest_name) + + LOG.info('Check if file already exists on TiS') + if not is_dir and con_ssh.file_exists(file_path=dest_path): + LOG.info('dest path {} already exists. Return existing path'.format( + dest_path)) + return dest_path + + LOG.info('Create destination directory on tis server if not already exists') + cmd = 'mkdir -p {}'.format(dest_dir) + con_ssh.exec_cmd(cmd, fail_ok=False) + + nat_name = ProjVar.get_var('NATBOX') + if nat_name: + nat_name = nat_name.get('name') + if nat_name and ProjVar.get_var('IS_VBOX'): + LOG.info('VBox detected, performing intermediate scp') + + nat_dest_path = '/tmp/{}'.format(dest_name) + nat_ssh = NATBoxClient.get_natbox_client() + + if not nat_ssh.file_exists(nat_dest_path): + LOG.info("scp file from {} to NatBox: {}".format(nat_name, + source_server)) + nat_ssh.scp_on_dest(source_user=source_user, + source_ip=source_server, + source_path=source_path, + dest_path=nat_dest_path, + source_pswd=source_password, timeout=timeout, + is_dir=is_dir) + + LOG.info( + 'scp file from natbox {} to active controller'.format(nat_name)) + dest_user = HostLinuxUser.get_user() + dest_pswd = HostLinuxUser.get_password() + dest_ip = ProjVar.get_var('LAB').get('floating ip') + nat_ssh.scp_on_source(source_path=nat_dest_path, dest_user=dest_user, + dest_ip=dest_ip, dest_path=dest_path, + dest_password=dest_pswd, timeout=timeout, + is_dir=is_dir) + + else: # if not a VBox lab, scp from remote server directly to TiS server + LOG.info("scp file(s) from {} to tis".format(source_server)) + con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server, + source_path=source_path, + dest_path=dest_path, source_pswd=source_password, + timeout=timeout, is_dir=is_dir) + + return dest_path + + +def scp_from_test_server_to_active_controller(source_path, dest_dir, + dest_name=None, timeout=900, + con_ssh=None, + is_dir=False): + """ + SCP file or files under a directory from test server to TiS server + + Args: + source_path (str): test server file path or directory path + dest_dir (str): destination directory. 
should end with '/' + dest_name (str): destination file name if not dir + timeout (int): + con_ssh: + is_dir (bool) + + Returns (str|None): destination file/dir path if scp successful else None + + """ + skip('Shared Test File Server is not ready') + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + source_server = TestFileServer.SERVER + source_user = TestFileServer.USER + source_password = TestFileServer.PASSWORD + + return _scp_from_remote_to_active_controller( + source_server=source_server, + source_path=source_path, + dest_dir=dest_dir, + dest_name=dest_name, + source_user=source_user, + source_password=source_password, + timeout=timeout, + con_ssh=con_ssh, + is_dir=is_dir) + + +def scp_from_active_controller_to_test_server(source_path, dest_dir, + dest_name=None, timeout=900, + is_dir=False, + con_ssh=None): + """ + SCP file or files under a directory from test server to TiS server + + Args: + source_path (str): test server file path or directory path + dest_dir (str): destination directory. should end with '/' + dest_name (str): destination file name if not dir + timeout (int): + is_dir (bool): + con_ssh: + + Returns (str|None): destination file/dir path if scp successful else None + + """ + skip('Shared Test File Server is not ready') + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + dir_option = '-r ' if is_dir else '' + dest_server = TestFileServer.SERVER + dest_user = TestFileServer.USER + dest_password = TestFileServer.PASSWORD + + dest_path = dest_dir if not dest_name else os.path.join(dest_dir, dest_name) + + scp_cmd = 'scp -oStrictHostKeyChecking=no -o ' \ + 'UserKnownHostsFile=/dev/null ' \ + '{}{} {}@{}:{}'.\ + format(dir_option, source_path, dest_user, dest_server, dest_path) + + LOG.info("scp file(s) from tis server to test server") + con_ssh.send(scp_cmd) + index = con_ssh.expect( + [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.ADD_HOST], + timeout=timeout) + if index == 2: + con_ssh.send('yes') + index = con_ssh.expect([con_ssh.prompt, Prompt.PASSWORD_PROMPT], + timeout=timeout) + if index == 1: + con_ssh.send(dest_password) + index = con_ssh.expect(timeout=timeout) + + assert index == 0, "Failed to scp files" + + exit_code = con_ssh.get_exit_code() + assert 0 == exit_code, "scp not fully succeeded" + + return dest_path + + +def scp_from_localhost_to_active_controller( + source_path, dest_path=None, + dest_user=None, + dest_password=None, + timeout=900, is_dir=False): + + active_cont_ip = ControllerClient.get_active_controller().host + if not dest_path: + dest_path = HostLinuxUser.get_home() + if not dest_user: + dest_user = HostLinuxUser.get_user() + if not dest_password: + dest_password = HostLinuxUser.get_password() + + return scp_from_local(source_path, active_cont_ip, dest_path=dest_path, + dest_user=dest_user, dest_password=dest_password, + timeout=timeout, is_dir=is_dir) + + +def scp_from_active_controller_to_localhost( + source_path, dest_path='', + src_user=None, + src_password=None, + timeout=900, is_dir=False): + + active_cont_ip = ControllerClient.get_active_controller().host + if not src_user: + src_user = HostLinuxUser.get_user() + if not src_password: + src_password = HostLinuxUser.get_password() + + return scp_to_local(source_path=source_path, source_ip=active_cont_ip, + source_user=src_user, source_password=src_password, + dest_path=dest_path, timeout=timeout, is_dir=is_dir) + + +def scp_from_local(source_path, dest_ip, dest_path, + dest_user, + dest_password, + timeout=900, is_dir=False): + """ 
+ Scp file(s) from localhost (i.e., from where the automated tests are + executed). + + Args: + source_path (str): source file/directory path + dest_ip (str): ip of the destination host + dest_user (str): username of destination host. + dest_password (str): password of destination host + dest_path (str): destination directory path to copy the file(s) to + timeout (int): max time to wait for scp finish in seconds + is_dir (bool): whether to copy a single file or a directory + + """ + dir_option = '-r ' if is_dir else '' + + cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ + '{}{} {}@{}:{}'. \ + format(dir_option, source_path, dest_user, dest_ip, dest_path) + + _scp_on_local(cmd, remote_password=dest_password, timeout=timeout) + + +def scp_to_local(source_path, source_ip, source_user, source_password, + dest_path, timeout=900, is_dir=False): + """ + Scp file(s) to localhost (i.e., to where the automated tests are executed). + + Args: + source_path (str): source file/directory path + source_ip (str): ip of the source host. + source_user (str): username of source host. + source_password (str): password of source host + dest_path (str): destination directory path to copy the file(s) to + timeout (int): max time to wait for scp finish in seconds + is_dir (bool): whether to copy a single file or a directory + + """ + dir_option = '-r ' if is_dir else '' + cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ + '{}{}@{}:{} {}'.\ + format(dir_option, source_user, source_ip, source_path, dest_path) + + _scp_on_local(cmd, remote_password=source_password, timeout=timeout) + + +def _scp_on_local(cmd, remote_password, logdir=None, timeout=900): + LOG.debug('scp cmd: {}'.format(cmd)) + + logdir = logdir or ProjVar.get_var('LOG_DIR') + logfile = os.path.join(logdir, 'scp_files.log') + + with open(logfile, mode='a') as f: + local_child = pexpect.spawn(command=cmd, encoding='utf-8', logfile=f) + index = local_child.expect([pexpect.EOF, 'assword:', 'yes/no'], + timeout=timeout) + + if index == 2: + local_child.sendline('yes') + index = local_child.expect([pexpect.EOF, 'assword:'], + timeout=timeout) + + if index == 1: + local_child.sendline(remote_password) + local_child.expect(pexpect.EOF, timeout=timeout) + + +def get_tenant_name(auth_info=None): + """ + Get name of given tenant. If None is given, primary tenant name will be + returned. 
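+    For example (illustrative values), get_tenant_name({'tenant': 'tenant1'})
+    returns 'tenant1'; with no argument the primary tenant's name is returned.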
+ + Args: + auth_info (dict|None): Tenant dict + + Returns: + str: name of the tenant + + """ + if auth_info is None: + auth_info = Tenant.get_primary() + return auth_info['tenant'] + + +class Count: + __vm_count = 0 + __flavor_count = 0 + __volume_count = 0 + __image_count = 0 + __server_group = 0 + __router = 0 + __subnet = 0 + __other = 0 + + @classmethod + def get_vm_count(cls): + cls.__vm_count += 1 + return cls.__vm_count + + @classmethod + def get_flavor_count(cls): + cls.__flavor_count += 1 + return cls.__flavor_count + + @classmethod + def get_volume_count(cls): + cls.__volume_count += 1 + return cls.__volume_count + + @classmethod + def get_image_count(cls): + cls.__image_count += 1 + return cls.__image_count + + @classmethod + def get_sever_group_count(cls): + cls.__server_group += 1 + return cls.__server_group + + @classmethod + def get_router_count(cls): + cls.__router += 1 + return cls.__router + + @classmethod + def get_subnet_count(cls): + cls.__subnet += 1 + return cls.__subnet + + @classmethod + def get_other_count(cls): + cls.__other += 1 + return cls.__other + + +class NameCount: + __names_count = { + 'vm': 0, + 'flavor': 0, + 'volume': 0, + 'image': 0, + 'server_group': 0, + 'subnet': 0, + 'heat_stack': 0, + 'qos': 0, + 'other': 0, + } + + @classmethod + def get_number(cls, resource_type='other'): + cls.__names_count[resource_type] += 1 + return cls.__names_count[resource_type] + + @classmethod + def get_valid_types(cls): + return list(cls.__names_count.keys()) + + +def get_unique_name(name_str, existing_names=None, resource_type='other'): + """ + Get a unique name string by appending a number to given name_str + + Args: + name_str (str): partial name string + existing_names (list): names to avoid + resource_type (str): type of resource. valid values: 'vm' + + Returns: + + """ + valid_types = NameCount.get_valid_types() + if resource_type not in valid_types: + raise ValueError( + "Invalid resource_type provided. Valid types: {}".format( + valid_types)) + + if existing_names: + if resource_type in ['image', 'volume', 'flavor']: + unique_name = name_str + else: + unique_name = "{}-{}".format(name_str, NameCount.get_number( + resource_type=resource_type)) + + for i in range(50): + if unique_name not in existing_names: + return unique_name + + unique_name = "{}-{}".format(name_str, NameCount.get_number( + resource_type=resource_type)) + else: + raise LookupError("Cannot find unique name.") + else: + unique_name = "{}-{}".format(name_str, NameCount.get_number( + resource_type=resource_type)) + + return unique_name + + +def parse_cpus_list(cpus): + """ + Convert human friendly pcup list to list of integers. 
+ e.g., '5-7,41-43, 43, 45' >> [5, 6, 7, 41, 42, 43, 43, 45] + + Args: + cpus (str): + + Returns (list): list of integers + + """ + if isinstance(cpus, str): + if cpus.strip() == '': + return [] + + cpus = cpus.split(sep=',') + + cpus_list = list(cpus) + + for val in cpus: + # convert '3-6' to [3, 4, 5, 6] + if '-' in val: + cpus_list.remove(val) + min_, max_ = val.split(sep='-') + + # unpinned:20; pinned_cpulist:-, unpinned_cpulist:10-19,30-39 + if min_ != '': + cpus_list += list(range(int(min_), int(max_) + 1)) + + return sorted([int(val) for val in cpus_list]) + + +def get_timedelta_for_isotimes(time1, time2): + """ + + Args: + time1 (str): such as "2016-08-16T12:59:45.440697+00:00" + time2 (str): + + Returns () + + """ + + def _parse_time(time_): + time_ = time_.strip().split(sep='.')[0].split(sep='+')[0] + if 'T' in time_: + pattern = "%Y-%m-%dT%H:%M:%S" + elif ' ' in time_: + pattern = "%Y-%m-%d %H:%M:%S" + else: + raise ValueError("Unknown format for time1: {}".format(time_)) + time_datetime = datetime.strptime(time_, pattern) + return time_datetime + + time1_datetime = _parse_time(time_=time1) + time2_datetime = _parse_time(time_=time2) + + return time2_datetime - time1_datetime + + +def _execute_with_openstack_cli(): + """ + DO NOT USE THIS IN TEST FUNCTIONS! + """ + return ProjVar.get_var('OPENSTACK_CLI') + + +def get_date_in_format(ssh_client=None, date_format="%Y%m%d %T"): + """ + Get date in given format. + Args: + ssh_client (SSHClient): + date_format (str): Please see date --help for valid format strings + + Returns (str): date output in given format + + """ + if ssh_client is None: + ssh_client = ControllerClient.get_active_controller() + return ssh_client.exec_cmd("date +'{}'".format(date_format), fail_ok=False)[ + 1] + + +def write_to_file(file_path, content, mode='a'): + """ + Write content to specified local file + Args: + file_path (str): file path on localhost + content (str): content to write to file + mode (str): file operation mode. Default is 'a' (append to end of file). + + Returns: None + + """ + time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + with open(file_path, mode=mode) as f: + f.write( + '\n-----------------[{}]-----------------\n{}\n'.format(time_stamp, + content)) + + +def collect_software_logs(con_ssh=None): + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + LOG.info("Collecting all hosts logs...") + con_ssh.exec_cmd('source /etc/platform/openrc', get_exit_code=False) + con_ssh.send('collect all') + + expect_list = ['.*password for sysadmin:', 'collecting data.', + con_ssh.prompt] + index_1 = con_ssh.expect(expect_list, timeout=20) + if index_1 == 2: + LOG.error( + "Something is wrong with collect all. Check ssh console log for " + "detail.") + return + elif index_1 == 0: + con_ssh.send(con_ssh.password) + con_ssh.expect('collecting data') + + index_2 = con_ssh.expect(['/scratch/ALL_NODES.*', con_ssh.prompt], + timeout=1200) + if index_2 == 0: + output = con_ssh.cmd_output + con_ssh.expect() + logpath = re.findall('.*(/scratch/ALL_NODES_.*.tar).*', output)[0] + LOG.info( + "\n################### TiS server log path: {}".format(logpath)) + else: + LOG.error("Collecting logs failed. 
No ALL_NODES logs found.") + return + + dest_path = ProjVar.get_var('LOG_DIR') + try: + LOG.info("Copying log file from active controller to local {}".format( + dest_path)) + scp_from_active_controller_to_localhost( + source_path=logpath, dest_path=dest_path, timeout=300) + LOG.info("{} is successfully copied to local directory: {}".format( + logpath, dest_path)) + except Exception as e: + LOG.warning("Failed to copy log file to localhost.") + LOG.error(e, exc_info=True) + + +def parse_args(args_dict, repeat_arg=False, vals_sep=' '): + """ + parse args dictionary and convert it to string + Args: + args_dict (dict): key/value pairs + repeat_arg: if value is tuple, list, dict, should the arg be repeated. + e.g., True for --nic in nova boot. False for -m in gnocchi + measures aggregation + vals_sep (str): separator to join multiple vals. Only applicable when + repeat_arg=False. + + Returns (str): + + """ + + def convert_val_dict(key__, vals_dict, repeat_key): + vals_ = [] + for k, v in vals_dict.items(): + if ' ' in v: + v = '"{}"'.format(v) + vals_.append('{}={}'.format(k, v)) + if repeat_key: + args_str = ' ' + ' '.join( + ['{} {}'.format(key__, v_) for v_ in vals_]) + else: + args_str = ' {} {}'.format(key__, vals_sep.join(vals_)) + return args_str + + args = '' + for key, val in args_dict.items(): + if val is None: + continue + + key = key if key.startswith('-') else '--{}'.format(key) + if isinstance(val, str): + if ' ' in val: + val = '"{}"'.format(val) + args += ' {}={}'.format(key, val) + elif isinstance(val, bool): + if val: + args += ' {}'.format(key) + elif isinstance(val, (int, float)): + args += ' {}={}'.format(key, val) + elif isinstance(val, dict): + args += convert_val_dict(key__=key, vals_dict=val, + repeat_key=repeat_arg) + elif isinstance(val, (list, tuple)): + if repeat_arg: + for val_ in val: + if isinstance(val_, dict): + args += convert_val_dict(key__=key, vals_dict=val_, + repeat_key=False) + else: + args += ' {}={}'.format(key, val_) + else: + args += ' {}={}'.format(key, vals_sep.join(val)) + else: + raise ValueError( + "Unrecognized value type. Key: {}; value: {}".format(key, val)) + + return args.strip() + + +def get_symlink(ssh_client, file_path): + code, output = ssh_client.exec_cmd( + 'ls -l {} | grep --color=never ""'.format(file_path)) + if code != 0: + LOG.warning('{} not found!'.format(file_path)) + return None + + res = re.findall('> (.*)', output) + if not res: + LOG.warning('No symlink found for {}'.format(file_path)) + return None + + link = res[0].strip() + return link + + +def is_file(filename, ssh_client): + code = ssh_client.exec_cmd('test -f {}'.format(filename), fail_ok=True)[0] + return 0 == code + + +def is_directory(dirname, ssh_client): + code = ssh_client.exec_cmd('test -d {}'.format(dirname), fail_ok=True)[0] + return 0 == code + + +def lab_time_now(con_ssh=None, date_format='%Y-%m-%dT%H:%M:%S'): + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + date_cmd_format = date_format + '.%N' + timestamp = get_date_in_format(ssh_client=con_ssh, + date_format=date_cmd_format) + with_milliseconds = timestamp.split('.')[0] + '.{}'.format( + int(int(timestamp.split('.')[1]) / 1000)) + format1 = date_format + '.%f' + parsed = datetime.strptime(with_milliseconds, format1) + + return with_milliseconds.split('.')[0], parsed + + +@contextmanager +def ssh_to_remote_node(host, username=None, password=None, prompt=None, + ssh_client=None, use_telnet=False, + telnet_session=None): + """ + ssh to a external node from sshclient. 
+ + Args: + host (str|None): hostname or ip address of remote node to ssh to. + username (str): + password (str): + prompt (str): + ssh_client (SSHClient): client to ssh from + use_telnet: + telnet_session: + + Returns (SSHClient): ssh client of the host + + Examples: with ssh_to_remote_node('128.224.150.92) as remote_ssh: + remote_ssh.exec_cmd(cmd) +\ """ + + if not host: + raise exceptions.SSHException( + "Remote node hostname or ip address must be provided") + + if use_telnet and not telnet_session: + raise exceptions.SSHException( + "Telnet session cannot be none if using telnet.") + + if not ssh_client and not use_telnet: + ssh_client = ControllerClient.get_active_controller() + + if not use_telnet: + from keywords.security_helper import LinuxUser + default_user, default_password = LinuxUser.get_current_user_password() + else: + default_user = HostLinuxUser.get_user() + default_password = HostLinuxUser.get_password() + + user = username if username else default_user + password = password if password else default_password + if use_telnet: + original_host = telnet_session.exec_cmd('hostname')[1] + else: + original_host = ssh_client.host + + if not prompt: + prompt = '.*' + host + r'\:~\$' + + remote_ssh = SSHClient(host, user=user, password=password, + initial_prompt=prompt) + remote_ssh.connect() + current_host = remote_ssh.host + if not current_host == host: + raise exceptions.SSHException( + "Current host is {} instead of {}".format(current_host, host)) + try: + yield remote_ssh + finally: + if current_host != original_host: + remote_ssh.close() diff --git a/automated-pytest-suite/keywords/container_helper.py b/automated-pytest-suite/keywords/container_helper.py new file mode 100644 index 0000000..85ca581 --- /dev/null +++ b/automated-pytest-suite/keywords/container_helper.py @@ -0,0 +1,853 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +""" +Container/Application related helper functions for non-kubectl commands. +For example: +- docker commands +- system application-xxx commands +- helm commands + +""" + +import os +import time +import yaml + +from utils import cli, exceptions, table_parser +from utils.tis_log import LOG +from utils.clients.ssh import ControllerClient +from consts.auth import Tenant +from consts.proj_vars import ProjVar +from consts.stx import AppStatus, Prompt, EventLogID, Container +from consts.filepaths import StxPath +from keywords import system_helper, host_helper + + +def exec_helm_upload_cmd(tarball, repo=None, timeout=120, con_ssh=None, + fail_ok=False): + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + if not repo: + repo = 'starlingx' + cmd = 'helm-upload {} {}'.format(repo, tarball) + con_ssh.send(cmd) + pw_prompt = Prompt.PASSWORD_PROMPT + prompts = [con_ssh.prompt, pw_prompt] + + index = con_ssh.expect(prompts, timeout=timeout, searchwindowsize=100, + fail_ok=fail_ok) + if index == 1: + con_ssh.send(con_ssh.password) + prompts.remove(pw_prompt) + con_ssh.expect(prompts, timeout=timeout, searchwindowsize=100, + fail_ok=fail_ok) + + code, output = con_ssh._process_exec_result(rm_date=True, + get_exit_code=True) + if code != 0 and not fail_ok: + raise exceptions.SSHExecCommandFailed( + "Non-zero return code for cmd: {}. Output: {}". 
+ format(cmd, output)) + + return code, output + + +def exec_docker_cmd(sub_cmd, args, timeout=120, con_ssh=None, fail_ok=False): + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + cmd = 'docker {} {}'.format(sub_cmd, args) + code, output = con_ssh.exec_sudo_cmd(cmd, expect_timeout=timeout, + fail_ok=fail_ok) + + return code, output + + +def upload_helm_charts(tar_file, repo=None, delete_first=False, con_ssh=None, + timeout=120, fail_ok=False): + """ + Upload helm charts via helm-upload cmd + Args: + tar_file: + repo + delete_first: + con_ssh: + timeout: + fail_ok: + + Returns (tuple): + (0, ) + (1, ) + (2, ) + + """ + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + helm_dir = os.path.normpath(StxPath.HELM_CHARTS_DIR) + if not repo: + repo = 'starlingx' + file_path = os.path.join(helm_dir, repo, os.path.basename(tar_file)) + current_host = con_ssh.get_hostname() + controllers = [current_host] + if not system_helper.is_aio_simplex(con_ssh=con_ssh): + con_name = 'controller-1' if controllers[ + 0] == 'controller-0' else \ + 'controller-0' + controllers.append(con_name) + + if delete_first: + for host in controllers: + with host_helper.ssh_to_host(hostname=host, + con_ssh=con_ssh) as host_ssh: + if host_ssh.file_exists(file_path): + host_ssh.exec_sudo_cmd('rm -f {}'.format(file_path)) + + code, output = exec_helm_upload_cmd(tarball=tar_file, repo=repo, + timeout=timeout, con_ssh=con_ssh, + fail_ok=fail_ok) + if code != 0: + return 1, output + + file_exist = con_ssh.file_exists(file_path) + if not file_exist: + raise exceptions.ContainerError( + "{} not found on {} after helm-upload".format(file_path, + current_host)) + + LOG.info("Helm charts {} uploaded successfully".format(file_path)) + return 0, file_path + + +def upload_app(tar_file, app_name=None, app_version=None, check_first=True, + fail_ok=False, uploaded_timeout=300, + con_ssh=None, auth_info=Tenant.get('admin_platform')): + """ + Upload an application via 'system application-upload' + Args: + app_name: + app_version: + tar_file: + check_first + fail_ok: + uploaded_timeout: + con_ssh: + auth_info: + + Returns: + + """ + if check_first and get_apps(application=app_name, con_ssh=con_ssh, + auth_info=auth_info): + msg = '{} already exists. 
Do nothing.'.format(app_name) + LOG.info(msg) + return -1, msg + + args = '' + if app_name: + args += '-n {} '.format(app_name) + if app_version: + args += '-v {} '.format(app_version) + args = '{}{}'.format(args, tar_file) + code, output = cli.system('application-upload', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, output + + res = wait_for_apps_status(apps=app_name, status=AppStatus.UPLOADED, + timeout=uploaded_timeout, + con_ssh=con_ssh, auth_info=auth_info, + fail_ok=fail_ok)[0] + if not res: + return 2, "{} failed to upload".format(app_name) + + msg = '{} uploaded successfully'.format(app_name) + LOG.info(msg) + return 0, msg + + +def get_apps(field='status', application=None, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + rtn_dict=False, **kwargs): + """ + Get applications values for give apps and fields via system application-list + Args: + application (str|list|tuple): + field (str|list|tuple): + con_ssh: + auth_info: + rtn_dict: + **kwargs: extra filters other than application + + Returns (list|dict): + list of list, or + dict with app name(str) as key and values(list) for given fields for + each app as value + + """ + table_ = table_parser.table( + cli.system('application-list', ssh_client=con_ssh, auth_info=auth_info)[ + 1]) + if application: + kwargs['application'] = application + + return table_parser.get_multi_values(table_, fields=field, + rtn_dict=rtn_dict, zip_values=True, + **kwargs) + + +def get_app_values(app_name, fields, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get values from system application-show + Args: + app_name: + fields (str|list|tuple): + con_ssh: + auth_info: + + Returns: + + """ + if isinstance(fields, str): + fields = [fields] + + table_ = table_parser.table( + cli.system('application-show', app_name, ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + values = table_parser.get_multi_values_two_col_table(table_, fields=fields) + return values + + +def wait_for_apps_status(apps, status, timeout=360, check_interval=5, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for applications to reach expected status via system application-list + Args: + apps: + status: + timeout: + check_interval: + fail_ok: + con_ssh: + auth_info: + + Returns (tuple): + + """ + status = '' if not status else status + if isinstance(apps, str): + apps = [apps] + apps_to_check = list(apps) + check_failed = [] + end_time = time.time() + timeout + + LOG.info( + "Wait for {} application(s) to reach status: {}".format(apps, status)) + while time.time() < end_time: + apps_status = get_apps(application=apps_to_check, + field=('application', 'status'), con_ssh=con_ssh, + auth_info=auth_info) + apps_status = {item[0]: item[1] for item in apps_status if item} + + checked = [] + for app in apps_to_check: + current_app_status = apps_status.get(app, '') + if current_app_status == status: + checked.append(app) + elif current_app_status.endswith('ed'): + check_failed.append(app) + checked.append(app) + + apps_to_check = list(set(apps_to_check) - set(checked)) + if not apps_to_check: + if check_failed: + msg = '{} failed to reach status - {}'.format(check_failed, + status) + LOG.warning(msg) + if fail_ok: + return False, check_failed + else: + raise exceptions.ContainerError(msg) + + LOG.info("{} reached expected status {}".format(apps, status)) + return True, None + + time.sleep(check_interval) + + check_failed += apps_to_check + msg = '{} did not 
reach status {} within {}s'.format(check_failed, status, + timeout) + LOG.warning(msg) + if fail_ok: + return False, check_failed + raise exceptions.ContainerError(msg) + + +def apply_app(app_name, check_first=False, fail_ok=False, applied_timeout=300, + check_interval=10, + wait_for_alarm_gone=True, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Apply/Re-apply application via system application-apply. Check for status + reaches 'applied'. + Args: + app_name (str): + check_first: + fail_ok: + applied_timeout: + check_interval: + con_ssh: + wait_for_alarm_gone (bool): + auth_info: + + Returns (tuple): + (-1, " is already applied. Do nothing.") # only returns + if check_first=True. + (0, " (re)applied successfully") + (1, ) # cli rejected + (2, " failed to apply") # did not reach applied status + after apply. + + """ + if check_first: + app_status = get_apps(application=app_name, field='status', + con_ssh=con_ssh, auth_info=auth_info) + if app_status and app_status[0] == AppStatus.APPLIED: + msg = '{} is already applied. Do nothing.'.format(app_name) + LOG.info(msg) + return -1, msg + + LOG.info("Apply application: {}".format(app_name)) + code, output = cli.system('application-apply', app_name, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + res = wait_for_apps_status(apps=app_name, status=AppStatus.APPLIED, + timeout=applied_timeout, + check_interval=check_interval, con_ssh=con_ssh, + auth_info=auth_info, fail_ok=fail_ok)[0] + if not res: + return 2, "{} failed to apply".format(app_name) + + if wait_for_alarm_gone: + alarm_id = EventLogID.CONFIG_OUT_OF_DATE + if system_helper.wait_for_alarm(alarm_id=alarm_id, + entity_id='controller', + timeout=15, fail_ok=True, + auth_info=auth_info, + con_ssh=con_ssh)[0]: + system_helper.wait_for_alarm_gone(alarm_id=alarm_id, + entity_id='controller', + timeout=120, + check_interval=10, + con_ssh=con_ssh, + auth_info=auth_info) + + msg = '{} (re)applied successfully'.format(app_name) + LOG.info(msg) + return 0, msg + + +def delete_app(app_name, check_first=True, fail_ok=False, applied_timeout=300, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Delete an application via system application-delete. Verify application + no longer listed. + Args: + app_name: + check_first: + fail_ok: + applied_timeout: + con_ssh: + auth_info: + + Returns (tuple): + (-1, " does not exist. Do nothing.") + (0, " deleted successfully") + (1, ) + (2, " failed to delete") + + """ + + if check_first: + app_vals = get_apps(application=app_name, field='status', + con_ssh=con_ssh, auth_info=auth_info) + if not app_vals: + msg = '{} does not exist. Do nothing.'.format(app_name) + LOG.info(msg) + return -1, msg + + code, output = cli.system('application-delete', app_name, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + res = wait_for_apps_status(apps=app_name, status=None, + timeout=applied_timeout, + con_ssh=con_ssh, auth_info=auth_info, + fail_ok=fail_ok)[ + 0] + if not res: + return 2, "{} failed to delete".format(app_name) + + msg = '{} deleted successfully'.format(app_name) + LOG.info(msg) + return 0, msg + + +def remove_app(app_name, check_first=True, fail_ok=False, applied_timeout=300, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Remove applied application via system application-remove. Verify it is in + 'uploaded' status. 
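+    Example usage (illustrative):
+        remove_app('stx-openstack', applied_timeout=600)
+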
+ Args: + app_name (str): + check_first: + fail_ok: + applied_timeout: + con_ssh: + auth_info: + + Returns (tuple): + (-1, " is not applied. Do nothing.") + (0, " removed successfully") + (1, ) + (2, " failed to remove") # Did not reach uploaded status + + """ + + if check_first: + app_vals = get_apps(application=app_name, field='status', + con_ssh=con_ssh, auth_info=auth_info) + if not app_vals or app_vals[0] in (AppStatus.UPLOADED, + AppStatus.UPLOAD_FAILED): + msg = '{} is not applied. Do nothing.'.format(app_name) + LOG.info(msg) + return -1, msg + + code, output = cli.system('application-remove', app_name, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + res = wait_for_apps_status(apps=app_name, status=AppStatus.UPLOADED, + timeout=applied_timeout, + con_ssh=con_ssh, auth_info=auth_info, + fail_ok=fail_ok)[0] + if not res: + return 2, "{} failed to remove".format(app_name) + + msg = '{} removed successfully'.format(app_name) + LOG.info(msg) + return 0, msg + + +def get_docker_reg_addr(con_ssh=None): + """ + Get local docker registry ip address in docker conf file. + Args: + con_ssh: + + Returns (str): + + """ + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + output = con_ssh.exec_cmd( + 'grep --color=never "addr: " {}'.format(StxPath.DOCKER_CONF), + fail_ok=False)[1] + reg_addr = output.split('addr: ')[1].strip() + return reg_addr + + +def pull_docker_image(name, tag=None, digest=None, con_ssh=None, timeout=300, + fail_ok=False): + """ + Pull docker image via docker image pull. Verify image is listed in docker + image list. + Args: + name: + tag: + digest: + con_ssh: + timeout: + fail_ok: + + Returns (tuple): + (0, ) + (1, ) + + """ + + args = '{}'.format(name.strip()) + if tag: + args += ':{}'.format(tag) + elif digest: + args += '@{}'.format(digest) + + LOG.info("Pull docker image {}".format(args)) + code, out = exec_docker_cmd('image pull', args, timeout=timeout, + fail_ok=fail_ok, con_ssh=con_ssh) + if code != 0: + return 1, out + + image_id = get_docker_images(repo=name, tag=tag, field='IMAGE ID', + con_ssh=con_ssh, fail_ok=False)[0] + LOG.info( + 'docker image {} successfully pulled. ID: {}'.format(args, image_id)) + + return 0, image_id + + +def login_to_docker(registry=None, user=None, password=None, con_ssh=None, + fail_ok=False): + """ + Login to docker registry + Args: + registry (str|None): default docker registry will be used when None + user (str|None): admin user will be used when None + password (str|None): admin password will be used when None + con_ssh (SSHClient|None): + fail_ok (bool): + + Returns (tuple): + (0, (str)) # login succeeded + (1, (str)) # login failed + + """ + if not user: + user = 'admin' + if not password: + password = Tenant.get('admin_platform').get('password') + if not registry: + registry = Container.LOCAL_DOCKER_REG + + args = '-u {} -p {} {}'.format(user, password, registry) + LOG.info("Login to docker registry {}".format(registry)) + code, out = exec_docker_cmd('login', args, timeout=60, fail_ok=fail_ok, + con_ssh=con_ssh) + if code != 0: + return 1, out + + LOG.info('Logged into docker registry successfully: {}'.format(registry)) + return 0, args + + +def push_docker_image(name, tag=None, login_registry=None, con_ssh=None, + timeout=300, fail_ok=False): + """ + Push docker image via docker image push. 
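+    Example usage (illustrative; image name, tag and registry are made up):
+        push_docker_image('registry.local:9001/custom/my-image', tag='v1.0',
+                          login_registry=Container.LOCAL_DOCKER_REG)
+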
+ Args: + name: + tag: + login_registry (str|None): when set, login to given docker registry + before push + con_ssh: + timeout: + fail_ok: + + Returns (tuple): + (0, ) + (1, ) + + """ + args = '{}'.format(name.strip()) + if tag: + args += ':{}'.format(tag) + + if login_registry: + login_to_docker(registry=login_registry, con_ssh=con_ssh) + + LOG.info("Push docker image: {}".format(args)) + code, out = exec_docker_cmd('image push', args, timeout=timeout, + fail_ok=fail_ok, con_ssh=con_ssh) + if code != 0: + return 1, out + + LOG.info('docker image {} successfully pushed.'.format(args)) + return 0, args + + +def tag_docker_image(source_image, target_name, source_tag=None, + target_tag=None, con_ssh=None, timeout=300, + fail_ok=False): + """ + Tag docker image via docker image tag. Verify image is tagged via docker + image list. + Args: + source_image: + target_name: + source_tag: + target_tag: + con_ssh: + timeout: + fail_ok: + + Returns: + (0, ) + (1, ) + + """ + source_args = source_image.strip() + if source_tag: + source_args += ':{}'.format(source_tag) + + target_args = target_name.strip() + if target_tag: + target_args += ':{}'.format(target_tag) + + LOG.info("Tag docker image {} as {}".format(source_args, target_args)) + args = '{} {}'.format(source_args, target_args) + code, out = exec_docker_cmd('image tag', args, timeout=timeout, + fail_ok=fail_ok, con_ssh=con_ssh) + if code != 0: + return 1, out + + if not get_docker_images(repo=target_name, tag=target_tag, con_ssh=con_ssh, + fail_ok=False): + raise exceptions.ContainerError( + "Docker image {} is not listed after tagging {}".format( + target_name, source_image)) + + LOG.info('docker image {} successfully tagged as {}.'.format(source_args, + target_args)) + return 0, target_args + + +def remove_docker_images(images, force=False, con_ssh=None, timeout=300, + fail_ok=False): + """ + Remove docker image(s) via docker image rm + Args: + images (str|tuple|list): + force (bool): + con_ssh: + timeout: + fail_ok: + + Returns (tuple): + (0, ) + (1, ) + + """ + if isinstance(images, str): + images = (images,) + + LOG.info("Remove docker images: {}".format(images)) + args = ' '.join(images) + if force: + args = '--force {}'.format(args) + + code, out = exec_docker_cmd('image rm', args, timeout=timeout, + fail_ok=fail_ok, con_ssh=con_ssh) + return code, out + + +def get_docker_images(repo=None, tag=None, field='IMAGE ID', con_ssh=None, + fail_ok=False): + """ + get values for given docker image via 'docker image ls ' + Args: + repo (str): + tag (str|None): + field (str|tuple|list): + con_ssh: + fail_ok + + Returns (list|None): return None if no docker images returned at all due + to cmd failure + + """ + args = None + if repo: + args = repo + if tag: + args += ':{}'.format(tag) + code, output = exec_docker_cmd(sub_cmd='image ls', args=args, + fail_ok=fail_ok, con_ssh=con_ssh) + if code != 0: + return None + + table_ = table_parser.table_kube(output) + if not table_['values']: + if fail_ok: + return None + else: + raise exceptions.ContainerError( + "docker image {} does not exist".format(args)) + + values = table_parser.get_multi_values(table_, fields=field, + zip_values=True) + + return values + + +def get_helm_overrides(field='overrides namespaces', app_name='stx-openstack', + charts=None, + auth_info=Tenant.get('admin_platform'), con_ssh=None): + """ + Get helm overrides values via system helm-override-list + Args: + field (str): + app_name + charts (None|str|list|tuple): + auth_info: + con_ssh: + + Returns (list): + + """ + table_ = 
table_parser.table( + cli.system('helm-override-list', app_name, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + if charts: + table_ = table_parser.filter_table(table_, **{'chart name': charts}) + + vals = table_parser.get_multi_values(table_, fields=field, evaluate=True) + + return vals + + +def get_helm_override_values(chart, namespace, app_name='stx-openstack', + fields=('combined_overrides',), + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Get helm-override values for given chart via system helm-override-show + Args: + chart (str): + namespace (str): + app_name (str) + fields (str|tuple|list): + auth_info: + con_ssh: + + Returns (list): list of parsed yaml formatted output. e.g., list of dict, + list of list, list of str + + """ + args = '{} {} {}'.format(app_name, chart, namespace) + table_ = table_parser.table( + cli.system('helm-override-show', args, ssh_client=con_ssh, + auth_info=auth_info)[1], + rstrip_value=True) + + if isinstance(fields, str): + fields = (fields,) + + values = [] + for field in fields: + value = table_parser.get_value_two_col_table(table_, field=field, + merge_lines=False) + values.append(yaml.load('\n'.join(value))) + + return values + + +def __convert_kv(k, v): + if '.' not in k: + return {k: v} + new_key, new_val = k.rsplit('.', maxsplit=1) + return __convert_kv(new_key, {new_val: v}) + + +def update_helm_override(chart, namespace, app_name='stx-openstack', + yaml_file=None, kv_pairs=None, + reset_vals=False, reuse_vals=False, + auth_info=Tenant.get('admin_platform'), + con_ssh=None, fail_ok=False): + """ + Update helm_override values for given chart + Args: + chart: + namespace: + app_name + yaml_file: + kv_pairs: + reset_vals: + reuse_vals: + fail_ok + con_ssh + auth_info + + Returns (tuple): + (0, (str|list|dict)) # cmd accepted. + (1, ) # system helm-override-update cmd rejected + + """ + args = '{} {} {}'.format(app_name, chart, namespace) + if reset_vals: + args = '--reset-values {}'.format(args) + if reuse_vals: + args = '--reuse-values {}'.format(args) + if yaml_file: + args = '--values {} {}'.format(yaml_file, args) + if kv_pairs: + cmd_overrides = ','.join( + ['{}={}'.format(k, v) for k, v in kv_pairs.items()]) + args = '--set {} {}'.format(cmd_overrides, args) + + code, output = cli.system('helm-override-update', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code != 0: + return 1, output + + table_ = table_parser.table(output, rstrip_value=True) + overrides = table_parser.get_value_two_col_table(table_, 'user_overrides') + overrides = yaml.load('\n'.join(overrides)) + # yaml.load converts str to bool, int, float; but does not convert + # None type. Updates are not verified here since it is rather complicated + # to verify properly. + LOG.info("Helm-override updated : {}".format(overrides)) + + return 0, overrides + + +def is_stx_openstack_deployed(applied_only=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + force_check=False): + """ + Whether stx-openstack application is deployed. 
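+    Example usage (illustrative, e.g. from a test or fixture module where
+    pytest's skip is imported):
+        if not is_stx_openstack_deployed(applied_only=True):
+            skip('stx-openstack application is not applied')
+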
+ Args: + applied_only (bool): if True, then only return True when application + is in applied state + con_ssh: + auth_info: + force_check: + + Returns (bool): + + """ + openstack_deployed = ProjVar.get_var('OPENSTACK_DEPLOYED') + if not applied_only and not force_check and openstack_deployed is not None: + return openstack_deployed + + openstack_status = get_apps(application='stx-openstack', field='status', + con_ssh=con_ssh, auth_info=auth_info) + + LOG.info("{}".format(openstack_status)) + + res = False + if openstack_status and 'appl' in openstack_status[0].lower(): + res = True + if applied_only and openstack_status[0] != AppStatus.APPLIED: + res = False + + return res diff --git a/automated-pytest-suite/keywords/glance_helper.py b/automated-pytest-suite/keywords/glance_helper.py new file mode 100644 index 0000000..ed2478a --- /dev/null +++ b/automated-pytest-suite/keywords/glance_helper.py @@ -0,0 +1,1146 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import os +import re +import time +import json + +from pytest import skip + +from consts.auth import Tenant, HostLinuxUser +from consts.stx import GuestImages, ImageMetadata +from consts.proj_vars import ProjVar +from consts.timeout import ImageTimeout +from keywords import common, system_helper, host_helper +from testfixtures.fixture_resources import ResourceCleanup +from utils import table_parser, cli, exceptions +from utils.clients.ssh import ControllerClient, get_cli_client +from utils.tis_log import LOG + + +def get_images(long=False, images=None, field='id', + auth_info=Tenant.get('admin'), con_ssh=None, strict=True, + exclude=False, **kwargs): + """ + Get a list of image id(s) that matches the criteria + Args: + long (bool) + images (str|list): ids of images to filter from + field(str|list|tuple): id or name + auth_info (dict): + con_ssh (SSHClient): + strict (bool): match full string or substring for the value(s) given + in kwargs. + This is only applicable if kwargs key-val pair(s) are provided. + exclude (bool): whether to exclude item containing the string/pattern + in kwargs. + e.g., search for images that don't contain 'raw' + **kwargs: header-value pair(s) to filter out images from given image + list. e.g., Status='active', Name='centos' + + Returns (list): list of image ids + + """ + args = '--long' if long else '' + table_ = table_parser.table( + cli.openstack('image list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + if images: + table_ = table_parser.filter_table(table_, ID=images) + + return table_parser.get_multi_values(table_, field, strict=strict, + exclude=exclude, **kwargs) + + +def get_image_id_from_name(name=None, strict=False, fail_ok=True, con_ssh=None, + auth_info=None): + """ + + Args: + name (list or str): + strict: + fail_ok (bool): whether to raise exception if no image found with + provided name + con_ssh: + auth_info (dict: + + Returns: + Return a random image_id that match the name. else return an empty + string + + """ + if name is None: + name = GuestImages.DEFAULT['guest'] + + matching_images = get_images(name=name, auth_info=auth_info, + con_ssh=con_ssh, strict=strict) + if not matching_images: + image_id = '' + msg = "No existing image found with name: {}".format(name) + LOG.warning(msg) + if not fail_ok: + raise exceptions.CommonError(msg) + else: + image_id = matching_images[0] + if len(matching_images) > 1: + LOG.warning('More than one glace image found with name {}. 
' + 'Select {}.'.format(name, image_id)) + + return image_id + + +def get_avail_image_space(con_ssh, path='/opt/cgcs'): + """ + Get available disk space in GiB on given path which is where glance + images are saved at + Args: + con_ssh: + path (str) + + Returns (float): e.g., 9.2 + + """ + size = con_ssh.exec_cmd("df {} | awk '{{print $4}}'".format(path), + fail_ok=False)[1] + size = float(size.splitlines()[-1].strip()) / (1024 * 1024) + return size + + +def is_image_storage_sufficient(img_file_path=None, guest_os=None, + min_diff=0.05, con_ssh=None, + image_host_ssh=None): + """ + Check if glance image storage disk is sufficient to create new glance + image from specified image + Args: + img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img + guest_os (str): used if img_file_path is not provided. e,g., + ubuntu_14, ge_edge, cgcs-guest, etc + min_diff: minimum difference required between available space and + specifiec size. e.g., 0.1G + con_ssh (SSHClient): tis active controller ssh client + image_host_ssh (SSHClient): such as test server ssh where image file + was stored + + Returns (bool): + + """ + if image_host_ssh is None: + image_host_ssh = get_cli_client(central_region=True) + file_size = get_image_size(img_file_path=img_file_path, guest_os=guest_os, + ssh_client=image_host_ssh) + + if con_ssh is None: + name = 'RegionOne' if ProjVar.get_var('IS_DC') else None + con_ssh = ControllerClient.get_active_controller(name=name) + if 0 == con_ssh.exec_cmd('ceph df')[0]: + # assume image storage for ceph is sufficient + return True, file_size, None + + avail_size = get_avail_image_space(con_ssh=con_ssh) + + return avail_size - file_size >= min_diff, file_size, avail_size + + +def get_image_file_info(img_file_path=None, guest_os=None, ssh_client=None): + """ + Get image file info as dictionary + Args: + img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img + guest_os (str): has to be specified if img_file_path is unspecified. + e.g., 'tis-centos-guest' + ssh_client (SSHClient): e.g., test server ssh + + Returns (dict): image info dict. + Examples: + { + "virtual-size": 688914432, + "filename": "images/cgcs-guest.img", + "format": "raw", + "actual-size": 688918528, + "dirty-flag": false + } + + """ + if not img_file_path: + if guest_os is None: + raise ValueError( + "Either img_file_path or guest_os has to be provided") + else: + img_file_info = GuestImages.IMAGE_FILES.get(guest_os, None) + if not img_file_info: + raise ValueError( + "Invalid guest_os provided. Choose from: {}".format( + GuestImages.IMAGE_FILES.keys())) + # Assume ssh_client is test server client and image path is test + # server path + img_file_path = "{}/{}".format( + GuestImages.DEFAULT['image_dir_file_server'], img_file_info[0]) + + def _get_img_dict(ssh_): + img_info = ssh_.exec_cmd("qemu-img info --output json {}".format( + img_file_path), fail_ok=False)[1] + return json.loads(img_info) + + if ssh_client is None: + with host_helper.ssh_to_test_server() as ssh_client: + img_dict = _get_img_dict(ssh_=ssh_client) + else: + img_dict = _get_img_dict(ssh_=ssh_client) + + LOG.info("Image {} info: {}".format(img_file_path, img_dict)) + return img_dict + + +def get_image_size(img_file_path=None, guest_os=None, virtual_size=False, + ssh_client=None): + """ + Get image virtual or actual size in GB via qemu-img info + Args: + img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img + guest_os (str): has to be specified if img_file_path is unspecified. 
+ e.g., 'tis-centos-guest' + virtual_size: + ssh_client: + + Returns (float): image size in GiB + """ + key = "virtual-size" if virtual_size else "actual-size" + img_size = get_image_file_info(img_file_path=img_file_path, + guest_os=guest_os, + ssh_client=ssh_client)[key] + img_size = float(img_size) / (1024 * 1024 * 1024) + return img_size + + +def get_avail_image_conversion_space(con_ssh=None): + """ + Get available disk space in GB on /opt/img-conversions + Args: + con_ssh: + + Returns (float): e.g., 19.2 + + """ + size = con_ssh.exec_cmd("df | grep '/opt/img-conversions' | " + "awk '{{print $4}}'")[1] + size = float(size.strip()) / (1024 * 1024) + return size + + +def is_image_conversion_sufficient(img_file_path=None, guest_os=None, + min_diff=0.05, con_ssh=None, + img_host_ssh=None): + """ + Check if image conversion space is sufficient to convert given image to + raw format + Args: + img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img + guest_os (str): has to be specified if img_file_path is unspecified. + e.g., 'tis-centos-guest' + min_diff (int): in GB + con_ssh: + img_host_ssh + + Returns (bool): + + """ + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + if not system_helper.get_storage_nodes(con_ssh=con_ssh): + return True + + avail_size = get_avail_image_conversion_space(con_ssh=con_ssh) + file_size = get_image_size(img_file_path=img_file_path, guest_os=guest_os, + virtual_size=True, + ssh_client=img_host_ssh) + + return avail_size - file_size >= min_diff + + +def ensure_image_storage_sufficient(guest_os, con_ssh=None): + """ + Before image file is copied to tis, check if image storage is sufficient + Args: + guest_os: + con_ssh: + + Returns: + + """ + with host_helper.ssh_to_test_server() as img_ssh: + is_sufficient, image_file_size, avail_size = \ + is_image_storage_sufficient(guest_os=guest_os, con_ssh=con_ssh, + image_host_ssh=img_ssh) + if not is_sufficient: + images_to_del = get_images(exclude=True, + Name=GuestImages.DEFAULT['guest'], + con_ssh=con_ssh) + if images_to_del: + LOG.info( + "Delete non-default images due to insufficient image " + "storage media to create required image") + delete_images(images_to_del, check_first=False, con_ssh=con_ssh) + if not is_image_storage_sufficient(guest_os=guest_os, + con_ssh=con_ssh, + image_host_ssh=img_ssh)[0]: + LOG.info( + "Insufficient image storage media to create {} image " + "even after deleting non-default " + "glance images".format(guest_os)) + return False, image_file_size + else: + LOG.info( + "Insufficient image storage media to create {} " + "image".format( + guest_os)) + return False, image_file_size + + return True, image_file_size + + +def create_image(name=None, image_id=None, source_image_file=None, volume=None, + visibility='public', force=None, + store=None, disk_format=None, container_format=None, + min_disk=None, min_ram=None, tags=None, + protected=None, project=None, project_domain=None, + timeout=ImageTimeout.CREATE, con_ssh=None, + auth_info=Tenant.get('admin'), fail_ok=False, + ensure_sufficient_space=True, sys_con_for_dc=True, + cleanup=None, hw_vif_model=None, **properties): + """ + Create an image with given criteria. + + Args: + name (str): string to be included in image name + image_id (str): id for the image to be created + source_image_file (str): local image file to create image from. 
+ DefaultImage will be used if unset + volume (str) + disk_format (str): One of these: ami, ari, aki, vhd, vmdk, raw, + qcow2, vdi, iso + container_format (str): One of these: ami, ari, aki, bare, ovf + min_disk (int): Minimum size of disk needed to boot image (in gigabytes) + min_ram (int): Minimum amount of ram needed to boot image (in + megabytes) + visibility (str): public|private|shared|community + protected (bool): Prevent image from being deleted. + store (str): Store to upload image to + force (bool) + tags (str|tuple|list) + project (str|None) + project_domain (str|None) + timeout (int): max seconds to wait for cli return + con_ssh (SSHClient): + auth_info (dict|None): + fail_ok (bool): + ensure_sufficient_space (bool) + sys_con_for_dc (bool): create image on system controller if it's + distributed cloud + cleanup (str|None): add to teardown list. 'function', 'class', + 'module', 'session', or None + hw_vif_model (None|str): if this is set, 'hw_vif_model' in properties + will be overridden + **properties: key=value pair(s) of properties to associate with the + image + + Returns (tuple): (rtn_code(int), message(str)) # 1, 2 only + applicable if fail_ok=True + - (0, , "Image is created successfully") + - (1, , ) # openstack image create cli rejected + - (2, , "Image status is not active.") + """ + + # Use source image url if url is provided. Else use local img file. + + default_guest_img = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][2] + + file_path = source_image_file + if not file_path and not volume: + img_dir = GuestImages.DEFAULT['image_dir'] + file_path = "{}/{}".format(img_dir, default_guest_img) + + if file_path: + if file_path.startswith('~/'): + file_path = file_path.replace('~', HostLinuxUser.get_home(), 1) + file_path = os.path.normpath(file_path) + if 'win' in file_path and 'os_type' not in properties: + properties['os_type'] = 'windows' + elif 'ge_edge' in file_path and 'hw_firmware_type' not in properties: + properties['hw_firmware_type'] = 'uefi' + + if hw_vif_model: + properties[ImageMetadata.VIF_MODEL] = hw_vif_model + + if sys_con_for_dc and ProjVar.get_var('IS_DC'): + con_ssh = ControllerClient.get_active_controller('RegionOne') + create_auth = Tenant.get(tenant_dictname=auth_info['tenant'], + dc_region='SystemController').copy() + image_host_ssh = get_cli_client(central_region=True) + else: + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + image_host_ssh = get_cli_client() + create_auth = auth_info + + if ensure_sufficient_space: + if not is_image_storage_sufficient(img_file_path=file_path, + con_ssh=con_ssh, + image_host_ssh=image_host_ssh)[0]: + skip('Insufficient image storage for creating glance image ' + 'from {}'.format(file_path)) + + source_str = file_path + + known_imgs = ['cgcs-guest', 'tis-centos-guest', 'ubuntu', 'cirros', + 'opensuse', 'rhel', 'centos', 'win', 'ge_edge', + 'vxworks', 'debian-8-m-agent'] + name = name if name else 'auto' + for img_str in known_imgs: + if img_str in name: + break + elif img_str in source_str: + name = img_str + '_' + name + break + else: + name_prefix = source_str.split(sep='/')[-1] + name_prefix = name_prefix.split(sep='.')[0] + name = name_prefix + '_' + name + + name = common.get_unique_name(name_str=name, existing_names=get_images(), + resource_type='image') + + LOG.info("Creating glance image: {}".format(name)) + + if not disk_format: + if not source_image_file: + # default tis-centos-guest image is raw + disk_format = 'raw' + else: + disk_format = 'qcow2' + + args_dict = { + 
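# Options for 'openstack image create'; the visibility flag and the image name are appended after parsing. +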
'--id': image_id, + '--store': store, + '--disk-format': disk_format, + '--container-format': container_format if container_format else 'bare', + '--min-disk': min_disk, + '--min-ram': min_ram, + '--file': file_path, + '--force': True if force else None, + '--protected': True if protected else None, + '--unprotected': True if protected is False else None, + '--tag': tags, + '--property': properties, + '--project': project, + '--project-domain': project_domain, + '--volume': volume, + } + if visibility: + args_dict['--{}'.format(visibility)] = True + args_ = '{} {}'.format( + common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name) + + try: + LOG.info("Creating image {} with args: {}".format(name, args_)) + code, output = cli.openstack('image create', args_, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=create_auth, + timeout=timeout) + except: + # This is added to help debugging image create failure in case of + # insufficient space + con_ssh.exec_cmd('df -h', fail_ok=True, get_exit_code=False) + raise + + table_ = table_parser.table(output) + actual_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup and actual_id: + ResourceCleanup.add('image', actual_id, scope=cleanup) + + if code > 1: + return 1, actual_id, output + + in_active = wait_for_image_status(actual_id, con_ssh=con_ssh, + auth_info=create_auth, fail_ok=fail_ok) + if not in_active: + return 2, actual_id, "Image status is not active." + + if image_id and image_id != actual_id: + msg = "Actual image id - {} is different than requested id - {}.".\ + format(actual_id, image_id) + if fail_ok: + return 3, actual_id, msg + raise exceptions.ImageError(msg) + + msg = "Image {} is created successfully".format(actual_id) + LOG.info(msg) + return 0, actual_id, msg + + +def wait_for_image_appear(image_id, auth_info=None, timeout=900, fail_ok=False): + end_time = time.time() + timeout + while time.time() < end_time: + images = get_images(auth_info=auth_info) + if image_id in images: + return True + + time.sleep(20) + + if not fail_ok: + raise exceptions.StorageError( + "Glance image {} did not appear within {} seconds.".format(image_id, + timeout)) + + return False + + +def wait_for_image_status(image_id, status='active', + timeout=ImageTimeout.STATUS_CHANGE, check_interval=3, + fail_ok=True, con_ssh=None, auth_info=None): + actual_status = None + end_time = time.time() + timeout + while time.time() < end_time: + actual_status = get_image_values(image_id, fields='status', + auth_info=auth_info, + con_ssh=con_ssh)[0] + if status.lower() == actual_status.lower(): + LOG.info("Image {} has reached status: {}".format(image_id, status)) + return True + + time.sleep(check_interval) + + else: + msg = "Timed out waiting for image {} status to change to {}. Actual " \ + "status: {}".format(image_id, status, actual_status) + if fail_ok: + LOG.warning(msg) + return False + raise exceptions.TimeoutException(msg) + + +def _wait_for_images_deleted(images, timeout=ImageTimeout.STATUS_CHANGE, + fail_ok=True, + check_interval=3, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + check if a specific field still exist in a specified column of openstack + image list + + Args: + images (list|str): + timeout (int): + fail_ok (bool): + check_interval (int): + con_ssh: + auth_info (dict): + + Returns (bool): Return True if the specific image_id is found within the + timeout period. 
False otherwise + + """ + if isinstance(images, str): + images = [images] + + imgs_to_check = list(images) + imgs_deleted = [] + end_time = time.time() + timeout + while time.time() < end_time: + existing_imgs = get_images(con_ssh=con_ssh, auth_info=auth_info) + for img in imgs_to_check: + if img not in existing_imgs: + imgs_to_check.remove(img) + imgs_deleted.append(img) + + if not imgs_to_check: + return True, tuple(imgs_deleted) + + time.sleep(check_interval) + else: + if fail_ok: + return False, tuple(imgs_deleted) + raise exceptions.TimeoutException( + "Timed out waiting for all given images to be removed from " + "openstack " + "image list. Given images: {}. Images still exist: {}.". + format(images, imgs_to_check)) + + +def image_exists(image, image_val='ID', con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Args: + image: + image_val: Name or ID + con_ssh: + auth_info + + Returns (bool): + + """ + images = get_images(auth_info=auth_info, con_ssh=con_ssh, field=image_val) + return image in images + + +def delete_images(images, timeout=ImageTimeout.DELETE, check_first=True, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Delete given images + + Args: + images (list|str): ids of images to delete + timeout (int): max time wait for cli to return, and max time wait for + images to remove from openstack image list + check_first (bool): whether to check if images exist before attempt + to delete + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + (-1, "None of the given image(s) exist on system. Do nothing.") + (0, "image(s) deleted successfully") + (1, ) # if delete image cli returns stderr + (2, "Delete image cli ran successfully but some image(s) did + not disappear within seconds") + """ + if not images: + return -1, "No image provided to delete" + + LOG.info("Deleting image(s): {}".format(images)) + if isinstance(images, str): + images = [images] + else: + images = list(images) + + if check_first: + existing_images = get_images(images=images, auth_info=auth_info, + con_ssh=con_ssh) + imgs_to_del = list(set(existing_images) & set(images)) + if not imgs_to_del: + msg = "None of the given image(s) exist on system. Do nothing." + LOG.info(msg) + return -1, msg + else: + imgs_to_del = list(images) + + args_ = ' '.join(imgs_to_del) + + exit_code, cmd_output = cli.openstack('image delete', args_, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info, timeout=timeout) + if exit_code > 1: + return 1, cmd_output + + LOG.info("Waiting for images to be removed from openstack image " + "list: {}".format(imgs_to_del)) + all_deleted, images_deleted = _wait_for_images_deleted(imgs_to_del, + fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=auth_info, + timeout=timeout) + + if not all_deleted: + images_undeleted = set(imgs_to_del) - set(images_deleted) + msg = "Delete image cli ran successfully but some image(s) {} did " \ + "not disappear within {} seconds".format(images_undeleted, + timeout) + return 2, msg + + LOG.info("image(s) are successfully deleted: {}".format(imgs_to_del)) + return 0, "image(s) deleted successfully" + + +def get_image_properties(image, property_keys, rtn_dict=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + + Args: + image (str): id of image + property_keys (str|list\tuple): list of metadata key(s) to get value( + s) for + rtn_dict (bool): whether to return list or dict + auth_info (dict): Admin by default + con_ssh (SSHClient): + + Returns (dict|list): image metadata in a dictionary. 
+ Examples: {'hw_mem_page_size': small} + """ + if isinstance(property_keys, str): + property_keys = [property_keys] + + property_keys = [k.strip().lower().replace(':', '_').replace('-', '_') for k + in property_keys] + properties = get_image_values(image, fields='properties', + auth_info=auth_info, con_ssh=con_ssh)[0] + + if rtn_dict: + return {k: properties.get(k) for k in property_keys} + else: + return [properties.get(k) for k in property_keys] + + +def get_image_values(image, fields, auth_info=Tenant.get('admin'), con_ssh=None, + fail_ok=False): + """ + Get glance image values from openstack image show + Args: + image: + fields: + auth_info: + con_ssh: + fail_ok + + Returns (list): + + """ + if isinstance(fields, str): + fields = (fields,) + code, output = cli.openstack('image show', image, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return [None] * len(fields) + + table_ = table_parser.table(output) + values = table_parser.get_multi_values_two_col_table( + table_, fields, merge_lines=True, evaluate=True, + dict_fields='properties') + return values + + +def scp_guest_image(img_os='ubuntu_14', dest_dir=None, timeout=3600, + con_ssh=None): + """ + + Args: + img_os (str): guest image os type. valid values: ubuntu, centos_7, + centos_6 + dest_dir (str): where to save the downloaded image. Default is + '~/images' + timeout (int) + con_ssh (SSHClient): + + Returns (str): full file name of downloaded image. e.g., + '~/images/ubuntu_14.qcow2' + + """ + valid_img_os_types = list(GuestImages.IMAGE_FILES.keys()) + + if img_os not in valid_img_os_types: + raise ValueError( + "Invalid guest image OS type provided. Valid values: {}".format( + valid_img_os_types)) + + if not dest_dir: + dest_dir = GuestImages.DEFAULT['image_dir'] + + LOG.info("Downloading guest image from test server...") + dest_name = GuestImages.IMAGE_FILES[img_os][2] + ts_source_name = GuestImages.IMAGE_FILES[img_os][0] + if con_ssh is None: + con_ssh = get_cli_client(central_region=True) + + if ts_source_name: + # img saved on test server. 
scp from test server + source_path = '{}/{}'.format( + GuestImages.DEFAULT['image_dir_file_server'], ts_source_name) + dest_path = common.scp_from_test_server_to_user_file_dir( + source_path=source_path, dest_dir=dest_dir, + dest_name=dest_name, timeout=timeout, con_ssh=con_ssh) + else: + # scp from tis system if needed + dest_path = '{}/{}'.format(dest_dir, dest_name) + if ProjVar.get_var('REMOTE_CLI') and not con_ssh.file_exists(dest_path): + tis_source_path = '{}/{}'.format(GuestImages.DEFAULT['image_dir'], + dest_name) + common.scp_from_active_controller_to_localhost( + source_path=tis_source_path, dest_path=dest_path, + timeout=timeout) + + if not con_ssh.file_exists(dest_path): + raise exceptions.CommonError( + "image {} does not exist after download".format(dest_path)) + + LOG.info("{} image downloaded successfully and saved to {}".format( + img_os, dest_path)) + return dest_path + + +def get_guest_image(guest_os, rm_image=True, check_disk=False, cleanup=None, + use_existing=True): + """ + Get or create a glance image with given guest OS + Args: + guest_os (str): valid values: ubuntu_12, ubuntu_14, centos_6, + centos_7, opensuse_11, tis-centos-guest, + cgcs-guest, vxworks-guest, debian-8-m-agent + rm_image (bool): whether or not to rm image from /home/sysadmin/images + after creating glance image + check_disk (bool): whether to check if image storage disk is + sufficient to create new glance image + cleanup (str|None) + use_existing (bool): whether to use existing guest image if exists + + Returns (str): image_id + + """ + # TODO: temp workaround + if guest_os in ['opensuse_12', 'win_2016', 'win_2012']: + skip('Skip test with 20G+ virtual size image for now - CGTS-10776') + + nat_name = ProjVar.get_var('NATBOX').get('name') + if nat_name == 'localhost': + if re.search('win|rhel|opensuse', guest_os): + skip("Skip tests with large images for vbox") + + LOG.info("Get or create a glance image with {} guest OS".format(guest_os)) + img_id = None + if use_existing: + img_id = get_image_id_from_name(guest_os, strict=True) + + if not img_id: + con_ssh = None + img_file_size = 0 + if check_disk: + is_sufficient, img_file_size = ensure_image_storage_sufficient( + guest_os=guest_os) + if not is_sufficient: + skip( + "Insufficient image storage space in /opt/cgcs/ to create " + "{} image".format( + guest_os)) + + if guest_os == '{}-qcow2'.format(GuestImages.DEFAULT['guest']): + # convert default img to qcow2 format if needed + qcow2_img_path = '{}/{}'.format(GuestImages.DEFAULT['image_dir'], + GuestImages.IMAGE_FILES[guest_os][ + 2]) + con_ssh = ControllerClient.get_active_controller() + if not con_ssh.file_exists(qcow2_img_path): + raw_img_path = '{}/{}'.format(GuestImages.DEFAULT['image_dir'], + GuestImages.IMAGE_FILES[ + GuestImages.DEFAULT['guest']][ + 2]) + con_ssh.exec_cmd( + 'qemu-img convert -f raw -O qcow2 {} {}'.format( + raw_img_path, qcow2_img_path), + fail_ok=False, expect_timeout=600) + + # copy non-default img from test server + dest_dir = GuestImages.DEFAULT['image_dir'] + home_dir = HostLinuxUser.get_home() + if check_disk and os.path.normpath(home_dir) in os.path.abspath( + dest_dir): + # Assume image file should not be present on system since large + # image file should get removed + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + avail_sysadmin_home = get_avail_image_space(con_ssh=con_ssh, + path=home_dir) + if avail_sysadmin_home < img_file_size: + skip("Insufficient space in {} for {} image to be copied " + "to".format(home_dir, guest_os)) + + 
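# Download the guest image file to the local image directory, then create a glance image from it. +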
image_path = scp_guest_image(img_os=guest_os, dest_dir=dest_dir) + + try: + disk_format, container_format = GuestImages.IMAGE_FILES[guest_os][ + 3:5] + img_id = create_image(name=guest_os, source_image_file=image_path, + disk_format=disk_format, + container_format=container_format, + fail_ok=False, cleanup=cleanup)[1] + finally: + if rm_image and not re.search('cgcs-guest|tis-centos|ubuntu_14', + guest_os): + con_ssh = ControllerClient.get_active_controller() + con_ssh.exec_cmd('rm -f {}'.format(image_path), fail_ok=True, + get_exit_code=False) + + return img_id + + +def set_unset_image_vif_multiq(image, set_=True, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Set or unset a glance image with multiple vif-Queues + Args: + image (str): name or id of a glance image + set_ (bool): whether or not to set the hw_vif_multiqueue_enabled + fail_ok: + con_ssh: + auth_info: + + Returns (str): code, msg + + """ + + if image is None: + return 1, "Error:image_name not provided" + if set_: + cmd = 'image set ' + else: + cmd = 'image unset ' + + cmd += image + cmd += ' --property' + + if set_: + cmd += ' hw_vif_multiqueue_enabled=True' + else: + cmd += ' hw_vif_multiqueue_enabled' + + res, out = cli.openstack(cmd, ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + return res, out + + +def unset_image(image, properties=None, tags=None, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + + Args: + image (str): image name or id + properties (None|str|list|tuple): properties to unset + tags (None|str|list|tuple): tags to unset + con_ssh: + auth_info: + + Returns: + """ + args = [] + post_checks = {} + if properties: + if isinstance(properties, str): + properties = [properties] + for item in properties: + args.append('--property {}'.format(item)) + post_checks['properties'] = properties + + if tags: + if isinstance(tags, str): + tags = [tags] + for tag in tags: + args.append('--tag {}'.format(tag)) + post_checks['tags'] = tags + + if not args: + raise ValueError( + "Nothing to unset. 
Please specify property or tag to unset") + + args = ' '.join(args) + ' {}'.format(image) + code, out = cli.openstack('image unset', args, ssh_client=con_ssh, + fail_ok=True, auth_info=auth_info) + if code > 0: + return 1, out + + check_image_settings(image=image, check_dict=post_checks, unset=True, + con_ssh=con_ssh, auth_info=auth_info) + msg = "Image {} is successfully unset".format(image) + return 0, msg + + +def set_image(image, new_name=None, properties=None, min_disk=None, + min_ram=None, container_format=None, + disk_format=None, architecture=None, instance_id=None, + kernel_id=None, os_distro=None, + os_version=None, ramdisk_id=None, activate=None, project=None, + project_domain=None, tags=None, + protected=None, visibility=None, membership=None, + hw_vif_model=None, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Set image properties/metadata + Args: + image (str): + new_name (str|None): + properties (dict|None): + hw_vif_model (str|None): override hw_vif_model in properties if any + min_disk (int|str|None): + min_ram (int|str|None): + container_format (str|None): + disk_format (str|None): + architecture (str|None): + instance_id (str|None): + kernel_id (str|None): + os_distro (str|None): + os_version (str|None): + ramdisk_id (str|None): + activate (bool|None): + project (str|None): + project_domain (str|None): + tags (list|tuple|None): + protected (bool|None): + visibility (str): valid values: 'public', 'private', 'community', + 'shared' + membership (str): valid values: 'accept', 'reject', 'pending' + con_ssh: + auth_info: + + Returns (tupe): + (0, Image is successfully modified) + (1, ) - openstack image set is rejected + + """ + + post_checks = {} + args = [] + if protected is not None: + if protected: + args.append('--protected') + post_check_val = True + else: + args.append('--unprocteced') + post_check_val = False + post_checks['protected'] = post_check_val + + if visibility is not None: + valid_vals = ('public', 'private', 'community', 'shared') + if visibility not in valid_vals: + raise ValueError( + "Invalid visibility specified. Valid options: {}".format( + valid_vals)) + args.append('--{}'.format(visibility)) + post_checks['visibility'] = visibility + + if activate is not None: + if activate: + args.append('--activate') + post_check_val = 'active' + else: + args.append('--deactivate') + post_check_val = 'deactivated' + post_checks['status'] = post_check_val + + if membership is not None: + valid_vals = ('accept', 'reject', 'pending') + if membership not in valid_vals: + raise ValueError( + "Invalid membership specified. 
Valid options: {}".format( + valid_vals)) + args.append('--{}'.format(membership)) + # Unsure how to do post check + + if not properties: + properties = {} + if hw_vif_model: + properties[ImageMetadata.VIF_MODEL] = hw_vif_model + if properties: + for key, val in properties.items(): + args.append('--property {}="{}"'.format(key, val)) + post_checks['properties'] = properties + + if tags: + if isinstance(tags, str): + tags = [tags] + for tag in tags: + args.append('--tag {}'.format(tag)) + post_checks['tags'] = list(tags) + + other_args = { + '--name': (new_name, 'name'), + '--min-disk': (min_disk, 'min_disk'), + '--min-ram': (min_ram, 'min_ram'), + '--container-format': (container_format, 'container_format'), + '--disk-format': (disk_format, 'disk_format'), + '--project': (project, 'owner'), # assume project id will be given + '--project-domain': (project_domain, None), # Post check unhandled atm + '--architecture': (architecture, None), + '--instance-id': (instance_id, None), + '--kernel-id': (kernel_id, None), + '--os-distro': (os_distro, None), + '--os-version': (os_version, None), + '--ramdisk-id': (ramdisk_id, None), + } + + for key, val in other_args.items(): + if val[0] is not None: + args.append('{} {}'.format(key, val[0])) + if val[1]: + post_checks[val[1]] = val[0] + + args = ' '.join(args) + if not args: + raise ValueError("Nothing to set") + + args += ' {}'.format(image) + code, out = cli.openstack('image set', args, ssh_client=con_ssh, + fail_ok=True, auth_info=auth_info) + if code > 0: + return 1, out + + check_image_settings(image=image, check_dict=post_checks, con_ssh=con_ssh, + auth_info=auth_info) + msg = "Image {} is successfully modified".format(image) + return 0, msg + + +def check_image_settings(image, check_dict, unset=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Check image settings via openstack image show. + Args: + image (str): + check_dict (dict): key should be the field; + if unset, value should be a list or tuple, key should be properties + and/or tags + if set, value should be dict if key is properties or tags, + otherwise value should normally be a str + unset (bool): whether to check if given metadata are set or unset + con_ssh (SSHClient): + auth_info (dict): + + Returns (None): + + """ + LOG.info("Checking image setting is as specified: {}".format(check_dict)) + + post_tab = table_parser.table( + cli.openstack('image show', image, ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + + for field, expt_val in check_dict.items(): + actual_val = table_parser.get_value_two_col_table(post_tab, field=field, + merge_lines=True) + if field == 'properties': + actual_vals = actual_val.split(', ') + actual_vals = ((val.split('=')) for val in actual_vals) + actual_dict = {k.strip(): v.strip() for k, v in actual_vals} + if unset: + for key in expt_val: + assert -1 == actual_dict.get(key, -1) + else: + for key, val in expt_val.items(): + actual = actual_dict[key] + try: + actual = eval(actual) + except (NameError, SyntaxError): + pass + assert str(val) == str(actual), \ + "Property {} is not as set. Expected: {}, actual: {}". \ + format(key, val, actual_dict[key]) + elif field == 'tags': + actual_vals = [val.strip() for val in actual_val.split(',')] + if unset: + assert not (set(expt_val) & set(actual_val)), \ + "Expected to be unset: {}, actual: {}". 
\ + format(expt_val, actual_vals) + else: + assert set(expt_val) <= set(actual_vals), \ + "Expected tags: {}, actual: {}".format( + expt_val, actual_vals) + else: + if unset: + LOG.warning("Unset flag ignored. Only property and tag " + "is valid for unset") + assert str(expt_val) == str(actual_val), \ + "{} is not as set. Expected: {}, actual: {}". \ + format(field, expt_val, actual_val) diff --git a/automated-pytest-suite/keywords/gnocchi_helper.py b/automated-pytest-suite/keywords/gnocchi_helper.py new file mode 100644 index 0000000..8e8b3bb --- /dev/null +++ b/automated-pytest-suite/keywords/gnocchi_helper.py @@ -0,0 +1,165 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from utils import cli +from utils import table_parser +from utils.tis_log import LOG + +from consts.auth import Tenant +from keywords import common + + +def get_aggregated_measures(field='value', resource_type=None, metrics=None, + start=None, stop=None, overlap=None, + refresh=None, resource_ids=None, extra_query=None, + fail_ok=False, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Get measurements via 'openstack metric measures aggregation' + Args: + field (str): header of a column + resource_type (str|None): used in --resource-type + metrics (str|list|tuple|None): used in --metric [metric2 ...] + start (str|None): used in --start + stop (str|None): used in --stop + refresh (bool): used in --refresh + overlap (str|None): overlap percentage. used in + --needed-overlap + resource_ids (str|list|tuple|None): used in --query "id=[ + or id= ...]" + extra_query (str|None): used in --query + fail_ok: + auth_info: + con_ssh: + + Returns (list): list of strings + + """ + LOG.info("Getting aggregated measurements...") + args_dict = { + 'resource-type': resource_type, + 'metric': metrics, + 'start': start, + 'stop': stop, + 'needed-overlap': overlap, + 'refresh': refresh, + } + + args = common.parse_args(args_dict, vals_sep=' ') + query_str = '' + if resource_ids: + if isinstance(resource_ids, str): + resource_ids = [resource_ids] + resource_ids = ['id={}'.format(val) for val in resource_ids] + query_str = ' or '.join(resource_ids) + + if extra_query: + if resource_ids: + query_str += ' and ' + query_str += '{}'.format(extra_query) + + if query_str: + args += ' --query "{}"'.format(query_str) + + code, out = cli.openstack('metric measures aggregation', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, out + + table_ = table_parser.table(out) + return 0, table_parser.get_values(table_, field) + + +def get_metric_values(metric_id=None, metric_name=None, resource_id=None, + fields='id', fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get metric info via 'openstack metric show' + Args: + metric_id (str|None): + metric_name (str|None): Only used if metric_id is not provided + resource_id (str|None): Only used if metric_id is not provided + fields (str|list|tuple): field name + fail_ok (bool): + auth_info: + con_ssh: + + Returns (list): + + """ + if metric_id is None and metric_name is None: + raise ValueError("metric_id or metric_name has to be provided.") + + if metric_id: + arg = metric_id + else: + if resource_id: + arg = '--resource-id {} "{}"'.format(resource_id, metric_name) + else: + if not fail_ok: + raise ValueError("resource_id needs to be provided when using " + "metric_name") + arg = '"{}"'.format(metric_name) + + code, output = cli.openstack('openstack metric show', arg, + ssh_client=con_ssh, 
fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return output + + table_ = table_parser.table(output) + return table_parser.get_multi_values_two_col_table(table_, fields) + + +def get_metrics(field='id', metric_name=None, resource_id=None, fail_ok=True, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get metrics values via 'openstack metric list' + Args: + field (str|list|tuple): header of the metric list table + metric_name (str|None): + resource_id (str|None): + fail_ok (bool): + auth_info: + con_ssh: + + Returns (list): list of strings + + """ + columns = ['id', 'archive_policy/name', 'name', 'unit', 'resource_id'] + arg = '-f value ' + arg += ' '.join(['-c {}'.format(column) for column in columns]) + + grep_str = '' + if resource_id: + grep_str += ' | grep --color=never -E -i {}'.format(resource_id) + if metric_name: + grep_str += ' | grep --color=never -E -i {}'.format(metric_name) + + arg += grep_str + + code, output = cli.openstack('metric list', arg, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return [] + + values = [] + convert = False + if isinstance(field, str): + field = (field, ) + convert = True + + for header in field: + lines = output.splitlines() + index = columns.index(header.lower()) + vals = [line.split(sep=' ')[index] for line in lines] + values.append(vals) + + if convert: + values = values[0] + return values diff --git a/automated-pytest-suite/keywords/heat_helper.py b/automated-pytest-suite/keywords/heat_helper.py new file mode 100644 index 0000000..bf7d5c1 --- /dev/null +++ b/automated-pytest-suite/keywords/heat_helper.py @@ -0,0 +1,398 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time + +from utils import table_parser, cli, exceptions +from utils.tis_log import LOG +from utils.clients.ssh import get_cli_client +from consts.stx import GuestImages, HeatStackStatus, HEAT_CUSTOM_TEMPLATES +from consts.filepaths import TestServerPath +from keywords import network_helper, common +from testfixtures.fixture_resources import ResourceCleanup + + +def _wait_for_heat_stack_deleted(stack_name=None, timeout=120, + check_interval=3, con_ssh=None, + auth_info=None): + """ + This will wait for the heat stack to be deleted + Args: + stack_name(str): Heat stack name to check for state + con_ssh (SSHClient): If None, active controller ssh will be used. + auth_info (dict): Tenant dict. If None, primary tenant will be used. + + Returns: + + """ + LOG.info("Waiting for {} to be deleted...".format(stack_name)) + end_time = time.time() + timeout + while time.time() < end_time: + stack_status = get_stack_status(stack=stack_name, auth_info=auth_info, + con_ssh=con_ssh, fail_ok=True) + if not stack_status: + return True + elif stack_status[0] == HeatStackStatus.DELETE_FAILED: + LOG.warning('Heat stack in DELETE_FAILED state') + return False + + time.sleep(check_interval) + + msg = "Heat stack {} did not get deleted within timeout".format(stack_name) + + LOG.warning(msg) + return False + + +def wait_for_heat_status(stack_name=None, + status=HeatStackStatus.CREATE_COMPLETE, + timeout=300, check_interval=5, + fail_ok=False, con_ssh=None, auth_info=None): + """ + This will wait for the desired state of the heat stack or timeout + Args: + stack_name(str): Heat stack name to check for state + status(str): Status to check for + timeout (int) + check_interval (int) + fail_ok (bool + con_ssh (SSHClient): If None, active controller ssh will be used. + auth_info (dict): Tenant dict. 
If None, primary tenant will be used. + + Returns (tuple): , + + """ + LOG.info("Waiting for {} to be shown in {} ...".format(stack_name, status)) + end_time = time.time() + timeout + + fail_status = current_status = None + if status == HeatStackStatus.CREATE_COMPLETE: + fail_status = HeatStackStatus.CREATE_FAILED + elif status == HeatStackStatus.UPDATE_COMPLETE: + fail_status = HeatStackStatus.UPDATE_FAILED + + while time.time() < end_time: + current_status = get_stack_status(stack=stack_name, auth_info=auth_info, + con_ssh=con_ssh)[0] + if status == current_status: + return True, 'Heat stack {} has reached {} status'.format( + stack_name, status) + elif fail_status == current_status: + stack_id = get_stack_values(stack=stack_name, fields='id', + auth_info=auth_info, con_ssh=con_ssh)[0] + get_stack_resources(stack=stack_id, auth_info=auth_info, + con_ssh=con_ssh) + + err = "Heat stack {} failed to reach {}, actual status: {}".format( + stack_name, status, fail_status) + if fail_ok: + LOG.warning(err) + return False, err + raise exceptions.HeatError(err) + + time.sleep(check_interval) + + stack_id = get_stack_values(stack=stack_name, fields='id', + auth_info=auth_info, con_ssh=con_ssh)[0] + get_stack_resources(stack=stack_id, auth_info=auth_info, con_ssh=con_ssh) + err_msg = "Heat stack {} did not reach {} within {}s. Actual " \ + "status: {}".format(stack_name, status, timeout, current_status) + if fail_ok: + LOG.warning(err_msg) + return False, err_msg + raise exceptions.HeatError(err_msg) + + +def get_stack_values(stack, fields='stack_status_reason', con_ssh=None, + auth_info=None, fail_ok=False): + code, out = cli.openstack('stack show', stack, ssh_client=con_ssh, + auth_info=auth_info, fail_ok=fail_ok) + if code > 0: + return None + + table_ = table_parser.table(out) + return table_parser.get_multi_values_two_col_table(table_=table_, + fields=fields) + + +def get_stacks(name=None, field='id', con_ssh=None, auth_info=None, all_=True): + """ + Get the stacks list based on name if given for a given tenant. + + Args: + con_ssh (SSHClient): If None, active controller ssh will be used. + auth_info (dict): Tenant dict. If None, primary tenant will be used. + all_ (bool): whether to display all stacks for admin user + name (str): Given name for the heat stack + field (str|list|tuple) + + Returns (list): list of heat stacks. + + """ + args = '' + if auth_info is not None: + if auth_info['user'] == 'admin' and all_: + args = '--a' + table_ = table_parser.table( + cli.openstack('stack list', positional_args=args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + kwargs = {'Stack Name': name} if name else {} + return table_parser.get_multi_values(table_, field, **kwargs) + + +def get_stack_status(stack, con_ssh=None, auth_info=None, fail_ok=False): + """ + Get the stacks status based on name if given for a given tenant. + + Args: + con_ssh (SSHClient): If None, active controller ssh will be used. + auth_info (dict): Tenant dict. If None, primary tenant will be used. + stack (str): Given name for the heat stack + fail_ok (bool): + + Returns (str): Heat stack status of a specific tenant. + + """ + status = get_stack_values(stack, fields='stack_status', con_ssh=con_ssh, + auth_info=auth_info, fail_ok=fail_ok) + status = status[0] if status else None + return status + + +def get_stack_resources(stack, field='resource_name', auth_info=None, + con_ssh=None, **kwargs): + """ + + Args: + stack (str): id (or name) for the heat stack. ID is required if admin + user is used to display tenant resource. 
+ field: values to return + auth_info: + con_ssh: + kwargs: key/value pair to filer out the values to return + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('stack resource list --long', stack, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_values(table_, target_header=field, **kwargs) + + +def delete_stack(stack, fail_ok=False, check_first=False, con_ssh=None, + auth_info=None): + """ + Delete the given heat stack for a given tenant. + + Args: + con_ssh (SSHClient): If None, active controller ssh will be used. + fail_ok (bool): + check_first (bool): whether or not to check the stack existence + before attempt to delete + auth_info (dict): Tenant dict. If None, primary tenant will be used. + stack (str): Given name for the heat stack + + Returns (tuple): Status and msg of the heat deletion. + + """ + + if not stack: + raise ValueError("stack_name is not provided.") + + if check_first: + if not get_stack_status(stack, con_ssh=con_ssh, auth_info=auth_info, + fail_ok=True): + msg = "Heat stack {} doesn't exist on the system. Do " \ + "nothing.".format(stack) + LOG.info(msg) + return -1, msg + + LOG.info("Deleting Heat Stack %s", stack) + exitcode, output = cli.openstack('stack delete -y', stack, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if exitcode > 1: + LOG.warning("Delete heat stack request rejected.") + return 1, output + + if not _wait_for_heat_stack_deleted(stack_name=stack, auth_info=auth_info): + stack_id = get_stack_values(stack=stack, fields='id', + auth_info=auth_info, con_ssh=con_ssh)[0] + get_stack_resources(stack=stack_id, auth_info=auth_info, + con_ssh=con_ssh) + + msg = "heat stack {} is not removed after stack-delete.".format(stack) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.HeatError(msg) + + succ_msg = "Heat stack {} is successfully deleted.".format(stack) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_heat_params(param_name=None): + """ + Generate parameters for heat based on keywords + + Args: + param_name (str): template to be used to create heat stack. + + Returns (str): return None if failure or the val for the given param + + """ + if param_name is 'NETWORK': + net_id = network_helper.get_mgmt_net_id() + return network_helper.get_net_name_from_id(net_id=net_id) + elif param_name is 'FLAVOR': + return 'small_ded' + elif param_name is 'IMAGE': + return GuestImages.DEFAULT['guest'] + else: + return None + + +def create_stack(stack_name, template, pre_creates=None, environments=None, + stack_timeout=None, parameters=None, param_files=None, + enable_rollback=None, dry_run=None, wait=None, tags=None, + fail_ok=False, con_ssh=None, auth_info=None, + cleanup='function', timeout=300): + """ + Create the given heat stack for a given tenant. + + Args: + stack_name (str): Given name for the heat stack + template (str): path of heat template + pre_creates (str|list|None) + environments (str|list|None) + stack_timeout (int|str|None): stack creating timeout in minutes + parameters (str|dict|None) + param_files (str|dict|None) + enable_rollback (bool|None) + dry_run (bool|None) + wait (bool|None) + tags (str|list|None) + auth_info (dict): Tenant dict. If None, primary tenant will be used. + con_ssh (SSHClient): If None, active controller ssh will be used. + timeout (int): automation timeout in seconds + fail_ok (bool): + cleanup (str|None) + + Returns (tuple): Status and msg of the heat deletion. 
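+        Examples (illustrative only; the stack name, template path and
+            parameter value below are hypothetical):
+            create_stack('tenant1_stack',
+                         template='~/custom_heat_templates/demo_stack.yaml',
+                         parameters={'IMAGE': 'tis-centos-guest'},
+                         cleanup='function')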
+ """ + + args_dict = { + '--template': template, + '--environment': environments, + '--timeout': stack_timeout, + '--pre-create': pre_creates, + '--enable-rollback': enable_rollback, + '--parameter': parameters, + '--parameter-file': param_files, + '--wait': wait, + '--tags': ','.join(tags) if isinstance(tags, (list, tuple)) else tags, + '--dry-run': dry_run, + } + args = common.parse_args(args_dict, repeat_arg=True) + LOG.info("Create Heat Stack {} with args: {}".format(stack_name, args)) + exitcode, output = cli.openstack('stack create', '{} {}'. + format(args, stack_name), + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info, timeout=timeout) + if exitcode > 0: + return 1, output + + if cleanup: + ResourceCleanup.add('heat_stack', resource_id=stack_name, scope=cleanup) + + LOG.info("Wait for Heat Stack Status to reach CREATE_COMPLETE for " + "stack %s", stack_name) + res, msg = wait_for_heat_status(stack_name=stack_name, + status=HeatStackStatus.CREATE_COMPLETE, + auth_info=auth_info, fail_ok=fail_ok) + if not res: + return 2, msg + + LOG.info("Stack {} created successfully".format(stack_name)) + return 0, stack_name + + +def update_stack(stack_name, params_string, fail_ok=False, con_ssh=None, + auth_info=None, timeout=300): + """ + Update the given heat stack for a given tenant. + + Args: + con_ssh (SSHClient): If None, active controller ssh will be used. + fail_ok (bool): + params_string: Parameters to pass to the heat create cmd. + ex: -f -P IMAGE=tis + auth_info (dict): Tenant dict. If None, primary tenant will be used. + stack_name (str): Given name for the heat stack + timeout (int) + + Returns (tuple): Status and msg of the heat deletion. + """ + + if not params_string: + raise ValueError("Parameters not provided.") + + LOG.info("Create Heat Stack %s", params_string) + exitcode, output = cli.heat('stack-update', params_string, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if exitcode == 1: + LOG.warning("Create heat stack request rejected.") + return 1, output + + LOG.info("Wait for Heat Stack Status to reach UPDATE_COMPLETE for stack %s", + stack_name) + res, msg = wait_for_heat_status(stack_name=stack_name, + status=HeatStackStatus.UPDATE_COMPLETE, + auth_info=auth_info, fail_ok=fail_ok, + timeout=timeout) + if not res: + return 2, msg + + LOG.info("Stack {} updated successfully".format(stack_name)) + return 0, stack_name + + +def get_custom_heat_files(file_name, file_dir=HEAT_CUSTOM_TEMPLATES, + cli_client=None): + """ + + Args: + file_name: + file_dir: + cli_client: + + Returns: + + """ + file_path = '{}/{}'.format(file_dir, file_name) + + if cli_client is None: + cli_client = get_cli_client() + + if not cli_client.file_exists(file_path=file_path): + LOG.debug('Create userdata directory if not already exists') + cmd = 'mkdir -p {}'.format(file_dir) + cli_client.exec_cmd(cmd, fail_ok=False) + source_file = TestServerPath.CUSTOM_HEAT_TEMPLATES + file_name + dest_path = common.scp_from_test_server_to_user_file_dir( + source_path=source_file, dest_dir=file_dir, + dest_name=file_name, timeout=300, con_ssh=cli_client) + if dest_path is None: + raise exceptions.CommonError( + "Heat template file {} does not exist after download".format( + file_path)) + + return file_path diff --git a/automated-pytest-suite/keywords/horizon_helper.py b/automated-pytest-suite/keywords/horizon_helper.py new file mode 100644 index 0000000..dbed8a9 --- /dev/null +++ b/automated-pytest-suite/keywords/horizon_helper.py @@ -0,0 +1,45 @@ +# +# Copyright (c) 2019 Wind River Systems, 
Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import os + +from utils.tis_log import LOG +from utils.horizon.helper import HorizonDriver +from consts.auth import Tenant +from consts.proj_vars import ProjVar + + +def download_openrc_files(quit_driver=True): + """ + Download openrc files from Horizon to /horizon/. + + """ + LOG.info("Download openrc files from horizon") + local_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon') + + from utils.horizon.pages import loginpage + rc_files = [] + login_pg = loginpage.LoginPage() + login_pg.go_to_target_page() + try: + for auth_info in (Tenant.get('admin'), Tenant.get('tenant1'), Tenant.get('tenant2')): + user = auth_info['user'] + password = auth_info['password'] + openrc_file = '{}-openrc.sh'.format(user) + home_pg = login_pg.login(user, password=password) + home_pg.download_rc_v3() + home_pg.log_out() + openrc_path = os.path.join(local_dir, openrc_file) + assert os.path.exists(openrc_path), "{} not found after download".format(openrc_file) + rc_files.append(openrc_path) + + finally: + if quit_driver: + HorizonDriver.quit_driver() + + LOG.info("openrc files are successfully downloaded to: {}".format(local_dir)) + return rc_files diff --git a/automated-pytest-suite/keywords/host_helper.py b/automated-pytest-suite/keywords/host_helper.py new file mode 100755 index 0000000..583051e --- /dev/null +++ b/automated-pytest-suite/keywords/host_helper.py @@ -0,0 +1,4831 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +""" +This module is for helper functions targeting one or more STX host. + +Including: +- system host-xxx commands related helper functions +(Note that system host-show, host-list related helper functions are in +system_helper.py) +- Non-system operations targeting specific host, such as ssh to a host, +sudo reboot on given host(s), etc + +""" + +import ast +import re +import os +import time +import copy +from contextlib import contextmanager +from xml.etree import ElementTree + +from consts.proj_vars import ProjVar +from consts.auth import Tenant, TestFileServer, HostLinuxUser +from consts.timeout import HostTimeout, CMDTimeout +from consts.stx import HostAvailState, HostAdminState, HostOperState, \ + Prompt, MELLANOX_DEVICE, MaxVmsSupported, EventLogID, TrafficControl, \ + PLATFORM_NET_TYPES, AppStatus, PLATFORM_AFFINE_INCOMPLETE, FlavorSpec, \ + STORAGE_AGGREGATE +from utils import cli, exceptions, table_parser +from utils.clients.ssh import ControllerClient, SSHFromSSH, SSHClient +from utils.tis_log import LOG +from keywords import system_helper, common, kube_helper, security_helper, \ + nova_helper + + +@contextmanager +def ssh_to_host(hostname, username=None, password=None, prompt=None, + con_ssh=None, timeout=60): + """ + ssh to a host from ssh client. + + Args: + hostname (str|None): host to ssh to. 
When None, return active + controller ssh + username (str): + password (str): + prompt (str): + con_ssh (SSHClient): + timeout (int) + + Returns (SSHClient): ssh client of the host + + Examples: with ssh_to_host('controller-1') as host_ssh: + host.exec_cmd(cmd) + + """ + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + if not hostname: + yield con_ssh + return + + user = username if username else HostLinuxUser.get_user() + password = password if password else HostLinuxUser.get_password() + if not prompt: + prompt = '.*' + hostname + r'\:~\$' + original_host = con_ssh.get_hostname() + if original_host != hostname: + host_ssh = SSHFromSSH(ssh_client=con_ssh, host=hostname, user=user, + password=password, initial_prompt=prompt, + timeout=timeout) + host_ssh.connect(prompt=prompt) + current_host = host_ssh.get_hostname() + if not current_host == hostname: + raise exceptions.SSHException("Current host is {} instead of " + "{}".format(current_host, hostname)) + close = True + else: + close = False + host_ssh = con_ssh + try: + yield host_ssh + finally: + if close: + host_ssh.close() + + +def reboot_hosts(hostnames, timeout=HostTimeout.REBOOT, con_ssh=None, + fail_ok=False, wait_for_offline=True, + wait_for_reboot_finish=True, check_hypervisor_up=True, + check_webservice_up=True, force_reboot=True, + check_up_time=True, auth_info=Tenant.get('admin_platform')): + """ + Reboot one or multiple host(s) + + Args: + hostnames (list|str): hostname(s) to reboot. str input is also + acceptable when only one host to be rebooted + timeout (int): timeout waiting for reboot to complete in seconds + con_ssh (SSHClient): Active controller ssh + fail_ok (bool): Whether it is okay or not for rebooting to fail on any + host + wait_for_offline (bool): Whether to wait for host to be offline after + reboot + wait_for_reboot_finish (bool): whether to wait for reboot finishes + before return + check_hypervisor_up (bool): + check_webservice_up (bool): + force_reboot (bool): whether to add -f, i.e., sudo reboot [-f] + check_up_time (bool): Whether to ensure active controller uptime is + more than 15 minutes before rebooting + auth_info + + Returns (tuple): (rtn_code, message) + (-1, "Reboot host command sent") Reboot host command is sent, but did + not wait for host to be back up + (0, "Host(s) state(s) - .") hosts rebooted and back to + available/degraded or online state. + (1, "Host(s) not in expected availability states or task unfinished. 
+ () ()" ) + (2, "Hosts not up in nova hypervisor-list: )" + (3, "Hosts web-services not active in system servicegroup-list") + """ + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + if isinstance(hostnames, str): + hostnames = [hostnames] + + reboot_active = False + active_con = system_helper.get_active_controller_name(con_ssh=con_ssh, + auth_info=auth_info) + hostnames = list(set(hostnames)) + if active_con in hostnames: + reboot_active = True + hostnames.remove(active_con) + + system_helper.get_hosts(con_ssh=con_ssh, auth_info=auth_info) + + is_simplex = system_helper.is_aio_simplex(con_ssh=con_ssh, + auth_info=auth_info) + user, password = security_helper.LinuxUser.get_current_user_password() + # reboot hosts other than active controller + cmd = 'sudo reboot -f' if force_reboot else 'sudo reboot' + + for host in hostnames: + prompt = '.*' + host + r'\:~\$' + host_ssh = SSHFromSSH(ssh_client=con_ssh, host=host, user=user, + password=password, initial_prompt=prompt) + host_ssh.connect() + current_host = host_ssh.get_hostname() + if not current_host == host: + raise exceptions.SSHException("Current host is {} instead of " + "{}".format(current_host, host)) + + LOG.info("Rebooting {}".format(host)) + host_ssh.send(cmd) + host_ssh.expect(['.*[pP]assword:.*', 'Rebooting']) + host_ssh.send(password) + con_ssh.expect(timeout=300) + + # reconnect to lab and wait for system up if rebooting active controller + if reboot_active: + if check_up_time: + LOG.info("Ensure uptime for controller(s) is at least 15 " + "minutes before rebooting.") + time_to_sleep = max(0, 910 - system_helper.get_controller_uptime( + con_ssh=con_ssh)) + time.sleep(time_to_sleep) + + LOG.info("Rebooting active controller: {}".format(active_con)) + con_ssh.send(cmd) + index = con_ssh.expect(['.*[pP]assword:.*', 'Rebooting']) + if index == 0: + con_ssh.send(password) + + if is_simplex: + _wait_for_simplex_reconnect(con_ssh=con_ssh, timeout=timeout, + auth_info=auth_info) + else: + LOG.info("Active controller reboot started. Wait for 20 seconds " + "then attempt to reconnect for " + "maximum {}s".format(timeout)) + time.sleep(20) + con_ssh.connect(retry=True, retry_timeout=timeout) + + LOG.info("Reconnected via fip. Waiting for system show cli to " + "re-enable") + _wait_for_openstack_cli_enable(con_ssh=con_ssh, auth_info=auth_info) + + if not wait_for_offline and not is_simplex: + msg = "{} cmd sent".format(cmd) + LOG.info(msg) + return -1, msg + + if hostnames: + time.sleep(30) + hostnames = sorted(hostnames) + hosts_in_rebooting = system_helper.wait_for_hosts_states( + hostnames, timeout=HostTimeout.FAIL_AFTER_REBOOT, + check_interval=10, duration=8, con_ssh=con_ssh, + availability=[HostAvailState.OFFLINE, HostAvailState.FAILED], + auth_info=auth_info) + + if not hosts_in_rebooting: + hosts_info = system_helper.get_hosts_values( + hostnames, + ['task', 'availability'], + con_ssh=con_ssh, + auth_info=auth_info) + raise exceptions.HostError("Some hosts are not rebooting. 
" + "\nHosts info:{}".format(hosts_info)) + + if reboot_active: + hostnames.append(active_con) + if not is_simplex: + system_helper.wait_for_hosts_states( + active_con, timeout=HostTimeout.FAIL_AFTER_REBOOT, + fail_ok=True, check_interval=10, duration=8, + con_ssh=con_ssh, + availability=[HostAvailState.OFFLINE, HostAvailState.FAILED], + auth_info=auth_info) + + if not wait_for_reboot_finish: + msg = 'Host(s) in offline state' + LOG.info(msg) + return -1, msg + + hosts_, admin_states = \ + system_helper.get_hosts(hostname=hostnames, + field=('hostname', 'administrative'), + con_ssh=con_ssh, auth_info=auth_info) + unlocked_hosts = [] + locked_hosts = [] + for i in range(len(hosts_)): + if admin_states[i] == HostAdminState.UNLOCKED: + unlocked_hosts.append(hosts_[i]) + elif admin_states[i] == HostAdminState.LOCKED: + locked_hosts.append(hosts_[i]) + + LOG.info("Locked: {}. Unlocked:{}".format(locked_hosts, unlocked_hosts)) + sorted_total_hosts = sorted(locked_hosts + unlocked_hosts) + if not sorted_total_hosts == hostnames: + raise exceptions.HostError("Some hosts are neither locked or unlocked. " + "\nHosts Rebooted: {}. Locked: {}; " + "Unlocked: {}".format(hostnames, + locked_hosts, + unlocked_hosts)) + unlocked_hosts_in_states = True + locked_hosts_in_states = True + if len(locked_hosts) > 0: + locked_hosts_in_states = \ + system_helper.wait_for_hosts_states(locked_hosts, + timeout=HostTimeout.REBOOT, + check_interval=10, + duration=8, con_ssh=con_ssh, + availability=['online'], + auth_info=auth_info) + + if len(unlocked_hosts) > 0: + unlocked_hosts_in_states = \ + system_helper.wait_for_hosts_states(unlocked_hosts, + timeout=HostTimeout.REBOOT, + check_interval=10, + con_ssh=con_ssh, + availability=['available', + 'degraded'], + auth_info=auth_info) + + if unlocked_hosts_in_states: + for host_unlocked in unlocked_hosts: + LOG.info("Waiting for task clear for {}".format(host_unlocked)) + system_helper.wait_for_host_values( + host_unlocked, + timeout=HostTimeout.TASK_CLEAR, fail_ok=False, + task='', auth_info=auth_info) + + LOG.info( + "Get available hosts after task clear and wait for " + "hypervsior/webservice up") + hosts_avail = system_helper.get_hosts( + availability=HostAvailState.AVAILABLE, + hostname=unlocked_hosts, + con_ssh=con_ssh, auth_info=auth_info) + + if hosts_avail and (check_hypervisor_up or check_webservice_up): + + all_nodes = system_helper.get_hosts_per_personality( + con_ssh=con_ssh, auth_info=auth_info) + computes = list(set(hosts_avail) & set(all_nodes['compute'])) + controllers = list( + set(hosts_avail) & set(all_nodes['controller'])) + if system_helper.is_aio_system(con_ssh): + computes += controllers + + if check_webservice_up and controllers: + res, hosts_webdown = wait_for_webservice_up( + controllers, fail_ok=fail_ok, con_ssh=con_ssh, + timeout=HostTimeout.WEB_SERVICE_UP, auth_info=auth_info) + if not res: + err_msg = "Hosts web-services not active in system " \ + "servicegroup-list: {}".format(hosts_webdown) + if fail_ok: + return 3, err_msg + else: + raise exceptions.HostPostCheckFailed(err_msg) + + if check_hypervisor_up and computes: + res, hosts_hypervisordown = wait_for_hypervisors_up( + computes, fail_ok=fail_ok, con_ssh=con_ssh, + timeout=HostTimeout.HYPERVISOR_UP, auth_info=auth_info) + if not res: + err_msg = "Hosts not up in nova hypervisor-list: " \ + "{}".format(hosts_hypervisordown) + if fail_ok: + return 2, err_msg + else: + raise exceptions.HostPostCheckFailed(err_msg) + + hosts_affine_incomplete = [] + for host in list(set(computes) & 
set(hosts_avail)): + if not wait_for_tasks_affined(host, fail_ok=True, + auth_info=auth_info, + con_ssh=con_ssh): + hosts_affine_incomplete.append(host) + + if hosts_affine_incomplete: + err_msg = "Hosts platform tasks affining incomplete: " \ + "{}".format(hosts_affine_incomplete) + LOG.error(err_msg) + + states_vals = {} + failure_msg = '' + for host in hostnames: + vals = system_helper.get_host_values(host, + fields=['task', 'availability'], + rtn_dict=True) + if not vals['task'] == '': + failure_msg += " {} still in task: {}.".format(host, vals['task']) + states_vals[host] = vals + from keywords.kube_helper import wait_for_nodes_ready + hosts_not_ready = wait_for_nodes_ready(hostnames, timeout=30, + con_ssh=con_ssh, fail_ok=fail_ok)[1] + if hosts_not_ready: + failure_msg += " {} not ready in kubectl get ndoes".format( + hosts_not_ready) + + message = "Host(s) state(s) - {}.".format(states_vals) + + if locked_hosts_in_states and unlocked_hosts_in_states and \ + failure_msg == '': + succ_msg = "Hosts {} rebooted successfully".format(hostnames) + LOG.info(succ_msg) + return 0, succ_msg + + err_msg = "Host(s) not in expected states or task unfinished. " + \ + message + failure_msg + if fail_ok: + LOG.warning(err_msg) + return 1, err_msg + else: + raise exceptions.HostPostCheckFailed(err_msg) + + +def recover_simplex(con_ssh=None, fail_ok=False, + auth_info=Tenant.get('admin_platform')): + """ + Ensure simplex host is unlocked, available, and hypervisor up + This function should only be called for simplex system + + Args: + con_ssh (SSHClient): + fail_ok (bool) + auth_info (dict) + + """ + if not con_ssh: + con_name = auth_info.get('region') if \ + (auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + if not con_ssh.is_connected(): + con_ssh.connect(retry=True, retry_timeout=HostTimeout.REBOOT) + + _wait_for_openstack_cli_enable(con_ssh=con_ssh, timeout=HostTimeout.REBOOT, + auth_info=auth_info) + + host = 'controller-0' + is_unlocked = \ + system_helper.get_host_values(host=host, + fields='administrative', + auth_info=auth_info, + con_ssh=con_ssh)[0] \ + == HostAdminState.UNLOCKED + + if not is_unlocked: + unlock_host(host=host, available_only=True, fail_ok=fail_ok, + con_ssh=con_ssh, auth_info=auth_info) + else: + wait_for_hosts_ready(host, fail_ok=fail_ok, check_task_affinity=False, + con_ssh=con_ssh, auth_info=auth_info) + + +def wait_for_hosts_ready(hosts, fail_ok=False, check_task_affinity=False, + con_ssh=None, auth_info=Tenant.get('admin_platform'), + timeout=None, check_interval=None): + """ + Wait for hosts to be in online state if locked, and available and + hypervisor/webservice up if unlocked + Args: + hosts: + fail_ok: whether to raise exception when fail + check_task_affinity + con_ssh: + auth_info + timeout + check_interval + + Returns: + + """ + if isinstance(hosts, str): + hosts = [hosts] + + expt_online_hosts = system_helper.get_hosts( + administrative=HostAdminState.LOCKED, hostname=hosts, con_ssh=con_ssh, + auth_info=auth_info) + expt_avail_hosts = system_helper.get_hosts( + administrative=HostAdminState.UNLOCKED, hostname=hosts, con_ssh=con_ssh, + auth_info=auth_info) + + res_lock = res_unlock = True + timeout_args = {'timeout': timeout} if timeout else {} + if check_interval: + timeout_args['check_interval'] = check_interval + from keywords import kube_helper, container_helper + if expt_online_hosts: + LOG.info("Wait for hosts to be online: {}".format(hosts)) + res_lock = 
system_helper.wait_for_hosts_states( + expt_online_hosts, + availability=HostAvailState.ONLINE, + fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=auth_info, + **timeout_args) + + res_kube = kube_helper.wait_for_nodes_ready(hosts=expt_online_hosts, + timeout=30, + con_ssh=con_ssh, + fail_ok=fail_ok)[0] + res_lock = res_lock and res_kube + + if expt_avail_hosts: + hypervisors = [] + nova_auth = Tenant.get('admin', + dc_region=auth_info.get('region') if + auth_info else None) + hosts_per_personality = system_helper.get_hosts_per_personality( + con_ssh=con_ssh, auth_info=auth_info) + if container_helper.is_stx_openstack_deployed(con_ssh=con_ssh, + auth_info=auth_info): + hypervisors = list(set( + get_hypervisors(con_ssh=con_ssh, auth_info=nova_auth)) & set( + expt_avail_hosts)) + computes = hypervisors + else: + computes = list( + set(hosts_per_personality['compute']) & set(expt_avail_hosts)) + + controllers = list( + set(hosts_per_personality['controller']) & set(expt_avail_hosts)) + + LOG.info("Wait for hosts to be available: {}".format(hosts)) + res_unlock = system_helper.wait_for_hosts_states( + expt_avail_hosts, + availability=HostAvailState.AVAILABLE, + fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=auth_info, + **timeout_args) + + if res_unlock: + res_1 = wait_for_task_clear_and_subfunction_ready( + hosts, + fail_ok=fail_ok, + auth_info=auth_info, + con_ssh=con_ssh) + res_unlock = res_unlock and res_1 + + if controllers: + LOG.info( + "Wait for webservices up for hosts: {}".format(controllers)) + res_2 = wait_for_webservice_up(controllers, fail_ok=fail_ok, + con_ssh=con_ssh, auth_info=auth_info, + timeout=HostTimeout.WEB_SERVICE_UP) + res_unlock = res_unlock and res_2 + if hypervisors: + LOG.info( + "Wait for hypervisors up for hosts: {}".format(hypervisors)) + res_3 = wait_for_hypervisors_up(hypervisors, fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=nova_auth, + timeout=HostTimeout.HYPERVISOR_UP) + res_unlock = res_unlock and res_3 + + if computes and check_task_affinity: + for host in computes: + # Do not fail the test due to task affining incomplete for + # now to unblock test case. 
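+                # The call below is therefore made with fail_ok=True: an
+                # incomplete affining result is only logged and is not
+                # folded into this keyword's return value (see the
+                # commented-out res_4 handling that follows).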
+ wait_for_tasks_affined(host, fail_ok=True, auth_info=auth_info, + con_ssh=con_ssh) + # res_4 = wait_for_tasks_affined(host=host, fail_ok=fail_ok, + # auth_info=auth_info, con_ssh=con_ssh) + # res_unlock = res_unlock and res_4 + + res_kube = \ + kube_helper.wait_for_nodes_ready(hosts=expt_avail_hosts, timeout=30, + con_ssh=con_ssh, + fail_ok=fail_ok)[0] + res_unlock = res_unlock and res_kube + + return res_lock and res_unlock + + +def wait_for_task_clear_and_subfunction_ready( + hosts, fail_ok=False, con_ssh=None, + timeout=HostTimeout.SUBFUNC_READY, + auth_info=Tenant.get('admin_platform')): + if isinstance(hosts, str): + hosts = [hosts] + + hosts_to_check = list(hosts) + LOG.info("Waiting for task clear and subfunctions enable/available " + "(if applicable) for hosts: {}".format(hosts_to_check)) + end_time = time.time() + timeout + while time.time() < end_time: + hosts_vals = system_helper.get_hosts_values( + hosts_to_check, + ['subfunction_avail', 'subfunction_oper', 'task'], + con_ssh=con_ssh, + auth_info=auth_info) + for host, vals in hosts_vals.items(): + if not vals['task'] and vals['subfunction_avail'] in \ + ('', HostAvailState.AVAILABLE) and \ + vals['subfunction_oper'] in ('', HostOperState.ENABLED): + hosts_to_check.remove(host) + + if not hosts_to_check: + LOG.info( + "Hosts task cleared and subfunctions (if applicable) are now " + "in enabled/available states") + return True + + time.sleep(10) + + err_msg = "Host(s) subfunctions are not all in enabled/available states: " \ + "{}".format(hosts_to_check) + if fail_ok: + LOG.warning(err_msg) + return False + + raise exceptions.HostError(err_msg) + + +def lock_host(host, force=False, lock_timeout=HostTimeout.LOCK, + timeout=HostTimeout.ONLINE_AFTER_LOCK, con_ssh=None, + fail_ok=False, check_first=True, swact=False, + check_cpe_alarm=True, auth_info=Tenant.get('admin_platform')): + """ + lock a host. + + Args: + host (str): hostname or id in string format + force (bool): + lock_timeout (int): max time in seconds waiting for host to goto + locked state after locking attempt. + timeout (int): how many seconds to wait for host to go online after lock + con_ssh (SSHClient): + fail_ok (bool): + check_first (bool): + swact (bool): whether to check if host is active controller and do a + swact before attempt locking + check_cpe_alarm (bool): whether to wait for cpu usage alarm gone + before locking + auth_info + + Returns: (return_code(int), msg(str)) # 1, 2, 3, 4, 5, 6 only returns + when fail_ok=True + (-1, "Host already locked. Do nothing.") + (0, "Host is locked and in online state."] + (1, ) # Lock host cli rejected + (2, "Host is not in locked state") # cli ran okay, but host did not + reach locked state within timeout + (3, "Host did not go online within seconds after (force) + lock") # Locked but didn't go online + (4, "Lock host is rejected. Details in host-show + vim_process_status.") + (5, "Lock host failed due to migrate vm failed. Details in + host-show vm_process_status.") + (6, "Task is not cleared within 180 seconds after host goes online") + + """ + host_avail, host_admin = \ + system_helper.get_host_values(host, + ('availability', 'administrative'), + con_ssh=con_ssh, auth_info=auth_info) + if host_avail in [HostAvailState.OFFLINE, HostAvailState.FAILED]: + LOG.warning("Host in offline or failed state before locking!") + + if check_first and host_admin == 'locked': + msg = "{} already locked. 
Do nothing.".format(host) + LOG.info(msg) + return -1, msg + + is_aio_dup = system_helper.is_aio_duplex(con_ssh=con_ssh, + auth_info=auth_info) + + if swact: + if system_helper.is_active_controller(host, con_ssh=con_ssh, + auth_info=auth_info) and \ + len(system_helper.get_controllers( + con_ssh=con_ssh, auth_info=auth_info, + operational=HostOperState.ENABLED)) > 1: + LOG.info("{} is active controller, swact first before attempt to " + "lock.".format(host)) + swact_host(host, auth_info=auth_info, con_ssh=con_ssh) + if is_aio_dup: + time.sleep(90) + + if check_cpe_alarm and is_aio_dup: + LOG.info( + "For AIO-duplex, wait for cpu usage high alarm gone on active " + "controller before locking standby") + active_con = system_helper.get_active_controller_name( + con_ssh=con_ssh, auth_info=auth_info) + entity_id = 'host={}'.format(active_con) + system_helper.wait_for_alarms_gone( + [(EventLogID.CPU_USAGE_HIGH, entity_id)], check_interval=45, + fail_ok=fail_ok, con_ssh=con_ssh, timeout=300, auth_info=auth_info) + + positional_arg = host + extra_msg = '' + if force: + positional_arg += ' --force' + extra_msg = 'force ' + + LOG.info("Locking {}...".format(host)) + exitcode, output = cli.system('host-lock', positional_arg, + ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if exitcode == 1: + return 1, output + + table_ = table_parser.table(output) + task_val = table_parser.get_value_two_col_table(table_, field='task') + admin_val = table_parser.get_value_two_col_table(table_, + field='administrative') + + if admin_val != HostAdminState.LOCKED: + if 'Locking' not in task_val: + system_helper.wait_for_host_values(host=host, timeout=30, + check_interval=0, fail_ok=True, + task='Locking', + con_ssh=con_ssh, + auth_info=auth_info) + + # Wait for task complete. If task stucks, fail the test regardless. + # Perhaps timeout needs to be increased. + system_helper.wait_for_host_values(host=host, timeout=lock_timeout, + task='', fail_ok=False, + con_ssh=con_ssh, + auth_info=auth_info) + + if not system_helper.wait_for_host_values( + host, timeout=20, + administrative=HostAdminState.LOCKED, + con_ssh=con_ssh, + auth_info=auth_info): + + # vim_progress_status | Lock of host compute-0 rejected because + # there are no other hypervisors available. + vim_status = \ + system_helper.get_host_values(host, + fields='vim_progress_status', + auth_info=auth_info, + con_ssh=con_ssh, + merge_lines=True)[0] + if re.search('ock .* host .* rejected.*', vim_status): + msg = "Lock host {} is rejected. Details in host-show " \ + "vim_process_status.".format(host) + code = 4 + elif re.search('Migrate of instance .* from host .* failed.*', + vim_status): + msg = "Lock host {} failed due to migrate vm failed. Details " \ + "in host-show vm_process_status.".format(host) + code = 5 + else: + msg = "Host is not in locked state" + code = 2 + + if fail_ok: + return code, msg + raise exceptions.HostPostCheckFailed(msg) + + LOG.info("{} is {}locked. Waiting for it to go Online...".format(host, + extra_msg)) + + if system_helper.wait_for_host_values(host, timeout=timeout, + availability=HostAvailState.ONLINE, + auth_info=auth_info, con_ssh=con_ssh): + # ensure the online status lasts for more than 5 seconds. Sometimes + # host goes online then offline to reboot.. 
+ time.sleep(5) + if system_helper.wait_for_host_values( + host, timeout=timeout, + availability=HostAvailState.ONLINE, + auth_info=auth_info, + con_ssh=con_ssh): + if system_helper.wait_for_host_values( + host, + timeout=HostTimeout.TASK_CLEAR, + task='', auth_info=auth_info, + con_ssh=con_ssh): + LOG.info("Host is successfully locked and in online state.") + return 0, "Host is locked and in online state." + else: + msg = "Task is not cleared within {} seconds after host goes " \ + "online".format(HostTimeout.TASK_CLEAR) + if fail_ok: + LOG.warning(msg) + return 6, msg + raise exceptions.HostPostCheckFailed(msg) + + msg = "Host did not go online within {} seconds after {}lock".format( + timeout, extra_msg) + if fail_ok: + return 3, msg + else: + raise exceptions.HostPostCheckFailed(msg) + + +def _wait_for_simplex_reconnect(con_ssh=None, + timeout=HostTimeout.CONTROLLER_UNLOCK, + auth_info=Tenant.get('admin_platform'), + duplex_direct=False): + time.sleep(30) + if not con_ssh: + con_name = auth_info.get('region') if \ + (auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + con_ssh.wait_for_disconnect(check_interval=10, timeout=300) + time.sleep(30) + con_ssh.connect(retry=True, retry_timeout=timeout) + ControllerClient.set_active_controller(con_ssh) + + if not duplex_direct: + # Give it sometime before openstack cmds enables on after host + _wait_for_openstack_cli_enable(con_ssh=con_ssh, auth_info=auth_info, + fail_ok=False, timeout=timeout, + check_interval=10, + reconnect=True, single_node=True) + time.sleep(10) + LOG.info("Re-connected via ssh and openstack CLI enabled") + + +def unlock_host(host, timeout=HostTimeout.CONTROLLER_UNLOCK, + available_only=True, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + check_hypervisor_up=True, + check_webservice_up=True, check_subfunc=True, check_first=True, + con0_install=False, + check_containers=True): + """ + Unlock given host + Args: + host (str): + timeout (int): MAX seconds to wait for host to become available or + degraded after unlocking + available_only(bool): if True, wait for host becomes Available after + unlock; otherwise wait for either + Degraded or Available + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + check_hypervisor_up (bool): Whether to check if host is up in nova + hypervisor-list + check_webservice_up (bool): Whether to check if host's web-service is + active in system servicegroup-list + check_subfunc (bool): whether to check subfunction_oper and + subfunction_avail for CPE system + check_first (bool): whether to check host state before unlock. + con0_install (bool) + check_containers (bool) + + Returns (tuple): Only -1, 0, 4 senarios will be returned if fail_ok=False + (-1, "Host already unlocked. Do nothing") + (0, "Host is unlocked and in available state.") + (1, ) # cli returns stderr. only applicable if fail_ok + (2, "Host is not in unlocked state") # only applicable if fail_ok + (3, "Host state did not change to available or degraded within + timeout") # only applicable if fail_ok + (4, "Host is in degraded state after unlocked.") # Only applicable + if available_only=False + (5, "Task is not cleared within 180 seconds after host goes + available") # Applicable if fail_ok + (6, "Host is not up in nova hypervisor-list") # Host with compute + function only. Applicable if fail_ok + (7, "Host web-services is not active in system servicegroup-list") # + controllers only. 
Applicable if fail_ok + (8, "Failed to wait for host to reach Available state after unlocked + to Degraded state") + # only applicable if fail_ok and available_only are True + (9, "Host subfunctions operational and availability are not enable + and available system host-show") # CPE only + (10, " is not ready in kubectl get nodes after unlock") + + """ + LOG.info("Unlocking {}...".format(host)) + if not con_ssh: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + if check_first: + if system_helper.get_host_values(host, 'availability', con_ssh=con_ssh, + auth_info=auth_info)[0] in \ + [HostAvailState.OFFLINE, HostAvailState.FAILED]: + LOG.info( + "Host is offline or failed, waiting for it to go online, " + "available or degraded first...") + system_helper.wait_for_host_values(host, availability=[ + HostAvailState.AVAILABLE, HostAvailState.ONLINE, + HostAvailState.DEGRADED], con_ssh=con_ssh, + fail_ok=False, + auth_info=auth_info) + + if system_helper.get_host_values(host, 'administrative', + con_ssh=con_ssh, + auth_info=auth_info)[0] == \ + HostAdminState.UNLOCKED: + message = "Host already unlocked. Do nothing" + LOG.info(message) + return -1, message + + is_simplex = system_helper.is_aio_simplex(con_ssh=con_ssh, + auth_info=auth_info) + + from keywords import kube_helper, container_helper + check_stx = prev_bad_pods = None + if check_containers: + check_stx = container_helper.is_stx_openstack_deployed( + applied_only=True, con_ssh=con_ssh, auth_info=auth_info) + prev_bad_pods = kube_helper.get_unhealthy_pods(node=host, + con_ssh=con_ssh, + all_namespaces=True) + exitcode, output = cli.system('host-unlock', host, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info, + timeout=60) + if exitcode == 1: + return 1, output + + if is_simplex or con0_install: + time.sleep(120) + _wait_for_simplex_reconnect(con_ssh=con_ssh, auth_info=auth_info, + timeout=timeout) + + if not system_helper.wait_for_host_values( + host, timeout=60, + administrative=HostAdminState.UNLOCKED, + con_ssh=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info): + return 2, "Host is not in unlocked state" + + if not system_helper.wait_for_host_values( + host, timeout=timeout, fail_ok=fail_ok, + check_interval=10, con_ssh=con_ssh, auth_info=auth_info, + availability=[HostAvailState.AVAILABLE, HostAvailState.DEGRADED]): + return 3, "Host state did not change to available or degraded within " \ + "timeout" + + if not system_helper.wait_for_host_values(host, + timeout=HostTimeout.TASK_CLEAR, + fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info, + task=''): + return 5, "Task is not cleared within {} seconds after host goes " \ + "available".format(HostTimeout.TASK_CLEAR) + + if check_hypervisor_up or check_webservice_up or check_subfunc: + + subfunc, personality = system_helper.get_host_values( + host, fields=('subfunctions', 'personality'), + con_ssh=con_ssh, auth_info=auth_info) + string_total = subfunc + personality + + is_controller = 'controller' in string_total + is_compute = bool(re.search('compute|worker', string_total)) + + if check_hypervisor_up and is_compute: + if container_helper.is_stx_openstack_deployed(con_ssh=con_ssh, + auth_info=auth_info): + nova_auth = Tenant.get('admin', dc_region=auth_info.get( + 'region') if auth_info else None) + if not wait_for_hypervisors_up( + host, fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=nova_auth, + timeout=HostTimeout.HYPERVISOR_UP)[0]: + return 6, "Host is not 
up in nova hypervisor-list" + + if not is_simplex: + # wait_for_tasks_affined(host, con_ssh=con_ssh) + # Do not fail the test due to task affining incomplete for + # now to unblock test case. + wait_for_tasks_affined(host, con_ssh=con_ssh, fail_ok=True) + + if check_webservice_up and is_controller: + if not \ + wait_for_webservice_up(host, fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info, timeout=300)[0]: + return 7, "Host web-services is not active in system " \ + "servicegroup-list" + + if check_subfunc and is_controller and is_compute: + # wait for subfunction states to be operational enabled and + # available + if not system_helper.wait_for_host_values( + host, timeout=90, + fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=auth_info, + subfunction_oper=HostOperState.ENABLED, + subfunction_avail=HostAvailState.AVAILABLE): + err_msg = "Host subfunctions operational and availability " \ + "did not change to enabled and available" \ + " within timeout" + LOG.warning(err_msg) + return 9, err_msg + + if check_containers: + from keywords import kube_helper, container_helper + + res_nodes = kube_helper.wait_for_nodes_ready(hosts=host, timeout=180, + con_ssh=con_ssh, + fail_ok=fail_ok)[0] + res_app = True + if check_stx: + res_app = container_helper.wait_for_apps_status( + apps='stx-openstack', + status=AppStatus.APPLIED, + auth_info=auth_info, + con_ssh=con_ssh, + check_interval=10, + fail_ok=fail_ok)[0] + + res_pods = kube_helper.wait_for_pods_healthy(check_interval=10, + con_ssh=con_ssh, + fail_ok=fail_ok, + node=host, + name=prev_bad_pods, + exclude=True, + all_namespaces=True) + + if not (res_nodes and res_app and res_pods): + err_msg = "Container check failed after unlock {}".format(host) + return 10, err_msg + + if system_helper.get_host_values(host, 'availability', con_ssh=con_ssh, + auth_info=auth_info)[0] == \ + HostAvailState.DEGRADED: + if not available_only: + LOG.warning("Host is in degraded state after unlocked.") + return 4, "Host is in degraded state after unlocked." + else: + if not system_helper.wait_for_host_values( + host, timeout=timeout, + fail_ok=fail_ok, + check_interval=10, + con_ssh=con_ssh, + availability=HostAvailState.AVAILABLE, + auth_info=auth_info): + err_msg = "Failed to wait for host to reach Available state " \ + "after unlocked to Degraded state" + LOG.warning(err_msg) + return 8, err_msg + + LOG.info( + "Host {} is successfully unlocked and in available state".format(host)) + return 0, "Host is unlocked and in available state." + + +def unlock_hosts(hosts, timeout=HostTimeout.CONTROLLER_UNLOCK, fail_ok=True, + con_ssh=None, + auth_info=Tenant.get('admin_platform'), + check_hypervisor_up=False, check_webservice_up=False, + check_nodes_ready=True, check_containers=False): + """ + Unlock given hosts. Please use unlock_host() keyword if only one host + needs to be unlocked. + Args: + hosts (list|str): Host(s) to unlock + timeout (int): MAX seconds to wait for host to become available or + degraded after unlocking + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + check_hypervisor_up (bool): Whether to check if host is up in nova + hypervisor-list + check_webservice_up (bool): Whether to check if host's web-service is + active in system servicegroup-list + check_nodes_ready (bool) + check_containers (bool) + + + Returns (dict): {host_0: res_0, host_1: res_1, ...} + where res is a tuple as below, and scenario 1, 2, 3 only applicable + if fail_ok=True + (-1, "Host already unlocked. 
Do nothing") + (0, "Host is unlocked and in available state.") + (1, ) + (2, "Host is not in unlocked state") + (3, "Host is not in available or degraded state.") + (4, "Host is in degraded state after unlocked.") + (5, "Host is not up in nova hypervisor-list") # Host with compute + function only + (6, "Host web-services is not active in system servicegroup-list") # + controllers only + (7, "Host platform tasks affining incomplete") + (8, "Host status not ready in kubectl get nodes") + + """ + if not hosts: + raise ValueError("No host(s) provided to unlock.") + + LOG.info("Unlocking {}...".format(hosts)) + + if isinstance(hosts, str): + hosts = [hosts] + + res = {} + hosts_to_unlock = list(set(hosts)) + for host in hosts: + if system_helper.get_host_values(host, 'administrative', + con_ssh=con_ssh, + auth_info=auth_info)[0] == \ + HostAdminState.UNLOCKED: + message = "Host already unlocked. Do nothing" + + res[host] = -1, message + hosts_to_unlock.remove(host) + + if not hosts_to_unlock: + LOG.info("Host(s) already unlocked. Do nothing.") + return res + + if len(hosts_to_unlock) != len(hosts): + LOG.info("Some host(s) already unlocked. Unlocking the rest: {}".format( + hosts_to_unlock)) + + is_simplex = system_helper.is_aio_simplex(con_ssh=con_ssh, + auth_info=auth_info) + + check_stx = prev_bad_pods = None + if check_containers: + from keywords import kube_helper, container_helper + check_stx = container_helper.is_stx_openstack_deployed( + applied_only=True, con_ssh=con_ssh, auth_info=auth_info) + prev_bad_pods = kube_helper.get_unhealthy_pods(con_ssh=con_ssh, + all_namespaces=True) + + hosts_to_check = [] + for host in hosts_to_unlock: + exitcode, output = cli.system('host-unlock', host, ssh_client=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info, timeout=60) + if exitcode == 1: + res[host] = 1, output + else: + hosts_to_check.append(host) + + if not hosts_to_check: + LOG.warning("Unlock host(s) rejected: {}".format(hosts_to_unlock)) + return res + + if is_simplex: + _wait_for_simplex_reconnect(con_ssh=con_ssh, + timeout=HostTimeout.CONTROLLER_UNLOCK, + auth_info=auth_info) + + if not system_helper.wait_for_hosts_states( + hosts_to_check, timeout=60, + administrative=HostAdminState.UNLOCKED, + con_ssh=con_ssh, + auth_info=auth_info): + LOG.warning("Some host(s) not in unlocked states after 60 seconds.") + + if not system_helper.wait_for_hosts_states( + hosts_to_check, timeout=timeout, check_interval=10, + con_ssh=con_ssh, auth_info=auth_info, + availability=[HostAvailState.AVAILABLE, HostAvailState.DEGRADED]): + LOG.warning( + "Some host(s) state did not change to available or degraded " + "within timeout") + + hosts_vals = system_helper.get_hosts(hostname=hosts_to_check, + field=('hostname', 'availability'), + administrative=HostAdminState.UNLOCKED, + con_ssh=con_ssh, auth_info=auth_info) + hosts_unlocked, hosts_avails_, = hosts_vals + indices = range(len(hosts_unlocked)) + hosts_not_unlocked = list(set(hosts_to_check) - set(hosts_unlocked)) + hosts_avail = [hosts_unlocked[i] for i in indices if + hosts_avails_[i].lower() == HostAvailState.AVAILABLE] + hosts_degrd = [hosts_unlocked[i] for i in indices if + hosts_avails_[i].lower() == HostAvailState.DEGRADED] + hosts_other = list( + set(hosts_unlocked) - set(hosts_avail) - set(hosts_degrd)) + + for host in hosts_not_unlocked: + res[host] = 2, "Host is not in unlocked state." + for host in hosts_degrd: + res[host] = 4, "Host is in degraded state after unlocked." 
+ for host in hosts_other: + res[host] = 3, "Host is not in available or degraded state." + + if hosts_avail and (check_hypervisor_up or check_webservice_up): + + all_nodes = system_helper.get_hosts_per_personality(con_ssh=con_ssh, + auth_info=auth_info) + computes = list(set(hosts_avail) & set(all_nodes['compute'])) + controllers = list(set(hosts_avail) & set(all_nodes['controller'])) + if system_helper.is_aio_system(con_ssh, auth_info=auth_info): + computes += controllers + + if check_hypervisor_up and computes: + nova_auth = Tenant.get('admin', dc_region=auth_info.get( + 'region') if auth_info else None) + hosts_hypervisordown = \ + wait_for_hypervisors_up(computes, fail_ok=fail_ok, + con_ssh=con_ssh, + timeout=HostTimeout.HYPERVISOR_UP, + auth_info=nova_auth)[1] + for host in hosts_hypervisordown: + res[host] = 5, "Host is not up in nova hypervisor-list" + hosts_avail = list(set(hosts_avail) - set(hosts_hypervisordown)) + + if check_webservice_up and controllers: + hosts_webdown = wait_for_webservice_up(controllers, fail_ok=fail_ok, + con_ssh=con_ssh, timeout=180, + auth_info=auth_info)[1] + for host in hosts_webdown: + res[host] = 6, "Host web-services is not active in system " \ + "servicegroup-list" + hosts_avail = list(set(hosts_avail) - set(hosts_webdown)) + + hosts_affine_incomplete = [] + for host in list(set(computes) & set(hosts_avail)): + if not wait_for_tasks_affined(host, fail_ok=True, + auth_info=auth_info): + msg = "Host {} platform tasks affining incomplete".format(host) + hosts_affine_incomplete.append(host) + + # Do not fail the test due to task affining incomplete for + # now to unblock test case. + LOG.error(msg) + # res[host] = 7, + # hosts_avail = list(set(hosts_avail) - set(hosts_affine_incomplete)) + + if check_nodes_ready and (hosts_avail or hosts_degrd): + from keywords import kube_helper, container_helper + + hosts_to_wait = list(hosts_avail) + hosts_to_wait += hosts_degrd + res_nodes, hosts_not_ready = kube_helper.wait_for_nodes_ready( + hosts=hosts_to_wait, timeout=180, con_ssh=con_ssh, + fail_ok=fail_ok) + if hosts_not_ready: + hosts_avail = list(set(hosts_avail) - set(hosts_not_ready)) + for host in hosts_not_ready: + res[host] = 8, "Host status not ready in kubectl get nodes" + + if check_containers: + res_app = True + if check_stx: + res_app = container_helper.wait_for_apps_status( + apps='stx-openstack', + status=AppStatus.APPLIED, + con_ssh=con_ssh, + check_interval=10, + fail_ok=fail_ok)[0] + res_pods = kube_helper.wait_for_pods_healthy(check_interval=10, + con_ssh=con_ssh, + fail_ok=fail_ok, + name=prev_bad_pods, + exclude=True, + all_namespaces=True) + if not (res_app and res_pods): + err_msg = "Application status or pods status check failed " \ + "after unlock {}".format(hosts) + hosts_to_update = list( + (set(hosts_to_wait) - set(hosts_not_ready))) + hosts_avail = [] + for host_ in hosts_to_update: + res[host_] = 9, err_msg + + for host in hosts_avail: + res[host] = 0, "Host is unlocked and in available state." + + if not len(res) == len(hosts): + raise exceptions.CommonError( + "Something wrong with the keyword. Number of hosts in result is " + "incorrect.") + + if not fail_ok: + for host in res: + if res[host][0] not in [-1, 0, 4]: + raise exceptions.HostPostCheckFailed( + " Not all host(s) unlocked successfully. 
Detail: {}".format( + res)) + + LOG.info("Results for unlocking hosts: {}".format(res)) + return res + + +def _wait_for_openstack_cli_enable(con_ssh=None, timeout=HostTimeout.SWACT, + fail_ok=False, check_interval=10, + reconnect=True, single_node=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for 'system show' cli to work on active controller. Also wait for + host task to clear and subfunction ready. + Args: + con_ssh: + timeout: + fail_ok: + check_interval: + reconnect: + auth_info + + Returns (bool): + + """ + from keywords import container_helper + + if not con_ssh: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + def check_sysinv_cli(): + + cli.system('show', ssh_client=con_ssh, auth_info=auth_info, + timeout=10) + time.sleep(10) + active_con = system_helper.get_active_controller_name( + con_ssh=con_ssh, auth_info=auth_info) + + if ((single_node or ( + single_node is None and system_helper.is_aio_simplex())) and + system_helper.get_host_values(active_con, + fields='administrative')[ + 0] == HostAdminState.LOCKED): + LOG.info( + "Simplex system in locked state. Wait for task to clear only") + system_helper.wait_for_host_values(host=active_con, + timeout=HostTimeout.LOCK, + task='', con_ssh=con_ssh, + auth_info=auth_info) + else: + wait_for_task_clear_and_subfunction_ready(hosts=active_con, + con_ssh=con_ssh, + auth_info=auth_info) + is_openstack_applied = container_helper.is_stx_openstack_deployed( + con_ssh=con_ssh, auth_info=auth_info) + LOG.info("system cli and subfunction enabled") + return is_openstack_applied + + def check_nova_cli(): + region = auth_info.get('region', None) if auth_info else None + nova_auth = Tenant.get('admin', dc_region=region) + cli.openstack('server list', ssh_client=con_ssh, auth_info=nova_auth, + timeout=10) + LOG.info("nova cli enabled") + + cli_enable_end_time = time.time() + timeout + LOG.info( + "Waiting for system cli and subfunctions to be ready and nova cli (if " + "stx-openstack applied) to be " + "enabled on active controller") + check_nova = None + while time.time() < cli_enable_end_time: + try: + if check_nova is None: + check_nova = check_sysinv_cli() + if check_nova: + check_nova_cli() + return True + except: + if not con_ssh.is_connected(): + if reconnect: + LOG.info( + "con_ssh connection lost while waiting for system to " + "recover. Attempt to reconnect...") + con_ssh.connect(retry_timeout=timeout, retry=True) + else: + LOG.error("system disconnected") + if fail_ok: + return False + raise + + time.sleep(check_interval) + + err_msg = "Timed out waiting for system to recover. Time waited: {}".format( + timeout) + if fail_ok: + LOG.warning(err_msg) + return False + raise TimeoutError(err_msg) + + +def swact_host(hostname=None, swact_start_timeout=HostTimeout.SWACT, + swact_complete_timeout=HostTimeout.SWACT, + fail_ok=False, auth_info=Tenant.get('admin_platform'), + con_ssh=None, wait_for_alarm=False): + """ + Swact active controller from given hostname. + + Args: + hostname (str|None): When None, active controller will be used for + swact. 
+ swact_start_timeout (int): Max time to wait between cli executes and + swact starts + swact_complete_timeout (int): Max time to wait for swact to complete + after swact started + fail_ok (bool): + con_ssh (SSHClient): + auth_info + wait_for_alarm (bool),: whether to wait for pre-swact alarms after swact + + Returns (tuple): (rtn_code(int), msg(str)) # 1, 3, 4 only returns + when fail_ok=True + (0, "Active controller is successfully swacted.") + (1, ) # swact host cli rejected + (2, " is not active controller host, thus swact request + failed as expected.") + (3, "Swact did not start within ") + (4, "Active controller did not change after swact within + ") + + """ + active_host = system_helper.get_active_controller_name(con_ssh=con_ssh, + auth_info=auth_info) + if hostname is None: + hostname = active_host + + pre_alarms = None + if wait_for_alarm: + pre_alarms = system_helper.get_alarms(con_ssh=con_ssh, + auth_info=auth_info) + + exitcode, msg = cli.system('host-swact', hostname, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if exitcode == 1: + return 1, msg + + if hostname != active_host: + system_helper.wait_for_host_values(hostname, + timeout=swact_start_timeout, + fail_ok=False, con_ssh=con_ssh, + auth_info=auth_info, task='') + return 2, "{} is not active controller host, thus swact request " \ + "failed as expected.".format(hostname) + else: + rtn = wait_for_swact_complete( + hostname, con_ssh, swact_start_timeout=swact_start_timeout, + auth_info=auth_info, swact_complete_timeout=swact_complete_timeout, + fail_ok=fail_ok) + if rtn[0] == 0: + nova_auth = Tenant.get('admin', dc_region=auth_info.get( + 'region') if auth_info else None) + try: + res = wait_for_webservice_up( + system_helper.get_active_controller_name(), + fail_ok=fail_ok, + auth_info=auth_info, con_ssh=con_ssh)[0] + if not res: + return 5, "Web-services for new controller is not active" + + if system_helper.is_aio_duplex(con_ssh=con_ssh, + auth_info=auth_info): + hypervisor_up_res = wait_for_hypervisors_up(hostname, + fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=nova_auth) + if not hypervisor_up_res: + return 6, "Hypervisor state is not up for {} after " \ + "swacted".format(hostname) + + for host in ('controller-0', 'controller-1'): + task_aff_res = wait_for_tasks_affined(host, con_ssh=con_ssh, + fail_ok=True, + auth_info=auth_info, + timeout=300) + if not task_aff_res: + msg = "tasks affining incomplete on {} after swact " \ + "from {}".format(host, hostname) + # Do not fail the test due to task affining + # incomplete for now to unblock test case. + LOG.error(msg=msg) + return 7, msg + finally: + # After swact, there is a delay for alarms to re-appear on new + # active controller, thus the wait. + if pre_alarms: + post_alarms = system_helper.get_alarms(con_ssh=con_ssh, + auth_info=auth_info) + for alarm in pre_alarms: + if alarm not in post_alarms: + alarm_id, entity_id = alarm.split('::::') + system_helper.wait_for_alarm(alarm_id=alarm_id, + entity_id=entity_id, + fail_ok=True, timeout=300, + check_interval=15, + auth_info=auth_info) + + return rtn + + +def wait_for_swact_complete(before_host, con_ssh=None, + swact_start_timeout=HostTimeout.SWACT, + swact_complete_timeout=HostTimeout.SWACT, + fail_ok=True, + auth_info=Tenant.get('admin_platform')): + """ + Wait for swact to start and complete + NOTE: This function assumes swact command was run from ssh session using + floating ip!! 
+ + Args: + before_host (str): Active controller name before swact request + con_ssh (SSHClient): + swact_start_timeout (int): Max time to wait between cli executs and + swact starts + swact_complete_timeout (int): Max time to wait for swact to complete + after swact started + fail_ok + auth_info + + Returns (tuple): + (0, "Active controller is successfully swacted.") + (3, "Swact did not start within ") # returns + when fail_ok=True + (4, "Active controller did not change after swact within + ") # returns when fail_ok=True + (5, "400.001 alarm is not cleared within timeout after swact") + (6, "tasks affining incomplete on ") + + """ + if con_ssh is None: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + fip_disconnected = con_ssh.wait_for_disconnect(fail_ok=fail_ok, + timeout=swact_start_timeout) + if not fip_disconnected: + return 3, "Swact did not start within {}".format(swact_start_timeout) + + LOG.info( + "ssh to {} OAM floating IP disconnected, indicating swact " + "initiated.".format( + con_ssh.host)) + + # permission denied is received when ssh right after swact initiated. Add + # delay to avoid sanity failure + time.sleep(30) + con_ssh.connect(retry=True, retry_timeout=swact_complete_timeout - 30) + + # Give it sometime before openstack cmds enables on after host + _wait_for_openstack_cli_enable(con_ssh=con_ssh, fail_ok=False, + timeout=swact_complete_timeout, + auth_info=auth_info) + + after_host = system_helper.get_active_controller_name(con_ssh=con_ssh, + auth_info=auth_info) + LOG.info( + "Host before swacting: {}, host after swacting: {}".format(before_host, + after_host)) + + if before_host == after_host: + if fail_ok: + return 4, "Active controller did not change after swact within " \ + "{}".format(swact_complete_timeout) + raise exceptions.HostPostCheckFailed( + "Swact failed. Active controller host did not change") + + drbd_res = system_helper.wait_for_alarm_gone( + alarm_id=EventLogID.CON_DRBD_SYNC, entity_id=after_host, + strict=False, fail_ok=fail_ok, timeout=300, con_ssh=con_ssh, + auth_info=auth_info) + if not drbd_res: + return 5, "400.001 alarm is not cleared within timeout after swact" + + return 0, "Active controller is successfully swacted." + + +def wait_for_hypervisors_up(hosts, timeout=HostTimeout.HYPERVISOR_UP, + check_interval=5, fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Wait for given hypervisors to be up and enabled in nova hypervisor-list + Args: + hosts (list|str): names of the hypervisors, such as compute-0 + timeout (int): + check_interval (int): + fail_ok (bool): + con_ssh (SSHClient): + auth_info + + Returns (tuple): res_bool(bool), hosts_not_up(list) + (True, []) # all hypervisors given are up and enabled + (False, [] # some hosts are not up and enabled + + """ + if isinstance(hosts, str): + hosts = [hosts] + + hypervisors = get_hypervisors(con_ssh=con_ssh, auth_info=auth_info) + + if not set(hosts) <= set(hypervisors): + msg = "Some host(s) not in nova hypervisor-list. Host(s) given: {}. 
" \ + "Hypervisors: {}".format(hosts, hypervisors) + raise exceptions.HostPreCheckFailed(msg) + + hosts_to_check = list(hosts) + LOG.info("Waiting for {} to be up in nova hypervisor-list...".format(hosts)) + end_time = time.time() + timeout + while time.time() < end_time: + up_hosts = get_hypervisors(state='up', con_ssh=con_ssh, + auth_info=auth_info) + for host in hosts_to_check: + if host in up_hosts: + hosts_to_check.remove(host) + + if not hosts_to_check: + msg = "Host(s) {} are up and enabled in nova " \ + "hypervisor-list".format(hosts) + LOG.info(msg) + return True, hosts_to_check + + time.sleep(check_interval) + else: + msg = "Host(s) {} are not up in hypervisor-list within timeout".format( + hosts_to_check) + if fail_ok: + LOG.warning(msg) + return False, hosts_to_check + raise exceptions.HostTimeout(msg) + + +def wait_for_webservice_up(hosts, timeout=HostTimeout.WEB_SERVICE_UP, + check_interval=5, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + if isinstance(hosts, str): + hosts = [hosts] + + hosts_to_check = list(hosts) + LOG.info( + "Waiting for {} to be active for web-service in system " + "servicegroup-list...".format( + hosts_to_check)) + end_time = time.time() + timeout + + while time.time() < end_time: + # need to check for strict True because 'go-active' state is not + # 'active' state + active_hosts = \ + system_helper.get_servicegroups(fields='hostname', + service_group_name='web-services', + strict=True, + con_ssh=con_ssh, + auth_info=auth_info) + + for host in hosts: + if host in active_hosts and host in hosts_to_check: + hosts_to_check.remove(host) + + if not hosts_to_check: + msg = "Host(s) {} are active for web-service in system " \ + "servicegroup-list".format(hosts) + LOG.info(msg) + return True, hosts_to_check + + time.sleep(check_interval) + else: + msg = "Host(s) {} are not active for web-service in system " \ + "servicegroup-list within timeout".format(hosts_to_check) + if fail_ok: + LOG.warning(msg) + return False, hosts_to_check + raise exceptions.HostTimeout(msg) + + +def get_hosts_in_storage_backing(storage_backing='local_image', up_only=True, + hosts=None, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Return a list of hosts that supports the given storage backing. + + System: Regular, Small footprint + + Args: + hosts (None|list|tuple): hosts to check + storage_backing (str): 'local_image', or 'remote' + up_only (bool): whether to return only up hypervisors + con_ssh (SSHClient): + auth_info + + Returns (tuple): + such as ('compute-0', 'compute-2', 'compute-1', 'compute-3') + or () if no host supports this storage backing + + """ + storage_backing = storage_backing.strip().lower() + if 'image' in storage_backing: + storage_backing = 'local_image' + elif 'remote' in storage_backing: + storage_backing = 'remote' + else: + raise ValueError("Invalid storage backing provided. " + "Please use one of these: 'local_image', 'remote'") + + hosts_per_backing = get_hosts_per_storage_backing(up_only=up_only, + con_ssh=con_ssh, + auth_info=auth_info, + hosts=hosts) + return hosts_per_backing.get(storage_backing, []) + + +def get_up_hypervisors(con_ssh=None, auth_info=Tenant.get('admin')): + return get_hypervisors(state='up', con_ssh=con_ssh, auth_info=auth_info) + + +def get_hypervisors(state=None, field='Hypervisor Hostname', + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Return a list of hypervisors names in specified state and status. If None + is set to state and status, + all hypervisors will be returned. 
+ + System: Regular + + Args: + state (str): e.g., 'up', 'down' + con_ssh (SSHClient): + field (str|list|tuple): target header. e.g., ID, Hypervisor hostname + auth_info + + Returns (list): a list of hypervisor names. Return () if no match found. + Always return () for small footprint lab. i.e., do not work with + small footprint lab + """ + table_ = table_parser.table( + cli.openstack('hypervisor list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + kwargs = {'State': state} if state else {} + return table_parser.get_multi_values(table_, field, **kwargs) + + +def _get_element_tree_virsh_xmldump(instance_name, host_ssh): + code, output = host_ssh.exec_sudo_cmd( + cmd='virsh dumpxml {}'.format(instance_name)) + if not 0 == code: + raise exceptions.SSHExecCommandFailed( + "virsh dumpxml failed to execute.") + + element_tree = ElementTree.fromstring(output) + return element_tree + + +def get_values_virsh_xmldump(instance_name, host_ssh, tag_paths, + target_type='element'): + """ + + Args: + instance_name (str): instance_name of a vm. Such as 'instance-00000002' + host_ssh (SSHFromSSH): ssh of the host that hosting the given instance + tag_paths (str|list|tuple): the tag path to reach to the target + element. such as 'memoryBacking/hugepages/page' + target_type (str): 'element', 'dict', 'text' + + Returns (list): list of Elements, dictionaries, or strings based on the + target_type param. + + """ + target_type = target_type.lower().strip() + root_element = _get_element_tree_virsh_xmldump(instance_name, host_ssh) + + is_str = False + if isinstance(tag_paths, str): + is_str = True + tag_paths = [tag_paths] + + values_list = [] + for tag_path_ in tag_paths: + elements = root_element.findall(tag_path_) + + if 'dict' in target_type: + dics = [] + for element in elements: + dics.append(element.attrib) + values_list.append(dics) + + elif 'text' in target_type: + texts = [] + for element in elements: + text_list = list(element.itertext()) + if not text_list: + LOG.warning( + "No text found under tag: {}.".format(tag_path_)) + else: + texts.append(text_list[0]) + if len(text_list) > 1: + LOG.warning(( + "More than one text found under tag: " + "{}, returning the first one.". + format(tag_path_))) + + values_list.append(texts) + + else: + values_list.append(elements) + + if is_str: + return values_list[0] + else: + return values_list + + +def _get_actual_mems(host): + headers = ('mem_avail(MiB)', 'app_hp_total_1G', 'app_hp_pending_1G') + displayed_mems = get_host_memories(host=host, headers=headers, + wait_for_update=False) + + actual_mems = {} + for proc in displayed_mems: + mem_avail, total_1g, pending_1g = displayed_mems[proc] + actual_1g = total_1g if pending_1g is None else pending_1g + + args = '-2M {} {} {}'.format(mem_avail, host, proc) + code, output = cli.system('host-memory-modify', args, fail_ok=True) + if code == 0: + raise exceptions.SysinvError( + 'system host-memory-modify is not rejected when 2M pages ' + 'exceeds mem_avail') + + # Processor 0:No available space for 2M huge page allocation, max 2M + # VM pages: 27464 + actual_mem = int(re.findall(r'max 2M pages: (\d+)', output)[0]) * 2 + actual_mems[proc] = (actual_mem, actual_1g) + + return actual_mems + + +def wait_for_memory_update(host, proc_id=None, expt_1g=None, timeout=420, + auth_info=Tenant.get('admin_platform')): + """ + Wait for host memory to be updated after modifying and unlocking host. 
+ Args: + host: + proc_id (int|list|None): + expt_1g (int|list|None): + timeout: + auth_info + + Returns: + + """ + proc_id_type = type(proc_id) + if not isinstance(expt_1g, proc_id_type): + raise ValueError("proc_id and expt_1g have to be the same type") + + pending_2m = pending_1g = -1 + headers = ['app_hp_total_1G', 'app_hp_pending_1G', 'app_hp_pending_2M'] + current_time = time.time() + end_time = current_time + timeout + pending_end_time = current_time + 120 + while time.time() < end_time: + host_mems = get_host_memories(host, headers, proc_id=proc_id, + wait_for_update=False, + auth_info=auth_info) + for proc in host_mems: + current_1g, pending_1g, pending_2m = host_mems[proc] + if not (pending_2m is None and pending_1g is None): + break + else: + if time.time() > pending_end_time: + LOG.info("Pending memories are None for at least 120 seconds") + break + time.sleep(15) + else: + err = "Pending memory after {}s. Pending 2M: {}; Pending 1G: {}".format( + timeout, pending_2m, pending_1g) + assert 0, err + + if expt_1g: + if isinstance(expt_1g, int): + expt_1g = [expt_1g] + proc_id = [proc_id] + + for i in range(len(proc_id)): + actual_1g = host_mems[proc_id[i]][0] + expt = expt_1g[i] + assert expt == actual_1g, "{} proc{} 1G pages - actual: {}, " \ + "expected: {}". \ + format(host, proc_id[i], actual_1g, expt_1g) + + +def modify_host_memory(host, proc, gib_1g=None, gib_4k_range=None, + actual_mems=None, fail_ok=False, + con_ssh=None, auth_into=Tenant.get('admin_platform')): + """ + + Args: + host (str): + proc (int|str) + gib_1g (None|int): 1g page to set + gib_4k_range (None|tuple): + None: no requirement on 4k page + tuple: (min_val(None|int), max_val(None|int)) make sure 4k page + total gib fall between the range (inclusive) + actual_mems + con_ssh + auth_into + fail_ok + + Returns (tuple): + + """ + args = '' + if not actual_mems: + actual_mems = _get_actual_mems(host=host) + mib_avail, page_1g = actual_mems[proc] + + if gib_1g is not None: + page_1g = gib_1g + args += ' -1G {}'.format(gib_1g) + mib_avail_2m = mib_avail - page_1g * 1024 + + if gib_4k_range: + min_4k, max_4k = gib_4k_range + if not (min_4k is None and max_4k is None): + if min_4k is None: + gib_4k_final = max(0, max_4k - 2) + elif max_4k is None: + gib_4k_final = min_4k + 2 + else: + gib_4k_final = (min_4k + max_4k) / 2 + mib_avail_2m = mib_avail_2m - gib_4k_final * 1024 + + page_2m = int(mib_avail_2m / 2) + args += ' -2M {} {} {}'.format(page_2m, host, proc) + + code, output = cli.system('host-memory-modify', args, ssh_client=con_ssh, + auth_info=auth_into, fail_ok=fail_ok) + if code > 0: + return 1, output + + LOG.info("{} memory modified successfully".format(host)) + return 0, page_2m + + +def modify_host_cpu(host, cpu_function, timeout=CMDTimeout.HOST_CPU_MODIFY, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), **kwargs): + """ + Modify host cpu to given key-value pairs. i.e., system host-cpu-modify -f + -p + Notes: This assumes given host is already locked. + + Args: + host (str): hostname of host to be modified + cpu_function (str): cpu function to modify. e.g., 'vSwitch', 'platform' + timeout (int): Timeout waiting for system host-cpu-modify cli to return + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + **kwargs: processor id and number of cores pair(s). e.g., p0=1, p1=1 + + Returns (tuple): (rtn_code(int), message(str)) + (0, "Host cpu function modified successfully") + (1, ) # cli rejected + (2, "Number of actual log_cores for is different than + number set. 
Actual: , expect: ") + + """ + LOG.info( + "Modifying host {} CPU function {} to {}".format(host, cpu_function, + kwargs)) + + if not kwargs: + raise ValueError( + "At least one key-value pair such as p0=1 has to be provided.") + + final_args = {} + proc_args = '' + for proc, cores in kwargs.items(): + if cores is not None: + final_args[proc] = cores + cores = str(cores) + proc_args = ' '.join([proc_args, '-' + proc.lower().strip(), cores]) + + if not final_args: + raise ValueError("cores values cannot be all None") + + if not proc_args: + raise ValueError( + "At least one key-value pair should have non-None value. e.g., " + "p1=2") + + subcmd = ' '.join( + ['host-cpu-modify', '-f', cpu_function.lower().strip(), proc_args]) + code, output = cli.system(subcmd, host, ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info, timeout=timeout) + + if code == 1: + return 1, output + + LOG.info("Post action check for host-cpu-modify...") + table_ = table_parser.table(output) + threads = len(set(table_parser.get_column(table_, 'thread'))) + + table_ = table_parser.filter_table(table_, assigned_function=cpu_function) + + for proc, num in final_args.items(): + num = int(num) + proc_id = re.findall(r'\d+', proc)[0] + expt_cores = threads * num + actual_cores = len( + table_parser.get_values(table_, 'log_core', processor=proc_id)) + if expt_cores != actual_cores: + msg = "Number of actual log_cores for {} is different than " \ + "number set. Actual: {}, expect: {}". \ + format(proc, actual_cores, expt_cores) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.HostPostCheckFailed(msg) + + msg = "Host cpu function modified successfully" + LOG.info(msg) + return 0, msg + + +def add_host_interface(host, if_name, ports_or_ifs, if_type=None, pnet=None, + ae_mode=None, tx_hash_policy=None, + vlan_id=None, mtu=None, if_class=None, network=None, + ipv4_mode=None, ipv6_mode=None, + ipv4_pool=None, ipv6_pool=None, lock_unlock=True, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + + Args: + host: + if_name: + ports_or_ifs: + if_type: + pnet: + ae_mode: + tx_hash_policy: + vlan_id: + mtu: + if_class: + network: + ipv4_mode: + ipv6_mode: + ipv4_pool: + ipv6_pool: + lock_unlock: + fail_ok: + con_ssh: + auth_info: + + Returns: + + """ + if lock_unlock: + lock_host(host=host, con_ssh=con_ssh, swact=True, fail_ok=False) + + if isinstance(ports_or_ifs, str): + ports_or_ifs = [ports_or_ifs] + args = '{} {}{}{} {}'.format(host, if_name, + ' ' + if_type if if_type else '', + ' ' + pnet if pnet else '', + ' '.join(ports_or_ifs)) + opt_args_dict = { + '--aemode': ae_mode, + '--txhashpolicy': tx_hash_policy, + '--vlan_id': vlan_id, + '--imtu': mtu, + '--ifclass': if_class, + '--networks': network, + '--ipv4-mode': ipv4_mode, + '--ipv6-mode': ipv6_mode, + '--ipv4-pool': ipv4_pool, + '--ipv6-pool': ipv6_pool, + } + + opt_args = '' + for key, val in opt_args_dict.items(): + if val is not None: + opt_args += '{} {} '.format(key, val) + + args = '{} {}'.format(args, opt_args).strip() + code, out = cli.system('host-if-add', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, out + + if lock_unlock: + unlock_host(host, con_ssh=con_ssh) + + msg = "Interface {} successfully added to {}".format(if_name, host) + LOG.info(msg) + + return 0, msg + + +def modify_host_interface(host, interface, pnet=None, ae_mode=None, + tx_hash_policy=None, + mtu=None, if_class=None, network=None, ipv4_mode=None, + ipv6_mode=None, + ipv4_pool=None, ipv6_pool=None, 
sriov_vif_count=None, + new_if_name=None, + lock_unlock=True, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + + Args: + host: + interface: + pnet: + ae_mode: + tx_hash_policy: + mtu: + if_class: + network: + ipv4_mode: + ipv6_mode: + ipv4_pool: + ipv6_pool: + sriov_vif_count: + new_if_name: + lock_unlock: + fail_ok: + con_ssh: + auth_info: + + Returns: + + """ + if lock_unlock: + lock_host(host=host, con_ssh=con_ssh, swact=True, fail_ok=False) + + args = '{} {}'.format(host, interface) + opt_args_dict = { + '--ifname': new_if_name, + '--aemode': ae_mode, + '--txhashpolicy': tx_hash_policy, + '--imtu': mtu, + '--ifclass': if_class, + '--networks': network, + '--ipv4-mode': ipv4_mode, + '--ipv6-mode': ipv6_mode, + '--ipv4-pool': ipv4_pool, + '--ipv6-pool': ipv6_pool, + '--num-vfs': sriov_vif_count, + '--providernetworks': pnet, + } + + opt_args = '' + for key, val in opt_args_dict.items(): + if val is not None: + opt_args += '{} {} '.format(key, val) + + args = '{} {}'.format(args, opt_args).strip() + code, out = cli.system('host-if-modify', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, out + + if lock_unlock: + unlock_host(host, con_ssh=con_ssh) + + msg = "{} interface {} is successfully modified".format(host, interface) + LOG.info(msg) + + return 0, msg + + +def compare_host_to_cpuprofile(host, profile_uuid): + """ + Compares the cpu function assignments of a host and a cpu profile. + + Args: + host (str): name of host + profile_uuid (str): name or uuid of the cpu profile + + Returns (tuple): (rtn_code(int), message(str)) + (0, "The host and cpu profile have the same information") + (2, "The function of one of the cores has not been changed correctly: + ") + + """ + if not host or not profile_uuid: + raise ValueError("There is either no host or no cpu profile given.") + + def check_range(core_group, core_num): + group = [] + if isinstance(core_group, str): + group.append(core_group) + elif isinstance(core_group, list): + for proc in core_group: + group.append(proc) + + for processors in group: + parts = processors.split(' ') + cores = parts[len(parts) - 1] + ranges = cores.split(',') + for range_ in ranges: + if range_ == '': + continue + range_ = range_.split('-') + if len(range_) == 2: + if int(range_[0]) <= int(core_num) <= int(range_[1]): + return True + elif len(range_) == 1: + if int(range_[0]) == int(core_num): + return True + LOG.warn("Could not match {} in {}".format(core_num, core_group)) + return False + + table_ = table_parser.table(cli.system('host-cpu-list', host)[1]) + functions = table_parser.get_column(table_=table_, + header='assigned_function') + + table_ = table_parser.table(cli.system('cpuprofile-show', profile_uuid)[1]) + + platform_cores = table_parser.get_value_two_col_table(table_, + field='platform ' + 'cores') + vswitch_cores = table_parser.get_value_two_col_table(table_, + field='vswitch cores') + shared_cores = table_parser.get_value_two_col_table(table_, + field='shared cores') + vm_cores = table_parser.get_value_two_col_table(table_, field='vm cores') + + msg = "The function of one of the cores has not been changed correctly: " + + for i in range(0, len(functions)): + if functions[i] == 'Platform': + if not check_range(platform_cores, i): + LOG.warning(msg + str(i)) + return 2, msg + str(i) + elif functions[i] == 'vSwitch': + if not check_range(vswitch_cores, i): + LOG.warning(msg + str(i)) + return 2, msg + str(i) + elif functions[i] == 'Shared': + if not check_range(shared_cores, 
i): + LOG.warning(msg + str(i)) + return 2, msg + str(i) + elif functions[i] == 'Applications': + if not check_range(vm_cores, i): + LOG.warning(msg + str(i)) + return 2, msg + str(i) + + msg = "The host and cpu profile have the same information" + return 0, msg + + +def apply_host_cpu_profile(host, profile_uuid, + timeout=CMDTimeout.CPU_PROFILE_APPLY, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Apply the given cpu profile to the host. + Assumes the host is already locked. + + Args: + host (str): name of host + profile_uuid (str): name or uuid of the cpu profile + timeout (int): timeout to wait for cli to return + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): (rtn_code(int), message(str)) + (0, "cpu profile applied successfully") + (1, ) # cli rejected + (2, "The function of one of the cores has not been changed correctly: + ") + """ + if not host or not profile_uuid: + raise ValueError("There is either no host or no cpu profile given.") + + LOG.info("Applying cpu profile: {} to host: {}".format(profile_uuid, host)) + + code, output = cli.system('host-apply-cpuprofile', + '{} {}'.format(host, profile_uuid), + ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info, + timeout=timeout) + + if 1 == code: + LOG.warning(output) + return 1, output + + LOG.info("Post action host-apply-cpuprofile") + res, out = compare_host_to_cpuprofile(host, profile_uuid) + + if res != 0: + LOG.warning(output) + return res, out + + success_msg = "cpu profile applied successfully" + LOG.info(success_msg) + return 0, success_msg + + +def get_host_cpu_cores_for_function(hostname, func='vSwitch', + core_type='log_core', thread=0, + con_ssh=None, + auth_info=Tenant.get('admin_platform'), + rtn_dict_per_proc=True): + """ + Get processor/logical cpu cores/per processor on thread 0 for given + function for host via system host-cpu-list + + Args: + hostname (str): hostname to pass to system host-cpu-list + func (str|tuple|list): such as 'Platform', 'vSwitch', or 'Applications' + core_type (str): 'phy_core' or 'log_core' + thread (int|None): thread number. 0 or 1 + con_ssh (SSHClient): + auth_info (dict): + rtn_dict_per_proc (bool) + + Returns (dict|list): format: { (int): (list), ...} + e.g., {0: [1, 2], 1: [21, 22]} + + """ + table_ = get_host_cpu_list_table(hostname, con_ssh=con_ssh, + auth_info=auth_info) + procs = list(set(table_parser.get_values(table_, 'processor', + thread=thread))) if \ + rtn_dict_per_proc else [ + None] + res = {} + + convert = False + if isinstance(func, str): + func = [func] + convert = True + + for proc in procs: + funcs_cores = [] + for func_ in func: + if func_: + func_ = 'Applications' if func_.lower() == 'vms' else func_ + cores = table_parser.get_values(table_, core_type, processor=proc, + assigned_function=func_, + thread=thread) + funcs_cores.append(sorted([int(item) for item in cores])) + + if convert: + funcs_cores = funcs_cores[0] + + if proc is not None: + res[int(str(proc))] = funcs_cores + else: + res = funcs_cores + break + + LOG.info("{} {} {}s: {}".format(hostname, func, core_type, res)) + return res + + +def get_logcores_counts(host, proc_ids=(0, 1), thread='0', functions=None, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get number of logical cores on given processor on thread 0. 
+ + Args: + host: + proc_ids: + thread (str|list): '0' or ['0', '1'] + con_ssh: + functions (list|str) + auth_info + + Returns (list): + + """ + table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, + auth_info=auth_info) + table_ = table_parser.filter_table(table_, thread=thread) + + rtns = [] + kwargs = {} + if functions: + kwargs = {'assigned_function': functions} + + for i in proc_ids: + cores_on_proc = table_parser.get_values(table_, 'log_core', + processor=str(i), **kwargs) + LOG.info("Cores on proc {}: {}".format(i, cores_on_proc)) + rtns.append(len(cores_on_proc)) + + return rtns + + +def get_host_procs(hostname, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + table_ = get_host_cpu_list_table(host=hostname, con_ssh=con_ssh, + auth_info=auth_info) + procs = table_parser.get_column(table_, 'processor') + return sorted(list(set(procs))) + + +def get_expected_vswitch_port_engine_map(host_ssh): + """ + Get expected ports and vswitch cores mapping via vshell port-list and + vshell engine-list + + Args: + host_ssh (SSHClient): ssh of a nova host + + Returns (dict): format: { (str): (list), ...} + e.g., {'0': ['1', '2'], '1': ['1', '2']} + + """ + ports_tab = table_parser.table( + host_ssh.exec_cmd("vshell port-list", fail_ok=False)[1]) + ports_tab = table_parser.filter_table(ports_tab, type='physical') + + cores_tab = table_parser.table( + host_ssh.exec_cmd("vshell engine-list", fail_ok=False)[1]) + + header = 'socket' if 'socket' in ports_tab['headers'] else 'socket-id' + sockets_for_ports = sorted(int(item) for item in list( + set(table_parser.get_column(ports_tab, header)))) + sockets_for_cores = sorted(int(item) for item in list( + set(table_parser.get_column(cores_tab, 'socket-id')))) + expt_map = {} + if sockets_for_ports == sockets_for_cores: + for socket in sockets_for_ports: + soc_ports = table_parser.get_values(ports_tab, 'id', + **{header: str(socket)}) + soc_cores = sorted(int(item) for item in + table_parser.get_values(cores_tab, 'cpuid', + **{'socket-id': str( + socket)})) + for port in soc_ports: + expt_map[port] = soc_cores + + else: + all_ports = table_parser.get_column(ports_tab, 'id') + all_cores = sorted( + int(item) for item in table_parser.get_column(cores_tab, 'cpuid')) + for port in all_ports: + expt_map[port] = all_cores + + return expt_map + + +def get_host_instance_backing(host, con_ssh=None, auth_info=Tenant.get('admin'), + fail_ok=False, refresh=False): + """ + Get instance backing for host. 
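+    Example (host name is illustrative only):
+        get_host_instance_backing('compute-1') returns 'remote' when the
+        nova-compute pod on that host is configured with images_type=rbd,
+        and 'local_image' otherwise.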
+ + Args: + host (str): + con_ssh: + auth_info (dict) + fail_ok: + refresh (bool): if not refresh, it will try to get the value from + existing global var if already exist + + Returns (str): remote, local_image, or '' (if unable to get host backing + from nova conf) + + """ + instance_backings = ProjVar.get_var('INSTANCE_BACKING') + if not refresh and instance_backings: + for backing, hosts in instance_backings.items(): + if host in hosts: + return backing + + config = kube_helper.get_openstack_configs(conf_file='/etc/nova/nova.conf', + configs={ + 'libvirt': 'images_type'}, + node=host, + label_app='nova', + label_component='compute', + con_ssh=con_ssh) + images_type = list(config.values())[0].get('libvirt', 'images_type', + fallback='') + if not images_type: + if fail_ok: + return '' + raise exceptions.NovaError( + 'images_type cannot be determined from {} nova-compute pod'.format( + host)) + + host_backing = 'remote' if images_type == 'rbd' else 'local_image' + LOG.info("{} instance backing: {}".format(host, host_backing)) + if host_backing not in instance_backings: + instance_backings[host_backing] = [] + + for backing, hosts_with_backing in instance_backings.items(): + if host_backing == backing and host not in hosts_with_backing: + instance_backings[backing].append(host) + elif host_backing != backing and host in hosts_with_backing: + instance_backings[backing].remove(host) + + ProjVar.set_var(INSTANCE_BACKING=instance_backings) + + return host_backing + + +def assign_host_labels(host, labels, default_value='enabled', check_first=True, + lock=True, unlock=True, fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin_platform')): + """ + Assign given labels to host + Args: + host: + labels (dict|list): when list of label names instead dict, + use default_value for each label + default_value (str): + check_first: + lock: + unlock: + fail_ok: + con_ssh: + auth_info: + + Returns (tuple): + (-1, "Host already have expected labels: . Do nothing.") + (0, (dict)) + (1, ) + + """ + if isinstance(labels, (list, tuple)): + labels = {label: default_value for label in labels} + + if check_first: + existing_labels = get_host_labels_info(host, con_ssh=con_ssh, + auth_info=auth_info) + for label, expt_val in labels.items(): + if expt_val != existing_labels.get(label, 'disabled'): + LOG.debug( + "{} label needs to assigned to {}".format(label, host)) + break + else: + msg = "{} already have expected labels: {}. 
Do nothing.".format( + host, labels) + LOG.info(msg) + return -1, msg + + if lock: + lock_host(host, con_ssh=con_ssh, swact=True, auth_info=auth_info) + + args = '{} {}'.format(host, ' '.join( + ['{}={}'.format(key, val) for key, val in labels.items()])) + code, output = cli.system('host-label-assign', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + LOG.info("{} label(s) assigned: {}".format(host, labels)) + if unlock: + unlock_host(host, con_ssh=con_ssh, auth_info=auth_info) + + post_labels = get_host_labels_info(host, con_ssh=con_ssh, + auth_info=auth_info) + for label_, expt_val in labels.items(): + if expt_val != post_labels.get(label_, 'disabled'): + raise exceptions.SysinvError( + 'Unexpected value for {} label {}'.format(host, label_)) + + LOG.info("{} label(s) removed: {}".format(host, labels)) + + return 0, labels + + +def get_host_labels_info(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get host labels + Args: + host (str): + con_ssh: + auth_info: + + Returns (dict): key/value pairs of host labels + + """ + output = cli.system('host-label-list --nowrap', host, ssh_client=con_ssh, + auth_info=auth_info)[1] + table_ = table_parser.table(output) + label_keys = table_parser.get_column(table_, 'label key') + label_values = table_parser.get_column(table_, 'label value') + + labels_info = {label_keys[i]: label_values[i] for i in + range(len(label_keys))} + return labels_info + + +def remove_host_labels(host, labels, check_first=True, lock=True, unlock=True, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Remove given labels from host + Args: + host: + labels (tuple|list): labels to remove + check_first: + lock: + unlock: + fail_ok: + con_ssh: + auth_info: + + Returns (tuple): + (-1, "Host already have expected labels: . Do nothing.") + (0, (list)) + (1, ) + + """ + if isinstance(labels, str): + labels = [labels] + + labels_to_remove = labels + if check_first: + existing_labels = get_host_labels_info(host, con_ssh=con_ssh, + auth_info=auth_info) + labels_to_remove = list(set(labels) & set(existing_labels)) + if not labels_to_remove: + msg = "{} does not have any of these labels to remove: {}. 
Do " \ + "nothing.".format(host, labels) + LOG.info(msg) + return -1, msg + + if lock: + lock_host(host, con_ssh=con_ssh, swact=True, auth_info=auth_info) + + args = '{} {}'.format(host, ' '.join(labels_to_remove)) + code, output = cli.system('host-label-remove', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + if unlock: + unlock_host(host, con_ssh=con_ssh, auth_info=auth_info) + + post_labels = get_host_labels_info(host, con_ssh=con_ssh, + auth_info=auth_info) + unremoved_labels = list(set(labels) & set(post_labels)) + if unremoved_labels: + raise exceptions.SysinvError( + "{} labels still exist after removal: {}".format(host, + unremoved_labels)) + + LOG.info("{} label(s) removed: {}".format(host, labels)) + + return 0, labels + + +def set_host_storage_backing(host, inst_backing, lock=True, unlock=True, + wait_for_configured=True, check_first=True, + fail_ok=False, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + + Args: + host (str): host to modify lvg for + inst_backing (str): image, or remote + wait_for_configured (bool): Whether or not wait for host instance + backing change via system host-lvg-show + lock (bool): whether or not to lock host before modify + unlock (bool): whether or not to unlock host and verify config after + modify + check_first + fail_ok (bool): whether or not raise exception if host-label-assign + fails + auth_info (dict): + con_ssh (SSHClient): + + Returns: + + """ + if wait_for_configured and not unlock: + raise ValueError("'wait_for_configured=True' requires 'unlock=True'") + + label = { + 'remote-storage': 'enabled' if inst_backing == 'remote' else 'disabled'} + code, output = assign_host_labels(host, labels=label, lock=lock, + unlock=unlock, fail_ok=fail_ok, + check_first=check_first, + auth_info=auth_info, con_ssh=con_ssh) + if code > 0: + return 1, 'Failed to assign label to {}: {}'.format(host, output) + + if wait_for_configured: + nova_auth = Tenant.get('admin', dc_region=auth_info.get( + 'region') if auth_info else None) + res = wait_for_host_in_instance_backing(host=host, + storage_backing=inst_backing, + fail_ok=fail_ok, + auth_info=nova_auth) + if not res: + err = "Host {} is not in {} lvg within timeout".format( + host, inst_backing) + return 2, err + + return 0, "{} storage backing is successfully set to {}".format( + host, inst_backing) + + +def wait_for_host_in_instance_backing(host, storage_backing, timeout=120, + check_interval=3, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Wait for host instance backing to be given value via system host-lvg-show + Args: + host (str): + storage_backing: local_image or remote + timeout: + check_interval: + fail_ok: + con_ssh: + auth_info + + Returns: + + """ + storage_backing = 'local_image' if 'image' in storage_backing else \ + storage_backing + end_time = time.time() + timeout + while time.time() < end_time: + host_backing = get_host_instance_backing(host=host, con_ssh=con_ssh, + refresh=True, + auth_info=auth_info) + if host_backing in storage_backing: + LOG.info("{} is configured with {} backing".format( + host, storage_backing)) + time.sleep(30) + return True + + time.sleep(check_interval) + + err_msg = "Timed out waiting for {} to appear in {} host-aggregate".format( + host, storage_backing) + if fail_ok: + LOG.warning(err_msg) + return False + else: + raise exceptions.HostError(err_msg) + + +def __parse_total_cpus(output): + last_line = output.splitlines()[-1] + print(last_line) + # Final resource view: 
name=controller-0 phys_ram=44518MB used_ram=0MB + # phys_disk=141GB used_disk=1GB + # free_disk=133GB total_vcpus=31 used_vcpus=0.0 pci_stats=[PciDevicePool( + # count=1,numa_node=0,product_id='0522', + # tags={class_id='030000',configured='1',dev_type='type-PCI'}, + # vendor_id='102b')] + total = round(float(re.findall(r'used_vcpus=([\d|.]*) ', last_line)[0]), 4) + return total + + +def get_vcpus_per_proc(hosts=None, thread=None, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + if not hosts: + hosts = get_up_hypervisors(con_ssh=con_ssh) + elif isinstance(hosts, str): + hosts = [hosts] + + vcpus_per_proc = {} + for host in hosts: + vcpus_per_proc[host] = {} + cpus_per_proc = get_host_cpu_cores_for_function(host, + func='Applications', + thread=thread, + auth_info=auth_info, + con_ssh=con_ssh) + with ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + cmd = """ps-sched.sh|grep qemu|grep " CPU" |awk '{{print $10;}}'""" + cores = host_ssh.exec_cmd(cmd)[1] + cores = [int(core.strip()) for core in cores.splitlines()] + + for proc, total_vcpus_per_proc in cpus_per_proc.items(): + used_cores = list(set(total_vcpus_per_proc) & set(cores)) + vcpus_per_proc[host][proc] = (used_cores, total_vcpus_per_proc) + + return vcpus_per_proc + + +def get_vcpus_for_computes(hosts=None, field='vcpus_used', con_ssh=None): + """ + Get vcpus info for given computes via openstack hypervisor show + Args: + hosts: + field (str): valid values: vcpus_used, vcpus, vcpus_avail + con_ssh: + + Returns (dict): host(str),cpu_val(float with 4 digits after decimal + point) pairs as dictionary + + """ + if hosts is None: + hosts = get_up_hypervisors(con_ssh=con_ssh) + elif isinstance(hosts, str): + hosts = [hosts] + + if field == 'used_now': + field = 'vcpus_used' + + if 'avail' not in field: + hosts_cpus = get_hypervisor_info(hosts=hosts, field=field, + con_ssh=con_ssh) + else: + cpus_info = get_hypervisor_info(hosts=hosts, + field=('vcpus', 'vcpus_used'), + con_ssh=con_ssh) + hosts_cpus = {} + for host in hosts: + total_cpu, used_cpu = cpus_info[host] + hosts_cpus[host] = float(total_cpu) - float(used_cpu) + + return hosts_cpus + + +def get_hypervisor_info(hosts, field='status', con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get info from openstack hypervisor show for specified field + Args: + hosts (str|list): hostname(s) + field (str|list|tuple): field(s) in hypervisor show table + con_ssh: + auth_info: + + Returns (dict): {(str): val(str|list), ...} + """ + if isinstance(hosts, str): + hosts = [hosts] + + convert_to_str = False + if isinstance(field, str): + field = [field] + convert_to_str = True + + hosts_vals = {} + for host in hosts: + table_ = table_parser.table( + cli.openstack('hypervisor show --fit-width', host, + ssh_client=con_ssh, + auth_info=auth_info)[1], combine_multiline_entry=True) + vals = [] + for field_ in field: + val = table_parser.get_value_two_col_table(table_, field=field_, + strict=True, + merge_lines=True) + try: + val = eval(val) + except (NameError, SyntaxError): + pass + vals.append(val) + if convert_to_str: + vals = vals[0] + hosts_vals[host] = vals + + LOG.info("Hosts_info: {}".format(hosts_vals)) + return hosts_vals + + +def _get_host_logcores_per_thread(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, + auth_info=auth_info) + threads = list(set(table_parser.get_column(table_, 'thread'))) + cores_per_thread = {} + for thread in threads: + table_thread = table_parser.filter_table(table_, strict=True, 
+ regex=False, thread=thread) + cores_str = table_parser.get_column(table_thread, 'log_core') + cores_per_thread[int(thread)] = [int(core) for core in cores_str] + + return cores_per_thread + + +def get_thread_num_for_cores(log_cores, host, con_ssh=None): + cores_per_thread = _get_host_logcores_per_thread(host=host, con_ssh=con_ssh) + + core_thread_dict = {} + for thread, cores_for_thread in cores_per_thread.items(): + for core in log_cores: + if int(core) in cores_for_thread: + core_thread_dict[core] = thread + + if len(core_thread_dict) == len(log_cores): + return core_thread_dict + else: + raise exceptions.HostError( + "Cannot find thread num for all cores provided. Cores provided: " + "{}. Threads found: {}".format(log_cores, core_thread_dict)) + + +def get_logcore_siblings(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get cpu pairs for given host. + Args: + host (str): such as compute-1 + con_ssh (SSHClient): + auth_info (dict) + + Returns (list): list of log_core_siblings(tuple). Output examples: + - HT enabled: [[0, 20], [1, 21], ..., [19, 39]] + - HT disabled: [[0], [1], ..., [19]] + """ + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, + auth_info=auth_info) + phy_cores = sorted( + [int(i) for i in set(table_parser.get_column(table_, 'phy_core'))]) + + sibling_pairs = [] + for phy_core in phy_cores: + log_cores = table_parser.get_values(table_, 'log_core', + **{'phy_core': str(phy_core)}) + sibling_pairs.append(log_cores) + + LOG.info("Sibling cores for {}: {}".format(host, sibling_pairs)) + return sibling_pairs + + +def get_vcpu_pins_for_instance_via_virsh(host_ssh, instance_name): + vcpu_pins = get_values_virsh_xmldump(instance_name=instance_name, + host_ssh=host_ssh, + tag_paths='cputune/vcpupin', + target_type='dict') + return vcpu_pins + + +def get_hosts_per_storage_backing(up_only=True, con_ssh=None, + auth_info=Tenant.get('admin'), hosts=None, + refresh=False): + """ + Get hosts for each possible storage backing + Args: + up_only (bool): whether to return up hypervisor only + auth_info + con_ssh: + hosts (None|list|tuple): hosts to check + refresh (bool) + + Returns (dict): {'local_image': , + 'remote': + } + """ + instance_backings = ProjVar.get_var('INSTANCE_BACKING') + if instance_backings and not refresh and not up_only: + return instance_backings + + if not hosts: + host_func = get_up_hypervisors if up_only else get_hypervisors + hosts = host_func(con_ssh=con_ssh, auth_info=auth_info) + elif isinstance(hosts, str): + hosts = (hosts,) + + for host in hosts: + backing = get_host_instance_backing(host=host, con_ssh=con_ssh, + fail_ok=True, refresh=refresh) + if not backing: + LOG.warning('{} instance backing cannot be determined'.format(host)) + + post_instance_backings = ProjVar.get_var('INSTANCE_BACKING') + LOG.info("Hosts per storage backing: {}".format(post_instance_backings)) + if not ProjVar.get_var( + 'DEFAULT_INSTANCE_BACKING') or post_instance_backings != \ + instance_backings: + # Host backing changed. 
As a result, + # if system has more than 1 instance backings across nova + # hypervisors, nova aggregates need to be created + # in order to restrict openstack vms onto host(s) with specific + # instance backing + configured_backings = [backing for backing in post_instance_backings if + post_instance_backings.get(backing)] + default_local_storage = 'remote' + if 'local_image' in configured_backings: + default_local_storage = 'local_image' + if len(post_instance_backings.get('remote', [])) > len( + post_instance_backings['local_image']): + default_local_storage = 'remote' + + ProjVar.set_var(DEFAULT_INSTANCE_BACKING=default_local_storage) + if len(configured_backings) > 1: + # More than 1 instance backings across nova hosts + # Need to configure host aggregates + aggregates = nova_helper.get_aggregates(con_ssh=con_ssh, + auth_info=auth_info) + for inst_backing in configured_backings: + expt_hosts = sorted(post_instance_backings[inst_backing]) + aggregate_name = STORAGE_AGGREGATE[inst_backing] + if aggregate_name not in aggregates: + nova_helper.create_aggregate(name=aggregate_name, + avail_zone='nova', + check_first=False, + con_ssh=con_ssh, + auth_info=auth_info) + properties = {} + hosts_in_aggregate = [] + else: + properties, hosts_in_aggregate = \ + nova_helper.get_aggregate_values( + aggregate_name, + fields=('properties', 'hosts'), + con_ssh=con_ssh, auth_info=auth_info) + + property_key = FlavorSpec.STORAGE_BACKING.split(':')[-1].strip() + if property_key not in properties: + nova_helper.set_aggregate( + aggregate_name, + properties={property_key: inst_backing}, + con_ssh=con_ssh, auth_info=auth_info) + + if expt_hosts != sorted(hosts_in_aggregate): + hosts_to_remove = list( + set(hosts_in_aggregate) - set(expt_hosts)) + hosts_to_add = list( + set(expt_hosts) - set(hosts_in_aggregate)) + if hosts_to_add: + nova_helper.add_hosts_to_aggregate( + aggregate=aggregate_name, hosts=hosts_to_add, + check_first=False, con_ssh=con_ssh, + auth_info=auth_info) + if hosts_to_remove: + nova_helper.remove_hosts_from_aggregate( + aggregate=aggregate_name, hosts=hosts_to_remove, + check_first=False, con_ssh=con_ssh, + auth_info=auth_info) + + return {backing: hosts_ for backing, hosts_ in + post_instance_backings.items() if set(hosts_) & set(hosts)} + + +def get_coredumps_and_crashreports(move=True): + """ + Get core dumps and crash reports from every host + Args: + move: whether to move coredumps and crashreports to local automation dir + + Returns (dict): + + """ + LOG.info( + "Getting existing system crash reports from /var/crash/ and coredumps " + "from /var/lib/systemd/coredump/") + hosts_to_check = system_helper.get_hosts( + availability=(HostAvailState.FAILED, HostAvailState.OFFLINE), + exclude=True) + + core_dumps_and_reports = {} + active_con = system_helper.get_active_controller_name() + con_ssh = ControllerClient.get_active_controller() + con_dir = '{}/coredumps_and_crashreports/'.format(HostLinuxUser.get_home()) + con_ssh.exec_cmd('mkdir -p {}'.format(con_dir)) + scp_to_local = False + ls_cmd = 'ls -l --time-style=+%Y-%m-%d_%H-%M-%S {} | grep --color=never ' \ + '-v total' + core_dump_dir = '/var/lib/systemd/coredump/' + crash_report_dir = '/var/crash/' + for host in hosts_to_check: + with ssh_to_host(hostname=host) as host_ssh: + core_dumps_and_reports[host] = [] + + for failure_dir in (core_dump_dir, crash_report_dir): + failures = host_ssh.exec_cmd(ls_cmd.format(failure_dir), + fail_ok=True)[1].splitlines() + core_dumps_and_reports[host].append(failures) + + if move and failures: + for 
line in failures: + timestamp, name = line.split(sep=' ')[-2:] + new_name = '_'.join((host, timestamp, name)) + host_ssh.exec_sudo_cmd( + 'mv {}/{} {}/{}'.format(failure_dir, name, failure_dir, + new_name), + fail_ok=False) + + scp_to_local = True + if host_ssh.get_hostname() != active_con: + host_ssh.scp_on_source( + source_path='{}/*'.format(failure_dir), + dest_user=HostLinuxUser.get_user(), + dest_ip=active_con, dest_path=con_dir, + dest_password=HostLinuxUser.get_password()) + else: + host_ssh.exec_sudo_cmd( + 'cp -r {}/* {}'.format(failure_dir, con_dir), + fail_ok=False) + host_ssh.exec_sudo_cmd('rm -rf {}/*'.format(failure_dir)) + + if scp_to_local: + con_ssh.exec_sudo_cmd('chmod -R 755 {}'.format(con_dir)) + + log_dir = ProjVar.get_var('LOG_DIR') + coredump_and_crashreport_dir = os.path.join( + log_dir, 'coredumps_and_crashreports') + os.makedirs(coredump_and_crashreport_dir, exist_ok=True) + source_path = '{}/*'.format(con_dir) + common.scp_from_active_controller_to_localhost( + source_path=source_path, dest_path=coredump_and_crashreport_dir) + con_ssh.exec_cmd('rm -rf {}/*'.format(con_dir)) + + LOG.info("core dumps and crash reports per host: {}".format( + core_dumps_and_reports)) + return core_dumps_and_reports + + +def modify_mtu_on_interface(host, interface, mtu_val, network_type='data', + lock_unlock=True, fail_ok=False, con_ssh=None): + mtu_val = int(mtu_val) + + LOG.info("Modify MTU for IF {} of NET-TYPE {} to: {} on {}".format( + interface, network_type, mtu_val, host)) + + args = "-m {} {} {}".format(mtu_val, host, interface) + + code, output = cli.system('host-if-modify', args, ssh_client=con_ssh, + fail_ok=fail_ok) + + if code != 0: + msg = "Attempt to change MTU failed on host:{} for IF:{} to " \ + "MTU:{}".format(host, interface, mtu_val) + if fail_ok: + return 2, msg + raise exceptions.HostPostCheckFailed(msg) + + if lock_unlock: + unlock_host(host) + + return code, output + + +def modify_mtu_on_interfaces(hosts, mtu_val, network_type, lock_unlock=True, + fail_ok=False, con_ssh=None): + if not hosts: + raise exceptions.HostError("No hostname provided.") + + mtu_val = int(mtu_val) + + if isinstance(hosts, str): + hosts = [hosts] + + res = {} + rtn_code = 0 + + if_class = network_type + network = '' + if network_type in PLATFORM_NET_TYPES: + if_class = 'platform' + network = network_type + + for host in hosts: + table_ = table_parser.table( + cli.system('host-if-list', '{} --nowrap'.format(host), + ssh_client=con_ssh)[1]) + table_ = table_parser.filter_table(table_, **{'class': if_class}) + # exclude unmatched platform interfaces from the table. 
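+        # A 'platform' class interface may carry several platform networks
+        # (e.g. mgmt, oam, cluster-host); the block below keeps only the
+        # interfaces whose 'networks' field contains the requested
+        # network_type and filters the rest out of the table.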
+ if 'platform' == if_class: + platform_ifs = table_parser.get_values(table_, target_header='name', + **{'class': 'platform'}) + for pform_if in platform_ifs: + if_nets = \ + get_host_interface_values(host=host, interface=pform_if, + fields='networks', + con_ssh=con_ssh)[0] + if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] + if network not in if_nets: + table_ = table_parser.filter_table(table_, strict=True, + exclude=True, + name=pform_if) + + uses_if_names = table_parser.get_values(table_, 'name', exclude=True, + **{'uses i/f': '[]'}) + non_uses_if_names = table_parser.get_values(table_, 'name', + exclude=False, + **{'uses i/f': '[]'}) + uses_if_first = False + if uses_if_names: + current_mtu = int( + get_host_interface_values(host, interface=uses_if_names[0], + fields=['imtu'], + con_ssh=con_ssh)[0]) + if current_mtu <= mtu_val: + uses_if_first = True + + if uses_if_first: + if_names = uses_if_names + non_uses_if_names + else: + if_names = non_uses_if_names + uses_if_names + + if lock_unlock: + lock_host(host, swact=True) + + LOG.info("Modify MTU for {} {} interfaces to: {}".format( + host, network_type, mtu_val)) + + res_for_ifs = {} + for if_name in if_names: + args = "-m {} {} {}".format(mtu_val, host, if_name) + # system host-if-modify controller-1 --imtu + code, output = cli.system('host-if-modify', args, + ssh_client=con_ssh, fail_ok=fail_ok) + res_for_ifs[if_name] = code, output + + if code != 0: + rtn_code = 1 + + res[host] = res_for_ifs + + if lock_unlock: + unlock_hosts(hosts, check_hypervisor_up=True, check_webservice_up=True) + + check_failures = [] + for host in hosts: + host_res = res[host] + for if_name in host_res: + mod_res = host_res[if_name] + + # Check mtu modified correctly + if mod_res[0] == 0: + actual_mtu = int( + get_host_interface_values(host, interface=if_name, + fields=['imtu'], + con_ssh=con_ssh)[0]) + if not actual_mtu == mtu_val: + check_failures.append((host, if_name, actual_mtu)) + + if check_failures: + msg = "Actual MTU value after modify is not as expected. " \ + "Expected MTU value: {}. 
Actual [Host, Interface, " \ + "MTU value]: {}".format(mtu_val, check_failures) + if fail_ok: + return 2, msg + raise exceptions.HostPostCheckFailed(msg) + + return rtn_code, res + + +def get_hosts_and_pnets_with_pci_devs(pci_type='pci-sriov', up_hosts_only=True, + con_ssh=None, + auth_info=Tenant.get('admin')): + """ + + Args: + pci_type (str|list|tuple): pci-sriov, pci-passthrough + up_hosts_only: + con_ssh: + auth_info: + + Returns (dict): hosts and pnets with ALL specified pci devs + + """ + state = 'up' if up_hosts_only else None + hosts = get_hypervisors(state=state, auth_info=auth_info) + sysinv_auth = Tenant.get('admin_platform', dc_region=auth_info.get( + 'region') if auth_info else None) + + hosts_pnets_with_pci = {} + if isinstance(pci_type, str): + pci_type = [pci_type] + + for host_ in hosts: + pnets_list_for_host = [] + for pci_type_ in pci_type: + + pnets_list = get_host_interfaces(host_, field='data networks', + net_type=pci_type_, + con_ssh=con_ssh, + auth_info=sysinv_auth) + pnets_for_type = [] + for pnets_ in pnets_list: + pnets_for_type += pnets_ + + if not pnets_for_type: + LOG.info("{} {} interface data network not found".format( + host_, pci_type_)) + pnets_list_for_host = [] + break + pnets_list_for_host.append(list(set(pnets_for_type))) + + if pnets_list_for_host: + pnets_final = pnets_list_for_host[0] + for pnets_ in pnets_list_for_host[1:]: + pnets_final = list(set(pnets_final) & set(pnets_)) + + if pnets_final: + hosts_pnets_with_pci[host_] = pnets_final + + if not hosts_pnets_with_pci: + LOG.info("No {} interface found from any of following hosts: " + "{}".format(pci_type, hosts)) + else: + LOG.info("Hosts and provider networks with {} devices: {}".format( + pci_type, hosts_pnets_with_pci)) + + return hosts_pnets_with_pci + + +def get_sm_dump_table(controller, con_ssh=None): + """ + + Args: + controller (str|SSHClient): controller name/ssh client to get sm-dump + con_ssh (SSHClient): ssh client for active controller + + Returns (): + table_ (dict): Dictionary of a table parsed by tempest. 
+ Example: table = + { + 'headers': ["Field", "Value"]; + 'values': [['name', 'internal-subnet0'], ['id', '36864844783']]} + + """ + if isinstance(controller, str): + with ssh_to_host(controller, con_ssh=con_ssh) as host_ssh: + return table_parser.sm_dump_table( + host_ssh.exec_sudo_cmd('sm-dump', fail_ok=False)[1]) + + host_ssh = controller + return table_parser.sm_dump_table( + host_ssh.exec_sudo_cmd('sm-dump', fail_ok=False)[1]) + + +def get_sm_dump_items(controller, item_names=None, con_ssh=None): + """ + get sm dump dict for specified items + Args: + controller (str|SSHClient): hostname or ssh client for a controller + such as controller-0, controller-1 + item_names (list|str|None): such as 'oam-services', or ['oam-ip', + 'oam-services'] + con_ssh (SSHClient): + + Returns (dict): such as {'oam-services': {'desired-state': 'active', + 'actual-state': 'active'}, + 'oam-ip': {...} + } + + """ + sm_dump_tab = get_sm_dump_table(controller=controller, con_ssh=con_ssh) + if item_names: + if isinstance(item_names, str): + item_names = [item_names] + + sm_dump_tab = table_parser.filter_table(sm_dump_tab, name=item_names) + + sm_dump_items = table_parser.row_dict_table(sm_dump_tab, key_header='name', + unique_key=True) + return sm_dump_items + + +def get_sm_dump_item_states(controller, item_name, con_ssh=None): + """ + get desired and actual states of given item + + Args: + controller (str|SSHClient): hostname or host_ssh for a controller + such as controller-0, controller-1 + item_name (str): such as 'oam-services' + con_ssh (SSHClient): + + Returns (tuple): (, ) such as ('active', + 'active') + + """ + item_value_dict = \ + get_sm_dump_items(controller=controller, item_names=item_name, + con_ssh=con_ssh)[item_name] + + return item_value_dict['desired-state'], item_value_dict['actual-state'] + + +def wait_for_sm_dump_desired_states(controller, item_names=None, timeout=60, + strict=True, fail_ok=False, con_ssh=None): + """ + Wait for sm_dump item(s) to reach desired state(s) + + Args: + controller (str): controller name + item_names (str|list|None): item(s) name(s) to wait for desired + state(s). Wait for desired states for all items + when set to None. + timeout (int): max seconds to wait + strict (bool): whether to find strict match for given item_names. 
+ e.g., item_names='drbd-', strict=False will + check all items whose name contain 'drbd-' + fail_ok (bool): whether or not to raise exception if any item did not + reach desired state before timed out + con_ssh (SSHClient): + + Returns (bool): True if all of given items reach desired state + + """ + + LOG.info("Waiting for {} {} in sm-dump to reach desired state".format( + controller, item_names)) + if item_names is None: + item_names = get_sm_dump_items(controller=controller, + item_names=item_names, con_ssh=con_ssh) + + elif not strict: + table_ = get_sm_dump_table(controller=controller, con_ssh=con_ssh) + item_names = table_parser.get_values(table_, 'name', strict=False, + name=item_names) + + if isinstance(item_names, str): + item_names = [item_names] + + items_to_check = {} + for item in item_names: + items_to_check[item] = {} + items_to_check[item]['prev-state'] = items_to_check[item][ + 'actual-state'] = \ + items_to_check[item]['desired-state'] = '' + + def __wait_for_desired_state(ssh_client): + end_time = time.time() + timeout + + while time.time() < end_time: + items_names_to_check = list(items_to_check.keys()) + items_states = get_sm_dump_items(ssh_client, + item_names=items_names_to_check, + con_ssh=con_ssh) + + for item_ in items_states: + items_to_check[item_].update(**items_states[item_]) + + prev_state = items_to_check[item_]['prev-state'] + desired_state = items_states[item_]['desired-state'] + actual_state = items_states[item_]['actual-state'] + + if desired_state == actual_state: + LOG.info( + "{} in sm-dump has reached desired state: {}".format( + item_, desired_state)) + items_to_check.pop(item_) + continue + + elif prev_state and actual_state != prev_state: + LOG.info( + "{} actual state changed from {} to {} while desired " + "state is: {}". + format(item_, prev_state, actual_state, desired_state)) + + items_to_check[item_].update(prev_state=actual_state) + + if not items_to_check: + return True + + time.sleep(3) + + err_msg = "Timed out waiting for sm-dump item(s) to reach desired " \ + "state(s): {}".format(items_to_check) + if fail_ok: + LOG.warning(err_msg) + return False + else: + raise exceptions.TimeoutException(err_msg) + + if isinstance(controller, str): + with ssh_to_host(controller, con_ssh=con_ssh) as host_ssh: + return __wait_for_desired_state(host_ssh) + else: + return __wait_for_desired_state(controller) + + +# This is a copy from installer_helper due to blocking issues in +# installer_helper on importing non-exist modules + + +@contextmanager +def ssh_to_test_server(test_srv=TestFileServer.SERVER, user=TestFileServer.USER, + password=TestFileServer.PASSWORD, prompt=None): + """ + ssh to test server. + Usage: Use with context_manager. i.e., + with ssh_to_build_server(bld_srv=cgts-yow3-lx) as bld_srv_ssh: + # do something + # ssh session will be closed automatically + + Args: + test_srv (str): test server ip + user (str): svc-cgcsauto if unspecified + password (str): password for svc-cgcsauto user if unspecified + prompt (str|None): expected prompt. such as: + svc-cgcsauto@yow-cgts4-lx.wrs.com$ + + Yields (SSHClient): ssh client for given build server and user + + """ + # Get build_server dict from bld_srv param. 
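+    # Typical use (the command and path are illustrative only):
+    #     with ssh_to_test_server() as test_ssh:
+    #         test_ssh.exec_cmd('ls /home/svc-cgcsauto', fail_ok=True)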
+ + prompt = prompt if prompt else Prompt.TEST_SERVER_PROMPT_BASE.format(user) + test_server_conn = SSHClient(test_srv, user=user, password=password, + initial_prompt=prompt) + test_server_conn.connect() + + try: + yield test_server_conn + finally: + test_server_conn.close() + + +def get_host_co_processor_pci_list(hostname): + host_pci_info = [] + with ssh_to_host(hostname) as host_ssh: + LOG.info( + "Getting the Co-processor pci list for host {}".format(hostname)) + cmd = r"lspci -nnm | grep Co-processor | grep --color=never -v -A 1 " \ + r"-E 'Device \[0000\]|Virtual'" + rc, output = host_ssh.exec_cmd(cmd) + if rc != 0: + return host_pci_info + + # sample output: + # wcp7-12: + # 09:00.0 "Co-processor [0b40]" "Intel Corporation [8086]" "DH895XCC + # Series QAT [0435]" "Intel Corporation [8086]" "Device [35c5]" + # 09:01.0 "Co-processor [0b40]" "Intel Corporation [8086]" "DH895XCC + # Series QAT Virtual Function [0443]" "Intel Corporation [8086]" + # "Device [0000]" + + # wolfpass-13_14: + # 3f:00.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ + # 37c8]" -r04 "Intel Corporation [8086]" "Device [35cf]" + # 3f:01.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ + # 37c9]" -r04 "Intel Corporation [8086]" "Device [0000]" + # -- + # da:00.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ + # 37c8]" -r04 "Intel Corporation [8086]" "Device [35cf]" + # da:01.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ + # 37c9]" -r04 "Intel Corporation [8086]" "Device [0000]" + dev_sets = output.split('--\n') + for dev_set in dev_sets: + pdev_line, vdev_line = dev_set.strip().splitlines() + class_id, vendor_id, device_id = re.findall(r'\[([0-9a-fA-F]{4})\]', + pdev_line)[0:3] + vf_class_id, vf_vendor_id, vf_device_id = re.findall( + r'\[([0-9a-fA-F]{4})\]', vdev_line)[0:3] + assert vf_class_id == class_id + assert vf_vendor_id == vendor_id + assert device_id != vf_device_id + + vendor_name = \ + re.findall(r'\"([^\"]+) \[{}\]'.format(vendor_id), pdev_line)[0] + pci_alias = \ + re.findall(r'\"([^\"]+) \[{}\]'.format(device_id), pdev_line)[0] + if pci_alias == 'Device': + pci_alias = None + else: + pci_alias = 'qat-{}-vf'.format(pci_alias.lower()) + pci_address = ( + "0000:{}".format(pdev_line.split(sep=' "', maxsplit=1)[0])) + pci_name = "pci_{}".format( + pci_address.replace('.', '_').replace(':', '_').strip()) + # Ensure class id is at least 6 digits as displayed in nova + # device-list and system host-device-list + class_id = (class_id + '000000')[0:6] + + LOG.info("pci_name={} device_id={}".format(pci_name, device_id)) + pci_info = {'pci_address': pci_address, + 'pci_name': pci_name, + 'vendor_name': vendor_name, + 'vendor_id': vendor_id, + 'device_id': device_id, + 'class_id': class_id, + 'pci-alias': pci_alias, + 'vf_device_id': vf_device_id, + } + + host_pci_info.append(pci_info) + + LOG.info("The Co-processor pci list for host {}: {}".format( + hostname, host_pci_info)) + + return host_pci_info + + +def get_mellanox_ports(host): + """ + Get Mellanox data ports for given host + + Args: + host (str): hostname + + Returns (list): + + """ + data_ports = get_host_ports_for_net_type(host, net_type='data', + ports_only=True) + mt_ports = get_host_ports(host, 'uuid', if_name=data_ports, strict=False, + regex=True, **{'device type': MELLANOX_DEVICE}) + LOG.info("Mellanox ports: {}".format(mt_ports)) + return mt_ports + + +def is_host_locked(host, con_ssh=None): + admin_state = system_helper.get_host_values(host, 'administrative', + con_ssh=con_ssh)[0] + return 
admin_state.lower() == HostAdminState.LOCKED.lower() + + +def get_host_network_interface_dev_names(host, con_ssh=None): + dev_names = [] + with ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + + cmd = "ifconfig -a | sed 's/[ \t].*//;/^$/d;/^lo/d'" + rc, output = host_ssh.exec_sudo_cmd(cmd) + if rc == 0: + output = output.splitlines() + for dev in output: + if dev.endswith(':'): + dev = dev[:-1] + dev_names.append(dev) + LOG.info( + "Host {} interface device names: {}".format(host, dev_names)) + else: + LOG.warning( + "Failed to get interface device names for host {}".format(host)) + + return dev_names + + +def get_host_interfaces_for_net_type(host, net_type='infra', if_type=None, + exclude_iftype=False, con_ssh=None): + """ + Get interface names for given net_type that is expected to be listed in + ifconfig on host + Args: + host (str): + net_type (str): 'infra', 'mgmt' or 'oam', (data is handled in AVS + thus not shown in ifconfig on host) + if_type (str|None): When None, interfaces with all eth types will return + exclude_iftype(bool): whether or not to exclude the if type specified. + con_ssh (SSHClient): + + Returns (dict): { + 'ethernet': [, , etc], + 'vlan': [, , etc], + 'ae': [(, []), (, []), ...] + } + + """ + LOG.info("Getting expected eth names for {} network on {}".format(net_type, + host)) + table_origin = get_host_interfaces_table(host=host, con_ssh=con_ssh) + + if if_type: + table_ = table_parser.filter_table(table_origin, exclude=exclude_iftype, + **{'type': if_type}) + else: + table_ = copy.deepcopy(table_origin) + + network = '' + if_class = net_type + if net_type in PLATFORM_NET_TYPES: + if_class = 'platform' + network = net_type + + table_ = table_parser.filter_table(table_, **{'class': if_class}) + # exclude unmatched platform interfaces from the table. 
+ if 'platform' == if_class: + platform_ifs = table_parser.get_values(table_, target_header='name', + **{'class': 'platform'}) + for pform_if in platform_ifs: + if_nets = get_host_interface_values(host=host, interface=pform_if, + fields='networks')[0] + if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] + if network not in if_nets: + table_ = table_parser.filter_table(table_, strict=True, + exclude=True, name=pform_if) + + interfaces = {} + table_eth = table_parser.filter_table(table_, **{'type': 'ethernet'}) + eth_ifs = table_parser.get_values(table_eth, 'ports') + interfaces['ethernet'] = eth_ifs + # such as ["[u'enp134s0f1']", "[u'enp131s0f1']"] + + table_ae = table_parser.filter_table(table_, **{'type': 'ae'}) + ae_names = table_parser.get_values(table_ae, 'name') + ae_ifs = table_parser.get_values(table_ae, 'uses i/f') + + ae_list = [] + for i in range(len(ae_names)): + ae_list.append((ae_names[i], ae_ifs[i])) + interfaces['ae'] = ae_list + + table_vlan = table_parser.filter_table(table_, + **{'type': ['vlan', 'vxlan']}) + vlan_ifs_ = table_parser.get_values(table_vlan, 'uses i/f') + vlan_ids = table_parser.get_values(table_vlan, 'vlan id') + vlan_list = [] + for i in range(len(vlan_ifs_)): + # assuming only 1 item in 'uses i/f' list + vlan_useif = eval(vlan_ifs_[i])[0] + vlan_useif_ports = eval( + table_parser.get_values(table_origin, 'ports', name=vlan_useif)[0]) + if vlan_useif_ports: + vlan_useif = vlan_useif_ports[0] + vlan_list.append("{}.{}".format(vlan_useif, vlan_ids[i])) + + LOG.info( + "Expected eth names for {} network on {}: {}".format(net_type, host, + interfaces)) + return interfaces + + +def get_host_cpu_model(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get cpu model for a given host. e.g., Intel(R) Xeon(R) CPU E5-2680 v2 @ + 2.80GHz + Args: + host (str): e.g., compute-0 + con_ssh (SSHClient): + auth_info + + Returns (str): + """ + table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, + auth_info=auth_info) + cpu_model = table_parser.get_column(table_, 'processor_model')[0] + + LOG.info("CPU Model for {}: {}".format(host, cpu_model)) + return cpu_model + + +def get_max_vms_supported(host, con_ssh=None): + max_count = 10 + cpu_model = get_host_cpu_model(host=host, con_ssh=con_ssh) + if ProjVar.get_var('IS_VBOX'): + max_count = MaxVmsSupported.VBOX + elif re.search(r'Xeon.* CPU D-[\d]+', cpu_model): + max_count = MaxVmsSupported.XEON_D + + LOG.info("Max number vms supported on {}: {}".format(host, max_count)) + return max_count + + +def get_hypersvisors_with_config(hosts=None, up_only=True, hyperthreaded=None, + storage_backing=None, con_ssh=None): + """ + Get hypervisors with specified configurations + Args: + hosts (None|list): + up_only (bool): + hyperthreaded + storage_backing (None|str): + con_ssh (SSHClient): + + Returns (list): list of hosts meeting the requirements + + """ + if up_only: + hypervisors = get_up_hypervisors(con_ssh=con_ssh) + else: + hypervisors = get_hypervisors(con_ssh=con_ssh) + + if hosts: + candidate_hosts = list(set(hypervisors) & set(hosts)) + else: + candidate_hosts = hypervisors + + if candidate_hosts and storage_backing: + candidate_hosts = get_hosts_in_storage_backing( + storage_backing=storage_backing, con_ssh=con_ssh, + hosts=candidate_hosts) + + if hyperthreaded is not None and candidate_hosts: + ht_hosts = [] + non_ht = [] + for host in candidate_hosts: + if is_host_hyperthreaded(host, con_ssh=con_ssh): + ht_hosts.append(host) + else: + non_ht.append(host) + candidate_hosts = ht_hosts if 
hyperthreaded else non_ht + + return candidate_hosts + + +def lock_unlock_controllers(host_recover='function', alarm_ok=False, + no_standby_ok=False): + """ + lock/unlock both controller to get rid of the config out of date situations + + Args: + host_recover (None|str): try to recover host if lock/unlock fails + alarm_ok (bool) + no_standby_ok (bool) + + Returns (tuple): return code and msg + + """ + active, standby = system_helper.get_active_standby_controllers() + if standby: + LOG.info("Locking unlocking controllers to complete action") + from testfixtures.recover_hosts import HostsToRecover + if host_recover: + HostsToRecover.add(hostnames=standby, scope=host_recover) + lock_host(standby) + unlock_host(standby) + if host_recover: + HostsToRecover.remove(hostnames=standby, scope=host_recover) + drbd_res = system_helper.wait_for_alarm_gone( + alarm_id=EventLogID.CON_DRBD_SYNC, entity_id=standby, + strict=False, fail_ok=alarm_ok, timeout=300, check_interval=20) + if not drbd_res: + return 1, "400.001 alarm is not cleared within timeout after " \ + "unlock standby" + + lock_host(active, swact=True) + unlock_host(active) + drbd_res = system_helper.wait_for_alarm_gone( + alarm_id=EventLogID.CON_DRBD_SYNC, entity_id=active, + strict=False, fail_ok=alarm_ok, timeout=300) + if not drbd_res: + return 1, "400.001 alarm is not cleared within timeout after " \ + "unlock standby" + + elif system_helper.is_aio_simplex(): + LOG.info("Simplex system - lock/unlock only controller") + lock_host('controller-0', swact=False) + unlock_host('controller-0') + + else: + LOG.warning( + "Standby controller unavailable. Unable to lock active controller.") + if no_standby_ok: + return 2, 'No standby available, thus unable to lock/unlock ' \ + 'controllers' + else: + raise exceptions.HostError( + "Unable to lock/unlock controllers due to no standby " + "controller") + + return 0, "Locking unlocking controller(s) completed" + + +def lock_unlock_hosts(hosts, force_lock=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + recover_scope='function'): + """ + Lock/unlock hosts simultaneously when possible. 
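+    e.g., lock_unlock_hosts(['compute-0', 'compute-1']) locks then unlocks
+    the given hosts, keeping one compute unlocked when stx-openstack VMs are
+    running and force_lock is False (host names are illustrative only).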
+ Args: + hosts (str|list): + force_lock (bool): lock without migrating vms out + con_ssh: + auth_info + recover_scope (None|str): + + Returns: + + """ + if isinstance(hosts, str): + hosts = [hosts] + + last_compute = last_storage = None + from testfixtures.recover_hosts import HostsToRecover + controllers, computes, storages = system_helper.get_hosts_per_personality( + con_ssh=con_ssh, auth_info=auth_info, + rtn_tuple=True) + controllers = list(set(controllers) & set(hosts)) + computes_to_lock = list(set(computes) & set(hosts)) + storages = list(set(storages) & set(hosts)) + + hosts_to_lock = list(computes_to_lock) + from keywords import container_helper, vm_helper + nova_auth = Tenant.get('admin', + auth_info.get('region') if auth_info else None) + if computes and not force_lock and \ + len(computes) == len(computes_to_lock) and \ + container_helper.is_stx_openstack_deployed() and \ + vm_helper.get_vms(auth_info=nova_auth): + # leave a compute if there are vms on system and force lock=False + last_compute = hosts_to_lock.pop() + + active, standby = system_helper.get_active_standby_controllers( + con_ssh=con_ssh, auth_info=auth_info) + + if standby and standby in controllers: + hosts_to_lock.append(standby) + + if storages and 'storage-0' in storages: + # storage-0 cannot be locked with any controller + last_storage = 'storage-0' + storages.remove(last_storage) + if storages: + hosts_to_lock += storages + + LOG.info("Lock/unlock: {}".format(hosts_to_lock)) + hosts_locked = [] + try: + for host in hosts_to_lock: + HostsToRecover.add(hostnames=host, scope=recover_scope) + lock_host(host, con_ssh=con_ssh, force=force_lock, + auth_info=auth_info) + hosts_locked.append(host) + + finally: + if hosts_locked: + unlock_hosts(hosts=hosts_locked, con_ssh=con_ssh, + auth_info=auth_info) + wait_for_hosts_ready(hosts=hosts_locked, con_ssh=con_ssh, + auth_info=auth_info) + HostsToRecover.remove(hosts_locked, scope=recover_scope) + + LOG.info("Lock/unlock last compute {} and storage {} if any".format( + last_compute, last_storage)) + hosts_locked_next = [] + try: + for host in (last_compute, last_storage): + if host: + HostsToRecover.add(host, scope=recover_scope) + lock_host(host=host, con_ssh=con_ssh, auth_info=auth_info) + hosts_locked_next.append(host) + + finally: + if hosts_locked_next: + unlock_hosts(hosts_locked_next, con_ssh=con_ssh, + auth_info=auth_info) + wait_for_hosts_ready(hosts_locked_next, con_ssh=con_ssh, + auth_info=auth_info) + HostsToRecover.remove(hosts_locked_next, scope=recover_scope) + + if active in controllers: + if active and system_helper.is_aio_duplex(con_ssh=con_ssh, + auth_info=auth_info): + system_helper.wait_for_alarm_gone( + alarm_id=EventLogID.CPU_USAGE_HIGH, check_interval=30, + timeout=300, con_ssh=con_ssh, entity_id=active, + auth_info=auth_info) + LOG.info("Lock/unlock {}".format(active)) + HostsToRecover.add(active, scope=recover_scope) + lock_host(active, swact=True, con_ssh=con_ssh, force=force_lock, + auth_info=auth_info) + unlock_hosts(active, con_ssh=con_ssh, auth_info=auth_info) + wait_for_hosts_ready(active, con_ssh=con_ssh, + auth_info=auth_info) + HostsToRecover.remove(active, scope=recover_scope) + + LOG.info("Hosts lock/unlock completed: {}".format(hosts)) + + +def get_traffic_control_rates(dev, con_ssh=None): + """ + Check the traffic control profile on given device name + + Returns (dict): return traffic control rates in Mbit. + e.g., {'root': [10000, 10000], 'drbd': [8000, 10000], ... 
} + + """ + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + output = con_ssh.exec_cmd('tc class show dev {}'.format(dev), + expect_timeout=10)[1] + + traffic_classes = {} + for line in output.splitlines(): + match = re.findall(TrafficControl.RATE_PATTERN, line) + if match: + ratio, rate, rate_unit, ceil_rate, ceil_rate_unit = match[0] + class_name = TrafficControl.CLASSES[ratio] + else: + root_match = re.findall(TrafficControl.RATE_PATTERN_ROOT, line) + if not root_match: + raise NotImplementedError( + 'Unrecognized traffic class line: {}'.format(line)) + rate, rate_unit, ceil_rate, ceil_rate_unit = root_match[0] + class_name = 'root' + + rate = int(rate) + ceil_rate = int(ceil_rate) + + rates = [] + for rate_info in ((rate, rate_unit), (ceil_rate, ceil_rate_unit)): + rate_, unit_ = rate_info + rate_ = int(rate_) + if unit_ == 'G': + rate_ = int(rate_ * 1000) + elif unit_ == 'K': + rate_ = int(rate_ / 1000) + + rates.append(rate_) + + traffic_classes[class_name] = rates + + LOG.info("Traffic classes for {}: ".format(dev, traffic_classes)) + return traffic_classes + + +def get_nic_speed(interface, con_ssh=None): + """ + Check the speed on given interface name + Args: + interface (str|list) + con_ssh + + Returns (list): return speed + + """ + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + if isinstance(interface, str): + interface = [interface] + + speeds = [] + for if_ in interface: + if_speed = con_ssh.exec_cmd('cat /sys/class/net/{}/speed'.format(if_), + expect_timeout=10, fail_ok=False)[1] + speeds.append(int(if_speed)) + + return speeds + + +def get_host_cmdline_options(host, con_ssh=None): + with ssh_to_host(hostname=host, con_ssh=con_ssh) as host_ssh: + output = host_ssh.exec_cmd('cat /proc/cmdline')[1] + + return output + + +def get_host_memories(host, headers=('app_hp_avail_2M',), proc_id=None, + wait_for_update=True, con_ssh=None, + auth_info=Tenant.get('admin_platform'), rtn_dict=True): + """ + Get host memory values + Args: + host (str): hostname + headers (str|list|tuple): + proc_id (int|str|None|tuple|list): such as 0, '1' + wait_for_update (bool): wait for app_hp_pending_2M and + app_hp_pending_1G to be None + con_ssh (SSHClient): + auth_info (dict): + rtn_dict + + Returns (dict|list): {(int): (list), ... } or [( + list), (list), ...] 
+ e.g., {0: [62018, 1]} + + """ + + cmd = 'host-memory-list --nowrap' + table_ = table_parser.table( + cli.system(cmd, host, ssh_client=con_ssh, auth_info=auth_info)[1]) + + if proc_id is None: + proc_id = table_parser.get_column(table_, 'processor') + elif isinstance(proc_id, (str, int)): + proc_id = [int(proc_id)] + + procs = sorted([int(proc) for proc in proc_id]) + + if wait_for_update: + end_time = time.time() + 330 + while time.time() < end_time: + pending_2m, pending_1g = table_parser.get_multi_values( + table_, evaluate=True, + fields=('app_hp_pending_2M', 'app_hp_pending_1G')) + for i in range(len(pending_2m)): + if (pending_2m[i] is not None) or (pending_1g[i] is not None): + break + else: + LOG.debug("No pending 2M or 1G mem pages") + break + + LOG.info("Pending 2M or 1G pages, wait for mem page to update") + time.sleep(30) + table_ = table_parser.table(cli.system(cmd, host, + ssh_client=con_ssh, + auth_info=auth_info)[1]) + else: + raise exceptions.SysinvError( + "Pending 2M or 1G pages after 5 minutes") + + values_all_procs = [] + for proc in procs: + vals = table_parser.get_multi_values(table_, headers, evaluate=True, + convert_single_field=False, + **{'processor': str(proc)}) + # Since proc is set, there will be only 1 row filtered out. + vals = [val[0] for val in vals] + values_all_procs.append(vals) + + if rtn_dict: + values_all_procs = {procs[i]: values_all_procs[i] for i in + range(len(procs))} + + return values_all_procs + + +def get_host_used_mem_values(host, proc_id=0, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Return number of MiB used by a specific host + Args: + host: + proc_id: + auth_info: + con_ssh: + + Returns (int): + + """ + mem_vals = get_host_memories( + host, ['mem_total(MiB)', 'mem_avail(MiB)', 'avs_hp_size(MiB)', + 'avs_hp_total'], + proc_id=proc_id, con_ssh=con_ssh, auth_info=auth_info)[int(proc_id)] + + mem_total, mem_avail, avs_hp_size, avs_hp_total = [int(val) for val in + mem_vals] + + used_mem = mem_total - mem_avail - avs_hp_size * avs_hp_total + + return used_mem + + +def is_host_hyperthreaded(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + table_ = table_parser.table( + cli.system('host-cpu-list', host, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return len(set(table_parser.get_column(table_, 'thread'))) > 1 + + +def get_host_cpu_list_table(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get the parsed version of the output from system host-cpu-list + Args: + host (str): host's name + con_ssh (SSHClient): + auth_info (dict): + + Returns (dict): output of system host-cpu-list parsed by table_parser + + """ + output = cli.system('host-cpu-list --nowrap', host, ssh_client=con_ssh, + auth_info=auth_info)[1] + table_ = table_parser.table(output) + return table_ + + +def get_host_ports(host, field='name', if_name=None, pci_addr=None, proc=None, + dev_type=None, strict=True, + regex=False, rtn_dict=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), **kwargs): + """ + Get + Args: + host: + field (str|list): + if_name: + pci_addr: + proc: + dev_type: + strict: + regex: + con_ssh: + auth_info: + rtn_dict + **kwargs: + + Returns (list|dict): list if header is string, dict if header is list. 
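+        e.g., get_host_ports('compute-0', field=('name', 'pci address'),
+                             proc=0) returns the name and pci address of the
+        ports on processor 0 (host name is illustrative only).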
+ + """ + table_ = table_parser.table( + cli.system('host-port-list --nowrap', host, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + args_tmp = { + 'name': if_name, + 'pci address': pci_addr, + 'processor': proc, + 'device_type': dev_type + } + + kwargs.update({k: v for k, v in args_tmp.items() if v is not None}) + return table_parser.get_multi_values(table_, field, rtn_dict=rtn_dict, + strict=strict, regex=regex, **kwargs) + + +def get_host_interfaces_table(host, show_all=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get system host-if-list table + Args: + host (str): + show_all (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (dict): + + """ + args = '' + args += ' --a' if show_all else '' + args += ' ' + host + + table_ = table_parser.table( + cli.system('host-if-list --nowrap', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_ + + +def get_host_interfaces(host, field='name', net_type=None, if_type=None, + uses_ifs=None, used_by_ifs=None, + show_all=False, strict=True, regex=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + exclude=False, **kwargs): + """ + Get specified interfaces info for given host via system host-if-list + + Args: + host (str): + field (str|tuple): header for return info + net_type (str|list|tuple): valid values: 'oam', 'data', 'infra', + 'mgmt', 'None'(string instead of None type) + if_type (str): possible values: 'ethernet', 'ae', 'vlan' + uses_ifs (str): + used_by_ifs (str): + show_all (bool): whether or not to show unused interfaces + exclude (bool): whether or not to exclude the interfaces filtered + strict (bool): + regex (bool): + con_ssh (SSHClient): + auth_info (dict): + **kwargs: extraheader=value pairs to further filter out info. such as + attributes='MTU=1500'. 
+ + Returns (list): + + """ + table_ = get_host_interfaces_table(host=host, show_all=show_all, + con_ssh=con_ssh, auth_info=auth_info) + + if isinstance(net_type, str): + net_type = [net_type] + networks = if_classes = None + if net_type is not None: + networks = [] + if_classes = [] + for net in net_type: + network = '' + if_class = net + if net in PLATFORM_NET_TYPES: + if_class = 'platform' + network = net + networks.append(network) + if_classes.append(if_class) + + args_tmp = { + 'class': if_classes, + 'type': if_type, + 'uses i/f': uses_ifs, + 'used by i/f': used_by_ifs + } + + for key, value in args_tmp.items(): + if value is not None: + kwargs[key] = value + + table_ = table_parser.filter_table(table_, strict=strict, regex=regex, + exclude=exclude, **kwargs) + + # exclude the platform interface that does not have desired net_type + if if_classes is not None and 'platform' in if_classes: + platform_ifs = table_parser.get_values(table_, target_header='name', + **{'class': 'platform'}) + for pform_if in platform_ifs: + if_nets = get_host_interface_values(host=host, interface=pform_if, + fields='networks', + con_ssh=con_ssh)[0] + if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] + if not (set(if_nets) & set(networks)): + table_ = table_parser.filter_table(table_, strict=True, + exclude=(not exclude), + name=pform_if) + + vals = table_parser.get_multi_values(table_, fields=field, evaluate=True) + if not isinstance(field, str) and len(vals) > 1: + vals = list(zip(*vals)) + + return vals + + +def get_host_ports_for_net_type(host, net_type='data', ports_only=True, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + + Args: + host: + net_type: + ports_only: whether to include dev_name as well + con_ssh: + auth_info: + + Returns (list): + + """ + table_ = get_host_interfaces_table(host=host, con_ssh=con_ssh, + auth_info=auth_info) + table_origin = copy.deepcopy(table_) + if net_type: + if_class = net_type + network = '' + if net_type in PLATFORM_NET_TYPES: + if_class = 'platform' + network = net_type + + table_ = table_parser.filter_table(table_, **{'class': if_class}) + # exclude unmatched platform interfaces from the table. 
+ if 'platform' == if_class: + platform_ifs = table_parser.get_values(table_, target_header='name', + **{'class': 'platform'}) + for pform_if in platform_ifs: + if_nets = \ + get_host_interface_values(host=host, interface=pform_if, + fields='networks', + con_ssh=con_ssh)[0] + if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] + if network not in if_nets: + table_ = table_parser.filter_table(table_, strict=True, + exclude=True, + name=pform_if) + + net_ifs_names = table_parser.get_column(table_, 'name') + total_ports = [] + for if_name in net_ifs_names: + if_type = table_parser.get_values(table_, 'type', name=if_name)[0] + if if_type == 'ethernet': + ports = ast.literal_eval( + table_parser.get_values(table_, 'ports', name=if_name)[0]) + dev_name = ports[0] if len(ports) == 1 else if_name + else: + dev_name = if_name + ports = [] + uses_ifs = ast.literal_eval( + table_parser.get_values(table_, 'uses i/f', name=if_name)[0]) + for use_if in uses_ifs: + use_if_type = \ + table_parser.get_values(table_origin, 'type', + name=use_if)[0] + if use_if_type == 'ethernet': + useif_ports = ast.literal_eval( + table_parser.get_values(table_origin, 'ports', + name=use_if)[0]) + else: + # uses if is ae + useif_ports = ast.literal_eval( + table_parser.get_values(table_origin, 'uses i/f', + name=use_if)[0]) + ports += useif_ports + + if if_type == 'vlan': + vlan_id = \ + table_parser.get_values(table_, 'vlan id', name=if_name)[0] + if ports: + dev_name = ports[0] if len(ports) == 1 else uses_ifs[0] + dev_name = '{}.{}'.format(dev_name, vlan_id) + + if ports_only: + total_ports += ports + else: + total_ports.append((dev_name, sorted(ports))) + + LOG.info("{} {} network ports are: {}".format(host, net_type, total_ports)) + if ports_only: + total_ports = list(set(total_ports)) + + return total_ports + + +def get_host_port_pci_address(host, interface, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + + Args: + host: + interface: + con_ssh: + auth_info: + + Returns (str): pci address of interface + + """ + table_ = table_parser.table( + cli.system('host-port-list --nowrap', host, ssh_client=con_ssh, + auth_info=auth_info)[1]) + pci_addresses = table_parser.get_values(table_, 'pci address', + name=interface) + + pci_address = pci_addresses.pop() + LOG.info("pci address of interface {} for host is: {}".format(interface, + pci_address)) + + return pci_address + + +def get_host_port_pci_address_for_net_type(host, net_type='mgmt', rtn_list=True, + con_ssh=None, + auth_info=Tenant.get( + 'admin_platform')): + """ + + Args: + host: + net_type: + rtn_list: + con_ssh: + auth_info: + + Returns (list): + + """ + ports = get_host_ports_for_net_type(host, net_type=net_type, + ports_only=rtn_list, con_ssh=con_ssh, + auth_info=auth_info) + pci_addresses = [] + for port in ports: + pci_address = get_host_port_pci_address(host, port, con_ssh=con_ssh, + auth_info=auth_info) + pci_addresses.append(pci_address) + + return pci_addresses + + +def get_host_mgmt_pci_address(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + + Args: + host: + con_ssh: + auth_info: + + Returns: + + """ + mgmt_ip = \ + system_helper.get_host_values(host=host, fields='mgmt_ip', + con_ssh=con_ssh, + auth_info=auth_info)[0] + mgmt_ports = get_host_ifnames_by_address(host, address=mgmt_ip) + pci_addresses = [] + for port in mgmt_ports: + pci_address = get_host_port_pci_address(host, port, con_ssh=con_ssh, + auth_info=auth_info) + pci_addresses.append(pci_address) + + return pci_addresses + + +def 
get_host_interface_values(host, interface, fields, con_ssh=None,
+                              auth_info=Tenant.get('admin_platform')):
+    """
+    Get values of given fields for an interface via system host-if-show
+    """
+    args = "{} {}".format(host, interface)
+    table_ = table_parser.table(
+        cli.system('host-if-show', args, ssh_client=con_ssh,
+                   auth_info=auth_info)[1])
+    return table_parser.get_multi_values_two_col_table(table_, fields)
+
+
+def get_hosts_interfaces_info(hosts, fields, con_ssh=None,
+                              auth_info=Tenant.get('admin_platform'),
+                              strict=True,
+                              **interface_filters):
+    if isinstance(hosts, str):
+        hosts = [hosts]
+
+    res = {}
+    for host in hosts:
+        interfaces = get_host_interfaces(host, field='name', strict=strict,
+                                         **interface_filters)
+        host_res = {}
+        for interface in interfaces:
+            values = get_host_interface_values(host, interface, fields=fields,
+                                               con_ssh=con_ssh,
+                                               auth_info=auth_info)
+            host_res[interface] = values
+
+        res[host] = host_res
+
+    return res
+
+
+def get_host_ethernet_port_table(host, con_ssh=None,
+                                 auth_info=Tenant.get('admin_platform')):
+    """
+    Get system host-ethernet-port-list table
+    Args:
+        host (str):
+        con_ssh (SSHClient):
+        auth_info (dict):
+
+    Returns (dict):
+
+    """
+    args = ''
+    args += ' ' + host
+
+    table_ = table_parser.table(
+        cli.system('host-ethernet-port-list --nowrap', args, ssh_client=con_ssh,
+                   auth_info=auth_info)[1])
+    return table_
+
+
+def get_host_ifnames_by_address(host, field='ifname', address=None, id_=None,
+                                fail_ok=False, con_ssh=None,
+                                auth_info=Tenant.get('admin_platform')):
+    """
+    Get the host ifname by address.
+    Args:
+        host
+        con_ssh (SSHClient):
+        address:
+        id_:
+        field:
+        auth_info (dict):
+        fail_ok: whether to return an empty list or raise an exception when
+            the cli command is rejected
+
+    Returns (list):
+
+    """
+
+    table_ = table_parser.table(
+        cli.system('host-addr-list', host, ssh_client=con_ssh, fail_ok=fail_ok,
+                   auth_info=auth_info)[1])
+    args_dict = {
+        'uuid': id_,
+        'address': address,
+    }
+    kwargs = ({k: v for k, v in args_dict.items() if v is not None})
+    ifnames = table_parser.get_multi_values(table_, field, strict=True,
+                                            regex=True, merge_lines=True,
+                                            **kwargs)
+    return ifnames
+
+
+def get_host_addresses(host, field='address', ifname=None, id_=None,
+                       auth_info=Tenant.get('admin_platform'),
+                       fail_ok=False, con_ssh=None):
+    """
+    Get addresses for given host via system host-addr-list
+    Args:
+        host
+        con_ssh (SSHClient):
+        ifname:
+        id_:
+        field:
+        auth_info (dict):
+        fail_ok: whether to return an empty list or raise an exception when
+            the cli command is rejected
+
+    Returns (list):
+
+    """
+
+    table_ = table_parser.table(
+        cli.system('host-addr-list --nowrap', host, ssh_client=con_ssh,
+                   fail_ok=fail_ok,
+                   auth_info=auth_info)[1])
+    args_dict = {
+        'id': id_,
+        'ifname': ifname,
+    }
+    kwargs = ({k: v for k, v in args_dict.items() if v is not None})
+    address = table_parser.get_multi_values(table_, field, strict=True,
+                                            regex=True, merge_lines=True,
+                                            **kwargs)
+    return address
+
+
+def get_host_lldp_agents(host, field='uuid', uuid=None, local_port=None,
+                         status=None, chassis_id=None,
+                         port_id=None, system_name=None,
+                         system_description=None,
+                         auth_info=Tenant.get('admin_platform'), con_ssh=None,
+                         strict=True, regex=None, **kwargs):
+    """
+    Get lldp agent table via system host-lldp-agent-list
+    Args:
+        host: (mandatory)
+        field: 'uuid' (default)
+        uuid:
+        local_port:
+        status:
+        chassis_id:
+        port_id:
+        system_name:
+        system_description:
+        auth_info:
+        con_ssh:
+        strict:
+        regex:
+        **kwargs:
+
+    Returns (list):
+
+    """
+    table_ = table_parser.table(
+        cli.system('host-lldp-agent-list --nowrap', host, ssh_client=con_ssh,
+
auth_info=auth_info)[1]) + + args_temp = { + 'uuid': uuid, + 'local_port': local_port, + 'status': status, + 'chassis_id': chassis_id, + 'system_name': system_name, + 'system_description': system_description, + 'port_id': port_id, + } + kwargs.update({k: v for k, v in args_temp.items() if v is not None}) + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, **kwargs) + + +def get_host_lldp_neighbors(host, field='uuid', uuid=None, local_port=None, + remote_port=None, chassis_id=None, + management_address=None, system_name=None, + system_description=None, + auth_info=Tenant.get('admin_platform'), + con_ssh=None, strict=True, + regex=None, **kwargs): + """ + Get lldp neighbour table via system host-lldp-neighbor-list + Args: + host (str) + field (str|list|tuple): 'uuid' (default value) + uuid: + local_port: + remote_port: + chassis_id: + management_address: + system_name: + system_description: + auth_info: + con_ssh: + strict: + regex: + **kwargs: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('host-lldp-neighbor-list --nowrap', host, ssh_client=con_ssh, + auth_info=auth_info)[1]) + args_temp = { + 'uuid': uuid, + 'local_port': local_port, + 'remote_port': remote_port, + 'chassis_id': chassis_id, + 'system_name': system_name, + 'system_description': system_description, + 'management_address': management_address + } + kwargs.update({k: v for k, v in args_temp.items() if v is not None}) + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, **kwargs) + + +def get_host_device_values(host, device, fields, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get host device values for given fields via system host-device-show + Args: + host: + device: + fields (str|list|tuple): + con_ssh: + auth_info: + + Returns (list): + + """ + args = "{} {}".format(host, device) + table_ = table_parser.table( + cli.system('host-device-show', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + return table_parser.get_value_two_col_table(table_, fields) + + +def get_host_devices(host, field='name', list_all=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), strict=True, + regex=False, **kwargs): + """ + Get the parsed version of the output from system host-device-list + Args: + host (str): host's name + field (str): field name to return value for + list_all (bool): whether to list all devices including the disabled ones + con_ssh (SSHClient): + auth_info (dict): + strict (bool): whether to perform strict search on filter + regex (bool): whether to use regular expression to search the value in + kwargs + kwargs: key-value pairs to filter the table + + Returns (list): output of system host-device-list parsed by + table_parser + + """ + param = '--nowrap' + param += ' --all' if list_all else '' + table_ = table_parser.table( + cli.system('host-device-list {}'.format(param), host, + ssh_client=con_ssh, auth_info=auth_info)[1]) + + values = table_parser.get_multi_values(table_, field, strict=strict, + evaluate=True, regex=regex, **kwargs) + + return values + + +def modify_host_device(host, device, new_name=None, new_state=None, + check_first=True, lock_unlock=False, fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin_platform')): + """ + Modify host device to given name or state. 
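
A small sketch of combining the device query helpers above; the host name is hypothetical and it is assumed at least one device is listed:

```python
from keywords import host_helper

# List enabled devices on a hypothetical compute host
dev_names = host_helper.get_host_devices(
    'compute-1', field='name', list_all=False)

# Show a single field for the first device returned
if dev_names:
    enabled = host_helper.get_host_device_values(
        'compute-1', dev_names[0], fields='enabled')
```
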
+ Args: + host: host to modify + device: device name or pci address + new_name (str): new name to modify to + new_state (bool): new state to modify to + lock_unlock (bool): whether to lock unlock host before and after modify + con_ssh (SSHClient): + fail_ok (bool): + check_first (bool): + auth_info (dict): + + Returns (tuple): + + """ + args = '' + fields = [] + expt_vals = [] + if new_name: + fields.append('name') + expt_vals.append(new_name) + args += ' --name {}'.format(new_name) + if new_state is not None: + fields.append('enabled') + expt_vals.append(new_state) + args += ' --enabled {}'.format(new_state) + + if check_first and fields: + vals = get_host_device_values(host, device, fields=fields, + con_ssh=con_ssh, auth_info=auth_info) + if vals == expt_vals: + return -1, "{} device {} already set to given name and/or " \ + "state".format(host, device) + + try: + if lock_unlock: + LOG.info("Lock host before modify host device") + lock_host(host=host, con_ssh=con_ssh, auth_info=auth_info) + + LOG.info("Modify {} device {} with args: {}".format(host, device, args)) + args = "{} {} {}".format(host, device, args.strip()) + res, out = cli.system('host-device-modify', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if res == 1: + return 1, out + + LOG.info("Verifying the host device new pci name") + post_vals = get_host_device_values(host, device, fields=fields, + con_ssh=con_ssh, auth_info=auth_info) + assert expt_vals == post_vals, "{} device {} is not modified to " \ + "given values. Expt: {}, actual: {}". \ + format(host, device, expt_vals, post_vals) + + msg = "{} device {} is successfully modified to given values".format( + host, device) + LOG.info(msg) + return 0, msg + finally: + if lock_unlock: + LOG.info("Unlock host after host device modify") + unlock_host(host=host, con_ssh=con_ssh, auth_info=auth_info) + + +def enable_disable_hosts_devices(hosts, devices, enable=True, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Enable/Disable given devices on specified hosts. 
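
A minimal sketch of the return-code convention used by modify_host_device (-1 when nothing needs to change, 0 on success, 1 when the cli is rejected and fail_ok=True); the host and device name below are hypothetical:

```python
from keywords import host_helper

code, msg = host_helper.modify_host_device(
    'compute-1', device='pci_0000_b4_00_0', new_state=True,
    lock_unlock=True, fail_ok=True)
if code == -1:
    print('Device already enabled: {}'.format(msg))
elif code == 0:
    print('Device enabled: {}'.format(msg))
else:
    print('CLI rejected: {}'.format(msg))
```
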
(lock/unlock required + unless devices already in state) + Args: + hosts (str|list|tuple): hostname(s) + devices (str|list|tuple): device(s) name or address via + system host-device-list + enable (bool): whether to enable or disable devices + con_ssh + auth_info + + Returns: + + """ + if isinstance(hosts, str): + hosts = [hosts] + + if isinstance(devices, str): + devices = [devices] + + key = 'name' if 'pci_' in devices[0] else 'address' + + for host_ in hosts: + states = get_host_devices(host=host_, field='enabled', list_all=True, + con_ssh=con_ssh, + auth_info=auth_info, **{key: devices}) + if (not enable) in states: + try: + lock_host(host=host_, swact=True, con_ssh=con_ssh, + auth_info=auth_info) + for i in range(len(states)): + if states[i] is not enable: + device = devices[i] + modify_host_device(host=host_, device=device, + new_state=enable, check_first=False, + con_ssh=con_ssh, auth_info=auth_info) + finally: + unlock_host(host=host_, con_ssh=con_ssh, auth_info=auth_info) + + post_states = get_host_devices(host=host_, field='enabled', + list_all=True, con_ssh=con_ssh, + auth_info=auth_info, **{key: devices}) + assert not ((not enable) in post_states), \ + "Some devices enabled!={} after unlock".format(enable) + + LOG.info("enabled={} set successfully for following devices on hosts " + "{}: {}".format(enable, hosts, devices)) + + +def wait_for_tasks_affined(host, timeout=180, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + if system_helper.is_aio_simplex(con_ssh=con_ssh, auth_info=auth_info): + return True + + LOG.info( + "Check {} non-existent on {}".format(PLATFORM_AFFINE_INCOMPLETE, host)) + if not con_ssh: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + with ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + end_time = time.time() + timeout + while time.time() < end_time: + if not host_ssh.file_exists(PLATFORM_AFFINE_INCOMPLETE): + LOG.info( + "{} platform tasks re-affined successfully".format(host)) + return True + time.sleep(5) + + err = "{} did not clear on {}".format(PLATFORM_AFFINE_INCOMPLETE, host) + if fail_ok: + LOG.warning(err) + return False + raise exceptions.HostError(err) + + +def get_storage_backing_with_max_hosts(rtn_down_hosts=False, con_ssh=None): + """ + Get storage backing that has the most hypervisors + Args: + rtn_down_hosts (bool): whether to return down hosts if no up + hosts available + con_ssh (SSHClient): + + Returns (tuple): ((str), (list)) + Examples: + Regular/Storage system: ('local_image',['compute-1', 'compute-3']) + AIO: ('local_image', ['controller-0', 'controller-1']) + + """ + hosts_per_backing = get_hosts_per_storage_backing( + up_only=not rtn_down_hosts, con_ssh=con_ssh) + default_backing = ProjVar.get_var('DEFAULT_INSTANCE_BACKING') + return default_backing, hosts_per_backing.get(default_backing, []) diff --git a/automated-pytest-suite/keywords/html_helper.py b/automated-pytest-suite/keywords/html_helper.py new file mode 100644 index 0000000..dca85da --- /dev/null +++ b/automated-pytest-suite/keywords/html_helper.py @@ -0,0 +1,198 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import json
+import requests
+
+from consts.auth import Tenant
+from utils import table_parser, cli
+from utils.tis_log import LOG
+from consts.proj_vars import ProjVar
+from keywords import keystone_helper
+
+
+def get_ip_addr():
+    return ProjVar.get_var('lab')['floating ip']
+
+
+def create_url(ip=None, port=None, version=None, extension=None):
+    """
+    Creates a url with the given parameters in the form:
+    http(s)://<ip>:<port>/<version>/<extension>
+    Args:
+        ip (str): the main ip address. If None, the lab's floating ip is
+            used by default.
+        port (int): the port number to connect to.
+        version (str): for REST API. version number, e.g. "v1", "v2.0"
+        extension (str): extensions to add to the url
+
+    Returns (str): a url created with the given parameters
+
+    """
+    if keystone_helper.is_https_enabled() is True:
+        url = 'https://'
+    else:
+        url = 'http://'
+    if ip:
+        url += ip
+    else:
+        url += get_ip_addr()
+
+    if port:
+        url += ':{}'.format(port)
+
+    if version:
+        url += '/{}'.format(version)
+
+    if extension:
+        url += '/{}'.format(extension)
+
+    return url
+
+
+def get_user_token(field='id', con_ssh=None, auth_info=Tenant.get('admin')):
+    """
+    Return an authentication token for the admin.
+
+    Args:
+        field (str):
+        con_ssh (SSHClient):
+        auth_info
+    Returns (list): a list containing at most one authentication token
+
+    """
+    table_ = table_parser.table(cli.openstack('token issue', ssh_client=con_ssh,
+                                              auth_info=auth_info)[1])
+    token = table_parser.get_value_two_col_table(table_, field)
+    return token
+
+
+def get_request(url, headers, verify=True):
+    """
+    Sends a GET request to the url
+    Args:
+        url (str): url to send request to
+        headers (dict): header to add to the request
+        verify: Verify SSL certificate
+
+    Returns (dict): The response for the request
+
+    """
+    LOG.info("Sending GET request to {}. Headers: {}".format(url, headers))
+    resp = requests.get(url, headers=headers, verify=verify)
+
+    if resp.status_code == requests.codes.ok:
+        data = json.loads(resp.text)
+        LOG.info("The returned data is: {}".format(data))
+        return data
+
+    LOG.info("Error {}".format(resp.status_code))
+    return None
+
+
+def post_request(url, data, headers, verify=True):
+    """
+    Sends a POST request to the url
+    Args:
+        url (str): url to send request to
+        data (dict): data to be sent in the request body
+        headers (dict): header to add to the request
+        verify: Verify SSL certificate
+
+    Returns (dict): The response for the request
+
+    """
+    if not isinstance(data, str):
+        data = json.dumps(data)
+    LOG.info("Sending POST request to {}. Headers: {}. Data: "
+             "{}".format(url, headers, data))
+    resp = requests.post(url, headers=headers, data=data, verify=verify)
+
+    if resp.status_code == requests.codes.ok:
+        data = json.loads(resp.text)
+        LOG.info("The returned data is: {}".format(data))
+        return data
+
+    LOG.info("Error {}".format(resp.status_code))
+    return None
+
+
+def put_request(url, data, headers, verify=True):
+    """
+    Sends a PUT request to the url
+    Args:
+        url (str): url to send request to
+        data (dict): data to be sent in the request body
+        headers (dict): header to add to the request
+        verify: Verify SSL certificate
+
+    Returns (dict): The response for the request
+
+    """
+    if not isinstance(data, str):
+        data = json.dumps(data)
+    LOG.info("Sending PUT request to {}. Headers: {}. Data: "
+             "{}".format(url, headers, data))
+    resp = requests.put(url, headers=headers, data=data, verify=verify)
+
+    if resp.status_code == requests.codes.ok:
+        data = json.loads(resp.text)
+        LOG.info("The returned data is: {}".format(data))
+        return data
+
+    LOG.info("Error {}".format(resp.status_code))
+    return None
+
+
+def delete_request(url, headers, verify=True):
+    """
+    Sends a DELETE request to the url
+    Args:
+        url (str): url to send request to
+        headers (dict): header to add to the request
+        verify: Verify SSL certificate
+
+    Returns (dict): The response for the request
+
+    """
+    LOG.info("Sending DELETE request to {}. Headers: {}".format(url, headers))
+    resp = requests.delete(url, headers=headers, verify=verify)
+
+    if resp.status_code == requests.codes.ok:
+        data = json.loads(resp.text)
+        LOG.info("The returned data is: {}".format(data))
+        return data
+
+    LOG.info("Error {}".format(resp.status_code))
+    return None
+
+
+def patch_request(url, data, headers, verify=True):
+    """
+    Sends a PATCH request to the url
+    Args:
+        url (str): url to send request to
+        data (dict|str|list): data to be sent in the request body
+        headers (dict): header to add to the request
+        verify: Verify SSL certificate
+
+    Returns (dict): The response for the request
+
+    """
+    if not isinstance(data, str):
+        data = json.dumps(data)
+    LOG.info("Sending PATCH request to {}. Headers: {}. Data: "
+             "{}".format(url, headers, data))
+    resp = requests.patch(url, headers=headers, data=data, verify=verify)
+
+    if resp.status_code == requests.codes.ok:
+        data = json.loads(resp.text)
+        LOG.info("The returned data is: {}".format(data))
+        return data
+
+    LOG.info("Error {}".format(resp.status_code))
+    return None
diff --git a/automated-pytest-suite/keywords/keystone_helper.py b/automated-pytest-suite/keywords/keystone_helper.py
new file mode 100644
index 0000000..7fb0f96
--- /dev/null
+++ b/automated-pytest-suite/keywords/keystone_helper.py
@@ -0,0 +1,540 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+import re
+
+from consts.auth import Tenant, HostLinuxUser
+from consts.proj_vars import ProjVar
+from utils import cli, exceptions, table_parser
+from utils.clients.ssh import ControllerClient
+from utils.tis_log import LOG
+from keywords import common
+
+
+def get_roles(field='ID', con_ssh=None, auth_info=Tenant.get('admin'),
+              **kwargs):
+    table_ = table_parser.table(cli.openstack('role list', ssh_client=con_ssh,
+                                              auth_info=auth_info)[1])
+    return table_parser.get_multi_values(table_, field, **kwargs)
+
+
+def get_users(field='ID', con_ssh=None, auth_info=Tenant.get('admin'),
+              **kwargs):
+    """
+    Return a list of user id(s) with given user name.
+
+    Args:
+        field (str|list|tuple):
+        con_ssh (SSHClient):
+        auth_info
+
+    Returns (list): list of user id(s)
+
+    """
+    table_ = table_parser.table(cli.openstack('user list', ssh_client=con_ssh,
+                                              auth_info=auth_info)[1])
+    return table_parser.get_multi_values(table_, field, **kwargs)
+
+
+def add_or_remove_role(add_=True, role='admin', project=None, user=None,
+                       domain=None, group=None, group_domain=None,
+                       project_domain=None, user_domain=None, inherited=None,
+                       check_first=True, fail_ok=False,
+                       con_ssh=None, auth_info=Tenant.get('admin')):
+    """
+    Add or remove given role for specified user and tenant. e.g., add admin
+    role to tenant2 user on tenant2 project
+
+    Args:
+        add_(bool): whether to add or remove
+        role (str): an existing role from openstack role list
+        project (str): tenant name.
When unset, the primary tenant name + will be used + user (str): an existing user that belongs to given tenant + domain (str): Include (name or ID) + group (str): Include (name or ID) + group_domain (str): Domain the group belongs to (name or ID). + This can be used in case collisions between group names exist. + project_domain (str): Domain the project belongs to (name or ID). + This can be used in case collisions between project names exist. + user_domain (str): Domain the user belongs to (name or ID). + This can be used in case collisions between user names exist. + inherited (bool): Specifies if the role grant is inheritable to the + sub projects + check_first (bool): whether to check if role already exists for given + user and tenant + fail_ok (bool): whether to throw exception on failure + con_ssh (SSHClient): active controller ssh session + auth_info (dict): auth info to use to executing the add role cli + + Returns (tuple): + + """ + tenant_dict = {} + + if project is None: + tenant_dict = Tenant.get_primary() + project = tenant_dict['tenant'] + + if user is None: + user = tenant_dict.get('user', project) + + if check_first: + existing_roles = get_role_assignments(role=role, project=project, + user=user, + user_domain=user_domain, + group=group, + group_domain=group_domain, + domain=domain, + project_domain=project_domain, + inherited=inherited, + effective_only=False, + con_ssh=con_ssh, + auth_info=auth_info) + if existing_roles: + if add_: + msg = "Role already exists with given criteria: {}".format( + existing_roles) + LOG.info(msg) + return -1, msg + else: + if not add_: + msg = "Role with given criteria does not exist. Do nothing." + LOG.info(msg) + return -1, msg + + msg_str = 'Add' if add_ else 'Remov' + LOG.info( + "{}ing {} role to {} user under {} project".format(msg_str, role, user, + project)) + + sub_cmd = "--user {} --project {}".format(user, project) + if inherited is True: + sub_cmd += ' --inherited' + + optional_args = { + 'domain': domain, + 'group': group, + 'group-domain': group_domain, + 'project-domain': project_domain, + 'user-domain': user_domain, + } + + for key, val in optional_args.items(): + if val is not None: + sub_cmd += ' --{} {}'.format(key, val) + + sub_cmd += ' {}'.format(role) + + cmd = 'role add' if add_ else 'role remove' + res, out = cli.openstack(cmd, sub_cmd, ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if res == 1: + return 1, out + + LOG.info("{} cli accepted. 
Check role is {}ed " + "successfully".format(cmd, msg_str)) + post_roles = get_role_assignments(role=role, project=project, user=user, + user_domain=user_domain, group=group, + group_domain=group_domain, domain=domain, + project_domain=project_domain, + inherited=inherited, effective_only=True, + con_ssh=con_ssh, auth_info=auth_info) + + err_msg = '' + if add_ and not post_roles: + err_msg = "No role is added with given criteria" + elif post_roles and not add_: + err_msg = "Role is not removed" + if err_msg: + if fail_ok: + LOG.warning(err_msg) + return 2, err_msg + else: + raise exceptions.KeystoneError(err_msg) + + succ_msg = "Role is successfully {}ed".format(msg_str) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_role_assignments(field='Role', names=True, role=None, user=None, + project=None, user_domain=None, group=None, + group_domain=None, domain=None, project_domain=None, + inherited=None, effective_only=None, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get values from 'openstack role assignment list' table + + Args: + field (str|list|tuple): role assignment table header to determine + which values to return + names (bool): whether to display role assignment with name + (default is ID) + role (str): an existing role from openstack role list + project (str): tenant name. When unset, the primary tenant name + will be used + user (str): an existing user that belongs to given tenant + domain (str): Include (name or ID) + group (str): Include (name or ID) + group_domain (str): Domain the group belongs to (name or ID). This can + be used in case collisions between group names exist. + project_domain (str): Domain the project belongs to (name or ID). This + can be used in case collisions between project names exist. + user_domain (str): Domain the user belongs to (name or ID). This can + be used in case collisions between user names exist. 
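
A short usage sketch of the role helpers above; the project and user names are illustrative:

```python
from keywords import keystone_helper

# Grant, verify, then revoke the admin role for a hypothetical tenant2 user
keystone_helper.add_or_remove_role(add_=True, role='admin',
                                   project='tenant2', user='tenant2')
roles = keystone_helper.get_role_assignments(role='admin', project='tenant2',
                                              user='tenant2')
assert roles, "admin role assignment not found for tenant2"
keystone_helper.add_or_remove_role(add_=False, role='admin',
                                   project='tenant2', user='tenant2')
```
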
+ inherited (bool): Specifies if the role grant is inheritable to the + sub projects + effective_only (bool): Whether to show effective roles only + con_ssh (SSHClient): active controller ssh session + auth_info (dict): auth info to use to executing the add role cli + + Returns (list): list of values + + """ + optional_args = { + 'role': role, + 'user': user, + 'project': project, + 'domain': domain, + 'group': group, + 'group-domain': group_domain, + 'project-domain': project_domain, + 'user-domain': user_domain, + 'names': True if names else None, + 'effective': True if effective_only else None, + 'inherited': True if inherited else None + } + args = common.parse_args(optional_args) + + role_assignment_tab = table_parser.table( + cli.openstack('role assignment list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + if not role_assignment_tab['headers']: + LOG.info("No role assignment is found with criteria: {}".format(args)) + return [] + + return table_parser.get_multi_values(role_assignment_tab, field) + + +def set_user(user, name=None, project=None, password=None, project_doamin=None, + email=None, description=None, + enable=None, fail_ok=False, auth_info=Tenant.get('admin'), + con_ssh=None): + LOG.info("Updating {}...".format(user)) + arg = '' + optional_args = { + 'name': name, + 'project': project, + 'password': password, + 'project-domain': project_doamin, + 'email': email, + 'description': description, + } + for key, val in optional_args.items(): + if val is not None: + arg += "--{} '{}' ".format(key, val) + + if enable is not None: + arg += '--{} '.format('enable' if enable else 'disable') + + if not arg.strip(): + raise ValueError( + "Please specify the param(s) and value(s) to change to") + + arg += user + + code, output = cli.openstack('user set', arg, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, output + + if name or project or password: + tenant_dictname = user.upper() + Tenant.update(tenant_dictname, username=name, password=password, + tenant=project) + + if password and user == 'admin': + from consts.proj_vars import ProjVar + if ProjVar.get_var('REGION') != 'RegionOne': + LOG.info( + "Run openstack_update_admin_password on secondary region " + "after admin password change") + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + with con_ssh.login_as_root(timeout=30) as con_ssh: + con_ssh.exec_cmd( + "echo 'y' | openstack_update_admin_password '{}'".format( + password)) + + msg = 'User {} updated successfully'.format(user) + LOG.info(msg) + return 0, output + + +def get_endpoints(field='ID', endpoint_id=None, service_name=None, + service_type=None, enabled=None, interface="admin", + region=None, url=None, strict=False, + auth_info=Tenant.get('admin'), con_ssh=None, cli_filter=True): + """ + Get a list of endpoints with given arguments + Args: + field (str|list|tuple): valid header of openstack endpoints list + table. 'ID' + endpoint_id (str): id of the endpoint + service_name (str): Service name of endpoint like novaav3, neutron, + keystone. vim, heat, swift, etc + service_type(str): Service type + enabled (str): True/False + interface (str): Interface of endpoints. valid entries: admin, + internal, public + region (str): RegionOne or RegionTwo + url (str): url of endpoint + strict(bool): + auth_info (dict): + con_ssh (SSHClient): + cli_filter (bool): whether to filter out using cli. 
e.g., openstack + endpoint list --service xxx + + Returns (list): + + """ + pre_args_str = '' + if cli_filter: + pre_args_dict = { + '--service': service_name, + '--interface': interface, + '--region': region, + } + + pre_args = [] + for key, val in pre_args_dict.items(): + if val: + pre_args.append('{}={}'.format(key, val)) + pre_args_str = ' '.join(pre_args) + + output = cli.openstack('endpoint list', positional_args=pre_args_str, + ssh_client=con_ssh, auth_info=auth_info)[1] + if not output.strip(): + LOG.warning("No endpoints returned with param: {}".format(pre_args_str)) + return [] + + table_ = table_parser.table(output) + + kwargs = { + 'ID': endpoint_id, + 'Service Name': service_name, + 'Service Type': service_type, + 'Enabled': enabled, + 'Interface': interface, + 'URL': url, + 'Region': region, + } + kwargs = {k: v for k, v in kwargs.items() if v} + return table_parser.get_multi_values(table_, field, strict=strict, + regex=True, merge_lines=True, **kwargs) + + +def get_endpoints_values(endpoint_id, fields, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Gets the endpoint target field value for given endpoint Id + Args: + endpoint_id: the endpoint id to get the value of + fields: the target field name to retrieve value of + con_ssh: + auth_info + + Returns (list): list of endpoint values + + """ + table_ = table_parser.table( + cli.openstack('endpoint show', endpoint_id, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields) + + +def is_https_enabled(con_ssh=None, source_openrc=True, + auth_info=Tenant.get('admin_platform')): + if not con_ssh: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + table_ = table_parser.table( + cli.openstack('endpoint list', ssh_client=con_ssh, auth_info=auth_info, + source_openrc=source_openrc)[1]) + con_ssh.exec_cmd('unset OS_REGION_NAME') # Workaround + filters = {'Service Name': 'keystone', 'Service Type': 'identity', + 'Interface': 'public'} + keystone_pub = table_parser.get_values(table_=table_, target_header='URL', + **filters)[0] + return 'https' in keystone_pub + + +def delete_users(user, fail_ok=False, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Delete the given openstack user + Args: + user: user name to delete + fail_ok: if the deletion expected to fail + auth_info + con_ssh + + Returns: tuple, (code, msg) + """ + return cli.openstack('user delete', user, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + +def get_projects(field='ID', auth_info=Tenant.get('admin'), con_ssh=None, + strict=False, **filters): + """ + Get list of Project names or IDs + Args: + field (str|list|tuple): + auth_info: + con_ssh: + strict (bool): used for filters + filters + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('project list', ssh_client=con_ssh, auth_info=auth_info)[ + 1]) + return table_parser.get_multi_values(table_, field, strict=strict, + **filters) + + +def create_project(name=None, field='ID', domain=None, parent=None, + description=None, enable=None, con_ssh=None, + rtn_exist=None, fail_ok=False, auth_info=Tenant.get('admin'), + **properties): + """ + Create a openstack project + Args: + name (str|None): + field (str): ID or Name. 
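
For reference, a minimal sketch of querying the identity endpoint with the helpers above; the region value is illustrative:

```python
from keywords import keystone_helper

# Public keystone endpoint URL(s) in a hypothetical RegionOne deployment
urls = keystone_helper.get_endpoints(field='URL', service_name='keystone',
                                     interface='public', region='RegionOne')
if keystone_helper.is_https_enabled():
    assert urls and urls[0].startswith('https'), \
        "public keystone URL should be https"
```
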
Whether to return project id or name if + created successfully + domain (str|None): + parent (str|None): + description (str|None): + enable (bool|None): + con_ssh: + rtn_exist + fail_ok: + auth_info: + **properties: + + Returns (tuple): + (0, ) + (1, ) + + """ + if not name: + existing_names = get_projects(field='Name', + auth_info=Tenant.get('admin'), + con_ssh=con_ssh) + max_count = 0 + end_str = '' + for name in existing_names: + match = re.match(r'tenant(\d+)(.*)', name) + if match: + count, end_str = match.groups() + max_count = max(int(count), max_count) + name = 'tenant{}{}'.format(max_count + 1, end_str) + + LOG.info("Create/Show openstack project {}".format(name)) + + arg_dict = { + 'domain': domain, + 'parent': parent, + 'description': description, + 'enable': True if enable is True else None, + 'disable': True if enable is False else None, + 'or-show': rtn_exist, + 'property': properties, + } + + arg_str = common.parse_args(args_dict=arg_dict, repeat_arg=True) + arg_str += ' {}'.format(name) + + code, output = cli.openstack('project create', arg_str, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + project_ = table_parser.get_value_two_col_table(table_parser.table(output), + field=field) + LOG.info("Project {} successfully created/showed.".format(project_)) + + return 0, project_ + + +def create_user(name=None, field='name', domain=None, project=None, + project_domain=None, rtn_exist=None, + password=HostLinuxUser.get_password(), email=None, + description=None, enable=None, + auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None): + """ + Create an openstack user + Args: + name (str|None): + field: name or id + domain: + project (str|None): default project + project_domain: + rtn_exist (bool) + password: + email: + description: + enable: + auth_info: + fail_ok: + con_ssh: + + Returns (tuple): + (0, ) + (1, ) + + """ + + if not name: + name = 'user' + common.get_unique_name(name_str=name) + + LOG.info("Create/Show openstack user {}".format(name)) + arg_dict = { + 'domain': domain, + 'project': project, + 'project-domain': project_domain, + 'password': password, + 'email': email, + 'description': description, + 'enable': True if enable is True else None, + 'disable': True if enable is False else None, + 'or-show': rtn_exist, + } + + arg_str = '{} {}'.format(common.parse_args(args_dict=arg_dict), name) + + code, output = cli.openstack('user create', arg_str, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + user = table_parser.get_value_two_col_table(table_parser.table(output), + field=field) + LOG.info("Openstack user {} successfully created/showed".format(user)) + + return 0, user diff --git a/automated-pytest-suite/keywords/kube_helper.py b/automated-pytest-suite/keywords/kube_helper.py new file mode 100644 index 0000000..aedb178 --- /dev/null +++ b/automated-pytest-suite/keywords/kube_helper.py @@ -0,0 +1,1117 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import re +import configparser +import time + +import yaml + +from utils import table_parser, exceptions +from utils.tis_log import LOG +from utils.clients.ssh import ControllerClient +from keywords import common, system_helper +from consts.stx import PodStatus + + +def exec_kube_cmd(sub_cmd, args=None, con_ssh=None, fail_ok=False, grep=None): + """ + Execute an kubectl cmd on given ssh client. 
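
A hedged sketch of creating a throwaway project and user with the keystone helpers above; names are auto-generated by the helpers and the default password comes from the configured HostLinuxUser:

```python
from keywords import keystone_helper

# Create a project, then a user that belongs to it
code, project = keystone_helper.create_project(description='pytest sandbox')
assert code == 0
code, user = keystone_helper.create_user(project=project)
assert code == 0

# Clean up the user afterwards
keystone_helper.delete_users(user)
```
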
i.e., 'kubectl ' + Args: + sub_cmd (str): + args (None|str): + con_ssh: + fail_ok: + grep (None|str|tuple|list) + + Returns (tuple): + (0, ) + (1, ) + + """ + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + cmd = 'kubectl {} {}'.format(sub_cmd.strip(), + args.strip() if args else '').strip() + + get_exit_code = True + if cmd.endswith(';echo'): + get_exit_code = False + if grep: + if isinstance(grep, str): + grep = (grep,) + for grep_str in grep: + if '-v ' not in grep_str and '-e ' in grep_str and 'NAME' not in \ + grep_str: + grep_str += ' -e NAME' + cmd += ' | grep --color=never {}'.format(grep_str) + + code, out = con_ssh.exec_cmd(cmd, fail_ok=True, get_exit_code=get_exit_code) + if code <= 0: + return 0, out + + if fail_ok: + return 1, out + else: + raise exceptions.KubeCmdError('CMD: {} Output: {}'.format(cmd, out)) + + +def __get_resource_tables(namespace=None, all_namespaces=None, + resource_types=None, resource_names=None, + labels=None, field_selectors=None, wide=True, + con_ssh=None, fail_ok=False, grep=None): + if not resource_types: + resource_types = '' + elif isinstance(resource_types, (list, tuple)): + resource_types = ','.join(resource_types) + resources = resource_types + + if resource_names: + if ',' in resource_types: + raise ValueError( + "At most 1 resource_types can be specified if resource_names " + "are provided.") + if all_namespaces and not namespace: + raise ValueError( + "all_namespaces is disallowed when resource_names are provided") + if isinstance(resource_names, (list, tuple)): + resource_names = ' '.join(resource_names) + resources = '{} {}'.format(resources, resource_names) + + args_dict = { + '-n': namespace, + '--all-namespaces': True if all_namespaces and not namespace else None, + '-l': labels, + '--field-selector': field_selectors, + '-o': 'wide' if wide else None + } + args = '{} {}'.format(resources, + common.parse_args(args_dict, repeat_arg=False, + vals_sep=',')) + code, out = exec_kube_cmd(sub_cmd='get', args=args, con_ssh=con_ssh, + fail_ok=fail_ok, grep=grep) + if code > 0: + return code, out + + tables = table_parser.tables_kube(out) + return code, tables + + +def get_unhealthy_pods(field='NAME', namespace=None, all_namespaces=True, + pod_names=None, + labels=None, exclude=False, strict=True, con_ssh=None, + **kwargs): + """ + Get pods that are not Completed and not Running + Args: + namespace (str|None): + all_namespaces: (bool|None) + pod_names (str|list|tuple|None): full names of pods to check + labels (str|dict|None): + field (str|tuple|list): + exclude: + strict: + con_ssh: + + Returns (list): + + """ + field_selector = 'status.phase!=Running,status.phase!=Succeeded' + return get_pods(field=field, namespace=namespace, + all_namespaces=all_namespaces, pod_names=pod_names, + labels=labels, field_selectors=field_selector, + exclude=exclude, strict=strict, + con_ssh=con_ssh, **kwargs) + + +def get_pods(field='NAME', namespace=None, all_namespaces=False, pod_names=None, + labels=None, field_selectors=None, + fail_ok=False, con_ssh=None, exclude=False, strict=True, **kwargs): + """ + Get pods values for specified field(s) + Args: + field (str|tuple|list): return values for given header(s) + namespace (str|None): when None, --all-namespaces will be used. + all_namespaces (bool|none): + pod_names (str|list|tuple): Full pod name(s). When specified, labels + and field_selectors will be ignored. + labels (str|dict|None|list|tuple): label selectors. Used only if + full_names are unspecified. 
+ e.g., application=nova,component=compute + field_selectors (str): Used only if full_names are unspecified. + e.g., , 'spec.nodeName=controller-0,status.phase!=Running, + status.phase!=Succeeded' + exclude (bool): + strict (bool): + con_ssh: + fail_ok (bool) + **kwargs: table filters for post processing output to return filtered + values + + Returns (list): examples: + Input: + field=('NAME', 'STATUS') OR 'Name' + labels='application=nova,component=compute', + field_selector='spec.nodeName=compute-0' + Output: + [('nova-compute-compute-0-xdjkds', 'Running')] OR [ + 'nova-compute-compute-0-xdjkds'] + + """ + return get_resources(field=field, namespace=namespace, + all_namespaces=all_namespaces, resource_type='pod', + resource_names=pod_names, labels=labels, + field_selectors=field_selectors, + con_ssh=con_ssh, fail_ok=fail_ok, exclude=exclude, + strict=strict, **kwargs) + + +def get_resources(field='NAME', namespace=None, all_namespaces=None, + resource_names=None, resource_type='pod', + labels=None, field_selectors=None, con_ssh=None, + fail_ok=False, grep=None, + exclude=False, strict=True, **kwargs): + """ + Get resources values for single resource type via kubectl get + Args: + field (str|tuple|list) + namespace (None|str): e.g., kube-system, openstack, default. + all_namespaces (bool|None): used only when namespace is unspecified + resource_names (str|None|list|tuple): e.g., calico-typha + resource_type (str): e.g., "deployments.apps", "pod", "service" + labels (dict|str\list|tuple): Used only when resource_names are + unspecified + field_selectors (dict|str|list|tuple): Used only when resource_names + are unspecified + con_ssh: + fail_ok: + grep (str|None): grep on cmd output + exclude + strict + **kwargs: table filters for post processing return values + + Returns (list): + key is the name prefix, e.g., service, default, deployment.apps, + replicaset.apps + value is a list. Each item is a dict rep for a row with lowercase keys. + e.g., [{'name': 'cinder-api', 'age': '4d19h', ... }, ...] 
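
To make the selector semantics above concrete, a small sketch of querying pods; the label and node values are illustrative:

```python
from keywords import kube_helper

# Pods of a hypothetical nova-compute application on compute-0, with status
pods = kube_helper.get_pods(
    field=('NAME', 'STATUS'), all_namespaces=True,
    labels='application=nova,component=compute',
    field_selectors='spec.nodeName=compute-0')
for name, status in pods:
    print(name, status)
```
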
+ + """ + name_filter = None + if resource_names and ( + (all_namespaces and not namespace) or field_selectors or labels): + name_filter = {'name': resource_names} + resource_names = None + + code, tables = __get_resource_tables(namespace=namespace, + all_namespaces=all_namespaces, + resource_types=resource_type, + resource_names=resource_names, + labels=labels, + field_selectors=field_selectors, + con_ssh=con_ssh, fail_ok=fail_ok, + grep=grep) + if code > 0: + output = tables + if 'NAME ' not in output: # no resource returned + return [] + + output = output.split('\nError from server')[0] + tables = table_parser.tables_kube(output) + + final_table = tables[0] + if len(tables) > 1: + combined_values = final_table['values'] + column_count = len(combined_values) + for table_ in tables[1:]: + table_values = table_['values'] + combined_values = [combined_values[i] + table_values[i] for i in + range(column_count)] + final_table['values'] = combined_values + + if name_filter: + final_table = table_parser.filter_table(final_table, **name_filter) + + return table_parser.get_multi_values(final_table, fields=field, + zip_values=True, strict=strict, + exclude=exclude, **kwargs) + + +def apply_pod(file_path, pod_name, namespace=None, recursive=None, + select_all=None, + labels=None, con_ssh=None, fail_ok=False, + check_both_controllers=True): + """ + Apply a pod from given file via kubectl apply + Args: + file_path (str): + pod_name (str): + namespace (None|str): + recursive (None|bool): + select_all (None|bool): + labels (dict|str|list|tuple|None): key value pairs + con_ssh: + fail_ok: + check_both_controllers (bool): + + Returns (tuple): + (0, (dict)) + (1, ) + (2, ) # pod is not running after apply + (3, ) # pod if not running on the other controller after + apply + + """ + arg_dict = { + '--all': select_all, + '-l': labels, + '--recursive': recursive, + } + + arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',') + arg_str += ' -f {}'.format(file_path) + + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + code, output = exec_kube_cmd(sub_cmd='apply', args=arg_str, con_ssh=con_ssh, + fail_ok=fail_ok) + if code > 0: + return 1, output + + LOG.info("Check pod is running on current host") + res = wait_for_pods_status(pod_names=pod_name, namespace=namespace, + status=PodStatus.RUNNING, + con_ssh=con_ssh, fail_ok=fail_ok) + if not res: + return 2, "Pod {} is not running after apply on active " \ + "controller".format(pod_name) + + if check_both_controllers and not system_helper.is_aio_simplex( + con_ssh=con_ssh): + LOG.info("Check pod is running on the other controller as well") + con_name = 'controller-1' if con_ssh.get_hostname() == 'controller-0' \ + else 'controller-0' + from keywords import host_helper + with host_helper.ssh_to_host(hostname=con_name, + con_ssh=con_ssh) as other_con: + res, pods_info = wait_for_pods_status(pod_names=pod_name, + namespace=namespace, + con_ssh=other_con, + fail_ok=fail_ok) + if not res: + return 3, "Pod {} is not running after apply on standby " \ + "controller".format(pod_name) + + LOG.info("{} pod is successfully applied and running".format(pod_name)) + return 0, pod_name + + +def wait_for_pods_status(pod_names=None, partial_names=None, labels=None, + namespace=None, status=PodStatus.RUNNING, + timeout=120, check_interval=3, con_ssh=None, + fail_ok=False, strict=False, **kwargs): + """ + Wait for pod(s) to reach given status via kubectl get pod + Args: + pod_names (str|list|tuple): full name of the pods + partial_names (str|list|tuple): 
Used only if pod_names are not provided + labels (str|list|tuple|dict|None): Used only if pod_names are not + provided + namespace (None|str): + status (str|None|list): None means any state as long as pod exists. + timeout: + check_interval: + con_ssh: + fail_ok: + strict (bool): + + Returns (tuple): + (True, ) # actual_pods_info is a dict with + pod_name as key, and pod_info(dict) as value + (False, ) + + """ + + pods_to_check = [] + if pod_names: + if isinstance(pod_names, str): + pod_names = [pod_names] + else: + pod_names = list(pod_names) + labels = partial_names = None + pods_to_check = list(pod_names) + elif partial_names: + if isinstance(partial_names, str): + partial_names = [partial_names] + else: + partial_names = list(partial_names) + kwargs['NAME'] = partial_names + pods_to_check = list(partial_names) + + actual_status = {} + end_time = time.time() + timeout + + while time.time() < end_time: + pod_full_names = pods_to_check if pod_names else None + pods_values = get_pods(pod_names=pod_full_names, + field=('NAME', 'status'), namespace=namespace, + labels=labels, + strict=strict, fail_ok=True, con_ssh=con_ssh, + **kwargs) + if not pods_values: + # No pods returned, continue to check. + time.sleep(check_interval) + continue + + continue_check = False # This is used when only labels are provided + for pod_info in pods_values: + pod_name, pod_status = pod_info + actual_status[pod_name] = pod_status + if status and pod_status not in status: + # Status not as expected, continue to wait + continue_check = True + if partial_names: + # In this case, there might be multiple pods that matches + # 1 partial name, so the partial name that + # matches current pod could have been removed if there + # was one other pod that also matched the name + # had reached the desired state. In this case, we will + # add the partial name back to check list + for partial_name in partial_names: + if partial_name in pod_name and partial_name not in \ + pods_to_check: + pods_to_check.append(partial_name) + break + else: + # Criteria met for current pod, remove it from check_list + if pod_names: + pods_to_check.remove(pod_name) + elif partial_names: + for partial_name in partial_names: + if partial_name in pod_name and partial_name in \ + pods_to_check: + pods_to_check.remove(partial_name) + break + + if not pods_to_check and not continue_check: + return True, actual_status + + time.sleep(check_interval) + + name_str = 'Names: {}'.format(pods_to_check) if pods_to_check else '' + label_str = 'Labels: {}'.format(labels) if labels else '' + criteria = '{} {}'.format(name_str, label_str).strip() + msg = "Pods did not reach expected status within {}s. Criteria not met: " \ + "{}. 
Actual info: {}".format(timeout, criteria, actual_status) + if fail_ok: + LOG.info(msg) + return False, actual_status + + raise exceptions.KubeError(msg) + + +def wait_for_resources_gone(resource_names=None, resource_type='pod', + namespace=None, timeout=120, + check_interval=3, con_ssh=None, fail_ok=False, + strict=True, exclude=False, **kwargs): + """ + Wait for pod(s) to be gone from kubectl get + Args: + resource_names (str|list|tuple): full name of a pod + resource_type (str): + namespace (None|str): + timeout: + check_interval: + con_ssh: + fail_ok: + strict (bool): + exclude + **kwargs + + Returns (tuple): + (True, None) + (False, ) # actual_pods_info is a dict with + pod_name as key, and pod_info(dict) as value + + """ + + end_time = time.time() + timeout + resources_to_check = resource_names + + while time.time() < end_time: + + resources_to_check = get_resources(resource_names=resources_to_check, + namespace=namespace, + resource_type=resource_type, + con_ssh=con_ssh, + fail_ok=True, strict=strict, + exclude=exclude, **kwargs) + + if not resources_to_check: + return True, resources_to_check + + time.sleep(check_interval) + + msg = 'Resources did not disappear in {} seconds. Remaining resources: ' \ + '{}, namespace: {}'.format(timeout, resources_to_check, namespace) + + if fail_ok: + LOG.info(msg) + return False, resources_to_check + + raise exceptions.KubeError(msg) + + +def delete_resources(resource_names=None, select_all=None, resource_types='pod', + namespace=None, + recursive=None, labels=None, con_ssh=None, fail_ok=False, + post_check=True, + check_both_controllers=True): + """ + Delete pods via kubectl delete + Args: + resource_names (None|str|list|tuple): + select_all (None|bool): + resource_types (str|list|tuple): + namespace (None|str): + recursive (bool): + labels (None|dict): + con_ssh: + fail_ok: + post_check (bool): Whether to check if resources are gone after deletion + check_both_controllers (bool): + + Returns (tuple): + (0, None) # pods successfully deleted + (1, ) + (2, (list of dict)) # pod(s) still exist in + kubectl after deletion + (3, (list of dict)) # + pod(s) still exist on the other controller + + """ + arg_dict = { + '--all': select_all, + '-l': labels, + '--recursive': recursive, + } + + arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',') + if resource_types: + if isinstance(resource_types, str): + resource_types = [resource_types] + arg_str = '{} {}'.format(','.join(resource_types), arg_str).strip() + + if resource_names: + if isinstance(resource_names, str): + resource_names = [resource_names] + arg_str = '{} {}'.format(arg_str, ' '.join(resource_names)) + + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + code, output = exec_kube_cmd(sub_cmd='delete', args=arg_str, + con_ssh=con_ssh, fail_ok=fail_ok) + if code > 0: + return 1, output + + if post_check: + def __wait_for_resources_gone(ssh_client): + final_remaining = [] + if resource_types: + for resource_type in resource_types: + res, remaining_res = wait_for_resources_gone( + resource_names=resource_names, + resource_type=resource_type, + namespace=namespace, + con_ssh=ssh_client, fail_ok=fail_ok) + if not res: + final_remaining += remaining_res + else: + res, final_remaining = wait_for_resources_gone( + resource_names=resource_names, + namespace=namespace, + con_ssh=ssh_client, fail_ok=fail_ok) + return final_remaining + + LOG.info("Check pod is not running on current host") + + remaining = __wait_for_resources_gone(con_ssh) + if remaining: + return 2, remaining + 
+ if check_both_controllers and not system_helper.is_aio_simplex( + con_ssh=con_ssh): + LOG.info("Check pod is running on the other controller as well") + con_name = 'controller-1' if \ + con_ssh.get_hostname() == 'controller-0' else 'controller-0' + from keywords import host_helper + with host_helper.ssh_to_host(hostname=con_name, + con_ssh=con_ssh) as other_con: + remaining = __wait_for_resources_gone(other_con) + if remaining: + return 3, remaining + + LOG.info("{} are successfully removed.".format(resource_names)) + return 0, None + + +def get_pods_info_yaml(type_names='pods', namespace=None, con_ssh=None, + fail_ok=False): + """ + pods info parsed from yaml output of kubectl get cmd + Args: + namespace (None|str): e.g., kube-system, openstack, default. If set + to 'all', use --all-namespaces. + type_names (None|list|tuple|str): e.g., ("deployments.apps", + "services/calico-typha") + con_ssh: + fail_ok: + + Returns (list): each item is a pod info dictionary + + """ + if isinstance(type_names, (list, tuple)): + type_names = ','.join(type_names) + args = type_names + + if namespace == 'all': + args += ' --all-namespaces' + elif namespace: + args += ' --namespace={}'.format(namespace) + + args += ' -o yaml' + + code, out = exec_kube_cmd(sub_cmd='get', args=args, con_ssh=con_ssh, + fail_ok=fail_ok) + if code > 0: + return [] + + try: + pods_info = yaml.load(out) + except yaml.YAMLError: + LOG.warning('Output is not yaml') + return [] + + pods_info = pods_info.get('items', [pods_info]) + + return pods_info + + +def get_pod_value_jsonpath(type_name, jsonpath, namespace=None, con_ssh=None): + """ + Get value for specified pod with jsonpath + Args: + type_name (str): e.g., 'service/kubernetes' + jsonpath (str): e.g., '{.spec.ports[0].targetPort}' + namespace (str|None): e.g., 'kube-system' + con_ssh: + + Returns (str): + + """ + args = '{} -o jsonpath="{}"'.format(type_name, jsonpath) + if namespace: + args += ' --namespace {}'.format(namespace) + + args += ';echo' + value = exec_kube_cmd('get', args, con_ssh=con_ssh)[1] + return value + + +def get_nodes(hosts=None, status=None, field='STATUS', exclude=False, + con_ssh=None, fail_ok=False): + """ + Get nodes values via 'kubectl get nodes' + Args: + hosts (None|str|list|tuple): table filter + status (None|str|list|tuple): table filter + field (str|list|tuple): any header of the nodes table + exclude (bool): whether to exclude rows with given criteria + con_ssh: + fail_ok: + + Returns (None|list): None if cmd failed. 
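
For the jsonpath lookup described above, a one-line usage sketch using the values from the docstring's own example:

```python
from keywords import kube_helper

# Target port of the kubernetes service in the default namespace
target_port = kube_helper.get_pod_value_jsonpath(
    'service/kubernetes', '{.spec.ports[0].targetPort}')
```
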
+ + """ + code, output = exec_kube_cmd('get', args='nodes', con_ssh=con_ssh, + fail_ok=fail_ok) + if code > 0: + return None + + table_ = table_parser.table_kube(output) + if hosts or status: + table_ = table_parser.filter_table(table_, exclude=exclude, + **{'NAME': hosts, 'STATUS': status}) + + return table_parser.get_multi_values(table_, field) + + +def wait_for_nodes_ready(hosts=None, timeout=120, check_interval=5, + con_ssh=None, fail_ok=False): + """ + Wait for hosts in ready state via kubectl get nodes + Args: + hosts (None|list|str|tuple): Wait for all hosts ready if None is + specified + timeout: + check_interval: + con_ssh: + fail_ok: + + Returns (tuple): + (True, None) + (False, (list)) + + """ + end_time = time.time() + timeout + nodes_not_ready = None + while time.time() < end_time: + nodes_not_ready = get_nodes(status='Ready', field='NAME', + exclude=True, con_ssh=con_ssh, + fail_ok=True) + + if nodes_not_ready and hosts: + nodes_not_ready = list(set(nodes_not_ready) & set(hosts)) + + if nodes_not_ready: + LOG.info('{} not ready yet'.format(nodes_not_ready)) + elif nodes_not_ready is not None: + LOG.info("All nodes are ready{}".format( + ': {}'.format(hosts) if hosts else '')) + return True, None + + time.sleep(check_interval) + + msg = '{} are not ready within {}s'.format(nodes_not_ready, timeout) + LOG.warning(msg) + if fail_ok: + return False, nodes_not_ready + else: + raise exceptions.KubeError(msg) + + +def exec_cmd_in_container(cmd, pod, namespace=None, container_name=None, + stdin=None, tty=None, con_ssh=None, + fail_ok=False): + """ + Execute given cmd in given pod via kubectl exec + Args: + cmd: + pod: + namespace: + container_name: + stdin: + tty: + con_ssh: + fail_ok: + + Returns (tuple): + (0, ) + (1, ) + + """ + args = pod + if namespace: + args += ' -n {}'.format(namespace) + if container_name: + args += ' -c {}'.format(container_name) + if stdin: + args += ' -i' + if tty: + args += ' -t' + args += ' -- {}'.format(cmd) + + code, output = exec_kube_cmd(sub_cmd='exec', args=args, con_ssh=con_ssh, + fail_ok=fail_ok) + return code, output + + +def wait_for_pods_healthy(pod_names=None, namespace=None, all_namespaces=True, + labels=None, timeout=300, + check_interval=5, con_ssh=None, fail_ok=False, + exclude=False, strict=False, **kwargs): + """ + Wait for pods ready + Args: + pod_names (list|tuple|str|None): full name of pod(s) + namespace (str|None): + all_namespaces (bool|None) + labels (str|dict|list|tuple|None): + timeout: + check_interval: + con_ssh: + fail_ok: + exclude (bool) + strict (bool): strict applies to node and name matching if given + **kwargs + + Returns (tuple): + + """ + LOG.info("Wait for pods ready..") + if not pod_names: + pod_names = None + elif isinstance(pod_names, str): + pod_names = [pod_names] + + bad_pods = None + end_time = time.time() + timeout + while time.time() < end_time: + bad_pods_info = get_unhealthy_pods(labels=labels, + field=('NAME', 'STATUS'), + namespace=namespace, + all_namespaces=all_namespaces, + con_ssh=con_ssh, exclude=exclude, + strict=strict, **kwargs) + bad_pods = {pod_info[0]: pod_info[1] for pod_info in bad_pods_info if + (not pod_names or pod_info[0] in pod_names)} + if not bad_pods: + LOG.info("Pods are Completed or Running.") + if pod_names: + pod_names = [pod for pod in pod_names if + not re.search('audit-|init-', pod)] + if not pod_names: + return True + + is_ready = wait_for_running_pods_ready( + pod_names=pod_names, + namespace=namespace, + all_namespaces=all_namespaces, + labels=labels, timeout=int(end_time 
- time.time()), + strict=strict, + con_ssh=con_ssh, + fail_ok=fail_ok, **kwargs) + return is_ready + time.sleep(check_interval) + + msg = 'Some pods are not Running or Completed: {}'.format(bad_pods) + LOG.warning(msg) + if fail_ok: + return False + dump_pods_info(con_ssh=con_ssh) + raise exceptions.KubeError(msg) + + +def wait_for_running_pods_ready(pod_names=None, namespace=None, + all_namespaces=False, labels=None, timeout=300, + fail_ok=False, con_ssh=None, exclude=False, + strict=False, **kwargs): + """ + Wait for Running pods to be Ready, such as 1/1, 3/3 + Args: + pod_names: + namespace: + all_namespaces: + labels: + timeout: + fail_ok: + con_ssh: + exclude: + strict: + **kwargs: + + Returns (bool): + + """ + unready_pods = get_unready_running_pods(namespace=namespace, + all_namespaces=all_namespaces, + pod_names=pod_names, labels=labels, + exclude=exclude, strict=strict, + con_ssh=con_ssh, **kwargs) + if not unready_pods: + return True + + end_time = time.time() + timeout + while time.time() < end_time: + pods_info = get_pods(field=('NAME', 'READY'), namespace=namespace, + all_namespaces=all_namespaces, + pod_names=unready_pods, con_ssh=con_ssh) + for pod_info in pods_info: + pod_name, pod_ready = pod_info + ready_count, total_count = pod_ready.split('/') + if ready_count == total_count: + unready_pods.remove(pod_name) + if not unready_pods: + return True + + msg = "Some pods are not ready within {}s: {}".format(timeout, unready_pods) + LOG.warning(msg) + if fail_ok: + return False + raise exceptions.KubeError(msg) + + +def get_unready_running_pods(pod_names=None, namespace=None, + all_namespaces=False, labels=None, + con_ssh=None, exclude=False, strict=False, + **kwargs): + """ + Get Running pods that are not yet Ready. + Args: + pod_names: + namespace: + all_namespaces: + labels: + con_ssh: + exclude: + strict: + **kwargs: + + Returns (list): pod names + + """ + # field_selector does not work with pod_names, determine whether to use + # field_selector or do post filtering instead + # If field_selector is specified, the underlying get_pods function will + # use pod_names for post filtering + if exclude or labels or (not namespace and all_namespaces) or not pod_names: + field_selector = 'status.phase=Running' + else: + field_selector = None + kwargs['Status'] = 'Running' + + pods_running = get_pods(field=('NAME', 'READY'), namespace=namespace, + all_namespaces=all_namespaces, + pod_names=pod_names, labels=labels, + field_selectors=field_selector, grep='-v 1/1', + exclude=exclude, strict=strict, con_ssh=con_ssh, + fail_ok=True, **kwargs) + not_ready_pods = [] + for pod_info in pods_running: + pod_name, pod_ready = pod_info + ready_count, total_count = pod_ready.split('/') + if ready_count != total_count: + not_ready_pods.append(pod_name) + + return not_ready_pods + + +def wait_for_openstack_pods_status(pod_names=None, application=None, + component=None, status=PodStatus.RUNNING, + con_ssh=None, timeout=60, check_interval=5, + fail_ok=False): + """ + Wait for openstack pods to be in Completed or Running state + Args: + pod_names (str|tuple|list|None): + application (str|None): only used when pod_names are not provided + component (str|None): only used when pod_names are not provided + status (str|tuple|list|None): + con_ssh: + timeout: + check_interval: + fail_ok: + + Returns: + + """ + if not pod_names and not application and not component: + raise ValueError( + 'pod_names, or application and component have to be provided to ' + 'filter out pods') + + labels = None + if not 
pod_names: + labels = [] + if application: + labels.append('application={}'.format(application)) + if component: + labels.append('component={}'.format(component)) + + return wait_for_pods_status(pod_names=pod_names, labels=labels, + status=status, namespace='openstack', + con_ssh=con_ssh, check_interval=check_interval, + timeout=timeout, fail_ok=fail_ok) + + +def get_pod_logs(pod_name, namespace='openstack', grep_pattern=None, + tail_count=10, strict=False, + fail_ok=False, con_ssh=None): + """ + Get logs for given pod via kubectl logs cmd + Args: + pod_name (str): partial or full pod_name. If full name, set strict to + True. + namespace (str|None): + grep_pattern (str|None): + tail_count (int|None): + strict (bool): + fail_ok: + con_ssh: + + Returns (str): + + """ + if pod_name and not strict: + grep = '-E -i "{}|NAME"'.format(pod_name) + pod_name = get_resources(namespace='openstack', resource_type='pod', + con_ssh=con_ssh, rtn_list=True, + grep=grep, fail_ok=fail_ok)[0].get('name') + namespace = '-n {} '.format(namespace) if namespace else '' + + grep = '' + if grep_pattern: + if isinstance(grep_pattern, str): + grep_pattern = (grep_pattern,) + grep = ''.join( + [' | grep --color=never {}'.format(grep_str) for grep_str in + grep_pattern]) + tail = ' | tail -n {}'.format(tail_count) if tail_count else '' + args = '{}{}{}{}'.format(namespace, pod_name, grep, tail) + code, output = exec_kube_cmd(sub_cmd='logs', args=args, con_ssh=con_ssh) + if not output and not fail_ok: + raise exceptions.KubeError( + "No kubectl logs found with args: {}".format(args)) + return output + + +def dump_pods_info(con_ssh=None): + """ + Dump pods info for debugging purpose. + Args: + con_ssh: + + Returns: + + """ + LOG.info('------- Dump pods info --------') + exec_kube_cmd('get pods', + '--all-namespaces -o wide | grep -v -e Running -e Completed', + con_ssh=con_ssh, + fail_ok=True) + exec_kube_cmd( + 'get pods', + "--all-namespaces -o wide | grep -v -e Running -e Completed " + "-e NAMESPACE | awk " + + """'{system("kubectl describe pods -n "$1" "$2)}'""""", + con_ssh=con_ssh, fail_ok=True) + + +def get_openstack_pods(field='Name', namespace='openstack', application=None, + component=None, pod_names=None, + extra_labels=None, field_selectors=None, + exclude_label=False, fail_ok=False, con_ssh=None, + strict=True, exclude=False, **kwargs): + """ + Get openstack pods via kubectl get pods + Note that pod labels can be found via kubectl get pods -n + --show-labels + Args: + field (str|list|tuple): + namespace: + application (str|None): label: application + component (str|None): label: component + pod_names + extra_labels (str|None): + field_selectors (str|list|tuple|dict|None): + exclude_label + fail_ok: + con_ssh: + exclude: + strict: + **kwargs: + + Returns (list): + + """ + if pod_names: + labels = None + else: + connector = '!=' if exclude_label else '=' + labels = [] + if application: + labels.append('application{}{}'.format(connector, application)) + if component: + labels.append('component{}{}'.format(connector, component)) + if extra_labels: + labels.append(extra_labels) + labels = ','.join(labels) + + pods = get_pods(pod_names=pod_names, field=field, namespace=namespace, + labels=labels, fail_ok=fail_ok, + field_selectors=field_selectors, strict=strict, + exclude=exclude, con_ssh=con_ssh, **kwargs) + if not pods: + msg = "No pods found for namespace - {} with selectors: {}".format( + namespace, labels) + LOG.info(msg) + if not fail_ok: + raise exceptions.KubeError(msg) + + return pods + + +def 
get_openstack_configs(conf_file, configs=None, node=None, pods=None, + label_component=None, label_app=None, + fail_ok=False, con_ssh=None): + """ + Get config values for openstack pods with given chart + Args: + pods (str|list|tuple): openstack pod name(s) + label_app (str|None): e.g., nova, neutron, panko, ... + label_component (str|None): e.g., api, compute, etc. + conf_file (str): config file path inside the filtered openstack + container, e.g., /etc/nova/nova.conf + configs (dict): {(str): (str|list|tuple), + : ..} + e.g., {'database': 'event_time_to_live'} + node (str|None) + fail_ok: + con_ssh: + + Returns (dict): {(str): (dict), ... } + + """ + if not pods and not (label_component and label_app): + raise ValueError('Either pods, or label_component and label_app ' + 'have to be specified to locate the containers') + + if not pods: + pods = get_openstack_pods(component=label_component, + application=label_app, fail_ok=fail_ok, + node=node, + con_ssh=con_ssh) + elif isinstance(pods, str): + pods = (pods,) + + LOG.info('Getting {} {} values from openstack pods: {}'.format(conf_file, + configs, + pods)) + + cmd = 'cat {}'.format(conf_file) + if configs: + all_fields = [] + section_filter = r'$1 ~ /^\[.*\]/' + for fields in configs.values(): + if isinstance(fields, str): + all_fields.append(fields) + elif isinstance(fields, (tuple, list)): + all_fields += list(fields) + + fields_filter = '|| '.join( + ['$1 ~ /^{}/'.format(field) for field in set(all_fields)]) + cmd += r" | awk '{{ if ( {} || {}) print }}' | grep --color=never " \ + r"--group-separator='' -B 1 -v '\[.*\]'". \ + format(section_filter, fields_filter) + + config_values = {} + for pod in pods: + code, output = exec_cmd_in_container(cmd, pod=pod, + namespace='openstack', + con_ssh=con_ssh, fail_ok=fail_ok) + if code > 0: + config_values[pod] = {} + continue + + # Remove irrelevant string at beginning of the output + output = "[{}".format( + re.split(r'\n\[', r'\n{}'.format(output), maxsplit=1)[-1]) + settings = configparser.ConfigParser() + settings.read_string(output) + config_values[pod] = settings + + return config_values diff --git a/automated-pytest-suite/keywords/network_helper.py b/automated-pytest-suite/keywords/network_helper.py new file mode 100755 index 0000000..8319d1a --- /dev/null +++ b/automated-pytest-suite/keywords/network_helper.py @@ -0,0 +1,5696 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import ipaddress +import math +import re +import os +import time +from collections import Counter +from contextlib import contextmanager + +import pexpect + +from consts.auth import Tenant, HostLinuxUser +from consts.filepaths import UserData +from consts.stx import Networks, PING_LOSS_RATE, MELLANOX4, \ + VSHELL_PING_LOSS_RATE, DevClassID, UUID +from consts.proj_vars import ProjVar +from consts.timeout import VMTimeout +from keywords import common, keystone_helper, host_helper, system_helper +from testfixtures.fixture_resources import ResourceCleanup +from utils import table_parser, cli, exceptions +from utils.clients.ssh import NATBoxClient, get_cli_client, ControllerClient +from utils.tis_log import LOG + + +def is_valid_ip_address(ip=None): + """ + Validate the input IP address + + Args: + ip: IPv4 or IPv6 address + + Returns: + True: valid IPv4 or IPv6 address + False: otherwise + """ + return bool(get_ip_address_str(ip)) + + +def get_ip_address_str(ip=None): + """ + Get the representation of the input IP address + + Args: + ip: IPv4 or IPv6 address + + Returns: + str: string representation of the input IP address if it's valid + None: otherwise + """ + try: + ipaddr = ipaddress.ip_address(ip) + return str(ipaddr) + except ValueError: + # invalid IPv4 or IPv6 address + return None + + +def create_network(name=None, shared=None, project=None, network_type=None, + segmentation_id=None, qos=None, + physical_network=None, vlan_transparent=None, + port_security=None, avail_zone=None, external=None, + default=None, tags=None, fail_ok=False, auth_info=None, + con_ssh=None, cleanup=None): + """ + Create a network for given tenant + + Args: + name (str): name of the network + shared (bool) + project: such as tenant1, tenant2. + network_type (str): The physical mechanism by which the virtual + network is implemented + segmentation_id (None|str): w VLAN ID for VLAN networks + qos + physical_network (str): Name of the physical network over which the + virtual network is implemented + vlan_transparent(None|bool): Create a VLAN transparent network + port_security (None|bool) + avail_zone (None|str) + external (None|bool) + default (None|bool): applicable only if external=True. 
+ tags (None|False|str|list|tuple) + fail_ok (bool): + auth_info (dict): run 'openstack network create' cli using these + authorization info + con_ssh (SSHClient): + cleanup (str|None): function, module, class, session or None + + Returns (tuple): (rnt_code (int), net_id (str), message (str)) + + """ + if name is None: + name = common.get_unique_name(name_str='net') + + args = name + if project is not None: + tenant_id = keystone_helper.get_projects(field='ID', name=project, + con_ssh=con_ssh)[0] + args += ' --project ' + tenant_id + + if shared is not None: + args += ' --share' if shared else ' --no-share' + if vlan_transparent is not None: + args += ' --transparent-vlan' if vlan_transparent else \ + ' --no-transparent-vlan' + if port_security is not None: + args += ' --enable-port-security' if port_security else \ + ' --disable-port-security' + + if external: + args += ' --external' + if default is not None: + args += ' --default' if default else ' --no-default' + elif external is False: + args += ' --internal' + + if tags is False: + args += ' --no-tag' + elif tags: + if isinstance(tags, str): + tags = [tags] + for tag in tags: + args += ' --tag ' + tag + + if segmentation_id: + args += ' --provider:segmentation_id ' + segmentation_id + if network_type: + args += ' --provider:network_type ' + network_type + if physical_network: + args += ' --provider:physical_network ' + physical_network + if avail_zone: + args += ' --availability-zone-hint ' + avail_zone + if qos: + args += ' --wrs-tm:qos ' + qos + + LOG.info("Creating network: Args: {}".format(args)) + code, output = cli.openstack('network create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + table_ = table_parser.table(output) + net_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup and net_id: + ResourceCleanup.add('network', net_id, scope=cleanup) + + if code == 1: + return 1, output + + succ_msg = "Network {} is successfully created".format(net_id) + LOG.info(succ_msg) + return 0, net_id + + +def create_subnet(network, name=None, subnet_range=None, gateway=None, + dhcp=None, dns_servers=None, + allocation_pools=None, ip_version=None, subnet_pool=None, + use_default_subnet_pool=None, + project=None, project_domain=None, prefix_length=None, + description=None, host_routes=None, + ipv6_ra_mode=None, ipv6_addr_mode=None, network_segment=None, + service_types=None, + tags=None, no_tag=None, fail_ok=False, auth_info=None, + con_ssh=None, cleanup=None): + """ + Create a subnet with given parameters + + Args: + network (str): id of the network to create subnet for + name (str|None): name of the subnet + subnet_range (str|None): such as "192.168.3.0/24" + project (str|None): such as tenant1, tenant2. + project_domain (str|None) + gateway (str): Valid values: , auto, none + dhcp (bool): whether or not to enable DHCP + dns_servers (list|tuple|str|None): DNS name servers. e.g., + ["147.11.57.133", "128.224.144.130", "147.11.57.128"] + allocation_pools (list|dict|None): {'start': , 'end': + 'end_ip'} + ip_version (int|str|None): 4, or 6 + subnet_pool (str|None): ID or name of subnetpool from which this + subnet will obtain a CIDR. 
+ use_default_subnet_pool (bool|None) + prefix_length (str|None) + description (str|None) + host_routes (str|None) + ipv6_addr_mode (str|None) + ipv6_ra_mode (str|None) + network_segment (str|None) + service_types (list|tuple|str|None) + tags (list|tuple|str|None) + no_tag (bool|None) + fail_ok (bool): + auth_info (dict): run the neutron subnet-create cli using these + authorization info + con_ssh (SSHClient): + cleanup (str|None) + + Returns (tuple): (rnt_code (int), subnet_id (str)) + + """ + + if subnet_range is None and subnet_pool is None: + raise ValueError("Either cidr or subnet_pool has to be specified.") + + args_dict = { + '--project': project, + '--project-domain': project_domain, + '--subnet-pool': subnet_pool, + '--use-default-subnet-pool': use_default_subnet_pool, + '--prefix-length': prefix_length, + '--subnet-range': subnet_range, + '--dhcp': True if dhcp else None, + '--no-dhcp': True if dhcp is False else None, + '--gateway': gateway, + '--ip-version': ip_version, + '--ipv6-ra-mode': ipv6_ra_mode, + '--ipv6-address-mode': ipv6_addr_mode, + '--network-segment': network_segment, + '--network': network, + '--description': description, + '--allocation-pool': allocation_pools, + '--dns-nameserver': dns_servers, + '--host-route': host_routes, + '--service-type': service_types, + '--tag': tags, + '--no-tag': no_tag + } + + if not name: + name = '{}-subnet'.format( + get_net_name_from_id(network, con_ssh=con_ssh, auth_info=auth_info)) + name = "{}-{}".format(name, common.Count.get_subnet_count()) + args = '{} {}'.format( + common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name) + + LOG.info("Creating subnet for network: {}. Args: {}".format(network, args)) + code, output = cli.openstack('subnet create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + table_ = table_parser.table(output) + subnet_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup and subnet_id: + ResourceCleanup.add('subnet', subnet_id, scope=cleanup) + + if code > 0: + return 1, output + + LOG.info( + "Subnet {} is successfully created for network {}".format(subnet_id, + network)) + return 0, subnet_id + + +def delete_subnets(subnets, auth_info=Tenant.get('admin'), con_ssh=None, + fail_ok=False): + """ + Delete subnet(s) + Args: + subnets (str|list|tuple): + auth_info: + con_ssh: + fail_ok: + + Returns (tuple): + + """ + if isinstance(subnets, str): + subnets = (subnets,) + + args = ' '.join(subnets) + LOG.info("Deleting subnet {}".format(subnets)) + code, output = cli.openstack('subnet delete', args, ssh_client=con_ssh, + fail_ok=True, auth_info=auth_info) + + if code > 0: + return 1, output + + field = 'ID' if re.match(UUID, subnets[0]) else 'Name' + undeleted_subnets = list(set(subnets) & set( + get_subnets(auth_info=auth_info, con_ssh=con_ssh, field=field))) + if undeleted_subnets: + msg = "Subnet(s) still listed in openstack subnet list after " \ + "deletion: {}".format(undeleted_subnets) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.NeutronError(msg) + + succ_msg = "Subnet(s) successfully deleted: {}".format(subnets) + LOG.info(succ_msg) + return 0, succ_msg + + +def set_subnet(subnet, allocation_pools=None, dns_servers=None, + host_routes=None, service_types=None, + tags=None, no_tag=None, name=None, dhcp=None, gateway=None, + network_segment=None, description=None, + no_dns_servers=None, no_host_routes=None, + no_allocation_pool=None, + auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None): + kwargs = locals() + kwargs['unset'] = 
False + return __update_subnet(**kwargs) + + +def unset_subnet(subnet, allocation_pools=None, dns_servers=None, + host_routes=None, service_types=None, + tags=None, no_tag=None, auth_info=Tenant.get('admin'), + fail_ok=False, con_ssh=None): + kwargs = locals() + kwargs['unset'] = True + return __update_subnet(**kwargs) + + +def __update_subnet(subnet, unset=False, allocation_pools=None, + dns_servers=None, host_routes=None, service_types=None, + tags=None, no_tag=None, name=None, dhcp=None, gateway=None, + network_segment=None, description=None, + no_dns_servers=None, no_host_routes=None, + no_allocation_pool=None, + auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None): + """ + set/unset given setup + Args: + subnet (str): + unset (bool): set or unset + allocation_pools (None|str|tuple|list): + dns_servers (None|str|tuple|list): + host_routes (None|str|tuple|list): + service_types (None|str|tuple|list): + tags (None|bool): + name (str|None): + dhcp (None|bool): + gateway (str|None): valid str: or 'none' + description: + auth_info: + fail_ok: + con_ssh: + + Returns: + + """ + LOG.info("Update subnet {}".format(subnet)) + + arg_dict = { + '--allocation-pool': allocation_pools, + '--dns-nameserver': dns_servers, + '--host-route': host_routes, + '--service-type': service_types, + '--tag': tags, + } + + if unset: + arg_dict.update(**{'all-tag': True if no_tag else None}) + cmd = 'unset' + else: + set_only_dict = { + '--name': name, + '--dhcp': True if dhcp else None, + '--gateway': gateway, + '--description': description, + '--network-segment': network_segment, + '--no-dhcp': True if dhcp is False else None, + '--no-tag': True if no_tag else None, + '--no-dns-nameservers': True if no_dns_servers else None, + '--no-host-route': True if no_host_routes else None, + '--no-allocation-pool': True if no_allocation_pool else None, + } + arg_dict.update(**set_only_dict) + cmd = 'set' + + args = '{} {}'.format( + common.parse_args(args_dict=arg_dict, repeat_arg=True, vals_sep=','), + subnet) + + code, output = cli.openstack('subnet {}'.format(cmd), args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + LOG.info("Subnet {} {} successfully".format(subnet, cmd)) + return 0, subnet + + +def get_subnets(field='ID', long=False, network=None, subnet_range=None, + gateway_ip=None, full_name=None, + ip_version=None, dhcp=None, project=None, project_domain=None, + service_types=None, + tags=None, any_tags=None, not_tags=None, not_any_tags=None, + name=None, strict=True, regex=False, auth_info=None, + con_ssh=None): + """ + Get subnets ids based on given criteria. 
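+
+    Example (hypothetical usage sketch; the network name is illustrative):
+        get_subnets(field='ID', network='tenant1-mgmt-net', ip_version=4, dhcp=True)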
+ + Args: + field (str): header of subnet list table + long (bool) + network (str|None): + subnet_range (str|None): + gateway_ip (str|None): + full_name (str|None): + ip_version (str|None): + dhcp (bool) + project (str|None): + project_domain (str|None): + service_types (str|list|tuple|None): + tags (str|list|tuple|None): + any_tags (str|list|tuple|None): + not_tags (str|list|tuple|None): + not_any_tags (str|list|tuple|None): + name (str): name of the subnet + strict (bool): whether to perform strict search on given name and cidr + regex (bool): whether to use regext to search + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): a list of subnet ids + + """ + args_dict = { + '--long': long, + '--ip-version': ip_version, + '--network': network, + '--subnet-range': subnet_range, + '--gateway': gateway_ip, + '--name': full_name, + '--dhcp': True if dhcp else None, + '--no-dhcp': True if dhcp is False else None, + '--project': project, + '--project-domain': project_domain, + '--tags': tags, + '--any-tags': any_tags, + '--not-tags': not_tags, + '--not-any-tags': not_any_tags + } + args = common.parse_args(args_dict, repeat_arg=False, vals_sep=',') + service_type_args = common.parse_args({'--server-type': service_types}, + repeat_arg=True) + args = ' '.join((args, service_type_args)) + + table_ = table_parser.table( + cli.openstack('subnet list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + if name is not None: + table_ = table_parser.filter_table(table_, strict=strict, regex=regex, + name=name) + + return table_parser.get_multi_values(table_, field) + + +def get_subnet_values(subnet, fields, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Subnet values for given fields via openstack subnet show + Args: + subnet: + fields: + con_ssh: + auth_info: + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('subnet show', subnet, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields) + + +def get_network_values(network, fields, strict=True, rtn_dict=False, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get network values via openstack network show + Args: + network: + fields: + strict: + rtn_dict: + con_ssh: + auth_info: + + Returns (list|dict): + + """ + if isinstance(fields, str): + fields = [fields] + + table_ = table_parser.table( + cli.openstack('network show', network, ssh_client=con_ssh, + auth_info=auth_info)[1]) + vals = [] + for field in fields: + val = table_parser.get_value_two_col_table(table_, field, strict=strict, + merge_lines=True) + if field == 'subnets': + val = val.split(',') + val = [val_.strip() for val_ in val] + vals.append(val) + + if rtn_dict: + return {fields[i]: vals[i] for i in range(len(fields))} + return vals + + +def set_network(net_id, name=None, enable=None, share=None, + enable_port_security=None, external=None, default=None, + provider_net_type=None, provider_phy_net=None, + provider_segment=None, transparent_vlan=None, + auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None, + **kwargs): + """ + Update network with given parameters + Args: + net_id (str): + name (str|None): name to update to. Don't update name when None. + enable (bool|None): True to add --enable. False to add --disable. + Don't update enable/disable when None. 
+ share (bool|None): + enable_port_security (bool|None): + external (bool|None): + default (bool|None): + provider_net_type (str|None): + provider_phy_net (str|None): + provider_segment (str|int|None): + transparent_vlan (bool|None): + auth_info (dict): + fail_ok (bool): + con_ssh (SSHClient): + **kwargs: additional key/val pairs that are not listed in 'openstack + network update -h'. + e,g.,{'wrs-tm:qos': } + + Returns (tuple): (code, msg) + (0, "Network is successfully updated") Network updated + successfully + (1, ) 'openstack network update' cli is rejected + + """ + args_dict = { + '--name': (name, {'name': name}), + '--enable': ( + True if enable is True else None, {'admin_state_up': 'UP'}), + '--disable': ( + True if enable is False else None, {'admin_state_up': 'DOWN'}), + '--share': (True if share is True else None, {'shared': 'True'}), + '--no-share': (True if share is False else None, {'shared': 'False'}), + '--enable-port-security': ( + True if enable_port_security is True else None, + {'port_security_enabled': 'True'}), + '--disable-port-security': ( + True if enable_port_security is False else None, + {'port_security_enabled': 'False'}), + '--external': ( + True if external is True else None, + {'router:external': 'External'}), + '--internal': ( + True if external is False else None, + {'router:external': 'Internal'}), + '--default': ( + True if default is True else None, {'is_default': 'True'}), + '--no-default': ( + True if default is False else None, {'is_default': 'False'}), + '--transparent-vlan': (True if transparent_vlan is True else None, + {'vlan_transparent': 'True'}), + '--no-transparent-vlan': (True if transparent_vlan is False else None, + {'vlan_transparent': 'False'}), + '--provider-network-type': ( + provider_net_type, {'provider:network_type': provider_net_type}), + '--provider-physical-network': ( + provider_phy_net, {'provider:physical_network': provider_phy_net}), + '--provider-segment': ( + provider_segment, {'provider:segmentation_id': provider_segment}), + } + checks = {} + args_str = '' + for arg in args_dict: + val, check = args_dict[arg] + if val is not None: + set_val = '' if val is True else ' {}'.format(val) + args_str += ' {}{}'.format(arg, set_val) + if check: + checks.update(**check) + else: + LOG.info("Unknown check field in 'openstack network show' " + "for arg {}".format(arg)) + + for key, val_ in kwargs.items(): + val_ = ' {}'.format(val_) if val_ else '' + field_name = key.split('--', 1)[-1] + arg = '--{}'.format(field_name) + args_str += ' {}{}'.format(arg, val_) + if val_: + checks.update(**kwargs) + else: + LOG.info("Unknown check field in 'openstack network show' for " + "arg {}".format(arg)) + + if not args_str: + raise ValueError( + "Nothing to update. Please specify at least one None value") + + LOG.info("Updating network {} with: {}".format(net_id, args_str)) + code, out = cli.openstack('network set', '{} {}'.format(args_str, net_id), + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, out + + if checks: + LOG.info("Check network {} is updated with: {}".format(net_id, checks)) + actual_res = get_network_values(net_id, fields=list(checks.keys()), + rtn_dict=True, auth_info=auth_info) + failed = {} + for field in checks: + expt_val = checks[field] + actual_val = actual_res[field] + if expt_val != actual_val: + failed[field] = (expt_val, actual_val) + + # Fail directly. 
If a field is not allowed to be updated, the cli + # should be rejected + assert not failed, "Actual value is different than set value in " \ + "following fields: {}".format(failed) + + msg = "Network {} is successfully updated".format(net_id) + return 0, msg + + +def create_security_group(name, project=None, description=None, + project_domain=None, tag=None, no_tag=None, + auth_info=None, fail_ok=False, con_ssh=None, + cleanup='function'): + """ + Create a security group + Args: + name (str): + project + project_domain + tag (str|None|list|tuple) + no_tag (bool|None) + description (str): + auth_info (dict): + create under this project + fail_ok (bool): + con_ssh + cleanup (str): + + Returns (str|tuple): + str identifier for the newly created security group + or if fail_ok=True, return tuple: + (0, identifier) succeeded + (1, msg) failed + """ + args_dict = { + '--project': project, + '--project-domain': project_domain, + '--description': description, + '--tag': tag, + '--no-tag': no_tag, + } + args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), name) + + code, output = cli.openstack("security group create", args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + table_ = table_parser.table(output) + group_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup: + ResourceCleanup.add('security_group', group_id, scope=cleanup) + + LOG.info("Security group created: name={} id={}".format(name, group_id)) + return 0, group_id + + +def delete_security_group(group_id, fail_ok=False, + auth_info=Tenant.get('admin')): + """ + Delete a security group + Args: + group_id (str): security group to be deleted + fail_ok + auth_info (dict): + + Returns (tuple): (code, msg) + (0, msg): succeeded + (1, err_msg): failed + """ + LOG.info("Deleting security group {}".format(group_id)) + return cli.openstack("security group delete", group_id, fail_ok=fail_ok, + auth_info=auth_info) + + +def create_security_group_rule(group=None, remote_ip=None, remote_group=None, + description=None, dst_port=None, + icmp_type=None, icmp_code=None, protocol=None, + ingress=None, egress=None, + ethertype=None, project=None, + project_domain=None, fail_ok=False, + auth_info=None, + con_ssh=None, field='id', cleanup=None): + """ + Create security group rule for given security group + Args: + group: + remote_ip: + remote_group: + description: + dst_port: + icmp_type: + icmp_code: + protocol: + ingress: + egress: + ethertype: + project: + project_domain: + fail_ok: + auth_info: + con_ssh: + field (str) + cleanup + + Returns: + + """ + if not group: + groups = get_security_groups(name='default', project=project, + project_domain=project_domain, + auth_info=auth_info, con_ssh=con_ssh) + if len(groups) != 1: + return ValueError( + 'group has to be specified when multiple default groups exist') + group = groups[0] + + args_dict = { + 'remote-ip': remote_ip, + 'remote-group': remote_group, + 'description': description, + 'dst-port': dst_port, + 'icmp-type': icmp_type, + 'icmp-code': icmp_code, + 'protocol': protocol, + 'ingress': ingress, + 'egress': egress, + 'ethertype': ethertype, + 'project': project, + 'project-domain': project_domain + } + args = ' '.join((common.parse_args(args_dict), group)) + + LOG.info( + "Creating security group rule for group {} with args: {}".format(group, + args)) + code, output = cli.openstack('security group rule create', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + 
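+    # The create command prints a two-column table describing the new rule;
+    # parse it below to extract the requested field (e.g. 'id').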
table_ = table_parser.table(output) + value = table_parser.get_value_two_col_table(table_, field) + if cleanup: + ResourceCleanup.add('security_group_rule', + table_parser.get_value_two_col_table(table_, 'id')) + + LOG.info( + "Security group rule created successfully for group {} with " + "{}={}".format(group, field, value)) + return 0, value + + +def delete_security_group_rules(sec_rules, check_first=True, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Delete given security group rules + Args: + sec_rules: + check_first: + fail_ok: + con_ssh: + auth_info: + + Returns (tuple): + + """ + if isinstance(sec_rules, str): + sec_rules = (sec_rules,) + + if check_first: + existing_sec_rules = get_security_group_rules(long=False, + auth_info=auth_info, + con_ssh=con_ssh) + sec_rules = list(set(sec_rules) & set(existing_sec_rules)) + + code, output = cli.openstack('security group rule delete', + ' '.join(sec_rules), ssh_client=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + post_sec_rules = get_security_group_rules(long=False, auth_info=auth_info, + con_ssh=con_ssh) + undeleted_rules = sec_rules = list(set(sec_rules) & set(post_sec_rules)) + if undeleted_rules: + msg = 'Security group rule(s) still exist after deletion: {}'.format( + undeleted_rules) + LOG.warning(msg) + if fail_ok: + return 2, msg + + msg = "Security group rule(s) deleted successfully: {}".format(sec_rules) + LOG.info(msg) + return 0, msg + + +def get_security_group_rules(field='ID', long=True, protocol=None, ingress=None, + egress=None, group=None, + auth_info=None, con_ssh=None, **filters): + """ + Get security group rules + Args: + field (str|list|tuple) + long (bool) + protocol: + ingress: + egress: + group (str): security group id + auth_info: + con_ssh: + **filters: header value pairs for security group rules table + + Returns (list): + + """ + args_dict = { + 'protocol': protocol, + 'ingress': ingress, + 'egress': egress, + 'long': long, + } + args = common.parse_args(args_dict) + if group: + args += ' {}'.format(group) + output = cli.openstack('security group rule list', args, ssh_client=con_ssh, + auth_info=auth_info)[1] + table_ = table_parser.table(output) + return table_parser.get_multi_values(table_, field, **filters) + + +def add_icmp_and_tcp_rules(security_group, auth_info=Tenant.get('admin'), + con_ssh=None, cleanup=None): + """ + Add icmp and tcp security group rules to given security group to allow + ping and ssh + Args: + security_group (str): + auth_info: + con_ssh: + cleanup + + """ + security_rules = get_security_group_rules( + con_ssh=con_ssh, auth_info=auth_info, group=security_group, + protocol='ingress', **{'IP Protocol': ('tcp', 'icmp')}) + if len(security_rules) >= 2: + LOG.info("Security group rules for {} already exist to allow ping and " + "ssh".format(security_group)) + return + + LOG.info("Create icmp and ssh security group rules for {} with best " + "effort".format(security_group)) + for rules in (('icmp', None), ('tcp', 22)): + protocol, dst_port = rules + create_security_group_rule(group=security_group, protocol=protocol, + dst_port=dst_port, fail_ok=True, + auth_info=auth_info, cleanup=cleanup) + + +def get_net_name_from_id(net_id, con_ssh=None, auth_info=None): + """ + Get network name from id + + Args: + net_id (str): + con_ssh (SSHClient): + auth_info (dict): + + Returns (str): name of a network + + """ + return get_networks(auth_info=auth_info, con_ssh=con_ssh, net_id=net_id, + field='Name')[0] + + +def 
get_net_id_from_name(net_name, con_ssh=None, auth_info=None): + """ + Get network id from full name + + Args: + net_name (str): + con_ssh (SSHClient): + auth_info (dict): + + Returns (str): id of a network + + """ + return get_networks(auth_info=auth_info, con_ssh=con_ssh, + full_name=net_name, field='ID')[0] + + +def create_floating_ip(external_net=None, subnet=None, port=None, + fixed_ip_addr=None, floating_ip_addr=None, + qos_policy=None, description=None, dns_domain=None, + dns_name=None, tags=None, no_tag=None, + project=None, project_domain=None, fail_ok=False, + con_ssh=None, auth_info=None, cleanup=None): + """ + Create a floating ip for given tenant + + Args: + external_net (str|None): external network to allocate the floating + ip from + subnet (str|None): + qos_policy (str|None): + description (str|None): + dns_name (str|None): + dns_domain (str|None): + tags (tuple|list|str|None) + no_tag (bool|None) + project_domain (str|None): + project (str|None): name of the tenant to create floating ip for. + e.g., 'tenant1', 'tenant2' + port (str|None): id of the port + fixed_ip_addr (str): fixed ip address. such as 192.168.x.x + floating_ip_addr (str): specific floating ip to create + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + cleanup (None|str): valid scopes: function, class, module, session + + Returns (str): floating IP. such as 192.168.x.x + + """ + if not external_net: + external_net = get_networks(con_ssh=con_ssh, external=True, + auth_info=auth_info)[0] + + args_dict = { + '--subnet': subnet, + '--port': port, + '--floating-ip-address': floating_ip_addr, + '--fixed-ip-address': fixed_ip_addr, + '--qos-policy': qos_policy, + '--dns-domain': dns_domain, + '--dns-name': dns_name, + '--description': description, + '--project': project, + '--project-domain': project_domain, + '--tag': tags, + '--no-tag': no_tag + } + + args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), + external_net) + code, output = cli.openstack('floating ip create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + table_ = table_parser.table(output) + actual_fip_addr = table_parser.get_value_two_col_table( + table_, "floating_ip_address") + if actual_fip_addr and cleanup: + ResourceCleanup.add('floating_ip', actual_fip_addr, scope=cleanup) + + if code > 0: + return 1, output + + if not actual_fip_addr: + msg = "Floating IP is not found in the list" + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.NeutronError(msg) + + succ_msg = "Floating IP created successfully: {}".format(actual_fip_addr) + LOG.info(succ_msg) + return 0, actual_fip_addr + + +def delete_floating_ips(floating_ips, auth_info=Tenant.get('admin'), + con_ssh=None, fail_ok=False): + """ + Delete a floating ip + + Args: + floating_ips (str|tuple|list): floating ip to delete. + auth_info (dict): + con_ssh (SSHClient): + fail_ok (bool): whether to raise exception if fail to delete floating ip + + Returns (tuple): (rtn_code(int), msg(str)) + - (0, Floating ip is successfully deleted.) + - (1, ) + - (2, Floating ip still exists in floatingip-list.) 
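+
+    Example (hypothetical usage sketch; the address is illustrative):
+        delete_floating_ips('192.168.101.5', fail_ok=True)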
+ + """ + if isinstance(floating_ips, str): + floating_ips = (floating_ips,) + + args = ' '.join(floating_ips) + code, output = cli.openstack('floating ip delete', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, output + + post_deletion_fips = get_floating_ips(field='ID', con_ssh=con_ssh, + auth_info=Tenant.get('admin')) + undeleted_fips = list(set(floating_ips) & set(post_deletion_fips)) + + if undeleted_fips: + msg = "Floating ip {} still exists in floating ip list.".format( + undeleted_fips) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.NeutronError(msg) + + succ_msg = "Floating ip deleted successfully: {}".format(floating_ips) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_floating_ips(field='Floating IP Address', long=False, network=None, + port=None, router=None, + floating_ip=None, fixed_ip=None, status=None, project=None, + project_domain=None, + tags=None, any_tags=None, not_tags=None, not_any_tags=None, + floating_ips=None, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get floating ips values with given parameters. + + Args: + field (str|tuple|list): header of floating ip list table, such as + 'Floating IP Address' or 'Fixed IP Address' + long (bool) + network (str|None) + router (str|None) + fixed_ip (str|None): fixed ip address + floating_ip (str|None): + port (str|None): port id + status (str|None): + project (str|None): + project_domain (str|None): + tags (str|tuple|listNone): + any_tags (str|tuple|listNone): + not_tags (str|tuple|listNone): + not_any_tags (str|tuple|listNone): + floating_ips (str|list|tuple): post execution table filters + auth_info (dict): if tenant auth_info is given instead of admin, + only floating ips for this tenant will be + returned. + con_ssh (SSHClient): + + Returns (list): list of floating ips values + + """ + args_dict = { + '--long': long, + '--network': network, + '--port': port, + '--fixed-ip-address': fixed_ip, + '--floating-ip-address': floating_ip, + '--status': status, + '--router': router, + '--project': project, + '--project-domain': project_domain, + '--tags': tags, + '--any-tags': any_tags, + '--not-tags': not_tags, + '--not-any-tags': not_any_tags + } + args = common.parse_args(args_dict, repeat_arg=False, vals_sep=',') + table_ = table_parser.table( + cli.openstack('floating ip list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + if floating_ips: + table_ = table_parser.filter_table(table_, **{ + 'Floating IP Address': floating_ips}) + + return table_parser.get_multi_values(table_, field) + + +def get_floating_ip_values(fip, fields='fixed_ip_address', + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get floating ip info for given field. + Args: + fip (str): ip or id of a floating ip + fields (str|list|tuple): field(s) in floating ip show table. 
+ auth_info (dict): + con_ssh (SSHClient): + + Returns (list): values of given fields for specified floating ip + + """ + table_ = table_parser.table( + cli.openstack('floating ip show', fip, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + return table_parser.get_multi_values_two_col_table(table_, fields=fields, + evaluate=True) + + +def unset_floating_ip(floating_ip, port=None, qos_policy=None, tags=None, + all_tag=None, auth_info=Tenant.get('admin'), + con_ssh=None, fail_ok=False): + """ + Disassociate a floating ip + + Args: + floating_ip (str): ip or id of the floating ip + port (bool) + qos_policy (bool) + tags (str|None|list|tuple) + all_tag (bool) + auth_info (dict): + con_ssh (SSHClient): + fail_ok (bool): + + Returns (tuple): (rtn_code(int), msg(str)) + (0, "Floating ip is successfully disassociated with fixed ip") + (1, ) + + """ + + args_dict = { + '--port': port, + '--qos-policy': qos_policy, + '--tag': tags, + '--all-tag': all_tag, + } + + args = common.parse_args(args_dict, repeat_arg=True) + if not args: + raise ValueError("Nothing is specified to unset") + + args = '{} {}'.format(args, floating_ip) + code, output = cli.openstack('floating ip unset', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code == 1: + return 1, output + + fixed_ip = get_floating_ip_values(floating_ip, fields='fixed_ip_address', + auth_info=auth_info, con_ssh=con_ssh)[0] + if fixed_ip is not None: + err_msg = "Fixed ip address is {} instead of None for floating ip " \ + "{}".format(fixed_ip, floating_ip) + if fail_ok: + return 2, err_msg + else: + raise exceptions.NeutronError(err_msg) + + succ_msg = "Floating ip {} is successfully disassociated with fixed " \ + "ip".format(floating_ip) + LOG.info(succ_msg) + return 0, succ_msg + + +def associate_floating_ip_to_vm(floating_ip, vm_id, vm_ip=None, + auth_info=Tenant.get('admin'), + con_ssh=None, fail_ok=False): + """ + Associate a floating ip to management net ip of given vm. 
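+
+    Example (hypothetical usage sketch; values are illustrative):
+        associate_floating_ip_to_vm('192.168.101.5', vm_id=vm_id)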
+ + Args: + floating_ip (str): ip or id of the floating ip + vm_id (str): vm id + vm_ip (str): management ip of a vm used to find the matching port to + attach floating ip to + auth_info (dict): + con_ssh (SSHClient): + fail_ok (bool): + + Returns (tuple): (rtn_code(int), msg(str)) + (0, ) + (1, ) + + """ + if not vm_ip: + # get a vm management ip if not given + vm_ip = get_mgmt_ips_for_vms(vm_id, con_ssh=con_ssh)[0] + + port = get_ports(server=vm_id, fixed_ips={'ip-address': vm_ip}, + con_ssh=con_ssh)[0] + + code, output = set_floating_ip(floating_ip=floating_ip, port=port, + fixed_ip_addr=vm_ip, auth_info=auth_info, + con_ssh=con_ssh, fail_ok=fail_ok) + if code > 0: + return 1, output + + if re.match(floating_ip, UUID): + floating_ip = \ + get_floating_ip_values(floating_ip, fields='floating_ip_address', + con_ssh=con_ssh)[0] + + _wait_for_ip_in_nova_list(vm_id, ip_addr=floating_ip, fail_ok=False, + con_ssh=con_ssh) + return 0, floating_ip + + +def set_floating_ip(floating_ip, port=None, fixed_ip_addr=None, qos_policy=None, + tags=None, no_tag=None, + auth_info=Tenant.get('admin'), con_ssh=None, fail_ok=False): + """ + Set floating ip properties + Args: + floating_ip: + port: + fixed_ip_addr: + qos_policy: + tags: + no_tag: + auth_info: + con_ssh: + fail_ok: + + Returns (tuple): + + """ + args_dict = { + '--port': port, + '--fixed-ip-address': fixed_ip_addr, + '--qos-policy': qos_policy, + '--tag': tags, + '--no-tag': no_tag, + } + + args = common.parse_args(args_dict, repeat_arg=True) + if not args: + raise ValueError("Nothing is specified to set") + + args = '{} {}'.format(args, floating_ip) + + code, output = cli.openstack('floating ip set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + succ_msg = "port {} is successfully associated with floating ip {}".format( + port, floating_ip) + LOG.info(succ_msg) + return 0, floating_ip + + +def _wait_for_ip_in_nova_list(vm_id, ip_addr, timeout=300, fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin')): + end_time = time.time() + timeout + while time.time() < end_time: + vm_ips = _get_net_ips_for_vms(vms=vm_id, rtn_dict=False, + con_ssh=con_ssh, auth_info=auth_info) + if ip_addr in vm_ips: + return True + else: + msg = "IP address {} is not found in openstack server list for vm {} " \ + "within {} seconds".format(ip_addr, vm_id, timeout) + if fail_ok: + return False + raise exceptions.TimeoutException(msg) + + +def get_providernet_ranges(field='name', range_name=None, providernet_name=None, + providernet_type=None, strict=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + + Args: + field (str): 'name' or 'id' + range_name (str): + providernet_name (str): + providernet_type (str): + strict (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): list of range names or ids + + """ + + table_ = table_parser.table( + cli.neutron('providernet-range-list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + kwargs = {} + if providernet_name is not None: + kwargs['providernet'] = providernet_name + + if range_name is not None: + kwargs['name'] = range_name + + if providernet_type is not None: + kwargs['type'] = providernet_type + + return table_parser.get_values(table_, field, strict=strict, **kwargs) + + +def get_security_groups(field='id', project=None, project_domain=None, + tags=None, any_tags=None, + not_tags=None, not_any_tags=None, name=None, + strict=False, con_ssh=None, auth_info=None): + """ + Get the neutron security group list based on name if given 
for given + user. + + Args: + field (str|list|tuple) + project + project_domain + tags (list|tuple|str|None) + any_tags (list|tuple|str|None) + not_tags (list|tuple|str|None) + not_any_tags (list|tuple|str|None) + con_ssh (SSHClient): If None, active controller ssh will be used. + auth_info (dict): Tenant dict. If None, primary tenant will be used. + name (str): Given name for the security group to filter + strict (bool): strict match for name + + Returns (list): Neutron security group id. + + """ + args_dict = { + 'project': project, + 'project_domain': project_domain, + 'tags': tags, + 'any-tags': any_tags, + 'not-tags': not_tags, + 'not-any-tags': not_any_tags, + } + args = common.parse_args(args_dict, vals_sep=',') + table_ = table_parser.table( + cli.openstack('security group list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + if name: + table_ = table_parser.filter_table(table_, strict=strict, name=name) + + return table_parser.get_multi_values(table_, field) + + +def get_internal_net_id(net_name=None, strict=False, con_ssh=None, + auth_info=None): + """ + Get internal network id that matches the given net_name of a specific + tenant. + + Args: + net_name (str): name of the internal network. This can be a substring + of the tenant net name, such as 'net1', + and it will return id for internal0-net1 + strict (bool): Whether to perform strict search on given net_name + con_ssh (SSHClient): + auth_info (dict): If None, primary tenant will be used. + + Returns (str): A tenant network id for given tenant network name. + If multiple ids matches the given name, only the first will return + + """ + net_ids = get_internal_net_ids(net_names=net_name, strict=strict, + con_ssh=con_ssh, auth_info=auth_info) + if not net_ids: + raise exceptions.TiSError( + "No network name contains {} in 'openstack network list'".format( + net_name)) + + return net_ids[0] + + +def get_mgmt_net_id(con_ssh=None, auth_info=None): + """ + Get the management net id of given tenant. + + Args: + con_ssh (SSHClient): If None, active controller ssh will be used. + auth_info (dict): Tenant dict. If None, primary tenant will be used. + + Returns (str): Management network id of a specific tenant. + + """ + mgmt_net_name = Networks.get_nenutron_net_patterns(net_type='mgmt')[0] + mgmt_ids = get_networks(name=mgmt_net_name, con_ssh=con_ssh, + auth_info=auth_info, strict=False, regex=True) + if not mgmt_ids: + raise exceptions.TiSError( + "No network name contains {} in 'openstack network list'".format( + mgmt_net_name)) + return mgmt_ids[0] + + +def get_tenant_net_id(net_name=None, con_ssh=None, auth_info=None): + """ + Get tenant network id that matches the given net_name of a specific tenant. + + Args: + net_name (str): name of the tenant network. This can be a substring + of the tenant net name, such as 'net1', + and it will return id for -net1 + con_ssh (SSHClient): + auth_info (dict): If None, primary tenant will be used. + + Returns (str): A tenant network id for given tenant network name. + If multiple ids matches the given name, only the first will return + + """ + net_ids = get_tenant_net_ids(net_names=net_name, con_ssh=con_ssh, + auth_info=auth_info) + if not net_ids: + raise exceptions.TiSError( + "No network name contains {} in 'openstack network list'".format( + net_name)) + + return net_ids[0] + + +def get_tenant_net_ids(net_names=None, strict=False, regex=True, con_ssh=None, + auth_info=None, field='id'): + """ + Get a list of tenant network ids that match the given net_names for a + specific tenant. 
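+
+    Example (hypothetical usage sketch; net names are illustrative):
+        get_tenant_net_ids(net_names=['tenant1-net1', 'tenant1-net2'])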
+ + Args: + net_names (str or list): list of tenant network name(s) to get id(s) for + strict (bool): whether to perform a strict search on given name + regex (bool): whether to search using regular expression + con_ssh (SSHClient): + auth_info (dict): If None, primary tenant will be used + field (str): id or name + + Returns (list): list of tenant nets. such as (, + ) + + """ + if net_names is None: + net_names = Networks.get_nenutron_net_patterns('data')[0] + regex = True + strict = False + + return get_networks(field=field, con_ssh=con_ssh, auth_info=auth_info, + strict=strict, regex=regex, name=net_names) + + +def get_internal_net_ids(net_names=None, strict=False, regex=True, con_ssh=None, + auth_info=None): + """ + Get a list of internal network ids that match the given net_names for a + specific tenant. + + Args: + net_names (str or list): list of internal network name(s) to get id( + s) for + strict (bool): whether to perform a strict search on given name + regex (bool): whether to search using regular expression + con_ssh (SSHClient): + auth_info (dict): If None, primary tenant will be used + + Returns (list): list of tenant nets. such as (, + ) + + """ + if net_names is None: + net_names = Networks.get_nenutron_net_patterns('internal')[0] + strict = False + regex = True + else: + if isinstance(net_names, str): + net_names = [net_names] + + for i in range(len(net_names)): + net_name = net_names[i] + if 'internal' not in net_name: + net_names[i] = 'internal.*{}'.format(net_name) + + return get_networks(field='ID', con_ssh=con_ssh, auth_info=auth_info, + strict=strict, regex=regex, name=net_names) + + +def get_tenant_ips_for_vms(vms=None, con_ssh=None, + auth_info=Tenant.get('admin'), rtn_dict=False, + exclude_nets=None): + """ + This function returns the management IPs for all VMs on the system. + We make the assumption that the management IPs start with "192". + Args: + vms (str|list|None): vm ids list. If None, management ips for ALL vms + with given Tenant(via auth_info) will be + returned. + con_ssh (SSHClient): active controller SSHClient object + auth_info (dict): use admin by default unless specified + rtn_dict (bool): return list if False, return dict if True + exclude_nets (list|str) network name(s) - exclude ips from given + network name(s) + + Returns (list|dict): + a list of all VM management IPs # rtn_dict=False + dictionary with vm IDs as the keys, and mgmt ips as values # + rtn_dict=True + """ + net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( + 'data') + return _get_net_ips_for_vms(netname_pattern=net_name_pattern, + ip_pattern=net_ip_pattern, vms=vms, + con_ssh=con_ssh, auth_info=auth_info, + rtn_dict=rtn_dict, + exclude_nets=exclude_nets) + + +def get_internal_ips_for_vms(vms=None, con_ssh=None, + auth_info=Tenant.get('admin'), rtn_dict=False, + exclude_nets=None): + """ + This function returns the management IPs for all VMs on the system. + We make the assumption that the management IPs start with "192". + Args: + vms (str|list|None): vm ids list. If None, management ips for ALL vms + with given Tenant(via auth_info) will be + returned. 
+ con_ssh (SSHClient): active controller SSHClient object + auth_info (dict): use admin by default unless specified + rtn_dict (bool): return list if False, return dict if True + exclude_nets (list|str) network name(s) - exclude ips from given + network name(s) + + Returns (list|dict): + a list of all VM management IPs # rtn_dict=False + dictionary with vm IDs as the keys, and mgmt ips as values # + rtn_dict=True + """ + net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( + 'internal') + return _get_net_ips_for_vms(netname_pattern=net_name_pattern, + ip_pattern=net_ip_pattern, vms=vms, + con_ssh=con_ssh, auth_info=auth_info, + rtn_dict=rtn_dict, + exclude_nets=exclude_nets) + + +def get_external_ips_for_vms(vms=None, con_ssh=None, + auth_info=Tenant.get('admin'), rtn_dict=False, + exclude_nets=None): + net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( + 'external') + return _get_net_ips_for_vms(netname_pattern=net_name_pattern, + ip_pattern=net_ip_pattern, vms=vms, + con_ssh=con_ssh, auth_info=auth_info, + rtn_dict=rtn_dict, + exclude_nets=exclude_nets) + + +def get_mgmt_ips_for_vms(vms=None, con_ssh=None, auth_info=Tenant.get('admin'), + rtn_dict=False, exclude_nets=None): + """ + This function returns the management IPs for all VMs on the system. + We make the assumption that the management IP pattern is "192.168.xxx.x( + xx)". + Args: + vms (str|list|None): vm ids list. If None, management ips for ALL vms + with given Tenant(via auth_info) will be + returned. + con_ssh (SSHClient): active controller SSHClient object + auth_info (dict): use admin by default unless specified + rtn_dict (bool): return list if False, return dict if True + exclude_nets (list|str) network name(s) - exclude ips from given + network name(s) + + Returns (list|dict): + a list of all VM management IPs # rtn_dict=False + dictionary with vm IDs as the keys, and mgmt ips as values # + rtn_dict=True + """ + net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( + 'mgmt') + return _get_net_ips_for_vms(netname_pattern=net_name_pattern, + ip_pattern=net_ip_pattern, vms=vms, + con_ssh=con_ssh, auth_info=auth_info, + rtn_dict=rtn_dict, + exclude_nets=exclude_nets) + + +def _get_net_ips_for_vms(netname_pattern=None, ip_pattern=None, vms=None, + con_ssh=None, auth_info=Tenant.get('admin'), + rtn_dict=False, exclude_nets=None, fail_ok=False): + if not vms and vms is not None: + raise ValueError("Invalid value for vms: {}".format(vms)) + + args = '--a' if auth_info and auth_info.get('user') == 'admin' else '' + table_ = table_parser.table( + cli.openstack('server list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + if vms: + table_ = table_parser.filter_table(table_, ID=vms) + + vm_ids = table_parser.get_column(table_, 'ID') + if not vm_ids: + raise ValueError("No VM found.") + + all_ips = [] + all_ips_dict = {} + vms_nets = table_parser.get_column(table_, 'Networks') + + if exclude_nets and isinstance(exclude_nets, str): + exclude_nets = [exclude_nets] + + for i in range(len(vm_ids)): + vm_id = vm_ids[i] + vm_nets = vms_nets[i].split(sep=';') + ips_for_vm = [] + for vm_net in vm_nets: + net_name, net_ips = vm_net.strip().split('=') + if exclude_nets: + for net_to_exclude in exclude_nets: + if net_to_exclude in net_name: + LOG.info("Excluding IPs from {}".format(net_to_exclude)) + continue + # find ips for given netname_pattern + if not netname_pattern or re.search(netname_pattern, net_name): + net_ips = [net_ip.strip() for net_ip in net_ips.split(',')] + 
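+                # Accumulate IPs only for networks whose name matches the
+                # requested pattern (e.g. mgmt/data/internal net patterns).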
ips_for_vm += net_ips + + if not ips_for_vm: + LOG.warning( + "No network found for vm {} with net name pattern: {}".format( + vm_id, netname_pattern)) + continue + + # Filter further if IP pattern is given + if ip_pattern: + ips_for_vm = re.findall(ip_pattern, ','.join(ips_for_vm)) + if not ips_for_vm: + LOG.warning( + "No ip found for vm {} with pattern {}".format(vm_id, + ip_pattern)) + continue + + LOG.debug('targeted ips for vm: {}'.format(ips_for_vm)) + all_ips_dict[vm_id] = ips_for_vm + all_ips += ips_for_vm + + if not all_ips: + if fail_ok: + return all_ips_dict if rtn_dict else all_ips + raise ValueError( + "No ip found for VM(s) {} with net name pattern: {}{}".format( + vm_ids, netname_pattern, ', and ip pattern: {}'.format( + ip_pattern) if ip_pattern else '')) + + LOG.info("IPs dict: {}".format(all_ips_dict)) + if rtn_dict: + return all_ips_dict + else: + return all_ips + + +def get_routers(field='ID', name=None, distributed=None, ha=None, + gateway_ip=None, strict=True, regex=False, + auth_info=None, con_ssh=None): + """ + Get router id(s) based on given criteria. + Args: + field (str|tuple|list): header(s) of the router list table + name (str): router name + distributed (bool): filter out dvr or non-dvr router + ha (bool): filter out HA router + gateway_ip (str): ip of the external router gateway such as + "192.168.13.3" + strict (bool): whether to perform strict search on router name + regex + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): list of routers + + """ + param_dict = { + 'Distributed': distributed, + 'HA': ha, + 'External_gateway_info': gateway_ip, + } + params = {k: str(v) for k, v in param_dict.items() if v is not None} + args = '--long' if 'External_gateway_info' in params else '' + + table_ = table_parser.table( + cli.openstack('router list', args, ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + if name is not None: + table_ = table_parser.filter_table(table_, strict=strict, regex=regex, + name=name) + if params: + table_ = table_parser.filter_table(table_, **params) + + convert = False + if isinstance(field, str): + field = [field] + convert = True + + values_all_fields = [] + for header in field: + values = table_parser.get_values(table_, header) + if header.lower() == 'external gateway info': + values = [ + eval(value.replace('true', 'True').replace('false', 'False')) + for value in values] + values_all_fields.append(values) + + if convert: + return values_all_fields[0] + + return values_all_fields + + +def get_tenant_router(router_name=None, auth_info=None, con_ssh=None): + """ + Get id of tenant router with specified name. 
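+
+    Example (hypothetical usage sketch; defaults to '<tenant_name>-router'
+    when router_name is None):
+        get_tenant_router(router_name='tenant1-router')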
+ + Args: + router_name (str): name of the router + auth_info (dict): + con_ssh (SSHClient): + + Returns (str): router id + + """ + if router_name is None: + tenant_name = common.get_tenant_name(auth_info=auth_info) + router_name = tenant_name + '-router' + + routers = get_routers(auth_info=auth_info, con_ssh=con_ssh, + name=router_name) + if not routers: + LOG.warning("No router with name {} found".format(router_name)) + return None + return routers[0] + + +def get_router_values(router_id=None, fields='status', strict=True, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get values of specified fields for given router via openstack router show + + Args: + router_id (str): + fields (str|list|tuple): + strict (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): values for given fields in openstack router show + + """ + if router_id is None: + router_id = get_tenant_router(con_ssh=con_ssh) + + table_ = table_parser.table( + cli.openstack('router show', router_id, ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + + if isinstance(fields, str): + fields = (fields,) + values = [] + for field in fields: + value = table_parser.get_value_two_col_table(table_, field, + strict=strict) + if field in ('interfaces_info', 'external_gateway_info', + 'distributed') or value in ('None', 'False', 'True'): + value = eval( + value.replace('true', 'True').replace('false', 'False')) + values.append(value) + return values + + +def create_router(name=None, project=None, distributed=None, ha=None, + disable=None, description=None, tags=None, + no_tag=None, avail_zone_hint=None, project_domain=None, + rtn_name=False, + fail_ok=False, auth_info=Tenant.get('admin'), con_ssh=None, + cleanup=None): + """ + Create a neutron router with given parameters + Args: + name (str|None): + project (str|None): + distributed (bool|None): + ha (bool|None): + disable (bool|None): + description (str|None): + tags (str|list|tuple|None): + no_tag (bool|None): + avail_zone_hint (str|None): + project_domain (str|None): + rtn_name (bool): return router name if True else return router id + fail_ok (bool): + auth_info: + con_ssh: + cleanup (str|None): Valid cleanup scopes: function, class, module, + session + + Returns (tuple): + (0, ) # router created successfully + (1, ) # CLI rejected + + """ + if name is None: + name = 'router' + name = '-'.join([project, name, str(common.Count.get_router_count())]) + + if not project and auth_info and auth_info['tenant'] == 'admin': + project = Tenant.get_primary()['tenant'] + + args_dict = { + '--project': project, + '--distributed': True if distributed else None, + '--centralized': True if distributed is False else None, + '--ha': True if ha else None, + '--no-ha': True if ha is False else None, + '--enable': True if disable is False else None, + '--disable': True if disable else None, + '--description': description, + '--tag': tags, + '--no-tag': no_tag, + '--availability-zone-hint': avail_zone_hint, + '--project-domain': project_domain, + } + args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), name) + + LOG.info("Creating router with args: {}".format(args)) + code, output = cli.openstack('router create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + table_ = table_parser.table(output) + router_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup and router_id: + ResourceCleanup.add('router', router_id, scope=cleanup) + + # process result + if code > 0: + return 1, output + + succ_msg = 
"Router {} is created successfully.".format(name) + LOG.info(succ_msg) + return 0, name if rtn_name else router_id + + +def get_router_subnets(router, field='subnet_id', router_interface_only=True, + auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Get router subnets' ids or ips via openstack port list + Args: + router (str): router name or id + field (str): 'subnet_id' or 'ip_address' + router_interface_only + auth_info: + con_ssh: + + Returns (list): + + """ + fixed_ips, device_owners = get_ports( + field=('Fixed IP Addresses', 'Device Owner'), router=router, long=True, + auth_info=auth_info, con_ssh=con_ssh) + + subnets = [] + for i in range(len(device_owners)): + device_owner = device_owners[i] + # Assume router can have only 1 fixed ip on same port + fixed_ip_info = fixed_ips[i][0] + if router_interface_only and 'router_interface' not in device_owner: + continue + subnets.append(fixed_ip_info.get(field, None)) + + return subnets + + +def get_next_subnet_cidr(net_id, ip_pattern=Networks.IPV4_IP, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get next unused cider for given network + Args: + net_id: + ip_pattern: + con_ssh: + auth_info: + + Returns: + + """ + existing_subnets = get_subnets(field='Subnet', network=net_id, + con_ssh=con_ssh, auth_info=auth_info) + existing_subnets_str = ','.join(existing_subnets) + # TODO: add ipv6 support + mask = re.findall(ip_pattern + r'/(\d{1,3})', existing_subnets_str)[0] + increment = int(math.pow(2, math.ceil(math.log2(int(mask))))) + + ips = re.findall(ip_pattern, existing_subnets_str) + ips = [ipaddress.ip_address(item) for item in ips] + max_ip = ipaddress.ip_address(max(ips)) + + cidr = "{}/{}".format(str(ipaddress.ip_address(int(max_ip) + increment)), + mask) + LOG.info("Next unused CIDR for network {}: {}".format(net_id, cidr)) + + return cidr + + +def delete_router(router, remove_ports=True, auth_info=Tenant.get('admin'), + con_ssh=None, fail_ok=False): + """ + Delete given router + Args: + router (str): + remove_ports (bool): + auth_info: + con_ssh: + fail_ok: + + Returns (tuple): + + """ + + if remove_ports: + LOG.info("Clear router gateway and remove attached ports for router " + "{}".format(router)) + clear_router_gateway(router, auth_info=auth_info, con_ssh=con_ssh) + router_ports = get_ports(router=router, con_ssh=con_ssh, + auth_info=auth_info) + for port in router_ports: + remove_router_interface(router, port=port, auth_info=auth_info, + con_ssh=con_ssh) + + LOG.info("Deleting router {}...".format(router)) + code, output = cli.openstack('router delete', router, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + rtn_val = 'ID' if re.match(UUID, router) else 'Name' + post_routers = get_routers(auth_info=auth_info, con_ssh=con_ssh, + field=rtn_val) + if router in post_routers: + msg = "Router {} is still showing in neutron router-list".format(router) + if fail_ok: + LOG.warning(msg) + return 2, msg + + succ_msg = "Router {} deleted successfully".format(router) + LOG.info(succ_msg) + return 0, succ_msg + + +def add_router_interface(router=None, subnet=None, port=None, auth_info=None, + con_ssh=None, fail_ok=False): + """ + Add port or subnet to router + Args: + router (str|None): + subnet (str|None): + port (str|None): + auth_info (dict): + con_ssh: + fail_ok (bool): + + Returns (tuple): + """ + + return __add_remove_router_interface(router=router, port=port, + subnet=subnet, action='add', + auth_info=auth_info, con_ssh=con_ssh, + fail_ok=fail_ok) + + +def 
remove_router_interface(router=None, subnet=None, port=None, auth_info=None, + con_ssh=None, fail_ok=False): + """ + Remove port or subnet from router + Args: + router (str|None): + subnet (str|None): + port (str|None): + auth_info (dict): + con_ssh: + fail_ok (bool): + + Returns (tuple): + """ + return __add_remove_router_interface(router=router, port=port, + subnet=subnet, action='remove', + auth_info=auth_info, con_ssh=con_ssh, + fail_ok=fail_ok) + + +def __add_remove_router_interface(router=None, subnet=None, port=None, + action='add', auth_info=None, + con_ssh=None, fail_ok=False): + """ + Remove router port or subnet + Args: + router (str): + subnet + port + action (str): add or remove + auth_info: + con_ssh: + fail_ok: + + Returns (tuple): + + """ + if subnet is None and port is None: + raise ValueError("subnet or port has to be provided") + + if not router: + router = get_tenant_router(con_ssh=con_ssh, auth_info=auth_info) + + if subnet: + interface = subnet + interface_type = 'subnet' + else: + interface = port + interface_type = 'port' + + cmd = 'router {} {}'.format(action, interface_type) + + args = '{} {}'.format(router, interface) + LOG.info("Removing router interface: {}".format(args)) + code, output = cli.openstack(cmd, args, ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code == 1: + return 1, output + + succ_msg = "{} ran successfully for router {}.".format(cmd, router) + LOG.info(succ_msg) + return 0, interface + + +def set_router(router=None, enable=None, external_gateway=None, + enable_snat=None, routes=None, no_route=None, + fixed_ips=None, tags=None, no_tag=None, qos_policy=None, + no_qos_policy=None, ha=None, distributed=None, + name=None, description=None, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Set router with given parameters + Args: + router (str): + enable (bool): + external_gateway (str): + enable_snat (bool): + routes (list): list of dict or strings + list of dict: + [{'destination': , 'gateway': }, + {'destination': , 'gateway': }] + list of strings: + ['destination=,gateway=', + 'destination=,gateway='] + no_route (bool): + fixed_ips (list|tuple|str|dict): If list, it could be a list of dict + or strings + list of dict: + [{'subnet': , 'ip-address': }, {'subnet': + , 'ip-address': }] + list of strings: + ['subnet=,ip-address=', 'subnet=, + ip-address='] + tags (list\tuple): list of strings + no_tag (bool): + qos_policy (str): + no_qos_policy (bool): + ha (bool): + distributed (bool): + name (str): + description (str): + fail_ok (bool): + con_ssh: + auth_info: + + Returns: + + """ + args_dict = { + '--name': name, + '--description': description, + '--enable': True if enable else None, + '--disable': True if enable is False else None, + '--distributed': True if distributed else None, + '--centralized': True if distributed is False else None, + '--route': routes, + '--no-route': True if no_route else None, + '--ha': True if ha else None, + '--no-ha': True if ha is False else None, + '--external-gateway': external_gateway, + '--fixed-ip': fixed_ips, + '--enable-snat': True if enable_snat else None, + '--disable-snat': True if enable_snat is False else None, + '--qos-policy': qos_policy, + '--no-qos-policy': True if no_qos_policy else None, + '--tag': tags, + '--no-tag': True if no_tag else None, + } + args = common.parse_args(args_dict, repeat_arg=True) + if not args: + raise ValueError("No parameters provided to set router") + + if not router: + router = get_tenant_router(con_ssh=con_ssh) + + LOG.info("Setting 
router {} with args: {}".format(router, args)) + args = '{} {}'.format(args, router) + code, out = cli.openstack('router set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, out + + LOG.info("Router {} set successfully".format(router)) + return 0, router + + +def unset_router(router_id=None, external_gateway=None, routes=None, + qos_policy=None, tag=None, all_tag=None, + fail_ok=False, con_ssh=None, auth_info=Tenant.get('admin')): + """ + Unset router with given parameters + Args: + router_id (str|None): + external_gateway (bool): + qos_policy (bool): + tag (str): + all_tag (bool): + fail_ok: + con_ssh: + auth_info: + routes (list): list of dict or string. + list of dict: + [{'destination': , 'gateway': }, + {'destination': , 'gateway': }] + list of strings: + ['destination=,gateway=', + 'destination=,gateway='] + Returns: + + """ + args_dict = { + '--route': routes, + '--external-gateway': external_gateway, + '--qos-polity': qos_policy, + '--tag': tag, + '--all-tag': all_tag + } + args = common.parse_args(args_dict, repeat_arg=True) + if not args: + raise ValueError("No parameter specified to unset") + + if not router_id: + router_id = get_tenant_router(con_ssh=con_ssh) + + LOG.info("Unsetting router {} with args: {}".format(router_id, args)) + args = '{} {}'.format(args, router_id) + code, output = cli.openstack('router unset', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + msg = "Router {} unset successfully".format(router_id) + LOG.info(msg) + return 0, msg + + +def get_router_ext_gateway_info(router_id=None, auth_info=None, con_ssh=None): + """ + Get router's external gateway info as a dictionary + + Args: + router_id (str): + auth_info (dict|None): + con_ssh (SSHClient): + + Returns (dict): external gateway info as a dict. + Examples: {"network_id": "55e5967a-2138-4f27-a17c-d700af1c2429", + "enable_snat": True, + "external_fixed_ips": [{"subnet_id": + "892d3ad8-9cbc-46db-88f3-84e151bbc116", + "ip_address": "192.168.9.3"}] + } + """ + return get_router_values(router_id=router_id, + fields='external_gateway_info', + con_ssh=con_ssh, + auth_info=auth_info)[0] + + +def set_router_gateway(router_id=None, external_net=None, enable_snat=False, + fixed_ips=None, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None, + clear_first=False): + """ + Set router gateway with given snat, ip settings. + + Args: + router_id (str): id of the router to set gateway for. If None, + tenant router for Primary tenant will be used. + external_net (str): id of the external network for getting the gateway + enable_snat (bool): whether to enable SNAT. 
+ fixed_ips (str|None|list|tuple): ip address(es) on external gateway + fail_ok (bool): + auth_info (dict): auth info for running the router-gateway-set cli + con_ssh (SSHClient): + clear_first (bool): Whether to clear the router gateway first if + router already has a gateway set + + Returns (tuple): (rtn_code (int), message (str)) scenario 1,2,3, + 4 only returns if fail_ok=True + - (0, "Router gateway is successfully set.") + - (1, ) -- cli is rejected + + """ + # Process args + if fixed_ips: + if isinstance(fixed_ips, str): + fixed_ips = (fixed_ips,) + fixed_ips = [{'ip-address': fixed_ip} for fixed_ip in fixed_ips] + if not router_id: + router_id = get_tenant_router(con_ssh=con_ssh) + if not external_net: + external_net = \ + get_networks(con_ssh=con_ssh, external=True, auth_info=auth_info)[0] + + # Clear first if gateway already set + if clear_first and get_router_ext_gateway_info(router_id, + auth_info=auth_info, + con_ssh=con_ssh): + clear_router_gateway(router_id=router_id, check_first=False, + auth_info=auth_info, con_ssh=con_ssh) + + return set_router(router_id, external_gateway=external_net, + enable_snat=enable_snat, fixed_ips=fixed_ips, + con_ssh=con_ssh, auth_info=auth_info, fail_ok=fail_ok) + + +def clear_router_gateway(router_id=None, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None, + check_first=True): + """ + Clear router gateway + + Args: + router_id (str): id of router to clear gateway for. If None, tenant + router for primary tenant will be used. + fail_ok (bool): + auth_info (dict): auth info for running the router-gateway-clear cli + con_ssh (SSHClient): + check_first (bool): whether to check if gateway is set for given + router before clearing + + Returns (tuple): (rtn_code (int), message (str)) + - (0, "Router gateway is successfully cleared.") + - (1, ) -- cli is rejected + - (2, "Failed to clear gateway for router ") + + """ + if router_id is None: + router_id = get_tenant_router(con_ssh=con_ssh, auth_info=auth_info) + + if check_first and not get_router_ext_gateway_info(router_id, + con_ssh=con_ssh, + auth_info=auth_info): + msg = "No gateway found for router. Do nothing." 
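+        # Returning -1 here means nothing was cleared: check_first found
+        # no external gateway configured on the router.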
+ LOG.info(msg) + return -1, msg + + return unset_router(router_id=router_id, external_gateway=True, + fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info) + + +def get_router_external_gateway_ips(router_id, auth_info=None, con_ssh=None): + """ + Get router external gateway fixed ips + Args: + router_id: + auth_info: + con_ssh: + + Returns (list): list of ip addresses + + """ + ext_gateway_info = get_router_ext_gateway_info(router_id, + auth_info=auth_info, + con_ssh=con_ssh) + fixed_ips = [] + if ext_gateway_info: + fixed_ips = ext_gateway_info['external_fixed_ips'] + fixed_ips = [fixed_ip['ip_address'] for fixed_ip in fixed_ips if + fixed_ip.get('ip_address', '')] + + return fixed_ips + + +def get_router_host(router=None, auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get router host + Args: + router (str|None): + auth_info: + con_ssh: + + Returns (str): + + """ + if not router: + router = get_tenant_router(con_ssh=con_ssh, auth_info=auth_info) + + return get_network_agents(router=router, field='Host', con_ssh=con_ssh, + auth_info=auth_info)[0] + + +def set_router_mode(router_id=None, distributed=None, ha=None, + enable_on_failure=True, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Update router to distributed or centralized + + Args: + router_id (str): id of the router to update + distributed (bool|None): True if set to distributed, False if set to + centralized + ha (bool|None) + enable_on_failure (bool): whether to set admin state up if updating + router failed + fail_ok (bool): whether to throw exception if cli got rejected + auth_info (dict): + con_ssh (SSHClient): + + Returns: + + """ + router_mode = [] + if distributed is not None: + router_mode.append('distributed' if distributed else 'centralized') + if ha is not None: + router_mode.append('ha' if ha else 'no-ha') + + if not router_mode: + raise ValueError("Distributed or ha has to be specified") + + router_mode = ' and '.join(router_mode) + LOG.info("Disable router {} and set it to {} mode".format(router_id, + router_mode)) + try: + code, output = set_router(router=router_id, distributed=distributed, + ha=ha, enable=False, fail_ok=fail_ok, + con_ssh=con_ssh, auth_info=auth_info) + except (exceptions.TiSError, pexpect.ExceptionPexpect): + if enable_on_failure: + set_router(router=router_id, enable=True, con_ssh=con_ssh, + auth_info=auth_info) + raise + + LOG.info("Re-enable router after set to {}".format(router_mode)) + set_router(router=router_id, enable=True, con_ssh=con_ssh, + auth_info=auth_info) + + if code > 0: + return 1, output + + fields = ('distributed', 'ha') + expt_values = (distributed, ha) + post_values = get_router_values(router_id, fields, auth_info=auth_info, + con_ssh=con_ssh) + + for i in range(len(fields)): + field = fields[i] + post_value = post_values[i] + expt_value = expt_values[i] + if expt_value and post_value != expt_value: + msg = "Router {} {} is {} instead of {}".format(router_id, field, + post_value, + expt_value) + raise exceptions.NeutronError(msg) + + succ_msg = "Router is successfully updated to distributed={}".format( + distributed) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_networks_on_providernet(providernet, segment=None, external=None, + field='id', + con_ssh=None, auth_info=Tenant.get('admin'), + name=None, net_id=None, + strict=True, regex=False, exclude=False): + """ + + Args: + providernet(str): + segment (int|None) + external (bool|None) + field(str): 'id' or 'name' + con_ssh (SSHClient): + auth_info (dict): + name + net_id + strict 
(bool) + regex (bool) + exclude (bool): whether to return networks that are NOT on given + providernet + + Returns (list): list of networks + """ + if not providernet: + raise ValueError("No providernet_id provided.") + + return get_networks(field=field, provider_physical_network=providernet, + provider_setment=segment, + external=external, name=name, net_id=net_id, + strict=strict, regex=regex, exclude=exclude, + con_ssh=con_ssh, auth_info=auth_info) + + +def get_eth_for_mac(ssh_client, mac_addr, timeout=VMTimeout.IF_ADD, + vshell=False): + """ + Get the eth name for given mac address on the ssh client provided + Args: + ssh_client (SSHClient): usually a vm_ssh + mac_addr (str): such as "fa:16:3e:45:0d:ec" + timeout (int): max time to wait for the given mac address appear in + ip addr + vshell (bool): if True, get eth name from "vshell port-list" + + Returns (str): The first matching eth name for given mac. such as "eth3" + + """ + end_time = time.time() + timeout + while time.time() < end_time: + if not vshell: + if mac_addr in ssh_client.exec_cmd('ip addr'.format(mac_addr))[1]: + code, output = ssh_client.exec_cmd( + 'ip addr | grep --color=never -B 1 "{}"'.format(mac_addr)) + # sample output: + # 7: eth4: mtu 1500 qdisc noop state + # DOWN qlen 1000 + # link/ether 90:e2:ba:60:c8:08 brd ff:ff:ff:ff:ff:ff + + return output.split(sep=':')[1].strip() + else: + code, output = ssh_client.exec_cmd( + 'vshell port-list | grep {}'.format(mac_addr)) + # |uuid|id|type|name|socket|admin|oper|mtu|mac-address|pci + # -address|network-uuid|network-name + return output.split(sep='|')[4].strip() + time.sleep(1) + else: + LOG.warning( + "Cannot find provided mac address {} in 'ip addr'".format(mac_addr)) + return '' + + +def _get_interfaces_via_vshell(ssh_client, net_type='internal'): + """ + Get interface uuids for given network type + Args: + ssh_client (SSHClient): + net_type: 'data', 'mgmt', or 'internal' + + Returns (list): interface uuids + + """ + LOG.info( + "Getting {} interface-uuid via vshell address-list".format(net_type)) + table_ = table_parser.table( + ssh_client.exec_cmd('vshell address-list', fail_ok=False)[1]) + interfaces = table_parser.get_values( + table_, 'interface-uuid', regex=True, + address=Networks.get_nenutron_net_patterns(net_type=net_type)[1]) + + return interfaces + + +__PING_LOSS_MATCH = re.compile(PING_LOSS_RATE) + + +def ping_server(server, ssh_client, num_pings=5, timeout=60, check_interval=5, + fail_ok=False, vshell=False, interface=None, retry=0, + net_type='internal'): + """ + + Args: + server (str): server ip to ping + ssh_client (SSHClient): ping from this ssh client + num_pings (int): + timeout (int): max time to wait for ping response in seconds + check_interval (int): seconds in between retries + fail_ok (bool): whether to raise exception if packet loss rate is 100% + vshell (bool): whether to ping via 'vshell ping' cmd + interface (str): interface uuid. 
vm's internal interface-uuid will be + used when unset + retry (int): + net_type (str): 'mgmt', 'data', 'internal', or 'external', only used + for vshell=True and interface=None + + Returns (tuple): ( (int), + (int)) + + """ + LOG.info('Ping {} from host {}'.format(server, ssh_client.host)) + output = packet_loss_rate = None + for i in range(max(retry + 1, 0)): + if not vshell: + cmd = 'ping -c {} {}'.format(num_pings, server) + code, output = ssh_client.exec_cmd(cmd=cmd, expect_timeout=timeout, + fail_ok=True) + if code != 0: + packet_loss_rate = 100 + else: + packet_loss_rate = __PING_LOSS_MATCH.findall(output)[-1] + else: + if not interface: + interface = _get_interfaces_via_vshell(ssh_client, + net_type=net_type)[0] + cmd = 'vshell ping --count {} {} {}'.format(num_pings, server, + interface) + code, output = ssh_client.exec_cmd(cmd=cmd, expect_timeout=timeout) + if code != 0: + packet_loss_rate = 100 + else: + if "ERROR" in output: + # usually due to incorrectly selected interface (no route + # to destination) + raise exceptions.NeutronError( + "vshell ping rejected, output={}".format(output)) + packet_loss_rate = re.findall(VSHELL_PING_LOSS_RATE, output)[-1] + + packet_loss_rate = int(packet_loss_rate) + if packet_loss_rate < 100: + if packet_loss_rate > 0: + LOG.warning("Some packets dropped when ping from {} ssh " + "session to {}. Packet loss rate: {}%". + format(ssh_client.host, server, packet_loss_rate)) + else: + LOG.info("All packets received by {}".format(server)) + break + + LOG.info("retry in 3 seconds") + time.sleep(3) + else: + msg = "Ping from {} to {} failed.".format(ssh_client.host, server) + LOG.warning(msg) + if not fail_ok: + raise exceptions.VMNetworkError(msg) + + untransmitted_packets = re.findall(r"(\d+) packets transmitted,", output) + if untransmitted_packets: + untransmitted_packets = int(num_pings) - int(untransmitted_packets[0]) + else: + untransmitted_packets = num_pings + + return packet_loss_rate, untransmitted_packets + + +def get_pci_vm_network(pci_type='pci-sriov', net_name=None, strict=False, + con_ssh=None, auth_info=Tenant.get('admin'), + rtn_all=False): + """ + + Args: + pci_type (str|tuple|list): + net_name: + strict: + con_ssh: + auth_info: + rtn_all + + Returns (tuple|list): None if no network for given pci type; 2 nets(list) + if CX nics; 1 net otherwise. + + """ + if isinstance(pci_type, str): + pci_type = [pci_type] + + hosts_and_pnets = host_helper.get_hosts_and_pnets_with_pci_devs( + pci_type=pci_type, up_hosts_only=True, + con_ssh=con_ssh, auth_info=auth_info) + if not hosts_and_pnets: + if rtn_all: + return [], None + return [] + + # print("hosts and pnets: {}".format(hosts_and_pnets)) + + host = list(hosts_and_pnets.keys())[0] + pnet_name = hosts_and_pnets[host][0] + nets = list(set(get_networks_on_providernet(pnet_name, field='name'))) + + nets_list_all_types = [] + for pci_type_ in pci_type: + if pci_type_ == 'pci-sriov': + # Exclude network on first segment + # The switch is setup with untagged frames for the first segment + # within the range. + # This is suitable for PCI passthrough, but would not work for SRIOV + first_segs = get_first_segments_of_pnet_ranges(pnet_name, + con_ssh=con_ssh) + first_segs = [seg for seg in first_segs if seg > 20] + for seg in first_segs: + untagged_net = get_net_on_segment(pnet_name, seg_id=seg, + field='name', con_ssh=con_ssh) + if untagged_net in nets: + LOG.info( + "{} is on first segment of {} range with untagged " + "frames. Remove for sriov.". 
+ format(untagged_net, pnet_name)) + nets.remove(untagged_net) + + # print("pnet: {}; Nets: {}".format(pnet_name, nets)) + nets_for_type = _get_preferred_nets(nets=nets, net_name=net_name, + strict=strict) + if not nets_for_type: + nets_list_all_types = [] + break + + nets_list_all_types.append(nets_for_type) + + final_nets = [] + cx_for_pcipt = False + if nets_list_all_types: + final_nets = set(nets_list_all_types[0]) + for nets_ in nets_list_all_types[1:]: + final_nets.intersection_update(set(nets_)) + final_nets = list(final_nets) + if final_nets: + if 'pci-passthrough' in pci_type: + + port = host_helper.get_host_interfaces(host, field='ports', + net_type=pci_type)[0] + host_nic = host_helper.get_host_ports(host, field='device type', + **{'name': port})[0] + if re.match(MELLANOX4, host_nic): + cx_for_pcipt = True + + if not rtn_all: + final_nets = final_nets[0:2] if cx_for_pcipt else final_nets[-1] + + if rtn_all: + final_nets = final_nets, cx_for_pcipt + + return final_nets + + +def get_network_segment_ranges(field=('Minimum ID', 'Maximum ID'), long=False, + shared=None, physical_network=None, + network_type=None, project_id=None, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get network segment ranges info + Args: + field (str|tuple|list): + long (bool|None): cli parameter --long + shared (bool|None): return value filter + physical_network (str|None): return value filter + network_type (str|None): return value filter + project_id (str|None): return value filter + auth_info: + con_ssh: + + Returns (list of str|tuple): return list of str if rtn_val is str, + otherwise rtn list of tuples + + """ + + table_ = table_parser.table( + cli.openstack('network segment range list', '--long' if long else '', + ssh_client=con_ssh, + auth_info=auth_info)[1]) + kwargs = { + 'Shared': shared, + 'Physical Network': physical_network, + 'Network Type': network_type, + 'Project ID': project_id, + } + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + vals = table_parser.get_multi_values(table_, field, evaluate=True, **kwargs) + if not isinstance(field, str): + vals = zip(*vals) + + return vals + + +def get_first_segments_of_pnet_ranges(providernet, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get first segment id within the range of given providernet + Args: + providernet (str): physical network name + con_ssh (SSHClient): + auth_info (dict): + + Returns (list of int): list of min segment for each range of the physical + network + + """ + min_segments = get_network_segment_ranges(field='Minimum ID', + physical_network=providernet, + auth_info=auth_info, + con_ssh=con_ssh) + + return min_segments + + +def get_net_on_segment(providernet, seg_id, field='name', con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get network name on given prvidernet with specified segment id + Args: + providernet (str): pnet name or id + seg_id (int|list|tuple): segment id(s) + field (str): 'name' or 'id' + con_ssh (SSHClient): + auth_info (dict): + + Returns (str|None): network id/name or None if no network on given seg id + + """ + nets = get_networks_on_providernet(providernet=providernet, field=field, + con_ssh=con_ssh, segment=seg_id, + auth_info=auth_info) + + net = nets[0] if nets else None + return net + + +def _get_preferred_nets(nets, net_name=None, strict=False): + specified_nets = [] + nets_dict = { + 'internal': [], + 'mgmt': [], + 'data': [] + } + + for net in nets: + if net_name: + if strict: + if re.match(net_name, net): + specified_nets.append(net) + else: + if 
re.search(net_name, net): + specified_nets.append(net) + else: + # If net_name unspecified: + for net_type, nets_found in nets_dict.items(): + net_name_pattern = Networks.get_nenutron_net_patterns(net_type)[ + 0] + if net_name_pattern and re.search(net_name_pattern, net): + nets_found.append(net) + break + else: + LOG.warning("Unknown network: {}. Ignore.".format(net)) + + for nets_ in (specified_nets, nets_dict['internal'], nets_dict['data'], + nets_dict['mgmt']): + if nets_: + nets_counts = Counter(nets_) + nets_ = sorted(nets_counts.keys(), key=nets_counts.get, + reverse=True) + LOG.info("Preferred networks selected: {}".format(nets_)) + return nets_ + + +def create_port(net_id, name, project=None, fixed_ips=None, device_id=None, + device_owner=None, port_security=None, + enable_port=None, mac_addr=None, vnic_type=None, + security_groups=None, no_security_groups=None, + qos_pol=None, allowed_addr_pairs=None, dns_name=None, tag=None, + no_tag=None, + host_id=None, wrs_vif=None, fail_ok=False, auth_info=None, + con_ssh=None, cleanup=None): + """ + Create a port on given network + + Args: + net_id (str): network id to create port for + name (str): name of the new port + project (str): tenant name. such as tenant1, tenant2 + fixed_ips (list|tuple|dict|None): e.g., [{"subnet_id": , + "ip-address"=}, {"ip-address": } + device_id (str): device id of this port + device_owner (str): Device owner of this port + port_security (None|bool): + enable_port (bool|None): + mac_addr (str): MAC address of this port + vnic_type: one of the: + security_groups (str|list): Security group(s) associated with the port + no_security_groups (bool): Associate no security groups with the port + qos_pol (str): Attach QoS policy ID or name to the resource + allowed_addr_pairs (str|list): Allowed address pair associated with + the port. 
+ e.g., "ip_address=IP_ADDR[,mac_address=MAC_ADDR]" + dns_name (str): Assign DNS name to the port (requires DNS + integration extension) + host_id (str) + tag (str|None) + no_tag (str|None) + wrs_vif + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + cleanup (None|str) + + Returns (tuple): (, ) + (0, ) - port created successfully + (1, ) - CLI rejected + (2, "Network ID for created port is not as specified.") - post + create check fail + + """ + LOG.info("Creating port on network {}".format(net_id)) + if not net_id: + raise ValueError("network id is required") + tenant_id = \ + keystone_helper.get_projects(field='ID', name=project, + con_ssh=con_ssh)[0] if project else None + + args_dict = { + '--no-security-groups': no_security_groups, + '--enable-port-security': True if port_security else None, + '--disable-port-security': True if port_security is False else None, + '--tenant-id': tenant_id, + '--device-id': device_id, + '--device-owner': device_owner, + '--mac-address': mac_addr, + '--vnic-type': vnic_type, + '--host': host_id, + # '--binding-profile': + '--enable': True if port_security else None, + '--disable': True if enable_port is False else None, + '--qos-policy': qos_pol, + '--dns-name': dns_name, + '--binding-profile vif_model': wrs_vif, + '--fixed-ip': fixed_ips, + '--allowed-address-pair': allowed_addr_pairs, + '--security-group': security_groups, + '--tag': tag, + '--no-tag': no_tag + } + + args = common.parse_args(args_dict=args_dict, repeat_arg=True, vals_sep=',') + args = '--network={} {} {}'.format(net_id, args, name) + + code, output = cli.openstack('port create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + port_tab = table_parser.table(output) + port_net_id = table_parser.get_value_two_col_table(port_tab, 'network_id') + port_id = table_parser.get_value_two_col_table(port_tab, 'id') + if port_id and cleanup: + ResourceCleanup.add('port', port_id) + + if code == 1: + return code, output + + if not net_id == port_net_id: + err_msg = "Network ID for created port is not as specified. 
Expt:{}; " \ + "Actual: {}".format(net_id, port_net_id) + if fail_ok: + LOG.warning(err_msg) + return 2, port_id + + succ_msg = "Port {} is successfully created on network {}".format(port_id, + net_id) + LOG.info(succ_msg) + return 0, port_id + + +def delete_port(port_id, fail_ok=False, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Delete given port + Args: + port_id (str): + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): (, ) + (0, "Port is successfully deleted") + (1, ) - delete port cli rejected + (2, "Port still exists after deleting") - post deletion + check failed + + """ + LOG.info("Deleting port: {}".format(port_id)) + if not port_id: + msg = "No port specified" + LOG.warning(msg) + return -1, msg + + code, output = cli.openstack('port delete', port_id, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + existing_ports = get_ports(field='id', auth_info=auth_info, con_ssh=con_ssh) + if port_id in existing_ports: + err_msg = "Port {} still exists after deleting".format(port_id) + if fail_ok: + LOG.warning(err_msg) + return 2, err_msg + raise exceptions.NeutronError(err_msg) + + succ_msg = "Port {} is successfully deleted".format(port_id) + LOG.info(succ_msg) + return 0, succ_msg + + +def set_port(port_id, name=None, fixed_ips=None, no_fixed_ip=None, + device_id=None, device_owner=None, + port_security=None, enable_port=None, mac_addr=None, + vnic_type=None, wrs_vif=None, + security_groups=None, no_security_groups=None, qos_pol=None, + host_id=None, + allowed_addr_pairs=None, no_allowed_addr_pairs=None, dns_name=None, + description=None, + tag=None, no_tag=None, fail_ok=False, auth_info=None, + con_ssh=None): + args_dict = { + '--description': description, + '--device': device_id, + '--mac-address': mac_addr, + '--device-owner': device_owner, + '--vnic-type': vnic_type, + '--host': host_id, + '--dns-name': dns_name, + '--enable': enable_port, + '--disable': True if enable_port is False else None, + '--enable-port-security': port_security, + '--disable-port-security': True if port_security is False else None, + '--name': name, + '--fixed-ip': fixed_ips, + '--no-fixed-ip': no_fixed_ip, + '--qos-policy': qos_pol, + '--security-group': security_groups, + '--no-security-group': no_security_groups, + '--allowed-address': allowed_addr_pairs, + '--no-allowed-address': no_allowed_addr_pairs, + '--tag': tag, + '--no-tag': no_tag, + '--binding-profile vif_model': wrs_vif, + } + args = '{} {}'.format( + common.parse_args(args_dict, repeat_arg=True, vals_sep=','), port_id) + code, out = cli.openstack('port set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code != 0: + return code, out + + msg = "Port {} is updated.".format(port_id) + LOG.info(msg) + return code, msg + + +def __convert_ip_subnet(line): + ip_addr = subnet = '' + if 'ip_address' in line: + ip_addrs = re.findall("ip_address=\'(.*)\',", line) + if ip_addrs: + ip_addr = ip_addrs[0] + subnets = re.findall("subnet_id=\'(.*)\'", line) + if subnets: + subnet = subnets[0] + + return {'ip_address': ip_addr, 'subnet_id': subnet} + + +def get_ports(field='id', network=None, router=None, server=None, project=None, + fixed_ips=None, long=False, mac=None, + port_id=None, port_name=None, auth_info=Tenant.get('admin'), + con_ssh=None, strict=False): + """ + Get a list of ports with given arguments + Args: + field (str|list|tuple): openstack port list table header(s). 
'ID', + 'NAME', 'MAC Address', 'Fixed IP Addresses' + network (str|None) + router (str|None) + server (str|None) + project (str|None) + mac (str|None) + fixed_ips (list|tuple|dict|None) e.g., ({'subnet': , + 'ip-address': }, {'ip-address': }) + long (bool): + port_id (str): id of the port + port_name (str): name of the port + strict (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): + + """ + optional_args = { + '--fixed-ip': fixed_ips, + '--project': project, + '--network': network, + '--router': router, + '--server': server, + '--mac-address': mac, + '--long': long, + } + args_str = common.parse_args(args_dict=optional_args, repeat_arg=True, + vals_sep=',') + table_ = table_parser.table( + cli.openstack('port list', args_str, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + filters = {} + if port_id: + filters['id'] = port_id + elif port_name: + filters['name'] = port_name + + convert = False + if isinstance(field, str): + convert = True + field = (field,) + + res = [] + for header in field: + ports_info = table_parser.get_values(table_, header, strict=strict, + merge_lines=False, **filters) + if header.lower() == 'fixed ip addresses': + values = [] + for port_info in ports_info: + if isinstance(port_info, str): + port_info = [port_info] + port_info = [__convert_ip_subnet(line=line) for line in + port_info] + values.append(port_info) + ports_info = values + res.append(ports_info) + + if convert: + res = res[0] + return res + + +def get_port_values(port, fields=('binding_vnic_type', 'mac_address'), + con_ssh=None, auth_info=None): + """ + Get port info via openstack port show + Args: + port (str): + fields (str|list|tuple): + con_ssh (SSHClient): + auth_info (dict): + + Returns (list): return list of list if field is fixed_ips e.g., + fields = ('id', 'fixed_ips') + returns: [, [{'ip_address': , 'subnet_id': }, + ..]] + + """ + if isinstance(fields, str): + fields = (fields,) + + table_ = table_parser.table( + cli.openstack('port show', port, ssh_client=con_ssh, + auth_info=auth_info)[1]) + values = [] + for field in fields: + value = table_parser.get_value_two_col_table(table_, field) + if field == 'fixed_ips': + if isinstance(value, str): + value = [value] + value = [__convert_ip_subnet(line) for line in value] + values.append(value) + + return values + + +def get_pci_devices_info(class_id, con_ssh=None, auth_info=None): + """ + Get PCI devices with nova device-list/show. + + As in load "2017-01-17_22-01-49", the known supported devices are: + Coleto Creek PCIe Co-processor Device Id: 0443 Vendor Id:8086 + + Args: + class_id (str|list): Some possible values: + 0b4000 (Co-processor), + 0c0330 (USB controller), + 030000 (VGA compatible controller) + con_ssh: + auth_info: + + Returns (dict): nova pci devices dict. 
+ Format: {: {: {}, : {...}}, + : {...}, + ...} + Examples: + {'qat-dh895xcc-vf': {'compute-0': {'Device ID':'0443','Class + Id':'0b4000', ...} 'compute-1': {...}}} + + """ + table_ = table_parser.table( + cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) + table_ = table_parser.filter_table(table_, **{'class_id': class_id}) + LOG.info('output of nova device-list for {}: {}'.format(class_id, table_)) + + devices = table_parser.get_column(table_, 'PCI Alias') + LOG.info('PCI Alias from device-list:{}'.format(devices)) + + nova_pci_devices = {} + for alias in devices: + table_ = table_parser.table(cli.nova('device-show {}'.format(alias))[0]) + # LOG.debug('output from nova device-show for device-id:{}\n{ + # }'.format(alias, table_)) + + table_dict = table_parser.row_dict_table(table_, key_header='Host', + unique_key=True, + lower_case=False) + nova_pci_devices[alias] = table_dict + # {'qat-dh895xcc-vf': {'compute-0': {'Device ID':'0443','Class + # Id':'0b4000', ...} 'compute-1': {...}}} + + LOG.info('nova_pci_devices: {}'.format(nova_pci_devices)) + + return nova_pci_devices + + +def get_pci_device_configured_vfs_value(device_id, con_ssh=None, + auth_info=None): + """ + Get PCI device configured vfs value for given device id + + Args: + device_id (str): device vf id + con_ssh: + auth_info: + + Returns: + str : + + """ + _table = table_parser.table( + cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) + LOG.info('output of nova device-list:{}'.format(_table)) + _table = table_parser.filter_table(_table, **{'Device Id': device_id}) + return table_parser.get_column(_table, 'pci_vfs_configured')[0] + + +def get_pci_device_used_vfs_value(device_id, con_ssh=None, auth_info=None): + """ + Get PCI device used number of vfs value for given device id + + Args: + device_id (str): device vf id + con_ssh: + auth_info: + + Returns: + str : + + """ + _table = table_parser.table( + cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) + LOG.info('output of nova device-list:{}'.format(_table)) + _table = table_parser.filter_table(_table, **{'Device Id': device_id}) + LOG.info('output of nova device-list:{}'.format(_table)) + return table_parser.get_column(_table, 'pci_vfs_used')[0] + + +def get_pci_device_vfs_counts_for_host( + host, device_id=None, fields=('pci_vfs_configured', 'pci_vfs_used'), + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get PCI device used number of vfs value for given device id + + Args: + host (str): compute hostname + device_id (str): device vf id + fields (tuple|str|list) + con_ssh: + auth_info: + + Returns: + list + + """ + if device_id is None: + device_id = get_pci_device_list_values(field='Device Id', + con_ssh=con_ssh, + auth_info=auth_info)[0] + + table_ = table_parser.table( + cli.nova('device-show {}'.format(device_id), ssh_client=con_ssh, + auth_info=auth_info)[1]) + LOG.debug( + 'output from nova device-show for device-id:{}\n{}'.format(device_id, + table_)) + + table_ = table_parser.filter_table(table_, host=host) + counts = [] + if isinstance(fields, str): + fields = [fields] + + for field in fields: + counts.append(int(table_parser.get_column(table_, field)[0])) + + return counts + + +def get_pci_device_list_values(field='pci_vfs_used', con_ssh=None, + auth_info=Tenant.get('admin'), **kwargs): + table_ = table_parser.table( + cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) + + values = table_parser.get_values(table_, field, **kwargs) + if field in ['pci_pfs_configured', 'pci_pfs_used', 
'pci_vfs_configured', + 'pci_vfs_used']: + values = [int(value) for value in values] + + return values + + +def get_pci_device_list_info(con_ssh=None, header_key='pci alias', + auth_info=Tenant.get('admin'), **kwargs): + table_ = table_parser.table( + cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) + if kwargs: + table_ = table_parser.filter_table(table_, **kwargs) + + return table_parser.row_dict_table(table_, key_header=header_key) + + +def get_tenant_routers_for_vms(vms, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get tenant routers for given vms + + Args: + vms (str|list): + con_ssh (SSHClient): + auth_info + + Returns (list): list of router ids + + """ + if isinstance(vms, str): + vms = [vms] + + router_ids, router_projects = get_routers(auth_info=auth_info, + con_ssh=con_ssh, + field=('ID', 'Project')) + vms_routers = [] + from keywords import vm_helper + for i in range(len(router_ids)): + router_project = router_projects[i] + vms_with_router = vm_helper.get_vms(vms=vms, project=router_project, + all_projects=False, + auth_info=auth_info, + con_ssh=con_ssh) + if vms_with_router: + vms_routers.append(router_ids[i]) + vms = list(set(vms) - set(vms_with_router)) + + if not vms: + break + + return vms_routers + + +def collect_networking_info(time_stamp, routers=None, vms=None, sep_file=None, + con_ssh=None): + LOG.info("Ping tenant(s) router's external and internal gateway IPs") + + if not routers: + if vms: + if isinstance(vms, str): + vms = [vms] + routers = get_tenant_routers_for_vms(vms=vms) + else: + routers = get_routers(name='tenant[12]-router', regex=True, + auth_info=Tenant.get('admin'), + con_ssh=con_ssh) + elif isinstance(routers, str): + routers = [routers] + + ips_to_ping = [] + for router_ in routers: + router_ips = get_router_subnets(router=router_, field='ip_address', + con_ssh=con_ssh) + ips_to_ping += router_ips + + res_bool, res_dict = ping_ips_from_natbox(ips_to_ping, num_pings=3, + timeout=15) + if sep_file: + res_str = "succeeded" if res_bool else 'failed' + content = "#### Ping router interfaces {} ####\n{}\n".format(res_str, + res_dict) + common.write_to_file(sep_file, content=content) + + # if ProjVar.get_var('ALWAYS_COLLECT'): + # common.collect_software_logs() + + hosts = host_helper.get_up_hypervisors(con_ssh=con_ssh) + for router in routers: + router_host = get_network_agents(field='Host', router=router, + con_ssh=con_ssh)[0] + if router_host and router_host not in hosts: + hosts.append(router_host) + LOG.info("Router {} is hosted on {}".format(router, router_host)) + + if hosts: + is_avs = system_helper.is_avs(con_ssh=con_ssh) + vswitch_type = 'avs' if is_avs else 'ovs' + LOG.info( + "Collect {}.info for {} router(s) on router host(s): {}".format( + vswitch_type, routers, hosts)) + for host in hosts: + collect_vswitch_info_on_host(host, vswitch_type, + collect_extra_ovs=(not is_avs), + time_stamp=time_stamp, + con_ssh=con_ssh) + + +def get_network_agents(field='Host', agent_host=None, router=None, network=None, + agent_type=None, long=False, + con_ssh=None, auth_info=Tenant.get('admin'), **kwargs): + """ + Get network agents values from openstack network agent list + Args: + field (str|list|tuple): + agent_host: + router: + network: + agent_type: + long: + con_ssh: + auth_info: + **kwargs: + + Returns (list): + + """ + args_dict = { + '--agent-type': agent_type, + '--host': agent_host, + '--network': network, + '--router': router, + '--long': long, + } + args = common.parse_args(args_dict) + table_ = table_parser.table( + 
cli.openstack('network agent list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values(table_, field, **kwargs) + + +def ping_ips_from_natbox(ips, natbox_ssh=None, num_pings=5, timeout=30): + if not natbox_ssh: + natbox_ssh = NATBoxClient.get_natbox_client() + + res_dict = {} + for ip_ in ips: + packet_loss_rate = ping_server( + server=ip_, ssh_client=natbox_ssh, num_pings=num_pings, + timeout=timeout, fail_ok=True, vshell=False)[0] + res_dict[ip_] = packet_loss_rate + + res_bool = not any(loss_rate == 100 for loss_rate in res_dict.values()) + # LOG.error("PING RES: {}".format(res_dict)) + if res_bool: + LOG.info("Ping successful from NatBox: {}".format(ips)) + else: + LOG.warning("Ping unsuccessful from NatBox: {}".format(res_dict)) + + return res_bool, res_dict + + +def collect_vswitch_info_on_host(host, vswitch_type, time_stamp, + collect_extra_ovs=False, con_ssh=None): + """ + + Args: + host (str): + vswitch_type (str): avs or ovs + time_stamp (str) + collect_extra_ovs + con_ssh + + Returns: + + """ + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + if not time_stamp: + time_stamp = common.get_date_in_format(ssh_client=con_ssh, + date_format='%Y%m%d_%H-%M') + con_name = con_ssh.get_hostname() + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + # create log file for host under home dir + # time_stamp = common.get_date_in_format(ssh_client=host_ssh, + # date_format='%Y%m%d_%H-%M') + test_name = ProjVar.get_var("TEST_NAME").split(sep='[')[0] + file_name = os.path.join(HostLinuxUser.get_home(), + '{}-{}-{}-vswitch.log'.format(time_stamp, + test_name, host)) + host_ssh.exec_cmd('touch {}'.format(file_name)) + + # Collect vswitch logs using collect tool + # vswitch log will be saved to /scratch/var/extra/avs.info on the + # compute host + host_ssh.exec_sudo_cmd('/etc/collect.d/collect_{}'.format(vswitch_type)) + vswitch_info_path = '/scratch/var/extra/{}.info'.format(vswitch_type) + host_ssh.exec_cmd( + r'echo -e "##### {} {}.info collected #####\n" >> {}'.format( + host, vswitch_type, file_name), + get_exit_code=False) + time.sleep(1) + host_ssh.exec_sudo_cmd( + 'cat {} >> {}'.format(vswitch_info_path, file_name), + get_exit_code=False) + host_ssh.exec_sudo_cmd('rm -f {}'.format(vswitch_info_path)) + + if collect_extra_ovs: + # Run a few cmds to collect more ovs info + host_ssh.exec_cmd(r'echo -e "\n\n#### Additional ovs ' + r'cmds on {} ####\n >> {}"'.format(host, + file_name), + get_exit_code=False) + for cmd in ('ovs-ofctl show br-int', 'ovs-ofctl dump-flows br-int', + 'ovs-appctl dpif/dump-flows br-int'): + host_ssh.exec_cmd( + r'echo -e "\n\n\n$ sudo {}" >> {}'.format(cmd, file_name)) + cmd = '{} >> {}'.format(cmd, file_name) + host_ssh.exec_sudo_cmd(cmd, get_exit_code=False) + + host_ssh.exec_sudo_cmd('chmod 777 {}'.format(file_name)) + + if host != con_name: + host_ssh.scp_on_source(file_name, + dest_user=HostLinuxUser.get_user(), + dest_ip=con_name, + dest_path=file_name, + dest_password=HostLinuxUser.get_password(), + timeout=120) + + dest_path = os.path.join(ProjVar.get_var('PING_FAILURE_DIR'), + os.path.basename(file_name)) + common.scp_from_active_controller_to_localhost(file_name, + dest_path=dest_path, + timeout=120) + return dest_path + + +def get_pci_device_numa_nodes(hosts): + """ + Get processors of crypto PCI devices for given hosts + + Args: + hosts (list): list of hosts to check + + Returns (dict): host, numa_nodes map. 
e.g., {'compute-0': ['0'], + 'compute-1': ['0', '1']} + + """ + hosts_numa = {} + for host in hosts: + numa_nodes = host_helper.get_host_devices(host, field='numa_node') + hosts_numa[host] = numa_nodes + + LOG.info("Hosts numa_nodes map for PCI devices: {}".format(hosts_numa)) + return hosts_numa + + +def get_pci_procs(hosts, net_type='pci-sriov'): + """ + Get processors of pci-sriov or pci-passthrough devices for given hosts + + Args: + hosts (list): list of hosts to check + net_type (str): pci-sriov or pci-passthrough + + Returns (dict): host, procs map. e.g., {'compute-0': ['0'], 'compute-1': + ['0', '1']} + + """ + hosts_procs = {} + for host in hosts: + ports_list = host_helper.get_host_interfaces(host, field='ports', + net_type=net_type) + + ports = [] + for port in ports_list: + ports += port + ports = list(set(ports)) + + procs = host_helper.get_host_ports(host, field='processor', + **{'name': ports}) + hosts_procs[host] = list(set(procs)) + + LOG.info("Hosts procs map for {} devices: {}".format(net_type, hosts_procs)) + return hosts_procs + + +def wait_for_agents_healthy(hosts=None, timeout=120, fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Wait for neutron agents to be alive + Args: + hosts (str|list): hostname(s) to check. When None, all nova + hypervisors will be checked + timeout (int): max wait time in seconds + fail_ok (bool): whether to return False or raise exception when + non-alive agents exist + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): ((bool), (str)) + (True, "All agents for are alive") + (False, "Some agents are not alive: ") + Applicable when fail_ok=True + + """ + if hosts is None: + hosts = host_helper.get_hypervisors(con_ssh=con_ssh, + auth_info=auth_info) + elif isinstance(hosts, str): + hosts = [hosts] + + unhealthy_agents = None + LOG.info("Wait for neutron agents to be alive for {}".format(hosts)) + end_time = time.time() + timeout + while time.time() < end_time: + alive_vals, states, agents, agent_hosts = get_network_agents( + field=('Alive', 'State', 'Binary', 'Host'), + host=hosts, con_ssh=con_ssh, auth_info=auth_info) + + unhealthy_agents = [i for i in + list(zip(agents, agent_hosts, states, alive_vals)) + if + (i[-1] != ':-)' or i[-2] != 'UP')] + if not unhealthy_agents: + succ_msg = "All agents for {} are alive and up".format(hosts) + LOG.info(succ_msg) + return True, succ_msg + + msg = "Some network agents are not healthy: {}".format(unhealthy_agents) + LOG.warning(msg) + if fail_ok: + return False, msg + raise exceptions.NeutronError(msg) + + +def get_trunks(field='id', trunk_id=None, trunk_name=None, parent_port=None, + strict=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get a list of trunks with given arguments + Args: + field (str): any valid header of neutron trunk list table. 
'id', + 'name', 'mac_address', or 'fixed_ips' + trunk_id (str): id of the trunk + trunk_name (str): name of the trunk + parent_port (str): parent port of the trunk + strict (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('network trunk list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + kwargs = { + 'id': trunk_id, + 'name': trunk_name, + 'parent_port': parent_port, + } + kwargs = {k: v for k, v in kwargs.items() if v} + + trunks = table_parser.get_values(table_, field, strict=strict, regex=True, + merge_lines=True, **kwargs) + return trunks + + +def create_trunk(parent_port, name=None, sub_ports=None, description=None, + project=None, project_domain=None, + enable=True, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin'), cleanup=None): + """ + Create a trunk via API. + Args: + parent_port (str): Parent port of trunk. + project (str|None): tenant name to create the trunk under. + project_domain (str|None) + name (str|None): Name of the trunk. + enable (bool|None): Admin state of the trunk. + sub_ports (list|tuple|dict|str|None): List of subport dictionaries in + format + [[, + segmentation_type(vlan), + segmentation_id()] []..] + description (str|None) + fail_ok + con_ssh + auth_info + cleanup + + Return: List with trunk's data returned from Neutron API. + + """ + if not project and auth_info and auth_info['tenant'] == 'admin': + project = Tenant.get_primary()['tenant'] + + args_dict = { + '--description': description, + '--parent-port': parent_port, + '--subport': sub_ports, + '--enable': True if enable else None, + '--disable': True if enable is False else None, + '--project': project, + '--project-domain': project_domain, + } + args = common.parse_args(args_dict, repeat_arg=True, vals_sep=',') + if not name: + name = common.get_unique_name('port_trunk') + + LOG.info("Creating trunk {} with args: {}".format(name, args)) + args = '{} {}'.format(args, name) + code, output = cli.openstack('network trunk create', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + table_ = table_parser.table(output) + trunk_id = table_parser.get_value_two_col_table(table_, 'id') + + if cleanup and trunk_id: + ResourceCleanup.add('trunk', trunk_id) + + if code > 0: + return 1, output + + succ_msg = "Trunk {} is successfully created for port {}".format( + name, parent_port) + LOG.info(succ_msg) + return 0, trunk_id + + +def delete_trunks(trunks, fail_ok=False, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Delete given trunk + Args: + trunks (str): + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): (, ) + (0, "Port is successfully deleted") + (1, ) - delete port cli rejected + (2, "trunk still exists after deleting") - post deletion + check failed + + """ + if not trunks: + msg = "No trunk specified" + LOG.info(msg) + return -1, msg + + if isinstance(trunks, str): + trunks = [trunks] + + rtn_val = 'id' if re.match(UUID, trunks[0]) else 'name' + existing_trunks = get_trunks(field=rtn_val, auth_info=auth_info, + con_ssh=con_ssh) + trunks = list(set(trunks) & set(existing_trunks)) + if not trunks: + msg = "Given trunks not found on system. Do nothing." 
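+        # Returning -1 signals a no-op: none of the requested trunks exist
+        # on the system, so there is nothing to delete.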
+ LOG.info(msg) + return -1, msg + + trunks = ' '.join(trunks) + LOG.info("Deleting trunk: {}".format(trunks)) + code, output = cli.openstack('network trunk delete', trunks, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + existing_trunks = get_trunks(field='id', auth_info=auth_info, + con_ssh=con_ssh) + undeleted_trunks = list(set(trunks) & set(existing_trunks)) + if undeleted_trunks: + err_msg = "Trunk {} still exists after deleting".format( + undeleted_trunks) + if fail_ok: + LOG.warning(err_msg) + return 2, err_msg + raise exceptions.NeutronError(err_msg) + + succ_msg = "Trunk {} is successfully deleted".format(trunks) + LOG.info(succ_msg) + return 0, succ_msg + + +def set_trunk(trunk, sub_ports=None, name=None, enable=None, description=None, + fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Set trunk with given parameters. + Args: + trunk (str): Trunk id to add the subports + sub_ports (list|tuple|str|None): + name (str|None) + description (str|None) + enable (bool|None) + fail_ok + con_ssh + auth_info + + Return (tuple): + + """ + args_dict = { + '--name': name, + '--description': description, + '--subport': sub_ports, + '--enable': True if enable else None, + '--disable': True if enable is False else None, + } + + args = common.parse_args(args_dict, repeat_arg=True, vals_sep=',') + if not args: + raise ValueError("Nothing specified to set") + + LOG.info("Setting trunk {} with args: {}".format(trunk, args)) + args = '{} {}'.format(args, trunk) + code, output = cli.openstack('network trunk set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, output + + msg = 'Trunk {} is set successfully'.format(trunk) + LOG.info(msg) + return 0, msg + + +def unset_trunk(trunk, sub_ports, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Remove subports from a trunk via API. + Args: + trunk: Trunk id to remove the subports from + sub_ports (list|str|tuple) + fail_ok + con_ssh + auth_info + + Return: list with return code and msg + + """ + args = {'--subport': sub_ports} + args = '{} {}'.format(common.parse_args(args, repeat_arg=True), trunk) + + LOG.info("Unsetting trunk: {}".format(args)) + code, output = cli.openstack('network trunk unset', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + msg = 'Subport(s) removed from trunk {} successfully: {}'.format(trunk, + sub_ports) + LOG.info(msg) + return 0, msg + + +def get_networks(field='ID', long=False, full_name=None, external=None, + enabled=None, project=None, + project_domain=None, shared=None, status=None, + providernet_type=None, provider_physical_network=None, + provider_setment=None, agent=None, tags=None, any_tags=None, + not_tags=None, not_any_tags=None, + name=None, subnets=None, net_id=None, strict=False, + regex=False, exclude=False, auth_info=None, + con_ssh=None): + """ + Get networks based on given criteria. + + Args: + field (str|tuple|list) + long (bool|None): + full_name (str|None): + external (bool|None): + enabled (bool|None): + project (str|None): + project_domain (str|None): + shared (bool|None): + status (str|None): + providernet_type (str|None): + provider_physical_network (str|None): + provider_setment (str|None): + agent (str|None): + tags (list|tuple|str|None): + any_tags (list|tuple|str|None): + not_tags (list|tuple|str|None): + not_any_tags (list|tuple|str|None): + name (str): partial/full name of network, can be regex. 
This will be + used to filter networks after cmd executed + subnets (str|list\tuple): post filter + net_id (str|None): post filter + strict (bool): whether to perform strict search on given name and + subnets + regex (bool): whether to use regex to search + exclude (bool) + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): list of networks + + """ + args_dict = { + '--long': long, + '--name': full_name, + '--project': project, + '--project-domain': project_domain, + '--external': True if external else None, + '--internal': True if external is False else None, + '--enable': True if enabled else None, + '--disable': True if enabled is False else None, + '--share': True if shared else None, + '--no-share': True if shared is False else None, + '--status': status, + '--provider-network-type': providernet_type, + '--provider-physical-network': provider_physical_network, + '--provider-segment': provider_setment, + '--agent': agent, + '--tags': tags, + '--any-tags': any_tags, + '--not-tags': not_tags, + '--not-any-tags': not_any_tags, + } + args = common.parse_args(args_dict, repeat_arg=False, vals_sep=',') + table_ = table_parser.table( + cli.openstack('network list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + filters = {'name': name, 'subnets': subnets, 'id': net_id} + filters = {k: v for k, v in filters.items() if str(v)} + if filters: + table_ = table_parser.filter_table(table_, strict=strict, regex=regex, + exclude=exclude, **filters) + + convert = False + if isinstance(field, str): + field = (field,) + convert = True + + res = [] + for header in field: + vals = table_parser.get_column(table_, header) + if header.lower() == 'subnets': + vals = [val.split(sep=', ') for val in vals] + res.append(vals) + if convert: + res = res[0] + + return res + + +def delete_network(network_id, auth_info=Tenant.get('admin'), con_ssh=None, + fail_ok=False): + """ + Delete given network + Args: + network_id: network id to be deleted. 
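As a usage sketch (hypothetical network name and admin credentials, not part of this patch): get_networks() above can locate a network by partial name and feed its id straight into delete_network().

    # Hypothetical example; 'tenant1-mgmt-net' is a placeholder name.
    net_ids = get_networks(field='ID', name='tenant1-mgmt-net', strict=False,
                           auth_info=Tenant.get('admin'))
    if net_ids:
        code, msg = delete_network(net_ids[0], auth_info=Tenant.get('admin'))
        assert code == 0, msg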
+ con_ssh (SSHClient): + auth_info (dict): + fail_ok (bool): whether to return False or raise exception when + non-alive agents exist + + Returns (list): + + """ + LOG.info("Deleting network {}".format(network_id)) + code, output = cli.openstack('network delete', network_id, + ssh_client=con_ssh, fail_ok=True, + auth_info=auth_info) + + if code > 0: + return 1, output + + if network_id in get_networks(auth_info=auth_info, con_ssh=con_ssh): + msg = "Network {} is still listed in neutron net-list".format( + network_id) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.NeutronError(msg) + + succ_msg = "Network {} is successfully deleted.".format(network_id) + return 0, succ_msg + + +def create_sfc_port_pair(ingress_port, egress_port, name=None, description=None, + service_func_param=None, fail_ok=False, + con_ssh=None, auth_info=None, cleanup=None): + """ + Create port pair + + Args: + ingress_port (str): + egress_port (str): + name (str|None): + description (str|None): + service_func_param (str|None): + fail_ok (bool): + con_ssh: + auth_info: + cleanup (str|None) + + Returns (tuple): + (0, ) # successfully created + (1, ) # create CLI rejected + + """ + if not name: + name = 'port_pair' + name = common.get_unique_name(name_str=name) + + args_dict = { + '--ingress': ingress_port, + '--egress': egress_port, + '--description': description, + '--service-function-parameters': service_func_param, + } + + arg = '{} {}'.format( + common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name) + LOG.info("Creating port pair {}".format(name)) + code, output = cli.openstack(cmd='sfc port pair create', + positional_args=arg, ssh_client=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info) + table_ = table_parser.table(output) + pair_id = table_parser.get_value_two_col_table(table_, field='ID') + if pair_id and cleanup: + ResourceCleanup.add('port_pair', pair_id, scope=cleanup) + + if code > 0: + return 1, output + + LOG.info("Port pair {} created successfully".format(pair_id)) + return 0, pair_id + + +def delete_sfc_port_pairs(port_pairs=None, value='ID', check_first=True, + fail_ok=False, con_ssh=None, auth_info=None): + """ + Delete port pairs + Args: + port_pairs (str|list|tuple|None): + value: ID or Name + check_first (bool): + fail_ok (bool): + con_ssh: + auth_info: + + Returns (tuple): ((int), (list), + (list), list) + (0, (list), [], []) + (1, , (list), + list) # fail_ok=True + + """ + if not port_pairs: + port_pairs = get_sfc_port_pairs(field=value, auth_info=auth_info, + con_ssh=con_ssh) + else: + if isinstance(port_pairs, str): + port_pairs = [port_pairs] + + if check_first: + existing_pairs = get_sfc_port_pairs(field=value, + auth_info=auth_info, + con_ssh=con_ssh) + port_pairs = list(set(port_pairs) & set(existing_pairs)) + + if not port_pairs: + msg = 'Port pair(s) do not exist. Do nothing.' 
+ LOG.info(msg) + return -1, msg + + errors = [] + LOG.info("Deleting port pair(s): {}".format(port_pairs)) + for port_pair in port_pairs: + code, output = cli.openstack(cmd='sfc port pair delete', + positional_args=port_pair, + ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + errors.append(output) + + if errors: + return 1, '\n'.join(errors) + + post_del_pairs = get_sfc_port_pairs(field=value, auth_info=auth_info, + con_ssh=con_ssh) + failed_pairs = list(set(port_pairs) & set(post_del_pairs)) + if failed_pairs: + msg = "Some port-pair(s) still exist after deletion: {}".format( + failed_pairs) + LOG.warning(msg) + if fail_ok: + return 2, msg + raise exceptions.NeutronError(msg) + + msg = "Port pair(s) deleted successfully." + LOG.info(msg) + return 0, msg + + +def get_sfc_port_pairs(field='ID', con_ssh=None, auth_info=None, **filters): + """ + Get port pairs + Args: + field (str|tuple|list): header of the table. ID or Name + con_ssh: + auth_info: + **filters: + + Returns (list): + + """ + arg = '--print-empty' + table_ = table_parser.table( + cli.openstack('sfc port pair list', positional_args=arg, + ssh_client=con_ssh, auth_info=auth_info)[1]) + return table_parser.get_multi_values(table_, field, **filters) + + +def create_sfc_port_pair_group(port_pairs=None, port_pair_val='ID', name=None, + description=None, group_param=None, + fail_ok=False, con_ssh=None, auth_info=None, + cleanup=None): + """ + Create a port pair group + Args: + port_pairs (str|list|tuple|None): + port_pair_val (str): ID or Name + name (str|None): + description (str|None): + group_param (str|None): + fail_ok (bool): + con_ssh: + auth_info: + cleanup + + Returns (tuple): + (0, ) + (1, ) + + """ + args_dict = { + '--port-pair': port_pairs, + '--description': description, + '--port-pair-group-parameters': group_param + } + + if not name: + name = 'port_pair_group' + name = common.get_unique_name(name_str=name) + arg = '{} {}'.format( + common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name) + + LOG.info("Creating port pair group {}".format(name)) + code, output = cli.openstack('sfc port pair group create', arg, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + table_ = table_parser.table(output) + group_id = table_parser.get_value_two_col_table(table_, 'ID') + if cleanup and group_id: + ResourceCleanup.add('port_pair_group', group_id, scope=cleanup) + + if code > 0: + return 1, output + + # Check specified port-pair(s) are in created group + port_pairs_in_group = eval( + table_parser.get_value_two_col_table(table_, 'Port Pair')) + if port_pairs: + if port_pair_val.lower() != 'id': + pair_ids = [] + for port_pair in port_pairs: + port_pair_id = \ + get_sfc_port_pairs(Name=port_pair, con_ssh=con_ssh, + auth_info=auth_info)[0] + pair_ids.append(port_pair_id) + port_pairs = pair_ids + assert sorted(port_pairs_in_group) == sorted( + port_pairs), "Port pairs expected in group: {}. Actual: {}". 
\ + format(port_pairs, port_pairs_in_group) + else: + assert not port_pairs_in_group, "Port pair(s) exist in group even " \ + "though no port pair is specified" + + LOG.info("Port pair group {} created successfully".format(name)) + return 0, group_id + + +def set_sfc_port_pair_group(group, port_pairs=None, name=None, description=None, + fail_ok=False, con_ssh=None, + auth_info=None): + """ + Set port pair group with given values + Args: + group (str): port pair group to set + port_pairs (list|str|tuple|None): port pair(s) to add + name (str|None): + description (str|None): + fail_ok (bool): + con_ssh: + auth_info: + + Returns (tuple): + (0, "Port pair group set successfully") + (1, ) + + """ + LOG.info("Setting port pair group {}".format(group)) + arg = '' + verify = {} + if port_pairs is not None: + if port_pairs: + if isinstance(port_pairs, str): + port_pairs = [port_pairs] + port_pairs = list(port_pairs) + for port_pair in port_pairs: + arg += ' --port-pair {}'.format(port_pair) + + verify['Port Pair'] = port_pairs + else: + arg += ' --no-port-pair' + verify['Port Pair'] = [] + + if name is not None: + arg += ' --name {}'.format(name) + verify['Name'] = name + if description is not None: + arg += ' --description {}'.format(description) + verify['Description'] = description + + arg = '{} {}'.format(arg, group) + code, output = cli.openstack('sfc port pair group set', positional_args=arg, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + LOG.info("Verify port pair group is set correctly") + table_ = table_parser.table(output) + for key, val in verify.items(): + actual_val = table_parser.get_value_two_col_table(table_, key) + if isinstance(val, list): + actual_val = eval(actual_val) + if val: + assert set(val) <= set( + actual_val), "Port pair(s) set: {}; pairs in group: " \ + "{}".format(val, actual_val) + assert len(set(actual_val)) == len( + actual_val), "Duplicated item found in Port pairs field: " \ + "{}". format(actual_val) + else: + assert not actual_val, "Port pair still exist in group {} " \ + "after setting to no: {}". \ + format(group, actual_val) + else: + assert val == actual_val, "Value set for {} is {} ; " \ + "actual: {}".format(key, val, actual_val) + + msg = "Port pair group set successfully" + LOG.info("Port pair group set successfully") + return 0, msg + + +def unset_sfc_port_pair_group(group, port_pairs='all', fail_ok=False, + con_ssh=None, auth_info=None): + """ + Remove port pair(s) from a group + Args: + group (str): + port_pairs (str|list|tuple|None): port_pair(s). When 'all': remove + all port pairs from group. 
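A minimal sketch of how set_sfc_port_pair_group() defined above is typically exercised (the group and port-pair IDs are placeholders, not values from this patch):

    # Hypothetical example: add one port pair, then empty the group again.
    set_sfc_port_pair_group(group='pp-group-uuid', port_pairs='pp-uuid')
    set_sfc_port_pair_group(group='pp-group-uuid', port_pairs=[])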
+ fail_ok (bool): + con_ssh: + auth_info: + + Returns: + (0, (list)) + (1, (str)) + + """ + LOG.info("Unsetting port pair group {}".format(group)) + arg = '' + if port_pairs == 'all': + arg = '--all-port-pair' + else: + if isinstance(port_pairs, str): + port_pairs = [port_pairs] + port_pairs = list(port_pairs) + + for port_pair in port_pairs: + arg += ' --port-pair {}'.format(port_pair) + + arg = '{} {}'.format(arg, group) + + code, output = cli.openstack('sfc port pair group unset', + positional_args=arg, ssh_client=con_ssh, + fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + LOG.info("Verify port pair group is unset correctly") + table_ = table_parser.table(output) + actual_pairs = eval( + table_parser.get_value_two_col_table(table_, 'Port Pair')) + if port_pairs == 'all': + assert not actual_pairs + else: + unremoved_pairs = list(set(actual_pairs) & set(port_pairs)) + assert not unremoved_pairs + + LOG.info("Port pairs are successfully removed from group {}".format(group)) + return 0, actual_pairs + + +def delete_sfc_port_pair_group(group, check_first=True, fail_ok=False, + auth_info=None, con_ssh=None): + """ + Delete given port pair group + Args: + group (str): + check_first (bool): Whether to check before deletion + fail_ok (bool): + auth_info: + con_ssh: + + Returns (tuple): + (-1, 'Port pair group does not exist. Skip deleting.') # + check_first=True + (0, 'Port pair group successfully deleted') + (1, ) # CLI rejected. fail_ok=True + + """ + if check_first: + group_id = get_sfc_port_pair_group_values(group=group, field='ID', + auth_info=auth_info, + con_ssh=con_ssh, + fail_ok=True) + if group_id is None: + msg = 'Port pair group {} does not exist. Skip deleting.'.format( + group) + LOG.info(msg) + return -1, msg + + LOG.info("Deleting port pair group {}".format(group)) + code, output = cli.openstack('sfc port pair group delete', group, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + group_id = get_sfc_port_pair_group_values(group=group, field='ID', + auth_info=auth_info, + con_ssh=con_ssh, + fail_ok=True) + assert group_id is None, "Port pair group {} still exists after " \ + "deletion".format(group) + + msg = 'Port pair group {} successfully deleted'.format(group) + LOG.info(msg) + return 0, msg + + +def get_sfc_port_pair_groups(field='ID', auth_info=None, con_ssh=None): + """ + Get port pair groups + Args: + field (str|tuple|list): field(s) for port pair groups table + auth_info: + con_ssh: + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('sfc port pair group list --print-empty', + ssh_client=con_ssh, auth_info=auth_info)[1]) + + return table_parser.get_multi_values(table_, field) + + +def get_sfc_port_pair_group_values(group, field='Port Pair', fail_ok=False, + auth_info=None, con_ssh=None): + """ + Get port pair group value from 'openstack sfc port pair group show' + Args: + group (str): + field (str|list|tuple): + fail_ok (bool): + auth_info: + con_ssh: + + Returns (list|None): + None # if group does not exist. Only when fail_ok=True + str|dict|list # value of given field. 
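A compact SFC sketch under stated assumptions (the ingress/egress port UUIDs are placeholders and cleanup assumes a function-scoped pytest fixture), chaining the helpers defined above:

    # Hypothetical example: port pair -> port pair group -> read back members.
    code, pair_id = create_sfc_port_pair('ingress-port-uuid', 'egress-port-uuid',
                                         cleanup='function')
    code, group_id = create_sfc_port_pair_group(port_pairs=[pair_id],
                                                cleanup='function')
    members = get_sfc_port_pair_group_values(group_id, field='Port Pair')[0]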
+ + """ + code, output = cli.openstack('sfc port pair group show', group, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return None + + table_ = table_parser.table(output) + values = table_parser.get_multi_values_two_col_table(table_, field, + evaluate=True) + + return values + + +def get_sfc_flow_classifiers(field='ID', auth_info=None, con_ssh=None): + """ + Get flow classifiers + Args: + field (str|tuple|list): ID or Name + auth_info: + con_ssh: + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('sfc flow classifier list --print-empty', + ssh_client=con_ssh, auth_info=auth_info)[1]) + + return table_parser.get_multi_values(table_, field) + + +def get_sfc_port_chains(field='ID', auth_info=None, con_ssh=None): + """ + Get flow classifiers + Args: + field (str): ID or Name + auth_info: + con_ssh: + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('sfc port chain list --print-empty', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + return table_parser.get_multi_values(table_, field) + + +def create_sfc_port_chain(port_pair_groups, name=None, flow_classifiers=None, + description=None, chain_param=None, + auth_info=None, fail_ok=False, con_ssh=None, + cleanup=None): + """ + Create port chain + Args: + port_pair_groups (str|list|tuple): + name (str|None): + flow_classifiers (str|list|tuple|None): + description (str|None): + chain_param (str|None): + auth_info: + fail_ok: + con_ssh: + cleanup + + Returns (tuple): + (1, ) # CLI rejected. fail_ok=True + (0, ) + + """ + + args_dict = { + '--port-pair-group': port_pair_groups, + '--flow-classifier': flow_classifiers, + '--description': description, + '--chain-parameters': chain_param + } + arg = common.parse_args(args_dict, repeat_arg=True, vals_sep=',') + + if not name: + name = common.get_unique_name(name_str='port_chain') + + arg = '{} {}'.format(arg, name) + LOG.info("Creating port chain {}".format(name)) + code, output = cli.openstack('sfc port chain create', arg, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + table_ = table_parser.table(output, combine_multiline_entry=True) + port_chain_id = table_parser.get_value_two_col_table(table_, 'ID') + if cleanup: + ResourceCleanup.add('port_chain', port_chain_id, scope=cleanup) + + LOG.info("Port chain {} successfully created".format(name)) + return 0, port_chain_id + + +def set_sfc_port_chain(port_chain, port_pair_groups=None, flow_classifiers=None, + no_flow_classifier=None, + no_port_pair_group=None, fail_ok=False, con_ssh=None, + auth_info=None): + """ + Set port chain with given values + Args: + port_chain (str): port chain to set + port_pair_groups (list|str|tuple|None): port pair group(s) to add. + Use '' if no port pair group is desired + flow_classifiers (list|str|tuple|None): flow classifier(s) to add. 
+ Use '' if no flow classifier is desired + no_flow_classifier (bool|None) + no_port_pair_group (bool|None) + fail_ok (bool): + con_ssh: + auth_info: + + Returns (tuple): + (0, "Port chain set successfully") + (1, ) + + """ + LOG.info("Setting port chain {}".format(port_chain)) + arg_dict = { + 'flow-classifier': flow_classifiers, + 'no-flow-classifier': no_flow_classifier, + 'port-pair-group': port_pair_groups, + 'no-port-pair-group': no_port_pair_group, + } + + arg = '{} {}'.format(common.parse_args(arg_dict, repeat_arg=True), + port_chain) + code, output = cli.openstack('sfc port chain set', positional_args=arg, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + msg = "Port chain {} set successfully".format(port_chain) + LOG.info(msg) + return 0, msg + + +def unset_sfc_port_chain(port_chain, flow_classifiers=None, + port_pair_groups=None, all_flow_classifier=None, + fail_ok=False, con_ssh=None, + auth_info=None): + """ + Remove port pair(s) from a group + Args: + port_chain (str): + flow_classifiers (str|list|tuple|None): flow_classifier(s) to remove. + When 'all': remove all flow_classifiers from group. + port_pair_groups (str|list|tuple|None): port_pair_group(s) to remove. + all_flow_classifier (bool|None) + fail_ok (bool): + con_ssh: + auth_info: + + Returns: + (0, "Port chain unset successfully") + (1, (str)) + + """ + LOG.info("Unsetting port chain {}".format(port_chain)) + args_dict = { + '--all-flow-classifier': all_flow_classifier, + '--flow-classifier': flow_classifiers, + '--port-pair-group': port_pair_groups + } + arg = common.parse_args(args_dict, repeat_arg=True) + if not arg: + raise ValueError("Nothing specified to unset.") + + arg = '{} {}'.format(arg, port_chain) + code, output = cli.openstack('sfc port chain unset', arg, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + msg = "Port chain unset successfully" + LOG.info(msg) + return 0, msg + + +def delete_sfc_port_chain(port_chain, check_first=True, fail_ok=False, + auth_info=None, con_ssh=None): + """ + Delete given port pair group + Args: + port_chain (str): + check_first (bool): Whether to check before deletion + fail_ok (bool): + auth_info: + con_ssh: + + Returns (tuple): + (-1, 'Port chain does not exist. Skip deleting.') # + check_first=True + (0, 'Port chain successfully deleted') + (1, ) # CLI rejected. fail_ok=True + + """ + if check_first: + chain_id = get_sfc_port_chain_values(port_chain=port_chain, fields='ID', + auth_info=auth_info, + con_ssh=con_ssh, + fail_ok=True) + if chain_id is None: + msg = 'Port chain {} does not exist. 
Skip deleting.'.format( + port_chain) + LOG.info(msg) + return -1, msg + + LOG.info("Deleting port chain {}".format(port_chain)) + code, output = cli.openstack('sfc port chain delete', port_chain, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + chain_id = get_sfc_port_chain_values(port_chain=port_chain, fields='ID', + auth_info=auth_info, con_ssh=con_ssh, + fail_ok=True) + assert chain_id is None, "Port chain {} still exists after deletion".format( + port_chain) + + msg = 'Port chain {} successfully deleted'.format(port_chain) + LOG.info(msg) + return 0, msg + + +def get_sfc_port_chain_values(port_chain, fields='Flow Classifiers', + fail_ok=False, auth_info=None, con_ssh=None): + """ + Get port chain value from 'openstack sfc port chain show' + Args: + port_chain (str): + fields (str|list|tuple): + fail_ok (bool): + auth_info: + con_ssh: + + Returns (None|list): None # if chain does not exist. Only when + fail_ok=True + + """ + code, output = cli.openstack('sfc port chain show', port_chain, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return None + + table_ = table_parser.table(output) + return table_parser.get_multi_values_two_col_table(table_, fields, + evaluate=True, + merge_lines=True) + + +def get_sfc_flow_classifier_values(flow_classifier, fields='Protocol', + fail_ok=False, auth_info=None, con_ssh=None): + """ + Get flow classifier value from 'openstack sfc flow classifier show' + Args: + flow_classifier (str): + fields (str): + fail_ok (bool): + auth_info: + con_ssh: + + Returns (None|list): return None if flow classifier does not exist. + Only when fail_ok=True + + """ + code, output = cli.openstack('sfc flow classifier show', flow_classifier, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return None + + table_ = table_parser.table(output) + return table_parser.get_multi_values_two_col_table(table_, fields, + merge_lines=True) + + +def create_flow_classifier(name=None, description=None, protocol=None, + ether_type=None, source_port=None, + dest_port=None, source_ip_prefix=None, + dest_ip_prefix=None, logical_source_port=None, + logical_dest_port=None, l7_param=None, fail_ok=False, + auth_info=None, con_ssh=None, + cleanup=None): + """ + Create a flow classifier + Args: + name: + description: + protocol: + ether_type: + source_port: + dest_port: + source_ip_prefix: + dest_ip_prefix: + logical_source_port: + logical_dest_port: + l7_param: + fail_ok: + auth_info: + con_ssh: + cleanup + + Returns (tuple): + (0, ) + (1, ) + + """ + arg_dict = { + 'description': description, + 'protocol': protocol, + 'ethertype': ether_type, + 'logical-source-port': logical_source_port, + 'logical-destination-port': logical_dest_port, + 'source-ip-prefix': source_ip_prefix, + 'destination-ip-prefix': dest_ip_prefix, + 'l7-parameters': l7_param, + 'source-port': source_port, + 'destination-port': dest_port, + } + + arg = common.parse_args(arg_dict) + if not name: + name = 'flow_classifier' + name = common.get_unique_name(name_str=name) + + arg += ' {}'.format(name) + + LOG.info("Creating flow classifier {}".format(name)) + code, output = cli.openstack('sfc flow classifier create', arg, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if code > 0: + return 1, output + + table_ = table_parser.table(output) + id_ = table_parser.get_value_two_col_table(table_, 'ID') + if cleanup and id_: + ResourceCleanup.add('flow_classifier', id_) + + msg = "Flow classifier {} 
successfully created.".format(id_) + LOG.info(msg) + return 0, id_ + + +def delete_flow_classifier(flow_classifier, check_first=True, fail_ok=False, + auth_info=None, con_ssh=None): + """ + Delete flow classifier + Args: + flow_classifier (str): + check_first: + fail_ok: + auth_info: + con_ssh: + + Returns (tuple): + (-1, Flow classifier does not exist. Skip deletion.") + (0, "Flow classifier successfully deleted") + (1, ) + + """ + if check_first: + info = get_sfc_flow_classifier_values(flow_classifier, fields='ID', + fail_ok=True, con_ssh=con_ssh, + auth_info=auth_info) + if info is None: + msg = "Flow classifier {} does not exist. Skip deletion.".format( + flow_classifier) + LOG.info(msg) + return -1, msg + + code, output = cli.openstack('sfc flow classifier delete', flow_classifier, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + post_del_id = get_sfc_flow_classifier_values(flow_classifier, fields='ID', + auth_info=auth_info, + con_ssh=con_ssh, + fail_ok=True)[0] + if post_del_id: + err = "Flow classifier {} still exists after deletion".format( + flow_classifier) + LOG.warning(err) + if fail_ok: + return 2, err + raise exceptions.NeutronError(err) + + msg = "Flow classifier {} successfully deleted".format(flow_classifier) + LOG.info(msg) + return 0, msg + + +def get_ip_for_eth(ssh_client, eth_name): + """ + Get the IP addr for given eth on the ssh client provided + Args: + ssh_client (SSHClient): usually a vm_ssh + eth_name (str): such as "eth1, eth1.1" + + Returns (str): The first matching ipv4 addr for given eth. such as + "30.0.0.2" + + """ + if eth_name in ssh_client.exec_cmd('ip addr'.format(eth_name))[1]: + output = ssh_client.exec_cmd('ip addr show {}'.format(eth_name), + fail_ok=False)[1] + if re.search('inet {}'.format(Networks.IPV4_IP), output): + return re.findall('{}'.format(Networks.IPV4_IP), output)[0] + else: + LOG.warning( + "Cannot find ip address for interface{}".format(eth_name)) + return '' + + else: + LOG.warning( + "Cannot find provided interface{} in 'ip addr'".format(eth_name)) + return '' + + +def _is_v4_only(ip_list): + rtn_val = True + for ip in ip_list: + ip_addr = ipaddress.ip_address(ip) + if ip_addr.version == 6: + rtn_val = False + return rtn_val + + +def get_internal_net_ids_on_vxlan(vxlan_provider_net_id, ip_version=4, + mode='dynamic', con_ssh=None): + """ + Get the networks ids that matches the vxlan underlay ip version + Args: + vxlan_provider_net_id: vxlan provider net id to get the networks info + ip_version: 4 or 6 (IPV4 or IPV6) + mode: mode of the vxlan: dynamic or static + con_ssh (SSHClient): + + Returns (list): The list of networks name that matches the vxlan underlay + (v4/v6) and the mode + + """ + rtn_networks = [] + networks = get_networks_on_providernet(providernet=vxlan_provider_net_id, + field='id', con_ssh=con_ssh) + if not networks: + return rtn_networks + provider_attributes = get_networks_on_providernet( + providernet=vxlan_provider_net_id, con_ssh=con_ssh, + field='providernet_attributes') + if not provider_attributes: + return rtn_networks + + index = 0 + new_attr_list = [] + # In the case where some val could be 'null', need to change that to 'None' + for attr in provider_attributes: + new_attr = attr.replace('null', 'None') + new_attr_list.append(new_attr) + + # getting the configured vxlan mode + dic_attr_1 = eval(new_attr_list[0]) + vxlan_mode = dic_attr_1['mode'] + + if mode == 'static' and vxlan_mode == mode: + data_if_name = host_helper.get_host_interfaces('compute-0', + 
net_type='data', + con_ssh=con_ssh) + address = host_helper.get_host_addresses(host='compute-0', + ifname=data_if_name, + con_ssh=con_ssh) + if ip_version == 4 and _is_v4_only(address): + rtn_networks.append(networks[index]) + elif ip_version == 6 and not _is_v4_only(address): + LOG.info("here in v6") + rtn_networks = networks + else: + return rtn_networks + elif mode == 'dynamic' and vxlan_mode == mode: + for attr in provider_attributes: + dic_attr = eval(attr) + ip = dic_attr['group'] + ip_addr = ipaddress.ip_address(ip) + if ip_addr.version == ip_version: + rtn_networks.append(networks[index]) + index += 1 + + return rtn_networks + + +def get_dpdk_user_data(con_ssh=None): + """ + copy the cloud-config userdata to TiS server. + This userdata adds wrsroot/li69nux user to guest + + Args: + con_ssh (SSHClient): + + Returns (str): TiS filepath of the userdata + + """ + file_dir = '{}/userdata/'.format(ProjVar.get_var('USER_FILE_DIR')) + file_name = UserData.DPDK_USER_DATA + file_path = file_dir + file_name + + if con_ssh is None: + con_ssh = get_cli_client() + + if con_ssh.file_exists(file_path=file_path): + # LOG.info('userdata {} already exists. Return existing path'.format( + # file_path)) + # return file_path + con_ssh.exec_cmd('rm -f {}'.format(file_path), fail_ok=False) + + LOG.debug('Create userdata directory if not already exists') + cmd = 'mkdir -p {};touch {}'.format(file_dir, file_path) + con_ssh.exec_cmd(cmd, fail_ok=False) + + content = "#wrs-config\nFUNCTIONS=hugepages,avr\n" + con_ssh.exec_cmd('echo "{}" >> {}'.format(content, file_path), + fail_ok=False) + output = con_ssh.exec_cmd('cat {}'.format(file_path))[1] + assert output in content + + return file_path + + +def get_ping_failure_duration(server, ssh_client, end_event, timeout=600, + ipv6=False, start_event=None, + ping_interval=0.2, single_ping_timeout=1, + cumulative=False, init_timeout=60): + """ + Get ping failure duration in milliseconds + Args: + server (str): destination ip + ssh_client (SSHClient): where the ping cmd sent from + timeout (int): Max time to ping and gather ping loss duration before + ipv6 (bool): whether to use ping IPv6 address + start_event + end_event: an event that signals the end of the ping + ping_interval (int|float): interval between two pings in seconds + single_ping_timeout (int): timeout for ping reply in seconds. Minimum + is 1 second. + cumulative (bool): Whether to accumulate the total loss time before + end_event set + init_timeout (int): Max time to wait before vm pingable + + Returns (int): ping failure duration in milliseconds. 0 if ping did not + fail. 
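A hedged usage sketch for get_ping_failure_duration() (the VM IP and the natbox_ssh client are placeholders; in practice this call runs in a worker thread while the disruptive operation happens elsewhere):

    # Hypothetical example: measure ping loss toward a VM during an operation.
    import threading
    start_event, end_event = threading.Event(), threading.Event()
    # run in a worker thread; the main thread performs the disruptive action
    # and then calls end_event.set()
    duration_ms = get_ping_failure_duration('192.168.101.5', natbox_ssh,
                                            end_event=end_event,
                                            start_event=start_event,
                                            timeout=300)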
+ + """ + optional_args = '' + if ipv6: + optional_args += '6' + + fail_str = 'no answer yet' + cmd = 'ping{} -i {} -W {} -D -O {} | grep -B 1 -A 1 ' \ + '--color=never "{}"'.format(optional_args, ping_interval, + single_ping_timeout, server, fail_str) + + start_time = time.time() + ping_init_end_time = start_time + init_timeout + prompts = [ssh_client.prompt, fail_str] + ssh_client.send_sudo(cmd=cmd) + while time.time() < ping_init_end_time: + index = ssh_client.expect(prompts, timeout=10, searchwindowsize=100, + fail_ok=True) + if index == 1: + continue + elif index == 0: + raise exceptions.CommonError("Continuous ping cmd interrupted") + + LOG.info("Ping to {} succeeded".format(server)) + start_event.set() + break + else: + raise exceptions.VMNetworkError( + "VM is not reachable within {} seconds".format(init_timeout)) + + end_time = start_time + timeout + while time.time() < end_time: + if end_event.is_set(): + LOG.info("End event set. Stop continuous ping and process results") + break + + # End ping upon end_event set or timeout reaches + ssh_client.send_control() + try: + ssh_client.expect(fail_ok=False) + except (exceptions.TiSError, pexpect.ExceptionPexpect): + ssh_client.send_control() + ssh_client.expect(fail_ok=False) + + # Process ping output to get the ping loss duration + output = ssh_client.process_cmd_result(cmd='sudo {}'.format(cmd), + get_exit_code=False)[1] + lines = output.splitlines() + prev_succ = '' + duration = 0 + count = 0 + prev_line = '' + succ_str = 'bytes from' + post_succ = '' + for line in lines: + if succ_str in line: + if prev_succ and (fail_str in prev_line): + # Ping resumed after serious of lost ping + count += 1 + post_succ = line + tmp_duration = _parse_ping_timestamp( + post_succ) - _parse_ping_timestamp(prev_succ) + LOG.info("Count {} ping loss duration: {}".format(count, + tmp_duration)) + if cumulative: + duration += tmp_duration + elif tmp_duration > duration: + duration = tmp_duration + prev_succ = line + + prev_line = line + + if not post_succ: + LOG.warning("Ping did not resume within {} seconds".format(timeout)) + duration = -1 + else: + LOG.info("Final ping loss duration: {}".format(duration)) + return duration + + +def _parse_ping_timestamp(output): + timestamp = math.ceil(float(re.findall(r'\[(.*)\]', output)[0]) * 1000) + return timestamp + + +@contextmanager +def vconsole(ssh_client): + """ + Enter vconsole for the given ssh connection. 
+ raises if vconsole connection cannot be established + + Args: + ssh_client (SSHClient): + the connection to use for vconsole session + + Yields (function): + executer function for vconsole + + """ + LOG.info("Entering vconsole") + original_prompt = ssh_client.get_prompt() + ssh_client.set_prompt("AVS> ") + try: + ssh_client.exec_sudo_cmd("vconsole", get_exit_code=False) + except Exception as err: + # vconsole failed to connect + # this is usually because vswitch initialization failed + # check instance logs + ssh_client.set_prompt(original_prompt) + ssh_client.flush(3) + ssh_client.send_control('c') + ssh_client.flush(10) + raise err + + def v_exec(cmd, fail_ok=False): + LOG.info("vconsole execute: {}".format(cmd)) + if cmd.strip().lower() == 'quit': + raise ValueError("shall not exit vconsole without proper cleanup") + + code, output = ssh_client.exec_cmd(cmd, get_exit_code=False) + if "done" in output.lower(): + return 0, output + + LOG.warning(output) + if not fail_ok: + assert 0, 'vconsole failed to execute "{}"'.format(cmd) + return 1, output + + yield v_exec + + LOG.info("Exiting vconsole") + ssh_client.set_prompt(original_prompt) + ssh_client.exec_cmd("quit") + + +def create_port_forwarding_rule(router_id, inside_addr=None, inside_port=None, + outside_port=None, protocol='tcp', + tenant=None, description=None, fail_ok=False, + auth_info=Tenant.get('admin'), + con_ssh=None): + """ + + Args: + router_id (str): The router_id of the tenant router the + portforwarding rule is created + inside_addr(str): private ip address + inside_port (int|str): private protocol port number + outside_port(int|str): The public layer4 protocol port number + protocol(str): the protocol tcp|udp|udp-lite|sctp|dccp + tenant(str): The owner Tenant id. + description(str): User specified text description. The default is + "portforwarding" + fail_ok: + auth_info: + con_ssh: + + Returns (tuple): + 0, , - Portforwarding rule + created successfully + 1, '', - Portforwarding rule create cli rejected + 2, '', - Portforwarding rule create failed; one or more + values required are not specified. 
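A minimal sketch of create_port_forwarding_rule() usage (the router id and VM management address below are placeholders):

    # Hypothetical example: expose a VM's web port via the tenant router.
    code, pf_id, msg = create_port_forwarding_rule(router_id='router-uuid',
                                                   inside_addr='192.168.101.5',
                                                   inside_port=80,
                                                   outside_port=8080,
                                                   protocol='tcp')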
+ + + """ + # Process args + if tenant is None: + tenant = Tenant.get_primary()['tenant'] + + if description is None: + description = '"portforwarding"' + + tenant_id = keystone_helper.get_projects(field='ID', name=tenant, + con_ssh=con_ssh)[0] + + mgmt_ips_for_vms = get_mgmt_ips_for_vms() + + if inside_addr not in mgmt_ips_for_vms: + msg = "The inside_addr {} must be one of the vm mgmt internal " \ + "addresses: {}.".format(inside_addr, mgmt_ips_for_vms) + return 1, msg + + args_dict = { + '--tenant-id': tenant_id if auth_info == Tenant.get('admin') else None, + '--inside-addr': inside_addr, + '--inside-port': inside_port, + '--outside-port': outside_port, + '--protocol': protocol, + '--description': description, + } + args = router_id + + for key, value in args_dict.items(): + if value is None: + msg = 'A value must be specified for {}'.format(key) + if fail_ok: + return 1, '', msg + raise exceptions.NeutronError(msg) + else: + args = "{} {} {}".format(key, value, args) + + LOG.info("Creating port forwarding with args: {}".format(args)) + # send portforwarding-create cli + code, output = cli.neutron('portforwarding-create', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + # process result + if code == 1: + msg = 'Fail to create port forwarding rules: {}'.format(output) + if fail_ok: + return 1, '', msg + raise exceptions.NeutronError(msg) + + table_ = table_parser.table(output) + portforwarding_id = table_parser.get_value_two_col_table(table_, 'id') + + expt_values = { + 'router_id': router_id, + 'tenant_id': tenant_id + } + + for field, expt_val in expt_values.items(): + if table_parser.get_value_two_col_table(table_, field) != expt_val: + msg = "{} is not set to {} for portforwarding {}".format( + field, expt_val, router_id) + if fail_ok: + return 2, portforwarding_id, msg + raise exceptions.NeutronError(msg) + + succ_msg = "Portforwarding {} is created successfully.".format( + portforwarding_id) + LOG.info(succ_msg) + return 0, portforwarding_id, succ_msg + + +def create_port_forwarding_rule_for_vm(vm_id, inside_addr=None, + inside_port=None, outside_port=None, + protocol='tcp', + description=None, fail_ok=False, + auth_info=Tenant.get('admin'), + con_ssh=None): + """ + + Args: + vm_id (str): The id of vm the portforwarding rule is created for + inside_addr(str): private ip address; default is mgmt address of vm. + inside_port (str): private protocol port number; default is 80 ( web + port) + outside_port(str): The public layer4 protocol port number; default is + 8080 + protocol(str): the protocol tcp|udp|udp-lite|sctp|dccp; default is tcp + description(str): User specified text description. The default is + "portforwarding" + fail_ok: + auth_info: + con_ssh: + + Returns (tuple): + 0, , - Portforwarding rule + created successfully + 1, '', - Portforwarding rule create cli rejected + 2, '', - Portforwarding rule create failed; one or more + values required are not specified. 
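create_port_forwarding_rule_for_vm() above resolves the router and default ports automatically; a hedged sketch (the VM id is a placeholder):

    # Hypothetical example: defaults map outside port 8080 to the VM's port 80.
    code, pf_id, msg = create_port_forwarding_rule_for_vm('vm-uuid')
    delete_portforwarding_rule(pf_id)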
+ + """ + # Process args + router_id = get_tenant_router() + + if inside_addr is None: + inside_addr = get_mgmt_ips_for_vms(vm_id)[0] + if inside_port is None: + inside_port = "80" + + if outside_port is None: + outside_port = "8080" + + return create_port_forwarding_rule(router_id, inside_addr=inside_addr, + inside_port=inside_port, + outside_port=outside_port, + protocol=protocol, + description=description, fail_ok=fail_ok, + auth_info=auth_info, + con_ssh=con_ssh) + + +def update_portforwarding_rule(portforwarding_id, inside_addr=None, + inside_port=None, outside_port=None, + protocol=None, description=None, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + + Args: + portforwarding_id (str): Id or name of portfowarding rule to update + inside_addr (str): Private ip address + inside_port (str): Private layer4 protocol port + outside_port (str): Public layer4 protocol port + protocol (str): protocol name tcp|udp|udp-lite|sctp|dccp + description (str): User specified text description + fail_ok: + auth_info: + con_ssh: + + Returns (tuple): + 0, - Portforwarding rule updated successfully + + + """ + + if portforwarding_id is None or not isinstance(portforwarding_id, str): + raise ValueError( + "Expecting string value for portforwarding_id. Get {}".format( + type(portforwarding_id))) + + args = '' + + args_dict = { + '--inside_addr': inside_addr, + '--inside_port': inside_port, + '--outside_port': outside_port, + '--protocol': protocol, + '--description': description, + } + + for key, value in args_dict.items(): + if value is not None: + args += ' {} {}'.format(key, value) + + if not args: + raise ValueError("At least of the args need to be specified.") + + LOG.info("Updating router {}: {}".format(portforwarding_id, args)) + + args = '{} {}'.format(portforwarding_id, args.strip()) + return cli.neutron('portforwarding-update', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + +def delete_portforwarding_rules(pf_ids, auth_info=Tenant.get('admin'), + con_ssh=None, fail_ok=False): + """ + Deletes list of portforwarding rules + + Args: + pf_ids(list): list of portforwarding rules to be deleted. + auth_info: + con_ssh: + fail_ok: + + Returns (tuple): + 0, - Portforwarding rules delete successful + + """ + if pf_ids is None or len(pf_ids) == 0: + return 0, None + + for pf_id in pf_ids: + rc, output = delete_portforwarding_rule(pf_id, auth_info=auth_info, + con_ssh=con_ssh, + fail_ok=fail_ok) + if rc != 0: + return rc, output + return 0, None + + +def delete_portforwarding_rule(portforwarding_id, auth_info=Tenant.get('admin'), + con_ssh=None, fail_ok=False): + """ + Deletes a single portforwarding rule + Args: + portforwarding_id (str): Id or name of portforwarding rule to delete. + auth_info: + con_ssh: + fail_ok: + + Returns (tuple): + 0, - Portforwarding rules delete successful + 1, - Portforwarding rules delete cli rejected + 2, - Portforwarding rules delete fail + + """ + + LOG.info("Deleting port-forwarding {}...".format(portforwarding_id)) + code, output = cli.neutron('portforwarding-delete', portforwarding_id, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code != 0: + msg = "CLI rejected. 
Fail to delete Port-forwarding {}; {}".format( + portforwarding_id, output) + LOG.warn(msg) + if fail_ok: + return code, msg + else: + raise exceptions.NeutronError(msg) + + portforwardings = get_portforwarding_rules(auth_info=auth_info, + con_ssh=con_ssh) + if portforwarding_id in portforwardings: + msg = "Port-forwarding {} is still showing in neutron " \ + "portforwarding-list".format(portforwarding_id) + if fail_ok: + LOG.warning(msg) + return 2, msg + + succ_msg = "Port-forwarding {} is successfully deleted.".format( + portforwarding_id) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_portforwarding_rules(router_id=None, inside_addr=None, inside_port=None, + outside_port=None, + protocol=None, strict=True, auth_info=None, + con_ssh=None): + """ + Get porforwarding id(s) based on given criteria. + Args: + router_id (str): portforwarding router id + inside_addr (str): portforwarding inside_addr + inside_port (str): portforwarding inside_port + outside_port (str): portforwarding outside_port" + protocol (str): portforwarding protocol + strict (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (list): list of porforwarding id(s) + + """ + + param_dict = { + 'router_id': router_id, + 'inside_addr': inside_addr, + 'inside_port': inside_port, + 'outside_port': outside_port, + 'protocol': protocol, + } + + final_params = {} + for key, val in param_dict.items(): + if val is not None: + final_params[key] = str(val) + + table_ = table_parser.table( + cli.neutron('portforwarding-list', ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + if not table_parser.get_all_rows(table_): + return [] + + if router_id is not None: + table_ = table_parser.filter_table(table_, strict=strict, + router_id=router_id) + + return table_parser.get_values(table_, 'id', **final_params) + + +def get_portforwarding_rule_info(portforwarding_id, field='inside_addr', + strict=True, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Get value of specified field for given portforwarding rule + + Args: + portforwarding_id (str): Id or name of portforwarding rule + field (str): the name of the field attribute + strict (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (str): value of specified field for given portforwarding rule + + """ + + table_ = table_parser.table( + cli.neutron('portforwarding-show', portforwarding_id, + ssh_client=con_ssh, auth_info=auth_info)[1], + combine_multiline_entry=True) + return table_parser.get_value_two_col_table(table_, field, strict) + + +def create_pci_alias_for_devices(dev_type, hosts=None, devices=None, + alias_names=None, apply=True, con_ssh=None): + """ + Create pci alias for given devices by adding nova pci-alias service + parameters + Args: + dev_type (str): Valid values: 'gpu-pf', 'user' + hosts (str|list|tuple|None): Check devices on given host(s). + Check all hosts when None + devices (str|list|tuple|None): Devices to add in pci-alias. + When None, add all devices for given dev_type + alias_names (str|list|tuple|None): Pci alias' to create. + When None, name automatically. + apply (bool): whether to apply after nova service parameters modify + con_ssh: + + Returns (list): list of dict. + e.g., [{'device_id': '1d2d', 'vendor_id': '8086', 'name': user_intel-1}, + {'device_id': '1d26', 'vendor_id': '8086', 'name': + user_intel-2}, ... 
] + + Examples: + network_helper.create_pci_alias_for_devices(dev_type='user', + hosts=('compute-2', 'compute-3')) + network_helper.create_pci_alias_for_devices(dev_type='gpu-pf', + devices='pci_0000_0c_00_0') + + """ + LOG.info("Prepare for adding pci alias") + if not hosts: + hosts = host_helper.get_hypervisors(con_ssh=con_ssh) + + if not devices: + if 'gpu' in dev_type: + class_id = DevClassID.GPU + else: + class_id = DevClassID.USB + devices = host_helper.get_host_devices(host=hosts[0], field='address', + list_all=True, regex=True, + **{'class id': class_id}) + elif isinstance(devices, str): + devices = [devices] + + if not alias_names: + alias_names = [None] * len(devices) + elif isinstance(alias_names, str): + alias_names = [alias_names] + + if len(devices) != len(alias_names): + raise ValueError( + "Number of devices do not match number of alias names provided") + + LOG.info( + "Ensure devices are enabled on hosts {}: {}".format(hosts, devices)) + host_helper.enable_disable_hosts_devices(hosts, devices) + + host = hosts[0] + devices_to_create = [] + param_strs = [] + for i in range(len(devices)): + device = devices[i] + alias_name = alias_names[i] + dev_id, vendor_id, vendor_name = host_helper.get_host_device_values( + host=host, device=device, + fields=('device id', 'vendor id', 'vendor name')) + + if not alias_name: + alias_name = '{}_{}'.format(dev_type, + vendor_name.split()[0].lower()) + alias_name = common.get_unique_name(name_str=alias_name) + + param = {'device_id': dev_id, 'vendor_id': vendor_id, + 'name': alias_name} + param_str = ','.join( + ['{}={}'.format(key, val) for key, val in param.items()]) + param_strs.append(param_str) + + pci_alias_dict = {'device id': dev_id, 'vendor id': vendor_id, + 'pci alias': alias_name} + devices_to_create.append(pci_alias_dict) + + LOG.info("Create nova pci alias service parameters: {}".format( + devices_to_create)) + system_helper.create_service_parameter( + service='nova', section='pci_alias', + con_ssh=con_ssh, name=dev_type, + value='"{}"'.format(';'.join(param_strs))) + + if apply: + LOG.info("Apply service parameters") + system_helper.apply_service_parameters(service='nova') + LOG.info("Verify nova pci alias' are listed after applying service " + "parameters: {}".format(devices_to_create)) + _check_pci_alias_created(devices_to_create, con_ssh=con_ssh) + + return devices_to_create + + +def _check_pci_alias_created(devices, con_ssh=None, timeout=60): + end_time = time.time() + timeout + out = None + while time.time() < end_time: + code, out = cli.nova('device-list', ssh_client=con_ssh, fail_ok=True, + auth_info=Tenant.get('admin')) + if code == 0: + break + time.sleep(10) + else: + raise exceptions.NovaError( + 'nova device-list failed. Error: \n{}'.format(out)) + + pci_alias_dict = get_pci_device_list_info(con_ssh=con_ssh) + for param_ in devices: + pci_alias = param_.get('pci alias') + assert pci_alias, "pci alias {} is not shown in nova " \ + "device-list".format(pci_alias) + created_alias = pci_alias_dict[pci_alias] + assert param_.get('vendor id') == created_alias['vendor id'] + assert param_.get('device id') == created_alias['device id'] + + +def get_qos_policies(field='id', name=None, qos_ids=None, con_ssh=None, + auth_info=None): + """ + Get qos policies + Args: + field (str|list|tuple) + name + qos_ids(str|list|None): QoS id to filter name. + con_ssh(SSHClient): If None, active controller ssh will be used. + auth_info(dict): Tenant dict. If None, primary tenant will be used. 
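A small sketch of get_qos_policies() (tenant credentials assumed from the test config):

    # Hypothetical example: list QoS policy names visible to the primary tenant.
    qos_names = get_qos_policies(field='name', auth_info=Tenant.get_primary())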
+ + Returns(list): List of neutron qos names filtered by qos_id. + + """ + table_ = table_parser.table( + cli.neutron('qos-list', ssh_client=con_ssh, auth_info=auth_info)[1]) + filters = {'id': qos_ids, 'name': name} + + return table_parser.get_multi_values(table_, field, **filters) + + +def create_qos(name=None, tenant_name=None, description=None, scheduler=None, + dscp=None, ratelimit=None, fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin'), cleanup=None): + """ + Args: + name(str): Name of the QoS to be created. + tenant_name(str): Such as tenant1, tenant2. If none uses primary tenant. + description(str): Description of the created QoS. + scheduler(dict): Dictionary of scheduler policies formatted + as {'policy': value}. + dscp(dict): Dictionary of dscp policies formatted as {'policy': value}. + ratelimit(dict): Dictionary of ratelimit policies formatted + as {'policy': value}. + fail_ok(bool): + con_ssh(SSHClient): + auth_info(dict): Run the neutron qos-create cli using this + authorization info. Admin by default, + cleanup (str): + + Returns(tuple): exit_code(int), qos_id(str) + (0, qos_id) qos successfully created. + (1, output) qos not created successfully + """ + tenant_id = keystone_helper.get_projects(field='ID', + name=tenant_name, + con_ssh=con_ssh)[0] + check_dict = {} + args = '' + current_qos = get_qos_policies(field='name', con_ssh=con_ssh, + auth_info=auth_info) + if name is None: + if tenant_name is None: + tenant_name = common.get_tenant_name(Tenant.get_primary()) + name = common.get_unique_name("{}-qos".format(tenant_name), + existing_names=current_qos, + resource_type='qos') + else: + name = common.get_unique_name("{}-qos".format(tenant_name), + existing_names=current_qos, + resource_type='qos') + args_dict = {'name': name, + 'tenant-id': tenant_id, + 'description': description, + 'scheduler': scheduler, + 'dscp': dscp, + 'ratelimit': ratelimit + } + check_dict['policies'] = {} + for key, value in args_dict.items(): + if value: + if key in ('scheduler', 'dscp', 'ratelimit'): + args += " --{}".format(key) + for policy, val in value.items(): + args += " {}={}".format(policy, val) + value[policy] = str(val) + check_dict['policies'][key] = value + else: + args += " --{} '{}'".format(key, value) + if key is 'tenant-id': + key = 'tenant_id' + check_dict[key] = value + + LOG.info("Creating QoS with args: {}".format(args)) + exit_code, output = cli.neutron('qos-create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if exit_code == 1: + return 1, output + + table_ = table_parser.table(output) + for key, exp_value in check_dict.items(): + if key is 'policies': + actual_value = eval( + table_parser.get_value_two_col_table(table_, key)) + else: + actual_value = table_parser.get_value_two_col_table(table_, key) + if actual_value != exp_value: + msg = "Qos created but {} expected to be {} but actually {}".format( + key, exp_value, actual_value) + raise exceptions.NeutronError(msg) + + qos_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup: + ResourceCleanup.add('network_qos', qos_id, scope=cleanup) + LOG.info("QoS successfully created") + return 0, qos_id + + +def delete_qos(qos_id, auth_info=Tenant.get('admin'), con_ssh=None, + fail_ok=False): + """ + + Args: + qos_id(str): QoS to be deleted + auth_info(dict): tenant to be used, if none admin will be used + con_ssh(SSHClient): + fail_ok(bool): + + Returns: code(int), output(string) + (0, "QoS successfully deleted" ) + (1, ) openstack qos delete cli rejected + """ + + 
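A hedged end-to-end QoS sketch using the helpers in this module (the network id and the scheduler policy/value are placeholders; actual policy names depend on the lab's vswitch support):

    # Hypothetical example: create a QoS policy, attach, detach, then delete.
    code, qos_id = create_qos(scheduler={'weight': 4}, cleanup='function')
    update_net_qos(net_id='net-uuid', qos_id=qos_id)
    update_net_qos(net_id='net-uuid', qos_id=None)  # --no-qos removes it
    delete_qos(qos_id)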
LOG.info("deleting QoS: {}".format(qos_id)) + code, output = cli.neutron('qos-delete', qos_id, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code == 1: + return 1, output + + if qos_id in get_qos_policies(auth_info=auth_info, con_ssh=con_ssh): + msg = "QoS {} still listed in neutron QoS list".format(qos_id) + raise exceptions.NeutronError(msg) + + succ_msg = "QoS {} successfully deleted".format(qos_id) + LOG.info(succ_msg) + return 0, succ_msg + + +def update_net_qos(net_id, qos_id=None, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Update network qos to given value + Args: + net_id (str): network to update + qos_id (str|None): when None, remove the qos from network + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): (code, msg) + (0, "Network qos is successfully updated to ") + (1, ) openstack network update cli rejected + + """ + if qos_id: + kwargs = {'--wrs-tm:qos': qos_id} + arg_str = '--wrs-tm:qos {}'.format(qos_id) + else: + kwargs = {'--no-qos': None} + arg_str = '--no-qos' + + code, msg = cli.neutron('net-update', '{} {}'.format(arg_str, net_id), + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return code, msg + + if '--no-qos' in kwargs: + actual_qos = get_network_values(net_id, fields='wrs-tm:qos', + auth_info=auth_info, con_ssh=con_ssh)[0] + assert not actual_qos, "Qos {} is not removed from {}".format( + actual_qos, net_id) + + msg = "Network {} qos is successfully updated to {}".format(net_id, qos_id) + LOG.info(msg) + return 0, msg diff --git a/automated-pytest-suite/keywords/nova_helper.py b/automated-pytest-suite/keywords/nova_helper.py new file mode 100755 index 0000000..ad48d2c --- /dev/null +++ b/automated-pytest-suite/keywords/nova_helper.py @@ -0,0 +1,1310 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from utils import cli, exceptions +from utils import table_parser +from utils.tis_log import LOG +from consts.proj_vars import ProjVar +from consts.auth import Tenant +from consts.stx import FlavorSpec, GuestImages +from keywords import common +from testfixtures.fixture_resources import ResourceCleanup + + +def create_flavor(name=None, flavor_id=None, vcpus=1, ram=1024, root_disk=None, + ephemeral=None, swap=None, + is_public=None, rxtx_factor=None, project=None, + project_domain=None, description=None, guest_os=None, + fail_ok=False, auth_info=Tenant.get('admin'), con_ssh=None, + storage_backing=None, + rtn_id=True, cleanup=None, add_default_specs=True, + properties=None): + """ + Create a flavor with given criteria. + + Args: + name (str): substring of flavor name. Whole name will be + -. e,g., 'myflavor-1'. If None, name + will be set to 'flavor'. + flavor_id (str): auto generated by default unless specified. + vcpus (int): + ram (int): + root_disk (int): + ephemeral (int): + swap (int|None): + is_public (bool): + rxtx_factor (str): + project + project_domain + description + guest_os (str|None): guest name such as 'tis-centos-guest' or None - + default tis guest assumed + fail_ok (bool): whether it's okay to fail to create a flavor. Default + to False. + auth_info (dict): This is set to Admin by default. Can be set to + other tenant for negative test. + con_ssh (SSHClient): + storage_backing (str): storage backing in extra flavor. Auto set + storage backing based on system config if None. + Valid values: 'local_image', 'remote' + rtn_id (bool): return id or name + cleanup (str|None): cleanup scope. 
function, class, module, or session + add_default_specs (False): Whether to automatically add extra specs + that are needed to launch vm + properties (str|list|dict) + + Returns (tuple): (rtn_code (int), flavor_id/err_msg (str)) + (0, ): flavor created successfully + (1, ): create flavor cli rejected + + """ + + table_ = table_parser.table( + cli.openstack('flavor list', ssh_client=con_ssh, auth_info=auth_info)[ + 1]) + existing_names = table_parser.get_column(table_, 'Name') + + if name is None: + name = 'flavor' + flavor_name = common.get_unique_name(name_str=name, + existing_names=existing_names, + resource_type='flavor') + + if root_disk is None: + if not guest_os: + guest_os = GuestImages.DEFAULT['guest'] + root_disk = GuestImages.IMAGE_FILES[guest_os][1] + + args_dict = { + '--ephemeral': ephemeral, + '--swap': swap, + '--rxtx-factor': rxtx_factor, + '--is-public': is_public, + '--disk': root_disk, + '--ram': ram, + '--vcpus': vcpus, + '--id': flavor_id, + '--project': project, + '--project-domain': project_domain, + '--description': description, + '--public': True if is_public else None, + '--private': True if is_public is False else None, + '--property': properties, + } + args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), + flavor_name) + + LOG.info("Creating flavor {}...".format(flavor_name)) + LOG.info("openstack flavor create option: {}".format(args)) + exit_code, output = cli.openstack('flavor create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if exit_code > 1: + return 1, output + + table_ = table_parser.table(output) + flavor_id = table_parser.get_value_two_col_table(table_, 'id') + LOG.info("Flavor {} created successfully.".format(flavor_name)) + + if cleanup: + ResourceCleanup.add('flavor', flavor_id, scope=cleanup) + + if add_default_specs: + extra_specs = {FlavorSpec.MEM_PAGE_SIZE: '2048'} + # extra_specs = {FlavorSpec.MEM_PAGE_SIZE: 'small'} + default_flavor_backing = ProjVar.get_var('DEFAULT_INSTANCE_BACKING') + sys_inst_backing = ProjVar.get_var('INSTANCE_BACKING') + if not default_flavor_backing: + from keywords import host_helper + sys_inst_backing = host_helper.get_hosts_per_storage_backing( + up_only=False, auth_info=auth_info, + con_ssh=con_ssh, refresh=True) + configured_backings = [backing for backing in sys_inst_backing if + sys_inst_backing.get(backing)] + LOG.debug( + "configured backing:{} sys inst backing: {}, required storage " + "backing: {}". + format(configured_backings, sys_inst_backing, storage_backing)) + + if storage_backing and storage_backing not in configured_backings: + raise ValueError( + 'Required local_storage {} is not configured on any nova ' + 'hypervisor'. 
+ format(storage_backing)) + + if len(configured_backings) > 1: + extra_specs[ + FlavorSpec.STORAGE_BACKING] = storage_backing if \ + storage_backing else \ + ProjVar.get_var('DEFAULT_INSTANCE_BACKING') + + if extra_specs: + LOG.info("Setting flavor specs: {}".format(extra_specs)) + set_flavor(flavor_id, con_ssh=con_ssh, auth_info=auth_info, + **extra_specs) + + flavor = flavor_id if rtn_id else flavor_name + return 0, flavor, storage_backing + + +def set_aggregate(aggregate, properties=None, no_property=None, zone=None, + name=None, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Set aggregate with given params + Args: + aggregate (str): aggregate to set + properties (dict|None): + no_property (bool|None): + zone (str|None): + name (str|None): + fail_ok (bool): + con_ssh: + auth_info: + + Returns (tuple): + (0, "Aggregate set successfully with param: ) + (1, ) returns only if fail_ok=True + + """ + args_dict = { + '--zone': zone, + '--name': name, + '--property': properties, + '--no-property': no_property, + } + + args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), + aggregate) + code, output = cli.openstack('aggregate set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + msg = "Aggregate set successfully with param: {}".format(aggregate, args) + LOG.info(msg) + return 0, msg + + +def unset_aggregate(aggregate, properties, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Unset given properties for aggregate + Args: + aggregate (str): aggregate to unset + properties (list|tuple|str|None): + fail_ok (bool): + con_ssh: + auth_info: + + Returns (tuple): + (0, "Aggregate set successfully with param: ) + (1, ) returns only if fail_ok=True + + """ + if isinstance(properties, str): + properties = (properties,) + + args = ' '.join(['--property {}'.format(key) for key in properties]) + args = '{} {}'.format(args, aggregate) + code, output = cli.openstack('aggregate unset', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + msg = "Aggregate {} properties unset successfully: {}".format(aggregate, + properties) + LOG.info(msg) + return 0, msg + + +def get_aggregate_values(aggregate, fields, con_ssh=None, + auth_info=Tenant.get('admin'), fail_ok=False): + """ + Get values of a nova aggregate for given fields + Args: + aggregate (str): + fields (str|list|tuple): + con_ssh: + auth_info (dict): + fail_ok (bool) + + Returns (list): + + """ + code, out = cli.openstack('aggregate show', aggregate, ssh_client=con_ssh, + auth_info=auth_info, fail_ok=fail_ok) + if code > 0: + return [] + + table_ = table_parser.table(out) + return table_parser.get_multi_values_two_col_table( + table_, fields, evaluate=True, dict_fields=('properties',)) + + +def delete_flavors(flavors, check_first=True, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Delete given flavor(s) + Args: + flavors (list|str): id(s) of flavor(s) to delete + check_first (bool) + fail_ok (bool): whether to raise exception if any flavor fails to delete + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + (-1, 'None of the flavor(s) exists. 
Do nothing.') + (0, 'Flavor is successfully deleted') + (1, ) + (2, "Flavor still exists on system after deleted.") + + """ + if isinstance(flavors, str): + flavors = [flavors] + + if check_first: + existing_favors = get_flavors(con_ssh=con_ssh, auth_info=auth_info) + flavors = list(set(flavors) & set(existing_favors)) + if not flavors: + msg = "None of the given flavors exist. Do nothing." + LOG.info(msg) + return -1, msg + + LOG.info("Flavor(s) to delete: {}".format(flavors)) + code, output = cli.openstack('flavor delete', ' '.join(flavors), + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + existing_favors = get_flavors(con_ssh=con_ssh, auth_info=auth_info) + flavors_still_exist = list(set(flavors) & set(existing_favors)) + if flavors_still_exist: + err_msg = "Flavor(s) still exist after deletion: {}".format( + flavors_still_exist) + LOG.warning(err_msg) + if fail_ok: + return 2, err_msg + else: + raise exceptions.FlavorError(err_msg) + + success_msg = "Flavor(s) deleted successfully." + LOG.info(success_msg) + return 0, success_msg + + +def get_flavors(name=None, memory=None, disk=None, ephemeral=None, swap=None, + vcpu=None, rxtx=None, is_public=None, + flv_id=None, long=False, con_ssh=None, auth_info=None, + strict=True, field='id'): + """ + Get a flavor id with given criteria. If no criteria given, a random + flavor will be returned. + + Args: + name (str): name of a flavor + memory (int): memory size in MB + disk (int): size of the disk in GB + ephemeral (int): size of ephemeral disk in GB + swap (int): size of swap disk in GB + vcpu (int): number of vcpus + rxtx (str): + is_public (bool): + flv_id (str) + long (bool) + con_ssh (SSHClient): + auth_info (dict): + strict (bool): whether or not to perform strict search on provided + values + field (str|list|tuple) + + Returns (list): + + """ + + args = '--long' if long else '' + table_ = table_parser.table( + cli.openstack('flavor list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + req_dict = {'Name': name, + 'RAM': memory, + 'Disk': disk, + 'Ephemeral': ephemeral, + 'Swap': '' if str(swap) == '0' else swap, + 'VCPUs': vcpu, + 'RXTX Factor': rxtx, + 'Is Public': is_public, + 'ID': flv_id, + } + final_dict = {k: str(v) for k, v in req_dict.items() if v is not None} + return table_parser.get_multi_values(table_, field, strict=strict, + **final_dict) + + +def get_basic_flavor(auth_info=None, con_ssh=None, guest_os='', rtn_id=True): + """ + Get a basic flavor with the default arg values and without adding extra + specs. 
+ Args: + auth_info (dict): + con_ssh (SSHClient): + guest_os + rtn_id (bool): return flavor id or name + + Returns (str): id of the basic flavor + + """ + if not guest_os: + guest_os = GuestImages.DEFAULT['guest'] + size = GuestImages.IMAGE_FILES[guest_os][1] + + default_flavor_name = 'flavor-default-size{}'.format(size) + rtn_val = 'id' if rtn_id else 'name' + flavors = get_flavors(name=default_flavor_name, con_ssh=con_ssh, + auth_info=auth_info, strict=False, + field=rtn_val) + flavor = flavors[0] if flavors else \ + create_flavor(name=default_flavor_name, root_disk=size, con_ssh=con_ssh, + cleanup='session', rtn_id=rtn_id)[1] + + return flavor + + +def set_flavor(flavor, project=None, project_domain=None, description=None, + no_property=None, con_ssh=None, + auth_info=Tenant.get('admin'), fail_ok=False, **properties): + """ + Set flavor with given parameters + Args: + flavor (str): id of a flavor + project (str) + project_domain (str) + description (str) + no_property (bool) + con_ssh (SSHClient): + auth_info (dict): + fail_ok (bool): + **properties: extra specs to set. e.g., **{"hw:mem_page_size": "2048"} + + Returns (tuple): (rtn_code (int), message (str)) + (0, 'Flavor extra specs set successfully.'): required extra spec(s) + added successfully + (1, ): add extra spec cli rejected + + """ + args_dict = { + '--description': description, + '--project': project, + '--project-domain': project_domain, + '--no-property': no_property and not properties, + '--property': properties + } + args = common.parse_args(args_dict, repeat_arg=True) + + if not args.strip(): + raise ValueError("Nothing is provided to set") + + LOG.info("Setting flavor {} with args: {}".format(flavor, args)) + args = '{} {}'.format(args, flavor) + exit_code, output = cli.openstack('flavor set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if exit_code == 1: + return 1, output + + msg = "Flavor {} set successfully".format(flavor) + LOG.info(msg) + return 0, flavor + + +def unset_flavor(flavor, properties=None, project=None, project_domain=None, + check_first=True, fail_ok=False, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Unset specific extra spec(s) from given flavor. + + Args: + flavor (str): id of the flavor + properties (str|list|tuple): extra spec(s) to be removed. At least + one should be provided. + project_domain + project + check_first (bool): Whether to check if extra spec exists in flavor + before attempt to unset + con_ssh (SSHClient): + auth_info (dict): + fail_ok (bool): + con_ssh + + Returns (tuple): (rtn_code (int), message (str)) + (-1, 'Extra spec(s) not exist in flavor. Do nothing.') + (0, 'Flavor extra specs unset successfully.'): required extra spec(s) + removed successfully + (1, ): unset extra spec cli rejected + (2, ' is still in the extra specs list'): post action + check failed + + """ + if isinstance(properties, str): + properties = [properties] + + if properties and check_first: + existing_specs = get_flavor_values(flavor, fields='properties', + con_ssh=con_ssh, + auth_info=auth_info)[0] + properties = list(set(properties) & set(existing_specs.keys())) + + args_dict = { + '--property': properties, + '--project': project, + '--project_domain': project_domain, + } + args = common.parse_args(args_dict, repeat_arg=True) + if not args: + msg = "Nothing to unset for flavor {}. 
Do nothing.".format(flavor) + LOG.info(msg) + return -1, msg + + LOG.info("Unsetting flavor {} with args: {}".format(flavor, args)) + exit_code, output = cli.openstack('flavor unset', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if exit_code > 0: + return 1, output + + success_msg = "Flavor {} unset successfully".format(flavor) + LOG.info(success_msg) + return 0, success_msg + + +def get_flavor_properties(flavor, con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get extra specs of a flavor as dictionary + Args: + flavor (str): id of a flavor + con_ssh (SSHClient): + auth_info (dict): + + Returns (dict): e.g., {"aggregate_instance_extra_specs:storage": + "local_image", "hw:mem_page_size": "2048"} + + """ + return get_flavor_values(flavor, fields='properties', con_ssh=con_ssh, + auth_info=auth_info)[0] + + +def create_server_group(name=None, policy='affinity', rule=None, fail_ok=False, + auth_info=None, con_ssh=None, + rtn_exist=False, field='id'): + """ + Create a server group with given criteria + + Args: + name (str): name of the server group + policy (str): affinity or anti_infinity + rule (str|None): max_server_per_host can be specified when + policy=anti-affinity + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + rtn_exist (bool): Whether to return existing server group that + matches the given name + field (str): id or name + + Returns (tuple): (rtn_code (int), err_msg_or_srv_grp_id (str)) + - (0, ) # server group created successfully + - (1, ) # create server group cli rejected + + """ + # process server group metadata + if name and rtn_exist: + existing_grp = get_server_groups(name=name, strict=False, + con_ssh=con_ssh, auth_info=auth_info, + field=field) + if existing_grp: + LOG.debug( + "Returning existing server group {}".format(existing_grp[0])) + return -1, existing_grp[0] + + # process server group name and policy + if not name: + name = 'grp_{}'.format(policy.replace('-', '_')) + name = common.get_unique_name(name_str=name) + args = '{}{} {}'.format('--rule {} '.format(rule) if rule else '', name, + policy.replace('_', '-')) + + LOG.info("Creating server group with args: {}...".format(args)) + exit_code, output = cli.nova('server-group-create', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if exit_code > 0: + return 1, output + + table_ = table_parser.table(output) + srv_grp_id = table_parser.get_values(table_, field)[0] + LOG.info("Server group {} created successfully.".format(name)) + return 0, srv_grp_id + + +def get_server_groups(field='ID', all_projects=True, long=False, strict=True, + regex=False, + auth_info=Tenant.get('admin'), con_ssh=None, **kwargs): + """ + Get server groups ids based on the given criteria + + Args: + auth_info (dict): + con_ssh (SSHClient): + strict (bool): whether to do strict search for given name + regex (bool): whether or not to use regex when for given name + all_projects(bool): whether to list for all projects + long + field (str|list|tuple): + **kwargs: filters + + Returns (list): list of server groups + + """ + args_dict = { + '--all-projects': all_projects, + '--long': long + } + args = common.parse_args(args_dict) + table_ = table_parser.table( + cli.openstack('server group list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + def _parse_list(value_str): + return [val.strip() for val in value_str.split(',')] + + parsers = {_parse_list: ('Policies', 'Members')} + + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, parsers=parsers, 
**kwargs) + + +def get_server_groups_info(headers=('Policies', 'Members'), auth_info=None, + con_ssh=None, + strict=False, **kwargs): + """ + Get a server group(s) info as a list + + Args: + headers (str|list|tuple): header string for info. such as 'Member', + 'Metadata', 'Policies' + auth_info (dict): + con_ssh (SSHClient): + strict + kwargs + + Returns (dict): server group(s) info in dict. server group id as key, + and values of specified headers as value. + Examples: {: [['affinity'], [, , ...]], + : ['anti-affinity', []]} + + """ + if isinstance(headers, str): + headers = [headers] + headers = ['ID'] + list(headers) + + values = get_server_groups(field=headers, all_projects=True, long=True, + con_ssh=con_ssh, auth_info=auth_info, + strict=strict, **kwargs) + group_ids = values.pop(0) + values = list(zip(*values)) + srv_groups_info = {group_ids[i]: values[i] for i in range(len(group_ids))} + return srv_groups_info + + +def get_server_group_info(group_id=None, group_name=None, + headers=('Policies', 'Members'), strict=False, + auth_info=None, con_ssh=None): + """ + Get server group info for specified server group + Args: + group_id: + group_name: + headers (str|list|tuple): + auth_info: + strict + con_ssh: + + Returns (list): + + """ + filters = {'ID': group_id} + if group_name: + filters['Name'] = group_name + + group_info = get_server_groups_info(headers=headers, auth_info=auth_info, + strict=strict, + con_ssh=con_ssh, **filters) + assert len(group_info) == 1, "More than 1 server group filtered" + + values = list(group_info.values())[0] + + return values + + +def server_group_exists(srv_grp_id, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Return True if given server group exists else False + + Args: + srv_grp_id (str): + auth_info (dict): + con_ssh (SSHClient): + + Returns (bool): True or False + + """ + existing_server_groups = get_server_groups(all_projects=True, + auth_info=auth_info, + con_ssh=con_ssh) + return srv_grp_id in existing_server_groups + + +def delete_server_groups(srv_grp_ids=None, check_first=True, fail_ok=False, + auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Delete server group(s) + + Args: + srv_grp_ids (list|str): id(s) for server group(s) to delete. + check_first (bool): whether to check existence of given server groups + before attempt to delete. Default: True. + fail_ok (bool): + auth_info (dict|None): + con_ssh (SSHClient): + + Returns (tuple): (rtn_code(int), msg(str)) # rtn_code 1,2 only returns + when fail_ok=True + (-1, 'No server group(s) to delete.') # "Empty vm list/string + provided and no vm exist on system. + (-1, 'None of the given server group(s) exists on system.') + (0, "Server group(s) deleted successfully.") + (1, ) # Deletion rejected for all of the server groups. + Return CLI stderr. + (2, "Some deleted server group(s) still exist on system:: + ") + """ + existing_sgs = None + if not srv_grp_ids: + existing_sgs = srv_grp_ids = get_server_groups(con_ssh=con_ssh, + auth_info=auth_info) + elif isinstance(srv_grp_ids, str): + srv_grp_ids = [srv_grp_ids] + + srv_grp_ids = [sg for sg in srv_grp_ids if sg] + if not srv_grp_ids: + LOG.info("No server group(s) to delete. Do Nothing") + return -1, 'No server group(s) to delete.' + + if check_first: + if existing_sgs is None: + existing_sgs = get_server_groups(con_ssh=con_ssh, + auth_info=auth_info) + + srv_grp_ids = list(set(srv_grp_ids) & set(existing_sgs)) + if not srv_grp_ids: + msg = "None of the given server group(s) exists on system. 
Do " \ + "nothing" + LOG.info(msg) + return -1, msg + + LOG.info("Deleting server group(s): {}".format(srv_grp_ids)) + code, output = cli.openstack('server group delete', ' '.join(srv_grp_ids), + ssh_client=con_ssh, fail_ok=True, + auth_info=auth_info, timeout=60) + if code == 1: + return 1, output + + existing_sgs = get_server_groups(con_ssh=con_ssh, auth_info=auth_info) + grps_undeleted = list(set(srv_grp_ids) & set(existing_sgs)) + if grps_undeleted: + msg = "Some server group(s) still exist on system after deletion: " \ + "{}".format(grps_undeleted) + LOG.warning(msg) + if fail_ok: + return 2, msg + raise exceptions.NovaError(msg) + + msg = "Server group(s) deleted successfully." + LOG.info(msg) + return 0, "Server group(s) deleted successfully." + + +def get_keypairs(name=None, field='Name', con_ssh=None, auth_info=None): + """ + + Args: + name (str): Name of the key pair to filter for a given user + field (str|list|tuple) + con_ssh (SSHClient): + auth_info (dict): Tenant to be used to execute the cli if none + Primary tenant will be used + + Returns (list):return keypair names + + """ + table_ = table_parser.table( + cli.openstack('keypair list', ssh_client=con_ssh, auth_info=auth_info)[ + 1]) + return table_parser.get_multi_values(table_, field, Name=name) + + +def get_flavor_values(flavor, fields, strict=True, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get flavor values for given fields via openstack flavor show + Args: + flavor (str): + fields (str|list|tuple): + strict (bool): strict search for field name or not + con_ssh: + auth_info: + + Returns (list): + + """ + table_ = table_parser.table( + cli.openstack('flavor show', flavor, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table( + table_, fields, merge_lines=True, evaluate=True, + strict=strict, dict_fields=('properties',)) + + +def copy_flavor(origin_flavor, new_name=None, con_ssh=None): + """ + Extract the info from an existing flavor and create a new flavor that is + has identical info + + Args: + origin_flavor (str): id of an existing flavor to extract the info from + new_name: + con_ssh: + + Returns (str): flavor_id + + """ + table_ = table_parser.table( + cli.openstack('flavor show', origin_flavor, ssh_client=con_ssh, + auth_info=Tenant.get('admin'))[1]) + + extra_specs = table_parser.get_value_two_col_table(table_, 'properties') + extra_specs = table_parser.convert_value_to_dict(value=extra_specs) + ephemeral = table_parser.get_value_two_col_table(table_, 'ephemeral', + strict=False) + disk = table_parser.get_value_two_col_table(table_, 'disk') + is_public = table_parser.get_value_two_col_table(table_, 'is_public', + strict=False) + ram = table_parser.get_value_two_col_table(table_, 'ram') + rxtx_factor = table_parser.get_value_two_col_table(table_, 'rxtx_factor') + swap = table_parser.get_value_two_col_table(table_, 'swap') + vcpus = table_parser.get_value_two_col_table(table_, 'vcpus') + old_name = table_parser.get_value_two_col_table(table_, 'name') + + if not new_name: + new_name = "{}-{}".format(old_name, new_name) + swap = swap if swap else 0 + new_flavor_id = \ + create_flavor(name=new_name, vcpus=vcpus, ram=ram, swap=swap, + root_disk=disk, ephemeral=ephemeral, + is_public=is_public, rxtx_factor=rxtx_factor, + con_ssh=con_ssh)[1] + set_flavor(new_flavor_id, con_ssh=con_ssh, **extra_specs) + + return new_flavor_id + + +# TODO: nova providernet-show no longer exists for pci pfs/vfs info. Update +# required. 
+def get_provider_net_info(providernet_id, field='pci_pfs_configured', + strict=True, auth_info=Tenant.get('admin'), + con_ssh=None, rtn_int=True): + """ + Get provider net info from "nova providernet-show" + + Args: + providernet_id (str): id of a providernet + field (str): Field name such as pci_vfs_configured, pci_pfs_used, etc + strict (bool): whether to perform a strict search on field name + auth_info (dict): + con_ssh (SSHClient): + rtn_int (bool): whether to return integer or string + + Returns (int|str): value of specified field. Convert to integer by + default unless rnt_int=False. + + """ + if not providernet_id: + raise ValueError("Providernet id is not provided.") + + table_ = table_parser.table( + cli.nova('providernet-show', providernet_id, ssh_client=con_ssh, + auth_info=auth_info)[1]) + info_str = table_parser.get_value_two_col_table(table_, field, + strict=strict) + return int(info_str) if rtn_int else info_str + + +def get_pci_interface_stats_for_providernet( + providernet_id, + fields=('pci_pfs_configured', 'pci_pfs_used', 'pci_vfs_configured', + 'pci_vfs_used'), + auth_info=Tenant.get('admin'), con_ssh=None): + """ + get pci interface usage + Args: + providernet_id (str): id of a providernet + fields: fields such as ('pci_vfs_configured', 'pci_pfs_used') + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): tuple of integers + + """ + if not providernet_id: + raise ValueError("Providernet id is not provided.") + + table_ = table_parser.table( + cli.nova('providernet-show', providernet_id, ssh_client=con_ssh, + auth_info=auth_info)[1]) + rtn_vals = [] + for field in fields: + pci_stat = int( + table_parser.get_value_two_col_table(table_, field, strict=True)) + rtn_vals.append(pci_stat) + return tuple(rtn_vals) + + +def create_aggregate(field='name', name=None, avail_zone=None, properties=None, + check_first=True, fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + Add a aggregate with given name and availability zone. + + Args: + field (str): name or id + name (str): name for aggregate to create + avail_zone (str|None): + properties (dict|None) + check_first (bool) + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + (0, ) -- aggregate successfully created + (1, ) -- cli rejected + (2, "Created aggregate is not as specified") -- name and/or + availability zone mismatch + + """ + if not name: + existing_names = get_aggregates(field='name') + name = common.get_unique_name(name_str='stxauto', + existing_names=existing_names) + + args_dict = { + '--zone': avail_zone, + '--property': properties, + } + args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), name) + + if check_first: + aggregates_ = get_aggregates(field=field, name=name, + avail_zone=avail_zone) + if aggregates_: + LOG.warning("Aggregate {} already exists. 
Do nothing.".format(name)) + return -1, aggregates_[0] + + LOG.info("Adding aggregate {}".format(name)) + res, out = cli.openstack('aggregate create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if res == 1: + return res, out + + out_tab = table_parser.table(out) + + succ_msg = "Aggregate {} is successfully created".format(name) + LOG.info(succ_msg) + return 0, table_parser.get_value_two_col_table(out_tab, field) + + +def get_aggregates(field='name', name=None, avail_zone=None, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get a list of aggregates + + Args: + field (str|list|tuple): id or name + name (str|list): filter out the aggregates with given name if specified + avail_zone (str): filter out the aggregates with given availability + zone if specified + con_ssh (SSHClient): + auth_info (dict): + + Returns (list): + + """ + kwargs = {} + if avail_zone: + kwargs['Availability Zone'] = avail_zone + if name: + kwargs['Name'] = name + + aggregates_tab = table_parser.table( + cli.openstack('aggregate list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values(aggregates_tab, field, **kwargs) + + +def delete_aggregates(names, check_first=True, remove_hosts=True, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Add a aggregate with given name and availability zone. + + Args: + names (str|list): name for aggregate to delete + check_first (bool) + remove_hosts (bool) + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + (0, "Aggregate is successfully deleted") -- aggregate + successfully deletec + (1, ) -- cli rejected + (2, "Aggregate still exists in aggregate-list after deletion") + -- failed although cli accepted + + """ + if check_first: + names = get_aggregates(name=names, con_ssh=con_ssh, auth_info=auth_info) + if not names: + msg = 'Aggregate {} does not exists. 
Do nothing.'.format(names) + LOG.warning(msg) + return -1, msg + elif isinstance(names, str): + names = [names] + + if remove_hosts: + for name in names: + remove_hosts_from_aggregate(aggregate=name, check_first=True) + + LOG.info("Deleting aggregate {}".format(names)) + res, out = cli.openstack('aggregate delete', ' '.join(names), + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if res == 1: + return res, out + + post_aggregates = get_aggregates(name=names, con_ssh=con_ssh, + auth_info=auth_info) + if post_aggregates: + err_msg = "Aggregate {} still exists in openstack aggregate list " \ + "after deletion.".format(post_aggregates) + LOG.warning(err_msg) + if fail_ok: + return 2, err_msg + else: + raise exceptions.NovaError(err_msg) + + succ_msg = "Aggregate(s) successfully deleted: {}".format(names) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_compute_services(field, con_ssh=None, auth_info=Tenant.get('admin'), + **kwargs): + """ + Get values from compute services list + + System: Regular, Small footprint + + Args: + field (str) + con_ssh (SSHClient): + auth_info (dict): + kwargs: Valid keys: Id, Binary, Host, Zone, Status, State, Updated At + + Returns (list): a list of hypervisors in given zone + """ + table_ = table_parser.table( + cli.openstack('compute service list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_values(table_, field, **kwargs) + + +def remove_hosts_from_aggregate(aggregate, hosts=None, check_first=True, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Remove hosts from specified aggregate + + Args: + aggregate (str): name of the aggregate to remove hosts. stxauto + aggregate can be added via add_stxauto_zone + session fixture + hosts (list|str): host(s) to remove from aggregate + check_first (bool): + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + (0, "Hosts successfully removed from aggregate") + (1, ) cli rejected on at least one host + (2, "Host(s) still exist in aggregate after + aggregate-remove-host: ) + + """ + __remove_or_add_hosts_in_aggregate(remove=True, aggregate=aggregate, + hosts=hosts, check_first=check_first, + fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info) + + +def add_hosts_to_aggregate(aggregate, hosts, check_first=True, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Add host(s) to specified aggregate + + Args: + aggregate (str): name of the aggregate to add hosts. stxauto + aggregate can be added via add_stxauto_zone + session fixture + hosts (list|str): host(s) to add to aggregate + check_first (bool): + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + (0, "Hosts successfully added from aggregate") + (1, ) cli rejected on at least one host + (2, "aggregate-add-host accepted, but some host(s) are not added in + aggregate") + + """ + __remove_or_add_hosts_in_aggregate(remove=False, aggregate=aggregate, + hosts=hosts, check_first=check_first, + fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info) + + +def __remove_or_add_hosts_in_aggregate(aggregate, hosts=None, remove=False, + check_first=True, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Remove/Add hosts from/to given aggregate + + Args: + aggregate (str): name of the aggregate to add/remove hosts. 
stxauto + aggregate can be added via + add_stxauto_zone session fixture + hosts (list|str): + remove (bool): True if remove hosts from given aggregate, otherwise + add hosts to aggregate + check_first (bool): + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + (0, "Hosts successfully removed from aggregate") + (1, ) cli rejected on at least one host + (2, "Host(s) still exist in aggregate after + aggregate-remove-host: ) + + """ + hosts_in_aggregate = get_hosts_in_aggregate(aggregate, con_ssh=con_ssh) + + if hosts is None: + if remove: + hosts = hosts_in_aggregate + else: + from keywords import host_helper + hosts = host_helper.get_hypervisors() + + if isinstance(hosts, str): + hosts = [hosts] + + msg_str = 'Remov' if remove else 'Add' + LOG.info("{}ing hosts {} in aggregate {}".format(msg_str, hosts, aggregate)) + if check_first: + if remove: + hosts_to_rm_or_add = list(set(hosts) & set(hosts_in_aggregate)) + else: + hosts_to_rm_or_add = list(set(hosts) - set(hosts_in_aggregate)) + else: + hosts_to_rm_or_add = list(hosts) + + if not hosts_to_rm_or_add: + warn_str = 'No' if remove else 'All' + msg = "{} given host(s) in aggregate {}. Do nothing. Given hosts: " \ + "{}; hosts in aggregate: {}". \ + format(warn_str, aggregate, hosts, hosts_in_aggregate) + LOG.warning(msg) + return -1, msg + + failed_res = {} + cmd = 'aggregate remove host' if remove else 'aggregate add host' + for host in hosts_to_rm_or_add: + args = '{} {}'.format(aggregate, host) + code, output = cli.openstack(cmd, args, ssh_client=con_ssh, + fail_ok=True, auth_info=auth_info) + if code > 0: + failed_res[host] = output + + if failed_res: + err_msg = "'{}' is rejected for following host(s) in aggregate " \ + "{}: {}".format(cmd, aggregate, failed_res) + if fail_ok: + LOG.warning(err_msg) + return 1, err_msg + else: + raise exceptions.NovaError(err_msg) + + post_hosts_in_aggregate = get_hosts_in_aggregate(aggregate, con_ssh=con_ssh) + if remove: + failed_hosts = list(set(hosts) & set(post_hosts_in_aggregate)) + else: + failed_hosts = list(set(hosts) - set(post_hosts_in_aggregate)) + + if failed_hosts: + err_msg = "{} accepted, but some host(s) are not {}ed in aggregate " \ + "{}: {}".format(cmd, msg_str, aggregate, failed_hosts) + if fail_ok: + LOG.warning(err_msg) + return 2, err_msg + else: + raise exceptions.NovaError(err_msg) + + succ_msg = "Hosts successfully {}ed in aggregate {}: {}".format( + msg_str.lower(), aggregate, hosts) + LOG.info(succ_msg) + return 0, succ_msg + + +def get_migration_list_table(con_ssh=None, auth_info=Tenant.get('admin')): + """ + nova migration-list to collect migration history of each vm + Args: + con_ssh (SSHClient): + auth_info (dict): + + """ + LOG.info("Listing migration history...") + return table_parser.table( + cli.nova('migration-list', ssh_client=con_ssh, auth_info=auth_info)[1]) + + +def create_keypair(name, public_key=None, private_key=None, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Create a new keypair + Args: + name (str): keypair name to create + public_key (str|None): existing public key file path to use + private_key (str|None): file path to save private key + fail_ok (bool) + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + + """ + args_dict = {'--public-key': public_key, '--private-key': private_key} + args = '{} "{}"'.format(common.parse_args(args_dict), name) + LOG.info("Creating keypair with args: {}".format(args)) + + code, out = cli.openstack('keypair create', args, ssh_client=con_ssh, + 
fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, out + + LOG.info("Keypair {} created successfully".format(name)) + return 0, name + + +def delete_keypairs(keypairs, check_first=True, fail_ok=False, con_ssh=None, + auth_info=None): + """ + Delete keypair(s) + Args: + keypairs (list/str): keypair(s) to delete + check_first (bool) + fail_ok (bool) + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + + """ + if isinstance(keypairs, str): + keypairs = (keypairs,) + + if check_first: + existing_keypairs = get_keypairs(con_ssh=con_ssh, auth_info=auth_info) + keypairs = list(set(keypairs) & set(existing_keypairs)) + if not keypairs: + msg = 'Give keypair(s) not exist. Do nothing.' + LOG.info(msg) + return -1, msg + + LOG.info('Deleting keypairs: {}'.format(keypairs)) + code, out = cli.openstack('keypair delete', ' '.join(keypairs), + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return code, out + + post_keypairs = get_keypairs(con_ssh=con_ssh, auth_info=auth_info) + undeleted_kp_names = list(set(keypairs) & set(post_keypairs)) + if undeleted_kp_names: + raise exceptions.NovaError( + "keypair(s) still exist after deletion: {}".format( + undeleted_kp_names)) + + msg = 'keypairs deleted successfully: {}'.format(keypairs) + LOG.info(msg) + return 0, msg + + +def get_hosts_in_aggregate(aggregate, con_ssh=None, + auth_info=Tenant.get('admin'), fail_ok=False): + """ + Get list of hosts in given nova aggregate + Args: + aggregate (str): + con_ssh: + auth_info: + fail_ok (bool) + + Returns (list): + + """ + if 'image' in aggregate: + aggregate = 'local_storage_image_hosts' + elif 'remote' in aggregate: + aggregate = 'remote_storage_hosts' + + hosts = get_aggregate_values(aggregate, 'hosts', con_ssh=con_ssh, + auth_info=auth_info, fail_ok=fail_ok) + if hosts: + hosts = hosts[0] + LOG.info("Hosts in {} aggregate: {}".format(aggregate, hosts)) + return hosts diff --git a/automated-pytest-suite/keywords/pm_helper.py b/automated-pytest-suite/keywords/pm_helper.py new file mode 100644 index 0000000..18b4696 --- /dev/null +++ b/automated-pytest-suite/keywords/pm_helper.py @@ -0,0 +1,1152 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import configparser +import datetime +import os.path +import re +import time +from io import StringIO + +import pexpect + +from consts.auth import Tenant +from consts.timeout import MTCTimeout +from keywords import system_helper, host_helper +from utils.clients.ssh import ControllerClient +from utils.tis_log import LOG + +KILL_CMD = 'kill -9' +PROCESS_TYPES = ['sm', 'pmon', 'other'] +KILL_PROC_EVENT_FORMAT = { + # documented + # 401.001 Service group state change from to on + # host + # + # actual in 2017-02-20_22-01-22 + # clear | 400.001 | Service group cloud-services degraded; + # cinder-api(disabled, failed) |\ + # service_domain=controller.service_group=cloud-services.host=controller-1 + # log | 401.001 | Service group cloud-services state change from + # active-degraded to active on host + # set | 400.001 | Service group cloud-services degraded; + # cinder-api(disabled, failed) |\ + # service_domain=controller.service_group=cloud-services.host=controller-1 + + # 'sm': ('401.001', + # actual in 2017-02-20_22-01-22 + # clear 400.001 Service group cloud-services warning; nova-novnc(disabled, + # failed) + # service_domain=controller.service_group=cloud-services.host=controller-0 + + # 'sm': ('400.001', + # r'Service group ([^\s]+) ([^\s]+);\s*(.*)', + # r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)'), + 'sm': { + 'event_id': '400.001', + 'critical': ( + r'Service group ([^\s]+) ([^\s]+);\s*(.*)', + r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)' + ), + 'major': ( + r'Service group ([^\s]+) ([^\s]+);\s*(.*)', + r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)' + ), + 'minor': ( + r'Service group ([^\s]+) ([^\s]+);\s*(.*)', + r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)' + ), + }, + + # set 200.006 controller-1 'acpid' process has failed. + # Auto recovery in progress. host=controller-1.process=acpid minor + 'pmon': { + 'event_id': '200.006', + # controller-1 critical 'sm' process has failed and could not be + # auto-recovered gracefully. + # Auto- recovery progression by host reboot is required and in + # progress. host=controller-1.process=sm + 'critical': ( + r'([^\s]+) ([^\s]+) \'([^\']+)\' process has ([^\s]+) and could ' + r'not be auto-recovered gracefully. ' + r'Auto.recovery progression by host reboot is required and in ' + r'progress.', + r'host=([^\.]+)\.process=([^\s]+)' + ), + # compute-2 is degraded due to the failure of its 'fsmond' process. + # Auto recovery of this major + # | host=compute-2.process= | major | process is in progress. + 'major': ( + r'([^\s]+) is ([^\s]+) due to the failure of its \'([^\']+)\' ' + r'process. Auto recovery of this ([^\s]+) ' + r'process is in progress.', + r'host=([^\.]+)\.process=([^\s]+)' + ), + # clear 200.006 compute-2 'mtclogd' process has failed. Auto + # recovery in progress. + # host=compute-2.process=mtclogd minor + # "compute-2 'mtclogd' process has failed. Auto recovery in progress." + # set compute-1 'ntpd' process has failed. Manual recovery is required. + 'minor': ( + r"([^\s]+) '([^\']+)' process has ([^\s]+)\. 
[^\s]+ recovery.*", + r'host=([^\.]+)\.process=([^\s]+)' + ), + }, +} +AVAILABILITY_MAPPINGS = {'active': 'enabled', 'enabled': 'active'} +PMON_PROC_CONF_DIR = '/etc/pmon.d' + + +def get_pmon_process_info(name, host, conf_file=None, con_ssh=None): + """ + Get process info from its PMON config file + Args: + name (str): name of the PMON process + host (str): host on which the PROM process running + con_ssh: connection to the active controller + conf_file (str): configuration file for the PMON process + + Returns (dict): settings of the process + + """ + LOG.info('Get PMON process information for {}'.format(name)) + + if not conf_file: + file_name = '{}.conf'.format(name) + else: + file_name = conf_file + + cmd = 'cat {}'.format(os.path.join(PMON_PROC_CONF_DIR, file_name)) + + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con0_ssh: + code, output = con0_ssh.exec_sudo_cmd(cmd) + + if 0 != code or not output.strip(): + LOG.error( + 'Failed to read config file:{}/{} for PMON process:{} on host:{}, ' + 'code:{}, message:{}'.format( + PMON_PROC_CONF_DIR, file_name, name, host, code, output)) + return {} + + conf_parser = configparser.ConfigParser() + conf_parser.read_file(StringIO(output)) + + settings = {} + + if 'process' in conf_parser.sections(): + settings = {k.strip(): v.split(';')[0].strip() for k, v in + conf_parser.items('process')} + + settings['interval'] = int(settings.get('interval', 5)) + settings['debounce'] = int(settings.get('debounce', 20)) + LOG.debug('process settings:{}'.format(settings)) + return settings + + +def get_ancestor_process(name, host, cmd='', fail_ok=False, retries=5, + retry_interval=3, con_ssh=None): + """ + Get the ancestor of the processes with the given name and command-line if + any. + + Args: + name: name of the process + host: host on which to find the process + cmd: executable name + fail_ok: do not throw exception when errors + retries: times to try before return + retry_interval: wait before next re-try + con_ssh: ssh connection/client to the active controller + + Returns: + pid (int), process id, -1 if there is any error + ppid (int), parent process id, -1 if there is any error + cmdline (str) command line of the process + """ + retries = retries if retries > 1 else 3 + retry_interval = retry_interval if retry_interval > 0 else 1 + + if cmd: + ps_cmd = r'ps -e -oppid,pid,cmd | /usr/bin/grep "{}\|{}" | ' \ + r'/usr/bin/grep -v grep | /usr/bin/grep {}'.\ + format(name, os.path.basename(cmd), cmd) + else: + ps_cmd = 'ps -e -oppid,pid,cmd | /usr/bin/grep "{}" | /usr/bin/grep ' \ + '-v grep'.format(name) + + code, output = -1, '' + if fail_ok: + for count in range(retries): + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con0_ssh: + code, output = con0_ssh.exec_cmd(ps_cmd, fail_ok=True) + if 0 == code and output.strip(): + break + LOG.warn('Failed to run cli:{} on controller at retry:{:02d}, ' + 'wait:{} seconds and try again'.format(cmd, count, + retry_interval)) + time.sleep(retry_interval) + else: + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con0_ssh: + code, output = con0_ssh.exec_cmd(ps_cmd, fail_ok=False) + + if not (0 == code and output.strip()): + LOG.error( + 'Failed to find process with name:{} and cmd:{}'.format(name, cmd)) + return -1, -1, '' + + procs = [] + ppids = [] + for line in output.strip().splitlines(): + proc_attr = line.strip().split() + if not proc_attr: + continue + try: + ppid = int(proc_attr[0].strip()) + pid = int(proc_attr[1].strip()) + cmdline = ' '.join(proc_attr[2:]) + LOG.info('ppid={}, 
pid={}\ncmdline={}'.format(ppid, pid, cmdline)) + except IndexError: + LOG.warn( + 'Failed to execute ps -p ?! cmd={}, line={}, output={}'.format( + cmd, line, output.strip())) + continue + + if cmd and cmd not in cmdline: + continue + procs.append((pid, ppid, cmdline)) + ppids.append(ppid) + + if len(procs) <= 0: + LOG.error( + 'Could not find process with name:{} and cmd:{}'.format(name, cmd)) + return -1, -1, '' + + pids = [v[1] for v in procs] + + if len(pids) == 1: + LOG.info('porcs[0]:{}'.format(procs[0])) + return procs[0] + + LOG.warn( + 'Multiple ({}) parent processes?, ppids:{}'.format(len(ppids), ppids)) + + if '1' not in ppids: + LOG.warn( + 'Init is not the grand parent process?, ppids:{}'.format(ppids)) + + for ppid, pid, cmdline in procs: + if pid in ppids and ppid not in pids and 1 != pid: + LOG.info('pid={}, ppid={}, cmdline={}'.format(pid, ppid, cmdline)) + return pid, ppid, cmdline + + LOG.error( + 'Could not find process, procs:{}, ppids:{}, pids:{}'.format(procs, + ppids, + pids)) + return -1, -1, '' + + +def verify_process_with_pid_file(pid, pid_file, con_ssh=None): + """ + Check if the given PID matching the PID in the specified pid_file + + Args: + pid: process id + pid_file: the file containing the process id + con_ssh: ssh connnection/client to the host on which the process + resides + + Returns: + + """ + con_ssh = con_ssh or ControllerClient.get_active_controller() + + code, output = con_ssh.exec_sudo_cmd('cat {} | head -n1'.format(pid_file), + fail_ok=False) + LOG.info('code={}, output={}'.format(code, output)) + + output = output.strip() + if not output or int(output) != pid: + LOG.info('Mismatched PID, expected:<{}>, from pid_file:<{}>, ' + 'pid_file={}'.format(pid, output, pid_file)) + return False + else: + LOG.info( + 'OK PID:{} matches with that from pid_file:{}, pid_file={}'.format( + pid, output.strip(), pid_file)) + return True + + +def get_process_from_sm(name, con_ssh=None, pid_file='', + expecting_status='enabled-active'): + """ + Get the information for the process from SM, including PID, Name, Current + Status and Pid-File + + Args: + name: name of the process + con_ssh: ssh connection/client to the active-controller + pid_file: known pid-file path/name to compare with + expecting_status: expected status of the process + + Returns: + pid (int): process id + proc_name (str): process name + actual_status (str): actual/current status of the process + sm_pid_file (str): pid-file in records of SM + """ + con_ssh = con_ssh or ControllerClient.get_active_controller() + + cmd = "true; NM={}; sudo sm-dump --impact --pid --pid_file | awk -v " \ + "pname=$NM '{{ if ($1 == pname) print }}'; " \ + "echo".format(name) + + code, output = con_ssh.exec_sudo_cmd(cmd, fail_ok=True) + + pid, proc_name, impact, sm_pid_file, actual_status = -1, '', '', '', '' + + if 0 != code or not output: + LOG.warn( + 'Cannot find the process:{} in SM with error code:\n{}\n' + 'output:{}'.format(name, code, output)) + return pid, proc_name, impact, sm_pid_file, actual_status + + for line in output.splitlines(): + if not line.strip(): + continue + pid, proc_name, impact, sm_pid_file, actual_status = -1, '', '', '', '' + + results_array = line.strip().split() + LOG.info('results_array={}'.format(results_array)) + + if len(results_array) != 6: + LOG.debug( + 'Invalid format from output of sm-dump?! 
line={}\n' + 'cmd={}'.format(line, cmd)) + continue + + proc_name = results_array[0] + if proc_name != name: + continue + + expect_status = results_array[1] + actual_status = results_array[2] + + if expect_status != actual_status: + LOG.warn( + 'service:{} is not in expected status yet. expected:{}, ' + 'actual:{}. Retry'.format( + proc_name, expect_status, actual_status)) + continue + + if actual_status != expecting_status: + LOG.warn( + 'service:{} is not in expected status yet. expected:{}, ' + 'actual:{}. Retry'.format( + proc_name, expecting_status, actual_status)) + break + + impact = results_array[3] + + pid = int(results_array[4].strip()) + if results_array[5] != sm_pid_file: + LOG.warn( + 'pid_file not matching with that from SM-dump, pid_file={}, ' + 'sm-dump-pid_file={}'.format( + sm_pid_file, results_array[5])) + sm_pid_file = results_array[5] + + if pid_file and sm_pid_file != pid_file: + LOG.warn( + 'pid_file differs from input pid_file, pid_file={}, ' + 'sm-dump-pid_file={}'.format( + pid_file, sm_pid_file)) + + if sm_pid_file: + if not verify_process_with_pid_file(pid, sm_pid_file, + con_ssh=con_ssh): + LOG.warn( + 'pid of service mismatch that from pid-file, pid:{}, ' + 'pid-file:{}, proc-name:{}'.format( + pid, sm_pid_file, proc_name)) + # found + break + + if -1 != pid: + host = system_helper.get_active_controller_name() + running, msg = is_process_running(pid, host) + if not running: + LOG.warn( + 'Process not existing, name={}, pid={}, msg={}'.format(name, + pid, + msg)) + return -1, '', '', '', '' + else: + LOG.info( + 'OK, Process is running: name={}, pid={}, output={}'.format( + name, pid, msg)) + + return pid, proc_name, impact, actual_status, sm_pid_file + + +def is_controller_swacted( + prev_active, prev_standby, + swact_start_timeout=MTCTimeout.KILL_PROCESS_SWACT_NOT_START, + swact_complete_timeout=MTCTimeout.KILL_PROCESS_SWACT_COMPLETE, + con_ssh=None): + """ + Wait and check if the active-controller on the system was 'swacted' with + give time period + + Args: + prev_active: previous active controller + prev_standby: previous standby controller + swact_start_timeout: check within this time frame if the swacting + started + swact_complete_timeout: check if the swacting (if any) completed in + this time period + con_ssh: ssh connection/client to the current + active-controller + + Returns: + + """ + LOG.info( + 'Check if the controllers started to swact within:{}, and completing ' + 'swacting within:{}'.format( + swact_start_timeout, swact_complete_timeout)) + + code = -1 + host = prev_active + for retry in range(1, 5): + LOG.info( + 'retry{:02d}: checking if swacting triggered, ' + 'prev-active-controller={}'.format( + retry, prev_active)) + code = 0 + try: + code, msg = host_helper.wait_for_swact_complete( + host, con_ssh=con_ssh, fail_ok=True, + swact_start_timeout=swact_start_timeout, + swact_complete_timeout=swact_complete_timeout) + + if 0 == code: + LOG.info( + 'OK, host-swacted, prev-active:{}, pre-standby:{}, ' + 'code:{}, message:{}'.format( + prev_active, prev_active, code, msg)) + return True + + active, standby = system_helper.get_active_standby_controllers() + if active == prev_standby and standby == prev_active: + LOG.info( + 'swacted?! prev-active:{} prev-standby:{}, cur active:{}, ' + 'cur standby:{}'.format( + prev_active, prev_standby, active, standby)) + return True + break + + except Exception as e: + LOG.warn( + 'erred, indicating system is in unstable state, meaning ' + 'probably swacting is in process. 
' + 'previous active-controller:{}, previous standby-controller:{}' + '\nerror message:{}'.format(prev_active, prev_standby, e)) + + if retry >= 4: + LOG.error( + 'Fail the test after retry {} times, system remains in ' + 'unstable state, ' + 'meaning probably swacting is in process. previous ' + 'active-controller:{}, ' + 'previous standby-controller:{}\nerror message:{}'. + format(retry, prev_active, prev_standby, e)) + raise + + time.sleep(10) + + return 0 == code + + +def wait_for_sm_process_events(service, host, target_status, expecting=True, + severity='major', + last_events=None, process_type='sm', timeout=60, + interval=3, con_ssh=None): + if process_type not in KILL_PROC_EVENT_FORMAT: + LOG.error('unknown type of process:{}'.format(process_type)) + + event_log_id = KILL_PROC_EVENT_FORMAT[process_type]['event_id'] + reason_pattern, entity_id_pattern = KILL_PROC_EVENT_FORMAT[process_type][ + severity][0:2] + + if last_events is not None: + last_event = last_events['values'][0] + start_time = \ + last_event[1].replace('-', '').replace('T', ' ').split('.')[0] + else: + start_time = '' + + search_keys = { + 'Event Log ID': event_log_id, + 'Reason Text': reason_pattern, + 'Entity Instance ID': entity_id_pattern, + } + + expected_availability = target_status.get('availability', None) + + matched_events = [] + stop_time = time.time() + timeout + if expecting and (service == 'nova-novnc' or service == 'vim-webserver'): + stop_time = time.time() + timeout + 300 + interval = 60 + retry = 0 + while time.time() < stop_time: + retry += 1 + matched_events[:] = [] + events_table = system_helper.get_events_table( + event_log_id=event_log_id, show_uuid=True, + start=start_time, limit=10, con_ssh=con_ssh, regex=True, + **search_keys) + + if not events_table or not events_table['values']: + LOG.warn( + 'run{:02d} for process:{}: Empty event table?!\n' + 'evens_table:{}\nevent_id={}, ' + 'start={}\nkeys={}, severify={}'. + format(retry, service, events_table, event_log_id, start_time, + search_keys, severity)) + continue + + for event in events_table['values']: + try: + actual_event_id = event[3].strip() + if actual_event_id != event_log_id: + LOG.warn('Irrelevant event? event-list quering broken?!' + ' looking-for-event-id={}, actual-event-id={}, ' + 'event={}'. 
+ format(event_log_id, actual_event_id, event)) + continue + + actual_state = event[2] + if actual_state not in ('set', 'clear'): + LOG.info( + 'State not matching, expected-state="log", ' + 'actual-state={}", event={}'.format( + actual_state, event)) + continue + + actual_reason = event[4].strip() + # ('cloud-services', 'active', 'active-degraded', + # 'controller-0;', ' glance-api(disabled, failed)') + m = re.match(reason_pattern, actual_reason) + if not m: + LOG.info( + 'Not matched event:{},\nevent_id={}, start={}, ' + 'reason_text={}'.format( + event, event_log_id, start_time, reason_pattern)) + continue + + actual_group_status = m.group(2) + if actual_group_status not in ('active', expected_availability): + LOG.info( + 'Group status not matching!, expected-status={}, ' + 'actual-status={}\nevent={}'.format( + expected_availability, actual_group_status, event)) + continue + + if 'host={}'.format(host) not in event[5]: + LOG.info( + 'Host not matching, expected-host={}, acutal-host={}, ' + 'event={}'.format( + host, event[5], event)) + continue + + actual_service_name, status = m.group(3).split('(') + service_operational, service_availability = status.split(',') + matched_events.append(dict( + uuid=event[0], + event=event[1:-1], + service=actual_service_name, + serice_operational=service_operational, + service_availability=service_availability.strip().strip( + ')'), + group_name=m.group(1), + group_prev_status=m.group(2), + group_status=m.group(3) + )) + + if not expecting: + LOG.error( + 'Found set/clear event while it should NOT\nevent:' + '{}'.format(event)) + return -1, tuple(matched_events) + + matched_events = list(reversed(matched_events)) + if len(matched_events) > 1: + if matched_events[-1]['event'][1] == 'clear' and \ + matched_events[-2]['event'][1] == 'set': + LOG.info('OK, found matched events:{}'.format( + matched_events)) + return 0, tuple(matched_events) + + except IndexError: + LOG.error( + 'CLI fm event-list changed its output format?\nsearching ' + 'keys={}'.format( + search_keys)) + raise + + LOG.warn( + 'No matched event found at try:{}, will sleep {} seconds and retry' + '\nmatched events:\n{}, host={}'.format(retry, interval, + matched_events, host)) + + time.sleep(interval) + continue + + LOG.info('No matched events:\n{}'.format(matched_events)) + + return -1, tuple() + + +def _check_status_after_killing_process(service, host, target_status, + expecting=True, process_type='sm', + last_events=None, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + LOG.info( + 'check for process:{} on host:{} expecting status:{}, process_type:' + '{}'.format(service, host, target_status, process_type)) + + try: + operational, availability = target_status.split('-') + except ValueError as e: + LOG.error('unknown host status:{}, error:{}'.format(target_status, e)) + raise + + expected = {'operational': operational, 'availability': availability} + + if availability == 'warning': + LOG.info('impact:{} meaning: operational={}, availabiltiy={}'.format( + target_status, operational, availability)) + code, _ = wait_for_sm_process_events( + service, + host, + expected, + expecting=expecting, + last_events=last_events, + process_type=process_type, + con_ssh=con_ssh) + + return (0 == code) == expecting + + total_wait = 120 if expecting else 30 + time.sleep(1) + + found = system_helper.wait_for_host_values(host, timeout=total_wait / 2, + con_ssh=con_ssh, fail_ok=True, + auth_info=auth_info, **expected) + + if expecting and found: + LOG.debug('OK, process:{} in status:{} as 
expected.'.format( + service, target_status)) + + LOG.debug('Next, wait and verify the sytstem recovers') + expected = {'operational': 'enabled', 'availability': 'available'} + return system_helper.wait_for_host_values( + host, timeout=total_wait / 2, con_ssh=con_ssh, auth_info=auth_info, + fail_ok=True, **expected) + # return True + + elif not expecting and found: + LOG.error('Unexpected status for process:{}, expected status:{}'.format( + service, expected)) + return False + + elif not expecting and not found: + LOG.info( + 'OK, IMPACT did not happen which is correct. ' + 'target_status={}'.format(target_status)) + return True + + elif expecting and not found: + LOG.warn( + 'host is not in expected status:{} for service:{}'.format(expected, + service)) + + code = wait_for_sm_process_events( + service, host, expected, expecting=expecting, + last_events=last_events, + process_type=process_type, con_ssh=con_ssh)[0] + + return 0 == code + + else: + # should never reach here + pass + + +def check_impact(impact, service_name, host='', last_events=None, + expecting_impact=False, process_type='sm', con_ssh=None, + timeout=80, **kwargs): + """ + Check if the expected IMPACT happens (or NOT) on the specified host + + Args: + impact (str): system behavior to check, including: + swact --- the active controller swacting + enabled-degraded --- the host changed to + 'enalbed-degraded' status + disabled-failed --- the host changed to + 'disabled-failed' status + ... + service_name (str): name of the service/process + host (str): the host to check + last_events (dict) the last events before action + expecting_impact (bool): if the IMPACT should happen timeout + process_type (str): type of the process: sm, pm, other + con_ssh: ssh connection/client to the active controller + timeout + **kwargs: + + Returns: + boolean - whether the IMPACT happens as expected + + """ + LOG.info( + 'Checking impact:{} on host:{} after killing process:{}, ' + 'process_type={}'.format( + impact, host, service_name, process_type)) + + prev_active = kwargs.get('active_controller', 'controller-0') + prev_standby = kwargs.get('standby_controller', 'controller-1') + severity = kwargs.get('severity', 'major') + + if impact == 'swact': + if expecting_impact: + return is_controller_swacted(prev_active, prev_standby, + con_ssh=con_ssh, + swact_start_timeout=max(timeout / 2, + 20), + swact_complete_timeout=timeout) + else: + return not is_controller_swacted(prev_active, prev_standby, + con_ssh=con_ssh, + swact_start_timeout=timeout / 4) + + elif impact in ('enabled-degraded', 'enabled-warning'): + return _check_status_after_killing_process( + service_name, host, target_status=impact, + expecting=expecting_impact, + process_type=process_type, last_events=last_events, con_ssh=con_ssh) + + elif impact == 'disabled-failed': + + if host == prev_active: + LOG.info( + 'Killing PMON process:{} on active host:{} will trigger ' + 'swact. 
impact:{}, ' + 'severity:{}'.format(service_name, host, impact, severity)) + swacted = is_controller_swacted(prev_active, prev_standby, + con_ssh=con_ssh, + swact_start_timeout=20, + swact_complete_timeout=timeout) + assert swacted, 'Active-controller must be swacted before been ' \ + 'taken into disabled-failed status' + + operational, available = impact.split('-') + expected = {'operational': operational, 'available': available} + + reached = system_helper.wait_for_host_values(host, timeout=timeout, + con_ssh=con_ssh, + fail_ok=True, **expected) + if reached and expecting_impact: + LOG.info( + 'host {} reached status {} as expected after killing process ' + '{}'.format( + host, expected, service_name)) + return True + + elif not reached and not expecting_impact: + LOG.info( + 'host {} DID NOT reach status {} (as expected) after killing ' + 'process {}'.format( + host, expected, service_name)) + return True + + else: + LOG.error( + 'Host:{} did not get into status:{} in {} seconds, seaching ' + 'for related events'.format( + host, expected, timeout)) + + # todo: it's better to do this in parallel with process-monitoring + expected = {'operational': 'enabled', + 'available': ['available', 'degraded']} + reached = system_helper.wait_for_host_values(host, timeout=timeout, + con_ssh=con_ssh, + fail_ok=True, + **expected) + + if reached: + LOG.info( + 'Host:{} did not recover into status:{} in {} ' + 'seconds'.format( + host, expected, timeout)) + return True + + LOG.error( + 'Host:{} did not get into status:{} in {} seconds, and there ' + 'is no related events'.format( + host, expected, timeout)) + + return False + else: + LOG.warn( + 'impact-checker for impact:{} not implemented yet, ' + 'kwargs:{}'.format(impact, kwargs)) + return False + + +def get_pmon_process_id(pid_file, host, con_ssh=None): + cmd = 'cat {} 2>/dev/null | head -n1 && echo 2>/dev/null'.format(pid_file) + + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con: + code, output = con.exec_cmd(cmd) + + if output.strip(): + return int(output.strip()) + + return -1 + + +def get_process_info(name, cmd='', pid_file='', host='', process_type='sm', + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get the information of the process with the specified name + + Args: + name (str): name of the process + cmd (str): path of the executable + pid_file (str): path of the file containing the process id + host (str): host on which the process resides + process_type (str): type of service/process, must be one of 'sm', + 'pm', 'other' + con_ssh: ssh connection/client to the active controller + auth_info + + Returns: + + """ + LOG.info('name:{} cmd={} pid_file={} host={} process_type={}'.format( + name, cmd, pid_file, host, process_type)) + + active_controller = system_helper.get_active_controller_name( + con_ssh=con_ssh, auth_info=auth_info) + if not host: + host = active_controller + + if process_type == 'sm': + LOG.debug( + 'to get_process_info for SM process:{} on host:{}'.format(name, + host)) + + if host != active_controller: + LOG.warn( + 'Already swacted? host:{} is not the active controller now. ' + 'Active controller is {}'.format( + host, active_controller)) + pid, name, impact, status, pid_file = get_process_from_sm( + name, con_ssh=con_ssh, pid_file=pid_file) + if status != 'enabled-active': + LOG.warn('SM process is in status:{}, not "enabled-active"'.format( + status)) + if 'disabl' in status: + LOG.warn( + 'Wrong controller? 
Or controller already swacted, ' + 'wait and try on the other controller') + time.sleep(10) + return get_process_from_sm(name, pid_file=pid_file) + + return -1, name, impact, status, pid_file + else: + return pid, name, impact, status, pid_file + + elif process_type == 'pmon': + pid = get_pmon_process_id(pid_file, host, con_ssh=con_ssh) + LOG.info('Found: PID={} for PMON process:{}'.format(pid, name)) + return pid, name + + else: + LOG.info('Try to find the process:{} using "ps"'.format(name)) + + pid = get_ancestor_process(name, host, cmd=cmd, con_ssh=con_ssh)[0] + if -1 == pid: + return -1, '' + + return pid, name + + +def is_process_running(pid, host, con_ssh=None, retries=3, interval=3): + """ + Check if the process with the PID is existing + + Args: + pid (int): process id + host (str): host the process resides + con_ssh: ssh connection/client to the host + retries (int): times to re-try if no process found before return + failure + interval (int): time to wait before next re-try + + Returns: + boolean - true if the process existing, false otherwise + msg (str) - the details of the process or error messages + """ + cmd = 'ps -p {}'.format(pid) + for _ in range(retries): + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + code, output = host_ssh.exec_cmd(cmd, fail_ok=True) + if 0 != code: + LOG.warn( + 'Process:{} DOES NOT exist, error:{}'.format(pid, output)) + else: + return True, output + time.sleep(interval) + + return False, '' + + +def _get_last_events_timestamps(limit=1, event_log_id=None, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + latest_events = system_helper.get_events_table(limit=limit, + event_log_id=event_log_id, + show_uuid=True, + con_ssh=con_ssh, + auth_info=auth_info) + + return latest_events + + +def kill_sm_process_and_verify_impact(name, cmd='', pid_file='', retries=2, + impact='swact', host='controller-0', + interval=20, action_timeout=90, + total_retries=3, process_type='sm', + on_active_controller=True, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Kill the process with the specified name and verify the system behaviors + as expected + + Args: + name (str): name of the process + cmd (str): executable of the process + pid_file (str): file containing process id + retries (int): times of killing actions upon which the + IMPACT will be triggered + impact (str): system behavior including: + swact -- active controller is swacted + enabled-degraded -- the status of the + service will change to + disabled-failed -- the status of the + service will change to + ... 
+ host (str): host to test on + interval (int): minimum time to wait between kills + action_timeout (int): kills and impact should happen within this + time frame + total_retries (int): total number of retries for whole kill and + wait actions + process_type (str): valid types are: sm, pmon, other + on_active_controller (boolean): + con_ssh: ssh connection/client to the active controller + auth_info + + Returns: (pid, host) + pid: + >0 success, the final PID of the process + -1 fail because of impact NOT happening after killing the + process up to threshold times + -2 fail because of impact happening before killing threshold times + -3 fail after trying total_retries times + host: + the host tested on + """ + active_controller, standby_controller = \ + system_helper.get_active_standby_controllers(con_ssh=con_ssh, + auth_info=auth_info) + + if on_active_controller: + LOG.info( + 'on active controller: {}, host:{}'.format(active_controller, host)) + + host = active_controller + con_ssh = con_ssh or ControllerClient.get_active_controller() + + LOG.info('on host: {}'.format(host)) + + if total_retries < 1 or retries < 1: + LOG.error( + 'retries/total-retries < 1? retries:{}, total retries:{}'.format( + retries, total_retries)) + return None + count = 0 + for i in range(1, total_retries + 1): + LOG.info( + 'retry:{:02d} kill the process:{} and verify impact:{}'.format( + i, name, impact)) + + exec_times = [] + killed_pids = [] + + timeout = time.time() + action_timeout * ( + retries / 2 if retries > 2 else 1) + + while time.time() < timeout: + count += 1 + + LOG.debug( + 'retry{:02d}-{:02d}: attempting to get process info for {} on ' + 'host:{}'.format( + i, count, name, host)) + + try: + pid, proc_name = get_process_info(name, cmd=cmd, host=host, + process_type=process_type, + pid_file=pid_file, + con_ssh=con_ssh)[0:2] + + except pexpect.exceptions.EOF: + LOG.warn( + 'retry{:02d}-{:02d}: Failed to get process id for {} on ' + 'host:{}, swacted unexpectedly?'.format( + i, count, name, host)) + time.sleep(interval / 3.0) + continue + + if -1 == pid: + LOG.error( + 'retry{:02d}-{:02d}: Failed to get PID for process with ' + 'name:{}, cmd:{}, ' + 'wait and retry'.format(i, count, name, cmd)) + time.sleep(interval / 3.0) + continue + + if killed_pids and pid in killed_pids: + LOG.warn( + 'retry{:02d}-{:02d}: No new process re-created, ' + 'prev-pid={}, cur-pid={}'.format( + i, count, killed_pids[-1], pid)) + time.sleep(interval / 3.0) + continue + + last_killed_pid = killed_pids[-1] if killed_pids else None + killed_pids.append(pid) + last_kill_time = exec_times[-1] if exec_times else None + exec_times.append(datetime.datetime.utcnow()) + + latest_events = _get_last_events_timestamps( + event_log_id=KILL_PROC_EVENT_FORMAT[process_type]['event_id'], + limit=10) + + LOG.info( + 'retry{:02d}-{:02d}: before kill CLI, proc_name={}, pid={}, ' + 'last_killed_pid={}, last_kill_time={}'.format( + i, count, proc_name, pid, last_killed_pid, last_kill_time)) + + LOG.info('\tactive-controller={}, standby-controller={}'.format( + active_controller, standby_controller)) + + kill_cmd = '{} {}'.format(KILL_CMD, pid) + + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con: + code, output = con.exec_sudo_cmd(kill_cmd, fail_ok=True) + if 0 != code: + # it happens occasionally + LOG.error('Failed to kill pid:{}, cmd={}, output=<{}>, ' + 'at run:{}, already terminated?'.format( + pid, kill_cmd, output, count)) + + if count < retries: + # IMPACT should not happen yet + if not check_impact(impact, 
proc_name, + last_events=latest_events, + active_controller=active_controller, + standby_controller=standby_controller, + expecting_impact=False, + process_type=process_type, host=host, + con_ssh=con_ssh): + LOG.error( + 'Impact:{} observed unexpectedly, it should happen ' + 'only after killing {} times, ' + 'actual killed times:{}'.format(impact, retries, count)) + return -2, host + + LOG.info( + 'retry{:02d}-{:02d}: OK, NO impact as expected, impact={}, ' + 'will kill it another time'.format(i, count, impact)) + + time.sleep(max(interval * 1 / 2.0, 5)) + + else: + no_standby_controller = standby_controller is None + expecting_impact = True if not no_standby_controller else False + if not check_impact( + impact, proc_name, last_events=latest_events, + active_controller=active_controller, + standby_controller=standby_controller, + expecting_impact=expecting_impact, + process_type=process_type, host=host, con_ssh=con_ssh): + LOG.error( + 'No impact after killing process {} {} times, while ' + '{}'.format(proc_name, count, + ('expecting impact' if expecting_impact + else 'not expecting impact'))) + + return -1, host + + LOG.info( + 'OK, final retry{:02d}-{:02d}: OK, IMPACT happened ' + '(if applicable) as expected, ' + 'impact={}'.format(i, count, impact)) + + active_controller, standby_controller = \ + system_helper.get_active_standby_controllers( + con_ssh=con_ssh) + + LOG.info( + 'OK, after impact:{} (tried:{} times), ' + 'now active-controller={}, standby-controller={}'.format( + impact, count, active_controller, standby_controller)) + + pid, proc_name = get_process_info(name, cmd=cmd, host=host, + pid_file=pid_file, + process_type=process_type, + con_ssh=con_ssh)[0:2] + + return pid, active_controller + + return -3, host + + +def wait_for_sm_dump_services_active(timeout=60, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for all services + Args: + timeout: + fail_ok: + con_ssh: + auth_info + + Returns: + + """ + active_controller = system_helper.get_active_controller_name( + con_ssh=con_ssh, auth_info=auth_info) + return host_helper.wait_for_sm_dump_desired_states( + controller=active_controller, timeout=timeout, fail_ok=fail_ok) diff --git a/automated-pytest-suite/keywords/security_helper.py b/automated-pytest-suite/keywords/security_helper.py new file mode 100644 index 0000000..76356f5 --- /dev/null +++ b/automated-pytest-suite/keywords/security_helper.py @@ -0,0 +1,1113 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import random +import re +import os +import time +from pexpect import EOF +from string import ascii_lowercase, ascii_uppercase, digits + +from consts.auth import Tenant, HostLinuxUser, CliAuth +from consts.stx import Prompt, EventLogID +from consts.proj_vars import ProjVar +from utils.tis_log import LOG +from utils import exceptions +from utils.clients.ssh import ControllerClient, SSHClient, SSHFromSSH +from keywords import system_helper, keystone_helper, common + +MIN_LINUX_PASSWORD_LEN = 7 +SPECIAL_CHARACTERS = r'!@#$%^&*()<>{}+=_\\\[\]\-?|~`,.;:' + +# use this simple "dictionary" for now, because no english dictionary +# installed on test server +SIMPLE_WORD_DICTIONARY = ''' +and is being proof-read and supplemented by volunteers from around the +world. This is an unfunded project, and future enhancement of this +dictionary will depend on the efforts of volunteers willing to help build +this free resource into a comprehensive body of general information. 
New +definitions for missing words or words senses and longer explanatory notes, +as well as images to accompany the articles are needed. More modern +illustrative quotations giving recent examples of usage of the words in +their various senses will be very helpful, since most quotations in the +original 1913 dictionary are now well over 100 years old +''' + + +class LinuxUser: + users = {HostLinuxUser.get_user(): HostLinuxUser.get_password()} + con_ssh = None + + def __init__(self, user, password, con_ssh=None): + self.user = user + self.password = password + self.added = False + self.con_ssh = con_ssh if con_ssh is not None else \ + ControllerClient.get_active_controller() + + def add_user(self): + self.added = True + LinuxUser.users[self.user] = self.password + raise NotImplementedError + + def modify_password(self): + raise NotImplementedError + + def delete_user(self): + raise NotImplementedError + + def login(self): + raise NotImplementedError + + @classmethod + def get_user_password(cls): + raise NotImplementedError + + @classmethod + def get_current_user_password(cls, con_ssh=None): + if con_ssh: + cls.con_ssh = con_ssh + elif not cls.con_ssh: + cls.con_ssh = ControllerClient.get_active_controller() + user = cls.con_ssh.get_current_user() + return user, cls.users[user] + + +class Singleton(type): + """ + A singleton used to make sure only one instance of a class is allowed to + create + """ + + __instances = {} + + def __call__(cls, *args, **kwargs): + if cls not in cls.__instances: + cls.__instances[cls] = super(Singleton, cls).__call__(*args, + **kwargs) + return cls.__instances[cls] + + +def get_ldap_user_manager(): + """ + Get the only instance of the LDAP User Manager + + Returns (LdapUserManager): + the only instance of the LDAP User Manager + """ + return LdapUserManager() + + +class LdapUserManager(object, metaclass=Singleton): + """ + The LDAP User Manager + + """ + + LINUX_ROOT_PASSWORD = HostLinuxUser.get_password() + KEYSTONE_USER_NAME = Tenant.get('admin')['user'] + KEYSTONE_USER_DOMAIN_NAME = 'Default' + KEYSTONE_PASSWORD = Tenant.get('admin')['password'] + PROJECT_NAME = 'admin' + PROJECT_DOMAIN_NAME = 'Default' + + def __init__(self, ssh_con=None): + if ssh_con is not None: + self.ssh_con = ssh_con + else: + self.ssh_con = ControllerClient.get_active_controller() + + self.users_info = {} + + def ssh_to_host(self, host=None): + """ + Get the ssh connection to the active controller or the specified host + (if it's the case) + + Args: + host (str): the host to ssh to, using the active controller + if it's unset or None + + Returns (object): + the ssh connection session to the active controller + + """ + if host is None: + return self.ssh_con + else: + return SSHClient(host=host) + + def get_ldap_admin_password(self): + """ + Get the LDAP Administrator's password + + Args: + + Returns (str): + The password of the LDAP Administrator + + """ + cmd = 'grep "credentials" /etc/openldap/slapd.conf.backup' + self.ssh_con.flush() + code, output = self.ssh_con.exec_sudo_cmd(cmd) + + if 0 == code and output.strip(): + for line in output.strip().splitlines(): + if 'credentials' in line and '=' in line: + password = line.split('=')[1] + return password + + return '' + + def get_ldap_user_password(self, user_name): + """ + Get the password of the LDAP User + + Args: + user_name (str): + the user name + + Returns (str): + the password of the user + """ + if user_name in self.users_info and \ + self.users_info[user_name]['passwords']: + return 
self.users_info[user_name]['passwords'][-1] + + return None + + def login_as_ldap_user_first_time(self, user_name, new_password=None, + host=None): + """ + Login with the specified LDAP User for the first time, + during which change the initial password as a required step. + + Args: + user_name (str): user name of the LDAP user + new_password (str): password of the LDAP user + host (str): host name to which the user will login + + Returns (tuple): + results (bool): True if success, otherwise False + password (str): new password of the LDAP user + + """ + + hostname_ip = 'controller-1' if host is None else host + + if new_password is not None: + password = new_password + else: + password = 'new_{}_Li69nux!'.format( + ''.join(random.sample(user_name, len(user_name)))) + + cmd_expected = [ + ( + 'ssh -l {} -o UserKnownHostsFile=/dev/null {}'.format( + user_name, hostname_ip), + (r'Are you sure you want to continue connecting (yes/no)?',), + ('Failed to get "continue connecting" prompt',) + ), + ( + 'yes', + # ("{}@{}'s password:".format(user_name, hostname_ip),), + (r".*@.*'s password: ".format(hostname_ip),), + ('Failed to get password prompt',) + ), + ( + '{}'.format(user_name), + (r'\(current\) LDAP Password: ',), + ('Failed to get password prompt for current password',) + ), + ( + '{}'.format(user_name), + ('New password: ',), + ('Failed to get password prompt for new password',) + ), + ( + '{}'.format(password), + ('Retype new password: ',), + ('Failed to get confirmation password prompt for new password',) + ), + ( + '{}'.format(password), + ( + 'passwd: all authentication tokens updated successfully.', + 'Connection to controller-1 closed.', + ), + ('Failed to change to new password for current user:{}'.format( + user_name),) + ), + ( + '', + (self.ssh_con.get_prompt(),), + ( + 'Failed in last step of first-time login as LDAP ' + 'User:{}'.format(user_name),) + ), + ] + + result = True + self.ssh_con.flush() + for cmd, expected, errors in cmd_expected: + self.ssh_con.send(cmd) + index = self.ssh_con.expect(blob_list=list(expected + errors)) + if len(expected) <= index: + result = False + break + + self.ssh_con.flush() + + return result, password + + def find_ldap_user(self, user_name): + """ + Find the LDAP User with the specified name + + Args: + user_name (str): - user name of the LDAP User to + search for + + Returns: + existing_flag (boolean) - True, the LDAP User with the + specified name existing + - False, cannot find a LDAP User with + the specified name + + user_info (dict): - user information + """ + + cmd = 'ldapfinger -u {}'.format(user_name) + self.ssh_con.flush() + code, output = self.ssh_con.exec_sudo_cmd(cmd, fail_ok=True, + strict_passwd_prompt=True) + + found = False + user_info = {} + if output.strip(): + for line in output.strip().splitlines(): + if line.startswith('dn: '): + user_info['dn'] = line.split()[1].strip() + elif line.startswith('cn: '): + user_info['cn'] = line.split()[1].strip() + elif line.startswith('uid: '): + user_info['uid'] = line.split()[1].strip() + elif line.startswith('uidNumber: '): + user_info['uid_number'] = int(line.split()[1].strip()) + elif line.startswith('gidNumber: '): + user_info['gid_number'] = int(line.split()[1].strip()) + elif line.startswith('homeDirectory: '): + user_info['home_directory'] = line.split()[1].strip() + elif line.startswith('userPassword:: '): + user_info['user_password'] = line.split()[1].strip() + elif line.startswith('loginShell: '): + user_info['login_shell'] = line.split()[1].strip() + elif 
line.startswith('shadowMax: '): + user_info['shadow_max'] = int(line.split()[1].strip()) + elif line.startswith('shadowWarning: '): + user_info['shadow_warning'] = int(line.split()[1].strip()) + else: + pass + else: + found = True + + return found, user_info + + def rm_ldap_user(self, user_name): + """ + Delete the LDAP User with the specified name + + Args: + user_name: + + Returns (tuple): + code - 0 successfully deleted the specified LDAP User + otherwise: failed + output - message from the deleting CLI + """ + + cmd = 'ldapdeleteuser {}'.format(user_name) + + self.ssh_con.flush() + code, output = self.ssh_con.exec_sudo_cmd(cmd, fail_ok=True) + + if 0 == code and user_name in self.users_info: + del self.users_info[user_name] + + return code, output + + @staticmethod + def validate_user_settings(secondary_group=False, + secondary_group_name=None, + password_expiry_days=90, + password_expiry_warn_days=2 + ): + """ + Validate the settings to be used as attributes of a LDAP User + + Args: + secondary_group (bool): + True - Secondary group to add user to + False - No secondary group + secondary_group_name (str): Name of secondary group (will be + ignored if secondary_group is False + password_expiry_days (int): + password_expiry_warn_days (int): + + Returns: + + """ + + try: + opt_expiry_days = int(password_expiry_days) + opt_expiry_warn_days = int(password_expiry_warn_days) + bool(secondary_group) + str(secondary_group_name) + except ValueError: + return 1, 'invalid input: {}, {}'.format(password_expiry_days, + password_expiry_warn_days) + + if opt_expiry_days <= 0: + return 4, 'invalid password expiry days:{}'.format(opt_expiry_days) + + if opt_expiry_warn_days <= 0: + return 5, 'invalid password expiry days:{}'.format( + opt_expiry_warn_days) + + return 0, '' + + def create_ldap_user(self, + user_name, + sudoer=False, + secondary_group=False, + secondary_group_name=None, + password_expiry_days=90, + password_expiry_warn_days=2, + delete_if_existing=True, + check_if_existing=True): + """ + + Args: + user_name (str): user name of the LDAP User + sudoer (boo) + True - Add the user to sudoer list + False - Do not add the user to sudoer list + secondary_group (bool): + True - Secondary group to add user to + False - No secondary group + secondary_group_name (str): Name of secondary group (will be + ignored if secondary_group is False + password_expiry_days (int): + password_expiry_warn_days (int): + delete_if_existing (bool): + True - Delete the user if it is already existing + False - Return the existing LDAP User + check_if_existing (bool): + True - Check if the LDAP User existing with the + specified name + False - Do not check if any LDAP Users with the specified + name existing + + Returns tuple(code, user_infor): + code (int): + -1 -- a LDAP User already existing with the same name ( + don't care other attributes for now) + 0 -- successfully created a LDAP User withe specified name + and attributes + 1 -- a LDAP User already existing but fail_on_existing + specified + 2 -- CLI to create a user succeeded but cannot find the user + after + 3 -- failed to create a LDAP User (the CLI failed) + 4 -- failed to change the initial password and login the + first time + 5 -- invalid inputs + """ + password_expiry_days = 90 if password_expiry_days is None else \ + password_expiry_days + password_expiry_warn_days = 2 if password_expiry_warn_days is None \ + else password_expiry_warn_days + secondary_group = False if secondary_group is None else secondary_group + secondary_group_name = '' if 
secondary_group_name is None else \ + secondary_group_name + + code, message = self.validate_user_settings( + secondary_group=secondary_group, + secondary_group_name=secondary_group_name, + password_expiry_days=password_expiry_days, + password_expiry_warn_days=password_expiry_warn_days) + if 0 != code: + return 5, {} + + if check_if_existing: + existing, user_info = self.find_ldap_user(user_name) + if existing: + if delete_if_existing: + code, message = self.rm_ldap_user(user_name) + if 0 != code: + return 1, user_info + else: + return -1, user_info + cmds_expectings = [ + ( + 'sudo ldapusersetup', + (r'Enter username to add to LDAP:',), + () + ), + ( + '{}'.format(user_name), + (r'Add {} to sudoer list? (yes/NO): '.format(user_name),), + ('Critical setup error: cannot add user.*',), + ), + ( + 'yes' if sudoer else 'NO', + (r'Add .* to secondary user group\? \(yes/NO\):',), + () + ), + ] + + if secondary_group: + cmds_expectings += [ + ( + 'yes', + (r'Secondary group to add user to? [wrs_protected]: ',), + () + ), + ( + '{}'.format(secondary_group_name), + ( + r'Enter days after which user password must be changed ' + r'\[{}\]:'.format(password_expiry_days),), + () + ) + + ] + else: + cmds_expectings += [ + ( + 'NO', + ( + r'Enter days after which user password must be changed ' + r'\[{}\]:'.format(password_expiry_days),), + (), + ), + ] + + cmds_expectings += [ + ( + '{}'.format(password_expiry_days), + ( + r'Enter days before password is to expire that user is ' + r'warned \[{}\]:'.format(password_expiry_warn_days),), + (), + ), + ( + '{}'.format(password_expiry_warn_days), + ( + 'Successfully modified user entry uid=m-user01,ou=People,' + 'dc=cgcs,dc=local in LDAP', + 'Updating password expiry to {} days'.format( + password_expiry_warn_days), + ), + (), + ) + ] + + created = True + self.ssh_con.flush() + for cmd, outputs, errors in cmds_expectings: + self.ssh_con.send(cmd) + expected_outputs = list(outputs + errors) + + index = self.ssh_con.expect(blob_list=expected_outputs, + fail_ok=True) + if len(outputs) <= index: + created = False + break + expected_outputs[:] = [] + + time.sleep(3) + + user_info = {} + if created: + existing, user_info = self.find_ldap_user(user_name) + if existing: + success, password = self.login_as_ldap_user_first_time( + user_name) + if not success: + code = 4 + else: + user_info['passwords'] = [password] + self.users_info[user_name] = user_info + code = 0 + else: + code = 2 + else: + code = 3 + + return code, user_info + + def login_as_ldap_user(self, user_name, password, host=None, + pre_store=False, disconnect_after=False): + """ + Login as the specified user name and password onto the specified host + + Args: + user_name (str): user name + password (str): password + host (str): host to login to + pre_store (bool): + True - pre-store keystone user credentials for + session + False - chose 'N' (by default) meaning do not + pre-store keystone user credentials + disconnect_after (bool): + True - disconnect the logged in session + False - keep the logged in session + + Returns (tuple): + logged_in (bool) - True if successfully logged into the + specified host + using the specified user/password + password (str) - the password used to login + ssh_con (object) - the ssh session logged in + """ + if not host: + host = 'controller-1' + if system_helper.is_aio_simplex(): + host = 'controller-0' + + prompt_keystone_user_name = r'Enter Keystone username \[{}\]: '.format( + user_name) + cmd_expected = ( + ( + 'ssh -l {} -o UserKnownHostsFile=/dev/null 
{}'.format(user_name, + host), + (r'Are you sure you want to continue connecting \(yes/no\)\?',), + ( + 'ssh: Could not resolve hostname {}: Name or service not ' + 'known'.format(host),), + ), + ( + 'yes', + (r'{}@{}\'s password: '.format(user_name, host),), + (), + ), + ( + '{}'.format(password), + (prompt_keystone_user_name, Prompt.CONTROLLER_PROMPT,), + (r'Permission denied, please try again\.',), + ), + ) + + logged_in = False + self.ssh_con.flush() + for i in range(len(cmd_expected)): + cmd, expected, errors = cmd_expected[i] + LOG.info('cmd={}\nexpected={}\nerrors={}\n'.format(cmd, expected, + errors)) + self.ssh_con.send(cmd) + + index = self.ssh_con.expect(blob_list=list(expected + errors)) + if len(expected) <= index: + break + elif 3 == i: + if expected[index] == prompt_keystone_user_name: + assert pre_store, \ + 'pre_store is False, while selecting "y" to ' \ + '"Pre-store Keystone user credentials ' \ + 'for this session!"' + else: + logged_in = True + break + else: + logged_in = True + + if logged_in: + if disconnect_after: + self.ssh_con.send('exit') + + return logged_in, password, self.ssh_con + + def change_ldap_user_password(self, user_name, password, new_password, + change_own_password=True, + check_if_existing=True, host=None, + disconnect_after=False): + """ + Modify the password of the specified user to the new one + + Args: + user_name (str): + - name of the LDAP User + + password (str): + - password of the LDAP User + + new_password (str): + - new password to change to + change_own_password (bool): + + check_if_existing (bool): + - True: check if the user already existing first + False: change the password without checking the + existence of the user + + host (str): + - The host to log into + + disconnect_after (bool) + - True: disconnect the ssh connection after changing the + password + - False: keep the ssh connection + + Returns (bool): + True if successful, False otherwise + """ + + if check_if_existing: + found, user_info = self.find_ldap_user(user_name) + if not found: + return False + + if not change_own_password: + return False + + logged_in, password, ssh_con = \ + self.login_as_ldap_user(user_name, + password=password, + host=host, + disconnect_after=False) + + if not logged_in or not password or not ssh_con: + return False, ssh_con + + cmds_expected = ( + ( + 'passwd', + (r'\(current\) LDAP Password: ',), + (), + ), + ( + password, + ('New password: ',), + ('passwd: Authentication token manipulation error', EOF,), + ), + ( + new_password, + ('Retype new password: ',), + ( + 'BAD PASSWORD: The password is too similar to the old one', + 'BAD PASSWORD: No password supplied', + 'passwd: Have exhausted maximum number of retries for ' + 'service', + EOF, + ), + ), + ( + new_password, + ('passwd: all authentication tokens updated successfully.',), + (), + ), + ) + + changed = True + ssh_con.flush() + for cmd, expected, errors in cmds_expected: + ssh_con.send(cmd) + index = ssh_con.expect(blob_list=list(expected + errors)) + if len(expected) <= index: + changed = False + break + + if disconnect_after: + ssh_con.send('exit') + + return changed, ssh_con + + +def get_admin_password_in_keyring(con_ssh=None): + """ + Get admin password via 'keyring get CGCS admin' + Args: + con_ssh (SSHClient): active controller client + + Returns (str): admin password returned + + """ + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + admin_pswd = con_ssh.exec_cmd('keyring get CGCS admin', fail_ok=False)[1] + return admin_pswd + + +def 
change_linux_user_password(password, new_password, user=None, + host=None): + if not user: + user = HostLinuxUser.get_user() + + LOG.info( + 'Attempt to change password, from password:{}, to new-password:{}, ' + 'on host:{}'.format( + password, new_password, host)) + + input_outputs = ( + ( + 'passwd', + (r'\(current\) UNIX password: ',), + (), + ), + ( + password, + ('New password: ',), + (': Authentication token manipulation error', EOF,), + ), + ( + new_password, + ('Retype new password:',), + ( + 'BAD PASSWORD: The password is too similar to the old one', + 'BAD PASSWORD: No password supplied', + 'passwd: Have exhausted maximum number of retries for service', + EOF, + ), + ), + ( + new_password, + (': all authentication tokens updated successfully.', + Prompt.CONTROLLER_PROMPT,), + (), + ), + ) + conn_to_ac = ControllerClient.get_active_controller() + initial_prompt = r'.*{}\:~\$ '.format(host) + LOG.info('Will login as user:"{}", password:"{}", to host:"{}"'.format( + user, password, host)) + + conn = SSHFromSSH(conn_to_ac, host, user, password, force_password=True, + initial_prompt=initial_prompt) + passed = True + try: + conn.connect(retry=False, use_password=True) + for cmd, expected, errors in input_outputs: + # conn.flush() + LOG.info("Send '{}'\n".format(cmd)) + conn.send(cmd) + blob_list = list(expected) + list(errors) + LOG.info("Expect: {}\n".format(blob_list)) + index = conn.expect(blob_list=blob_list) + LOG.info('returned index:{}\n'.format(index)) + if len(expected) <= index: + passed = False + break + + except Exception as e: + LOG.warn( + 'Caught exception when connecting to host:{} as user:{} with ' + 'password:{}\n{}\n'.format( + host, user, password, e)) + + raise + + finally: + if user != HostLinuxUser.get_user(): + conn.close() + + # flush the output to the cli so the next cli is correctly registered + conn.flush() + LOG.info( + 'Successfully changed password from:\n{}\nto:{} for user:{} on ' + 'host:{}'.format(password, new_password, user, host)) + + return passed, new_password + + +def gen_linux_password(exclude_list=None, length=32): + if exclude_list is None: + exclude_list = [] + + if not isinstance(exclude_list, list): + exclude_list = [exclude_list] + + if length < MIN_LINUX_PASSWORD_LEN: + LOG.warn( + 'Length requested is too small, must be longer than {}, requesting ' + '{}'.format(MIN_LINUX_PASSWORD_LEN, length)) + return None + + total = length + left = 3 + + vocabulary = [ascii_lowercase, ascii_uppercase, digits, SPECIAL_CHARACTERS] + + password = '' + while not password: + raw_password = [] + for chars in vocabulary: + count = random.randint(1, total - left) + raw_password += random.sample(chars, min(count, len(chars))) + left -= 1 + total -= count + + password = ''.join( + random.sample(raw_password, min(length, len(raw_password)))) + + missing_length = length - len(password) + if missing_length > 0: + all_chars = ''.join(vocabulary) + password += ''.join( + random.choice(all_chars) for _ in range(missing_length)) + + if password in exclude_list: + password = '' + + LOG.debug('generated valid password:{}'.format(password)) + + return password + + +def gen_invalid_password(invalid_type='shorter', previous_passwords=None, + minimum_length=7): + if previous_passwords is None: + previous_passwords = [] + + valid_password = list(gen_linux_password(exclude_list=previous_passwords, + length=minimum_length * 4)) + + current_length = len(valid_password) + + if invalid_type == 'shorter': + invalid_len = random.randint(1, minimum_length - 1) + invalid_password = 
random.sample(valid_password, invalid_len) + + elif invalid_type == '1_lowercase': + invalid_password = ''.join( + c for c in valid_password if c not in ascii_lowercase) + missing_length = current_length - len(invalid_password) + invalid_password += ''.join( + random.choice(ascii_uppercase) for _ in range(missing_length)) + + elif invalid_type == '1_uppercase': + invalid_password = ''.join( + c for c in valid_password if c not in ascii_uppercase) + missing_length = current_length - len(invalid_password) + invalid_password += ''.join( + random.choice(ascii_lowercase) for _ in range(missing_length)) + + elif invalid_type == '1_digit': + invalid_password = ''.join(c for c in valid_password if c not in digits) + missing_length = current_length - len(invalid_password) + invalid_password += ''.join( + random.choice(ascii_lowercase) for _ in range(missing_length)) + + elif invalid_type == '1_special': + invalid_password = ''.join( + c for c in valid_password if c not in SPECIAL_CHARACTERS) + missing_length = current_length - len(invalid_password) + invalid_password += ''.join( + random.choice(ascii_lowercase) for _ in range(missing_length)) + + elif invalid_type == 'not_in_dictionary': + invalid_password = random.choice( + re.split(r'\W', SIMPLE_WORD_DICTIONARY)) + + elif invalid_type == 'diff_more_than_3': + if not previous_passwords or len(previous_passwords) < 1: + return None + + last_password = previous_passwords[-1] + len_last_password = len(last_password) + count_difference = random.randint(0, 2) + for index in random.sample(range(len_last_password), count_difference): + cur_char = last_password[index] + last_password[index] = random.choice( + c for c in last_password if c != cur_char) + invalid_password = ''.join(last_password) + + elif invalid_type == 'not_simple_reverse': + if not previous_passwords or len(previous_passwords) < 1: + return None + invalid_password = ''.join(reversed(previous_passwords[-1])) + + elif invalid_type == 'not_only_case_diff': + if not previous_passwords or len(previous_passwords) < 1: + return None + invalid_password = [] + for ch in valid_password: + if ch.islower(): + invalid_password.append(ch.upper()) + elif ch.isupper(): + invalid_password.append(ch.lower()) + else: + invalid_password.append(ch) + + invalid_password = ''.join(invalid_password) + + elif invalid_type == 'not_last_2': + if not previous_passwords or len(previous_passwords) < 1: + return None + invalid_password = random.choice(previous_passwords[-2:]) + + elif invalid_type == '5_failed_attempts': + invalid_password = '' + + else: + assert False, 'Unknown password rule:{}'.format(invalid_type) + + return ''.join(invalid_password) + + +def modify_https(enable_https=True, check_first=True, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + fail_ok=False): + """ + Modify platform https via 'system modify https_enable=' + + Args: + enable_https (bool): True/False to enable https or not + check_first (bool): if user want to check if the lab is already in + the state that user try to enable + con_ssh (SSHClient): + auth_info (dict): + fail_ok (bool): + + Returns (tuple): + (-1, msg) + (0, msg) + (1, ) + + """ + if check_first: + is_https = keystone_helper.is_https_enabled(source_openrc=False, + auth_info=auth_info, + con_ssh=con_ssh) + if (is_https and enable_https) or (not is_https and not enable_https): + msg = "Https is already {}. 
Do nothing.".format( + 'enabled' if enable_https else 'disabled') + LOG.info(msg) + return -1, msg + + LOG.info("Modify system to {} https".format( + 'enable' if enable_https else 'disable')) + res, output = system_helper.modify_system(fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info, + https_enabled='{}'.format( + str(enable_https).lower())) + if res == 1: + return 1, output + + LOG.info("Wait up to 60s for config out-of-date alarm with best effort.") + system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, + entity_id='controller-', strict=False, + con_ssh=con_ssh, timeout=60, fail_ok=True, + auth_info=auth_info) + + LOG.info("Wait up to 600s for config out-of-date alarm to clear.") + system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE, + con_ssh=con_ssh, timeout=600, + check_interval=20, fail_ok=False, + auth_info=auth_info) + + LOG.info("Wait up to 300s for public endpoints to be updated") + expt_status = 'enabled' if enable_https else 'disabled' + end_time = time.time() + 300 + while time.time() < end_time: + if keystone_helper.is_https_enabled(con_ssh=con_ssh, + source_openrc=False, + auth_info=auth_info) == \ + enable_https: + break + time.sleep(10) + else: + raise exceptions.KeystoneError( + "Https is not {} in 'openstack endpoint list'".format(expt_status)) + + msg = 'Https is {} successfully'.format(expt_status) + LOG.info(msg) + # TODO: install certificate for https. There will be a warning msg if + # self-signed certificate is used + + if not ProjVar.get_var('IS_DC') or \ + (auth_info and auth_info.get('region', None) in ( + 'RegionOne', 'SystemController')): + # If DC, use the central region https as system https, since that is + # the one used for external access + CliAuth.set_vars(HTTPS=enable_https) + + return 0, msg + + +def set_ldap_user_password(user_name, new_password, check_if_existing=True, + fail_ok=False): + """ + Set ldap user password use ldapsetpasswd + + Args: + user_name (str): + - name of the LDAP User + + new_password (str): + - new password to change to + + check_if_existing (bool): + - True: check if the user already existing first + False: change the password without checking the existence of + the user + + fail_ok (bool) + + Returns (bool): + True if successful, False otherwise + """ + + if check_if_existing: + found, user_info = LdapUserManager().find_ldap_user(user_name=user_name) + if not found: + return False + + ssh_client = ControllerClient.get_active_controller() + rc, output = ssh_client.exec_sudo_cmd( + 'ldapsetpasswd {} {}'.format(user_name, new_password), fail_ok=fail_ok) + if rc > 1: + return 1, output + + return rc, output + + +def fetch_cert_file(cert_file=None, scp_to_local=True, con_ssh=None): + """ + fetch cert file from build server. scp to TiS. + Args: + cert_file (str): valid values: ca-cert, server-with-key + scp_to_local (bool): Whether to scp cert file to localhost as well. + con_ssh (SSHClient): active controller ssh client + + Returns (str|None): + cert file path on localhost if scp_to_local=True, else cert file path + on TiS system. If no certificate found, return None. 
+ + """ + if not cert_file: + cert_file = '{}/ca-cert.pem'.format(HostLinuxUser.get_home()) + + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + if not con_ssh.file_exists(cert_file): + raise FileNotFoundError( + '{} not found on active controller'.format(cert_file)) + + if scp_to_local: + cert_name = os.path.basename(cert_file) + dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), cert_name) + common.scp_from_active_controller_to_localhost(source_path=cert_file, + dest_path=dest_path, + timeout=120) + cert_file = dest_path + LOG.info("Cert file copied to {} on localhost".format(dest_path)) + + return cert_file diff --git a/automated-pytest-suite/keywords/storage_helper.py b/automated-pytest-suite/keywords/storage_helper.py new file mode 100644 index 0000000..3e9bf92 --- /dev/null +++ b/automated-pytest-suite/keywords/storage_helper.py @@ -0,0 +1,1677 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +""" +This module provides helper functions for storage based testing + +Including: +- system commands for system/host storage configs +- CEPH related helper functions that are not using system commands + +""" + +import re +import time + +from consts.auth import Tenant +from consts.stx import EventLogID, BackendState, BackendTask, GuestImages, \ + PartitionStatus +from consts.timeout import HostTimeout, SysInvTimeout + +from keywords import system_helper, host_helper, keystone_helper, common + +from utils import table_parser, cli, exceptions +from utils.clients.ssh import ControllerClient, get_cli_client +from utils.tis_log import LOG + + +def is_ceph_healthy(con_ssh=None): + """ + Query 'ceph -s' and return True if ceph health is okay + and False otherwise. + + Args: + con_ssh (SSHClient): + + Returns: + - (bool) True if health okay, False otherwise + - (string) message + """ + + health_ok = 'HEALTH_OK' + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + rtn_code, out = con_ssh.exec_cmd('ceph -s') + if rtn_code > 0: + LOG.warning('ceph -s failed to execute.') + return False + + health_state = re.findall('health: (.*)\n', out) + if not health_state: + LOG.warning('Unable to determine ceph health state') + return False + + health_state = health_state[0] + if health_ok in health_state: + LOG.info('CEPH cluster is healthy') + return True + + msg = 'CEPH unhealthy. State: {}'.format(health_state) + LOG.warning(msg) + return False + + +def get_ceph_osd_count(fail_ok=False, con_ssh=None): + """ + Return the number of OSDs on a CEPH system" + Args: + fail_ok + con_ssh(SSHClient): + + Returns (int): Return the number of OSDs on the system, + """ + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + rtn_code, out = con_ssh.exec_cmd('ceph -s', fail_ok=fail_ok) + if rtn_code > 0: + return 0 + + osds = re.search(r'(\d+) osds', out) + if osds: + LOG.info('There are {} OSDs on the system'.format(osds.group(1))) + return int(osds.group(1)) + + msg = 'There are no OSDs on the system' + LOG.info(msg) + if fail_ok: + return 0 + else: + raise exceptions.StorageError(msg) + + +def get_osd_host(osd_id, fail_ok=False, con_ssh=None): + """ + Return the host associated with the provided OSD ID + Args: + con_ssh(SSHClient): + fail_ok + osd_id (int): an OSD number, e.g. 0, 1, 2, 3... 
+ + Returns (str|None): hostname is found else None + """ + storage_hosts = system_helper.get_storage_nodes(con_ssh=con_ssh) + for host in storage_hosts: + osd_list = get_host_stors(host, 'osdid') + if int(osd_id) in osd_list: + msg = 'OSD ID {} is on host {}'.format(osd_id, host) + LOG.info(msg) + return host + + msg = 'Could not find host for OSD ID {}'.format(osd_id) + LOG.warning(msg) + if not fail_ok: + raise exceptions.StorageError(msg) + + +def kill_process(host, pid): + """ + Given the id of an OSD, kill the process and ensure it restarts. + Args: + host (string) - the host to ssh into, e.g. 'controller-1' + pid (string) - pid to kill, e.g. '12345' + + Returns: + - (bool) True if process was killed, False otherwise + - (string) message + """ + + cmd = 'kill -9 {}'.format(pid) + + # SSH could be redundant if we are on controller-0 (oh well!) + LOG.info('Kill process {} on {}'.format(pid, host)) + with host_helper.ssh_to_host(host) as host_ssh: + with host_ssh.login_as_root() as root_ssh: + root_ssh.exec_cmd(cmd, expect_timeout=60) + LOG.info(cmd) + + LOG.info('Ensure the PID is no longer listed') + pid_exists, msg = check_pid_exists(pid, root_ssh) + if pid_exists: + return False, msg + + return True, msg + + +def get_osd_pid(osd_host, osd_id, con_ssh=None, fail_ok=False): + """ + Given the id of an OSD, return the pid. + Args: + osd_host (string) - the host to ssh into, e.g. 'storage-0' + osd_id (int|str) - osd_id to get the pid of, e.g. '0' + con_ssh + fail_ok + + Returns (int|None): + + """ + pid_file = '/var/run/ceph/osd.{}.pid'.format(osd_id) + return __get_pid_from_file(osd_host, pid_file=pid_file, con_ssh=con_ssh, + fail_ok=fail_ok) + + +def get_mon_pid(mon_host, con_ssh=None, fail_ok=False): + """ + Given the host name of a monitor, return the pid of the ceph-mon process + Args: + mon_host (string) - the host to get the pid of, e.g. 'storage-1' + con_ssh (SSHClient) + fail_ok + + Returns (int|None) + + """ + pid_file = '/var/run/ceph/mon.{}.pid'.format( + 'controller' if system_helper.is_aio_duplex() else mon_host) + return __get_pid_from_file(mon_host, pid_file=pid_file, con_ssh=con_ssh, + fail_ok=fail_ok) + + +def __get_pid_from_file(host, pid_file, con_ssh=None, fail_ok=False): + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + rtn_code, out = host_ssh.exec_cmd('cat {}'.format(pid_file), + expect_timeout=10, fail_ok=fail_ok) + mon_match = r'(\d+)' + pid = re.match(mon_match, out) + if pid: + msg = '{} for {} is {}'.format(pid_file, host, pid.group(1)) + LOG.info(msg) + return pid.group(1) + + msg = '{} for {} was not found'.format(pid_file, host) + LOG.warning(msg) + if not fail_ok: + raise exceptions.StorageError(msg) + + +def get_osds(host=None, con_ssh=None): + """ + Given a hostname, get all OSDs on that host + + Args: + con_ssh(SSHClient) + host(str|None): the host to ssh into + Returns: + (list) List of OSDs on the host. Empty list if none. + """ + + osd_list = [] + + if host: + osd_list += get_host_stors(host, 'osdid', con_ssh) + else: + storage_hosts = system_helper.get_storage_nodes() + for host in storage_hosts: + osd_list += get_host_stors(host, 'osdid', con_ssh) + + return osd_list + + +def is_osd_up(osd_id, con_ssh=None): + """ + Determine if a particular OSD is up. 
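+ + The check greps the output of 'ceph osd tree' on the given ssh + client for the 'osd.<id>' entry. Illustrative usage (assumes an + active controller ssh session): + con_ssh = ControllerClient.get_active_controller() + assert is_osd_up(0, con_ssh), 'osd.0 is expected to be up' 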
+ + Args: + osd_id (int) - ID of OSD we want to query + con_ssh + + Returns: + (bool) True if OSD is up, False if OSD is down + """ + + cmd = r"ceph osd tree | grep 'osd.{}\s'".format(osd_id) + rtn_code, out = con_ssh.exec_cmd(cmd, expect_timeout=60) + if re.search('up', out): + return True + else: + return False + + +def check_pid_exists(pid, host_ssh): + """ + Check if a PID exists on a particular host. + Args: + host_ssh (SSHClient) + pid (int|str): the process ID + Returns (tuple): + (bool, str) - True if pid exists, False otherwise, plus a message + """ + + cmd = 'kill -0 {}'.format(pid) + + rtn_code, out = host_ssh.exec_cmd(cmd, expect_timeout=60) + if rtn_code != 1: + msg = 'Process {} exists'.format(pid) + return True, msg + + msg = 'Process {} does not exist'.format(pid) + return False, msg + + +def get_storage_group(host): + """ + Determine the storage replication group name associated with the storage + host. + + Args: + host (string) - storage host, e.g. 'storage-0' + Returns: + storage_group (string) - group name, e.g. 'group-0' + msg (string) - log message + """ + peers = system_helper.get_host_values(host, fields='peers')[0] + + storage_group = re.search(r'(group-\d+)', peers) + msg = 'Unable to determine replication group for {}'.format(host) + assert storage_group, msg + storage_group = storage_group.group(0) + msg = 'The replication group for {} is {}'.format(host, storage_group) + return storage_group, msg + + +def download_images(dload_type='all', img_dest='~/images/', con_ssh=None): + """ + Retrieve images for testing purposes. Note, this will add *a lot* of time + to the test execution. + + Args: + - dload_type: 'all' to get all images (default), + 'ubuntu' to get ubuntu images, + 'centos' to get centos images + - con_ssh + - img_dest - where on the filesystem images are stored + + Returns: + - List containing the names of the imported images + """ + + def _wget(urls): + """ + This function does a wget on the provided urls. + """ + for url in urls: + cmd_ = 'wget {} --no-check-certificate -P {}'.format(url, img_dest) + rtn_code_, out_ = con_ssh.exec_cmd(cmd_, expect_timeout=7200) + assert not rtn_code_, out_ + + centos_image_location = \ + [ + 'http://cloud.centos.org/centos/7/images/CentOS-7-x86_64' + '-GenericCloud.qcow2', + 'http://cloud.centos.org/centos/6/images/CentOS-6-x86_64' + '-GenericCloud.qcow2'] + + ubuntu_image_location = \ + [ + 'https://cloud-images.ubuntu.com/precise/current/precise-server' + '-cloudimg-amd64-disk1.img'] + + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + LOG.info('Create directory for image storage') + cmd = 'mkdir -p {}'.format(img_dest) + rtn_code, out = con_ssh.exec_cmd(cmd) + assert not rtn_code, out + + LOG.info('wget images') + if dload_type == 'ubuntu' or dload_type == 'all': + LOG.info("Downloading ubuntu image") + _wget(ubuntu_image_location) + if dload_type == 'centos' or dload_type == 'all': + LOG.info("Downloading centos image") + _wget(centos_image_location) + + +def find_images(con_ssh=None, image_type='qcow2', image_name=None, + location=None): + """ + This function finds all images of a given type, in the given location. + This is designed to save test time, to prevent downloading images if not + necessary. + + Arguments: + - image_type(string): image format, e.g. 'qcow2', 'raw', etc. + - if the user specifies 'all', return all images + - location(string): where to find images, e.g. '~/images' + + Test Steps: + 1. Cycle through the files in a given location + 2. 
Create a list of image names of the expected type + + Return: + - image_names(list): list of image names of a given type, e.g. + 'cgcs-guest.img' or all images if the user specified 'all' as the + argument to image_type. + """ + + image_names = [] + if not location: + location = GuestImages.DEFAULT['image_dir'] + if not con_ssh: + con_ssh = get_cli_client() + + cmd = 'ls {}'.format(location) + rtn_code, out = con_ssh.exec_cmd(cmd) + image_list = out.split() + LOG.info('Found the following files: {}'.format(image_list)) + if image_type == 'all' and not image_name: + return image_list, location + + # Return a list of image names where the image type matches what the user + # is looking for, e.g. qcow2 + for image in image_list: + if image_name and image_name not in image: + continue + image_path = location + "/" + image + cmd = 'qemu-img info {}'.format(image_path) + rtn_code, out = con_ssh.exec_cmd(cmd) + if image_type in out: + image_names.append(image) + + LOG.info('{} images available: {}'.format(image_type, image_names)) + return image_names, location + + +def find_image_size(con_ssh, image_name='cgcs-guest.img', location='~/images'): + """ + This function uses qemu-img info to determine what size of flavor to use. + Args: + con_ssh: + image_name (str): e.g. 'cgcs-guest.img' + location (str): where to find images, e.g. '~/images' + + Returns: + image_size(int): e.g. 8 + """ + + image_path = location + "/" + image_name + cmd = 'qemu-img info {}'.format(image_path) + rtn_code, out = con_ssh.exec_cmd(cmd) + virtual_size = re.search(r'virtual size: (\d+\.*\d*[M|G])', out) + msg = 'Unable to determine size of image {}'.format(image_name) + assert virtual_size.group(0), msg + # If the size is less than 1G, round to 1 + # If the size is greater than 1G, round up + if 'M' in virtual_size.group(1): + image_size = 1 + else: + image_size = round(float(virtual_size.group(1).strip('G'))) + + return image_size + + +def wait_for_ceph_health_ok(con_ssh=None, timeout=300, fail_ok=False, + check_interval=5): + end_time = time.time() + timeout + output = None + while time.time() < end_time: + rc, output = is_ceph_healthy(con_ssh=con_ssh) + if rc: + return True + + time.sleep(check_interval) + else: + err_msg = "Ceph is not healthy within {} seconds: {}".format(timeout, + output) + if fail_ok: + LOG.warning(err_msg) + return False, err_msg + else: + raise exceptions.TimeoutException(err_msg) + + +def get_storage_backends(field='backend', con_ssh=None, + auth_info=Tenant.get('admin_platform'), **filters): + """ + Get storage backends values from system storage-backend-list + Args: + field (str|list|tuple): + con_ssh: + auth_info: + **filters: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('storage-backend-list', ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + return table_parser.get_multi_values(table_, field, **filters) + + +def get_storage_backend_values(backend, fields=None, rtn_dict=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform'), + **kwargs): + """ + Get storage backend values for given backend via system storage-backend-show + + Args: + backend (str): storage backend to get info (e.g. 
ceph) + fields (list|tuple|str|None): keys to return, e.g., ['name', + 'backend', 'task'] + rtn_dict (bool) + con_ssh: + auth_info + + Returns (list|dict): + Examples: + Input: ('cinder_pool_gib', 'glance_pool_gib', + 'ephemeral_pool_gib', 'object_pool_gib', + 'ceph_total_space_gib', 'object_gateway') + Output: + if rtn_dict: {'cinder_pool_gib': 202, 'glance_pool_gib': 20, + 'ephemeral_pool_gib': 0, + 'object_pool_gib': 0, 'ceph_total_space_gib': + 222, 'object_gateway': False} + if list: [202, 20, 0, 0, 222, False] + """ + # valid_backends = ['ceph-store', 'lvm-store', 'file-store', + # 'shared_services] + backend = backend.lower() + if re.fullmatch('ceph|lvm|file', backend): + backend += '-store' + elif backend == 'external': + backend = 'shared_services' + + table_ = table_parser.table( + cli.system('storage-backend-show', backend, ssh_client=con_ssh, + auth_info=auth_info)[1], + combine_multiline_entry=True) + if not fields: + fields = table_parser.get_column(table_, 'Property') + return table_parser.get_multi_values_two_col_table(table_, fields, + evaluate=True, + rtn_dict=rtn_dict, + **kwargs) + + +def wait_for_storage_backend_vals(backend, timeout=300, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform'), + **expt_values): + if not expt_values: + raise ValueError( + "At least one key/value pair has to be provided via expt_values") + + LOG.info( + "Wait for storage backend {} to reach: {}".format(backend, expt_values)) + end_time = time.time() + timeout + dict_to_check = expt_values.copy() + stor_backend_info = None + while time.time() < end_time: + stor_backend_info = get_storage_backend_values( + backend=backend, fields=list(dict_to_check.keys()), + rtn_dict=True, con_ssh=con_ssh, auth_info=auth_info) + dict_to_iter = dict_to_check.copy() + for key, expt_val in dict_to_iter.items(): + actual_val = stor_backend_info[key] + if str(expt_val) == str(actual_val): + dict_to_check.pop(key) + + if not dict_to_check: + return True, dict_to_check + + if fail_ok: + return False, stor_backend_info + raise exceptions.StorageError( + "Storage backend show field(s) did not reach expected value(s). " + "Expected: {}; Actual: {}".format(dict_to_check, stor_backend_info)) + + +def add_storage_backend(backend='ceph', ceph_mon_gib='20', ceph_mon_dev=None, + ceph_mon_dev_controller_0_uuid=None, + ceph_mon_dev_controller_1_uuid=None, con_ssh=None, + fail_ok=False): + """ + + Args: + backend (str): The backend to add. Only ceph is supported + ceph_mon_gib(int/str): The ceph-mon-lv size in GiB. The default is 20GiB + ceph_mon_dev (str): The disk device that the ceph-mon will be created + on. This applies to both controllers. In + case of separate device names on controllers use the options + below to specify device name for each controller + ceph_mon_dev_controller_0_uuid (str): The uuid of controller-0 disk + device that the ceph-mon will be created on + ceph_mon_dev_controller_1_uuid (str): The uuid of controller-1 disk + device that the ceph-mon will be created on + con_ssh: + fail_ok: + + Returns: + + """ + + if backend is not 'ceph': + msg = "Invalid backend {} specified. 
Valid choices are {}".format( + backend, ['ceph']) + if fail_ok: + return 1, msg + else: + raise exceptions.CLIRejected(msg) + if isinstance(ceph_mon_gib, int): + ceph_mon_gib = str(ceph_mon_gib) + + cmd = 'system storage-backend-add --ceph-mon-gib {}'.format(ceph_mon_gib) + if ceph_mon_dev: + cmd += ' --ceph-mon-dev {}'.format( + ceph_mon_dev if '/dev' in ceph_mon_dev else '/dev/' + + ceph_mon_dev.strip()) + if ceph_mon_dev_controller_0_uuid: + cmd += ' --ceph_mon_dev_controller_0_uuid {}'.format( + ceph_mon_dev_controller_0_uuid) + if ceph_mon_dev_controller_1_uuid: + cmd += ' --ceph_mon_dev_controller_1_uuid {}'.format( + ceph_mon_dev_controller_1_uuid) + + cmd += " {}".format(backend) + controler_ssh = con_ssh if con_ssh else \ + ControllerClient.get_active_controller() + controler_ssh.send(cmd) + index = controler_ssh.expect([controler_ssh.prompt, r'\[yes/N\]']) + if index == 1: + controler_ssh.send('yes') + controler_ssh.expect() + + rc, output = controler_ssh.process_cmd_result(cmd) + if rc != 0: + if fail_ok: + return rc, output + raise exceptions.CLIRejected("Fail Cli command cmd: {}".format(cmd)) + else: + output = table_parser.table(output) + return rc, output + + +def modify_storage_backend(backend, cinder=None, glance=None, ephemeral=None, + object_gib=None, object_gateway=None, + services=None, lock_unlock=False, fail_ok=False, + con_ssh=None): + """ + Modify ceph storage backend pool allocation + + Args: + backend (str): storage backend to modify (e.g. ceph) + cinder: + glance: + ephemeral: + object_gib: + object_gateway (bool|None) + services (str|list|tuple): + lock_unlock (bool): whether to wait for config out-of-date alarms + against controllers and lock/unlock them + fail_ok: + con_ssh: + + Returns: + 0, dict of new allocation + 1, cli err message + + """ + if re.fullmatch('ceph|lvm|file', backend): + backend += '-store' + backend = backend.lower() + + args = '' + if services: + if isinstance(services, (list, tuple)): + services = ','.join(services) + args = '-s {} '.format(services) + args += backend + + get_storage_backend_values(backend, fields='backend') + + if cinder: + args += ' cinder_pool_gib={}'.format(cinder) + + if 'ceph' in backend: + if glance: + args += ' glance_pool_gib={}'.format(glance) + if ephemeral: + args += ' ephemeral_pool_gib={}'.format(ephemeral) + if object_gateway is not None: + args += ' object_gateway={}'.format(object_gateway) + if object_gib: + args += ' object_pool_gib={}'.format(object_gib) + + code, out = cli.system('storage-backend-modify', args, con_ssh, + fail_ok=fail_ok) + if code > 0: + return 1, out + + if lock_unlock: + from testfixtures.recover_hosts import HostsToRecover + LOG.info( + "Lock unlock controllers and ensure config out-of-date alarms " + "clear") + system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, + timeout=30, fail_ok=False, + entity_id='controller-') + + active_controller, standby_controller = \ + system_helper.get_active_standby_controllers(con_ssh=con_ssh) + for controller in [standby_controller, active_controller]: + if not controller: + continue + HostsToRecover.add(controller) + host_helper.lock_host(controller, swact=True, con_ssh=con_ssh) + wait_for_storage_backend_vals( + backend=backend, + **{'task': BackendTask.RECONFIG_CONTROLLER, + 'state': BackendState.CONFIGURING}) + + host_helper.unlock_host(controller, con_ssh=con_ssh) + + system_helper.wait_for_alarm_gone( + alarm_id=EventLogID.CONFIG_OUT_OF_DATE, fail_ok=False) + + # TODO return new values of storage allocation and check they 
are the + # right values + updated_backend_info = get_storage_backend_values(backend, rtn_dict=True) + return 0, updated_backend_info + + +def add_ceph_mon(host, con_ssh=None, fail_ok=False): + """ + + Args: + host: + con_ssh: + fail_ok: + + Returns: + + """ + + valid_ceph_mon_hosts = ['controller-0', 'controller-1', 'storage-0', + 'compute-0'] + if host not in valid_ceph_mon_hosts: + msg = "Invalid host {} specified. Valid choices are {}".format( + host, valid_ceph_mon_hosts) + if fail_ok: + return 1, msg + else: + raise exceptions.CLIRejected(msg) + + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + existing_ceph_mons = get_ceph_mon_values(con_ssh=con_ssh) + if host in existing_ceph_mons: + state = get_ceph_mon_state(host, con_ssh=con_ssh) + LOG.warning( + "Host {} is already added as ceph-mon and is in state: {}".format( + host, state)) + if state == 'configuring': + wait_for_ceph_mon_configured(host, con_ssh=con_ssh, fail_ok=True) + state = get_ceph_mon_state(host, con_ssh=con_ssh) + if state == 'configured' or state == 'configuring': + return 0, None + else: + msg = "The existing ceph-mon is in state {}".format(state) + if fail_ok: + return 1, msg + else: + raise exceptions.HostError(msg) + + if not host_helper.is_host_locked(host, con_ssh=con_ssh): + rc, output = host_helper.lock_host(host, con_ssh=con_ssh) + if rc != 0: + msg = "Cannot add ceph-mon to host {} because the host fail to " \ + "lock: {}".format(host, output) + if fail_ok: + return rc, msg + else: + raise exceptions.HostError(msg) + + cmd = 'ceph-mon-add'.format(host) + + rc, output = cli.system(cmd, host, ssh_client=con_ssh, fail_ok=fail_ok) + if rc != 0: + msg = "CLI command {} failed to add ceph mon in host {}: {}".format( + cmd, host, output) + LOG.warning(msg) + if fail_ok: + return rc, msg + else: + raise exceptions.StorageError(msg) + rc, state, output = wait_for_ceph_mon_configured(host, con_ssh=con_ssh, + fail_ok=True) + if state == 'configured': + return 0, None + elif state == 'configuring': + return 1, "The ceph mon in host {} is in state {}".format(host, state) + else: + return 2, "The ceph mon in host {} failed: state = {}; msg = {}".format( + host, state, output) + + +def wait_for_ceph_mon_configured(host, state=None, + timeout=HostTimeout.CEPH_MON_ADD_CONFIG, + con_ssh=None, + fail_ok=False, check_interval=5): + end_time = time.time() + timeout + while time.time() < end_time: + state = get_ceph_mon_state(host, con_ssh=con_ssh) + if state == 'configured': + return True, state, None + + time.sleep(check_interval) + + msg = "The added ceph-mon on host {} did not reach configured state " \ + "within {} seconds. 
Last state = {}" \ + .format(host, timeout, state) + if fail_ok: + LOG.warning(msg) + return False, state, msg + else: + raise exceptions.StorageError(msg) + + +def get_ceph_mon_values(field='hostname', hostname=None, uuid=None, state=None, + task=None, con_ssh=None): + """ + + Args: + field: + hostname: + uuid: + state: + task: + con_ssh: + + Returns: + + """ + ceph_mons = [] + table_ = table_parser.table( + cli.system('ceph-mon-list', ssh_client=con_ssh)[1], + combine_multiline_entry=True) + + filters = {} + if table_: + if hostname: + filters['hostname'] = hostname + if uuid: + filters['uuid'] = uuid + if state: + filters['state'] = state + if task: + filters['task'] = task + + table_ = table_parser.filter_table(table_, **filters) + ceph_mons = table_parser.get_column(table_, field) + return ceph_mons + + +def get_ceph_mon_state(hostname, con_ssh=None): + return get_ceph_mon_values(field='state', hostname=hostname, + con_ssh=con_ssh)[0] + + +def get_fs_mount_path(ssh_client, fs): + mount_cmd = 'mount | grep --color=never {}'.format(fs) + exit_code, output = ssh_client.exec_sudo_cmd(mount_cmd, fail_ok=True) + + mounted_on = fs_type = None + msg = "Filesystem {} is not mounted".format(fs) + is_mounted = exit_code == 0 + if is_mounted: + # Get the first mount point + mounted_on, fs_type = \ + re.findall('{} on ([^ ]*) type ([^ ]*) '.format(fs), output)[0] + msg = "Filesystem {} is mounted on {}".format(fs, mounted_on) + + LOG.info(msg) + return mounted_on, fs_type + + +def is_fs_auto_mounted(ssh_client, fs): + auto_cmd = 'cat /etc/fstab | grep --color=never {}'.format(fs) + exit_code, output = ssh_client.exec_sudo_cmd(auto_cmd, fail_ok=True) + + is_auto_mounted = exit_code == 0 + LOG.info("Filesystem {} is {}auto mounted".format(fs, + '' if is_auto_mounted + else 'not ')) + return is_auto_mounted + + +def mount_partition(ssh_client, disk, partition=None, fs_type=None): + if not partition: + partition = '/dev/{}'.format(disk) + + disk_id = ssh_client.exec_sudo_cmd( + 'blkid | grep --color=never "{}:"'.format(partition))[1] + if disk_id: + mount_on, fs_type_ = get_fs_mount_path(ssh_client=ssh_client, + fs=partition) + if mount_on: + return mount_on, fs_type_ + + fs_type = re.findall('TYPE="([^ ]*)"', disk_id)[0] + if 'swap' == fs_type: + fs_type = 'swap' + turn_on_swap(ssh_client=ssh_client, disk=disk, partition=partition) + mount_on = 'none' + else: + mount_on = None + if not fs_type: + fs_type = 'ext4' + + LOG.info("mkfs for {}".format(partition)) + + cmd = "mkfs -t {} {}".format(fs_type, partition) + ssh_client.exec_sudo_cmd(cmd, fail_ok=False) + + if not mount_on: + mount_on = '/mnt/{}'.format(disk) + LOG.info("mount {} to {}".format(partition, mount_on)) + ssh_client.exec_sudo_cmd( + 'mkdir -p {}; mount {} {}'.format(mount_on, partition, mount_on), + fail_ok=False) + LOG.info("{} successfully mounted to {}".format(partition, mount_on)) + mount_on_, fs_type_ = get_fs_mount_path(ssh_client=ssh_client, + fs=partition) + assert mount_on == mount_on_ and fs_type == fs_type_ + + return mount_on, fs_type + + +def turn_on_swap(ssh_client, disk, partition=None): + if not partition: + partition = '/dev/{}'.format(disk) + swap_info = ssh_client.exec_sudo_cmd( + 'blkid | grep --color=never "{}:"'.format(partition), fail_ok=False)[1] + swap_uuid = re.findall('UUID="(.*)" TYPE="swap"', swap_info)[0] + LOG.info('swapon for {}'.format(partition)) + proc_swap = ssh_client.exec_sudo_cmd( + 'cat /proc/swaps | grep --color=never "{} "'.format(partition))[1] + if not proc_swap: + 
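        # Illustrative comment (added for clarity): the partition is not yet
        # listed in /proc/swaps, so activate it with swapon and re-read
        # /proc/swaps below to confirm it took effect.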
ssh_client.exec_sudo_cmd('swapon {}'.format(partition)) + proc_swap = ssh_client.exec_sudo_cmd( + 'cat /proc/swaps | grep --color=never "{} "'.format(partition))[1] + assert proc_swap, "swap partition is not shown in /proc/swaps after " \ + "swapon" + + return swap_uuid + + +def auto_mount_fs(ssh_client, fs, mount_on=None, fs_type=None, + check_first=True): + if check_first: + if is_fs_auto_mounted(ssh_client=ssh_client, fs=fs): + return + + if fs_type == 'swap' and not mount_on: + raise ValueError("swap uuid required via mount_on") + + if not mount_on: + mount_on = '/mnt/{}'.format(fs.rsplit('/', maxsplit=1)[-1]) + + if not fs_type: + fs_type = 'ext4' + cmd = 'echo "{} {} {} defaults 0 0" >> /etc/fstab'.format(fs, mount_on, + fs_type) + ssh_client.exec_sudo_cmd(cmd, fail_ok=False) + ssh_client.exec_sudo_cmd('cat /etc/fstab', get_exit_code=False) + + +def modify_swift(enable=True, check_first=True, fail_ok=False, apply=True, + con_ssh=None): + """ + Enable/disable swift service + Args: + enable: + check_first: + fail_ok: + apply: + con_ssh + + Returns (tuple): + (-1, "swift service parameter is already xxx") only apply when + check_first=True + (0, ) + (1, ) system service-parameter-modify cli got rejected. + + """ + if enable: + expt_val = 'true' + extra_str = 'enable' + else: + expt_val = 'false' + extra_str = 'disable' + + if check_first: + swift_endpoints = keystone_helper.get_endpoints(service_name='swift', + con_ssh=con_ssh, + cli_filter=False) + if enable is bool(swift_endpoints): + msg = "swift service parameter is already {}d. Do nothing.".format( + extra_str) + LOG.info(msg) + return -1, msg + + LOG.info("Modify system service parameter to {} Swift".format(extra_str)) + code, msg = system_helper.modify_service_parameter(service='swift', + section='config', + name='service_enabled', + value=expt_val, + apply=apply, + check_first=False, + fail_ok=fail_ok, + con_ssh=con_ssh) + + if apply and code == 0: + LOG.info("Check Swift endpoints after service {}d".format(extra_str)) + swift_endpoints = keystone_helper.get_endpoints(service_name='swift', + con_ssh=con_ssh, + cli_filter=False) + if enable is not bool(swift_endpoints): + raise exceptions.SwiftError( + "Swift endpoints did not {} after modify".format(extra_str)) + msg = 'Swift is {}d successfully'.format(extra_str) + + return code, msg + + +def get_qemu_image_info(image_filename, ssh_client, fail_ok=False): + """ + Provides information about the disk image filename, like file format, + virtual size and disk size + Args: + image_filename (str); the disk image file name + ssh_client: + fail_ok: + + Returns: + 0, dict { image: , format: , virtual size: + , disk size: 0: + return None + + table_ = table_parser.table(out) + values = [] + for field in fields: + convert_to_gib = False + if field == 'size_gib': + field = 'size_mib' + convert_to_gib = True + + param_value = table_parser.get_value_two_col_table(table_, field) + if '_mib' in field: + param_value = float(param_value) + if convert_to_gib: + param_value = float(param_value) / 1024 + + values.append(param_value) + + return values + + +def delete_host_partition(host, uuid, fail_ok=False, + timeout=SysInvTimeout.PARTITION_DELETE, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Delete a partition from a specific host. + + Arguments: + * host(str) - hostname, e.g. 
controller-0 + * uuid(str) - uuid of partition + * timeout(int) - how long to wait for partition deletion (sec) + + Returns: + * rc, out - return code and output of the host-disk-partition-delete + """ + + rc, out = cli.system('host-disk-partition-delete {} {}'.format(host, uuid), + fail_ok=fail_ok, ssh_client=con_ssh, + auth_info=auth_info) + if rc > 0: + return 1, out + + wait_for_host_partition_status(host=host, uuid=uuid, timeout=timeout, + final_status=None, + interim_status=PartitionStatus.DELETING, + con_ssh=con_ssh, auth_info=auth_info) + return 0, "Partition successfully deleted" + + +def create_host_partition(host, device_node, size_gib, fail_ok=False, wait=True, + timeout=SysInvTimeout.PARTITION_CREATE, + con_ssh=None, auth_info=Tenant.get('admin_platform')): + """ + Create a partition on host. + + Arguments: + * host(str) - hostname, e.g. controller-0 + * device_node(str) - device, e.g. /dev/sdh + * size_gib(str) - size of partition in gib + * wait(bool) - if True, wait for partition creation. False, return + * immediately. + * timeout(int) - how long to wait for partition creation (sec) + + Returns: + * rc, out - return code and output of the host-disk-partition-command + """ + args = '{} {} {}'.format(host, device_node, size_gib) + rc, out = cli.system('host-disk-partition-add', args, fail_ok=fail_ok, + ssh_client=con_ssh, auth_info=auth_info) + if rc > 0 or not wait: + return rc, out + + uuid = table_parser.get_value_two_col_table(table_parser.table(out), "uuid") + wait_for_host_partition_status(host=host, uuid=uuid, timeout=timeout, + con_ssh=con_ssh, auth_info=auth_info) + return 0, uuid + + +def modify_host_partition(host, uuid, size_gib, fail_ok=False, + timeout=SysInvTimeout.PARTITION_MODIFY, + final_status=PartitionStatus.READY, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + This test modifies the size of a partition. + + Args: + host(str) - hostname, e.g. 
controller-0 + uuid(str) - uuid of the partition + size_gib(str) - new partition size in gib + fail_ok + timeout(int) - how long to wait for partition creation (sec) + final_status (str|list) + con_ssh + auth_info + + Returns: + * rc, out - return code and output of the host-disk-partition-command + """ + + args = '-s {} {} {}'.format(size_gib, host, uuid) + rc, out = cli.system('host-disk-partition-modify', args, fail_ok=fail_ok, + ssh_client=con_ssh, auth_info=auth_info) + if rc > 0: + return 1, out + + uuid = table_parser.get_value_two_col_table(table_parser.table(out), "uuid") + wait_for_host_partition_status(host=host, uuid=uuid, timeout=timeout, + interim_status=PartitionStatus.MODIFYING, + final_status=final_status, con_ssh=con_ssh, + auth_info=auth_info) + + msg = "{} partition successfully modified".format(host) + LOG.info(msg) + return 0, msg + + +def wait_for_host_partition_status(host, uuid, + final_status=PartitionStatus.READY, + interim_status=PartitionStatus.CREATING, + timeout=120, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for host partition to reach given status + Args: + host: + uuid: + final_status (str|list|None|tuple): + interim_status: + timeout: + fail_ok: + con_ssh + auth_info + + Returns (bool): + + """ + if not final_status: + final_status = [None] + elif isinstance(final_status, str): + final_status = (final_status,) + + valid_status = list(final_status) + if isinstance(interim_status, str): + interim_status = (interim_status,) + for status_ in interim_status: + valid_status.append(status_) + + end_time = time.time() + timeout + prev_status = '' + while time.time() < end_time: + status = \ + get_host_partition_values(host, uuid, "status", con_ssh=con_ssh, + auth_info=auth_info)[0] + assert status in valid_status, "Partition has unexpected state " \ + "{}".format(status) + + if status in final_status: + LOG.info( + "Partition {} on host {} has reached state: {}".format(uuid, + host, + status)) + return True + elif status != prev_status: + prev_status = status + LOG.info("Partition {} on host {} is in {} state".format(uuid, host, + status)) + + time.sleep(5) + + msg = "Partition {} on host {} not in {} state within {} seconds".format( + uuid, host, final_status, timeout) + LOG.warning(msg) + if fail_ok: + return False + else: + raise exceptions.StorageError(msg) + + +def get_host_disks(host, field='uuid', auth_info=Tenant.get('admin_platform'), + con_ssh=None, **kwargs): + """ + Get values from system host-disk-list + Args: + host (str): + field (str|list|tuple) + con_ssh (SSHClient): + auth_info (dict): + + Returns (dict): + + """ + table_ = table_parser.table( + cli.system('host-disk-list --nowrap', host, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values(table_, field, evaluate=True, **kwargs) + + +def get_host_disk_values(host, disk, fields, + auth_info=Tenant.get('admin_platform'), con_ssh=None): + """ + Get host disk values via system host-disk-show + Args: + host: + disk: + fields: + auth_info: + con_ssh: + + Returns: + + """ + table_ = table_parser.table( + cli.system('host-disk-show', '{} {}'.format(host, disk), + ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields, + evaluate=True) + + +def get_host_disks_with_free_space(host, disk_list, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Given a list of disks, return the ones with free space. + + Arguments: + host(str) - hostname, e.g. 
ocntroller-0 + disk_list (list) - list of disks + auth_info + con_ssh + + Returns (dict): disks that have usable space. + """ + + free_disks = {} + for disk in disk_list: + LOG.info("Querying disk {} on host {}".format(disk, host)) + available_space = float( + get_host_disk_values(host, disk, fields='available_gib', + auth_info=auth_info, + con_ssh=con_ssh)[0]) + LOG.info("{} has disk {} with {} gib available".format(host, disk, + available_space)) + if available_space <= 0: + LOG.info( + "Removing disk {} from host {} due to insufficient " + "space".format( + disk, host)) + else: + free_disks[disk] = available_space + + return free_disks + + +def get_hosts_rootfs(hosts, auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + This returns the rootfs disks of each node. + + Arguments: + * hosts(list) - e.g. controller-0, controller-1, etc. + + Returns: + * Dict of host mapped to rootfs disk + """ + + rootfs_uuid = {} + for host in hosts: + rootfs_device = system_helper.get_host_values(host, 'rootfs_device', + auth_info=auth_info, + con_ssh=con_ssh)[0] + LOG.debug("{} is using rootfs disk: {}".format(host, rootfs_device)) + key = 'device_path' + if '/dev/disk' not in rootfs_device: + key = 'device_node' + rootfs_device = '/dev/{}'.format(rootfs_device) + + disk_uuids = get_host_disks(host, 'uuid', auth_info=auth_info, + con_ssh=con_ssh, **{key: rootfs_device}) + rootfs_uuid[host] = disk_uuids + + LOG.info("Root disk UUIDS: {}".format(rootfs_uuid)) + return rootfs_uuid + + +def get_controllerfs_list(field='Size in GiB', fs_name=None, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + **filters): + table_ = table_parser.table( + cli.system('controllerfs-list --nowrap', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + if fs_name: + filters['FS Name'] = fs_name + + return table_parser.get_multi_values(table_, field, evaluate=True, + **filters) + + +def get_controllerfs_values(filesystem, fields='size', rtn_dict=False, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Returns the value of a particular filesystem. + + Arguments: + - fields (str|list|tuple) - what value to get, e.g. size + - filesystem(str) - e.g. scratch, database, etc. + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('controllerfs-show', filesystem, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields, + rtn_dict=rtn_dict, + evaluate=True) + + +def get_controller_fs_values(con_ssh=None, + auth_info=Tenant.get('admin_platform')): + table_ = table_parser.table( + cli.system('controllerfs-show', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + rows = table_parser.get_all_rows(table_) + values = {} + for row in rows: + values[row[0].strip()] = row[1].strip() + return values + + +def modify_controllerfs(fail_ok=False, auth_info=Tenant.get('admin_platform'), + con_ssh=None, **kwargs): + """ + Modifies the specified controller filesystem, e.g. scratch, database, etc. + + Arguments: + - kwargs - dict of name:value pair(s) + - fail_ok(bool) - True if failure is expected. False if not. 
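
    Example (illustrative; the filesystem name and size below are assumptions,
    not taken from this change):
        modify_controllerfs(scratch=16)
        # builds and runs: system controllerfs-modify scratch="16"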
+ """ + + attr_values_ = ['{}="{}"'.format(attr, value) for attr, value in + kwargs.items()] + args_ = ' '.join(attr_values_) + + rc, out = cli.system("controllerfs-modify", args_, fail_ok=fail_ok, + ssh_client=con_ssh, auth_info=auth_info) + if rc > 0: + return 1, out + + msg = "Filesystem update succeeded" + LOG.info(msg) + return 0, msg + + +def get_host_stors(host, field='uuid', con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get host storage values from system host-stor-list + Args: + host: + field (str|tuple|list): + auth_info: + con_ssh: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('host-stor-list --nowrap', host, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values(table_, field, evaluate=True) + + +def get_host_stor_values(host, stor_uuid, fields="size", con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Returns the value of a particular filesystem. + + Arguments: + host + stor_uuid + fields (str|list|tuple) + auth_info + con_ssh + + Returns (list): + + """ + args = '{} {}'.format(host, stor_uuid) + table_ = table_parser.table( + cli.system('host-stor-show', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields, + evaluate=True) + + +def get_storage_tiers(cluster, field='uuid', con_ssh=None, + auth_info=Tenant.get('admin_platform'), **filters): + """ + + Args: + cluster: + field (str|tuple|list): + con_ssh: + auth_info: + **filters: + + Returns: + + """ + table_ = table_parser.table( + cli.system('storage-tier-list {}'.format(cluster), ssh_client=con_ssh, + auth_info=auth_info), combine_multiline_entry=True) + return table_parser.get_multi_values(table_, field, **filters) + + +def add_host_storage(host, disk_uuid, journal_location=None, journal_size=None, + function=None, tier_uuid=None, + auth_info=Tenant.get('admin_platform'), con_ssh=None, + fail_ok=False): + """ + Add storage to host + Args: + host: + disk_uuid: + journal_location: + journal_size: + function: + tier_uuid: + auth_info: + con_ssh: + fail_ok: + + Returns (tuple): + + """ + if not host or not disk_uuid: + raise ValueError("host name and disk uuid must be specified") + + args_dict = { + '--journal-location': journal_location, + '--journal-size': journal_size, + '--tier-uuid': tier_uuid + } + args = common.parse_args(args_dict) + + function = ' {}'.format(function) if function else '' + args += " {} {}{}".format(host, function, disk_uuid) + LOG.info("Adding storage to {}".format(host)) + rc, output = cli.system('host-stor-add', ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if rc > 0: + return 1, output + + table_ = table_parser.table(output) + uuid = table_parser.get_value_two_col_table(table_, 'uuid') + LOG.info("Storage added to {} successfully: {}".format(host, uuid)) + return 0, uuid + + +def clear_local_storage_cache(host, con_ssh=None): + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + with host_ssh.login_as_root() as root_ssh: + root_ssh.exec_cmd('rm -rf /var/lib/nova/instances/_base/*', + fail_ok=True) + root_ssh.exec_cmd('sync;echo 3 > /proc/sys/vm/drop_caches', + fail_ok=True) diff --git a/automated-pytest-suite/keywords/system_helper.py b/automated-pytest-suite/keywords/system_helper.py new file mode 100644 index 0000000..17648b0 --- /dev/null +++ b/automated-pytest-suite/keywords/system_helper.py @@ -0,0 +1,3620 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import ipaddress +import re +import os +import time + +from pytest import skip + +from consts.auth import Tenant, HostLinuxUser +from consts.stx import UUID, Prompt, SysType, EventLogID, HostAvailState +from consts.proj_vars import ProjVar +from consts.timeout import SysInvTimeout, MiscTimeout, HostTimeout +from utils import cli, table_parser, exceptions +from utils.clients.ssh import ControllerClient +from utils.tis_log import LOG +from testfixtures.fixture_resources import ResourceCleanup +from keywords import common + + +def get_sys_type(con_ssh=None): + """ + Please do NOT call this function in testcase/keyword. This is used to set + global variable SYS_TYPE in ProjVar. + Use ProjVar.get_var('SYS_TYPE') in testcase/keyword instead. + Args: + con_ssh: + + Returns: + + """ + auth_info = Tenant.get('admin_platform') + is_aio = is_aio_system(controller_ssh=con_ssh, auth_info=auth_info) + if is_aio: + sys_type = SysType.AIO_DX + if len(get_controllers(con_ssh=con_ssh, auth_info=auth_info)) == 1: + sys_type = SysType.AIO_SX + elif get_storage_nodes(con_ssh=con_ssh): + sys_type = SysType.STORAGE + else: + sys_type = SysType.REGULAR + + LOG.info("============= System type: {} ==============".format(sys_type)) + return sys_type + + +def is_storage_system(con_ssh=None, auth_info=Tenant.get('admin_platform')): + sys_type = ProjVar.get_var('SYS_TYPE') + if sys_type: + if not (ProjVar.get_var('IS_DC') and auth_info and + ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region')): + return SysType.STORAGE == sys_type + else: + return bool(get_storage_nodes(con_ssh=con_ssh, auth_info=auth_info)) + + +def is_aio_duplex(con_ssh=None, auth_info=Tenant.get('admin_platform')): + """ + Whether it is two node CPE system + Args: + con_ssh: + auth_info + + Returns (bool): + + """ + + sys_type = ProjVar.get_var('SYS_TYPE') + if sys_type: + if not (ProjVar.get_var('IS_DC') and auth_info and + ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region', + None)): + return SysType.AIO_DX == sys_type + else: + return is_aio_system(controller_ssh=con_ssh) \ + and len(get_controllers(con_ssh=con_ssh)) == 2 + + +def is_aio_simplex(con_ssh=None, auth_info=Tenant.get('admin_platform')): + sys_type = ProjVar.get_var('SYS_TYPE') + if sys_type: + if not (ProjVar.get_var('IS_DC') and auth_info and + ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region', + None)): + return SysType.AIO_SX == sys_type + else: + return is_aio_system(controller_ssh=con_ssh, + auth_info=auth_info) and \ + len(get_controllers(con_ssh=con_ssh, + auth_info=auth_info)) == 1 + + +def is_aio_system(controller_ssh=None, controller='controller-0', + auth_info=Tenant.get('admin_platform')): + """ + Whether it is AIO-Duplex or AIO-Simplex system where controller has both + controller and compute functions + Args: + controller_ssh (SSHClient): + controller (str): controller to check + auth_info + + Returns (bool): True if CPE or Simplex, else False + + """ + sys_type = ProjVar.get_var('SYS_TYPE') + if sys_type: + if not (ProjVar.get_var('IS_DC') and auth_info and + ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region', + None)): + return 'aio' in sys_type.lower() + + subfunc = get_host_values(host=controller, fields='subfunctions', + con_ssh=controller_ssh, auth_info=auth_info)[0] + combined = 'controller' in subfunc and re.search('compute|worker', subfunc) + + str_ = 'not ' if not combined else '' + + LOG.info("This is {}small footprint system.".format(str_)) + return combined + + 
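
# Illustrative sketch (added for clarity, not part of the original change):
# a testcase or fixture could combine the system-type helpers above to skip
# cases that only apply to a given lab type. The helper name and skip reason
# below are assumptions used for this example only.
def _example_skip_unless_duplex(con_ssh=None):
    """Skip the calling test unless the lab is a two-node AIO (duplex) system."""
    if not is_aio_duplex(con_ssh=con_ssh):
        skip("Test requires an AIO-DX system; detected system type: "
             "{}".format(ProjVar.get_var('SYS_TYPE')))
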
+def get_storage_nodes(con_ssh=None, auth_info=Tenant.get('admin_platform')): + """ + Get hostnames with 'storage' personality from system host-list + Args: + con_ssh (SSHClient): + auth_info + + Returns (list): list of hostnames. Empty list [] returns when no storage + nodes. + + """ + return get_hosts(personality='storage', con_ssh=con_ssh, + auth_info=auth_info) + + +def get_controllers(administrative=None, operational=None, availability=None, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get hostnames with 'controller' personality from system host-list + Args: + administrative + operational + availability + con_ssh (SSHClient): + auth_info + + Returns (list): list of hostnames + + """ + return get_hosts(personality='controller', administrative=administrative, + operational=operational, + availability=availability, con_ssh=con_ssh, + auth_info=auth_info) + + +def get_computes(administrative=None, operational=None, availability=None, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get hostnames with 'compute' personality from system host-list + Args: + administrative + operational + availability + con_ssh (SSHClient): + auth_info + + Returns (list): list of hostnames. Empty list [] returns when no compute + nodes. + + """ + return get_hosts(personality='compute', administrative=administrative, + operational=operational, + availability=availability, con_ssh=con_ssh, + auth_info=auth_info) + + +def get_hosts(personality=None, administrative=None, operational=None, + availability=None, hostname=None, strict=True, + exclude=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + field='hostname', rtn_dict=False): + """ + Get hostnames with given criteria + Args: + personality (None|str|tuple|list): + administrative (None|str|list|tuple): + operational (None|str|list|tuple): + availability (None|str|list|tuple): + hostname (None|tuple|list|str): filter out these hosts only + strict (bool): + exclude (bool): + con_ssh (SSHClient|None): + auth_info + field (str|list|tuple) + rtn_dict (bool): Whether to return dict where each field is a key, + and value is a list + + Returns (list): hosts + + """ + if not con_ssh: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + table_ = table_parser.table( + cli.system('host-list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + table_ = table_parser.filter_table(table_, exclude=True, hostname='None') + if hostname: + table_ = table_parser.filter_table(table_, hostname=hostname) + + if personality: + compute_personality = 'compute|worker' + if personality == 'compute': + personality = compute_personality + elif not isinstance(personality, str): + personality = list(personality) + if 'compute' in personality: + compute_index = personality.index('compute') + personality[compute_index] = compute_personality + + filters = {'personality': personality, + 'administrative': administrative, + 'operational': operational, + 'availability': availability} + filters = {k: v for k, v in filters.items() if v is not None} + if filters: + table_ = table_parser.filter_table(table_, strict=strict, + exclude=exclude, regex=True, + **filters) + + hostnames = table_parser.get_multi_values(table_, field, rtn_dict=rtn_dict) + LOG.debug("Filtered hosts: {}".format(hostnames)) + + return hostnames + + +def get_hosts_per_personality(availability=None, administrative=None, + operational=None, con_ssh=None, + 
auth_info=Tenant.get('admin_platform'), + source_rc=False, + rtn_tuple=False): + """ + Args: + availability + administrative + operational + con_ssh: + auth_info + source_rc + rtn_tuple (bool): whether to return tuple instead of dict. i.e., + , , + + Returns (dict|tuple): + e.g., {'controller': ['controller-0', 'controller-1'], 'compute': [ + 'compute-0', 'compute-1], 'storage': []} + + """ + table_ = table_parser.table( + cli.system('host-list', ssh_client=con_ssh, auth_info=auth_info, + source_openrc=source_rc)[1]) + personalities = ('controller', 'compute', 'storage') + res = {} + for personality in personalities: + personality_tmp = 'compute|worker' if personality == 'compute' else \ + personality + hosts = table_parser.get_values(table_, 'hostname', + personality=personality_tmp, + availability=availability, + administrative=administrative, + operational=operational, regex=True) + hosts = [host for host in hosts if host.lower() != 'none'] + res[personality] = hosts + + if rtn_tuple: + res = res['controller'], res['compute'], res['storage'] + + return res + + +def get_active_controller_name(con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + This assumes system has 1 active controller + Args: + con_ssh: + auth_info + + Returns: hostname of the active controller + Further info such as ip, uuid can be obtained via System.CONTROLLERS[ + hostname]['uuid'] + """ + return get_active_standby_controllers(con_ssh=con_ssh, auth_info=auth_info)[ + 0] + + +def get_standby_controller_name(con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + This assumes system has 1 standby controller + Args: + con_ssh: + auth_info + + Returns (str): hostname of the active controller + Further info such as ip, uuid can be obtained via System.CONTROLLERS[ + hostname]['uuid'] + """ + active, standby = get_active_standby_controllers(con_ssh=con_ssh, + auth_info=auth_info) + return standby if standby else '' + + +def get_active_standby_controllers(con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get active controller name and standby controller name (if any) + Args: + con_ssh (SSHClient): + auth_info + + Returns (tuple): such as ('controller-0', 'controller-1'), + when non-active controller is in bad state or degraded + state, or any scenarios where standby controller does not exist, + this function will return + (, None) + + """ + table_ = table_parser.table( + cli.system('servicegroup-list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + table_ = table_parser.filter_table(table_, + service_group_name='controller-services') + active_con = table_parser.get_values(table_, 'hostname', state='active', + strict=False)[0] + standby_con = table_parser.get_values(table_, 'hostname', state='standby', + strict=False) + + standby_con = standby_con[0] if standby_con else None + return active_con, standby_con + + +def get_alarms_table(uuid=True, show_suppress=False, query_key=None, + query_value=None, query_type=None, con_ssh=None, + mgmt_affecting=None, + auth_info=Tenant.get('admin_platform'), + retry=0): + """ + Get active alarms_and_events dictionary with given criteria + Args: + uuid (bool): whether to show uuid + show_suppress (bool): whether to show suppressed alarms_and_events + query_key (str): one of these: 'event_log_id', 'entity_instance_id', + 'uuid', 'severity', + query_value (str): expected value for given key + query_type (str): data type of value. 
one of these: 'string', + 'integer', 'float', 'boolean' + mgmt_affecting (bool) + con_ssh (SSHClient): + auth_info (dict): + retry (None|int): number of times to retry if the alarm-list cli got + rejected + + Returns: + dict: events table in format: {'headers': , 'values': + } + """ + args = '--nowrap' + args = __process_query_args(args, query_key, query_value, query_type) + if uuid: + args += ' --uuid' + if show_suppress: + args += ' --include_suppress' + if mgmt_affecting: + args += ' --mgmt_affecting' + + fail_ok = True + if not retry: + fail_ok = False + retry = 0 + + output = None + for i in range(retry + 1): + code, output = cli.fm('alarm-list', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code == 0: + table_ = table_parser.table(output, combine_multiline_entry=True) + return table_ + + if i < retry: + time.sleep(5) + else: + raise exceptions.CLIRejected( + 'fm alarm-list cli got rejected after {} retries: {}'.format( + retry, output)) + + +def get_alarms(fields=('Alarm ID', 'Entity ID'), alarm_id=None, + reason_text=None, entity_id=None, + severity=None, time_stamp=None, strict=False, + show_suppress=False, query_key=None, query_value=None, + query_type=None, mgmt_affecting=None, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + combine_entries=True): + """ + Get a list of alarms with values for specified fields. + Args: + fields (tuple): fields to get values for + alarm_id (str): filter out the table using given alarm id ( + strict=True). if None, table will not be filtered. + reason_text (str): reason text to filter out the table (strict + defined in param) + entity_id (str): entity instance id to filter out the table (strict + defined in param) + severity (str): severity such as 'critical', 'major' + time_stamp (str): + strict (bool): whether to perform strict filter on reason text, + entity_id, severity, or time_stamp + show_suppress (bool): whether to show suppressed alarms. Default to + False. + query_key (str): key in --query = passed to fm alarm-list + query_value (str): value in --query = passed to fm + alarm-list + query_type (str): 'string', 'integer', 'float', or 'boolean' + mgmt_affecting (bool) + con_ssh (SSHClient): + auth_info (dict): + combine_entries (bool): return list of strings when set to True, + else return a list of tuples. 
+ e.g., when True, returns ["800.003::::cluster=829851fa", + "250.001::::host=controller-0"] + when False, returns [("800.003", "cluster=829851fa"), + ("250.001", "host=controller-0")] + + Returns (list): list of alarms with values of specified fields + + """ + + table_ = get_alarms_table(show_suppress=show_suppress, query_key=query_key, + query_value=query_value, + query_type=query_type, con_ssh=con_ssh, + auth_info=auth_info, + mgmt_affecting=mgmt_affecting) + + if alarm_id: + table_ = table_parser.filter_table(table_, **{'Alarm ID': alarm_id}) + + kwargs_dict = { + 'Reason Text': reason_text, + 'Entity ID': entity_id, + 'Severity': severity, + 'Time Stamp': time_stamp + } + + kwargs = {} + for key, value in kwargs_dict.items(): + if value is not None: + kwargs[key] = value + + if kwargs: + table_ = table_parser.filter_table(table_, strict=strict, **kwargs) + + rtn_vals_list = [] + for field in fields: + vals = table_parser.get_column(table_, field) + rtn_vals_list.append(vals) + + rtn_vals_list = zip(*rtn_vals_list) + if combine_entries: + rtn_vals_list = ['::::'.join(vals) for vals in rtn_vals_list] + else: + rtn_vals_list = list(rtn_vals_list) + + return rtn_vals_list + + +def get_suppressed_alarms(uuid=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get suppressed alarms_and_events as dictionary + Args: + uuid (bool): whether to show uuid + con_ssh (SSHClient): + auth_info (dict): + + Returns: + dict: events table in format: {'headers': , 'values': + } + """ + args = '' + if uuid: + args += ' --uuid' + args += ' --nowrap --nopaging' + table_ = table_parser.table( + cli.fm('event-suppress-list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_ + + +def unsuppress_all_events(ssh_con=None, fail_ok=False, + auth_info=Tenant.get('admin_platform')): + """ + + Args: + ssh_con: + fail_ok: + auth_info: + + Returns (tuple): ((int), (str)) + + """ + LOG.info("Un-suppress all events") + args = '--nowrap --nopaging' + code, output = cli.fm('event-unsuppress-all', positional_args=args, + ssh_client=ssh_con, fail_ok=fail_ok, + auth_info=auth_info) + + if code == 1: + return 1, output + + if not output: + msg = "No suppressed events to un-suppress" + LOG.warning(msg) + return -1, msg + + table_ = table_parser.table(output) + if not table_['values']: + suppressed_list = [] + else: + suppressed_list = table_parser.get_values(table_, + target_header="Suppressed " + "Alarm ID's", + **{'Status': 'suppressed'}) + + if suppressed_list: + msg = "Unsuppress-all failed. Suppressed Alarm IDs: {}".format( + suppressed_list) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.NeutronError(msg) + + succ_msg = "All events unsuppressed successfully." + LOG.info(succ_msg) + return 0, succ_msg + + +def get_events(fields=('Event Log ID', 'Entity Instance ID'), limit=10, + event_id=None, entity_id=None, + severity=None, show_suppress=False, start=None, end=None, + state=None, show_uuid=True, + strict=False, time_stamp=None, reason_text=None, uuid=None, + con_ssh=None, auth_info=Tenant.get('admin_platform'), + combine_entries=True): + """ + Get a list of alarms with values for specified fields. 
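
    Example (illustrative; the event id and entity id used here are
    assumptions, not taken from this change):
        get_events(fields=('Event Log ID', 'State'), event_id='200.001',
                   entity_id='host=controller-0', limit=5)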
+ Args: + fields (tuple|list|str): fields to get values for + limit (int) + event_id (str): filter event using event log id + reason_text (str): reason text to filter out the table (strict + defined in param) + entity_id (str): entity instance id to filter out the table (strict + defined in param) + severity (str): severity such as 'critical', 'major' + show_suppress (bool): whether to show suppressed events. Default to + False. + show_uuid (bool): Whether to show uuid in event table + start (str): display events after this time stamp + end (str): display events prior to this time stamp + state (str): filter with events state + time_stamp (str): exact timestamp for the event, filter after events + displayed + uuid (str) + strict (bool): whether to perform strict filter on reason text, + or time_stamp + con_ssh (SSHClient): + auth_info (dict): + combine_entries (bool): return list of strings when set to True, + else return a list of tuples. + e.g., when True, returns ["800.003::::cluster=829851fa", + "250.001::::host=controller-0"] + when False, returns [("800.003", "cluster=829851fa"), + ("250.001", "host=controller-0")] + + Returns (list): list of events with values of specified fields + + """ + + table_ = get_events_table(show_uuid=show_uuid, limit=limit, + event_log_id=event_id, + entity_instance_id=entity_id, + show_suppress=show_suppress, con_ssh=con_ssh, + auth_info=auth_info, + start=start, end=end, severity=severity) + + kwargs_dict = { + 'Reason Text': reason_text, + 'Time Stamp': time_stamp, + 'UUID': uuid, + 'State': state, + } + + kwargs = {} + for key, value in kwargs_dict.items(): + if value is not None: + kwargs[key] = value + + if kwargs: + table_ = table_parser.filter_table(table_, strict=strict, **kwargs) + + rtn_vals_list = [] + if isinstance(fields, str): + fields = (fields,) + for header in fields: + vals = table_parser.get_column(table_, header) + if not vals: + vals = [] + rtn_vals_list.append(vals) + + LOG.warning('{}'.format(rtn_vals_list)) + rtn_vals_list = list(zip(*rtn_vals_list)) + if combine_entries: + rtn_vals_list = ['::::'.join(vals) for vals in rtn_vals_list] + + return rtn_vals_list + + +def get_events_table(limit=5, show_uuid=False, show_only=None, + show_suppress=False, event_log_id=None, + entity_type_id=None, entity_instance_id=None, + severity=None, start=None, end=None, + con_ssh=None, auth_info=Tenant.get('admin_platform'), + regex=False, **kwargs): + """ + Get a list of events with given criteria as dictionary + Args: + limit (int): max number of event logs to return + show_uuid (bool): whether to show uuid + show_only (str): 'alarms_and_events' or 'logs' to return only + alarms_and_events or logs + show_suppress (bool): whether or not to show suppressed + alarms_and_events + event_log_id (str|None): event log id passed to system eventlog -q + event_log_id= + entity_type_id (str|None): entity_type_id passed to system eventlog + -q entity_type_id= + entity_instance_id (str|None): entity_instance_id passed to + system eventlog -q entity_instance_id= + severity (str|None): + start (str|None): start date/time passed to '--query' in format + "20170410"/"20170410 01:23:34" + end (str|None): end date/time passed to '--query' in format + "20170410"/"20170410 01:23:34" + con_ssh (SSHClient): + auth_info (dict): + regex (bool): + **kwargs: filter table after table returned + + Returns: + dict: events table in format: {'headers': , 'values': + } + """ + + args = '-l {}'.format(limit) + + # args = __process_query_args(args, query_key, query_value, query_type) 
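    # Illustrative note (added for clarity): the filters below are joined into
    # a single --query string, so a call with event_log_id='200.001' and
    # start='20170410 01:23:34' would end up running roughly:
    #   fm event-list -l 5 -q 'event_log_id=200.001;start=20170410 01:23:34' \
    #       --nowrap --nopaging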
+ query_dict = { + 'event_log_id': event_log_id, + 'entity_type_id': entity_type_id, + 'entity_instance_id': entity_instance_id, + 'severity': severity, + 'start': '{}'.format(start) if start else None, + 'end': '{}'.format(end) if end else None + } + + queries = [] + for q_key, q_val in query_dict.items(): + if q_val is not None: + queries.append('{}={}'.format(q_key, str(q_val))) + + query_string = ';'.join(queries) + if query_string: + args += " -q '{}'".format(query_string) + + args += ' --nowrap --nopaging' + if show_uuid: + args += ' --uuid' + if show_only: + args += ' --{}'.format(show_only.lower()) + if show_suppress: + args += ' --include_suppress' + + table_ = table_parser.table( + cli.fm('event-list ', args, ssh_client=con_ssh, auth_info=auth_info)[1]) + + if kwargs: + table_ = table_parser.filter_table(table_, regex=regex, **kwargs) + + return table_ + + +def _compose_events_table(output, uuid=False): + if not output['headers']: + headers = ['UUID', 'Time Stamp', 'State', 'Event Log ID', 'Reason Text', + 'Entity Instance ID', 'Severity'] + if not uuid: + headers.remove('UUID') + values = [] + output['headers'] = headers + output['values'] = values + + return output + + +def __process_query_args(args, query_key, query_value, query_type): + if query_key: + if not query_value: + raise ValueError( + "Query value is not supplied for key - {}".format(query_key)) + data_type_arg = '' if not query_type else "{}::".format( + query_type.lower()) + args += ' -q {}={}"{}"'.format(query_key.lower(), data_type_arg, + query_value.lower()) + return args + + +def wait_for_events(timeout=60, num=30, uuid=False, show_only=None, + event_log_id=None, entity_type_id=None, + entity_instance_id=None, severity=None, start=None, + end=None, field='Event Log ID', + regex=False, strict=True, check_interval=5, fail_ok=True, + con_ssh=None, + auth_info=Tenant.get('admin_platform'), **kwargs): + """ + Wait for event(s) to appear in fm event-list + Args: + timeout (int): max time to wait in seconds + num (int): max number of event logs to return + uuid (bool): whether to show uuid + show_only (str): 'alarms_and_events' or 'logs' to return only + alarms_and_events or logs + fail_ok (bool): whether to return False if event(s) did not appear + within timeout + field (str): list of values to return. Defaults to 'Event Log ID' + con_ssh (SSHClient): + auth_info (dict): + regex (bool): Whether to use regex or string operation to + search/match the value in kwargs + strict (bool): whether it's a strict match (case is always ignored + regardless of this flag) + check_interval (int): how often to check the event logs + event_log_id (str|None): event log id passed to system eventlog -q + event_log_id= + entity_type_id (str|None): entity_type_id passed to system eventlog + -q entity_type_id= + entity_instance_id (str|None): entity_instance_id passed to + system eventlog -q entity_instance_id= + severity (str|None): + start (str|None): start date/time passed to '--query' in format + "20170410"/"20170410 01:23:34" + end (str|None): end date/time passed to '--query' in format + "20170410"/"20170410 01:23:34" + + **kwargs: criteria to filter out event(s) from the events list table + + Returns: + list: list of event log ids (or whatever specified in rtn_value) for + matching events. 
+ + """ + end_time = time.time() + timeout + while time.time() < end_time: + events_tab = get_events_table(limit=num, show_uuid=uuid, + show_only=show_only, + event_log_id=event_log_id, + entity_type_id=entity_type_id, + entity_instance_id=entity_instance_id, + severity=severity, start=start, end=end, + con_ssh=con_ssh, auth_info=auth_info) + events_tab = table_parser.filter_table(events_tab, strict=strict, + regex=regex, **kwargs) + events = table_parser.get_column(events_tab, field) + if events: + LOG.info("Event(s) appeared in event-list: {}".format(events)) + return events + + time.sleep(check_interval) + + msg = "Event(s) did not appear in fm event-list within timeout." + if fail_ok: + LOG.warning(msg) + return [] + else: + raise exceptions.TimeoutException(msg) + + +def delete_alarms(alarms=None, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Delete active alarms_and_events + + Args: + alarms (list|str): UUID(s) of alarms_and_events to delete + fail_ok (bool): whether or not to raise exception if any alarm failed + to delete + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): (rtn_code(int), message(str)) + 0, "Alarms deleted successfully" + 1, "Some alarm(s) still exist on system after attempt to delete: + " + + """ + if alarms is None: + alarms_tab = get_alarms_table(uuid=True) + alarms = [] + if alarms_tab['headers']: + alarms = table_parser.get_column(alarms_tab, 'UUID') + + if isinstance(alarms, str): + alarms = [alarms] + + LOG.info("Deleting following alarms_and_events: {}".format(alarms)) + + res = {} + failed_clis = [] + for alarm in alarms: + code, out = cli.fm('alarm-delete', alarm, ssh_client=con_ssh, + auth_info=auth_info) + res[alarm] = code, out + + if code != 0: + failed_clis.append(alarm) + + post_alarms_tab = get_alarms_table(uuid=True) + if post_alarms_tab['headers']: + post_alarms = table_parser.get_column(post_alarms_tab, 'UUID') + else: + post_alarms = [] + + undeleted_alarms = list(set(alarms) & set(post_alarms)) + if undeleted_alarms: + err_msg = "Some alarm(s) still exist on system after attempt to " \ + "delete: {}\nAlarm delete results: {}". \ + format(undeleted_alarms, res) + + if fail_ok: + return 1, err_msg + raise exceptions.SysinvError(err_msg) + + elif failed_clis: + LOG.warning( + "Some alarm-delete cli(s) rejected, but alarm no longer " + "exists.\nAlarm delete results: {}". 
+ format(res)) + + succ_msg = "Alarms deleted successfully" + LOG.info(succ_msg) + return 0, succ_msg + + +def wait_for_alarm_gone(alarm_id, entity_id=None, reason_text=None, + strict=False, timeout=120, check_interval=10, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for given alarm to disappear from fm alarm-list + Args: + alarm_id (str): such as 200.009 + entity_id (str): entity instance id for the alarm (strict as defined + in param) + reason_text (str): reason text for the alarm (strict as defined in + param) + strict (bool): whether to perform strict string match on entity + instance id and reason + timeout (int): max seconds to wait for alarm to disappear + check_interval (int): how frequent to check + fail_ok (bool): whether to raise exception if alarm did not disappear + within timeout + con_ssh (SSHClient): + auth_info (dict): + + Returns (bool): True if alarm is gone else False + + """ + + LOG.info( + "Waiting for alarm {} to disappear from fm alarm-list".format(alarm_id)) + build_ver = get_sw_version(con_ssh=con_ssh) + + alarmcmd = 'alarm-list' + if build_ver != '15.12': + alarmcmd += ' --nowrap' + + end_time = time.time() + timeout + while time.time() < end_time: + alarms_tab = table_parser.table( + cli.fm(alarmcmd, ssh_client=con_ssh, auth_info=auth_info)[1]) + + alarm_tab = table_parser.filter_table(alarms_tab, + **{'Alarm ID': alarm_id}) + if table_parser.get_all_rows(alarm_tab): + kwargs = {} + if entity_id: + kwargs['Entity ID'] = entity_id + if reason_text: + kwargs['Reason Text'] = reason_text + + if kwargs: + alarms = table_parser.get_values(alarm_tab, + target_header='Alarm ID', + strict=strict, **kwargs) + if not alarms: + LOG.info( + "Alarm {} with {} is not displayed in fm " + "alarm-list".format( + alarm_id, kwargs)) + return True + + else: + LOG.info( + "Alarm {} is not displayed in fm alarm-list".format(alarm_id)) + return True + + time.sleep(check_interval) + + else: + err_msg = "Timed out waiting for alarm {} to disappear".format(alarm_id) + if fail_ok: + LOG.warning(err_msg) + return False + else: + raise exceptions.TimeoutException(err_msg) + + +def _get_alarms(alarms_tab): + alarm_ids = table_parser.get_column(alarms_tab, 'Alarm_ID') + entity_ids = table_parser.get_column(alarms_tab, 'Entity ID') + alarms = list(zip(alarm_ids, entity_ids)) + return alarms + + +def wait_for_alarm(field='Alarm ID', alarm_id=None, entity_id=None, reason=None, + severity=None, timeout=60, + check_interval=3, regex=False, strict=False, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for given alarm to appear + Args: + field: + alarm_id (str): such as 200.009 + entity_id (str|list|tuple): entity instance id for the alarm (strict + as defined in param) + reason (str): reason text for the alarm (strict as defined in param) + severity (str): severity of the alarm to wait for + timeout (int): max seconds to wait for alarm to appear + check_interval (int): how frequent to check + regex (bool): whether to use regex when matching entity instance id + and reason + strict (bool): whether to perform strict match on entity instance id + and reason + fail_ok (bool): whether to raise exception if alarm did not disappear + within timeout + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): (, ). 
Such as (True, '200.009') or ( + False, None) + + """ + + kwargs = {} + if alarm_id: + kwargs['Alarm ID'] = alarm_id + if reason: + kwargs['Reason Text'] = reason + if severity: + kwargs['Severity'] = severity + + if entity_id and isinstance(entity_id, str): + entity_id = [entity_id] + + end_time = time.time() + timeout + while time.time() < end_time: + current_alarms_tab = get_alarms_table(con_ssh=con_ssh, + auth_info=auth_info) + if kwargs: + current_alarms_tab = table_parser.filter_table( + table_=current_alarms_tab, strict=strict, regex=regex, + **kwargs) + if entity_id: + val = [] + for entity in entity_id: + entity_filter = {'Entity ID': entity} + val_ = table_parser.get_values(current_alarms_tab, field, + strict=strict, regex=regex, + **entity_filter) + if not val_: + LOG.info( + "Alarm for entity {} has not appeared".format(entity)) + time.sleep(check_interval) + continue + val += val_ + else: + val = table_parser.get_values(current_alarms_tab, field) + + if val: + LOG.info('Expected alarm appeared. Filters: {}'.format(kwargs)) + return True, val + + time.sleep(check_interval) + + entity_str = ' for entity {}'.format(entity_id) if entity_id else '' + err_msg = "Alarm {}{} did not appear in fm alarm-list within {} " \ + "seconds".format(kwargs, entity_str, timeout) + if fail_ok: + LOG.warning(err_msg) + return False, None + + raise exceptions.TimeoutException(err_msg) + + +def wait_for_alarms_gone(alarms, timeout=120, check_interval=3, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for given alarms_and_events to be gone from fm alarm-list + Args: + alarms (list): list of tuple. [(, ), ...] + timeout (int): + check_interval (int): + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): (res(bool), remaining_alarms(list of tuple)) + + """ + pre_alarms = list(alarms) # Don't update the original list + LOG.info( + "Waiting for alarms_and_events to disappear from fm alarm-list: " + "{}".format(pre_alarms)) + alarms_to_check = pre_alarms.copy() + + alarms_cleared = [] + + def _update_alarms(alarms_to_check_, alarms_cleared_): + current_alarms_tab = get_alarms_table(con_ssh=con_ssh, + auth_info=auth_info) + current_alarms = _get_alarms(current_alarms_tab) + + for alarm in pre_alarms: + if alarm not in current_alarms: + LOG.info( + "Removing alarm {} from current alarms_and_events list: " + "{}".format(alarm, alarms_to_check)) + alarms_to_check_.remove(alarm) + alarms_cleared_.append(alarm) + + _update_alarms(alarms_to_check_=alarms_to_check, + alarms_cleared_=alarms_cleared) + if not alarms_to_check: + LOG.info( + "Following alarms_and_events cleared: {}".format(alarms_cleared)) + return True, [] + + end_time = time.time() + timeout + while time.time() < end_time: + pre_alarms = alarms_to_check.copy() + time.sleep(check_interval) + _update_alarms(alarms_to_check_=alarms_to_check, + alarms_cleared_=alarms_cleared) + if not alarms_to_check: + LOG.info("Following alarms_and_events cleared: {}".format( + alarms_cleared)) + return True, [] + else: + err_msg = "Following alarms_and_events did not clear within {} " \ + "seconds: {}".format(timeout, alarms_to_check) + if fail_ok: + LOG.warning(err_msg) + return False, alarms_to_check + else: + raise exceptions.TimeoutException(err_msg) + + +def wait_for_all_alarms_gone(timeout=120, check_interval=3, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for all alarms_and_events to be cleared from fm alarm-list + Args: + timeout (int): + 
check_interval (int): + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): (res(bool), remaining_alarms(tuple)) + + """ + + LOG.info( + "Waiting for all existing alarms_and_events to disappear from fm " + "alarm-list: {}".format( + get_alarms())) + + end_time = time.time() + timeout + while time.time() < end_time: + current_alarms_tab = get_alarms_table(con_ssh=con_ssh, + auth_info=auth_info) + current_alarms = _get_alarms(current_alarms_tab) + + if len(current_alarms) == 0: + return True, [] + else: + time.sleep(check_interval) + + else: + existing_alarms = get_alarms() + err_msg = "Alarms did not clear within {} seconds: {}".format( + timeout, existing_alarms) + if fail_ok: + LOG.warning(err_msg) + return False, existing_alarms + else: + raise exceptions.TimeoutException(err_msg) + + +def host_exists(host, field='hostname', con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + + Args: + host: + field: + con_ssh: + auth_info + + Returns (bool): whether given host exists in system host-list + + """ + if not field.lower() in ['hostname', 'id']: + raise ValueError("field has to be either \'hostname\' or \'id\'") + + hosts = get_hosts(con_ssh=con_ssh, auth_info=auth_info, field=field) + return host in hosts + + +def modify_system(fail_ok=True, con_ssh=None, + auth_info=Tenant.get('admin_platform'), **kwargs): + """ + Modify the System configs/info. + + Args: + fail_ok (bool): + con_ssh (SSHClient): + auth_info (dict): + **kwargs: attribute-value pairs + + Returns: (int, str) + 0 - success + 1 - error + + Test Steps: + - Set the value via system modify = [,= action=install + - anystr system dns-modify <> action=anystring... + Returns (tuple): + (-1, ) + (0, ) + (1, ) + + """ + if not nameservers: + raise ValueError("Please specify DNS server(s).") + + if check_first: + dns_servers = get_dns_servers(con_ssh=con_ssh, + auth_info=auth_info) + if dns_servers == nameservers and with_action_option is None: + msg = 'DNS servers already set to {}. Do nothing.'.format( + dns_servers) + LOG.info(msg) + return -1, dns_servers + + args_ = 'nameservers="{}"'.format(','.join(nameservers)) + + if with_action_option is not None: + args_ += ' action={}'.format(with_action_option) + + LOG.info('args_:{}'.format(args_)) + code, output = cli.system('dns-modify', args_, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info, + timeout=SysInvTimeout.DNS_MODIFY) + if code == 1: + return 1, output + + post_dns_servers = get_dns_servers(auth_info=auth_info, con_ssh=con_ssh) + if post_dns_servers != nameservers: + raise exceptions.SysinvError( + 'dns servers expected: {}; actual: {}'.format(nameservers, + post_dns_servers)) + + LOG.info("DNS servers successfully updated to: {}".format(nameservers)) + return 0, nameservers + + +def get_vm_topology_tables(*table_names, con_ssh=None, combine_multiline=False, + exclude_one_col_table=True, + auth_info=Tenant.get('admin')): + if con_ssh is None: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + show_args = ','.join(table_names) + + tables_ = table_parser.tables(con_ssh.exec_sudo_cmd('vm-topology --show {}'. 
+ format(show_args), + expect_timeout=30)[1], + combine_multiline_entry=combine_multiline) + + if exclude_one_col_table: + new_tables = [] + for table_ in tables_: + if len(table_['headers']) > 1: + new_tables.append(table_) + return new_tables + + return tables_ + + +def __suppress_unsuppress_event(alarm_id, suppress=True, check_first=False, + fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + suppress/unsuppress an event by uuid + Args: + alarm_id (str): + fail_ok (bool): + con_ssh (SSHClient) + suppress(bool) True or false + + Returns (tuple): (rtn_code, message) + (0, ) + """ + + suppressed_alarms_tab = get_suppressed_alarms(uuid=True, con_ssh=con_ssh, + auth_info=auth_info) + + alarm_status = "unsuppressed" if suppress else "suppressed" + cmd = "event-suppress" if suppress else "event-unsuppress" + alarm_filter = {"Suppressed Event ID's": alarm_id} + + if check_first: + if not suppressed_alarms_tab['values']: + pre_status = "unsuppressed" + else: + pre_status = table_parser.get_values(table_=suppressed_alarms_tab, + target_header='Status', + strict=True, + **alarm_filter)[0] + if pre_status.lower() != alarm_status: + msg = "Event is already {}. Do nothing".format(pre_status) + LOG.info(msg) + return -1, msg + + code, output = cli.fm(cmd, '--alarm_id ' + alarm_id, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code == 1: + return 1, output + + post_suppressed_alarms_tab = get_suppressed_alarms(uuid=True, + con_ssh=con_ssh) + if not post_suppressed_alarms_tab['values']: + post_status = ["unsuppressed"] + else: + post_status = table_parser.get_values(table_=post_suppressed_alarms_tab, + target_header="Status", + strict=True, + **{"Event id": alarm_id}) + expt_status = "suppressed" if suppress else "unsuppressed" + if post_status[0].lower() != expt_status: + msg = "Alarm {} is not {}".format(alarm_id, expt_status) + if fail_ok: + LOG.warning(msg) + return 2, msg + raise exceptions.TiSError(msg) + + succ_msg = "Event {} is {} successfully".format(alarm_id, expt_status) + LOG.info(succ_msg) + return 0, succ_msg + + +def suppress_event(alarm_id, check_first=False, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + return __suppress_unsuppress_event(alarm_id, True, check_first=check_first, + fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info) + + +def unsuppress_event(alarm_id, check_first=False, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + return __suppress_unsuppress_event(alarm_id, False, check_first=check_first, + fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info) + + +def generate_event(event_id='300.005', state='set', severity='critical', + reason_text='Generated for testing', + entity_id='STXAuto', unknown_text='unknown1', + unknown_two='unknown2', con_ssh=None): + cmd = '''fmClientCli -c "### ###{}###{}###{}###{}### ###{}### ###{}### + {}### ###True###True###"'''. 
\ + format(event_id, state, reason_text, entity_id, severity, unknown_text, + unknown_two) + + LOG.info("Generate system event: {}".format(cmd)) + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + output = con_ssh.exec_cmd(cmd, fail_ok=False)[1] + event_uuid = re.findall(UUID, output)[0] + LOG.info("Event {} generated successfully".format(event_uuid)) + + return event_uuid + + +def get_service_parameter_values(service=None, section=None, name=None, + field='value', con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Returns the list of values from system service-parameter-list + service, section, name can be used to filter the table + Args: + field (str): field to return valueds for. Default to 'value' + service (str): + section (str): + name (str): + con_ssh: + auth_info + + Returns (list): + + """ + kwargs = {} + if service: + kwargs['service'] = service + if section: + kwargs['section'] = section + if name: + kwargs['name'] = name + + table_ = table_parser.table( + cli.system('service-parameter-list --nowrap', ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_values(table_, field, **kwargs) + + +def create_service_parameter(service, section, name, value, con_ssh=None, + fail_ok=False, check_first=True, + modify_existing=True, verify=True, apply=False, + auth_info=Tenant.get('admin_platform')): + """ + Add service-parameter + system service-parameter-add (service) (section) (name)=(value) + Args: + service (str): Required + section (str): Required + name (str): Required + value (str): Required + con_ssh: + fail_ok: + check_first (bool): Check if the service parameter exists before + modify_existing (bool): Whether to modify the service parameter if it + already exists + verify: this enables to skip the verification. sometimes not all + values are displayed in the + service-parameter-list, ex password + apply (bool): whether to apply service parameter after add + auth_info + + Returns (tuple): (rtn_code, err_msg or param_uuid) + + """ + if check_first: + val = get_service_parameter_values(service=service, section=section, + name=name, con_ssh=con_ssh, + auth_info=auth_info) + if val: + val = val[0] + msg = "The service parameter {} {} {} already exists. 
value: " \ + "{}".format(service, section, name, val) + LOG.info(msg) + if value != val and modify_existing: + return modify_service_parameter(service, section, name, value, + create=False, apply=apply, + con_ssh=con_ssh, + fail_ok=fail_ok, + check_first=False, + verify=verify, + auth_info=auth_info) + return -1, msg + + LOG.info("Creating service parameter") + args = service + ' ' + section + ' ' + name + '=' + value + res, out = cli.system('service-parameter-add', args, ssh_client=con_ssh, + fail_ok=fail_ok) + if res == 1: + return 1, out + + LOG.info("Verifying the service parameter value") + val = get_service_parameter_values(service=service, section=section, + name=name, con_ssh=con_ssh, + auth_info=auth_info)[0] + value = value.strip('\"') + if verify: + if val != value: + msg = 'The service parameter was not added with the correct ' \ + 'value {} to {}'.format(val, value) + if fail_ok: + return 2, msg + raise exceptions.SysinvError(msg) + LOG.info("Service parameter was added with the correct value") + uuid = get_service_parameter_values(field='uuid', service=service, + section=section, name=name, + con_ssh=con_ssh, + auth_info=auth_info)[0] + if apply: + apply_service_parameters(service, wait_for_config=True, + con_ssh=con_ssh, + auth_info=auth_info) + + return 0, uuid + + +def modify_service_parameter(service, section, name, value, apply=False, + con_ssh=None, fail_ok=False, + check_first=True, create=True, verify=True, + auth_info=Tenant.get('admin_platform')): + """ + Modify a service parameter + Args: + service (str): Required + section (str): Required + name (str): Required + value (str): Required + apply + con_ssh: + fail_ok: + check_first (bool): Check if the parameter exists first + create (bool): Whether to create the parameter if it does not exist + verify: this enables to skip the verification. 
sometimes not all + values are displayed in the service-parameter-list, ex password + auth_info + + Returns (tuple): (rtn_code, message) + + """ + if check_first: + val = get_service_parameter_values(service=service, section=section, + name=name, con_ssh=con_ssh) + if not val: + msg = "The service parameter {} {} {} doesn't exist".format(service, + section, + name) + LOG.info(msg) + if create: + return create_service_parameter(service, section, name, value, + auth_info=auth_info, + con_ssh=con_ssh, + fail_ok=fail_ok, + check_first=False) + return -1, msg + if val[0] == value: + msg = "The service parameter value is already set to {}".format(val) + return -1, msg + + LOG.info("Modifying service parameter") + args = service + ' ' + section + ' ' + name + '=' + value + res, out = cli.system('service-parameter-modify', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if res == 1: + return 1, out + + LOG.info("Verifying the service parameter value") + val = get_service_parameter_values(service=service, section=section, + name=name, con_ssh=con_ssh, + auth_info=auth_info)[0] + value = value.strip('\"') + if verify: + if val != value: + msg = 'The service parameter was not modified to the correct value' + if fail_ok: + return 2, msg + raise exceptions.SysinvError(msg) + msg = "Service parameter modified to {}".format(val) + LOG.info(msg) + + if apply: + apply_service_parameters(service, wait_for_config=True, con_ssh=con_ssh, + auth_info=auth_info) + + return 0, msg + + +def delete_service_parameter(uuid, con_ssh=None, fail_ok=False, + check_first=True, + auth_info=Tenant.get('admin_platform')): + """ + Delete a service parameter + Args: + uuid (str): Required + con_ssh: + fail_ok: + check_first (bool): Check if the service parameter exists before + auth_info + + Returns (tuple): + + """ + if check_first: + uuids = get_service_parameter_values(field='uuid', con_ssh=con_ssh, + auth_info=auth_info) + if uuid not in uuids: + return -1, "There is no service parameter with uuid {}".format(uuid) + + res, out = cli.system('service-parameter-delete', uuid, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if res == 1: + return 1, out + + LOG.info("Deleting service parameter") + uuids = get_service_parameter_values(field='uuid', con_ssh=con_ssh, + auth_info=auth_info) + if uuid in uuids: + err_msg = "Service parameter was not deleted" + if fail_ok: + return 2, err_msg + raise exceptions.SysinvError(err_msg) + msg = "The service parameter {} was deleted".format(uuid) + LOG.info(msg) + return 0, msg + + +def apply_service_parameters(service, wait_for_config=True, timeout=300, + con_ssh=None, + fail_ok=False, + auth_info=Tenant.get('admin_platform')): + """ + Apply service parameters + Args: + service (str): Required + wait_for_config (bool): Wait for config out of date alarms to clear + timeout (int): + con_ssh: + auth_info + fail_ok: + + Returns (tuple): (rtn_code, message) + + """ + LOG.info("Applying service parameters {}".format(service)) + res, out = cli.system('service-parameter-apply', service, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if res == 1: + return res, out + + alarm_id = '250.001' + time.sleep(10) + + if wait_for_config: + LOG.info("Waiting for config-out-of-date alarms to clear. 
" + "There may be cli errors when active controller's config " + "updates") + end_time = time.time() + timeout + while time.time() < end_time: + table_ = get_alarms_table(uuid=True, con_ssh=con_ssh, retry=3) + alarms_tab = table_parser.filter_table(table_, + **{'Alarm ID': alarm_id}) + uuids = table_parser.get_values(alarms_tab, 'uuid') + if not uuids: + LOG.info("Config has been applied") + break + time.sleep(5) + else: + err_msg = "The config has not finished applying after timeout" + if fail_ok: + return 2, err_msg + raise exceptions.TimeoutException(err_msg) + + return 0, "The {} service parameter was applied".format(service) + + +def get_system_health_query(con_ssh=None, + auth_info=Tenant.get('admin_platform')): + output = cli.system('health-query', ssh_client=con_ssh, fail_ok=False, + auth_info=auth_info, source_openrc=True)[1] + output = output.splitlines() + failed = [] + for line in output: + if "[Fail]" in line: + failed_item = line.split(sep=': ')[0] + failed.append(failed_item.strip()) + + if failed: + return 1, failed + else: + return 0, None + + +def get_build_info(con_ssh=None, refresh=False): + """ + Get build info from /etc/build.info + Args: + con_ssh: + refresh: + + Returns (dict): + + """ + + build_info = ProjVar.get_var('BUILD_INFO') + if build_info and not refresh: + return build_info + + con_client = con_ssh + code, output = con_client.exec_cmd('cat /etc/build.info') + build_info = {} + for line in output.splitlines(): + if '="' in line: + key, value = re.findall('(.*)="(.*)"', line)[0] + build_info[key] = value + + for mandatory_key in ('BUILD_ID', 'BUILD_HOST', 'BUILD_BY', 'JOB'): + if mandatory_key not in build_info: + build_info[mandatory_key] = '' + + ProjVar.set_var(BUILD_INFO=build_info) + sw_version = build_info.get('SW_VERSION') + if sw_version: + existing_versions = ProjVar.get_var('SW_VERSION') + if not (existing_versions and sw_version == existing_versions[-1]): + ProjVar.set_var(append=True, SW_VERSION=sw_version) + + return build_info + + +def get_sw_version(con_ssh=None, use_existing=True): + """ + + Args: + con_ssh: + use_existing + + Returns (str): e.g., 16.10 + + """ + sw_versions = ProjVar.get_var('SW_VERSION') + if use_existing and sw_versions: + return sw_versions[-1] + + info_dict = get_build_info(con_ssh=con_ssh, refresh=True) + return info_dict.get('SW_VERSION') + + +def install_license(license_path, timeout=30, con_ssh=None): + if con_ssh is None: + con_ssh = ControllerClient.get_active_controller() + + cmd = "test -e {}".format(license_path) + rc = con_ssh.exec_cmd(cmd, fail_ok=True)[0] + + if rc != 0: + msg = "The {} file missing from active controller".format(license_path) + return rc, msg + + cmd = "sudo license-install " + license_path + con_ssh.send(cmd) + end_time = time.time() + timeout + rc = 1 + while time.time() < end_time: + index = con_ssh.expect( + [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.Y_N_PROMPT], + timeout=timeout) + if index == 2: + con_ssh.send('y') + + if index == 1: + con_ssh.send(HostLinuxUser.get_password()) + + if index == 0: + rc = con_ssh.exec_cmd("echo $?")[0] + con_ssh.flush() + break + + return rc + + +def wait_for_services_enable(timeout=300, fail_ok=False, con_ssh=None): + """ + Wait for services to be enabled-active in system service-list + Args: + timeout (int): max wait time in seconds + fail_ok (bool): whether return False or raise exception when some + services fail to reach enabled-active state + con_ssh (SSHClient): + + Returns (tuple): ((bool), (str)) + (True, "All services are enabled-active") 
+ (False, "Some services are not enabled-active: ") + Applicable if fail_ok=True + + """ + LOG.info("Wait for services to be enabled-active in system service-list") + service_list_tab = None + end_time = time.time() + timeout + while time.time() < end_time: + service_list_tab = table_parser.table( + cli.system('service-list', ssh_client=con_ssh)[1]) + states = table_parser.get_column(service_list_tab, 'state') + if all(state == 'enabled-active' for state in states): + LOG.info("All services are enabled-active in system service-list") + return True, "All services are enabled-active" + + LOG.warning( + "Not all services are enabled-ative within {} seconds".format(timeout)) + inactive_services_tab = table_parser.filter_table(service_list_tab, + exclude=True, + state='enabled-active') + msg = "Some services are not enabled-active: {}".format( + table_parser.get_all_rows(inactive_services_tab)) + if fail_ok: + return False, msg + raise exceptions.SysinvError(msg) + + +def enable_service(service_name, con_ssh=None, + auth_info=Tenant.get('admin_platform'), fail_ok=False): + """ + Enable Service + Args: + service_name (str): + con_ssh (SSHClient): + auth_info (dict): + fail_ok: whether return False or raise exception when some services + fail to reach enabled-active state + + Returns (tuple): + + """ + + res, output = cli.system('service-enable', service_name, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if res == 1: + return 1, output + + msg = "Service enabled: {]".format(service_name) + LOG.info(msg) + return 0, msg + + +def disable_service(service_name, con_ssh=None, + auth_info=Tenant.get('admin_platform'), fail_ok=False): + """ + Disable Service + Args: + service_name (str) + con_ssh (SSHClient): + auth_info (dict): + fail_ok: whether return False or raise exception when some services + fail to reach enabled-active state + + Returns (tuple): + + """ + + res, output = cli.system('service-disable', service_name, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if res == 1: + return 1, output + + msg = "Service disabled: {}".format(service_name) + LOG.info(msg) + return 0, msg + + +def get_system_networks(field='uuid', uuid=None, net_type=None, mtu=None, + dynamic=None, pool_uuid=None, + auth_info=Tenant.get('admin_platform'), con_ssh=None, + strict=True, + regex=None, **kwargs): + """ + Get networks values from system network-list + Args: + field: 'uuid' (default) + uuid: + net_type: + mtu: + dynamic: + pool_uuid: + auth_info: + con_ssh: + strict: + regex: + **kwargs: + + Returns (list): + """ + table_ = table_parser.table( + cli.system('network-list --nowrap', ssh_client=con_ssh, + auth_info=auth_info)[1]) + args_temp = { + 'uuid': uuid, + 'type': net_type, + 'mtu': mtu, + 'dynamic': dynamic, + 'pool_uuid': pool_uuid + } + kwargs.update({k: v for k, v in args_temp.items() if v is not None}) + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, **kwargs) + + +def get_clusters(field='uuid', uuid=None, cluster_uuid=None, ntype=None, + name=None, + auth_info=Tenant.get('admin_platform'), con_ssh=None, + strict=True, regex=None, **kwargs): + """ + Get cluster values from system cluster-list + Args: + field: 'uuid' (default) + uuid: + cluster_uuid: + ntype: (mapped as ntype) + name: + auth_info: + con_ssh: + strict: + regex: + **kwargs: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('cluster-list --nowrap', ssh_client=con_ssh, + auth_info=auth_info)[1]) + args_temp = { + 'uuid': uuid, + 'cluster_uuid': 
cluster_uuid, + 'ntype': ntype, + 'name': name, + } + kwargs.update({k: v for k, v in args_temp.items() if v is not None}) + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, **kwargs) + + +def get_services(field='id', service_id=None, service_name=None, hostname=None, + state=None, + auth_info=Tenant.get('admin_platform'), con_ssh=None, + strict=True, regex=None, **kwargs): + """ + Get service_list through service service-list command + Args: + field: 'id' (default value) + service_id: + service_name: + hostname: + state: + auth_info: + con_ssh: + strict: + regex: + **kwargs: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('service-list --nowrap', ssh_client=con_ssh, + auth_info=auth_info)[1]) + args_temp = { + 'id': service_id, + 'service_name': service_name, + 'hostname': hostname, + 'state': state + } + kwargs.update({k: v for k, v in args_temp.items() if v is not None}) + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, **kwargs) + + +def get_servicenodes(field='id', servicenode_id=None, name=None, + operational=None, availability=None, + ready_state=None, auth_info=Tenant.get('admin_platform'), + con_ssh=None, strict=True, + regex=None, **kwargs): + """ + Get servicenodes list through service servicenode-list + + Args: + field (str|tuple|list): 'id' (default) + servicenode_id: + name: + operational: + availability: + ready_state: + auth_info: + con_ssh: + strict: + regex: + **kwargs: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('servicenode-list --nowrap', ssh_client=con_ssh, + auth_info=auth_info)[1]) + args_temp = { + 'id': servicenode_id, + 'name': name, + 'operational': operational, + 'ready_state': ready_state, + 'availability': availability + } + kwargs.update({k: v for k, v in args_temp.items() if v is not None}) + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, **kwargs) + + +def get_servicegroups(fields='uuid', uuid=None, service_group_name=None, + hostname=None, state=None, + auth_info=Tenant.get('admin_platform'), con_ssh=None, + strict=True, regex=None, **kwargs): + """ + Get servicegroups via system servicegroup-list + Args: + fields: 'uuid' (default) + uuid: + service_group_name: + hostname: + state: + auth_info: + con_ssh: + strict: + regex + **kwargs: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('servicegroup-list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + args_temp = { + 'uuid': uuid, + 'service_group_name': service_group_name, + 'hostname': hostname, + 'state': state + } + kwargs.update({k: v for k, v in args_temp.items() if v is not None}) + return table_parser.get_multi_values(table_, fields, strict=strict, + regex=regex, **kwargs) + + +def create_snmp_comm(comm_string, field='uuid', fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Create a new SNMP community string + Args: + comm_string (str): Community string to create + field (str): property to return + fail_ok (bool) + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + + """ + args = '-c "{}"'.format(comm_string) + code, out = cli.system('snmp-comm-add', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, out + + val = table_parser.get_value_two_col_table(table_parser.table(out), + field=field) + + return 0, val + + +def create_snmp_trapdest(comm_string, ip_addr, field='uuid', fail_ok=False, + con_ssh=None, + 
auth_info=Tenant.get('admin_platform')): + """ + Create a new SNMP trap destination + Args: + comm_string (str): SNMP community string + ip_addr (str): IP address of the trap destination + field (str): property to return + fail_ok (bool) + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + + """ + args = '-c "{}" -i "{}"'.format(comm_string, ip_addr) + code, out = cli.system('snmp-trapdest-add', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code > 0: + return 1, out + + val = table_parser.get_value_two_col_table(table_parser.table(out), + field=field) + + return 0, val + + +def get_snmp_comms(field='SNMP community', con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get SNMP community strings + Args: + field (str|list|tuple) + con_ssh (SSHClient): + auth_info (dict): + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('snmp-comm-list', ssh_client=con_ssh, auth_info=auth_info)[ + 1]) + + return table_parser.get_multi_values(table_, field) + + +def get_snmp_trapdests(field='IP Address', con_ssh=None, + auth_info=Tenant.get('admin_platform'), + exclude_system=True, + **kwargs): + """ + Get SNMP trap destination ips + Args: + field (str|tuple|list): + con_ssh (SSHClient): + auth_info (dict): + exclude_system + kwargs + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('snmp-trapdest-list', ssh_client=con_ssh, + auth_info=auth_info)[1]) + if exclude_system: + table_ = table_parser.filter_table(table_, exclude=True, **{ + 'SNMP Community': 'dcorchAlarmAggregator'}) + + return table_parser.get_multi_values(table_, field, **kwargs) + + +def delete_snmp_comm(comms, check_first=True, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Delete snmp community string + Args: + comms (str): Community string or uuid to delete + check_first (bool) + fail_ok (bool) + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): + + """ + if isinstance(comms, str): + comms = comms.split(sep=' ') + else: + comms = list(comms) + + if check_first: + current_comms = get_snmp_comms(con_ssh=con_ssh, auth_info=auth_info) + comms = [comm for comm in comms if comm in current_comms] + if not comms: + msg = '"{}" SNMP community string does not exist. 
Do ' \ + 'nothing.'.format(comms) + LOG.info(msg) + return -1, msg + + LOG.info('Deleting SNMP community strings: {}'.format(comms)) + comms = ' '.join(['"{}"'.format(comm) for comm in comms]) + code, out = cli.system('snmp-comm-delete', comms, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + post_comms = get_snmp_comms(con_ssh=con_ssh, auth_info=auth_info) + undeleted_comms = [comm for comm in comms if comm in post_comms] + if undeleted_comms: + raise exceptions.SysinvError( + "Community string still exist after deletion: {}".format( + undeleted_comms)) + + if code == 0: + msg = 'SNMP community string "{}" is deleted successfully'.format(comms) + else: + msg = 'SNMP community string "{}" failed to delete'.format(comms) + + LOG.info(msg) + return code, out + + +def delete_snmp_trapdest(ip_addrs, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Delete SNMP trap destination + Args: + ip_addrs (str|list): SNMP trap destination IP address(es) + fail_ok (bool) + con_ssh (SSHClient): + auth_info (dict): + + Returns (dict): + + """ + if isinstance(ip_addrs, str): + ip_addrs = ip_addrs.split(sep=' ') + + arg = '' + for ip_addr in ip_addrs: + arg += '"{}" '.format(ip_addr) + code, out = cli.system('snmp-trapdest-delete', arg, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + return code, out + + +def get_oam_values(fields=None, con_ssh=None, + auth_info=Tenant.get('admin_platform'), rtn_dict=True): + """ + Get oam info via system oam-show + Args: + fields: + con_ssh: + auth_info: + rtn_dict + + Returns (dict|list): + + """ + table_ = table_parser.table( + cli.system('oam-show', ssh_client=con_ssh, auth_info=auth_info)[1]) + + if not fields: + fields = table_parser.get_column(table_, 'Property') + fields = [field for field in fields if field.startswith('oam_')] + + return table_parser.get_multi_values_two_col_table(table_, fields, + rtn_dict=rtn_dict) + + +def modify_oam_ips(fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), **kwargs): + """ + Modify oam ip(s) + Args: + fail_ok: + con_ssh: + auth_info: + + Returns: + + """ + if not kwargs: + raise ValueError("Nothing is provided to modify") + + args = ' '.join(['{}={}'.format(key, val) for key, val in kwargs.items()]) + LOG.info("Modify oam ip(s): {}".format(args)) + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + original = get_oam_values(fields=list(kwargs.keys()), auth_info=auth_info, + con_ssh=con_ssh) + code, output = cli.system('oam-modify', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + oam_info = get_oam_values(fields=list(kwargs.keys()), auth_info=auth_info, + con_ssh=con_ssh) + for field, expt_val in kwargs.items(): + actual_val = oam_info[field] + if expt_val != actual_val: + raise exceptions.SysinvError( + "{} expected: {}, actual: {}".format(field, expt_val, + actual_val)) + + from keywords import host_helper + active, standby = get_active_standby_controllers(con_ssh=con_ssh, + auth_info=auth_info) + standby_configured = True + if standby: + standby_configured = False + if wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, + entity_id=standby, timeout=120, + con_ssh=con_ssh, fail_ok=True, + auth_info=auth_info)[0]: + host_helper.lock_unlock_hosts(standby, auth_info=auth_info, + con_ssh=con_ssh) + standby_configured = True + + if not standby_configured: + revert_args = ' '.join( + ['{}={}'.format(key, val) for key, val in original.items()]) + LOG.error("Failed to modify oam 
ip. Revert to: {}".format(revert_args)) + cli.system('oam-modify', revert_args, ssh_client=con_ssh, + auth_info=auth_info) + raise exceptions.SysinvError( + "Config out-of-date alarm did not appear or standby controller " + "lock/unlock" + "failed after oam-modify.") + + # Update system ssh client and global var + fip_field = 'oam_if' if is_aio_simplex(con_ssh=con_ssh, + auth_info=auth_info) else \ + 'oam_floating_ip' + new_lab = ProjVar.get_var('lab') + if fip_field in kwargs: + new_fip = kwargs[fip_field] + con_ssh.update_host() + new_lab['floating ip'] = new_fip + if 'oam_c0_ip' in kwargs: + new_lab['controller-0 ip'] = kwargs['oam_c0_ip'] + if 'oam_c1_ip' in kwargs: + new_lab['controller-1 ip'] = kwargs['oam_c1_ip'] + ProjVar.set_var(LAB=new_lab) + + host_helper.lock_unlock_hosts(active, con_ssh=con_ssh, auth_info=auth_info) + LOG.info("Wait for config out-of-date alarm to clear on system") + wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, timeout=120, + auth_info=auth_info, + con_ssh=con_ssh) + + msg = "OAM IP(s) modified successfully." + LOG.info(msg) + return 0, msg + + +def modify_spectre_meltdown_version(version='spectre_meltdown_all', + check_first=True, con_ssh=None, + fail_ok=False, + auth_info=Tenant.get('admin_platform')): + """ + Modify spectre meltdown version + Args: + version (str): valid values: spectre_meltdown_v1, spectre_meltdown_all. + Other values will be rejected by system modify cmd. + check_first (bool): + con_ssh: + fail_ok (bool): + auth_info + + Returns (tuple): + (-1, "Security feature already set to . Do nothing") + (0, "System security_feature is successfully modified to: ") + (1, ) + + """ + current_version = get_system_values(fields='security_feature')[0] + if not current_version: + skip('spectre_meltdown update feature is unavailable in current load') + + from keywords import host_helper + hosts = get_hosts(con_ssh=con_ssh) + check_val = 'nopti nospectre_v2' + if check_first and version == current_version: + LOG.info( + "{} already set in 'system show'. Checking actual cmdline options " + "on each host.".format( + version)) + hosts_to_configure = [] + for host in hosts: + cmdline_options = host_helper.get_host_cmdline_options(host=host) + if 'v1' in version: + if check_val not in cmdline_options: + hosts_to_configure.append(host) + elif check_val in cmdline_options: + hosts_to_configure.append(host) + + hosts = hosts_to_configure + if not hosts_to_configure: + msg = 'Security feature already set to {}. 
Do nothing.'.format( + current_version) + LOG.info(msg) + return -1, msg + + LOG.info("Set spectre_meltdown version to {}".format(version)) + code, output = cli.system('modify -S {}'.format(version), + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + conf_storage0 = False + if 'storage-0' in hosts: + hosts.remove('storage-0') + conf_storage0 = True + + active_controller = get_active_controller_name(con_ssh=con_ssh, + auth_info=auth_info) + conf_active = False + if active_controller in hosts: + hosts.remove(active_controller) + conf_active = True + + if hosts: + LOG.info( + "Lock/unlock unconfigured hosts other than active controller: " + "{}".format(hosts)) + try: + for host in hosts: + host_helper.lock_host(host=host, con_ssh=con_ssh, + auth_info=auth_info) + finally: + host_helper.unlock_hosts(hosts=hosts, fail_ok=False, + con_ssh=con_ssh, auth_info=auth_info) + host_helper.wait_for_hosts_ready(hosts=hosts, con_ssh=con_ssh, + auth_info=auth_info) + + if conf_storage0: + LOG.info("Lock/unlock storage-0") + try: + host_helper.lock_host(host='storage-0', con_ssh=con_ssh, + auth_info=auth_info) + finally: + host_helper.unlock_host(host='storage-0', con_ssh=con_ssh, + auth_info=auth_info) + + if conf_active: + LOG.info( + "Lock/unlock active controller (swact first if needed): {}".format( + active_controller)) + try: + host_helper.lock_host(host=active_controller, swact=True, + con_ssh=con_ssh, auth_info=auth_info) + finally: + host_helper.unlock_host(host=active_controller, con_ssh=con_ssh, + auth_info=auth_info) + + LOG.info("Check 'system show' is updated to {}".format(version)) + post_version = \ + get_system_values(fields='security_feature', auth_info=auth_info)[0] + assert version == post_version, 'Value is not {} after system ' \ + 'modify'.format(version) + + LOG.info('Check cmdline options are updated on each host via /proc/cmdline') + hosts.append(active_controller) + for host in hosts: + options = host_helper.get_host_cmdline_options(host=host) + if 'v1' in version: + assert check_val in options, '{} not in cmdline options after set' \ + ' to {}'.format(check_val, version) + else: + assert check_val not in options, '{} in cmdline options after set' \ + ' to {}'.format(check_val, version) + + msg = 'System spectre meltdown version is successfully modified to: ' \ + '{}'.format(version) + LOG.info(msg) + return 0, msg + + +def is_avs(con_ssh=None): + vswitch_type = ProjVar.get_var('VSWITCH_TYPE') + if vswitch_type is None: + vswitch_type = get_system_values(fields='vswitch_type', + con_ssh=con_ssh)[0] + ProjVar.set_var(VSWITCH_TYPE=vswitch_type) + return 'ovs' not in vswitch_type + + +def get_controller_uptime(con_ssh, auth_info=Tenant.get('admin_platform')): + """ + Get uptime for all controllers. If no standby controller, then we only + calculate for current active controller. 
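+ Example (illustrative sketch only; assumes an active controller ssh client
+ obtained via ControllerClient.get_active_controller()):
+ con_ssh = ControllerClient.get_active_controller()
+ if get_controller_uptime(con_ssh=con_ssh) < 300:
+ LOG.info("A controller rebooted within the last 5 minutes")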
+ Args: + con_ssh + auth_info + + Returns (int): in seconds + """ + active_con, standby_con = get_active_standby_controllers( + con_ssh=con_ssh, auth_info=auth_info) + active_con_uptime = int( + get_host_values(host=active_con, fields='uptime', con_ssh=con_ssh, + auth_info=auth_info)[0]) + + con_uptime = active_con_uptime + if standby_con: + standby_con_uptime = int( + get_host_values(host=standby_con, fields='uptime', con_ssh=con_ssh, + auth_info=auth_info)[0]) + con_uptime = min(active_con_uptime, standby_con_uptime) + + return con_uptime + + +def add_ml2_extension_drivers(drivers, auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Add given ml2 extension drivers to helm charts override if they don't + currently exist + Args: + drivers (str|list|tuple): + auth_info: + con_ssh: + + Returns (tuple): + + """ + return __update_ml2_extension_drivers(drivers=drivers, enable=True, + auth_info=auth_info, con_ssh=con_ssh) + + +def remove_ml2_extension_drivers(drivers, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Remove given ml2 extension drivers from helm charts override if they exist + Args: + drivers (str|list|tuple): + auth_info: + con_ssh: + + Returns (tuple): + + """ + return __update_ml2_extension_drivers(drivers=drivers, enable=False, + auth_info=auth_info, con_ssh=con_ssh) + + +def __update_ml2_extension_drivers(drivers, enable=True, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Add or remove ml2 extension drivers by updating helm charts user override + + Args: + drivers (str|list|tuple): + enable (bool): whether to enable or disable given ml2 extension + driver(s) + auth_info: + con_ssh: + + Returns (tuple): + + """ + if isinstance(drivers, str): + drivers = (drivers,) + + from keywords import container_helper + known_drivers = ['port_security', 'qos', 'dns'] + all_drivers = known_drivers + [driver for driver in drivers if + driver not in known_drivers] + chart = 'neutron' + + LOG.info("Check existing ml2 extension_drivers") + field = 'combined_overrides' + combined_overrides = \ + container_helper.get_helm_override_values(chart, namespace='openstack', + fields=field)[0] + current_drivers = combined_overrides['conf'].get('plugins', {}).get( + 'ml2_conf', {}).get('ml2', {}). \ + get('extension_drivers', '').split(sep=',') + + if enable: + expt_drivers = set(current_drivers + list(drivers)) + # convert expt_drivers to ordered list by removing unwanted drivers + # from ordered all_drivers list + drivers_to_remove = set(all_drivers) - expt_drivers + expt_drivers = [driver for driver in all_drivers if + driver not in drivers_to_remove] + else: + expt_drivers = [driver for driver in current_drivers if + driver not in drivers] + + if expt_drivers == current_drivers: + LOG.info("ml2 extension drivers already set to {}. 
Do nothing.".format( + expt_drivers)) + return -1, current_drivers + + path = 'conf.plugins.ml2_conf.ml2.extension_drivers' + new_value = ','.join(expt_drivers) + LOG.info("Update neutron helm-override: {}={}".format(path, new_value)) + if len(expt_drivers) <= 1: + kw_args = {'kv_pairs': {path: new_value}} + else: + content = """ + conf: + plugins: + ml2_conf: + ml2: + extension_drivers: {} + """.format(new_value) + yaml_file = os.path.join(HostLinuxUser.get_home(), 'ml2_drivers.yaml') + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + con_ssh.exec_cmd('rm -f {}'.format(yaml_file), get_exit_code=False) + con_ssh.exec_cmd("echo '{}' >> {}".format(content, yaml_file)) + kw_args = {'yaml_file': yaml_file} + + container_helper.update_helm_override(chart=chart, namespace='openstack', + auth_info=auth_info, con_ssh=con_ssh, + **kw_args) + post_overrides = \ + container_helper.get_helm_override_values(chart, namespace='openstack', + fields=field)[0] + post_drivers = post_overrides['conf'].get('plugins', {}).\ + get('ml2_conf', {}).get('ml2', {}).get('extension_drivers', '').\ + split(sep=',') + + if not post_drivers == expt_drivers: + raise exceptions.SysinvError( + "ml2 extension_drivers override is not reflected") + + LOG.info("Re-apply stx-openstack application") + container_helper.apply_app(app_name='stx-openstack', applied_timeout=1200, + auth_info=auth_info, con_ssh=con_ssh) + return 0, post_drivers + + +def get_ptp_values(fields='mode', rtn_dict=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get values from system ptp-show table. + Args: + fields (str|tuple|list): + rtn_dict (bool): whether to return dict or list + con_ssh: + auth_info + + Returns (list|dict): + + """ + table_ = table_parser.table( + cli.system('ptp-show', ssh_client=con_ssh, auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields, + rtn_dict=rtn_dict, + merge_lines=True) + + +def modify_ptp(enabled=None, mode=None, transport=None, mechanism=None, + fail_ok=False, con_ssh=None, clear_alarm=True, + wait_with_best_effort=False, check_first=True, + auth_info=Tenant.get('admin_platform')): + """ + Modify ptp with given parameters + Args: + enabled (bool|None): + mode (str|None): + transport (str|None): + mechanism (str|None): + fail_ok (bool): + clear_alarm (bool): + wait_with_best_effort (bool): + check_first: + auth_info (dict): + con_ssh: + + Returns: + + """ + args_map = { + 'enabled': enabled, + 'mode': mode, + 'transport': transport, + 'mechanism': mechanism, + } + + args_dict = {} + for key, val in args_map.items(): + if val is not None: + args_dict[key] = str(val) + + if not args_dict: + raise ValueError("At least one parameter has to be specified.") + + arg_str = ' '.join(['--{} {}'.format(k, v) for k, v in args_dict.items()]) + + if check_first: + actual_val_list = get_ptp_values(fields=list(args_dict.keys()), + con_ssh=con_ssh, rtn_dict=True, + auth_info=auth_info) + changeparm = False + for field in args_dict: + param_val = args_dict[field] + actual_val = actual_val_list[field] + if actual_val != param_val: + changeparm = True + break + if not changeparm: + return -1, 'No parameter chage' + + code, output = cli.system('ptp-modify', arg_str, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + if clear_alarm: + wait_and_clear_config_out_of_date_alarms( + host_type='controller', + wait_with_best_effort=wait_with_best_effort, + con_ssh=con_ssh, + auth_info=auth_info) + + post_args = 
get_ptp_values(fields=list(args_dict.keys()), con_ssh=con_ssh, + rtn_dict=True, auth_info=auth_info) + for field in args_dict: + expt_val = args_dict[field] + actual_val = post_args[field] + if actual_val != expt_val: + raise exceptions.SysinvError( + "{} in ptp-show is not as expected after modify. Expt: {}; " + "actual: {}". + format(field, expt_val, actual_val)) + + msg = 'ptp modified successfully. {}'.format( + 'Alarm not cleared yet.' if not clear_alarm else '') + return 0, msg + + +def get_ntp_values(fields='ntpservers', rtn_dict=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get values from system ntp-show table. + Args: + fields (str|tuple|list): + rtn_dict (bool) + con_ssh: + auth_info + + Returns (list|dict): + + """ + table_ = table_parser.table( + cli.system('ntp-show', ssh_client=con_ssh, auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields, + rtn_dict=rtn_dict) + + +def get_ntp_servers(con_ssh=None, auth_info=Tenant.get('admin_platform')): + """ + Get ntp servers via system ntp-show + Args: + con_ssh: + auth_info: + + Returns (list): + + """ + ntp_servers = get_ntp_values(fields='ntpservers', rtn_dict=False, + con_ssh=con_ssh, auth_info=auth_info) + ntp_servers = ntp_servers[0].split(',') + return ntp_servers + + +def modify_ntp(enabled=None, ntp_servers=None, check_first=True, fail_ok=False, + clear_alarm=True, + wait_with_best_effort=False, con_ssh=None, + auth_info=Tenant.get('admin_platform'), **kwargs): + """ + + Args: + enabled (bool|None): + ntp_servers (str|None|list|tuple): + check_first (bool) + fail_ok (bool) + clear_alarm (bool): Whether to wait and lock/unlock hosts to clear alarm + wait_with_best_effort (bool): whether to wait for alarm with best + effort only + con_ssh: + check_first: + auth_info: + **kwargs + + Returns (tuple): + (0, ) + (1, ) # cli rejected + + """ + arg = '' + verify_args = {} + if enabled is not None: + arg += '--enabled {}'.format(enabled).lower() + verify_args['enabled'] = str(enabled) + + if ntp_servers: + if isinstance(ntp_servers, (tuple, list)): + ntp_servers = ','.join(ntp_servers) + arg += ' ntpservers="{}"'.format(ntp_servers) + verify_args['ntpservers'] = ntp_servers + + if kwargs: + for k, v in kwargs.items(): + arg += ' {}={}'.format(k, v) + verify_args[k] = v + + if not arg: + raise ValueError( + "Nothing to modify. enable, ntp_servers or kwwargs has to be " + "provided") + + prev_args = None + toggle_state = False + if enabled is not None: + prev_args = get_ntp_values(fields=list(verify_args.keys()), + con_ssh=con_ssh, rtn_dict=True, + auth_info=auth_info) + if prev_args['enabled'] != verify_args['enabled']: + toggle_state = True + + if check_first and not toggle_state: + if not clear_alarm or (clear_alarm and not get_alarms( + alarm_id=EventLogID.CONFIG_OUT_OF_DATE, con_ssh=con_ssh, + entity_id='controller', auth_info=auth_info)): + if not prev_args: + prev_args = get_ntp_values(fields=list(verify_args.keys()), + con_ssh=con_ssh, rtn_dict=True, + auth_info=auth_info) + + for field in verify_args: + expt_val = verify_args[field] + actual_val = prev_args[field] + if actual_val != expt_val: + break + else: + msg = 'NTP already configured with given criteria {}. 
Do ' \ + 'nothing.'.format(verify_args) + LOG.info(msg) + return -1, msg + + code, out = cli.system('ntp-modify', arg.strip(), ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, out + + if clear_alarm: + # config out-of-date alarm only on controller if only ntp servers are + # changed. + # If ntp state changes, ALL hosts need to be lock/unlock. + host_type = None if toggle_state else 'controller' + wait_and_clear_config_out_of_date_alarms( + host_type=host_type, + con_ssh=con_ssh, + auth_info=auth_info, + wait_with_best_effort=wait_with_best_effort) + + post_args = get_ntp_values(fields=list(verify_args.keys()), con_ssh=con_ssh, + rtn_dict=True, auth_info=auth_info) + for field in verify_args: + expt_val = verify_args[field] + actual_val = post_args[field] + if actual_val != expt_val: + raise exceptions.SysinvError( + "{} in ntp-show is not as expected after modify. Expt: {}; " + "actual: {}". + format(field, expt_val, actual_val)) + + msg = 'ntp modified successfully. {}'.format( + 'Alarm not cleared yet.' if not clear_alarm else '') + return 0, msg + + +def wait_and_clear_config_out_of_date_alarms( + hosts=None, host_type=None, lock_unlock=True, wait_timeout=60, + wait_with_best_effort=False, clear_timeout=60, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for config out-of-date alarms on given hosts and (lock/unlock and) + wait for clear + Args: + hosts: + host_type (str|list|tuple): valid types: controller, compute, storage + lock_unlock (bool) + wait_timeout (int) + wait_with_best_effort (bool): + clear_timeout (int) + con_ssh: + auth_info + + Returns: + + """ + from keywords.host_helper import lock_unlock_hosts + + if not hosts: + if not host_type: + host_type = ('controller', 'compute', 'storage') + elif isinstance(host_type, str): + host_type = (host_type,) + + avail_states = (HostAvailState.DEGRADED, HostAvailState.AVAILABLE, + HostAvailState.ONLINE) + hosts_per_type = get_hosts_per_personality(availability=avail_states, + con_ssh=con_ssh, + auth_info=auth_info) + + # host_groups: ordered list for controller, compute, storage hosts + host_groups = [hosts_per_type[type_] for type_ in host_type if + hosts_per_type[type_]] + if not host_groups: + raise exceptions.HostError( + "No valid hosts found for host_type: {}".format(host_type)) + + else: + if isinstance(hosts, str): + hosts = [hosts] + host_groups = [hosts] + + hosts_out_of_date = [] + all_hosts = [] + for hosts_ in host_groups: + LOG.info( + "Wait for config out-of-date alarms for {} with best effort".format( + hosts_)) + all_hosts += hosts_ + if wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, + entity_id=hosts_, timeout=wait_timeout, + con_ssh=con_ssh, fail_ok=True, + auth_info=auth_info)[0]: + hosts_out_of_date += hosts_ + + hosts_out_of_date = list(set(hosts_out_of_date)) + all_hosts = list(set(all_hosts)) + LOG.info("Config out-of-date hosts: {}".format(hosts_out_of_date)) + if hosts_out_of_date: + if lock_unlock: + LOG.info( + "Wait for 60 seconds, then lock/unlock config out-of-date " + "hosts: {}".format(hosts_out_of_date)) + time.sleep(60) + lock_unlock_hosts(hosts_out_of_date, con_ssh=con_ssh, + auth_info=auth_info) + + LOG.info("Wait for config out-of-date alarm to clear on system") + wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, + timeout=clear_timeout, auth_info=auth_info, + con_ssh=con_ssh) + + if not wait_with_best_effort and all_hosts != hosts_out_of_date: + raise exceptions.SysinvError( + "Expect config out of date: {}; actual: 
{}".format( + all_hosts, hosts_out_of_date)) + + +def get_timezone(auth_info=Tenant.get('admin_platform'), con_ssh=None): + return get_system_values(fields='timezone', auth_info=auth_info, + con_ssh=con_ssh)[0] + + +def modify_timezone(timezone, check_first=True, fail_ok=False, clear_alarm=True, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Modify timezone to given zone + Args: + timezone: + check_first: + fail_ok: + clear_alarm: + auth_info: + con_ssh: + + Returns (tuple): + + """ + if check_first: + current_timezone = get_timezone(auth_info=auth_info, con_ssh=con_ssh) + if current_timezone == timezone: + msg = "Timezone is already set to {}. Do nothing.".format(timezone) + LOG.info(msg) + return -1, msg + + LOG.info("Modifying Timezone to {}".format(timezone)) + code, out = modify_system(fail_ok=fail_ok, auth_info=auth_info, + con_ssh=con_ssh, timezone=timezone) + if code > 0: + return 1, out + + if clear_alarm: + if wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, timeout=30, + con_ssh=con_ssh, fail_ok=True, + auth_info=auth_info)[0]: + wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, + timeout=180, con_ssh=con_ssh, + auth_info=auth_info) + + time.sleep(10) + post_timezone = get_timezone(auth_info=auth_info, con_ssh=con_ssh) + if post_timezone != timezone: + msg = 'Timezone is {} instead of {} after modify'.format(post_timezone, + timezone) + if fail_ok: + LOG.warning(msg) + return 2, post_timezone + + raise exceptions.SysinvError(msg) + + LOG.info("Timezone is successfully modified to {}".format(timezone)) + return 0, timezone + + +def create_data_network(name, net_type='vlan', mode=None, mtu=None, + port_num=None, multicast_group=None, ttl=None, + description=None, field='uuid', fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform'), cleanup=None): + """ + Add a datanetwork + Args: + name (str): + net_type (str): vlan, vxlan or flat + mode (None|str|None): + mtu (int|str|None): + port_num (int|str|None): + multicast_group (str|None): + ttl (int|str|None): + description (str|None): + field (str): uuid or name + fail_ok: + con_ssh: + auth_info: + cleanup (str|None): function, class, module or session + + Returns (tuple): + (0, ) + (1, ) + + """ + args_dict = { + 'description': description, + 'mtu': mtu, + 'port_num': port_num, + 'multicast_group': multicast_group, + 'ttl': ttl, + 'mode': mode, + } + args = '{} {} {}'.format(common.parse_args(args_dict), name, net_type) + code, output = cli.system('datanetwork-add', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + table_ = table_parser.table(output) + LOG.info("data network {} is created successfully".format(name)) + + if cleanup: + uuid = table_parser.get_value_two_col_table(table_, field='uuid') + ResourceCleanup.add('datanetwork', uuid, scope=cleanup) + + return 0, table_parser.get_value_two_col_table(table_, field) + + +def get_data_networks(field='name', con_ssh=None, + auth_info=Tenant.get('admin_platform'), strict=True, + **kwargs): + """ + Get values from system datanetwork-list + Args: + field (str|tuple|list): + con_ssh: + auth_info: + strict: + **kwargs: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('datanetwork-list', ssh_client=con_ssh, auth_info=auth_info)[ + 1]) + return table_parser.get_multi_values(table_, fields=field, strict=strict, + **kwargs) + + +def get_data_network_values(datanetwork, fields=('uuid',), fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ 
+ Get datanetwork values from system datanetwork-show table. + Args: + datanetwork (str): name or uuid of datanetwork + fields (str|tuple|list): + fail_ok: + con_ssh: + auth_info: + + Returns (list|None): values for given fields. None if cli is rejected. + + """ + code, output = cli.system('datanetwork-show', datanetwork, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return None + + table_ = table_parser.table(output) + return table_parser.get_multi_values_two_col_table(table_=table_, + fields=fields) + + +def delete_data_network(datanetwork_uuid, fail_ok=False, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Delete given datanetwork + Args: + datanetwork_uuid (str): + fail_ok: + con_ssh: + auth_info: + + Returns (tuple): + (0, "datanetwork deleted successfully") + (1, ) + (2, "datanetwork still exists after deletion") + + """ + code, output = cli.system('datanetwork-delete', datanetwork_uuid, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + if get_data_network_values(datanetwork=datanetwork_uuid, con_ssh=con_ssh, + auth_info=auth_info, fail_ok=True): + err = 'datanetwork {} still exists after deletion'.format( + datanetwork_uuid) + LOG.warning(err) + if fail_ok: + return 2, err + else: + raise exceptions.SysinvError(err) + + msg = 'datanetwork {} deleted successfully'.format(datanetwork_uuid) + LOG.info(msg) + return 0, msg + + +def get_addr_pools(field, name=None, uuid=None, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get values from system addrpool-list + Args: + field (str|list|tuple): + name: + uuid: + con_ssh: + auth_info: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('addrpool-list --nowrap', ssh_client=con_ssh, + auth_info=auth_info)[1]) + + kwargs = {'name': name, 'uuid': uuid} + return table_parser.get_multi_values(table_=table_, fields=field, + **{k: v for k, v in kwargs.items()}) + + +def get_addr_pool_values(fields, addr_pool=None, network_type=None, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get values from system addrpool-show + Args: + fields (str|tuple|list): + addr_pool: + network_type: + con_ssh: + auth_info: + + Returns (list): + + """ + if not addr_pool and not network_type: + raise ValueError('addr_pool uuid or network_type has to be provided') + + if not addr_pool: + addr_pool = \ + get_system_networks(field='pool_uuid', net_type=network_type, + con_ssh=con_ssh, auth_info=auth_info)[0] + if not addr_pool: + raise exceptions.SysinvError( + "No pool_uuid found for network type {}".format(network_type)) + + table_ = table_parser.table( + cli.system('addrpool-show', addr_pool, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_multi_values_two_col_table(table_, fields=fields) + + +def get_system_network_cidr(network_type, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get cidr for given network type, such as mgmt, oam, cluster-host, etc. 
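+ Example (illustrative only; the returned CIDR depends on the lab under test):
+ mgmt_cidr = get_system_network_cidr('mgmt')  # e.g. '192.168.204.0/24'
+ oam_cidr = get_system_network_cidr('oam', con_ssh=con_ssh)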
+ Args: + network_type: + con_ssh: + auth_info: + + Returns (str): + + """ + network, prefix = get_addr_pool_values(fields=('network', 'prefix'), + network_type=network_type, + con_ssh=con_ssh, + auth_info=auth_info) + + return '{}/{}'.format(network, prefix) + + +def get_host_values(host, fields, rtn_dict=False, merge_lines=True, + auth_info=Tenant.get('admin_platform'), + con_ssh=None): + """ + Get values from system host-show + Args: + host (str): + fields (str|list|tuple): + rtn_dict: + merge_lines + auth_info: + con_ssh: + + Returns (list): + + """ + table_ = table_parser.table( + cli.system('host-show', host, ssh_client=con_ssh, auth_info=auth_info)[ + 1], + combine_multiline_entry=merge_lines) + return table_parser.get_multi_values_two_col_table(table_, fields, + rtn_dict=rtn_dict, + evaluate=True) + + +def get_hosts_values(hosts, fields, merge_lines=False, rtn_dict_per_host=True, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get values for multiple hosts via system host-show + Args: + hosts: + fields: + merge_lines: + rtn_dict_per_host: + con_ssh: + auth_info: + + Returns (dict): + e.g., {'controller-0': {'task': booting, 'subfunctions': ...}, + 'controller-1':...} + + """ + if isinstance(fields, str): + fields = [fields] + + states_vals = {} + for host in hosts: + vals = get_host_values(host, fields, merge_lines=merge_lines, + con_ssh=con_ssh, + rtn_dict=rtn_dict_per_host, auth_info=auth_info) + states_vals[host] = vals + + return states_vals + + +def get_ntpq_status(host, mgmt_cidr=None, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Get ntp status via 'sudo ntpq -pn' + + Args: + host (str): host to check + mgmt_cidr (str): internal management ip from peer host + con_ssh (SSHClient) + auth_info + + Returns(tuple): (, ) + - (0, " NTPQ is in healthy state") + - (1, "No NTP server selected") + - (2, "Some NTP servers are discarded") + + """ + if not mgmt_cidr: + mgmt_cidr = get_system_network_cidr('mgmt', con_ssh=con_ssh, + auth_info=auth_info) + + cmd = 'ntpq -pn' + from keywords import host_helper + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + output = host_ssh.exec_sudo_cmd(cmd, fail_ok=False)[1] + + output_lines = output.splitlines() + server_lines = list(output_lines) + for line in output_lines: + server_lines.remove(line) + if '======' in line: + break + + selected = None + discarded = [] + for server_line in server_lines: + try: + # Check if its an internal mgmt net ip + if ipaddress.ip_address(server_line[1:]) in ipaddress.ip_network( + mgmt_cidr): + continue + except ValueError: + pass + + if server_line.startswith('*'): + selected = server_line + elif server_line.startswith('-') or server_line.startswith( + 'x') or server_line.startswith(' '): + discarded.append(server_line) + + if not selected: + return 1, "No NTP server selected" + + if discarded: + return 2, "Some NTP servers are discarded" + + return 0, "{} NTPQ is in healthy state".format(host) + + +def wait_for_ntp_sync(host, timeout=MiscTimeout.NTPQ_UPDATE, fail_ok=False, + con_ssh=None, + auth_info=Tenant.get('admin_platform')): + """ + Wait for ntp alarm inline with sudo ntpq output. 
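+ Example (illustrative only; hostname below is an assumption):
+ if not wait_for_ntp_sync('controller-0', fail_ok=True):
+ LOG.warning("NTP alarm does not agree with 'sudo ntpq -pn' output")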
+ Args: + host: + timeout: + fail_ok: + con_ssh: + auth_info: + + Returns (bool): + + """ + + LOG.info("Waiting for ntp alarm to clear or sudo ntpq -pn indicate " + "unhealthy server for {}".format(host)) + end_time = time.time() + timeout + msg = ntp_alarms = None + if not con_ssh: + con_name = auth_info.get('region') if ( + auth_info and ProjVar.get_var('IS_DC')) else None + con_ssh = ControllerClient.get_active_controller(name=con_name) + + mgmt_cidr = get_system_network_cidr('mgmt', con_ssh=con_ssh, + auth_info=auth_info) + while time.time() < end_time: + ntp_alarms = get_alarms(alarm_id=EventLogID.NTP_ALARM, entity_id=host, + strict=False, + con_ssh=con_ssh, auth_info=auth_info) + status, msg = get_ntpq_status(host, mgmt_cidr=mgmt_cidr, + con_ssh=con_ssh, auth_info=auth_info) + if ntp_alarms and status != 0: + LOG.info("Valid NTP alarm") + return True + elif not ntp_alarms and status == 0: + LOG.info("NTP alarm cleared and sudo ntpq shows servers healthy") + return True + + LOG.info("NTPQ status: {}; NTP alarms: {}".format(msg, ntp_alarms)) + time.sleep(30) + + err_msg = "Timed out waiting for NTP alarm to be in sync with ntpq " \ + "output. NTPQ status: {}; NTP alarms: {}".format(msg, ntp_alarms) + if fail_ok: + LOG.warning(err_msg) + return False + + raise exceptions.HostTimeout(err_msg) + + +def __hosts_stay_in_states(hosts, duration=10, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + **states): + """ + Check if hosts stay in specified state(s) for given duration. + + Args: + hosts (list|str): hostname(s) + duration (int): duration to check for in seconds + con_ssh (SSHClient): + **states: such as availability=[online, available] + + Returns: + bool: True if host stayed in specified states for given duration; + False if host is not in specified states + anytime in the duration. 
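+ # Example usage (illustrative only):
+ #   __hosts_stay_in_states(['compute-0', 'compute-1'], duration=10,
+ #                          availability=['available'])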
+ + """ + end_time = time.time() + duration + while time.time() < end_time: + if not __hosts_in_states(hosts=hosts, con_ssh=con_ssh, + auth_info=auth_info, **states): + return False + time.sleep(1) + + return True + + +def wait_for_hosts_states(hosts, timeout=HostTimeout.REBOOT, check_interval=5, + duration=3, con_ssh=None, + fail_ok=True, auth_info=Tenant.get('admin_platform'), + **states): + """ + Wait for hosts to go in specified states via system host-list + + Args: + hosts (str|list): + timeout (int): + check_interval (int): + duration (int): wait for a host to be in given state(s) for at + least seconds + con_ssh (SSHClient): + fail_ok (bool) + auth_info + **states: such as availability=[online, available] + + Returns (bool): True if host reaches specified states within timeout, + and stays in states for given duration; False otherwise + + """ + if not hosts: + raise ValueError("No host(s) provided to wait for states.") + + if isinstance(hosts, str): + hosts = [hosts] + for key, value in states.items(): + if isinstance(value, str): + value = [value] + states[key] = value + + LOG.info("Waiting for {} to reach state(s): {}...".format(hosts, states)) + end_time = time.time() + timeout + while time.time() < end_time: + if __hosts_stay_in_states(hosts, con_ssh=con_ssh, + duration=duration, auth_info=auth_info, + **states): + LOG.info("{} have reached state(s): {}".format(hosts, states)) + return True + time.sleep(check_interval) + else: + msg = "Timed out waiting for {} in state(s) - {}".format(hosts, states) + if fail_ok: + LOG.warning(msg) + return False + raise exceptions.HostTimeout(msg) + + +def __hosts_in_states(hosts, con_ssh=None, + auth_info=Tenant.get('admin_platform'), + **states): + actual_values = get_hosts(hostname=hosts, field=list(states.keys()), + con_ssh=con_ssh, + auth_info=auth_info, rtn_dict=True) + for field, expt_values in states.items(): + actual_states = actual_values[field] + for actual_state in actual_states: + if actual_state not in expt_values: + LOG.debug("At least one host from {} has {} state(s) in {} " + "instead of {}".format(hosts, field, actual_state, + expt_values)) + return False + + return True + + +def wait_for_host_values(host, timeout=HostTimeout.REBOOT, check_interval=3, + strict=True, regex=False, fail_ok=True, + con_ssh=None, auth_info=Tenant.get('admin_platform'), + **kwargs): + """ + Wait for host values via system host-show + Args: + host: + timeout: + check_interval: + strict: + regex: + fail_ok: + con_ssh: + auth_info + **kwargs: key/value pair to wait for. 
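+ Example (illustrative only; field names map to 'system host-show' output):
+ wait_for_host_values('compute-1', timeout=360, fail_ok=True,
+ operational='enabled', availability=['available', 'degraded'])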
+ + Returns: + + """ + if not kwargs: + raise ValueError( + "Expected host state(s) has to be specified via " + "keyword argument states") + + LOG.info("Waiting for {} to reach state(s) - {}".format(host, kwargs)) + end_time = time.time() + timeout + last_vals = {} + for field in kwargs: + last_vals[field] = None + + while time.time() < end_time: + actual_vals = get_host_values(host, fields=list(kwargs.keys()), + con_ssh=con_ssh, rtn_dict=True, + auth_info=auth_info, merge_lines=False) + for field, expt_vals in kwargs.items(): + actual_val = actual_vals[field] + if isinstance(actual_val, list): + actual_val = ' '.join(actual_val) + + actual_val_lower = actual_val.lower() + if isinstance(expt_vals, str): + expt_vals = [expt_vals] + + for expected_val in expt_vals: + expected_val_lower = expected_val.strip().lower() + found_match = False + if regex: + if strict: + res_ = re.match(expected_val_lower, actual_val_lower) + else: + res_ = re.search(expected_val_lower, actual_val_lower) + if res_: + found_match = True + else: + if strict: + found_match = actual_val_lower == expected_val_lower + else: + found_match = actual_val_lower in expected_val_lower + + if found_match: + LOG.info( + "{} {} has reached: {}".format(host, field, actual_val)) + break + else: # no match found. run system host-show again + if last_vals[field] != actual_val_lower: + LOG.info("{} {} is {}.".format(host, field, actual_val)) + last_vals[field] = actual_val_lower + break + else: + LOG.info("{} is in state(s): {}".format(host, kwargs)) + return True + time.sleep(check_interval) + else: + msg = "{} did not reach state(s) within {}s - {}".format(host, timeout, + kwargs) + if fail_ok: + LOG.warning(msg) + return False + raise exceptions.TimeoutException(msg) + + +def is_active_controller(host, con_ssh=None, + auth_info=Tenant.get('admin_platform')): + personality = get_host_values(host, fields='capabilities', + auth_info=auth_info, + merge_lines=True, + con_ssh=con_ssh)[0].get('Personality', '') + return personality.lower() == 'Controller-Active'.lower() + + +def is_lowlatency_host(host): + subfuncs = get_host_values(host=host, fields='subfunctions')[0] + return 'lowlatency' in subfuncs diff --git a/automated-pytest-suite/keywords/vm_helper.py b/automated-pytest-suite/keywords/vm_helper.py new file mode 100755 index 0000000..ab75d61 --- /dev/null +++ b/automated-pytest-suite/keywords/vm_helper.py @@ -0,0 +1,5988 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import copy +import math +import os +import random +import re +import time +import ipaddress +import pexpect +from contextlib import contextmanager + +from consts.auth import Tenant, TestFileServer, HostLinuxUser +from consts.stx import VMStatus, NovaCLIOutput, EXT_IP, ImageStatus, \ + VMNetwork, EventLogID, GuestImages, Networks, FlavorSpec, VimEventID +from consts.filepaths import VMPath, UserData, TestServerPath +from consts.proj_vars import ProjVar +from consts.timeout import VMTimeout, CMDTimeout +from utils import exceptions, cli, table_parser, multi_thread +from utils import local_host +from utils.clients.ssh import NATBoxClient, VMSSHClient, ControllerClient, \ + Prompt, get_cli_client +from utils.clients.local import LocalHostClient +from utils.guest_scripts.scripts import TisInitServiceScript +from utils.multi_thread import MThread, Events +from utils.tis_log import LOG +from keywords import network_helper, nova_helper, cinder_helper, host_helper, \ + glance_helper, common, system_helper, \ + storage_helper +from testfixtures.fixture_resources import ResourceCleanup +from testfixtures.recover_hosts import HostsToRecover + + +def set_vm(vm_id, name=None, state=None, con_ssh=None, auth_info=None, + fail_ok=False, **properties): + """ + Set vm with given parameters - name, state, and/or properties + Args: + vm_id: + name: + state: + con_ssh: + auth_info: + fail_ok: + **properties: + + Returns (tuple): + (0, ) + + """ + args_dict = { + '--name': name, + '--state': state.lower() if state else None, + '--property': properties, + } + args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), vm_id) + LOG.info("Setting vm with args: {}".format(args)) + code, output = cli.openstack('server set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + msg = "VM {} is set successfully.".format(vm_id) + LOG.info(msg) + return 0, msg + + +def unset_vm(vm_id, properties, con_ssh=None, auth_info=None, fail_ok=False): + """ + Unset given properties for VM + Args: + vm_id: + properties: + con_ssh: + auth_info: + fail_ok: + + Returns (tuple): + (1, ) - cli rejected + (0, "VM properties unset successfully: ") + + """ + if isinstance(properties, str): + properties = (properties,) + + args = '{} {}'.format( + common.parse_args({'--property': properties}, repeat_arg=True), vm_id) + LOG.info("Unsetting vm {} properties: {}".format(vm_id, properties)) + code, output = cli.openstack('server unset', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + msg = "VM {} properties unset successfully: {}".format(vm_id, properties) + LOG.info(msg) + return 0, msg + + +def get_any_vms(count=None, con_ssh=None, auth_info=None, all_tenants=False, + rtn_new=False): + """ + Get a list of ids of any active vms. + + Args: + count (int): number of vms ids to return. If None, all vms for + specific tenant will be returned. 
If the number of + existing VMs is less than count, additional VMs will be created + to match the count. + con_ssh (SSHClient): + auth_info (dict): + all_tenants (bool): whether to get any vms from all tenants or just + admin tenant if auth_info is set to Admin + rtn_new (bool): whether to return an extra list containing only the + newly created vms + + Returns (list): + vms(list) # rtn_new=False + [vms(list), new_vms(list)] # rtn_new=True + + """ + vms = get_vms(con_ssh=con_ssh, auth_info=auth_info, + all_projects=all_tenants, Status='ACTIVE') + if count is None: + if rtn_new: + vms = [vms, []] + return vms + diff = count - len(vms) + if diff <= 0: + vms = random.sample(vms, count) + if rtn_new: + vms = [vms, []] + return vms + + new_vms = [] + for i in range(diff): + new_vm = boot_vm(con_ssh=con_ssh, auth_info=auth_info)[1] + vms.append(new_vm) + new_vms.append(new_vm) + + if rtn_new: + vms = [vms, new_vms] + return vms + + +def create_image_from_vm(vm_id, image_name=None, wait=True, + expt_cinder_snapshot=None, + fail_ok=False, con_ssh=None, auth_info=None, + cleanup=None): + """ + Create glance image from an existing vm + Args: + vm_id: + image_name: + wait: + expt_cinder_snapshot (bool): if vm was booted from cinder volume, + then a cinder snapshot is expected + fail_ok: + con_ssh: + auth_info: + cleanup (None|str): valid scopes: function, class, module, session + + Returns (tuple): + + """ + LOG.info("Creating image from vm {}".format(vm_id)) + args_dict = {'--name': image_name, '--wait': wait} + args = '{} {}'.format(common.parse_args(args_dict), vm_id) + code, out = cli.openstack('server image create', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + table_ = table_parser.table(out) + image_id = table_parser.get_value_two_col_table(table_, 'id') + cinder_snapshot_id = None + if cleanup and image_id: + ResourceCleanup.add('image', image_id, scope=cleanup) + + if code > 0: + return 1, out, cinder_snapshot_id + + post_name = table_parser.get_value_two_col_table(table_, 'name') + if image_name and image_name != post_name: + raise exceptions.NovaError( + "Created image does not have the expected name.
Actual {}, expected: " + "{}".format(post_name, image_name)) + + LOG.info( + "Wait for created image {} to reach active state".format(post_name)) + glance_helper.wait_for_image_status(image_id, status=ImageStatus.ACTIVE, + con_ssh=con_ssh, auth_info=auth_info) + + image_size = table_parser.get_value_two_col_table(table_, 'size') + if str(image_size) == '0' or expt_cinder_snapshot: + cinder_snapshotname = "snapshot for {}".format(post_name) + vol_snapshots = cinder_helper.get_vol_snapshots( + name=cinder_snapshotname) + if not vol_snapshots: + raise exceptions.CinderError( + "cinder snapshot expected, but was not found: {}".format( + cinder_snapshotname)) + cinder_snapshot_id = vol_snapshots[0] + if cleanup: + ResourceCleanup.add('vol_snapshot', cinder_snapshot_id) + + LOG.info("glance image {} successfully created from vm {}".format(post_name, + vm_id)) + return 0, image_id, cinder_snapshot_id + + +def add_security_group(vm_id, security_group, fail_ok=False, con_ssh=None, + auth_info=None): + """ + Add given security group to vm + Args: + vm_id: + security_group: + fail_ok: + con_ssh: + auth_info: + + Returns (tuple): + + """ + LOG.info("Adding security group {} to vm {}".format(security_group, vm_id)) + args = '{} {}'.format(vm_id, security_group) + code, output = cli.openstack('server add security group', args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + if code > 0: + return 1, output + + msg = "Security group {} added to VM {} successfully".format(security_group, + vm_id) + LOG.info(msg) + return 0, msg + + +def wait_for_vol_attach(vm_id, vol_id, timeout=VMTimeout.VOL_ATTACH, + con_ssh=None, auth_info=None, fail_ok=False): + """ + Wait for volume attachment appear in openstack server show as well as + opentstack volume show + Args: + vm_id: + vol_id: + timeout: + con_ssh: + auth_info: + fail_ok: + + Returns (bool): + + """ + end_time = time.time() + timeout + while time.time() < end_time: + vols_attached = get_vm_volumes(vm_id=vm_id, con_ssh=con_ssh, + auth_info=auth_info) + if vol_id in vols_attached: + cinder_helper.wait_for_volume_status(vol_id, status='in-use', + timeout=120, fail_ok=False, + con_ssh=con_ssh, + auth_info=auth_info) + return True + time.sleep(5) + else: + msg = "Volume {} is not shown in nova show {} in {} seconds".format( + vol_id, vm_id, timeout) + LOG.warning(msg) + if not fail_ok: + raise exceptions.VMError(msg) + return False + + +def attach_vol_to_vm(vm_id, vol_id=None, device=None, mount=False, con_ssh=None, + auth_info=None, fail_ok=False, + cleanup=None): + """ + Attach a volume to VM + Args: + vm_id (str): + vol_id (str|None): volume to attach. When None, a non-bootable volume + will be created to attach to given vm + device (str|None): whether to specify --device in cmd + mount (bool): if True, login to vm and attempt to mount the device + after attached. Best effort only. 
+ con_ssh: + auth_info: + fail_ok: + cleanup: + + Returns: + + """ + if not vol_id: + vol_id = \ + cinder_helper.create_volume(bootable=False, auth_info=auth_info, + con_ssh=con_ssh, cleanup=cleanup)[1] + + LOG.info("Attaching volume {} to vm {}".format(vol_id, vm_id)) + args = '{}{} {}'.format('--device {} '.format(device) if device else '', + vm_id, vol_id) + code, output = cli.openstack('server add volume', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + LOG.info( + "Waiting for attached volume to appear in openstack server show and " + "volume show") + wait_for_vol_attach(vm_id=vm_id, vol_id=vol_id, con_ssh=con_ssh, + auth_info=auth_info) + + if mount: + LOG.info("Mount attached volume {} to vm {}".format(vol_id, vm_id)) + guest = get_vm_image_name(vm_id) + if not (guest and 'cgcs-guest' in guest): + attached_devs = get_vm_volume_attachments(vm_id=vm_id, + field='device', + vol_id=vol_id, + auth_info=auth_info, + con_ssh=con_ssh) + device_name = attached_devs[0] + device = device_name.split('/')[-1] + LOG.info( + "Volume {} is attached to VM {} as {}".format(vol_id, vm_id, + device_name)) + mount_attached_volume(vm_id, device, vm_image_name=guest) + + return 0, vol_id + + +def is_attached_volume_mounted(vm_id, rootfs, vm_image_name=None, vm_ssh=None): + """ + Checks if an attached volume is mounted in VM + Args: + vm_id (str): - the vm uuid where the volume is attached to + rootfs (str) - the device name of the attached volume like vda, vdb, + vdc, .... + vm_image_name (str): - the guest image the vm is booted with + vm_ssh (VMSSHClient): ssh client session to vm + Returns: bool + + """ + + if vm_image_name is None: + vm_image_name = get_vm_image_name(vm_id) + + cmd = "mount | grep {} | wc -l".format(rootfs) + mounted_msg = "Filesystem /dev/{} is mounted: {}".format(rootfs, vm_id) + not_mount_msg = "Filesystem /dev/{} is not mounted: {}".format(rootfs, + vm_id) + if vm_ssh: + cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1] + if cmd_output != '0': + LOG.info(mounted_msg) + return True + LOG.info(not_mount_msg) + return False + + with ssh_to_vm_from_natbox(vm_id, vm_image_name=vm_image_name) as vm_ssh: + + cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1] + if cmd_output != '0': + LOG.info(mounted_msg) + return True + LOG.info(not_mount_msg) + return False + + +def get_vm_volume_attachments(vm_id, vol_id=None, field='device', con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get volume attachments for given vm + Args: + vm_id: + vol_id: + field: + con_ssh: + auth_info: + + Returns (list): + + """ + # No replacement in openstack client + table_ = table_parser.table( + cli.nova('volume-attachments', vm_id, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return table_parser.get_values(table_, field, **{'volume id': vol_id}) + + +def mount_attached_volume(vm_id, rootfs, vm_image_name=None): + """ + Mounts an attached volume on VM + Args: + vm_id (str): - the vm uuid where the volume is attached to + rootfs (str) - the device name of the attached volume like vda, vdb, + vdc, .... 
+ vm_image_name (str): - the guest image the vm is booted with + + Returns: bool + + """ + wait_for_vm_pingable_from_natbox(vm_id) + if vm_image_name is None: + vm_image_name = get_vm_image_name(vm_id) + + with ssh_to_vm_from_natbox(vm_id, vm_image_name=vm_image_name) as vm_ssh: + + if not is_attached_volume_mounted(vm_id, rootfs, + vm_image_name=vm_image_name, + vm_ssh=vm_ssh): + LOG.info("Creating ext4 file system on /dev/{} ".format(rootfs)) + cmd = "mkfs -t ext4 /dev/{}".format(rootfs) + rc, output = vm_ssh.exec_cmd(cmd) + if rc != 0: + msg = "Failed to create filesystem on /dev/{} for vm " \ + "{}: {}".format(rootfs, vm_id, output) + LOG.warning(msg) + return False + LOG.info("Mounting /dev/{} to /mnt/volume".format(rootfs)) + cmd = "test -e /mnt/volume" + rc, output = vm_ssh.exec_cmd(cmd) + mount_cmd = '' + if rc == 1: + mount_cmd += "mkdir -p /mnt/volume; mount /dev/{} " \ + "/mnt/volume".format(rootfs) + else: + mount_cmd += "mount /dev/{} /mnt/volume".format(rootfs) + + rc, output = vm_ssh.exec_cmd(mount_cmd) + if rc != 0: + msg = "Failed to mount /dev/{} for vm {}: {}".format(rootfs, + vm_id, + output) + LOG.warning(msg) + return False + + LOG.info( + "Adding /dev/{} mounting point in /etc/fstab".format(rootfs)) + cmd = "echo \"/dev/{} /mnt/volume ext4 defaults 0 0\" >> " \ + "/etc/fstab".format(rootfs) + + rc, output = vm_ssh.exec_cmd(cmd) + if rc != 0: + msg = "Failed to add /dev/{} mount point to /etc/fstab for " \ + "vm {}: {}".format(rootfs, vm_id, output) + LOG.warning(msg) + + LOG.info( + "/dev/{} is mounted to /mnt/volume for vm {}".format(rootfs, + vm_id)) + return True + else: + LOG.info( + "/dev/{} is already mounted to /mnt/volume for vm {}".format( + rootfs, vm_id)) + return True + + +def get_vm_devices_via_virsh(vm_id, con_ssh=None): + """ + Get vm disks in dict format via 'virsh domblklist ' + Args: + vm_id (str): + con_ssh: + + Returns (dict): vm disks per type. + Examples: + {'root_img': {'vda': '/dev/nova-local/a746beb9-08e4-4b08-af2a + -000c8ca72851_disk'}, + 'attached_vol': {'vdb': '/dev/disk/by-path/ip-192.168.205.106:3260-iscsi + -iqn.2010-10.org.openstack:volume-...'}, + 'swap': {}, + 'eph': {}} + + """ + vm_host = get_vm_host(vm_id=vm_id, con_ssh=con_ssh) + inst_name = get_vm_instance_name(vm_id=vm_id, con_ssh=con_ssh) + + with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh: + output = host_ssh.exec_sudo_cmd('virsh domblklist {}'.format(inst_name), + fail_ok=False)[1] + disk_lines = output.split('-------------------------------\n', 1)[ + -1].splitlines() + + disks = {} + root_line = disk_lines.pop(0) + root_dev, root_source = root_line.split() + if re.search('openstack:volume|cinder-volumes|/dev/sd', root_source): + disk_type = 'root_vol' + else: + disk_type = 'root_img' + disks[disk_type] = {root_dev: root_source} + LOG.info("Root disk: {}".format(disks)) + + disks.update({'eph': {}, 'swap': {}, 'attached_vol': {}}) + for line in disk_lines: + dev, source = line.split() + if re.search('disk.swap', source): + disk_type = 'swap' + elif re.search('openstack:volume|cinder-volumes|/dev/sd', source): + disk_type = 'attached_vol' + elif re.search('disk.eph|disk.local', source): + disk_type = 'eph' + else: + raise exceptions.CommonError( + "Unknown disk in virsh: {}. 
Automation update " + "required.".format( + line)) + disks[disk_type][dev] = source + + LOG.info("disks for vm {}: {}".format(vm_id, disks)) + return disks + + +def get_vm_boot_volume_via_virsh(vm_id, con_ssh=None): + """ + Get cinder volume id where the vm is booted from via virsh cmd. + Args: + vm_id (str): + con_ssh (SSHClient): + + Returns (str|None): vol_id or None if vm is not booted from cinder volume + + """ + disks = get_vm_devices_via_virsh(vm_id=vm_id, con_ssh=con_ssh) + root_vol = disks.get('root_vol', {}) + if not root_vol: + LOG.info("VM is not booted from volume. Return None") + return + + root_vol = list(root_vol.values())[0] + root_vol = re.findall('openstack:volume-(.*)-lun', root_vol)[0] + LOG.info("vm {} is booted from cinder volume {}".format(vm_id, root_vol)) + return root_vol + + +def auto_mount_vm_devices(vm_id, devices, guest_os=None, check_first=True, + vm_ssh=None): + """ + Mount and auto mount devices on vm + Args: + vm_id (str): - the vm uuid where the volume is attached to + devices (str|list) - the device name(s). such as vdc or [vda, vdb] + guest_os (str): - the guest image the vm is booted with. such as + tis-centos-guest + check_first (bool): where to check if the device is already mounted + and auto mounted before mount and automount + vm_ssh (VMSSHClient): + """ + if isinstance(devices, str): + devices = [devices] + + def _auto_mount(vm_ssh_): + _mounts = [] + for disk in devices: + fs = '/dev/{}'.format(disk) + mount_on, fs_type = storage_helper.mount_partition( + ssh_client=vm_ssh_, disk=disk, partition=fs) + storage_helper.auto_mount_fs(ssh_client=vm_ssh_, fs=fs, + mount_on=mount_on, fs_type=fs_type, + check_first=check_first) + _mounts.append(mount_on) + return _mounts + + if vm_ssh: + mounts = _auto_mount(vm_ssh_=vm_ssh) + else: + with ssh_to_vm_from_natbox(vm_id, vm_image_name=guest_os) as vm_ssh: + mounts = _auto_mount(vm_ssh_=vm_ssh) + + return mounts + + +def touch_files(vm_id, file_dirs, file_name=None, content=None, guest_os=None): + """ + touch files from vm in specified dirs,and adds same content to all + touched files. + Args: + vm_id (str): + file_dirs (list): e.g., ['/', '/mnt/vdb'] + file_name (str|None): defaults to 'test_file.txt' if set to None + content (str|None): defaults to "I'm a test file" if set to None + guest_os (str|None): default guest assumed to set to None + + Returns (tuple): (, ) + + """ + if not file_name: + file_name = 'test_file.txt' + if not content: + content = "I'm a test file" + + if isinstance(file_dirs, str): + file_dirs = [file_dirs] + file_paths = [] + with ssh_to_vm_from_natbox(vm_id=vm_id, vm_image_name=guest_os) as vm_ssh: + for file_dir in file_dirs: + file_path = "{}/{}".format(file_dir, file_name) + file_path = file_path.replace('//', '/') + vm_ssh.exec_sudo_cmd( + 'mkdir -p {}; touch {}'.format(file_dir, file_path), + fail_ok=False) + time.sleep(3) + vm_ssh.exec_sudo_cmd('echo "{}" >> {}'.format(content, file_path), + fail_ok=False) + output = \ + vm_ssh.exec_sudo_cmd('cat {}'.format(file_path), + fail_ok=False)[1] + # TO DELETE: Debugging purpose only + vm_ssh.exec_sudo_cmd('mount | grep vd') + assert content in output, "Expected content {} is not in {}. " \ + "Actual content: {}". 
\ + format(content, file_path, output) + file_paths.append(file_path) + + vm_ssh.exec_sudo_cmd('sync') + return file_paths, content + + +def auto_mount_vm_disks(vm_id, disks=None, guest_os=None): + """ + Auto mount non-root vm disks and return all the mount points including + root dir + Args: + vm_id (str): + disks (dict|None): disks returned by get_vm_devices_via_virsh() + guest_os (str|None): when None, default guest is assumed. + + Returns (list): list of mount points. e.g., ['/', '/mnt/vdb'] + + """ + if not disks: + disks_to_check = get_vm_devices_via_virsh(vm_id=vm_id) + else: + disks_to_check = copy.deepcopy(disks) + + root_disk = disks_to_check.pop('root_vol', {}) + if not root_disk: + disks_to_check.pop('root_img') + + # add root dir + mounted_on = ['/'] + devs_to_mount = [] + for val in disks_to_check.values(): + devs_to_mount += list(val.keys()) + + LOG.info("Devices to mount: {}".format(devs_to_mount)) + if devs_to_mount: + mounted_on += auto_mount_vm_devices(vm_id=vm_id, devices=devs_to_mount, + guest_os=guest_os) + else: + LOG.info("No non-root disks to mount for vm {}".format(vm_id)) + + return mounted_on + + +vif_map = { + 'e1000': 'normal', + 'rt18139': 'normal', + 'virtio': 'normal', + 'avp': 'normal', + 'pci-sriov': 'direct', + 'pci-passthrough': 'direct-physical'} + + +def _convert_vnics(nics, con_ssh, auth_info, cleanup): + """ + Conversion from wrs vif-model to upstream implementation + Args: + nics (list|tuple|dict): + con_ssh + auth_info + cleanup (None|str) + + Returns (list): + + """ + converted_nics = [] + for nic in nics: + nic = dict(nic) # Do not modify original nic param + if 'vif-model' in nic: + vif_model = nic.pop('vif-model') + if vif_model: + vnic_type = vif_map[vif_model] + vif_model_ = vif_model if ( + system_helper.is_avs() and vnic_type == 'normal')\ + else None + if 'port-id' in nic: + port_id = nic['port-id'] + current_vnic_type, current_vif_model = \ + network_helper.get_port_values( + port=port_id, + fields=('binding_vnic_type', 'binding_profile'), + con_ssh=con_ssh, + auth_info=auth_info) + if current_vnic_type != vnic_type or ( + vif_model_ and vif_model_ not in current_vif_model): + network_helper.set_port(port_id, vnic_type=vnic_type, + con_ssh=con_ssh, + auth_info=auth_info, + wrs_vif=vif_model_) + else: + net_id = nic.pop('net-id') + port_name = common.get_unique_name( + 'port_{}'.format(vif_model)) + port_id = network_helper.create_port(net_id, name=port_name, + wrs_vif=vif_model_, + vnic_type=vnic_type, + con_ssh=con_ssh, + auth_info=auth_info, + cleanup=cleanup)[1] + nic['port-id'] = port_id + converted_nics.append(nic) + + return converted_nics + + +def boot_vm(name=None, flavor=None, source=None, source_id=None, image_id=None, + min_count=None, nics=None, hint=None, + max_count=None, key_name=None, swap=None, ephemeral=None, + user_data=None, block_device=None, + block_device_mapping=None, security_groups=None, vm_host=None, + avail_zone=None, file=None, + config_drive=False, meta=None, tags=None, + fail_ok=False, auth_info=None, con_ssh=None, reuse_vol=False, + guest_os='', poll=True, cleanup=None): + """ + Boot a vm with given parameters + Args: + name (str): + flavor (str): + source (str): 'image', 'volume', 'snapshot', or 'block_device' + source_id (str): id of the specified source. such as volume_id, + image_id, or snapshot_id + image_id (str): id of glance image. 
Will not be used if source is + image and source_id is specified + min_count (int): + max_count (int): + key_name (str): + swap (int|None): + ephemeral (int): + user_data (str|list): + vm_host (str): which host to place the vm + avail_zone (str): availability zone for vm host, Possible values: + 'nova', 'stxauto', etc + block_device (dict|list|tuple): dist or list of dict, each dictionary + is a block device. + e.g, {'source': 'volume', 'volume_id': xxxx, ...} + block_device_mapping (str): Block device mapping in the format + '=:::'. + auth_info (dict): + con_ssh (SSHClient): + security_groups (str|list|tuple): add nova boot option + --security-groups $(sec_group_name) + nics (list): nics to be created for the vm + each nic: , + vif-pci-address=pci-address> + Examples: [{'net-id': , 'vif-model': }, {'net-id': + , 'vif-model': }, ...] + Notes: valid vif-models: + virtio, avp, e1000, pci-passthrough, pci-sriov, rtl8139, + ne2k_pci, pcnet + + hint (dict): key/value pair(s) sent to scheduler for custom use. such + as group= + file (str): To store files from local + to on the new server. + config_drive (bool): To enable config drive. + meta (dict): key/value pairs for vm meta data. e.g., + {'sw:wrs:recovery_priority': 1, ...} + tags (None|str|tuple|list) + fail_ok (bool): + reuse_vol (bool): whether or not to reuse the existing volume + guest_os (str): Valid values: 'cgcs-guest', 'ubuntu_14', 'centos_6', + 'centos_7', etc. + This will be overriden by image_id if specified. + poll (bool): + cleanup (str|None): valid values: 'module', 'session', 'function', + 'class', vm (and volume) will be deleted as + part of teardown + + Returns (tuple): (rtn_code(int), new_vm_id_if_any(str), message(str), + new_vol_id_if_any(str)) + (0, vm_id, 'VM is booted successfully') # vm is created + successfully and in Active state. + (1, vm_id, ) # boot vm cli command failed, but vm is + still booted + (2, vm_id, "VM building is not 100% complete.") # boot vm cli + accepted, but vm building is not + 100% completed. Only applicable when poll=True + (3, vm_id, "VM did not reach ACTIVE state within . VM + status: ") + # vm is not in Active state after created. + (4, '', ): create vm cli command failed, vm is not booted + + """ + valid_cleanups = (None, 'function', 'class', 'module', 'session') + if cleanup not in valid_cleanups: + raise ValueError( + "Invalid scope provided. 
Choose from: {}".format(valid_cleanups)) + + LOG.info("Processing boot_vm args...") + # Handle mandatory arg - name + tenant = common.get_tenant_name(auth_info=auth_info) + if name is None: + name = 'vm' + name = "{}-{}".format(tenant, name) + name = common.get_unique_name(name, resource_type='vm') + + # Handle mandatory arg - key_name + key_name = key_name if key_name is not None else get_default_keypair( + auth_info=auth_info, con_ssh=con_ssh) + + # Handle mandatory arg - flavor + if flavor is None: + flavor = nova_helper.get_basic_flavor(auth_info=auth_info, + con_ssh=con_ssh, + guest_os=guest_os) + + if guest_os == 'vxworks': + LOG.tc_step("Add HPET Timer extra spec to flavor") + extra_specs = {FlavorSpec.HPET_TIMER: 'True'} + nova_helper.set_flavor(flavor=flavor, **extra_specs) + + # Handle mandatory arg - nics + if not nics: + mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info, + con_ssh=con_ssh) + if not mgmt_net_id: + raise exceptions.NeutronError("Cannot find management network") + nics = [{'net-id': mgmt_net_id}] + + if 'edge' not in guest_os and 'vxworks' not in guest_os: + tenant_net_id = network_helper.get_tenant_net_id( + auth_info=auth_info, con_ssh=con_ssh) + if tenant_net_id: + nics.append({'net-id': tenant_net_id}) + + if isinstance(nics, dict): + nics = [nics] + nics = _convert_vnics(nics, con_ssh=con_ssh, auth_info=auth_info, + cleanup=cleanup) + + # Handle mandatory arg - boot source + volume_id = snapshot_id = image = None + if source != 'block_device': + if source is None: + if min_count is None and max_count is None: + source = 'volume' + else: + source = 'image' + + if source.lower() == 'volume': + if source_id: + volume_id = source_id + else: + vol_name = 'vol-' + name + if reuse_vol: + volume_id = cinder_helper.get_any_volume( + new_name=vol_name, + auth_info=auth_info, + con_ssh=con_ssh, + cleanup=cleanup) + else: + volume_id = cinder_helper.create_volume( + name=vol_name, + source_id=image_id, + auth_info=auth_info, + con_ssh=con_ssh, + guest_image=guest_os, + cleanup=cleanup)[1] + + elif source.lower() == 'image': + image = source_id if source_id else image_id + if not image: + img_name = guest_os if guest_os else GuestImages.DEFAULT[ + 'guest'] + image = glance_helper.get_image_id_from_name(img_name, + strict=True, + fail_ok=False) + + elif source.lower() == 'snapshot': + if not snapshot_id: + snapshot_id = cinder_helper.get_vol_snapshots( + auth_info=auth_info, con_ssh=con_ssh) + if not snapshot_id: + raise ValueError( + "snapshot id is required to boot vm; however no " + "snapshot exists on the system.") + snapshot_id = snapshot_id[0] + + if vm_host and not avail_zone: + avail_zone = 'nova' + if avail_zone and vm_host: + avail_zone = '{}:{}'.format(avail_zone, vm_host) + + if user_data is None and guest_os and not re.search( + GuestImages.TIS_GUEST_PATTERN, guest_os): + # create userdata cloud init file to run right after vm + # initialization to get ip on interfaces other than eth0. 
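+        # For example, an 'ubuntu_14' guest booted with a mgmt NIC and a
+        # tenant NIC gets a generated cloud-init script that also brings up
+        # eth1, since non-TiS guests typically only configure eth0 by default
+        # (illustrative note; exact behaviour depends on the guest image).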
+ user_data = _create_cloud_init_if_conf(guest_os, nics_num=len(nics)) + + if user_data and user_data.startswith('~'): + user_data = user_data.replace('~', HostLinuxUser.get_home(), 1) + + if file and file.startswith('~'): + file = file.replace('~', HostLinuxUser.get_home(), 1) + + # create cmd + non_repeat_args = {'--flavor': flavor, + '--image': image, + '--boot-volume': volume_id, + '--snapshot': snapshot_id, + '--min-count': str( + min_count) if min_count is not None else None, + '--max-count': str( + max_count) if max_count is not None else None, + '--key-name': key_name, + '--swap': swap, + '--user-data': user_data, + '--ephemeral': ephemeral, + '--availability-zone': avail_zone, + '--file': file, + '--config-drive': str( + config_drive) if config_drive else None, + '--block-device-mapping': block_device_mapping, + '--security-groups': security_groups, + '--tags': tags, + '--poll': poll, + } + non_repeat_args = common.parse_args(non_repeat_args, repeat_arg=False, + vals_sep=',') + + repeat_args = { + '--meta': meta, + '--nic': nics, + '--hint': hint, + '--block-device': block_device, + } + repeat_args = common.parse_args(repeat_args, repeat_arg=True, vals_sep=',') + + pre_boot_vms = [] + if not (min_count is None and max_count is None): + name_str = name + '-' + pre_boot_vms = get_vms(auth_info=auth_info, con_ssh=con_ssh, + strict=False, name=name_str) + + args_ = ' '.join([non_repeat_args, repeat_args, name]) + LOG.info("Booting VM {} with args: {}".format(name, args_)) + exitcode, output = cli.nova('boot', positional_args=args_, + ssh_client=con_ssh, fail_ok=True, + auth_info=auth_info, + timeout=VMTimeout.BOOT_VM) + + tmout = VMTimeout.STATUS_CHANGE + if min_count is None and max_count is None: + table_ = table_parser.table(output) + vm_id = table_parser.get_value_two_col_table(table_, 'id') + if cleanup and vm_id: + ResourceCleanup.add('vm', vm_id, scope=cleanup, del_vm_vols=False) + + if exitcode == 1: + if vm_id: + # print out vm show for debugging purpose + cli.openstack('server show', vm_id, ssh_client=con_ssh, + auth_info=Tenant.get('admin')) + if not fail_ok: + raise exceptions.VMOperationFailed(output) + + if vm_id: + return 1, vm_id, output # vm_id = '' if cli is rejected + # without vm created + return 4, '', output + + LOG.info("Post action check...") + if poll and "100% complete" not in output: + message = "VM building is not 100% complete." + if fail_ok: + LOG.warning(message) + return 2, vm_id, "VM building is not 100% complete." + else: + raise exceptions.VMOperationFailed(message) + + if not wait_for_vm_status(vm_id=vm_id, status=VMStatus.ACTIVE, + timeout=tmout, con_ssh=con_ssh, + auth_info=auth_info, fail_ok=True): + vm_status = \ + get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, + auth_info=auth_info)[0] + message = "VM {} did not reach ACTIVE state within {}. 
VM " \ + "status: {}".format(vm_id, tmout, vm_status) + if fail_ok: + LOG.warning(message) + return 3, vm_id, message + else: + raise exceptions.VMPostCheckFailed(message) + + LOG.info("VM {} is booted successfully.".format(vm_id)) + + return 0, vm_id, 'VM is booted successfully' + + else: + name_str = name + '-' + post_boot_vms = get_vms(auth_info=auth_info, con_ssh=con_ssh, + strict=False, name=name_str) + vm_ids = list(set(post_boot_vms) - set(pre_boot_vms)) + if cleanup and vm_ids: + ResourceCleanup.add('vm', vm_ids, scope=cleanup, del_vm_vols=False) + + if exitcode == 1: + return 1, vm_ids, output + + result, vms_in_state, vms_failed_to_reach_state = wait_for_vms_values( + vm_ids, fail_ok=True, timeout=tmout, + con_ssh=con_ssh, + auth_info=Tenant.get('admin')) + if not result: + msg = "VMs failed to reach ACTIVE state: {}".format( + vms_failed_to_reach_state) + if fail_ok: + LOG.warning(msg=msg) + return 3, vm_ids, msg + + LOG.info("VMs booted successfully: {}".format(vm_ids)) + return 0, vm_ids, "VMs are booted successfully" + + +def wait_for_vm_pingable_from_natbox(vm_id, timeout=200, fail_ok=False, + con_ssh=None, use_fip=False): + """ + Wait for ping vm from natbox succeeds. + + Args: + vm_id (str): id of the vm to ping + timeout (int): max retry time for pinging vm + fail_ok (bool): whether to raise exception if vm cannot be ping'd + successfully from natbox within timeout + con_ssh (SSHClient): TiS server ssh handle + use_fip (bool): whether or not to ping floating ip only if any + + Returns (bool): True if ping vm succeeded, False otherwise. + + """ + ping_end_time = time.time() + timeout + while time.time() < ping_end_time: + if ping_vms_from_natbox(vm_ids=vm_id, fail_ok=True, con_ssh=con_ssh, + num_pings=3, use_fip=use_fip)[0]: + # give it sometime to settle after vm booted and became pingable + time.sleep(5) + return True + else: + msg = "Ping from NatBox to vm {} failed for {} seconds.".format(vm_id, + timeout) + if fail_ok: + LOG.warning(msg) + return False + else: + time_stamp = common.get_date_in_format(ssh_client=con_ssh, + date_format='%Y%m%d_%H-%M') + f_path = '{}/{}-{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), + time_stamp, ProjVar.get_var('TEST_NAME')) + common.write_to_file(f_path, + "=================={}===============\n".format( + msg)) + ProjVar.set_var(PING_FAILURE=True) + get_console_logs(vm_ids=vm_id, sep_file=f_path) + network_helper.collect_networking_info(vms=vm_id, sep_file=f_path, + time_stamp=time_stamp) + raise exceptions.VMNetworkError(msg) + + +def __merge_dict(base_dict, merge_dict): + # identical to {**base_dict, **merge_dict} in python3.6+ + d = dict(base_dict) # id() will be different, making a copy + for k in merge_dict: + d[k] = merge_dict[k] + return d + + +def get_default_keypair(auth_info=None, con_ssh=None): + """ + Get keypair for specific tenant. + + Args: + auth_info (dict): If None, default tenant will be used. + con_ssh (SSHClient): + + Returns (str): key name + + """ + if auth_info is None: + auth_info = Tenant.get_primary() + + keypair_name = auth_info['nova_keypair'] + existing_keypairs = nova_helper.get_keypairs(name=keypair_name, + con_ssh=con_ssh, + auth_info=auth_info) + if existing_keypairs: + return existing_keypairs[0] + + # Assume that public key file already exists since it should have been + # set up in session config. + # In the case of public key file does not exist, there should be existing + # nova keypair, so it should not + # reach this step. 
Config done via setups.setup_keypair() + keyfile_stx_final = ProjVar.get_var('STX_KEYFILE_SYS_HOME') + public_key_stx = '{}.pub'.format(keyfile_stx_final) + LOG.info("Create nova keypair {} using public key {}".format( + keypair_name, public_key_stx)) + nova_helper.create_keypair(keypair_name, public_key=public_key_stx, + auth_info=auth_info, con_ssh=con_ssh) + + return keypair_name + + +def live_migrate_vm(vm_id, destination_host='', con_ssh=None, + block_migrate=None, force=None, fail_ok=False, + auth_info=Tenant.get('admin')): + """ + + Args: + vm_id (str): + destination_host (str): such as compute-0, compute-1 + con_ssh (SSHClient): + block_migrate (bool): whether to add '--block-migrate' to command + force (None|bool): force live migrate + fail_ok (bool): if fail_ok, return a numerical number to indicate the + execution status + One exception is if the live-migration command exit_code > 1, + which indicating the command itself may + be incorrect. In this case CLICommandFailed exception will be + thrown regardless of the fail_ok flag. + auth_info (dict): + + Returns (tuple): (return_code (int), error_msg_if_migration_rejected (str)) + (0, 'Live migration is successful.'): + live migration succeeded and post migration checking passed + (1, ): # This scenario is changed to host did not change + as excepted + live migration request rejected as expected. e.g., no available + destination host, + or live migrate a vm with block migration + (2, ): live migration request rejected due to unknown + reason. + (3, 'Post action check failed: VM is in ERROR state.'): + live migration command executed successfully, but VM is in Error + state after migration + (4, 'Post action check failed: VM is not in original state.'): + live migration command executed successfully, but VM is not in + before-migration-state + (5, 'Post action check failed: VM host did not change!'): (this + scenario is removed from Newton) + live migration command executed successfully, but VM is still on + the same host after migration + (6, ) This happens when vote_note_to_migrate is set for + vm, or pci device is used in vm, etc + + For the first two scenarios, results will be returned regardless of the + fail_ok flag. 
+ For scenarios other than the first two, returns are only applicable if + fail_ok=True + + Examples: + 1) If a test case is meant to test live migration with a specific + flavor which would block the migration, the + following call can be made: + + return_code, msg = live_migrate_vm(vm_id, fail_ok=True) + expected_err_str = "your error string" + assert return_code in [1, 2] + assert expected_err_str in msg + + 2) For a test that needs to live migrate + + """ + optional_arg = '' + + if block_migrate: + optional_arg += '--block-migrate' + + if force: + optional_arg += '--force' + + before_host = get_vm_host(vm_id, con_ssh=con_ssh) + before_status = get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, + auth_info=Tenant.get('admin'))[0] + if not before_status == VMStatus.ACTIVE: + LOG.warning("Non-active VM status before live migrate: {}".format( + before_status)) + + extra_str = '' + if not destination_host == '': + extra_str = ' to ' + destination_host + positional_args = ' '.join( + [optional_arg.strip(), str(vm_id), destination_host]).strip() + LOG.info( + "Live migrating VM {} from {}{} started.".format(vm_id, before_host, + extra_str)) + LOG.info("nova live-migration {}".format(positional_args)) + # auto host/block migration selection unavailable in openstack client + exit_code, output = cli.nova('live-migration', + positional_args=positional_args, + ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info) + + if exit_code == 1: + return 6, output + + LOG.info("Waiting for VM status change to {} with best effort".format( + VMStatus.MIGRATING)) + in_mig_state = wait_for_vm_status(vm_id, status=VMStatus.MIGRATING, + timeout=60, fail_ok=True) + if not in_mig_state: + LOG.warning( + "VM did not reach {} state after triggering live-migration".format( + VMStatus.MIGRATING)) + + LOG.info("Waiting for VM status change to original state {}".format( + before_status)) + end_time = time.time() + VMTimeout.LIVE_MIGRATE_COMPLETE + while time.time() < end_time: + time.sleep(2) + status = get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, + auth_info=Tenant.get('admin'))[0] + if status == before_status: + LOG.info("Live migrate vm {} completed".format(vm_id)) + break + elif status == VMStatus.ERROR: + if fail_ok: + return 3, "Post action check failed: VM is in ERROR state." + nova_helper.get_migration_list_table(con_ssh=con_ssh, + auth_info=auth_info) + raise exceptions.VMPostCheckFailed( + "VM {} is in {} state after live migration. Original state " + "before live migration is: {}".format(vm_id, VMStatus.ERROR, + before_status)) + else: + if fail_ok: + return 4, "Post action check failed: VM is not in original state." + else: + nova_helper.get_migration_list_table(con_ssh=con_ssh, + auth_info=auth_info) + raise exceptions.TimeoutException( + "VM {} did not reach original state within {} seconds after " + "live migration".format(vm_id, VMTimeout.LIVE_MIGRATE_COMPLETE)) + + after_host = before_host + for i in range(3): + after_host = get_vm_host(vm_id, con_ssh=con_ssh) + if after_host != before_host: + break + time.sleep(3) + + if before_host == after_host: + LOG.warning( + "Live migration of vm {} failed. 
Checking if this is expected " + "failure...".format( + vm_id)) + if _is_live_migration_allowed(vm_id, vm_host=before_host, + block_migrate=block_migrate) and \ + (destination_host or get_dest_host_for_live_migrate(vm_id)): + if fail_ok: + return 1, "Unknown live migration failure" + else: + nova_helper.get_migration_list_table(con_ssh=con_ssh, + auth_info=auth_info) + raise exceptions.VMPostCheckFailed( + "Unexpected failure of live migration!") + else: + LOG.debug( + "System does not allow live migrating vm {} as " + "expected.".format( + vm_id)) + return 2, "Live migration failed as expected" + + LOG.info( + "VM {} successfully migrated from {} to {}".format(vm_id, before_host, + after_host)) + return 0, "Live migration is successful." + + +def _is_live_migration_allowed(vm_id, vm_host, con_ssh=None, + block_migrate=None): + vm_info = VMInfo.get_vm_info(vm_id, con_ssh=con_ssh) + storage_backing = vm_info.get_storage_type() + if not storage_backing: + storage_backing = host_helper.get_host_instance_backing(host=vm_host, + con_ssh=con_ssh) + + vm_boot_from = vm_info.boot_info['type'] + + if storage_backing == 'local_image': + if block_migrate and vm_boot_from == 'volume' and not \ + vm_info.has_local_disks(): + LOG.warning( + "Live block migration is not supported for boot-from-volume " + "vm with local_image storage") + return False + return True + + elif storage_backing == 'local_lvm': + if (not block_migrate) and vm_boot_from == 'volume' and not \ + vm_info.has_local_disks(): + return True + else: + LOG.warning( + "Live (block) migration is not supported for local_lvm vm " + "with localdisk") + return False + + else: + # remote backend + if block_migrate: + LOG.warning( + "Live block migration is not supported for vm with remote " + "storage") + return False + else: + return True + + +def get_dest_host_for_live_migrate(vm_id, con_ssh=None): + """ + Check whether a destination host exists with following criteria: + Criteria: + 1) host has same storage backing as the vm + 2) host is unlocked + 3) different than current host + Args: + vm_id (str): + con_ssh (SSHClient): + + Returns (str): hostname for the first host found. Or '' if no proper host + found + """ + vm_info = VMInfo.get_vm_info(vm_id, con_ssh=con_ssh) + vm_storage_backing = vm_info.get_storage_type() + current_host = vm_info.get_host_name() + if not vm_storage_backing: + vm_storage_backing = host_helper.get_host_instance_backing( + host=current_host, con_ssh=con_ssh) + candidate_hosts = host_helper.get_hosts_in_storage_backing( + storage_backing=vm_storage_backing, con_ssh=con_ssh) + + hosts_table_ = table_parser.table(cli.system('host-list')[1]) + for host in candidate_hosts: + if not host == current_host: + host_state = table_parser.get_values(hosts_table_, 'administrative', + hostname=host)[0] + if host_state == 'unlocked': + LOG.debug( + "At least one host - {} is available for live migrating " + "vm {}".format( + host, vm_id)) + return host + + LOG.warning("No valid host found for live migrating vm {}".format(vm_id)) + return '' + + +def cold_migrate_vm(vm_id, revert=False, con_ssh=None, fail_ok=False, + auth_info=Tenant.get('admin')): + """ + Cold migrate a vm and confirm/revert + Args: + vm_id (str): vm to cold migrate + revert (bool): False to confirm resize, True to revert + con_ssh (SSHClient): + fail_ok (bool): True if fail ok. Default to False, ie., throws + exception upon cold migration fail. 
+ auth_info (dict): + + Returns (tuple): (rtn_code, message) + (0, success_msg) # Cold migration and confirm/revert succeeded. VM is + back to original state or Active state. + (1, ) # cold migration cli rejected + # (2, ) # Cold migration cli command rejected. is + the err message returned by cli cmd. + (3, ) # Cold migration cli accepted, but not finished. + is the output of cli cmd. + (4, timeout_message] # Cold migration command ran successfully, + but timed out waiting for VM to reach + 'Verify Resize' state or Error state. + (5, err_msg) # Cold migration command ran successfully, but VM is in + Error state. + (6, err_msg) # Cold migration command ran successfully, and resize + confirm/revert performed. But VM is not in + Active state after confirm/revert. + (7, err_msg) # Cold migration and resize confirm/revert ran + successfully and vm in active state. But host for vm + is not as expected. i.e., still the same host after confirm + resize, or different host after revert resize. + (8, ) # Confirm/Revert resize cli rejected + + """ + before_host = get_vm_host(vm_id, con_ssh=con_ssh) + before_status = \ + get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh)[0] + if not before_status == VMStatus.ACTIVE: + LOG.warning("Non-active VM status before cold migrate: {}".format( + before_status)) + + LOG.info("Cold migrating VM {} from {}...".format(vm_id, before_host)) + exitcode, output = cli.nova('migrate --poll', vm_id, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info, + timeout=VMTimeout.COLD_MIGRATE_CONFIRM) + + if exitcode == 1: + return 1, output + + LOG.info( + "Waiting for VM status change to {}".format(VMStatus.VERIFY_RESIZE)) + vm_status = wait_for_vm_status(vm_id=vm_id, + status=[VMStatus.VERIFY_RESIZE, + VMStatus.ERROR], + timeout=300, + fail_ok=fail_ok, con_ssh=con_ssh) + + if vm_status is None: + return 4, 'Timed out waiting for Error or Verify_Resize status for ' \ + 'VM {}'.format(vm_id) + + verify_resize_str = 'Revert' if revert else 'Confirm' + if vm_status == VMStatus.VERIFY_RESIZE: + LOG.info("{}ing resize..".format(verify_resize_str)) + res, out = _confirm_or_revert_resize(vm=vm_id, revert=revert, + fail_ok=fail_ok, con_ssh=con_ssh) + if res > 0: + return 8, out + + elif vm_status == VMStatus.ERROR: + err_msg = "VM {} in Error state after cold migrate. {} resize is not " \ + "reached.".format(vm_id, verify_resize_str) + if fail_ok: + return 5, err_msg + nova_helper.get_migration_list_table(con_ssh=con_ssh, + auth_info=auth_info) + raise exceptions.VMPostCheckFailed(err_msg) + + post_confirm_state = wait_for_vm_status( + vm_id, status=VMStatus.ACTIVE, + timeout=VMTimeout.COLD_MIGRATE_CONFIRM, fail_ok=fail_ok, + con_ssh=con_ssh) + + if post_confirm_state is None: + err_msg = "VM {} is not in Active state after {} Resize".format( + vm_id, verify_resize_str) + return 6, err_msg + + # Process results + after_host = get_vm_host(vm_id, con_ssh=con_ssh) + host_changed = before_host != after_host + host_change_str = "changed" if host_changed else "did not change" + operation_ok = not host_changed if revert else host_changed + + if not operation_ok: + err_msg = ( + "VM {} host {} after {} Resize. Before host: {}. After host: {}". 
+ format(vm_id, host_change_str, verify_resize_str, before_host, + after_host)) + if fail_ok: + return 7, err_msg + nova_helper.get_migration_list_table(con_ssh=con_ssh, + auth_info=auth_info) + raise exceptions.VMPostCheckFailed(err_msg) + + success_msg = "VM {} successfully cold migrated and {}ed Resize.".format( + vm_id, verify_resize_str) + LOG.info(success_msg) + return 0, success_msg + + +def resize_vm(vm_id, flavor_id, revert=False, con_ssh=None, fail_ok=False, + auth_info=Tenant.get('admin')): + """ + Resize vm to given flavor + + Args: + vm_id (str): + flavor_id (str): flavor to resize to + revert (bool): True to revert resize, else confirm resize + con_ssh (SSHClient): + fail_ok (bool): + auth_info (dict): + + Returns (tuple): (rtn_code, msg) + (0, "VM successfully resized and confirmed/reverted.") + (1, ) # resize cli rejected + (2, "Timed out waiting for Error or Verify_Resize status for VM + ") + (3, "VM in Error state after resizing. VERIFY_RESIZE is not + reached.") + (4, "VM is not in Active state after confirm/revert Resize") + (5, "Flavor is changed after revert resizing.") + (6, "VM flavor is not changed to expected after resizing.") + """ + before_flavor = get_vm_flavor(vm_id, con_ssh=con_ssh) + before_status = \ + get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh)[0] + if not before_status == VMStatus.ACTIVE: + LOG.warning("Non-active VM status before cold migrate: {}".format( + before_status)) + + LOG.info("Resizing VM {} to flavor {}...".format(vm_id, flavor_id)) + args = '--wait --flavor {} {}'.format(flavor_id, vm_id) + exitcode, output = cli.openstack('server resize', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info, + timeout=VMTimeout.COLD_MIGRATE_CONFIRM) + if exitcode > 0: + return 1, output + + LOG.info( + "Waiting for VM status change to {}".format(VMStatus.VERIFY_RESIZE)) + vm_status = wait_for_vm_status(vm_id=vm_id, + status=[VMStatus.VERIFY_RESIZE, + VMStatus.ERROR], + fail_ok=fail_ok, + timeout=300, con_ssh=con_ssh) + + if vm_status is None: + err_msg = 'Timed out waiting for Error or Verify_Resize status for ' \ + 'VM {}'.format(vm_id) + LOG.error(err_msg) + return 2, err_msg + + verify_resize_str = 'Revert' if revert else 'Confirm' + if vm_status == VMStatus.VERIFY_RESIZE: + LOG.info("{}ing resize..".format(verify_resize_str)) + _confirm_or_revert_resize(vm=vm_id, revert=revert, con_ssh=con_ssh) + + elif vm_status == VMStatus.ERROR: + err_msg = "VM {} in Error state after resizing. {} is not " \ + "reached.".format(vm_id, VMStatus.VERIFY_RESIZE) + if fail_ok: + LOG.error(err_msg) + return 3, err_msg + raise exceptions.VMPostCheckFailed(err_msg) + + post_confirm_state = wait_for_vm_status( + vm_id, status=VMStatus.ACTIVE, timeout=VMTimeout.COLD_MIGRATE_CONFIRM, + fail_ok=fail_ok, con_ssh=con_ssh) + + if post_confirm_state is None: + err_msg = "VM {} is not in Active state after {} Resize".format( + vm_id, verify_resize_str) + LOG.error(err_msg) + return 4, err_msg + + after_flavor = get_vm_flavor(vm_id) + if revert and after_flavor != before_flavor: + err_msg = "Flavor is changed after revert resizing. Before flavor: " \ + "{}, after flavor: {}".format(before_flavor, after_flavor) + if fail_ok: + LOG.error(err_msg) + return 5, err_msg + raise exceptions.VMPostCheckFailed(err_msg) + + if not revert and after_flavor != flavor_id: + err_msg = "VM flavor is not changed to expected after resizing. 
" \ + "Before flavor: {}, after flavor: {}".\ + format(flavor_id, before_flavor, after_flavor) + if fail_ok: + LOG.error(err_msg) + return 6, err_msg + raise exceptions.VMPostCheckFailed(err_msg) + + success_msg = "VM {} successfully resized and {}ed.".format( + vm_id, verify_resize_str) + LOG.info(success_msg) + return 0, success_msg + + +def wait_for_vm_values(vm_id, timeout=VMTimeout.STATUS_CHANGE, check_interval=3, + fail_ok=True, strict=True, + regex=False, con_ssh=None, auth_info=None, **kwargs): + """ + Wait for vm to reach given states. + + Args: + vm_id (str): vm id + timeout (int): in seconds + check_interval (int): in seconds + fail_ok (bool): whether to return result or raise exception when vm + did not reach expected value(s). + strict (bool): whether to perform strict search(match) for the value(s) + For regular string: if True, match the whole string; if False, + find any substring match + For regex: if True, match from start of the value string; if + False, search anywhere of the value string + regex (bool): whether to use regex to find matching value(s) + con_ssh (SSHClient): + auth_info (dict): + **kwargs: field/value pair(s) to identify the waiting criteria. + + Returns (tuple): (result(bool), actual_vals(dict)) + + """ + if not kwargs: + raise ValueError("No field/value pair is passed via kwargs") + LOG.info("Waiting for vm to reach state(s): {}".format(kwargs)) + + fields_to_check = list(kwargs.keys()) + results = {} + end_time = time.time() + timeout + while time.time() < end_time: + actual_vals = get_vm_values(vm_id=vm_id, con_ssh=con_ssh, + auth_info=auth_info, + fields=fields_to_check) + for i in range(len(fields_to_check)): + field = fields_to_check[i] + expt_vals = kwargs[field] + actual_val = actual_vals[i] + results[field] = actual_val + if not isinstance(expt_vals, list): + expt_vals = [expt_vals] + for expt_val in expt_vals: + if regex: + match_found = re.match(expt_val, + actual_val) if strict else re.search( + expt_val, actual_val) + else: + match_found = expt_val == actual_val if strict else \ + expt_val in actual_val + + if match_found: + fields_to_check.remove(field) + + if not fields_to_check: + LOG.info("VM has reached states: {}".format(results)) + return True, results + + time.sleep(check_interval) + + msg = "VM {} did not reach expected states within timeout. Actual state(" \ + "s): {}".format(vm_id, results) + if fail_ok: + LOG.warning(msg) + return False, results + else: + raise exceptions.VMTimeout(msg) + + +def wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, + timeout=VMTimeout.STATUS_CHANGE, check_interval=3, + fail_ok=False, + con_ssh=None, auth_info=Tenant.get('admin')): + """ + + Args: + vm_id: + status (list|str): + timeout: + check_interval: + fail_ok (bool): + con_ssh: + auth_info: + + Returns: The Status of the vm_id depend on what Status it is looking for + + """ + end_time = time.time() + timeout + if isinstance(status, str): + status = [status] + + current_status = get_vm_values(vm_id, 'status', strict=True, + con_ssh=con_ssh, auth_info=auth_info)[0] + while time.time() < end_time: + for expected_status in status: + if current_status == expected_status: + LOG.info("VM status has reached {}".format(expected_status)) + return expected_status + + time.sleep(check_interval) + current_status = get_vm_values(vm_id, 'status', strict=True, + con_ssh=con_ssh, auth_info=auth_info)[0] + + err_msg = "Timed out waiting for vm status: {}. 
Actual vm status: " \ + "{}".format(status, current_status) + if fail_ok: + LOG.warning(err_msg) + return None + else: + raise exceptions.VMTimeout(err_msg) + + +def _confirm_or_revert_resize(vm, revert=False, con_ssh=None, fail_ok=False): + args = '--revert' if revert else '--confirm' + args = '{} {}'.format(args, vm) + return cli.openstack('server resize', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=Tenant.get('admin')) + + +def _get_vms_ips(vm_ids, net_types='mgmt', exclude_nets=None, con_ssh=None, + vshell=False): + if isinstance(net_types, str): + net_types = [net_types] + + if isinstance(vm_ids, str): + vm_ids = [vm_ids] + + valid_net_types = ['mgmt', 'data', 'internal', 'external'] + if not set(net_types) <= set(valid_net_types): + raise ValueError( + "Invalid net type(s) provided. Valid net_types: {}. net_types " + "given: {}". + format(valid_net_types, net_types)) + + vms_ips = [] + vshell_ips_dict = dict(data=[], internal=[]) + if 'mgmt' in net_types: + mgmt_ips = network_helper.get_mgmt_ips_for_vms( + vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) + if not mgmt_ips: + raise exceptions.VMNetworkError( + "Management net ip is not found for vms {}".format(vm_ids)) + vms_ips += mgmt_ips + + if 'external' in net_types: + ext_ips = network_helper.get_external_ips_for_vms( + vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) + if not ext_ips: + raise exceptions.VMNetworkError( + "No external network ip found for vms {}".format(vm_ids)) + vms_ips += ext_ips + + if 'data' in net_types: + data_ips = network_helper.get_tenant_ips_for_vms( + vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) + if not data_ips: + raise exceptions.VMNetworkError( + "Data network ip is not found for vms {}".format(vm_ids)) + if vshell: + vshell_ips_dict['data'] = data_ips + else: + vms_ips += data_ips + + if 'internal' in net_types: + internal_ips = network_helper.get_internal_ips_for_vms( + vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) + if not internal_ips: + raise exceptions.VMNetworkError( + "Internal net ip is not found for vms {}".format(vm_ids)) + if vshell: + vshell_ips_dict['internal'] = internal_ips + else: + vms_ips += internal_ips + + return vms_ips, vshell_ips_dict + + +def _ping_vms(ssh_client, vm_ids=None, con_ssh=None, num_pings=5, timeout=15, + fail_ok=False, net_types='mgmt', retry=3, + retry_interval=3, vshell=False, sep_file=None, + source_net_types=None): + """ + + Args: + vm_ids (list|str): list of vms to ping + ssh_client (SSHClient): ping from this ssh client. Usually a natbox' + ssh client or another vm's ssh client + con_ssh (SSHClient): active controller ssh client to run cli command + to get all the management ips + num_pings (int): number of pings to send + timeout (int): timeout waiting for response of ping messages in seconds + fail_ok (bool): Whether it's okay to have 100% packet loss rate. + sep_file (str|None) + net_types (str|list|tuple) + source_net_types (str|list|tuple|None): + vshell specific + None: use the same net_type s as the target IPs' + str: use the specified net_type for all target IPs + tuple: (net_type_data, net_type_internal) + use net_type_data for data IPs + use net_type_internal for internal IPs + list: same as tuple + + Returns (tuple): (res (bool), packet_loss_dict (dict)) + Packet loss rate dictionary format: + { + ip1: packet_loss_percentile1, + ip2: packet_loss_percentile2, + ... 
+ } + + """ + vms_ips, vshell_ips_dict = _get_vms_ips(vm_ids=vm_ids, net_types=net_types, + con_ssh=con_ssh, vshell=vshell) + + res_bool = False + res_dict = {} + for i in range(retry + 1): + for ip in vms_ips: + packet_loss_rate = network_helper.ping_server( + server=ip, ssh_client=ssh_client, num_pings=num_pings, + timeout=timeout, fail_ok=True, vshell=False)[0] + res_dict[ip] = packet_loss_rate + + for net_type, vshell_ips in vshell_ips_dict.items(): + + if source_net_types is None: + pass + elif isinstance(source_net_types, str): + net_type = source_net_types + else: + net_type_data, net_type_internal = source_net_types + if net_type == 'data': + net_type = net_type_data + elif net_type == 'internal': + net_type = net_type_internal + else: + raise ValueError(net_type) + + for vshell_ip in vshell_ips: + packet_loss_rate = network_helper.ping_server( + server=vshell_ip, ssh_client=ssh_client, + num_pings=num_pings, timeout=timeout, fail_ok=True, + vshell=True, net_type=net_type)[0] + res_dict[vshell_ip] = packet_loss_rate + + res_bool = not any(loss_rate == 100 for loss_rate in res_dict.values()) + if res_bool: + LOG.info( + "Ping successful from {}: {}".format(ssh_client.host, res_dict)) + return res_bool, res_dict + + if i < retry: + LOG.info("Retry in {} seconds".format(retry_interval)) + time.sleep(retry_interval) + + if not res_dict: + raise ValueError("Ping res dict contains no result.") + + err_msg = "Ping unsuccessful from vm (logged in via {}): {}".format( + ssh_client.host, res_dict) + if fail_ok: + LOG.info(err_msg) + return res_bool, res_dict + else: + if sep_file: + msg = "==========================Ping unsuccessful from vm to " \ + "vms====================" + common.write_to_file( + sep_file, + content="{}\nLogged into vm via {}. Result: {}".format( + msg, ssh_client.host, res_dict)) + raise exceptions.VMNetworkError(err_msg) + + +def configure_vm_vifs_on_same_net(vm_id, vm_ips=None, ports=None, + vm_prompt=None, restart_service=True, + reboot=False): + """ + Configure vm routes if the vm has multiple vifs on same network. + Args: + vm_id (str): + vm_ips (str|list): ips for specific vifs. Only works if vifs are up + with ips assigned + ports (list of dict): vm ports to configure. 
+ vm_prompt (None|str) + restart_service + reboot + + Returns: + + """ + + if isinstance(vm_ips, str): + vm_ips = [vm_ips] + + vnics_info = {} + if ports: + LOG.info("Get vm interfaces' mac and ip addressess") + if isinstance(ports, str): + ports = [ports] + vm_interfaces_table = table_parser.table( + cli.openstack('port list', '--server {}'.format(vm_id))[1]) + vm_interfaces_dict = table_parser.row_dict_table( + table_=vm_interfaces_table, key_header='ID') + for i in range(len(ports)): + port_id = ports[i] + vif_info = vm_interfaces_dict[port_id] + vif_ip = vif_info['fixed ip addresses'] + if vif_ip and 'ip_address' in vif_ip: + vif_ip = \ + re.findall("ip_address='(.*)'", vif_ip.split(sep=',')[0])[0] + else: + if not vm_ips: + raise ValueError( + "vm_ips for matching vnics has to be provided for " + "ports without ip address " + "listed in neutron port-list") + vif_ip = vm_ips[i] + cidr = vif_ip.rsplit('.', maxsplit=1)[0] + '.0/24' + vif_mac = vif_info['mac address'] + vnics_info[vif_mac] = (cidr, vif_ip) + + LOG.info("Configure vm routes if the vm has multiple vifs on same network.") + with ssh_to_vm_from_natbox(vm_id=vm_id, prompt=vm_prompt) as vm_ssh: + vifs_to_conf = {} + if not ports: + extra_grep = '| grep --color=never -E "{}"'.format( + '|'.join(vm_ips)) if vm_ips else '' + kernel_routes = vm_ssh.exec_cmd( + 'ip route | grep --color=never "proto kernel" {}'.format( + extra_grep))[1] + cidr_dict = {} + for line in kernel_routes.splitlines(): + found = re.findall( + r'^(.*/\d+)\sdev\s(.*)\sproto kernel.*\ssrc\s(.*)$', line) + cidr, dev_name, dev_ip = found[0] + if cidr not in cidr_dict: + cidr_dict[cidr] = [] + cidr_dict[cidr].append((dev_name, dev_ip)) + + for cidr_, val in cidr_dict.items(): + if not vm_ips: + val = val[1:] + for eth_info in val: + dev_name, dev_ip = eth_info + vifs_to_conf[dev_name] = \ + (cidr_, dev_ip, 'stxauto_{}'.format(dev_name)) + + if not vifs_to_conf: + LOG.info( + "Did not find multiple vifs on same subnet. Do nothing.") + + else: + for mac_addr in vnics_info: + dev_name = network_helper.get_eth_for_mac(vm_ssh, + mac_addr=mac_addr) + cidr_, dev_ip = vnics_info[mac_addr] + vifs_to_conf[dev_name] = ( + cidr_, dev_ip, 'stxauto_{}'.format(dev_name)) + + used_tables = vm_ssh.exec_cmd( + 'grep --color=never -E "^[0-9]" {}'.format(VMPath.RT_TABLES))[1] + used_tables = [int(re.split(r'[\s\t]', line_)[0].strip()) for line_ in + used_tables.splitlines()] + + start_range = 110 + for eth_name, eth_info in vifs_to_conf.items(): + cidr_, vif_ip, table_name = eth_info + exiting_tab = vm_ssh.exec_cmd( + 'grep --color=never {} {}'.format(table_name, + VMPath.RT_TABLES))[1] + if not exiting_tab: + for i in range(start_range, 250): + if i not in used_tables: + LOG.info( + "Append new routing table {} to rt_tables". 
+ format(table_name)) + vm_ssh.exec_sudo_cmd( + 'echo "{} {}" >> {}'.format(i, table_name, + VMPath.RT_TABLES)) + start_range = i + 1 + break + else: + raise ValueError( + "Unable to get a valid table number to create route " + "for {}".format(eth_name)) + + LOG.info( + "Update arp_filter, arp_announce, route and rule scripts for " + "vm {} {}".format(vm_id, eth_name)) + vm_ssh.exec_sudo_cmd( + 'echo 2 > {}'.format(VMPath.ETH_ARP_ANNOUNCE.format(eth_name))) + vm_ssh.exec_sudo_cmd( + 'echo 1 > {}'.format(VMPath.ETH_ARP_FILTER.format(eth_name))) + route = '{} dev {} proto kernel scope link src {} table {}'.format( + cidr_, eth_name, vif_ip, table_name) + vm_ssh.exec_sudo_cmd('echo "{}" > {}'.format( + route, VMPath.ETH_RT_SCRIPT.format(eth_name))) + rule = 'table {} from {}'.format(table_name, vif_ip) + vm_ssh.exec_sudo_cmd('echo "{}" > {}'.format( + rule, VMPath.ETH_RULE_SCRIPT.format(eth_name))) + + if restart_service and not reboot: + LOG.info("Restart network service after configure vm routes") + vm_ssh.exec_sudo_cmd('systemctl restart network', + expect_timeout=120, get_exit_code=False) + # vm_ssh.exec_cmd('ip addr') + + if reboot: + LOG.info("Reboot vm after configure vm routes") + reboot_vm(vm_id=vm_id) + + +def cleanup_routes_for_vifs(vm_id, vm_ips, rm_ifcfg=True, restart_service=True, + reboot=False): + """ + Cleanup the configured routes for specified vif(s). This is needed when a + vif is detached from a vm. + + Args: + vm_id: + vm_ips: + rm_ifcfg + restart_service + reboot + + Returns: + + """ + with ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh: + + if isinstance(vm_ips, str): + vm_ips = [vm_ips] + + for vm_ip in vm_ips: + LOG.info("Clean up route for dev with ip {}".format(vm_ip)) + route = vm_ssh.exec_sudo_cmd( + 'grep --color=never {} {}'.format( + vm_ip, VMPath.ETH_RT_SCRIPT.format('*')))[1] + if not route: + continue + + pattern = '(.*) dev (.*) proto kernel .* src {} table (.*)'.format( + vm_ip) + found = re.findall(pattern, route) + if found: + cidr, eth_name, table_name = found[0] + LOG.info( + "Update arp_filter, arp_announce, route and rule scripts " + "for vm {} {}".format(vm_id, eth_name)) + # vm_ssh.exec_sudo_cmd('rm -f {}'.format( + # VMPath.ETH_ARP_ANNOUNCE.format(eth_name))) + # vm_ssh.exec_sudo_cmd('rm -f {}'.format( + # VMPath.ETH_ARP_FILTER.format(eth_name))) + vm_ssh.exec_sudo_cmd( + 'rm -f {}'.format(VMPath.ETH_RULE_SCRIPT.format(eth_name))) + vm_ssh.exec_sudo_cmd( + 'rm -f {}'.format(VMPath.ETH_RT_SCRIPT.format(eth_name))) + vm_ssh.exec_sudo_cmd("sed -n -i '/{}/!p' {}".format( + table_name, VMPath.RT_TABLES)) + + if rm_ifcfg: + vm_ssh.exec_sudo_cmd('rm -f {}'.format( + VMPath.ETH_PATH_CENTOS.format(eth_name))) + + if restart_service and not reboot: + LOG.info("Restart network service") + vm_ssh.exec_sudo_cmd('systemctl restart network', + get_exit_code=False, expect_timeout=60) + + if reboot: + reboot_vm(vm_id=vm_id) + + +def ping_vms_from_natbox(vm_ids=None, natbox_client=None, con_ssh=None, + num_pings=5, timeout=30, fail_ok=False, + use_fip=False, retry=0): + """ + + Args: + vm_ids: vms to ping. If None, all vms will be ping'd. + con_ssh (SSHClient): active controller client to retrieve the vm info + natbox_client (NATBoxClient): ping vms from this client + num_pings (int): number of pings to send + timeout (int): timeout waiting for response of ping messages in seconds + fail_ok (bool): When False, test will stop right away if one ping + failed. 
When True, test will continue to ping + the rest of the vms and return results even if pinging one vm + failed. + use_fip (bool): Whether to ping floating ip only if a vm has more + than one management ips + retry (int): number of times to retry if ping fails + + Returns (tuple): (res (bool), packet_loss_dict (dict)) + Packet loss rate dictionary format: + { + ip1: packet_loss_percentile1, + ip2: packet_loss_percentile2, + ... + } + """ + if isinstance(vm_ids, str): + vm_ids = [vm_ids] + + if not natbox_client: + natbox_client = NATBoxClient.get_natbox_client() + + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + net_type = 'external' if use_fip else 'mgmt' + res_bool, res_dict = _ping_vms(ssh_client=natbox_client, vm_ids=vm_ids, + con_ssh=con_ssh, num_pings=num_pings, + timeout=timeout, fail_ok=True, + net_types=net_type, retry=retry, + vshell=False) + if not res_bool and not fail_ok: + msg = "==================Ping vm(s) from NatBox failed - Collecting " \ + "extra information===============" + LOG.error(msg) + time_stamp = common.get_date_in_format(ssh_client=con_ssh, + date_format='%Y%m%d_%H-%M') + f_path = '{}/{}-{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), + time_stamp, ProjVar.get_var("TEST_NAME")) + common.write_to_file(file_path=f_path, + content="\n{}\nResult(s): {}\n".format(msg, + res_dict)) + ProjVar.set_var(PING_FAILURE=True) + get_console_logs(vm_ids=vm_ids, sep_file=f_path) + network_helper.collect_networking_info(vms=vm_ids, sep_file=f_path, + time_stamp=time_stamp, + con_ssh=con_ssh) + raise exceptions.VMNetworkError( + "Ping failed from NatBox. Details: {}".format(res_dict)) + + return res_bool, res_dict + + +def get_console_logs(vm_ids, length=None, con_ssh=None, sep_file=None): + """ + Get console logs for given vm(s) + Args: + vm_ids (str|list): + length (int|None): how many lines to tail + con_ssh: + sep_file (str|None): write vm console logs to given sep_file if + specified. + + Returns (dict): {: , : , ...} + """ + if isinstance(vm_ids, str): + vm_ids = [vm_ids] + + vm_ids = list(set(vm_ids)) + console_logs = {} + args = '--lines={} '.format(length) if length else '' + content = '' + for vm_id in vm_ids: + vm_args = '{}{}'.format(args, vm_id) + output = cli.openstack('console log show', vm_args, ssh_client=con_ssh, + auth_info=Tenant.get('admin'))[1] + console_logs[vm_id] = output + content += "\n#### Console log for vm {} ####\n{}\n".format(vm_id, + output) + + if sep_file: + common.write_to_file(sep_file, content=content) + + return console_logs + + +def ping_vms_from_vm(to_vms=None, from_vm=None, user=None, password=None, + prompt=None, con_ssh=None, natbox_client=None, + num_pings=5, timeout=120, fail_ok=False, from_vm_ip=None, + from_fip=False, net_types='mgmt', + retry=3, retry_interval=5, vshell=False, + source_net_types=None): + """ + + Args: + from_vm (str): + to_vms (str|list|None): + user (str): + password (str): + prompt (str): + con_ssh (SSHClient): + natbox_client (SSHClient): + num_pings (int): + timeout (int): max number of seconds to wait for ssh connection to + from_vm + fail_ok (bool): When False, test will stop right away if one ping + failed. When True, test will continue to ping + the rest of the vms and return results even if pinging one vm + failed. + from_vm_ip (str): vm ip to ssh to if given. 
from_fip flag will be + considered only if from_vm_ip=None + from_fip (bool): whether to ssh to vm's floating ip if it has + floating ip associated with it + net_types (list|str|tuple): 'mgmt', 'data', or 'internal' + retry (int): number of times to retry + retry_interval (int): seconds to wait between each retries + vshell (bool): whether to ping vms' data interface through internal + interface. + Usage: when set to True, use 'vshell ping --count 3 + ' + - dpdk vms should be booted from lab_setup scripts + source_net_types (str|list|tuple|None): + vshell specific + None: use the same net_type s as the target IPs' + str: use the specified net_type for all target IPs + tuple: (net_type_data, net_type_internal) + use net_type_data for data IPs + use net_type_internal for internal IPs + list: same as tuple + + Returns (tuple): + A tuple in form: (res (bool), packet_loss_dict (dict)) + + Packet loss rate dictionary format: + { + ip1: packet_loss_percentile1, + ip2: packet_loss_percentile2, + ... + } + + """ + if isinstance(net_types, str): + net_types = [net_types] + + if from_vm is None or to_vms is None: + vms_ips = network_helper.get_mgmt_ips_for_vms(con_ssh=con_ssh, + rtn_dict=True) + if not vms_ips: + raise exceptions.NeutronError("No management ip found for any vms") + + vms_ids = list(vms_ips.keys()) + if from_vm is None: + from_vm = random.choice(vms_ids) + if to_vms is None: + to_vms = vms_ids + + if isinstance(to_vms, str): + to_vms = [to_vms] + + if not isinstance(from_vm, str): + raise ValueError("from_vm is not a string: {}".format(from_vm)) + + assert from_vm and to_vms, "from_vm: {}, to_vms: {}".format(from_vm, to_vms) + + time_stamp = common.get_date_in_format(ssh_client=con_ssh, + date_format='%Y%m%d_%H-%M') + f_path = '{}/{}-{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), time_stamp, + ProjVar.get_var('TEST_NAME')) + try: + with ssh_to_vm_from_natbox(vm_id=from_vm, username=user, + password=password, + natbox_client=natbox_client, + prompt=prompt, con_ssh=con_ssh, + vm_ip=from_vm_ip, use_fip=from_fip, + retry_timeout=300) as from_vm_ssh: + res = _ping_vms(ssh_client=from_vm_ssh, vm_ids=to_vms, + con_ssh=con_ssh, num_pings=num_pings, + timeout=timeout, fail_ok=fail_ok, + net_types=net_types, retry=retry, + retry_interval=retry_interval, vshell=vshell, + sep_file=f_path, + source_net_types=source_net_types) + return res + + except (exceptions.TiSError, pexpect.ExceptionPexpect): + ProjVar.set_var(PING_FAILURE=True) + collect_to_vms = False if list(to_vms) == [from_vm] else True + get_console_logs(vm_ids=from_vm, length=20, sep_file=f_path) + if collect_to_vms: + get_console_logs(vm_ids=to_vms, sep_file=f_path) + network_helper.collect_networking_info(vms=to_vms, sep_file=f_path, + time_stamp=time_stamp) + try: + LOG.warning( + "Ping vm(s) from vm failed - Attempt to ssh to from_vm and " + "collect vm networking info") + with ssh_to_vm_from_natbox(vm_id=from_vm, username=user, + password=password, + natbox_client=natbox_client, + prompt=prompt, con_ssh=con_ssh, + vm_ip=from_vm_ip, + use_fip=from_fip) as from_vm_ssh: + _collect_vm_networking_info(vm_ssh=from_vm_ssh, sep_file=f_path, + vm_id=from_vm) + + if collect_to_vms: + LOG.warning( + "Ping vm(s) from vm failed - Attempt to ssh to to_vms and " + "collect vm networking info") + for vm_ in to_vms: + with ssh_to_vm_from_natbox(vm_, retry=False, + con_ssh=con_ssh) as to_ssh: + _collect_vm_networking_info(to_ssh, sep_file=f_path, + vm_id=vm_) + except (exceptions.TiSError, pexpect.ExceptionPexpect): + pass + + raise + + +def 
_collect_vm_networking_info(vm_ssh, sep_file=None, vm_id=None): + vm = vm_id if vm_id else '' + content = '#### VM network info collected when logged into vm {}via {} ' \ + '####'.format(vm, vm_ssh.host) + for cmd in ('ip addr', 'ip neigh', 'ip route'): + output = vm_ssh.exec_cmd(cmd, get_exit_code=False)[1] + content += '\nSent: {}\nOutput:\n{}\n'.format(cmd, output) + + if sep_file: + common.write_to_file(sep_file, content=content) + + +def ping_ext_from_vm(from_vm, ext_ip=None, user=None, password=None, + prompt=None, con_ssh=None, natbox_client=None, + num_pings=5, timeout=30, fail_ok=False, vm_ip=None, + use_fip=False): + if ext_ip is None: + ext_ip = EXT_IP + + with ssh_to_vm_from_natbox(vm_id=from_vm, username=user, password=password, + natbox_client=natbox_client, + prompt=prompt, con_ssh=con_ssh, vm_ip=vm_ip, + use_fip=use_fip) as from_vm_ssh: + from_vm_ssh.exec_cmd('ip addr', get_exit_code=False) + return network_helper.ping_server(ext_ip, ssh_client=from_vm_ssh, + num_pings=num_pings, + timeout=timeout, fail_ok=fail_ok)[0] + + +def scp_to_vm_from_natbox(vm_id, source_file, dest_file, timeout=60, + validate=True, natbox_client=None, sha1sum=None): + """ + scp a file to a vm from natbox + the file must be located in the natbox + the natbox must has connectivity to the VM + + Args: + vm_id (str): vm to scp to + source_file (str): full pathname to the source file + dest_file (str): destination full pathname in the VM + timeout (int): scp timeout + validate (bool): verify src and dest sha1sum + natbox_client (NATBoxClient|None): + sha1sum (str|None): validates the source file prior to operation, + or None, only checked if validate=True + + Returns (None): + + """ + if natbox_client is None: + natbox_client = NATBoxClient.get_natbox_client() + + LOG.info("scp-ing from {} to VM {}".format(natbox_client.host, vm_id)) + + tmp_loc = '/tmp' + fname = os.path.basename(os.path.normpath(source_file)) + + # ensure source file exists + natbox_client.exec_cmd('test -f {}'.format(source_file), fail_ok=False) + + # calculate sha1sum + src_sha1 = None + if validate: + src_sha1 = natbox_client.exec_cmd('sha1sum {}'.format(source_file), + fail_ok=False)[1] + src_sha1 = src_sha1.split(' ')[0] + LOG.info("src: {}, sha1sum: {}".format(source_file, src_sha1)) + if sha1sum is not None and src_sha1 != sha1sum: + raise ValueError( + "src sha1sum validation failed {} != {}".format(src_sha1, + sha1sum)) + + with ssh_to_vm_from_natbox(vm_id) as vm_ssh: + vm_ssh.exec_cmd('mkdir -p {}'.format(tmp_loc)) + vm_ssh.scp_on_dest(natbox_client.user, natbox_client.host, source_file, + '/'.join([tmp_loc, fname]), natbox_client.password, + timeout=timeout) + + # `mv $s $d` fails if $s == $d + if os.path.normpath(os.path.join(tmp_loc, fname)) != os.path.normpath( + dest_file): + vm_ssh.exec_sudo_cmd( + 'mv -f {} {}'.format('/'.join([tmp_loc, fname]), dest_file), + fail_ok=False) + + # ensure destination file exists + vm_ssh.exec_sudo_cmd('test -f {}'.format(dest_file), fail_ok=False) + + # validation + if validate: + dest_sha1 = vm_ssh.exec_sudo_cmd( + 'sha1sum {}'.format(dest_file), fail_ok=False)[1] + dest_sha1 = dest_sha1.split(' ')[0] + LOG.info("dst: {}, sha1sum: {}".format(dest_file, dest_sha1)) + if src_sha1 != dest_sha1: + raise ValueError( + "dst sha1sum validation failed {} != {}".format(src_sha1, + dest_sha1)) + LOG.info("scp completed successfully") + + +def scp_to_vm(vm_id, source_file, dest_file, timeout=60, validate=True, + source_ssh=None, natbox_client=None): + """ + scp a file from any SSHClient to a VM + 
since not all SSHClient's has connectivity to the VM, this function scps + the source file to natbox first + + Args: + vm_id (str): vm to scp to + source_file (str): full pathname to the source file + dest_file (str): destination path in the VM + timeout (int): scp timeout + validate (bool): verify src and dest sha1sum + source_ssh (SSHClient|None): the source ssh session, or None to use + 'localhost' + natbox_client (NATBoxClient|None): + + Returns (None): + + """ + if not natbox_client: + natbox_client = NATBoxClient.get_natbox_client() + + close_source = False + if not source_ssh: + source_ssh = LocalHostClient() + source_ssh.connect() + close_source = True + + try: + # scp-ing from natbox, forward the call + if source_ssh.host == natbox_client.host: + return scp_to_vm_from_natbox(vm_id, source_file, dest_file, timeout, + validate, natbox_client=natbox_client) + + LOG.info("scp-ing from {} to natbox {}".format(source_ssh.host, + natbox_client.host)) + tmp_loc = '~' + fname = os.path.basename(os.path.normpath(source_file)) + + # ensure source file exists + source_ssh.exec_cmd('test -f {}'.format(source_file), fail_ok=False) + + # calculate sha1sum + if validate: + src_sha1 = source_ssh.exec_cmd('sha1sum {}'.format(source_file), + fail_ok=False)[1] + src_sha1 = src_sha1.split(' ')[0] + LOG.info("src: {}, sha1sum: {}".format(source_file, src_sha1)) + else: + src_sha1 = None + + # scp to natbox + # natbox_client.exec_cmd('mkdir -p {}'.format(tmp_loc)) + source_ssh.scp_on_source( + source_file, natbox_client.user, natbox_client.host, tmp_loc, + natbox_client.password, timeout=timeout) + + return scp_to_vm_from_natbox( + vm_id, '/'.join([tmp_loc, fname]), dest_file, timeout, validate, + natbox_client=natbox_client, sha1sum=src_sha1) + + finally: + if close_source: + source_ssh.close() + + +@contextmanager +def ssh_to_vm_from_natbox(vm_id, vm_image_name=None, username=None, + password=None, prompt=None, + timeout=VMTimeout.SSH_LOGIN, natbox_client=None, + con_ssh=None, vm_ip=None, + vm_ext_port=None, use_fip=False, retry=True, + retry_timeout=120, close_ssh=True, + auth_info=Tenant.get('admin')): + """ + ssh to a vm from natbox. + + Args: + vm_id (str): vm to ssh to + vm_image_name (str): such as cgcs-guest, tis-centos-guest, ubuntu_14 + username (str): + password (str): + prompt (str): + timeout (int): + natbox_client (NATBoxClient): + con_ssh (SSHClient): ssh connection to TiS active controller + vm_ip (str): ssh to this ip from NatBox if given + vm_ext_port (str): port forwarding rule external port. If given this + port will be used. vm_ip must be external + router ip address. + use_fip (bool): Whether to ssh to floating ip if a vm has one + associated. Not applicable if vm_ip is given. 
+ retry (bool): whether or not to retry if fails to connect + retry_timeout (int): max time to retry + close_ssh + auth_info (dict|None) + + Yields (VMSSHClient): + ssh client of the vm + + Examples: + with ssh_to_vm_from_natbox(vm_id=) as vm_ssh: + vm_ssh.exec_cmd(cmd) + + """ + if vm_image_name is None: + vm_image_name = get_vm_image_name(vm_id=vm_id, con_ssh=con_ssh, + auth_info=auth_info).strip().lower() + + if vm_ip is None: + if use_fip: + vm_ip = network_helper.get_external_ips_for_vms( + vms=vm_id, con_ssh=con_ssh, auth_info=auth_info)[0] + else: + vm_ip = network_helper.get_mgmt_ips_for_vms( + vms=vm_id, con_ssh=con_ssh, auth_info=auth_info)[0] + + if not natbox_client: + natbox_client = NATBoxClient.get_natbox_client() + + try: + vm_ssh = VMSSHClient(natbox_client=natbox_client, vm_ip=vm_ip, + vm_ext_port=vm_ext_port, + vm_img_name=vm_image_name, user=username, + password=password, prompt=prompt, + timeout=timeout, retry=retry, + retry_timeout=retry_timeout) + + except (exceptions.TiSError, pexpect.ExceptionPexpect): + LOG.warning( + 'Failed to ssh to VM {}! Collecting vm console log'.format(vm_id)) + get_console_logs(vm_ids=vm_id) + raise + + try: + yield vm_ssh + finally: + if close_ssh: + vm_ssh.close() + + +def get_vm_pid(instance_name, host_ssh): + """ + Get instance pid on its host. + + Args: + instance_name: instance name of a vm + host_ssh: ssh for the host of the given instance + + Returns (str): pid of a instance on its host + + """ + code, vm_pid = host_ssh.exec_sudo_cmd( + "ps aux | grep --color='never' {} | grep -v grep | awk '{{print $2}}'". + format(instance_name)) + if code != 0: + raise exceptions.SSHExecCommandFailed( + "Failed to get pid for vm: {}".format(instance_name)) + + if not vm_pid: + LOG.warning("PID for {} is not found on host!".format(instance_name)) + + return vm_pid + + +class VMInfo: + """ + class for storing and retrieving information for specific VM using + openstack admin. + + Notes: Do not use this class for vm actions, such as boot, delete, + migrate, etc as these actions should be done by + tenants. 
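+
+    Illustrative usage (sketch only; vm_id is a placeholder, not from a
+    specific testcase):
+        vm_info = VMInfo.get_vm_info(vm_id)
+        storage_backing = vm_info.get_storage_type()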
+    """
+    __instances = {}
+    active_controller_ssh = None
+
+    def __init__(self, vm_id, con_ssh=None, auth_info=Tenant.get('admin')):
+        """
+
+        Args:
+            vm_id:
+            con_ssh: floating controller ssh for the system
+
+        Returns:
+
+        """
+        if con_ssh is None:
+            con_ssh = ControllerClient.get_active_controller()
+        VMInfo.active_controller_ssh = con_ssh
+        self.vm_id = vm_id
+        self.con_ssh = con_ssh
+        self.auth_info = auth_info
+        self.initial_table_ = table_parser.table(
+            cli.openstack('server show', vm_id, ssh_client=con_ssh,
+                          auth_info=self.auth_info, timeout=60)[1])
+        self.table_ = self.initial_table_
+        self.name = table_parser.get_value_two_col_table(self.initial_table_,
+                                                         'name', strict=True)
+        self.tenant_id = table_parser.get_value_two_col_table(
+            self.initial_table_, 'project_id')
+        self.user_id = table_parser.get_value_two_col_table(self.initial_table_,
+                                                            'user_id')
+        self.boot_info = self.__get_boot_info()
+        self.flavor_table = None
+        VMInfo.__instances[
+            vm_id] = self  # add instance to class variable for tracking
+
+    def refresh_table(self):
+        self.table_ = table_parser.table(
+            cli.openstack('server show', self.vm_id, ssh_client=self.con_ssh,
+                          auth_info=self.auth_info, timeout=60)[1])
+
+    def get_host_name(self):
+        self.refresh_table()
+        return table_parser.get_value_two_col_table(table_=self.table_,
+                                                    field=':host', strict=False)
+
+    def get_flavor_id(self):
+        """
+
+        Returns (str): flavor id of the vm
+
+        """
+        flavor = table_parser.get_value_two_col_table(self.table_, 'flavor')
+        flavor_id = re.findall(r'\((.*)\)', flavor)[0]
+        return flavor_id
+
+    def refresh_flavor_table(self):
+        flavor_id = self.get_flavor_id()
+        self.flavor_table = table_parser.table(
+            cli.openstack('flavor show', flavor_id, ssh_client=self.con_ssh,
+                          auth_info=Tenant.get('admin'))[1])
+        return self.flavor_table
+
+    def __get_boot_info(self):
+        return _get_boot_info(table_=self.table_, vm_id=self.vm_id,
+                              auth_info=self.auth_info,
+                              con_ssh=self.con_ssh)
+
+    def get_storage_type(self):
+        table_ = self.flavor_table
+        if not table_:
+            table_ = self.refresh_flavor_table()
+        extra_specs = table_parser.get_value_two_col_table(table_, 'properties',
+                                                           merge_lines=True)
+        extra_specs = table_parser.convert_value_to_dict(value=extra_specs)
+        return extra_specs.get(FlavorSpec.STORAGE_BACKING, None)
+
+    def has_local_disks(self):
+        if self.boot_info['type'] == 'image':
+            return True
+
+        table_ = self.flavor_table
+        if not table_:
+            table_ = self.refresh_flavor_table()
+        swap = table_parser.get_value_two_col_table(table_, 'swap')
+        ephemeral = table_parser.get_value_two_col_table(table_, 'ephemeral',
+                                                         strict=False)
+        return bool(swap or int(ephemeral))
+
+    @classmethod
+    def get_vms_info(cls):
+        return tuple(cls.__instances)
+
+    @classmethod
+    def get_vm_info(cls, vm_id, con_ssh=None):
+        if vm_id not in cls.__instances:
+            if vm_id in get_all_vms(con_ssh=con_ssh):
+                return cls(vm_id, con_ssh)
+            else:
+                raise exceptions.VMError(
+                    "VM with id {} does not exist!".format(vm_id))
+        instance = cls.__instances[vm_id]
+        instance.refresh_table()
+        return instance
+
+    @classmethod
+    def remove_instance(cls, vm_id):
+        # dict.pop() takes no keyword arguments; pass the fallback
+        # positionally so a missing vm_id does not raise KeyError
+        cls.__instances.pop(vm_id, None)
+
+
+def delete_vms(vms=None, delete_volumes=True, check_first=True,
+               timeout=VMTimeout.DELETE, fail_ok=False,
+               stop_first=True, con_ssh=None, auth_info=Tenant.get('admin'),
+               remove_cleanup=None):
+    """
+    Delete given vm(s) (and attached volume(s)). If no vms are given, all vms
+    on the system will be deleted.
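+
+    Illustrative call (sketch only; vm ids are placeholders):
+        delete_vms(vms=[vm1_id, vm2_id], delete_volumes=True)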
+ + Args: + vms (list|str): list of vm ids to be deleted. If string input, + assume only one vm id is provided. + check_first (bool): Whether to check if given vm(s) exist on system + before attempt to delete + timeout (int): Max time to wait for delete cli finish and wait for + vms actually disappear from system + delete_volumes (bool): delete attached volume(s) if set to True + fail_ok (bool): + stop_first (bool): whether to stop active vm(s) first before + deleting. Best effort only + con_ssh (SSHClient): + auth_info (dict): + remove_cleanup (None|str): remove from vm cleanup list if deleted + successfully + + Returns (tuple): (rtn_code(int), msg(str)) # rtn_code 1,2,3 only returns + when fail_ok=True + (-1, 'No vm(s) to delete.') # "Empty vm list/string provided and + no vm exist on system. + (-1, 'None of the given vm(s) exists on system.') + (0, "VM(s) deleted successfully.") + (1, ) # delete vm(s) cli returns stderr, some or all vms + failed to delete. + (2, "VMs deletion cmd all accepted, but some vms still exist after + deletion") + + """ + existing_vms = None + if not vms: + vms = get_vms(con_ssh=con_ssh, auth_info=auth_info, all_projects=True, + long=False) + existing_vms = list(vms) + elif isinstance(vms, str): + vms = [vms] + + vms = [vm for vm in vms if vm] + if not vms: + LOG.warning( + "Empty vm list/string provided and no vm exist on system. Do " + "Nothing") + return -1, 'No vm(s) to delete.' + + if check_first: + if existing_vms is None: + existing_vms = get_vms(con_ssh=con_ssh, auth_info=auth_info, + all_projects=True, long=False) + + vms = list(set(vms) & set(existing_vms)) + if not vms: + LOG.info("None given vms exist on system. Do nothing") + return -1, 'None of the given vm(s) exists on system.' + + if stop_first: # best effort only + active_vms = get_vms(vms=vms, auth_info=auth_info, con_ssh=con_ssh, + all_projects=True, + Status=VMStatus.ACTIVE) + if active_vms: + stop_vms(active_vms, fail_ok=True, con_ssh=con_ssh, + auth_info=auth_info) + + vols_to_del = [] + if delete_volumes: + vols_to_del = cinder_helper.get_volumes_attached_to_vms( + vms=vms, auth_info=auth_info, con_ssh=con_ssh) + + LOG.info("Deleting vm(s): {}".format(vms)) + vms_accepted = [] + deletion_err = '' + for vm in vms: + # Deleting vm one by one due to the cmd will stop if a failure is + # encountered, causing no attempt to delete + # other vms + code, output = cli.openstack('server delete', vm, ssh_client=con_ssh, + fail_ok=True, auth_info=auth_info, + timeout=timeout) + if code > 0: + deletion_err += '{}\n'.format(output) + else: + vms_accepted.append(vm) + + # check if vms are actually removed from nova list + all_deleted, vms_undeleted = _wait_for_vms_deleted(vms_accepted, + fail_ok=True, + auth_info=auth_info, + timeout=timeout, + con_ssh=con_ssh) + if remove_cleanup: + vms_deleted = list(set(vms_accepted) - set(vms_undeleted)) + ResourceCleanup.remove('vm', vms_deleted, scope=remove_cleanup, + del_vm_vols=False) + + # Delete volumes results will not be returned. Best effort only. 
+ if delete_volumes: + res = cinder_helper.delete_volumes(vols_to_del, fail_ok=True, + auth_info=auth_info, + con_ssh=con_ssh)[0] + if res == 0 and remove_cleanup: + ResourceCleanup.remove('volume', vols_to_del, scope=remove_cleanup) + + # Process returns + if deletion_err: + LOG.warning(deletion_err) + if fail_ok: + return 1, deletion_err + raise exceptions.CLIRejected(deletion_err) + + if vms_undeleted: + msg = 'VM(s) still exsit after deletion: {}'.format(vms_undeleted) + LOG.warning(msg) + if fail_ok: + return 2, msg + raise exceptions.VMPostCheckFailed(msg) + + LOG.info("VM(s) deleted successfully: {}".format(vms)) + return 0, "VM(s) deleted successfully." + + +def _wait_for_vms_deleted(vms, timeout=VMTimeout.DELETE, fail_ok=True, + check_interval=3, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Wait for specific vm to be removed from nova list + + Args: + vms (str|list): list of vms' ids + timeout (int): in seconds + fail_ok (bool): + check_interval (int): + con_ssh (SSHClient|None): + auth_info (dict|None): + + Returns (tuple): (result(bool), vms_failed_to_delete(list)) + + """ + if isinstance(vms, str): + vms = [vms] + + vms_to_check = list(vms) + end_time = time.time() + timeout + while time.time() < end_time: + try: + vms_to_check = get_vms(vms=vms_to_check, con_ssh=con_ssh, + auth_info=auth_info) + except exceptions.CLIRejected: + pass + + if not vms_to_check: + return True, [] + time.sleep(check_interval) + + if fail_ok: + return False, vms_to_check + raise exceptions.VMPostCheckFailed( + "Some vm(s) are not removed from nova list within {} seconds: {}". + format(timeout, vms_to_check)) + + +def wait_for_vms_values(vms, header='Status', value=VMStatus.ACTIVE, + timeout=VMTimeout.STATUS_CHANGE, fail_ok=True, + check_interval=3, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Wait for specific vms to reach any of the given state(s) in openstack + server list + + Args: + vms (str|list): id(s) of vms to check + header (str): target header in nova list + value (str|list): expected value(s) + timeout (int): in seconds + fail_ok (bool): + check_interval (int): + con_ssh (SSHClient|None): + auth_info (dict|None): + + Returns (list): [result(bool), vms_in_state(dict), + vms_failed_to_reach_state(dict)] + + """ + if isinstance(vms, str): + vms = [vms] + + if isinstance(value, str): + value = [value] + + res_fail = res_pass = None + end_time = time.time() + timeout + while time.time() < end_time: + res_pass = {} + res_fail = {} + vms_values = get_vms(vms=vms, con_ssh=con_ssh, auth_info=auth_info, + field=header) + for i in range(len(vms)): + vm = vms[i] + vm_value = vms_values[i] + if vm_value in value: + res_pass[vm] = vm_value + else: + res_fail[vm] = vm_value + + if not res_fail: + return True, res_pass, res_fail + + time.sleep(check_interval) + + fail_msg = "Some vm(s) did not reach given status from nova list within " \ + "{} seconds: {}".format(timeout, res_fail) + if fail_ok: + LOG.warning(fail_msg) + return False, res_pass, res_fail + raise exceptions.VMPostCheckFailed(fail_msg) + + +def set_vm_state(vm_id, check_first=False, error_state=True, fail_ok=False, + auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Set vm state to error or active via nova reset-state. 
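+
+    Illustrative calls (sketch only; vm_id is a placeholder):
+        set_vm_state(vm_id, error_state=True)    # force vm into ERROR state
+        set_vm_state(vm_id, error_state=False)   # reset vm back to ACTIVE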
+ + Args: + vm_id: + check_first: + error_state: + fail_ok: + auth_info: + con_ssh: + + Returns (tuple): + + """ + expt_vm_status = VMStatus.ERROR if error_state else VMStatus.ACTIVE + LOG.info("Setting vm {} state to: {}".format(vm_id, expt_vm_status)) + + if check_first: + pre_vm_status = get_vm_values(vm_id, fields='status', con_ssh=con_ssh, + auth_info=auth_info)[0] + if pre_vm_status.lower() == expt_vm_status.lower(): + msg = "VM {} already in {} state. Do nothing.".format(vm_id, + pre_vm_status) + LOG.info(msg) + return -1, msg + + code, out = set_vm(vm_id=vm_id, state=expt_vm_status, con_ssh=con_ssh, + auth_info=auth_info, fail_ok=fail_ok) + if code > 0: + return 1, out + + result = wait_for_vm_status(vm_id, expt_vm_status, fail_ok=fail_ok) + if result is None: + msg = "VM {} did not reach expected state - {} after " \ + "reset-state.".format(vm_id, expt_vm_status) + LOG.warning(msg) + return 2, msg + + msg = "VM state is successfully set to: {}".format(expt_vm_status) + LOG.info(msg) + return 0, msg + + +def reboot_vm(vm_id, hard=False, fail_ok=False, con_ssh=None, auth_info=None, + cli_timeout=CMDTimeout.REBOOT_VM, + reboot_timeout=VMTimeout.REBOOT): + """ + reboot vm via openstack server reboot + Args: + vm_id: + hard (bool): hard or soft reboot + fail_ok: + con_ssh: + auth_info: + cli_timeout: + reboot_timeout: + + Returns (tuple): + + """ + vm_status = get_vm_status(vm_id, con_ssh=con_ssh) + if not vm_status.lower() == 'active': + LOG.warning( + "VM is not in active state before rebooting. VM status: {}".format( + vm_status)) + + extra_arg = '--hard ' if hard else '' + arg = "{}{}".format(extra_arg, vm_id) + + date_format = "%Y%m%d %T" + start_time = common.get_date_in_format(date_format=date_format) + code, output = cli.openstack('server reboot', arg, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info, + timeout=cli_timeout) + if code > 0: + return 1, output + + # expt_reboot = VMStatus.HARD_REBOOT if hard else VMStatus.SOFT_REBOOT + # _wait_for_vm_status(vm_id, expt_reboot, check_interval=0, fail_ok=False) + LOG.info("Wait for vm reboot events to appear in fm event-list") + expt_reason = 'hard-reboot' if hard else 'soft-reboot' + system_helper.wait_for_events( + timeout=30, num=10, entity_instance_id=vm_id, + start=start_time, fail_ok=False, strict=False, + **{'Event Log ID': EventLogID.REBOOT_VM_ISSUED, + 'Reason Text': expt_reason}) + + system_helper.wait_for_events( + timeout=reboot_timeout, num=10, entity_instance_id=vm_id, + start=start_time, fail_ok=False, + **{'Event Log ID': EventLogID.REBOOT_VM_COMPLETE}) + + LOG.info("Check vm status from nova show") + actual_status = wait_for_vm_status(vm_id, + [VMStatus.ACTIVE, VMStatus.ERROR], + fail_ok=fail_ok, con_ssh=con_ssh, + timeout=30) + if not actual_status: + msg = "VM {} did not reach active state after reboot.".format(vm_id) + LOG.warning(msg) + return 2, msg + + if actual_status.lower() == VMStatus.ERROR.lower(): + msg = "VM is in error state after reboot." + if fail_ok: + LOG.warning(msg) + return 3, msg + raise exceptions.VMPostCheckFailed(msg) + + succ_msg = "VM rebooted successfully." 
+ LOG.info(succ_msg) + return 0, succ_msg + + +def __perform_vm_action(vm_id, action, expt_status, + timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, + con_ssh=None, + auth_info=None): + LOG.info("{} vm {} begins...".format(action, vm_id)) + code, output = cli.nova(action, vm_id, ssh_client=con_ssh, fail_ok=fail_ok, + auth_info=auth_info, timeout=120) + + if code == 1: + return 1, output + + actual_status = wait_for_vm_status(vm_id, [expt_status, VMStatus.ERROR], + fail_ok=fail_ok, con_ssh=con_ssh, + timeout=timeout) + + if not actual_status: + msg = "VM {} did not reach expected state {} after {}.".format( + vm_id, expt_status, action) + LOG.warning(msg) + return 2, msg + + if actual_status.lower() == VMStatus.ERROR.lower(): + msg = "VM is in error state after {}.".format(action) + if fail_ok: + LOG.warning(msg) + return 3, msg + raise exceptions.VMPostCheckFailed(msg) + + succ_msg = "{} VM succeeded.".format(action) + LOG.info(succ_msg) + return 0, succ_msg + + +def suspend_vm(vm_id, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, + con_ssh=None, auth_info=None): + return __perform_vm_action(vm_id, 'suspend', VMStatus.SUSPENDED, + timeout=timeout, fail_ok=fail_ok, + con_ssh=con_ssh, auth_info=auth_info) + + +def resume_vm(vm_id, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, + con_ssh=None, auth_info=None): + return __perform_vm_action(vm_id, 'resume', VMStatus.ACTIVE, + timeout=timeout, fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=auth_info) + + +def pause_vm(vm_id, timeout=VMTimeout.PAUSE, fail_ok=False, con_ssh=None, + auth_info=None): + return __perform_vm_action(vm_id, 'pause', VMStatus.PAUSED, timeout=timeout, + fail_ok=fail_ok, con_ssh=con_ssh, + auth_info=auth_info) + + +def unpause_vm(vm_id, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, + con_ssh=None, auth_info=None): + return __perform_vm_action(vm_id, 'unpause', VMStatus.ACTIVE, + timeout=timeout, fail_ok=fail_ok, + con_ssh=con_ssh, + auth_info=auth_info) + + +def stop_vms(vms, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, con_ssh=None, + auth_info=None): + return _start_or_stop_vms(vms, 'stop', VMStatus.STOPPED, timeout, + check_interval=1, fail_ok=fail_ok, + con_ssh=con_ssh, auth_info=auth_info) + + +def start_vms(vms, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, con_ssh=None, + auth_info=None): + return _start_or_stop_vms(vms, 'start', VMStatus.ACTIVE, timeout, + check_interval=1, fail_ok=fail_ok, + con_ssh=con_ssh, auth_info=auth_info) + + +def _start_or_stop_vms(vms, action, expt_status, + timeout=VMTimeout.STATUS_CHANGE, check_interval=3, + fail_ok=False, + con_ssh=None, auth_info=None): + LOG.info("{}ing vms {}...".format(action, vms)) + action = action.lower() + if isinstance(vms, str): + vms = [vms] + + # Not using openstack client due to stop will be aborted at first + # failure, without continue processing other vms + code, output = cli.nova(action, ' '.join(vms), ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + vms_to_check = list(vms) + if code == 1: + vms_to_check = re.findall( + NovaCLIOutput.VM_ACTION_ACCEPTED.format(action), output) + if not vms_to_check: + return 1, output + + res_bool, res_pass, res_fail = wait_for_vms_values( + vms_to_check, 'Status', [expt_status, VMStatus.ERROR], + fail_ok=fail_ok, check_interval=check_interval, con_ssh=con_ssh, + timeout=timeout) + + if not res_bool: + msg = "Some VM(s) did not reach expected state(s) - {}. 
Actual " \
+              "states: {}".format(expt_status, res_fail)
+        LOG.warning(msg)
+        return 2, msg
+
+    error_vms = [vm_id for vm_id in vms_to_check if
+                 res_pass[vm_id].lower() == VMStatus.ERROR.lower()]
+    if error_vms:
+        msg = "Some VM(s) in error state after {}: {}".format(action, error_vms)
+        if fail_ok:
+            LOG.warning(msg)
+            return 3, msg
+        raise exceptions.VMPostCheckFailed(msg)
+
+    succ_msg = "Action {} performed successfully on vms.".format(action)
+    LOG.info(succ_msg)
+    return 0, succ_msg
+
+
+def rebuild_vm(vm_id, image_id=None, new_name=None, preserve_ephemeral=None,
+               fail_ok=False, con_ssh=None,
+               auth_info=Tenant.get('admin'), **metadata):
+    if image_id is None:
+        image_id = glance_helper.get_image_id_from_name(
+            GuestImages.DEFAULT['guest'], strict=True)
+
+    args = '{} {}'.format(vm_id, image_id)
+
+    if new_name:
+        args += ' --name {}'.format(new_name)
+
+    if preserve_ephemeral:
+        args += ' --preserve-ephemeral'
+
+    for key, value in metadata.items():
+        args += ' --meta {}={}'.format(key, value)
+
+    LOG.info("Rebuilding vm {}".format(vm_id))
+    # Some features such as trusted image cert are not available with
+    # openstack client
+    code, output = cli.nova('rebuild', args, ssh_client=con_ssh,
+                            fail_ok=fail_ok, auth_info=auth_info)
+    if code == 1:
+        return code, output
+
+    LOG.info("Check vm status after vm rebuild")
+    wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=fail_ok,
+                       con_ssh=con_ssh)
+    actual_status = wait_for_vm_status(vm_id, [VMStatus.ACTIVE, VMStatus.ERROR],
+                                       fail_ok=fail_ok, con_ssh=con_ssh,
+                                       timeout=VMTimeout.REBUILD)
+
+    if not actual_status:
+        msg = "VM {} did not reach active state after rebuild.".format(vm_id)
+        LOG.warning(msg)
+        return 2, msg
+
+    if actual_status.lower() == VMStatus.ERROR.lower():
+        msg = "VM is in error state after rebuild."
+        if fail_ok:
+            LOG.warning(msg)
+            return 3, msg
+        raise exceptions.VMPostCheckFailed(msg)
+
+    succ_msg = "VM rebuilt successfully."
+ LOG.info(succ_msg) + return 0, succ_msg + + +def get_vm_numa_nodes_via_ps(vm_id=None, instance_name=None, host=None, + con_ssh=None, auth_info=Tenant.get('admin'), + per_vcpu=False): + """ + Get numa nodes VM is currently on + Args: + vm_id: + instance_name: + host: + con_ssh: + auth_info: + per_vcpu (bool): if True, return per vcpu, e.g., if vcpu=0,1,2, + returned list will have same length [0,1,0] + + Returns (list): e.g., [0], [0, 1] + + """ + if not instance_name or not host: + if not vm_id: + raise ValueError('vm_id has to be provided') + instance_name, host = get_vm_values(vm_id, + fields=[":instance_name", ":host"], + strict=False, con_ssh=con_ssh, + auth_info=auth_info) + + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + vcpu_cpu_map = get_vcpu_cpu_map(instance_names=instance_name, + host_ssh=host_ssh, con_ssh=con_ssh)[ + instance_name] + cpus = [] + for i in range(len(vcpu_cpu_map)): + cpus.append(vcpu_cpu_map[i]) + + cpu_non_dup = sorted(list(set(cpus))) + grep_str = ' '.join( + ['-e "processor.*: {}$"'.format(cpu) for cpu in cpu_non_dup]) + cmd = 'cat /proc/cpuinfo | grep -A 10 {} | grep --color=never ' \ + '"physical id"'.format(grep_str) + physical_ids = host_ssh.exec_cmd(cmd, fail_ok=False)[1].splitlines() + physical_ids = [int(proc.split(sep=':')[-1].strip()) for proc in + physical_ids if 'physical' in proc] + if per_vcpu: + physical_ids = [physical_ids[cpu_non_dup.index(cpu)] for cpu in + cpus] + + return physical_ids + + +def get_vm_host_and_numa_nodes(vm_id, con_ssh=None, per_vcpu=False): + """ + Get vm host and numa nodes used for the vm on the host + Args: + vm_id (str): + con_ssh (SSHClient): + per_vcpu (bool): if True, return numa nodes per vcpu, e.g., vcpu=0,1, + 2, returned list can be: [0,1,0] + + Returns (tuple): ( (str), (list of integers)) + + """ + instance_name, host = get_vm_values(vm_id, + fields=[":instance_name", ":host"], + strict=False) + actual_node_vals = get_vm_numa_nodes_via_ps(vm_id=vm_id, + instance_name=instance_name, + host=host, con_ssh=con_ssh, + per_vcpu=per_vcpu) + + return host, actual_node_vals + + +def perform_action_on_vm(vm_id, action, auth_info=Tenant.get('admin'), + con_ssh=None, **kwargs): + """ + Perform action on a given vm. + + Args: + vm_id (str): + action (str): action to perform on vm. Valid_actions: 'start', + 'stop', 'suspend', 'resume', 'pause', 'unpause', + 'reboot', 'live_migrate', or 'cold_migrate' + auth_info (dict): + con_ssh (SSHClient): + **kwargs: extra params to pass to action function, + e.g.destination_host='compute-0' when action is live_migrate + + Returns (None): + + """ + action_function_map = { + 'start': start_vms, + 'stop': stop_vms, + 'suspend': suspend_vm, + 'resume': resume_vm, + 'pause': pause_vm, + 'unpause': unpause_vm, + 'reboot': reboot_vm, + 'rebuild': rebuild_vm, + 'live_migrate': live_migrate_vm, + 'cold_migrate': cold_migrate_vm, + 'cold_mig_revert': cold_migrate_vm, + } + if not vm_id: + raise ValueError("vm id is not provided.") + + valid_actions = list(action_function_map.keys()) + action = action.lower().replace(' ', '_') + if action not in valid_actions: + raise ValueError( + "Invalid action provided: {}. 
Valid actions: {}".format( + action, valid_actions)) + + if action == 'cold_mig_revert': + kwargs['revert'] = True + + return action_function_map[action](vm_id, con_ssh=con_ssh, + auth_info=auth_info, **kwargs) + + +def get_vm_nics_info(vm_id, network=None, vnic_type=None, rtn_dict=False): + """ + Get vm nics info + Args: + vm_id: + network: + vnic_type: + rtn_dict: + + Returns (list of dict|dict of dict): + list or dict (port as key) of port_info_dict. Each port_info_dict + contains following info: + { + 'port_id': , + 'network': , + 'network_id': , + 'vnic_type': , + 'mac_address': , + 'subnet_id': , + 'subnet_cidr': + } + + """ + vm_ports, vm_macs, vm_ips_info = network_helper.get_ports( + server=vm_id, network=network, + field=('ID', 'MAC Address', 'Fixed IP Addresses')) + vm_subnets = [] + vm_ips = [] + for ip_info in vm_ips_info: + ip_info = ip_info[0] + vm_ips.append(ip_info.get('ip_address')) + vm_subnets.append(ip_info.get('subnet_id')) + + indexes = list(range(len(vm_ports))) + vnic_types = [] + vm_net_ids = [] + for port in vm_ports: + port_vnic_type, port_net_id = network_helper.get_port_values( + port=port, fields=('binding_vnic_type', 'network_id')) + vnic_types.append(port_vnic_type) + vm_net_ids.append(port_net_id) + if vnic_type and vnic_type != port_vnic_type: + indexes.remove(list(vm_ports).index(port)) + + vm_net_names = [] + ids_, names_, = network_helper.get_networks(field=('ID', 'Name'), + strict=False) + for net_id in vm_net_ids: + vm_net_names.append(names_[ids_.index(net_id)]) + + res_dict = {} + res = [] + for i in indexes: + port_dict = { + 'port_id': vm_ports[i], + 'network': vm_net_names[i], + 'network_id': vm_net_ids[i], + 'vnic_type': vnic_types[i], + 'mac_address': vm_macs[i], + 'ip_address': vm_ips[i] + } + if rtn_dict: + res_dict[vm_ports[i]] = port_dict + else: + res.append(port_dict) + + return res_dict if rtn_dict else res + + +def get_vm_interfaces_via_virsh(vm_id, con_ssh=None): + """ + + Args: + vm_id: + con_ssh: + + Returns (list of tuple): + [(mac_0, vif_model_0)...] + + """ + vm_host = get_vm_host(vm_id=vm_id, con_ssh=con_ssh) + inst_name = get_vm_instance_name(vm_id=vm_id, con_ssh=con_ssh) + + vm_ifs = [] + with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh: + output = host_ssh.exec_sudo_cmd('virsh domiflist {}'.format(inst_name), + fail_ok=False)[1] + if_lines = output.split('-------------------------------\n', 1)[ + -1].splitlines() + for line in if_lines: + if not line.strip(): + continue + + interface, type_, source, model, mac = line.split() + vm_ifs.append((mac, model)) + + return vm_ifs + + +def add_vlan_for_vm_pcipt_interfaces(vm_id, net_seg_id, retry=3, + init_conf=False): + """ + Add vlan for vm pci-passthrough interface and restart networking service. + Do nothing if expected vlan interface already exists in 'ip addr'. + + Args: + vm_id (str): + net_seg_id (int|str|dict): such as 1792 + retry (int): max number of times to reboot vm to try to recover it + from non-exit + init_conf (bool): To workaround upstream bug where mac changes after + migrate or resize https://bugs.launchpad.net/nova/+bug/1617429 + + Returns: None + + Raises: VMNetworkError if vlan interface is not found in 'ip addr' after + adding + + Notes: + Sometimes a non-exist 'rename6' interface will be used for + pci-passthrough nic after vm maintenance + Sudo reboot from the vm as workaround. 
+ By default will try to reboot for a maximum of 3 times + + """ + if not vm_id or not net_seg_id: + raise ValueError("vm_id and/or net_seg_id not provided.") + + net_seg_id_dict = None + if isinstance(net_seg_id, dict): + net_seg_id_dict = net_seg_id + net_seg_id = None + + for i in range(retry): + vm_pcipt_nics = get_vm_nics_info(vm_id, vnic_type='direct-physical') + + if not vm_pcipt_nics: + LOG.warning("No pci-passthrough device found for vm from nova " + "show {}".format(vm_id)) + return + + with ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh: + for pcipt_nic in vm_pcipt_nics: + + mac_addr = pcipt_nic['mac_address'] + eth_name = network_helper.get_eth_for_mac(mac_addr=mac_addr, + ssh_client=vm_ssh) + if not eth_name: + if not init_conf: + LOG.warning( + "Interface with mac {} is not listed in 'ip addr' " + "in vm {}".format(mac_addr, vm_id)) + LOG.info("Try to get first eth with mac 90:...") + eth_name = network_helper.get_eth_for_mac( + mac_addr="link/ether 90:", ssh_client=vm_ssh) + if not eth_name: + exceptions.VMNetworkError( + "No Mac starts with 90: in ip addr for vm " + "{}".format(vm_id)) + else: + raise exceptions.VMNetworkError( + "Interface with mac {} is not listed in 'ip addr' " + "in vm {}".format(mac_addr, vm_id)) + + if 'rename' in eth_name: + LOG.warning( + "Retry {}: non-existing interface {} found on " + "pci-passthrough nic in vm {}, " + "reboot vm to try to recover".format( + i + 1, eth_name, vm_id)) + sudo_reboot_from_vm(vm_id=vm_id, vm_ssh=vm_ssh) + wait_for_vm_pingable_from_natbox(vm_id) + break + + else: + if net_seg_id_dict: + net_name = pcipt_nic['network'] + net_seg_id = net_seg_id_dict[net_name] + LOG.info( + "Seg id for {}: {}".format(net_name, net_seg_id)) + + vlan_name = "{}.{}".format(eth_name, net_seg_id) + + output_pre_ipaddr = \ + vm_ssh.exec_cmd('ip addr', fail_ok=False)[1] + if vlan_name in output_pre_ipaddr: + LOG.info("{} already in ip addr. Skip.".format( + vlan_name)) + continue + + # Bring up pcipt interface and assign IP manually. + # Upstream bug causes dev name and MAC addr + # change after reboot,migrate, making it impossible to + # use DHCP or configure permanant static IP. + # https://bugs.launchpad.net/nova/+bug/1617429 + wait_for_interfaces_up(vm_ssh, eth_name, set_up=True) + # 'ip link add' works for all linux guests but it does + # not persists after network service restart + vm_ssh.exec_cmd( + 'ip link add link {} name {} type vlan id {}'.format( + eth_name, vlan_name, + net_seg_id)) + vm_ssh.exec_cmd('ip link set {} up'.format(vlan_name)) + vnic_ip = pcipt_nic['ip_address'] + vm_ssh.exec_cmd( + 'ip addr add {}/24 dev {}'.format(vnic_ip, vlan_name)) + + LOG.info( + "Check if vlan is added successfully with IP assigned") + output_post_ipaddr = \ + vm_ssh.exec_cmd('ip addr', fail_ok=False)[1] + if vlan_name not in output_post_ipaddr: + raise exceptions.VMNetworkError( + "{} is not found in 'ip addr' after adding vlan " + "interface". + format(vlan_name)) + time.sleep(5) + if not is_ip_assigned(vm_ssh, eth_name=vlan_name): + msg = 'No IP assigned to {} vlan interface for VM ' \ + '{}'.format(vlan_name, vm_id) + LOG.warning(msg) + raise exceptions.VMNetworkError(msg) + else: + LOG.info( + "vlan {} is successfully added and an IP is " + "assigned.".format(vlan_name)) + else: + # did not break, meaning no 'rename' interface detected, + # vlan either existed or successfully added + return + + # 'for' loop break which means 'rename' interface detected, + # and vm reboot triggered - known issue with wrl + LOG.info("Reboot vm completed. 
Retry started.") + + else: + raise exceptions.VMNetworkError( + "'rename' interface still exists in pci-passthrough vm {} with {} " + "reboot attempts.".format(vm_id, retry)) + + +def is_ip_assigned(vm_ssh, eth_name): + output = vm_ssh.exec_cmd('ip addr show {}'.format(eth_name), + fail_ok=False)[1] + return re.search('inet {}'.format(Networks.IPV4_IP), output) + + +def wait_for_interfaces_up(vm_ssh, eth_names, check_interval=10, timeout=180, + set_up=False): + LOG.info( + "Waiting for vm interface(s) to be in UP state: {}".format(eth_names)) + end_time = time.time() + timeout + if isinstance(eth_names, str): + eth_names = [eth_names] + ifs_to_check = list(eth_names) + while time.time() < end_time: + for eth in ifs_to_check: + output = \ + vm_ssh.exec_cmd('ip -d link show {}'.format(eth), + fail_ok=False)[1] + if 'state UP' in output: + ifs_to_check.remove(eth) + continue + else: + if set_up: + vm_ssh.exec_cmd('ip link set {} up'.format(eth)) + LOG.info( + "{} is not up - wait for {} seconds and check again".format( + eth, check_interval)) + break + + if not ifs_to_check: + LOG.info('interfaces are up: {}'.format(eth_names)) + return + + time.sleep(check_interval) + + raise exceptions.VMNetworkError("Interface(s) not up for given vm") + + +def sudo_reboot_from_vm(vm_id, vm_ssh=None, check_host_unchanged=True, + con_ssh=None): + pre_vm_host = None + if check_host_unchanged: + pre_vm_host = get_vm_host(vm_id, con_ssh=con_ssh) + + LOG.info("Initiate sudo reboot from vm") + + def _sudo_reboot(vm_ssh_): + extra_prompt = 'Broken pipe' + output = vm_ssh_.exec_sudo_cmd('reboot -f', get_exit_code=False, + extra_prompt=extra_prompt)[1] + expt_string = 'The system is going down for reboot|Broken pipe' + if re.search(expt_string, output): + # Sometimes system rebooting msg will be displayed right after + # reboot cmd sent + vm_ssh_.parent.flush() + return + + try: + time.sleep(10) + vm_ssh_.send('') + index = vm_ssh_.expect([expt_string, vm_ssh_.prompt], timeout=60) + if index == 1: + raise exceptions.VMOperationFailed("Unable to reboot vm {}") + vm_ssh_.parent.flush() + except pexpect.TIMEOUT: + vm_ssh_.send_control('c') + vm_ssh_.expect() + raise + + if not vm_ssh: + with ssh_to_vm_from_natbox(vm_id) as vm_ssh: + _sudo_reboot(vm_ssh) + else: + _sudo_reboot(vm_ssh) + + LOG.info( + "sudo vm reboot initiated - wait for reboot completes and VM reaches " + "active state") + system_helper.wait_for_events(VMTimeout.AUTO_RECOVERY, strict=False, + fail_ok=False, con_ssh=con_ssh, + **{'Entity Instance ID': vm_id, + 'Event Log ID': + EventLogID.REBOOT_VM_COMPLETE}) + wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False, + con_ssh=con_ssh) + + if check_host_unchanged: + post_vm_host = get_vm_host(vm_id, con_ssh=con_ssh) + if not pre_vm_host == post_vm_host: + raise exceptions.HostError( + "VM host changed from {} to {} after sudo reboot vm".format( + pre_vm_host, post_vm_host)) + + +def get_proc_nums_from_vm(vm_ssh): + total_cores = common.parse_cpus_list( + vm_ssh.exec_cmd('cat /sys/devices/system/cpu/present', fail_ok=False)[ + 1]) + online_cores = common.parse_cpus_list( + vm_ssh.exec_cmd('cat /sys/devices/system/cpu/online', fail_ok=False)[1]) + offline_cores = common.parse_cpus_list( + vm_ssh.exec_cmd('cat /sys/devices/system/cpu/offline', fail_ok=False)[ + 1]) + + return total_cores, online_cores, offline_cores + + +def get_instance_names_via_virsh(host_ssh): + """ + Get instance names via virsh list on given host + Args: + host_ssh: + + Returns (list): + + """ + inst_names = 
host_ssh.exec_sudo_cmd( + "virsh list | grep instance- | awk {{'print $2'}}", + get_exit_code=False)[1] + return [name.strip() for name in inst_names.splitlines()] + + +def get_vcpu_cpu_map(instance_names=None, host_ssh=None, host=None, + con_ssh=None): + """ + Get vm(s) vcpu cpu map on given host + Args: + instance_names (str|tuple|list|None): + host_ssh (SSHClient|None): + host (str|None): + con_ssh: + + Returns (dict): {: {0: , + 1: , ...}, ...} + + """ + if not host and not host_ssh: + raise ValueError('host or host_ssh has to be specified') + + extra_grep = '' + if instance_names: + if isinstance(instance_names, str): + instance_names = (instance_names,) + extra_grep = '|grep -E "{}"'.format('|'.join(instance_names)) + cmd = 'ps-sched.sh|grep qemu{}|grep " CPU" '.format(extra_grep) + \ + """| awk '{{print $10" "$12" "$15 ;}}'""" + + if host_ssh: + output = host_ssh.exec_cmd(cmd)[1] + else: + with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: + output = host_ssh.exec_cmd(cmd)[1] + vcpu_cpu_map = {} + for line in output.splitlines(): + cpu, vcpu, instance_name = line.split() + instance_name = instance_name.split(sep=',')[0].split(sep='=')[1] + if instance_name not in vcpu_cpu_map: + vcpu_cpu_map[instance_name] = {} + vcpu_cpu_map[instance_name][int(vcpu.split(sep='/')[0])] = int(cpu) + return vcpu_cpu_map + + +def get_affined_cpus_for_vm(vm_id, host_ssh=None, vm_host=None, + instance_name=None, con_ssh=None): + """ + cpu affinity list for vm via taskset -pc + Args: + vm_id (str): + host_ssh + vm_host + instance_name + con_ssh (SSHClient): + + Returns (list): such as [10, 30] + + """ + cmd = "ps-sched.sh|grep qemu|grep {}|grep -v grep|awk '{{print $2;}}'" + \ + '|xargs -i /bin/sh -c "taskset -pc {{}}"' + + if host_ssh: + if not vm_host or not instance_name: + raise ValueError( + "vm_host and instance_name have to be provided together with " + "host_ssh") + + output = host_ssh.exec_cmd(cmd.format(instance_name))[1] + + else: + vm_host = get_vm_host(vm_id, con_ssh=con_ssh) + instance_name = get_vm_instance_name(vm_id, con_ssh=con_ssh) + + with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh: + output = host_ssh.exec_cmd(cmd.format(instance_name))[1] + + # Sample output: + # pid 6376's current affinity list: 10 + # pid 6380's current affinity list: 10 + # pid 6439's current affinity list: 10 + # pid 6441's current affinity list: 10 + # pid 6442's current affinity list: 30 + # pid 6445's current affinity list: 10 + # pid 24142's current affinity list: 10 + + all_cpus = [] + lines = output.splitlines() + for line in lines: + + # skip line if below output occurs due to timing in executing cmds + # taskset: failed to get pid 17125's affinity: No such process + if "No such process" in line: + continue + + cpu_str = line.split(sep=': ')[-1].strip() + cpus = common.parse_cpus_list(cpus=cpu_str) + all_cpus += cpus + + all_cpus = sorted(list(set(all_cpus))) + LOG.info("Affined cpus on host {} for vm {}: {}".format(vm_host, vm_id, + all_cpus)) + + return all_cpus + + +def _scp_net_config_cloud_init(guest_os): + con_ssh = get_cli_client() + dest_dir = '{}/userdata'.format(ProjVar.get_var('USER_FILE_DIR')) + + if 'ubuntu' in guest_os: + dest_name = 'ubuntu_cloud_init_if_conf.sh' + elif 'centos' in guest_os: + dest_name = 'centos_cloud_init_if_conf.sh' + else: + raise ValueError("Unknown guest_os") + + dest_path = '{}/{}'.format(dest_dir, dest_name) + + if con_ssh.file_exists(file_path=dest_path): + LOG.info('userdata {} already exists. 
Return existing path'.format( + dest_path)) + return dest_path + + LOG.debug('Create userdata directory if not already exists') + cmd = 'mkdir -p {}'.format(dest_dir) + con_ssh.exec_cmd(cmd, fail_ok=False) + + # LOG.info('wget image from {} to {}/{}'.format(img_url, img_dest, + # new_name)) + # cmd = 'wget {} --no-check-certificate -P {} -O {}'.format(img_url, + # img_dest, new_name) + # con_ssh.exec_cmd(cmd, expect_timeout=7200, fail_ok=False) + + source_path = '{}/userdata/{}'.format(TestFileServer.HOME, dest_name) + LOG.info('scp image from test server to active controller') + + scp_cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' \ + ' {}@{}:{} {}'.format(TestFileServer.USER, TestFileServer.SERVER, + source_path, dest_dir) + + con_ssh.send(scp_cmd) + index = con_ssh.expect( + [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.ADD_HOST], timeout=3600) + if index == 2: + con_ssh.send('yes') + index = con_ssh.expect([con_ssh.prompt, Prompt.PASSWORD_PROMPT], + timeout=3600) + if index == 1: + con_ssh.send(TestFileServer.PASSWORD) + index = con_ssh.expect() + if index != 0: + raise exceptions.SSHException("Failed to scp files") + + return dest_dir + + +def _create_cloud_init_if_conf(guest_os, nics_num): + """ + + Args: + guest_os: + nics_num: + + Returns (str|None): file path of the cloud init userdata file for given + guest os and number of nics + Sample file content for Centos vm: + #!/bin/bash + sudo cp /etc/sysconfig/network-scripts/ifcfg-eth0 + /etc/sysconfig/network-scripts/ifcfg-eth1 + sudo sed -i 's/eth0/eth1/g' + /etc/sysconfig/network-scripts/ifcfg-eth1 + sudo ifup eth1 + + Sample file content for Ubuntu vm: + + """ + + file_dir = '{}/userdata'.format(ProjVar.get_var('USER_FILE_DIR')) + guest_os = guest_os.lower() + + # default eth_path for non-ubuntu image + eth_path = VMPath.ETH_PATH_CENTOS + new_user = None + + if 'ubuntu' in guest_os or 'trusty_uefi' in guest_os: + guest_os = 'ubuntu' + # vm_if_path = VMPath.VM_IF_PATH_UBUNTU + eth_path = VMPath.ETH_PATH_UBUNTU + new_user = 'ubuntu' + elif 'centos' in guest_os: + # vm_if_path = VMPath.VM_IF_PATH_CENTOS + new_user = 'centos' + + file_name = '{}_{}nic_cloud_init_if_conf.sh'.format(guest_os, nics_num) + + file_path = file_dir + file_name + con_ssh = get_cli_client() + if con_ssh.file_exists(file_path=file_path): + LOG.info('userdata {} already exists. 
Return existing path'.format( + file_path)) + return file_path + + LOG.info('Create userdata directory if not already exists') + cmd = 'mkdir -p {}'.format(file_dir) + con_ssh.exec_cmd(cmd, fail_ok=False) + + tmp_dir = '{}/userdata'.format(ProjVar.get_var('TEMP_DIR')) + os.makedirs(tmp_dir, exist_ok=True) + tmp_file = tmp_dir + file_name + + # No longer need to specify bash using cloud-config + # if 'centos_7' in guest_os: + # shell = '/usr/bin/bash' + # else: + # shell = '/bin/bash' + + with open(tmp_file, mode='a') as f: + f.write("#cloud-config\n") + + if new_user is not None: + f.write("user: {}\n" + "password: {}\n" + "chpasswd: {{ expire: False}}\n" + "ssh_pwauth: True\n\n".format(new_user, new_user)) + + if eth_path is not None: + eth0_path = eth_path.format('eth0') + f.write("runcmd:\n") + # f.write(" - echo '#!{}'\n".format(shell)) + for i in range(nics_num - 1): + ethi_name = 'eth{}'.format(i + 1) + ethi_path = eth_path.format(ethi_name) + f.write(' - cp {} {}\n'.format(eth0_path, ethi_path)) + f.write( + " - sed -i 's/eth0/{}/g' {}\n".format(ethi_name, ethi_path)) + f.write(' - ifup {}\n'.format(ethi_name)) + + if not ProjVar.get_var('REMOTE_CLI'): + common.scp_from_localhost_to_active_controller(source_path=tmp_file, + dest_path=file_path, + is_dir=False) + + LOG.info("Userdata file created: {}".format(file_path)) + return file_path + + +def _get_cloud_config_add_user(con_ssh=None): + """ + copy the cloud-config userdata to STX server. + This userdata adds stx/li69nux user to guest + + Args: + con_ssh (SSHClient): + + Returns (str): STX filepath of the userdata + + """ + file_dir = ProjVar.get_var('USER_FILE_DIR') + file_name = UserData.ADDUSER_TO_GUEST + file_path = file_dir + file_name + + if con_ssh is None: + con_ssh = get_cli_client() + if con_ssh.file_exists(file_path=file_path): + LOG.info('userdata {} already exists. Return existing path'.format( + file_path)) + return file_path + + source_file = TestServerPath.USER_DATA + file_name + dest_path = common.scp_from_test_server_to_user_file_dir( + source_path=source_file, dest_dir=file_dir, + dest_name=file_name, con_ssh=con_ssh) + if dest_path is None: + raise exceptions.CommonError( + "userdata file {} does not exist after download".format(dest_path)) + + return dest_path + + +def boost_vm_cpu_usage(vm_id, end_event, new_dd_events=None, dd_event=None, + timeout=1200, con_ssh=None): + """ + Boost cpu usage on given number of cpu cores on specified vm using dd cmd + on a new thread + + Args: + vm_id (str): + end_event (Events): Event for kill the dd processes + new_dd_events (list|Events): list of Event(s) for adding new dd + process(es) + dd_event (Events): Event to set after sending first dd cmd. + timeout: Max time to wait for the end_event to be set before killing dd. 
+ con_ssh + + Returns: thread + + Examples: + LOG.tc_step("Boost VM cpu usage") + + + """ + if not new_dd_events: + new_dd_events = [] + elif not isinstance(new_dd_events, list): + new_dd_events = [new_dd_events] + + def _boost_cpu_in_vm(): + LOG.info("Boosting cpu usage for vm {} using 'dd'".format(vm_id)) + dd_cmd = 'dd if=/dev/zero of=/dev/null &' + kill_dd = 'pkill -ex dd' + + with ssh_to_vm_from_natbox(vm_id, con_ssh=con_ssh, timeout=120, + auth_info=None) as vm_ssh: + LOG.info("Start first 2 dd processes in vm") + vm_ssh.exec_cmd(cmd=dd_cmd) + vm_ssh.exec_cmd(cmd=dd_cmd) + if dd_event: + dd_event.set() + + end_time = time.time() + timeout + while time.time() < end_time: + if end_event.is_set(): + LOG.info("End event set, kill dd processes in vm") + vm_ssh.flush() + vm_ssh.exec_cmd(kill_dd, get_exit_code=False) + return + + for event in new_dd_events: + if event.is_set(): + LOG.info( + "New dd event set, start 2 new dd processes in vm") + vm_ssh.exec_cmd(cmd=dd_cmd) + vm_ssh.exec_cmd(cmd=dd_cmd) + new_dd_events.remove(event) + break + + time.sleep(3) + + LOG.error( + "End event is not set within timeout - {}s, kill dd " + "anyways".format( + timeout)) + vm_ssh.exec_cmd(kill_dd) + + LOG.info( + "Creating new thread to spike cpu_usage on vm cores for vm {}".format( + vm_id)) + thread = multi_thread.MThread(_boost_cpu_in_vm) + thread.start_thread(timeout=timeout + 10) + + return thread + + +def write_in_vm(vm_id, end_event, start_event=None, expect_timeout=120, + thread_timeout=None, write_interval=5, + con_ssh=None): + """ + Continue to write in vm using dd + + Args: + vm_id (str): + start_event (Events): set this event when write in vm starts + end_event (Events): if this event is set, end write right away + expect_timeout (int): + thread_timeout (int): + write_interval (int): how frequent to write. Note: 5 seconds seem to + be a good interval, + 1 second interval might have noticeable impact on the performance + of pexpect. 
+ con_ssh (SSHClient): controller ssh client + + Returns (MThread): new_thread + + """ + if not start_event: + start_event = Events("Write in vm {} start".format(vm_id)) + write_cmd = "while (true) do date; dd if=/dev/urandom of=output.txt " \ + "bs=1k count=1 conv=fsync || break; echo ; " \ + "sleep {}; done 2>&1 | tee trace.txt".format(write_interval) + + def _keep_writing(vm_id_): + LOG.info("starting to write to vm using dd...") + with ssh_to_vm_from_natbox(vm_id_, con_ssh=con_ssh, + close_ssh=False) as vm_ssh_: + vm_ssh_.send(cmd=write_cmd) + + start_event.set() + LOG.info("Write_in_vm started") + + LOG.info("Reading the dd output from vm {}".format(vm_id)) + thread.res = True + try: + while True: + expt_output = '1024 bytes' + index = vm_ssh_.expect([expt_output, vm_ssh_.prompt], + timeout=expect_timeout, fail_ok=True, + searchwindowsize=100) + if index != 0: + LOG.warning( + "write has stopped or expected output-'{}' is not " + "found".format( + expt_output)) + thread.res = False + break + + if end_event.is_set(): + LOG.info("End thread now") + break + + LOG.info("Writing in vm continues...") + time.sleep(write_interval) + + finally: + vm_ssh_.send_control('c') + + return vm_ssh_ + + thread = multi_thread.MThread(_keep_writing, vm_id) + thread_timeout = expect_timeout + 30 if thread_timeout is None else \ + thread_timeout + thread.start_thread(timeout=thread_timeout) + + start_event.wait_for_event(timeout=thread_timeout) + + return thread + + +def attach_interface(vm_id, port_id=None, net_id=None, fixed_ip=None, + fail_ok=False, auth_info=None, + con_ssh=None): + """ + Attach interface to a vm via port_id OR net_id + Args: + vm_id (str): + port_id (str): port to attach to vm + net_id (str): port from given net to attach to vm + fixed_ip (str): fixed ip for attached interface. Only works when + attaching interface via net_id + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + + Returns (tuple): (, ) + (0, ) + (1, ) - cli rejected + (2, "Post interface attach check failed: ") - + net_id/port_id, vif_model, or fixed_ip do not match + with + given value + + """ + LOG.info("Attaching interface to VM {}".format(vm_id)) + if not vm_id: + raise ValueError('vm_id is not supplied') + + args = '' + args_dict = { + '--port-id': port_id, + '--net-id': net_id, + '--fixed-ip': fixed_ip, + } + + for key, val in args_dict.items(): + if val is not None: + args += ' {} {}'.format(key, val) + + args += ' {}'.format(vm_id) + + prev_ports = network_helper.get_ports(server=vm_id, auth_info=auth_info, + con_ssh=con_ssh) + # Not switching to openstack client due to nova cli makes more sense. + # openstack client has separate cmds for adding + # port, network and fixed ip, while fixed ip cmd has to specify the network. 
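+    # Illustrative only: with net_id and fixed_ip supplied (and port_id left
+    # unset), the args assembled above are equivalent to running:
+    #   nova interface-attach --net-id <net_id> --fixed-ip <fixed_ip> <vm_id>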
+ code, output = cli.nova('interface-attach', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + + if code == 1: + return code, output + + LOG.info("Post interface-attach checks started...") + post_ports = network_helper.get_ports(server=vm_id, auth_info=auth_info, + con_ssh=con_ssh) + attached_port = list(set(post_ports) - set(prev_ports)) + + err_msgs = [] + if len(attached_port) != 1: + err_msg = "NICs for vm {} is not incremented by 1".format(vm_id) + err_msgs.append(err_msg) + else: + attached_port = attached_port[0] + + if net_id: + net_name = network_helper.get_net_name_from_id(net_id, con_ssh=con_ssh, + auth_info=auth_info) + net_ips = get_vm_values(vm_id, fields=net_name, strict=False, + con_ssh=con_ssh, auth_info=auth_info)[0] + if fixed_ip and fixed_ip not in net_ips.split(sep=', '): + err_msg = "specified fixed ip {} is not found in nova show " \ + "{}".format(fixed_ip, vm_id) + err_msgs.append(err_msg) + + elif port_id and port_id not in post_ports: + err_msg = "port {} is not associated to VM".format(port_id) + err_msgs.append(err_msg) + + if err_msgs: + err_msgs_str = "Post interface attach check failed:\n{}".format( + '\n'.join(err_msgs)) + if fail_ok: + LOG.warning(err_msgs_str) + return 2, attached_port + raise exceptions.NovaError(err_msgs_str) + + succ_msg = "Port {} successfully attached to VM {}".format(attached_port, + vm_id) + LOG.info(succ_msg) + return 0, attached_port + + +def add_ifcfg_scripts(vm_id, mac_addrs, static_ips=None, ipv6='no', reboot=True, + vm_prompt=None, + **extra_configs): + """ + + Args: + vm_id: + mac_addrs (list of str): + static_ips (None|str|list): + ipv6: + reboot: + vm_prompt + **extra_configs: + + Returns: + + """ + LOG.info('Add ifcfg script(s) to VM {}'.format(vm_id)) + with ssh_to_vm_from_natbox(vm_id, prompt=vm_prompt) as vm_ssh: + vm_eths = [] + for mac_addr in mac_addrs: + eth_name = network_helper.get_eth_for_mac(mac_addr=mac_addr, + ssh_client=vm_ssh) + assert eth_name, "vif not found for expected mac_address {} in vm" \ + " {}".format(mac_addr, vm_id) + vm_eths.append(eth_name) + + if static_ips: + if isinstance(static_ips, str): + static_ips = [static_ips] + if len(static_ips) != len(vm_eths): + raise ValueError( + "static_ips count has to be the same as vm devs to be " + "configured") + + for i in range(len(vm_eths)): + eth = vm_eths[i] + if static_ips: + static_ip = static_ips[i] + script_content = VMNetwork.IFCFG_STATIC.format(eth, ipv6, + static_ip) + else: + script_content = VMNetwork.IFCFG_DHCP.format(eth, ipv6) + + if extra_configs: + extra_str = '\n'.join( + ['{}={}'.format(k, v) for k, v in extra_configs.items()]) + script_content += '\n{}'.format(extra_str) + + script_path = VMPath.ETH_PATH_CENTOS.format(eth) + vm_ssh.exec_sudo_cmd('touch {}'.format(script_path)) + vm_ssh.exec_sudo_cmd( + "cat > {} << 'EOT'\n{}\nEOT".format(script_path, + script_content), + fail_ok=False) + + if reboot: + reboot_vm(vm_id=vm_id) + + +def detach_interface(vm_id, port_id, cleanup_route=False, fail_ok=False, + auth_info=None, con_ssh=None, + verify_virsh=True): + """ + Detach a port from vm + Args: + vm_id (str): + port_id (str): existing port that is attached to given vm + fail_ok (bool): + auth_info (dict): + con_ssh (SSHClient): + cleanup_route (bool) + verify_virsh (bool): Whether to verify in virsh xmldump for detached + port + + Returns (tuple): (, ) + (0, Port is successfully detached from VM ) + (1, ) - cli rejected + (2, "Port is not detached from VM ") - detached + port is still shown in nova show + + """ + 
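+    # Illustrative usage (placeholders only): detach a known port and let the
+    # post checks raise on any mismatch:
+    #   code, msg = detach_interface(vm_id=<vm>, port_id=<port>, fail_ok=False)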
target_ips = None + if cleanup_route: + fixed_ips = \ + network_helper.get_ports(field='Fixed IP Addresses', + port_id=port_id, + con_ssh=con_ssh, auth_info=auth_info)[0] + target_ips = [fixed_ip['ip_address'] for fixed_ip in fixed_ips] + + mac_to_check = None + if verify_virsh: + prev_ports, prev_macs = network_helper.get_ports( + server=vm_id, auth_info=auth_info, con_ssh=con_ssh, + field=('ID', 'MAC Address')) + for prev_port in prev_ports: + if port_id == prev_port: + mac_to_check = prev_macs[list(prev_ports).index(prev_port)] + break + + LOG.info("Detaching port {} from vm {}".format(port_id, vm_id)) + args = '{} {}'.format(vm_id, port_id) + code, output = cli.nova('interface-detach', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code == 1: + return code, output + + post_ports = network_helper.get_ports(server=vm_id, auth_info=auth_info, + con_ssh=con_ssh) + if port_id in post_ports: + err_msg = "Port {} is not detached from VM {}".format(port_id, vm_id) + if fail_ok: + return 2, err_msg + else: + raise exceptions.NeutronError( + 'port {} is still listed for vm {} after detaching'.format( + port_id, vm_id)) + + succ_msg = "Port {} is successfully detached from VM {}".format(port_id, + vm_id) + LOG.info(succ_msg) + + if cleanup_route and target_ips: + cleanup_routes_for_vifs(vm_id=vm_id, vm_ips=target_ips, reboot=True) + + if verify_virsh and mac_to_check: + if not (cleanup_route and target_ips): + reboot_vm(vm_id=vm_id, auth_info=auth_info, con_ssh=con_ssh) + + check_devs_detached(vm_id=vm_id, mac_addrs=mac_to_check, + con_ssh=con_ssh) + + return 0, succ_msg + + +def check_devs_detached(vm_id, mac_addrs, con_ssh=None): + if isinstance(mac_addrs, str): + mac_addrs = [mac_addrs] + + wait_for_vm_pingable_from_natbox(vm_id, con_ssh=con_ssh) + + LOG.info("Check dev detached from vm") + vm_err = '' + with ssh_to_vm_from_natbox(vm_id=vm_id, con_ssh=con_ssh, + retry_timeout=180) as vm_ssh: + for mac_addr in mac_addrs: + if vm_ssh.exec_cmd('ip addr | grep -B 1 "{}"'.format(mac_addr))[0] \ + == 0: + vm_err += 'Interface with mac address {} still exists in ' \ + 'vm\n'.format(mac_addr) + + LOG.info("Check virsh xmldump on compute host") + inst_name, vm_host = get_vm_values(vm_id, + fields=[":instance_name", ":host"], + strict=False) + host_err = '' + with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh: + for mac_addr in mac_addrs: + if host_ssh.exec_sudo_cmd( + 'virsh dumpxml {} | grep -B 1 -A 1 "{}"'.format( + inst_name, mac_addr))[0] == 0: + host_err += 'VM interface with mac address {} still exists in' \ + ' virsh\n'.format(mac_addr) + assert not host_err, host_err + + assert not vm_err, vm_err + + +def evacuate_vms(host, vms_to_check, con_ssh=None, timeout=600, + wait_for_host_up=False, fail_ok=False, post_host=None, + force=True, ping_vms=False): + """ + Evacuate given vms by rebooting their host. VMs should be on specified + host already when this keyword called. + Args: + host (str): host to reboot + vms_to_check (list): vms to check status for after host reboot + con_ssh (SSHClient): + timeout (int): Max time to wait for vms to reach active state after + reboot -f initiated on host + wait_for_host_up (bool): whether to wait for host reboot completes + before checking vm status + fail_ok (bool): whether to return or to fail test when vm(s) failed + to evacuate + post_host (str): expected host for vms to be evacuated to + force (bool): whether to use 'reboot -f'. This param is only used if + vlm=False. 
+ ping_vms (bool): whether to ping vms after evacuation + + Returns (tuple): ( (int), (list)) + - (0, []) all vms evacuated successfully. i.e., active state, + host changed, pingable from NatBox + - (1, ) some vms did not reach active state after + host reboot + - (2, ) some vms' host did not change after host reboot + + """ + if isinstance(vms_to_check, str): + vms_to_check = [vms_to_check] + + HostsToRecover.add(host) + is_swacted = False + standby = None + if wait_for_host_up: + active, standby = system_helper.get_active_standby_controllers( + con_ssh=con_ssh) + if standby and active == host: + is_swacted = True + + is_sx = system_helper.is_aio_simplex() + + LOG.tc_step("'sudo reboot -f' from {}".format(host)) + host_helper.reboot_hosts(host, wait_for_offline=True, + wait_for_reboot_finish=False, force_reboot=force, + con_ssh=con_ssh) + + if is_sx: + host_helper.wait_for_hosts_ready(hosts=host, con_ssh=con_ssh) + + try: + LOG.tc_step( + "Wait for vms to reach ERROR or REBUILD state with best effort") + if not is_sx: + wait_for_vms_values(vms_to_check, + value=[VMStatus.ERROR, VMStatus.REBUILD], + fail_ok=True, timeout=120, + con_ssh=con_ssh) + + LOG.tc_step( + "Check vms are in Active state and moved to other host(s) (" + "non-sx) after host failure") + res, active_vms, inactive_vms = wait_for_vms_values( + vms=vms_to_check, value=VMStatus.ACTIVE, timeout=timeout, + con_ssh=con_ssh) + + if not is_sx: + vms_host_err = [] + for vm in vms_to_check: + if post_host: + if get_vm_host(vm) != post_host: + vms_host_err.append(vm) + else: + if get_vm_host(vm) == host: + vms_host_err.append(vm) + + if vms_host_err: + if post_host: + err_msg = "Following VMs is not moved to expected host " \ + "{} from {}: {}\nVMs did not reach Active " \ + "state: {}".format(post_host, host, vms_host_err, + inactive_vms) + else: + err_msg = "Following VMs stayed on the same host {}: " \ + "{}\nVMs did not reach Active state: {}".\ + format(host, vms_host_err, inactive_vms) + + if fail_ok: + LOG.warning(err_msg) + return 1, vms_host_err + raise exceptions.VMError(err_msg) + + if inactive_vms: + err_msg = "VMs did not reach Active state after vm host rebooted:" \ + " {}".format(inactive_vms) + if fail_ok: + LOG.warning(err_msg) + return 2, inactive_vms + raise exceptions.VMError(err_msg) + + if ping_vms: + LOG.tc_step("Ping vms after evacuated") + for vm_ in vms_to_check: + wait_for_vm_pingable_from_natbox(vm_id=vm_, + timeout=VMTimeout.DHCP_RETRY) + + LOG.info("All vms are successfully evacuated to other host") + return 0, [] + + finally: + if wait_for_host_up: + LOG.tc_step("Waiting for {} to recover".format(host)) + host_helper.wait_for_hosts_ready(host, con_ssh=con_ssh) + # Do not fail the test due to task affining incomplete for now to + # unblock test case. + host_helper.wait_for_tasks_affined(host=host, con_ssh=con_ssh, + fail_ok=True) + if is_swacted: + host_helper.wait_for_tasks_affined(standby, con_ssh=con_ssh, + fail_ok=True) + time.sleep(60) # Give some idle time before continue. 
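+            # Best-effort wait below: on AIO duplex systems the forced reboot
+            # may leave a high CPU usage alarm behind, so give it a chance to
+            # clear before returning.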
+ if system_helper.is_aio_duplex(con_ssh=con_ssh): + system_helper.wait_for_alarm_gone( + alarm_id=EventLogID.CPU_USAGE_HIGH, fail_ok=True, + check_interval=30) + + +def boot_vms_various_types(storage_backing=None, target_host=None, + cleanup='function', avail_zone='nova', vms_num=5): + """ + Boot following 5 vms and ensure they are pingable from NatBox: + - vm1: ephemeral=0, swap=0, boot_from_volume + - vm2: ephemeral=1, swap=1, boot_from_volume + - vm3: ephemeral=0, swap=0, boot_from_image + - vm4: ephemeral=0, swap=0, boot_from_image, attach_volume + - vm5: ephemeral=1, swap=1, boot_from_image + Args: + storage_backing (str|None): storage backing to set in flavor spec. + When None, storage backing which used by + most up hypervisors will be used. + target_host (str|None): Boot vm on target_host when specified. (admin + role has to be added to tenant under test) + cleanup (str|None): Scope for resource cleanup, valid values: + 'function', 'class', 'module', None. + When None, vms/volumes/flavors will be kept on system + avail_zone (str): availability zone to boot the vms + vms_num + + Returns (list): list of vm ids + + """ + LOG.info("Create a flavor without ephemeral or swap disks") + flavor_1 = \ + nova_helper.create_flavor('flv_rootdisk', + storage_backing=storage_backing, + cleanup=cleanup)[1] + + LOG.info("Create another flavor with ephemeral and swap disks") + flavor_2 = nova_helper.create_flavor('flv_ephemswap', ephemeral=1, swap=512, + storage_backing=storage_backing, + cleanup=cleanup)[1] + + launched_vms = [] + for i in range(int(math.ceil(vms_num / 5.0))): + LOG.info( + "Boot vm1 from volume with flavor flv_rootdisk and wait for it " + "pingable from NatBox") + vm1_name = "vol_root" + vm1 = boot_vm(vm1_name, flavor=flavor_1, source='volume', + avail_zone=avail_zone, vm_host=target_host, + cleanup=cleanup)[1] + + wait_for_vm_pingable_from_natbox(vm1) + launched_vms.append(vm1) + if len(launched_vms) == vms_num: + break + + LOG.info( + "Boot vm2 from volume with flavor flv_localdisk and wait for it " + "pingable from NatBox") + vm2_name = "vol_ephemswap" + vm2 = boot_vm(vm2_name, flavor=flavor_2, source='volume', + avail_zone=avail_zone, vm_host=target_host, + cleanup=cleanup)[1] + + wait_for_vm_pingable_from_natbox(vm2) + launched_vms.append(vm2) + if len(launched_vms) == vms_num: + break + + LOG.info( + "Boot vm3 from image with flavor flv_rootdisk and wait for it " + "pingable from NatBox") + vm3_name = "image_root" + vm3 = boot_vm(vm3_name, flavor=flavor_1, source='image', + avail_zone=avail_zone, vm_host=target_host, + cleanup=cleanup)[1] + + wait_for_vm_pingable_from_natbox(vm3) + launched_vms.append(vm3) + if len(launched_vms) == vms_num: + break + + LOG.info( + "Boot vm4 from image with flavor flv_rootdisk, attach a volume to " + "it and wait for it " + "pingable from NatBox") + vm4_name = 'image_root_attachvol' + vm4 = boot_vm(vm4_name, flavor_1, source='image', avail_zone=avail_zone, + vm_host=target_host, + cleanup=cleanup)[1] + + vol = cinder_helper.create_volume(bootable=False, cleanup=cleanup)[1] + attach_vol_to_vm(vm4, vol_id=vol, cleanup=cleanup) + + wait_for_vm_pingable_from_natbox(vm4) + launched_vms.append(vm4) + if len(launched_vms) == vms_num: + break + + LOG.info( + "Boot vm5 from image with flavor flv_localdisk and wait for it " + "pingable from NatBox") + vm5_name = 'image_ephemswap' + vm5 = boot_vm(vm5_name, flavor_2, source='image', avail_zone=avail_zone, + vm_host=target_host, + cleanup=cleanup)[1] + + wait_for_vm_pingable_from_natbox(vm5) + 
launched_vms.append(vm5) + if len(launched_vms) == vms_num: + break + + assert len(launched_vms) == vms_num + return launched_vms + + +def get_vcpu_model(vm_id, guest_os=None, con_ssh=None): + """ + Get vcpu model of given vm. e.g., Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz + Args: + vm_id (str): + guest_os (str): + con_ssh (SSHClient): + + Returns (str): + + """ + with ssh_to_vm_from_natbox(vm_id, vm_image_name=guest_os, + con_ssh=con_ssh) as vm_ssh: + out = vm_ssh.exec_cmd("cat /proc/cpuinfo | grep --color=never " + "'model name'", fail_ok=False)[1] + vcpu_model = out.strip().splitlines()[0].split(sep=': ')[1].strip() + + LOG.info("VM {} cpu model: {}".format(vm_id, vcpu_model)) + return vcpu_model + + +def get_quotas(quotas, default=False, tenant=None, auth_info=None, + con_ssh=None): + """ + Get openstack quotas + Args: + quotas (str|list|tuple): + default (bool) + tenant (str|None): Only used if admin user is used in auth_info + auth_info (dict): + con_ssh: + + Returns (list): + + """ + if auth_info is None: + auth_info = Tenant.get_primary() + + args = '' + if default: + args += '--default' + if tenant and auth_info['user'] == 'admin': + args += ' {}'.format(tenant) + + if isinstance(quotas, str): + quotas = [quotas] + + table_ = table_parser.table( + cli.openstack('quota show', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + values = [] + for item in quotas: + val = table_parser.get_value_two_col_table(table_, item) + try: + val = eval(val) + except (NameError, SyntaxError): + pass + values.append(val) + + return values + + +def get_quota_details_info(component='compute', tenant=None, detail=True, + resources=None, + auth_info=Tenant.get('admin'), con_ssh=None): + """ + Get quota details table from openstack quota list --detail + Args: + component (str): compute, network or volume + tenant: + detail (bool) + resources (str|list|tuple|None): filter out table. Used only if + detail is True and component is not volume + auth_info: + con_ssh: + + Returns (dict): All keys are converted to lower case. + e.g., + {'server_groups': {'in use': 0, 'reserved': 1, 'limit': 10}, + ...} + + """ + valid_components = ('compute', 'network', 'volume') + if component not in valid_components: + raise ValueError( + "Please specify a valid component: {}".format(valid_components)) + + if not tenant: + tenant = Tenant.get_primary()['tenant'] + + detail_str = ' --detail' if detail and component != 'volume' else '' + args = '--project={} --{}{}'.format(tenant, component, detail_str) + + table_ = table_parser.table( + cli.openstack('quota list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + key_header = 'Project ID' + if detail_str: + if resources: + table_ = table_parser.filter_table(table_, Resource=resources) + key_header = 'resource' + + table_ = table_parser.row_dict_table(table_, key_header=key_header, + lower_case=True, + eliminate_keys=key_header) + return {k: int(v) for k, v in table_.items()} + + +def set_quotas(tenant=None, auth_info=Tenant.get('admin'), con_ssh=None, + sys_con_for_dc=True, fail_ok=False, + **kwargs): + """ + Set openstack quotas + Args: + tenant (str): + auth_info (dict): + con_ssh: + sys_con_for_dc (bool): + fail_ok (bool): + **kwargs: quotas to set. 
e.g., **{'instances': 10, 'volumes': 20} + + Returns (tuple): + + """ + if not tenant: + tenant = Tenant.get_primary()['tenant'] + if not auth_info: + auth_info = Tenant.get_primary() + if ProjVar.get_var('IS_DC') and sys_con_for_dc and auth_info['region'] \ + != 'SystemController': + auth_info = Tenant.get(auth_info['user'], dc_region='SystemController') + + args = common.parse_args( + args_dict={k.replace('_', '-'): v for k, v in kwargs.items()}) + args = '{} {}'.format(args, tenant) + code, output = cli.openstack('quota set', args, ssh_client=con_ssh, + fail_ok=fail_ok, auth_info=auth_info) + if code > 0: + return 1, output + + msg = '{} quotas set successfully'.format(tenant) + LOG.info(msg) + return 0, msg + + +def ensure_vms_quotas(vms_num=10, cores_num=None, vols_num=None, ram=None, + tenant=None, auth_info=Tenant.get('admin'), + con_ssh=None): + """ + Update instances, cores, volumes quotas to given numbers + Args: + vms_num (int): max number of instances allowed for given tenant + cores_num (int|None): twice of the vms quota when None + vols_num (int|None): twice of the vms quota when None + ram (int|None) + tenant (None|str): + auth_info (dict): auth info for admin user + con_ssh (SSHClient): + + """ + if not vols_num: + vols_num = 2 * vms_num + if not cores_num: + cores_num = 2 * vms_num + if not ram: + ram = 2048 * vms_num + + if not tenant: + tenant = Tenant.get_primary()['tenant'] + + volumes_quota, vms_quota, cores_quota, ram_quota = get_quotas( + quotas=['volumes', 'instances', 'cores', 'ram'], + con_ssh=con_ssh, tenant=tenant, auth_info=auth_info) + kwargs = {} + if vms_num > vms_quota: + kwargs['instances'] = vms_num + if cores_num > cores_quota: + kwargs['cores'] = cores_num + if vols_num > volumes_quota: + kwargs['volumes'] = vols_num + if ram > ram_quota: + kwargs['ram'] = ram + + if kwargs: + set_quotas(con_ssh=con_ssh, tenant=tenant, auth_info=auth_info, + **kwargs) + + +def launch_vms(vm_type, count=1, nics=None, flavor=None, storage_backing=None, + image=None, boot_source=None, + guest_os=None, avail_zone=None, target_host=None, ping_vms=False, + con_ssh=None, auth_info=None, + cleanup='function', **boot_vm_kwargs): + """ + + Args: + vm_type: + count: + nics: + flavor: + storage_backing (str): + storage backend for flavor to be created + only used if flavor is None + image: + boot_source: + guest_os + avail_zone: + target_host: + ping_vms + con_ssh: + auth_info: + cleanup: + boot_vm_kwargs (dict): + additional kwargs to pass to boot_vm + + Returns: + + """ + + if not flavor: + flavor = nova_helper.create_flavor(name=vm_type, vcpus=2, + storage_backing=storage_backing, + cleanup=cleanup)[1] + extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'} + + if vm_type in ['vswitch', 'dpdk', 'vhost']: + extra_specs.update({FlavorSpec.VCPU_MODEL: 'SandyBridge', + FlavorSpec.MEM_PAGE_SIZE: '2048'}) + + nova_helper.set_flavor(flavor=flavor, **extra_specs) + + resource_id = None + boot_source = boot_source if boot_source else 'volume' + if image: + if boot_source == 'volume': + resource_id = \ + cinder_helper.create_volume(name=vm_type, source_id=image, + auth_info=auth_info, + guest_image=guest_os)[1] + if cleanup: + ResourceCleanup.add('volume', resource_id, scope=cleanup) + else: + resource_id = image + + if not nics: + if vm_type in ['pci-sriov', 'pci-passthrough']: + raise NotImplemented("nics has to be provided for pci-sriov and " + "pci-passthrough") + + if vm_type in ['vswitch', 'dpdk', 'vhost']: + vif_model = 'avp' + else: + vif_model = vm_type + + mgmt_net_id = 
network_helper.get_mgmt_net_id(auth_info=auth_info) + tenant_net_id = network_helper.get_tenant_net_id(auth_info=auth_info) + internal_net_id = network_helper.get_internal_net_id( + auth_info=auth_info) + + nics = [{'net-id': mgmt_net_id}, + {'net-id': tenant_net_id, 'vif-model': vif_model}, + {'net-id': internal_net_id, 'vif-model': vif_model}] + + user_data = None + if vm_type in ['vswitch', 'dpdk', 'vhost']: + user_data = network_helper.get_dpdk_user_data(con_ssh=con_ssh) + + vms = [] + for i in range(count): + vm_id = boot_vm(name="{}-{}".format(vm_type, i), flavor=flavor, + source=boot_source, source_id=resource_id, + nics=nics, guest_os=guest_os, avail_zone=avail_zone, + vm_host=target_host, user_data=user_data, + auth_info=auth_info, con_ssh=con_ssh, cleanup=cleanup, + **boot_vm_kwargs)[1] + vms.append(vm_id) + + if ping_vms: + wait_for_vm_pingable_from_natbox(vm_id=vm_id, con_ssh=con_ssh) + return vms, nics + + +def get_ping_loss_duration_between_vms(from_vm, to_vm, net_type='data', + timeout=600, ipv6=False, + start_event=None, + end_event=None, con_ssh=None, + ping_interval=1): + """ + Get ping loss duration in milliseconds from one vm to another + Args: + from_vm (str): id of the ping source vm + to_vm (str): id of the ping destination vm + net_type (str): e.g., data, internal, mgmt + timeout (int): max time to wait for ping loss before force end it + ipv6 (bool): whether to use ping -6 for ipv6 address + start_event (Event): set given event to signal ping has started + end_event (Event): stop ping loss detection if given event is set + con_ssh (SSHClient): + ping_interval (int|float): timeout of ping cmd in seconds + + Returns (int): milliseconds of ping loss duration + + """ + + to_vm_ip = _get_vms_ips(vm_ids=to_vm, net_types=net_type, + con_ssh=con_ssh)[0][0] + with ssh_to_vm_from_natbox(vm_id=from_vm, con_ssh=con_ssh) as from_vm_ssh: + duration = network_helper.get_ping_failure_duration( + server=to_vm_ip, ssh_client=from_vm_ssh, timeout=timeout, + ipv6=ipv6, start_event=start_event, end_event=end_event, + ping_interval=ping_interval) + return duration + + +def get_ping_loss_duration_from_natbox(vm_id, timeout=900, start_event=None, + end_event=None, con_ssh=None, + ping_interval=0.5): + vm_ip = _get_vms_ips(vm_ids=vm_id, net_types='mgmt', con_ssh=con_ssh)[0][0] + natbox_client = NATBoxClient.get_natbox_client() + duration = network_helper.get_ping_failure_duration( + server=vm_ip, ssh_client=natbox_client, timeout=timeout, + start_event=start_event, end_event=end_event, + ping_interval=ping_interval) + return duration + + +def get_ping_loss_duration_on_operation(vm_id, timeout, ping_interval, + oper_func, *func_args, **func_kwargs): + LOG.tc_step("Start pinging vm {} from NatBox on a new thread".format(vm_id)) + start_event = Events("Ping started") + end_event = Events("Operation completed") + ping_thread = MThread(get_ping_loss_duration_from_natbox, vm_id=vm_id, + timeout=timeout, + start_event=start_event, end_event=end_event, + ping_interval=ping_interval) + ping_thread.start_thread(timeout=timeout + 30) + + try: + if start_event.wait_for_event(timeout=60): + LOG.tc_step( + "Perform operation on vm and ensure it's reachable after that") + oper_func(*func_args, **func_kwargs) + # Operation completed. 
Set end flag so ping thread can end properly + time.sleep(3) + end_event.set() + # Expect ping thread to end in less than 1 minute after + # live-migration complete + duration = ping_thread.get_output(timeout=60) + # assert duration, "No ping loss detected" + if duration == 0: + LOG.warning("No ping loss detected") + return duration + + assert False, "Ping failed since start" + finally: + ping_thread.wait_for_thread_end(timeout=5) + + +def collect_guest_logs(vm_id): + LOG.info("Attempt to collect guest logs with best effort") + log_names = ['messages', 'user.log'] + try: + res = _recover_vm(vm_id=vm_id) + if not res: + LOG.info( + "VM {} in unrecoverable state, skip collect guest logs.".format( + vm_id)) + return + + with ssh_to_vm_from_natbox(vm_id) as vm_ssh: + for log_name in log_names: + log_path = '/var/log/{}'.format(log_name) + if not vm_ssh.file_exists(log_path): + continue + + local_log_path = '{}/{}_{}'.format( + ProjVar.get_var('GUEST_LOGS_DIR'), log_name, vm_id) + current_user = local_host.get_user() + if current_user == TestFileServer.USER: + vm_ssh.exec_sudo_cmd('chmod -R 755 {}'.format(log_path), + fail_ok=True) + vm_ssh.scp_on_source_to_localhost( + source_file=log_path, + dest_user=current_user, + dest_password=TestFileServer.PASSWORD, + dest_path=local_log_path) + else: + output = vm_ssh.exec_cmd('tail -n 200 {}'.format(log_path), + fail_ok=False)[1] + with open(local_log_path, mode='w') as f: + f.write(output) + return + + except Exception as e: + LOG.warning("Failed to collect guest logs: {}".format(e)) + + +def _recover_vm(vm_id, con_ssh=None): + status = get_vm_status(vm_id=vm_id, con_ssh=con_ssh) + if status == VMStatus.ACTIVE: + return True + elif status == VMStatus.STOPPED: + code, msg = start_vms(vms=vm_id, fail_ok=True) + return code == 0 + elif status == VMStatus.PAUSED: + code, msg = unpause_vm(vm_id=vm_id, fail_ok=True, con_ssh=con_ssh) + if code > 0: + code, msg = resume_vm(vm_id, fail_ok=True, con_ssh=con_ssh) + if code > 0: + return False + return True + else: + return False + + +def get_vim_events(vm_id, event_ids=None, controller=None, con_ssh=None): + """ + Get vim events from nfv-vim-events.log + Args: + vm_id (str): + event_ids (None|str|list|tuple): return only given vim events when + specified + controller (None|str): controller where vim log is on. Use current + active controller if None. + con_ssh (SSHClient): + + Returns (list): list of dictionaries, each dictionary is one event. e.g.,: + [{'log-id': '47', 'event-id': 'instance-live-migrate-begin', ... , + 'timestamp': '2018-03-04 01:34:28.915008'}, + {'log-id': '49', 'event-id': 'instance-live-migrated', ... , + 'timestamp': '2018-03-04 01:35:34.043094'}] + + """ + if not controller: + controller = system_helper.get_active_controller_name() + + if isinstance(event_ids, str): + event_ids = [event_ids] + + with host_helper.ssh_to_host(controller, con_ssh=con_ssh) as controller_ssh: + vm_logs = controller_ssh.exec_cmd( + 'grep --color=never -A 4 -B 6 -E "entity .*{}" ' + '/var/log/nfv-vim-events.log'. 
+ format(vm_id))[1] + + log_lines = vm_logs.splitlines() + vm_events = [] + vm_event = {} + for line in log_lines: + if re.search(' = ', line): + if line.startswith('log-id') and vm_event: + if not event_ids or vm_event['event-id'] in event_ids: + vm_events.append(vm_event) + + vm_event = {} + key, val = re.findall('(.*)= (.*)', line)[0] + vm_event[key.strip()] = val.strip() + + if vm_event and (not event_ids or vm_event['event-id'] in event_ids): + vm_events.append(vm_event) + + LOG.info("VM events: {}".format(vm_events)) + return vm_events + + +def get_live_migrate_duration(vm_id, con_ssh=None): + LOG.info( + "Get live migration duration from nfv-vim-events.log for vm {}".format( + vm_id)) + events = (VimEventID.LIVE_MIG_BEGIN, VimEventID.LIVE_MIG_END) + live_mig_begin, live_mig_end = get_vim_events(vm_id=vm_id, event_ids=events, + con_ssh=con_ssh) + + start_time = live_mig_begin['timestamp'] + end_time = live_mig_end['timestamp'] + duration = common.get_timedelta_for_isotimes(time1=start_time, + time2=end_time).total_seconds() + LOG.info("Live migration for vm {} took {} seconds".format(vm_id, duration)) + + return duration + + +def get_cold_migrate_duration(vm_id, con_ssh=None): + LOG.info("Get cold migration duration from vim-event-log for vm {}".format( + vm_id)) + events = (VimEventID.COLD_MIG_BEGIN, VimEventID.COLD_MIG_END, + VimEventID.COLD_MIG_CONFIRM_BEGIN, VimEventID.COLD_MIG_CONFIRMED) + cold_mig_begin, cold_mig_end, cold_mig_confirm_begin, \ + cold_mig_confirm_end = get_vim_events(vm_id=vm_id, event_ids=events, + con_ssh=con_ssh) + + duration_cold_mig = common.get_timedelta_for_isotimes( + time1=cold_mig_begin['timestamp'], + time2=cold_mig_end['timestamp']).total_seconds() + + duration_confirm = common.get_timedelta_for_isotimes( + time1=cold_mig_confirm_begin['timestamp'], + time2=cold_mig_confirm_end['timestamp']).total_seconds() + + duration = duration_cold_mig + duration_confirm + LOG.info("Cold migrate and confirm for vm {} took {} seconds".format( + vm_id, duration)) + + return duration + + +def live_migrate_force_complete(vm_id, migration_id=None, timeout=300, + fail_ok=False, con_ssh=None): + """ + Run nova live-migration-force-complete against given vm and migration + session. + Args: + vm_id (str): + migration_id (str|int): + timeout: + fail_ok: + con_ssh: + + Returns (tuple): + (0, 'VM is successfully live-migrated after + live-migration-force-complete') + (1, ) # nova live-migration-force-complete cmd + rejected. Only returns if fail_ok=True. 
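+
+    Examples:
+        # illustrative only; assumes a live migration of vm_id is already in
+        # progress so that a migration id can be looked up automatically
+        live_migrate_force_complete(vm_id, fail_ok=True)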
+ + """ + if not migration_id: + migration_id = get_vm_migration_values(vm_id=vm_id, fail_ok=False, + con_ssh=con_ssh)[0] + + # No replacement in openstack client + code, output = cli.nova('live-migration-force-complete', + '{} {}'.format(vm_id, migration_id), + ssh_client=con_ssh, + fail_ok=fail_ok) + + if code > 0: + return 1, output + + wait_for_vm_migration_status(vm_id=vm_id, migration_id=migration_id, + fail_ok=False, timeout=timeout, + con_ssh=con_ssh) + msg = "VM is successfully live-migrated after live-migration-force-complete" + LOG.info(msg) + return 0, msg + + +def get_vm_migration_values(vm_id, field='Id', migration_type='live-migration', + fail_ok=True, con_ssh=None, **kwargs): + """ + Get values for given vm via nova migration-list + Args: + vm_id (str): + field (str): + migration_type(str): valid types: live-migration, migration + fail_ok: + con_ssh: + **kwargs: + + Returns (list): + + """ + migration_tab = nova_helper.get_migration_list_table(con_ssh=con_ssh) + filters = {'Instance UUID': vm_id, 'Type': migration_type} + if kwargs: + filters.update(kwargs) + mig_ids = table_parser.get_values(migration_tab, target_header=field, + **filters) + if not mig_ids and not fail_ok: + raise exceptions.VMError( + "{} has no {} session with filters: {}".format(vm_id, + migration_type, + kwargs)) + + return mig_ids + + +def wait_for_vm_migration_status(vm_id, migration_id=None, migration_type=None, + expt_status='completed', + fail_ok=False, timeout=300, check_interval=5, + con_ssh=None): + """ + Wait for a migration session to reach given status in nova mgiration-list + Args: + vm_id (str): + migration_id (str|int): + migration_type (str): valid types: live-migration, migration + expt_status (str): migration status to wait for. such as completed, + running, etc + fail_ok (bool): + timeout (int): max time to wait for the state + check_interval (int): + con_ssh: + + Returns (tuple): + (0, ) # migration status reached as expected + (1, ) # did not reach given status. This only + returns if fail_ok=True + + """ + if not migration_id: + migration_id = get_vm_migration_values( + vm_id=vm_id, migration_type=migration_type, fail_ok=False, + con_ssh=con_ssh)[0] + + LOG.info("Waiting for migration {} for vm {} to reach {} status".format( + migration_id, vm_id, expt_status)) + end_time = time.time() + timeout + prev_state = None + while time.time() < end_time: + mig_status = get_vm_migration_values(vm_id=vm_id, field='Status', + **{'Id': migration_id})[0] + if mig_status == expt_status: + LOG.info( + "Migration {} for vm {} reached status: {}".format(migration_id, + vm_id, + expt_status)) + return True, expt_status + + if mig_status != prev_state: + LOG.info( + "Migration {} for vm {} is in status - {}".format(migration_id, + vm_id, + mig_status)) + prev_state = mig_status + + time.sleep(check_interval) + + msg = 'Migration {} for vm {} did not reach {} status within {} seconds. ' \ + 'It is in {} status.'.format(migration_id, vm_id, expt_status, + timeout, prev_state) + if fail_ok: + LOG.warning(msg) + return False, prev_state + else: + raise exceptions.VMError(msg) + + +def get_vms_ports_info(vms, rtn_subnet_id=False): + """ + Get VMs' ports' (ip_addr, subnet_cidr_or_id, mac_addr). 
+ + Args: + vms (str|list): + vm_id, or a list of vm_ids + rtn_subnet_id (bool): + replaces cidr with subnet_id in result + + Returns (dict): + {vms[0]: [(ip_addr, subnet, ...], vms[1]: [...], ...} + """ + if not issubclass(type(vms), (list, tuple)): + vms = [vms] + + info = {} + subnet_tab_ = table_parser.table( + cli.openstack('subnet list', auth_info=Tenant.get('admin'))[1]) + for vm in vms: + info[vm] = [] + vm_ports, vm_macs, vm_fixed_ips = network_helper.get_ports( + server=vm, field=('ID', 'MAC Address', 'Fixed IP Addresses')) + for i in range(len(vm_ports)): + port = vm_ports[i] + mac = vm_macs[i] + fixed_ips = vm_fixed_ips[i] + if not isinstance(fixed_ips, list): + fixed_ips = [fixed_ips] + + for fixed_ip in fixed_ips: + subnet_id = fixed_ip['subnet_id'] + ip_addr = fixed_ip['ip_address'] + subnet = subnet_id if rtn_subnet_id else \ + table_parser.get_values(subnet_tab_, 'Subnet', + id=subnet_id)[0] + net_id = table_parser.get_values(subnet_tab_, 'Network', + id=subnet_id)[0] + + LOG.info( + "VM {} port {}: mac={} ip={} subnet={} net_id={}".format( + vm, port, mac, ip_addr, subnet, net_id)) + info[vm].append((port, ip_addr, subnet, mac, net_id)) + + return info + + +def _set_vm_route(vm_id, target_subnet, via_ip, dev_or_mac, persist=True): + # returns True if the targeted VM is vswitch-enabled + # for vswitch-enabled VMs, it must be setup with TisInitServiceScript if + # persist=True + with ssh_to_vm_from_natbox(vm_id) as ssh_client: + vshell, msg = ssh_client.exec_cmd("vshell port-list", fail_ok=True) + vshell = not vshell + if ':' in dev_or_mac: + dev = network_helper.get_eth_for_mac(ssh_client, dev_or_mac, + vshell=vshell) + else: + dev = dev_or_mac + if not vshell: # not avs managed + param = target_subnet, via_ip, dev + LOG.info("Routing {} via {} on interface {}".format(*param)) + ssh_client.exec_sudo_cmd( + "route add -net {} gw {} {}".format(*param), fail_ok=False) + if persist: + LOG.info("Setting persistent route") + ssh_client.exec_sudo_cmd( + "echo -e \"{} via {}\" > " + "/etc/sysconfig/network-scripts/route-{}".format( + *param), fail_ok=False) + return False + else: + param = target_subnet, via_ip, dev + LOG.info( + "Routing {} via {} on interface {}, AVS-enabled".format(*param)) + ssh_client.exec_sudo_cmd( + "sed -i $'s,quit,route add {} {} {} 1\\\\nquit," + "g' /etc/vswitch/vswitch.cmds.default".format( + target_subnet, dev, via_ip), fail_ok=False) + # reload vswitch + ssh_client.exec_sudo_cmd("/etc/init.d/vswitch restart", + fail_ok=False) + if persist: + LOG.info("Setting persistent route") + ssh_client.exec_sudo_cmd( + # ROUTING_STUB + # "192.168.1.0/24,192.168.111.1,eth0" + "sed -i $'s@#ROUTING_STUB@\"{},{}," + "{}\"\\\\n#ROUTING_STUB@g' {}".format( + target_subnet, via_ip, dev, + TisInitServiceScript.configuration_path + ), fail_ok=False) + return True + + +def route_vm_pair(vm1, vm2, bidirectional=True, validate=True): + """ + Route the pair of VMs' data interfaces through internal interfaces + If multiple interfaces available on either of the VMs, the last one is used + If no interfaces available for data/internal network for either VM, + raises IndexError + The internal interfaces for the pair VM must be on the same gateway + no fail_ok option, since if failed, the vm's state is undefined + + Args: + vm1 (str): + vm_id, src if bidirectional=False + vm2 (str): + vm_id, dest if bidirectional=False + bidirectional (bool): + if True, also routes from vm2 to vm1 + validate (bool): + validate pings between the pair over the data network + + Returns (dict): + the 
interfaces used for routing, + {vm_id: {'data': {'ip', 'cidr', 'mac'}, + 'internal':{'ip', 'cidr', 'mac'}}} + """ + if vm1 == vm2: + raise ValueError("cannot route to a VM itself") + + auth_info = Tenant.get('admin') + LOG.info("Collecting VMs' networks") + interfaces = { + vm1: {"data": network_helper.get_tenant_ips_for_vms( + vm1, auth_info=auth_info), + "internal": network_helper.get_internal_ips_for_vms(vm1)}, + vm2: {"data": network_helper.get_tenant_ips_for_vms( + vm2, auth_info=auth_info), + "internal": network_helper.get_internal_ips_for_vms(vm2)}, + } + + for vm, info in get_vms_ports_info([vm1, vm2]).items(): + for port, ip, cidr, mac, net_id in info: + # expect one data and one internal + if ip in interfaces[vm]['data']: + interfaces[vm]['data'] = {'ip': ip, 'cidr': cidr, 'mac': mac, + 'port': port} + elif ip in interfaces[vm]['internal']: + interfaces[vm]['internal'] = {'ip': ip, 'cidr': cidr, + 'mac': mac, 'port': port} + + if interfaces[vm1]['internal']['cidr'] != \ + interfaces[vm2]['internal']['cidr']: + raise ValueError( + "the internal interfaces for the VM pair is not on the same " + "gateway") + + vshell_ = _set_vm_route( + vm1, + interfaces[vm2]['data']['cidr'], interfaces[vm2]['internal']['ip'], + interfaces[vm1]['internal']['mac']) + + if bidirectional: + _set_vm_route(vm2, interfaces[vm1]['data']['cidr'], + interfaces[vm1]['internal']['ip'], + interfaces[vm2]['internal']['mac']) + + for vm in (vm1, vm2): + LOG.info("Add vms' data network ip as allowed address for internal " + "network port") + network_helper.set_port( + port_id=interfaces[vm]['internal']['port'], + auth_info=auth_info, + allowed_addr_pairs={'ip-address': interfaces[vm]['data']['ip']}) + + if validate: + LOG.info("Validating route(s) across data") + ping_between_routed_vms(to_vm=vm2, from_vm=vm1, vshell=vshell_, + bidirectional=bidirectional) + + return interfaces + + +def ping_between_routed_vms(to_vm, from_vm, vshell=True, bidirectional=True, + timeout=120): + """ + Ping between routed vm pair + Args: + to_vm: + from_vm: + vshell: + bidirectional: + timeout: + + Returns: + + """ + ping_vms_from_vm(to_vms=to_vm, from_vm=from_vm, timeout=timeout, + net_types='data', vshell=vshell, + source_net_types='internal') + if bidirectional: + ping_vms_from_vm(to_vms=from_vm, from_vm=to_vm, timeout=timeout, + net_types='data', vshell=vshell, + source_net_types='internal') + + +def setup_kernel_routing(vm_id, **kwargs): + """ + Setup kernel routing function for the specified VM + replicates the operation as in wrs_guest_setup.sh (and comes + with the same assumptions) + in order to persist kernel routing after reboots, the operation has to be + stored in /etc/init.d + see TisInitServiceScript for script details + no fail_ok option, since if failed, the vm's state is undefined + + Args: + vm_id (str): + the VM to be configured + kwargs (dict): + kwargs for TisInitServiceScript.configure + + """ + LOG.info( + "Setting up kernel routing for VM {}, kwargs={}".format(vm_id, kwargs)) + + scp_to_vm(vm_id, TisInitServiceScript.src(), TisInitServiceScript.dst()) + with ssh_to_vm_from_natbox(vm_id) as ssh_client: + r, msg = ssh_client.exec_cmd("cat /proc/sys/net/ipv4/ip_forward", + fail_ok=False) + if msg == "1": + LOG.warn( + "VM {} has ip_forward enabled already, skipping".format(vm_id)) + return + TisInitServiceScript.configure(ssh_client, **kwargs) + TisInitServiceScript.enable(ssh_client) + TisInitServiceScript.start(ssh_client) + + +def setup_avr_routing(vm_id, mtu=1500, vm_type='vswitch', **kwargs): + """ + Setup 
avr routing (vswitch L3) function for the specified VM + replciates the operation as in wrs_guest_setup.sh (and comes with + the same assumptions) + in order to persist kernel routing after reboots, the operation has to be + stored in /etc/init.d + see TisInitServiceScript for script details + no fail_ok option, since if failed, the vm's state is undefined + + Args: + vm_id (str): + the VM to be configured + mtu (int): + 1500 by default + for jumbo frames (9000), tenant net support is required + vm_type (str): + PCI NIC_DEVICE + vhost: "${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_VIRTIO}: + ${PCI_SUBDEVICE_NET}" + any other: "${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_MEMORY}: + ${PCI_SUBDEVICE_AVP}" (default) + kwargs (dict): + kwargs for TisInitServiceScript.configure + + """ + LOG.info( + "Setting up avr routing for VM {}, kwargs={}".format(vm_id, kwargs)) + datas = network_helper.get_tenant_ips_for_vms(vm_id) + data_dict = dict() + try: + internals = network_helper.get_internal_ips_for_vms(vm_id) + except ValueError: + internals = list() + internal_dict = dict() + for vm, info in get_vms_ports_info([vm_id]).items(): + for port, ip, cidr, mac, net_id in info: + if ip in datas: + data_dict[ip] = ipaddress.ip_network(cidr).netmask + elif ip in internals: + internal_dict[ip] = ipaddress.ip_network(cidr).netmask + + interfaces = list() + items = list(data_dict.items()) + list(internal_dict.items()) + + if len(items) > 2: + LOG.warn( + "wrs_guest_setup/tis_automation_init does not support more than " + "two DPDK NICs") + LOG.warn("stripping {} from interfaces".format(items[2:])) + items = items[:2] + + for (ip, netmask), ct in zip(items, range(len(items))): + interfaces.append( + """\"{},{},eth{},{}\"""".format(ip, netmask, ct, str(mtu))) + + nic_device = "" + if vm_type == 'vhost': + nic_device = "\"${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_VIRTIO}:" \ + "${PCI_SUBDEVICE_NET}\"" + + scp_to_vm(vm_id, TisInitServiceScript.src(), TisInitServiceScript.dst()) + with ssh_to_vm_from_natbox(vm_id) as ssh_client: + TisInitServiceScript.configure( + ssh_client, NIC_DEVICE=nic_device, + NIC_COUNT=str(len(items)), FUNCTIONS="avr,", + ROUTES="""( + #ROUTING_STUB +)""", + ADDRESSES="""( + {} +) +""".format("\n ".join(interfaces)), **kwargs) + TisInitServiceScript.enable(ssh_client) + TisInitServiceScript.start(ssh_client) + + +def launch_vm_pair(vm_type='virtio', primary_kwargs=None, secondary_kwargs=None, + **launch_vms_kwargs): + """ + Launch a pair of routed VMs + one on the primary tenant, and the other on the secondary tenant + + Args: + vm_type (str): + one of 'virtio', 'avp', 'dpdk' + primary_kwargs (dict): + launch_vms_kwargs for the VM launched under the primary tenant + secondary_kwargs (dict): + launch_vms_kwargs for the VM launched under the secondary tenant + **launch_vms_kwargs: + additional keyword arguments for launch_vms for both tenants + overlapping keys will be overridden by primary_kwargs and + secondary_kwargs + shall not specify count, ping_vms, auth_info + + Returns (tuple): + (vm_id_on_primary_tenant, vm_id_on_secondary_tenant) + """ + LOG.info("Launch a {} test-observer pair of VMs".format(vm_type)) + for invalid_key in ('count', 'ping_vms'): + if invalid_key in launch_vms_kwargs: + launch_vms_kwargs.pop(invalid_key) + + primary_kwargs = dict() if not primary_kwargs else primary_kwargs + secondary_kwargs = dict() if not secondary_kwargs else secondary_kwargs + if 'auth_info' not in primary_kwargs: + primary_kwargs['auth_info'] = Tenant.get_primary() + if 'auth_info' not in secondary_kwargs: + 
secondary_kwargs['auth_info'] = Tenant.get_secondary() + + if 'nics' not in primary_kwargs or 'nics' not in secondary_kwargs: + if vm_type in ['pci-sriov', 'pci-passthrough']: + raise NotImplemented( + "nics has to be provided for pci-sriov and pci-passthrough") + + if vm_type in ['vswitch', 'dpdk', 'vhost']: + vif_model = 'avp' + else: + vif_model = vm_type + + internal_net_id = network_helper.get_internal_net_id() + for tenant_info in (primary_kwargs, secondary_kwargs): + auth_info_ = tenant_info['auth_info'] + mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info_) + tenant_net_id = network_helper.get_tenant_net_id( + auth_info=auth_info_) + nics = [{'net-id': mgmt_net_id}, + {'net-id': tenant_net_id, 'vif-model': vif_model}, + {'net-id': internal_net_id, 'vif-model': vif_model}] + tenant_info['nics'] = nics + + vm_test = launch_vms(vm_type=vm_type, count=1, ping_vms=True, + **__merge_dict(launch_vms_kwargs, primary_kwargs) + )[0][0] + vm_observer = launch_vms(vm_type=vm_type, count=1, ping_vms=True, + **__merge_dict(launch_vms_kwargs, + secondary_kwargs))[0][0] + + LOG.info("Route the {} test-observer VM pair".format(vm_type)) + if vm_type in ('dpdk', 'vhost', 'vswitch'): + setup_avr_routing(vm_test, vm_type=vm_type) + setup_avr_routing(vm_observer, vm_type=vm_type) + else: + # vm_type in ('virtio', 'avp'): + setup_kernel_routing(vm_test) + setup_kernel_routing(vm_observer) + + route_vm_pair(vm_test, vm_observer) + + return vm_test, vm_observer + + +def get_all_vms(field='ID', con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get VMs for all tenants in the systems + + Args: + field: + con_ssh: + auth_info + + Returns (list): list of all vms on the system + + """ + return get_vms(field=field, all_projects=True, long=False, con_ssh=con_ssh, + auth_info=auth_info) + + +def get_vms_info(fields, vms=None, con_ssh=None, long=True, all_projects=True, + host=None, + auth_info=Tenant.get('admin')): + """ + Get vms values for given fields + Args: + fields (str|list|tuple): + vms: + con_ssh: + long: + all_projects: + host + auth_info: + + Returns (dict): vm as key, values for given fields as value + Examples: + input: fields = [field1, field2] + output: {vm_1: [vm1_field1_value, vm1_field2_value], + vm_2: [vm2_field1_value, vm2_field2_value]} + + """ + if isinstance(fields, str): + fields = (fields,) + fields = ['ID'] + list(fields) + + values = get_vms(vms=vms, field=fields, con_ssh=con_ssh, long=long, + all_projects=all_projects, host=host, + auth_info=auth_info) + vm_ids = values.pop(0) + values = list(zip(*values)) + results = {vm_ids[i]: values[i] for i in range(len(vm_ids))} + + return results + + +def get_vms(vms=None, field='ID', long=False, all_projects=True, host=None, + project=None, project_domain=None, + strict=True, regex=False, con_ssh=None, auth_info=None, **kwargs): + """ + get a list of VM IDs or Names for given tenant in auth_info param. + + Args: + vms (list): filter vms from this list if not None + field (str|tuple|list): 'ID' or 'Name' + con_ssh (SSHClient): controller SSHClient. 
+ auth_info (dict): such as ones in auth.py: auth.ADMIN, auth.TENANT1 + long (bool): whether to use --long in cmd + project (str) + project_domain (str) + all_projects (bool): whether to use --a in cmd + host (str): value for --host arg in cmd + strict (bool): applies to search for value(s) specified in kwargs + regex (bool): whether to use regular expression to search for the + kwargs value(s) + **kwargs: header/value pair to filter out the vms + + Returns (list): list of VMs for tenant(s). + + """ + args_dict = {'--long': long, + '--a': all_projects if auth_info and auth_info[ + 'user'] == 'admin' else None, + '--host': host, + '--project': project, + '--project-domain': project_domain} + args = common.parse_args(args_dict) + table_ = table_parser.table( + cli.openstack('server list', args, ssh_client=con_ssh, + auth_info=auth_info)[1]) + if vms: + table_ = table_parser.filter_table(table_, ID=vms) + + return table_parser.get_multi_values(table_, field, strict=strict, + regex=regex, **kwargs) + + +def get_vm_status(vm_id, con_ssh=None, auth_info=Tenant.get('admin')): + return get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, + auth_info=auth_info)[0] + + +def get_vm_id_from_name(vm_name, con_ssh=None, strict=True, regex=False, + fail_ok=False, auth_info=Tenant.get('admin')): + if not auth_info: + auth_info = Tenant.get_primary() + vm_ids = get_vms(name=vm_name, strict=strict, regex=regex, con_ssh=con_ssh, + auth_info=auth_info) + if not vm_ids: + err_msg = "No vm found with name: {}".format(vm_name) + LOG.info(err_msg) + if fail_ok: + return '' + raise exceptions.VMError(err_msg) + + return vm_ids[0] + + +def get_vm_name_from_id(vm_id, con_ssh=None, auth_info=None): + return get_vm_values(vm_id, fields='name', con_ssh=con_ssh, + auth_info=auth_info)[0] + + +def get_vm_volumes(vm_id, con_ssh=None, auth_info=None): + """ + Get volume ids attached to given vm. 
+ + Args: + vm_id (str): + con_ssh (SSHClient): + auth_info (dict): + + Returns (tuple): list of volume ids attached to specific vm + + """ + table_ = table_parser.table( + cli.openstack('server show', vm_id, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return _get_vm_volumes(table_) + + +def get_vm_values(vm_id, fields, strict=True, con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get vm values via openstack server show + Args: + vm_id (str): + fields (str|list|tuple): fields in openstack server show table + strict (bool): whether to perform a strict search on given field name + con_ssh (SSHClient): + auth_info (dict|None): + + Returns (list): values for given fields + + """ + if isinstance(fields, str): + fields = [fields] + + table_ = table_parser.table( + cli.openstack('server show', vm_id, ssh_client=con_ssh, + auth_info=auth_info)[1]) + + values = [] + for field in fields: + merge = False + if field in ('fault',): + merge = True + value = table_parser.get_value_two_col_table(table_, field, strict, + merge_lines=merge) + if field in ('properties',): + value = table_parser.convert_value_to_dict(value) + elif field in ('security_groups',): + if isinstance(value, str): + value = [value] + value = [re.findall("name='(.*)'", v)[0] for v in value] + values.append(value) + return values + + +def get_vm_fault_message(vm_id, con_ssh=None, auth_info=None): + return get_vm_values(vm_id=vm_id, fields='fault', con_ssh=con_ssh, + auth_info=auth_info)[0] + + +def get_vm_flavor(vm_id, field='id', con_ssh=None, auth_info=None): + """ + Get flavor id of given vm + + Args: + vm_id (str): + field (str): id or name + con_ssh (SSHClient): + auth_info (dict): + + Returns (str): + + """ + flavor = get_vm_values(vm_id, fields='flavor', strict=True, con_ssh=con_ssh, + auth_info=auth_info)[0] + flavor_name, flavor_id = flavor.split('(') + if field == 'id': + flavor = flavor_id.strip().split(')')[0] + else: + flavor = flavor_name.strip() + return flavor + + +def get_vm_host(vm_id, con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get host of given vm via openstack server show + Args: + vm_id: + con_ssh: + auth_info + + Returns (str): + + """ + return get_vm_values(vm_id, ':host', strict=False, con_ssh=con_ssh, + auth_info=auth_info)[0] + + +def get_vms_hosts(vm_ids, con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get vms' hosts via openstack server list + Args: + vm_ids: + con_ssh: + auth_info + + Returns: + + """ + vms_hosts = get_vms_info(vms=vm_ids, fields='host', auth_info=auth_info, + con_ssh=con_ssh) + vms_hosts = [vms_hosts[vm][0] for vm in vm_ids] + + return vms_hosts + + +def get_vms_on_host(hostname, field='ID', con_ssh=None, + auth_info=Tenant.get('admin')): + """ + Get vms on given host + Args: + field: ID or Name + hostname (str):Name of a compute node + con_ssh: + auth_info + + Returns (list): A list of VMs' ID under a hypervisor + + """ + vms = get_vms(host=hostname, all_projects=True, long=False, con_ssh=con_ssh, + auth_info=auth_info, field=field) + return vms + + +def get_vms_per_host(vms=None, con_ssh=None, auth_info=Tenant.get('admin')): + """ + Get vms per host + Args: + vms + con_ssh (SSHClient): + auth_info (dict) + + Returns (dict):return a dictionary where the host(hypervisor) is the key + and value are a list of VMs under the host + + """ + vms_hosts = get_vms_info(vms=vms, fields='host', auth_info=auth_info, + con_ssh=con_ssh, long=True, all_projects=True) + vms_per_host = {} + for vm in vms_hosts: + host = vms_hosts[vm][0] + if host in vms_per_host: + 
vms_per_host[host].append(vm) + else: + vms_per_host[host] = [vm] + + return vms_per_host + + +def _get_boot_info(table_, vm_id, auth_info=None, con_ssh=None): + image = table_parser.get_value_two_col_table(table_, 'image') + if not image: + volumes = _get_vm_volumes(table_) + if len(volumes) == 0: + raise exceptions.VMError( + "Booted from volume, but no volume id found.") + + from keywords import cinder_helper + if len(volumes) == 1: + vol_id = volumes[0] + vol_name, image_info = cinder_helper.get_volume_show_values( + vol_id, auth_info=auth_info, con_ssh=con_ssh, + fields=('name', 'volume_image_metadata')) + LOG.info("VM booted from volume.") + return {'type': 'volume', 'id': vol_id, 'volume_name': vol_name, + 'image_name': image_info['image_name']} + else: + LOG.info( + "VM booted from volume. Multiple volumes found, taking the " + "first boot-able volume.") + for volume in volumes: + bootable, vol_name, image_info = \ + cinder_helper.get_volume_show_values( + volume, + fields=('bootable', 'name', 'volume_image_metadata'), + auth_info=auth_info, con_ssh=con_ssh) + if str(bootable).lower() == 'true': + return {'type': 'volume', 'id': volume, + 'volume_name': vol_name, + 'image_name': image_info['image_name']} + + raise exceptions.VMError( + "VM {} has no bootable volume attached.".format(vm_id)) + + else: + name, img_uuid = image.strip().split(sep='(') + return {'type': 'image', 'id': img_uuid.split(sep=')')[0], + 'image_name': name.strip()} + + +def get_vm_boot_info(vm_id, auth_info=None, con_ssh=None): + """ + Get vm boot source and id. + + Args: + vm_id (str): + auth_info (dict|None): + con_ssh (SSHClient): + + Returns (dict): VM boot info dict. + Format: {'type': , 'id': }. + is either 'volume' or 'image' + + """ + table_ = table_parser.table( + cli.openstack('server show', vm_id, ssh_client=con_ssh, + auth_info=auth_info)[1]) + return _get_boot_info(table_, vm_id=vm_id, auth_info=auth_info, + con_ssh=con_ssh) + + +def get_vm_image_name(vm_id, auth_info=None, con_ssh=None): + """ + + Args: + vm_id (str): + auth_info (dict): + con_ssh (SSHClient): + + Returns (str): image name for the vm. If vm booted from volume, + then image name in volume image metadata will be returned. + + """ + boot_info = get_vm_boot_info(vm_id, auth_info=auth_info, con_ssh=con_ssh) + + return boot_info['image_name'] + + +def _get_vm_volumes(table_): + """ + Args: + table_ (dict): + + Returns (list: A list of volume ids from the novashow_table. 
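+    Example (illustrative placeholder, not real data): a 'volumes_attached'
+    entry of "id='<volume-uuid>'" yields ['<volume-uuid>']; ids are
+    extracted with the id='(.*)' regex below.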
+ + """ + volumes = table_parser.get_value_two_col_table(table_, 'volumes_attached', + merge_lines=False) + if not volumes: + return [] + + if isinstance(volumes, str): + volumes = [volumes] + + return [re.findall("id='(.*)'", volume)[0] for volume in volumes] + + +def get_vm_instance_name(vm_id, con_ssh=None): + return get_vm_values(vm_id, ":instance_name", strict=False, + con_ssh=con_ssh)[0] diff --git a/automated-pytest-suite/pytest.ini b/automated-pytest-suite/pytest.ini new file mode 100644 index 0000000..73c4dfe --- /dev/null +++ b/automated-pytest-suite/pytest.ini @@ -0,0 +1,21 @@ +[pytest] +addopts = -s -rxs -v +testpaths = testcases/functional +log_print = False +markers = + sanity: mark test for sanity run + cpe_sanity: mark tests for cpe sanity + storage_sanity: mark tests for storage sanity + sx_sanity: mark tests for simplex sanity + nightly: nightly regression + sx_nightly: mark tests for simplex nightly regression + platform: mark tests for container platform tests that don't require openstack services + p1: mark test priority as p1 + p2: mark test priority as p2 + p3: mark test priority as p3 + domain_sanity: mark test priority as domain sanity + nics: networking testcases for nic testing + dc: distributed cloud test cases + # features(feature1, feature2, ...): mark impacted feature(s) for a test case. + slow: slow test that possibly involves reboot or lock/unlock host(s) + abslast: test case that absolutely should be run the last diff --git a/automated-pytest-suite/requirements.txt b/automated-pytest-suite/requirements.txt new file mode 100644 index 0000000..c32e60b --- /dev/null +++ b/automated-pytest-suite/requirements.txt @@ -0,0 +1,6 @@ +pytest>=3.1.0,<4.0 +pexpect +requests +selenium +pyvirtualdisplay +PyYAML \ No newline at end of file diff --git a/automated-pytest-suite/setups.py b/automated-pytest-suite/setups.py new file mode 100644 index 0000000..b3c75e3 --- /dev/null +++ b/automated-pytest-suite/setups.py @@ -0,0 +1,750 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import os +import re +import time +import ipaddress +import configparser + +from consts.auth import Tenant, HostLinuxUser, CliAuth, Guest +from consts.stx import Prompt, SUBCLOUD_PATTERN, SysType, GuestImages, Networks +from consts.lab import Labs, add_lab_entry, NatBoxes +from consts.proj_vars import ProjVar +from keywords import host_helper, nova_helper, system_helper, keystone_helper, \ + common, container_helper +from utils import exceptions +from utils.clients.ssh import SSHClient, CONTROLLER_PROMPT, ControllerClient, \ + NATBoxClient, PASSWORD_PROMPT +from utils.tis_log import LOG + + +def less_than_two_controllers(con_ssh=None, + auth_info=Tenant.get('admin_platform')): + return len( + system_helper.get_controllers(con_ssh=con_ssh, auth_info=auth_info)) < 2 + + +def setup_tis_ssh(lab): + con_ssh = ControllerClient.get_active_controller(fail_ok=True) + + if con_ssh is None: + con_ssh = SSHClient(lab['floating ip'], HostLinuxUser.get_user(), + HostLinuxUser.get_password(), + CONTROLLER_PROMPT) + con_ssh.connect(retry=True, retry_timeout=30) + ControllerClient.set_active_controller(con_ssh) + + return con_ssh + + +def setup_vbox_tis_ssh(lab): + if 'external_ip' in lab.keys(): + + con_ssh = ControllerClient.get_active_controller(fail_ok=True) + if con_ssh: + con_ssh.disconnect() + + con_ssh = SSHClient(lab['external_ip'], HostLinuxUser.get_user(), + HostLinuxUser.get_password(), + CONTROLLER_PROMPT, port=lab['external_port']) + con_ssh.connect(retry=True, retry_timeout=30) + ControllerClient.set_active_controller(con_ssh) + + else: + con_ssh = setup_tis_ssh(lab) + + return con_ssh + + +def setup_primary_tenant(tenant): + Tenant.set_primary(tenant) + LOG.info("Primary Tenant for test session is set to {}".format( + Tenant.get(tenant)['tenant'])) + + +def setup_natbox_ssh(natbox, con_ssh): + natbox_ip = natbox['ip'] if natbox else None + if not natbox_ip and not container_helper.is_stx_openstack_deployed( + con_ssh=con_ssh): + LOG.info( + "stx-openstack is not applied and natbox is unspecified. Skip " + "natbox config.") + return None + + NATBoxClient.set_natbox_client(natbox_ip) + nat_ssh = NATBoxClient.get_natbox_client() + ProjVar.set_var(natbox_ssh=nat_ssh) + + setup_keypair(con_ssh=con_ssh, natbox_client=nat_ssh) + + return nat_ssh + + +def setup_keypair(con_ssh, natbox_client=None): + """ + copy private keyfile from controller-0:/opt/platform to natbox: priv_keys/ + Args: + natbox_client (SSHClient): NATBox client + con_ssh (SSHClient) + """ + """ + copy private keyfile from controller-0:/opt/platform to natbox: priv_keys/ + Args: + natbox_client (SSHClient): NATBox client + con_ssh (SSHClient) + """ + if not container_helper.is_stx_openstack_deployed(con_ssh=con_ssh): + LOG.info("stx-openstack is not applied. Skip nova keypair config.") + return + + # ssh private key should now exist under keyfile_path + if not natbox_client: + natbox_client = NATBoxClient.get_natbox_client() + + LOG.info("scp key file from controller to NATBox") + # keyfile path that can be specified in testcase config + keyfile_stx_origin = os.path.normpath(ProjVar.get_var('STX_KEYFILE_PATH')) + + # keyfile will always be copied to sysadmin home dir first and update file + # permission + keyfile_stx_final = os.path.normpath( + ProjVar.get_var('STX_KEYFILE_SYS_HOME')) + public_key_stx = '{}.pub'.format(keyfile_stx_final) + + # keyfile will also be saved to /opt/platform as well, so it won't be + # lost during system upgrade. 
+ keyfile_opt_pform = '/opt/platform/{}'.format( + os.path.basename(keyfile_stx_final)) + + # copy keyfile to following NatBox location. This can be specified in + # testcase config + keyfile_path_natbox = os.path.normpath( + ProjVar.get_var('NATBOX_KEYFILE_PATH')) + + auth_info = Tenant.get_primary() + keypair_name = auth_info.get('nova_keypair', + 'keypair-{}'.format(auth_info['user'])) + nova_keypair = nova_helper.get_keypairs(name=keypair_name, + auth_info=auth_info) + + linux_user = HostLinuxUser.get_user() + nonroot_group = _get_nonroot_group(con_ssh=con_ssh, user=linux_user) + if not con_ssh.file_exists(keyfile_stx_final): + with host_helper.ssh_to_host('controller-0', + con_ssh=con_ssh) as con_0_ssh: + if not con_0_ssh.file_exists(keyfile_opt_pform): + if con_0_ssh.file_exists(keyfile_stx_origin): + # Given private key file exists. Need to ensure public + # key exists in same dir. + if not con_0_ssh.file_exists('{}.pub'.format( + keyfile_stx_origin)) and not nova_keypair: + raise FileNotFoundError( + '{}.pub is not found'.format(keyfile_stx_origin)) + else: + # Need to generate ssh key + if nova_keypair: + raise FileNotFoundError( + "Cannot find private key for existing nova " + "keypair {}".format(nova_keypair)) + + con_0_ssh.exec_cmd("ssh-keygen -f '{}' -t rsa -N ''".format( + keyfile_stx_origin), fail_ok=False) + if not con_0_ssh.file_exists(keyfile_stx_origin): + raise FileNotFoundError( + "{} not found after ssh-keygen".format( + keyfile_stx_origin)) + + # keyfile_stx_origin and matching public key should now exist + # on controller-0 + # copy keyfiles to home dir and opt platform dir + con_0_ssh.exec_cmd( + 'cp {} {}'.format(keyfile_stx_origin, keyfile_stx_final), + fail_ok=False) + con_0_ssh.exec_cmd( + 'cp {}.pub {}'.format(keyfile_stx_origin, public_key_stx), + fail_ok=False) + con_0_ssh.exec_sudo_cmd( + 'cp {} {}'.format(keyfile_stx_final, keyfile_opt_pform), + fail_ok=False) + + # Make sure owner is sysadmin + # If private key exists in opt platform, then it must also exist + # in home dir + con_0_ssh.exec_sudo_cmd( + 'chown {}:{} {}'.format(linux_user, nonroot_group, + keyfile_stx_final), + fail_ok=False) + + # ssh private key should now exists under home dir and opt platform + # on controller-0 + if con_ssh.get_hostname() != 'controller-0': + # copy file from controller-0 home dir to controller-1 + con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(), + source_ip='controller-0', + source_path=keyfile_stx_final, + source_pswd=HostLinuxUser.get_password(), + dest_path=keyfile_stx_final, timeout=60) + + if not nova_keypair: + LOG.info("Create nova keypair {} using public key {}". 
+ format(nova_keypair, public_key_stx)) + if not con_ssh.file_exists(public_key_stx): + con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(), + source_ip='controller-0', + source_path=public_key_stx, + source_pswd=HostLinuxUser.get_password(), + dest_path=public_key_stx, timeout=60) + con_ssh.exec_sudo_cmd('chown {}:{} {}'.format( + linux_user, nonroot_group, public_key_stx), + fail_ok=False) + + if ProjVar.get_var('REMOTE_CLI'): + dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), + os.path.basename(public_key_stx)) + common.scp_from_active_controller_to_localhost( + source_path=public_key_stx, dest_path=dest_path, timeout=60) + public_key_stx = dest_path + LOG.info("Public key file copied to localhost: {}".format( + public_key_stx)) + + nova_helper.create_keypair(keypair_name, public_key=public_key_stx, + auth_info=auth_info) + + natbox_client.exec_cmd( + 'mkdir -p {}'.format(os.path.dirname(keyfile_path_natbox))) + tis_ip = ProjVar.get_var('LAB').get('floating ip') + for i in range(10): + try: + natbox_client.scp_on_dest(source_ip=tis_ip, + source_user=HostLinuxUser.get_user(), + source_pswd=HostLinuxUser.get_password(), + source_path=keyfile_stx_final, + dest_path=keyfile_path_natbox, + timeout=120) + LOG.info("private key is copied to NatBox: {}".format( + keyfile_path_natbox)) + break + except exceptions.SSHException as e: + if i == 9: + raise + + LOG.info(e.__str__()) + time.sleep(10) + + +def _get_nonroot_group(con_ssh, user=None): + if not user: + user = HostLinuxUser.get_user() + groups = con_ssh.exec_cmd('groups {}'.format(user), fail_ok=False)[1] + err = 'Please ensure linux_user {} belongs to both root and non_root ' \ + 'groups'.format(user) + if 'root' not in groups: + raise ValueError(err) + + groups = groups.split(': ')[-1].split() + for group in groups: + if group.strip() != 'root': + return group + + raise ValueError('Please ensure linux_user {} belongs to both root ' + 'and at least one non-root groups'.format(user)) + + +def get_lab_dict(labname): + labname = labname.strip().lower().replace('-', '_') + labs = get_labs_list() + + for lab in labs: + if labname in lab.get('name').replace('-', '_').lower().strip() \ + or labname == lab.get('short_name').replace('-', '_').\ + lower().strip() or labname == lab.get('floating ip'): + return lab + else: + return add_lab_entry(labname) + + +def get_labs_list(): + labs = [getattr(Labs, item) for item in dir(Labs) if + not item.startswith('__')] + labs = [lab_ for lab_ in labs if isinstance(lab_, dict)] + return labs + + +def get_natbox_dict(natboxname, user=None, password=None, prompt=None): + natboxname = natboxname.lower().strip() + natboxes = [getattr(NatBoxes, item) for item in dir(NatBoxes) if + item.startswith('NAT_')] + + for natbox in natboxes: + if natboxname.replace('-', '_') in natbox.get('name').\ + replace('-', '_') or natboxname == natbox.get('ip'): + return natbox + else: + if __get_ip_version(natboxname) == 6: + raise ValueError('Only IPv4 address is supported for now') + + return NatBoxes.add_natbox(ip=natboxname, user=user, + password=password, prompt=prompt) + + +def get_tenant_dict(tenantname): + # tenantname = tenantname.lower().strip().replace('_', '').replace('-', '') + tenants = [getattr(Tenant, item) for item in dir(Tenant) if + not item.startswith('_') and item.isupper()] + + for tenant in tenants: + if tenantname == tenant.get('tenant').replace('_', '').replace('-', ''): + return tenant + else: + raise ValueError("{} is not a valid input".format(tenantname)) + + +def collect_tis_logs(con_ssh): + 
common.collect_software_logs(con_ssh=con_ssh) + + +def get_tis_timestamp(con_ssh): + return con_ssh.exec_cmd('date +"%T"')[1] + + +def set_build_info(con_ssh): + system_helper.get_build_info(con_ssh=con_ssh) + + +def _rsync_files_to_con1(con_ssh=None, central_region=False, + file_to_check=None): + region = 'RegionOne' if central_region else None + auth_info = Tenant.get('admin_platform', dc_region=region) + if less_than_two_controllers(auth_info=auth_info, con_ssh=con_ssh): + LOG.info("Less than two controllers on system. Skip copying file to " + "controller-1.") + return + + LOG.info("rsync test files from controller-0 to controller-1 if not " + "already done") + stx_home = HostLinuxUser.get_home() + if not file_to_check: + file_to_check = '{}/images/tis-centos-guest.img'.format(stx_home) + try: + with host_helper.ssh_to_host("controller-1", + con_ssh=con_ssh) as con_1_ssh: + if con_1_ssh.file_exists(file_to_check): + LOG.info( + "Test files already exist on controller-1. Skip rsync.") + return + + except Exception as e: + LOG.error( + "Cannot ssh to controller-1. Skip rsync. " + "\nException caught: {}".format(e.__str__())) + return + + cmd = "rsync -avr -e 'ssh -o UserKnownHostsFile=/dev/null -o " \ + "StrictHostKeyChecking=no ' " \ + "{}/* controller-1:{}".format(stx_home, stx_home) + + timeout = 1800 + with host_helper.ssh_to_host("controller-0", con_ssh=con_ssh) as con_0_ssh: + LOG.info("rsync files from controller-0 to controller-1...") + con_0_ssh.send(cmd) + + end_time = time.time() + timeout + while time.time() < end_time: + index = con_0_ssh.expect( + [con_0_ssh.prompt, PASSWORD_PROMPT, Prompt.ADD_HOST], + timeout=timeout, + searchwindowsize=100) + if index == 2: + con_0_ssh.send('yes') + + if index == 1: + con_0_ssh.send(HostLinuxUser.get_password()) + + if index == 0: + output = int(con_0_ssh.exec_cmd('echo $?')[1]) + if output in [0, 23]: + LOG.info( + "Test files are successfully copied to controller-1 " + "from controller-0") + break + else: + raise exceptions.SSHExecCommandFailed( + "Failed to rsync files from controller-0 to " + "controller-1") + + else: + raise exceptions.TimeoutException( + "Timed out rsync files to controller-1") + + +def copy_test_files(): + con_ssh = None + central_region = False + if ProjVar.get_var('IS_DC'): + _rsync_files_to_con1( + con_ssh=ControllerClient.get_active_controller( + name=ProjVar.get_var('PRIMARY_SUBCLOUD')), + file_to_check='~/heat/README', + central_region=central_region) + con_ssh = ControllerClient.get_active_controller(name='RegionOne') + central_region = True + + _rsync_files_to_con1(con_ssh=con_ssh, central_region=central_region) + + +def get_auth_via_openrc(con_ssh, use_telnet=False, con_telnet=None): + valid_keys = ['OS_AUTH_URL', + 'OS_ENDPOINT_TYPE', + 'CINDER_ENDPOINT_TYPE', + 'OS_USER_DOMAIN_NAME', + 'OS_PROJECT_DOMAIN_NAME', + 'OS_IDENTITY_API_VERSION', + 'OS_REGION_NAME', + 'OS_INTERFACE', + 'OS_KEYSTONE_REGION_NAME'] + + client = con_telnet if use_telnet and con_telnet else con_ssh + code, output = client.exec_cmd('cat /etc/platform/openrc') + if code != 0: + return None + + lines = output.splitlines() + auth_dict = {} + for line in lines: + if 'export' in line: + if line.split('export ')[1].split(sep='=')[0] in valid_keys: + key, value = line.split(sep='export ')[1].split(sep='=') + auth_dict[key.strip().upper()] = value.strip() + + return auth_dict + + +def is_https(con_ssh): + return keystone_helper.is_https_enabled(con_ssh=con_ssh, source_openrc=True, + auth_info=Tenant.get( + 'admin_platform')) + + +def 
get_version_and_patch_info(): + version = ProjVar.get_var('SW_VERSION')[0] + info = 'Software Version: {}\n'.format(version) + + patches = ProjVar.get_var('PATCH') + if patches: + info += 'Patches:\n{}\n'.format('\n'.join(patches)) + + # LOG.info("SW Version and Patch info: {}".format(info)) + return info + + +def get_system_mode_from_lab_info(lab, multi_region_lab=False, + dist_cloud_lab=False): + """ + + Args: + lab: + multi_region_lab: + dist_cloud_lab: + + Returns: + + """ + + if multi_region_lab: + return SysType.MULTI_REGION + elif dist_cloud_lab: + return SysType.DISTRIBUTED_CLOUD + + elif 'system_mode' not in lab: + if 'storage_nodes' in lab: + return SysType.STORAGE + elif 'compute_nodes' in lab: + return SysType.REGULAR + + elif len(lab['controller_nodes']) > 1: + return SysType.AIO_DX + else: + return SysType.AIO_SX + + elif 'system_mode' in lab: + if "simplex" in lab['system_mode']: + return SysType.AIO_SX + else: + return SysType.AIO_DX + else: + LOG.warning( + "Can not determine the lab to install system type based on " + "provided information. Lab info: {}" + .format(lab)) + return None + + +def add_ping_failure(test_name): + file_path = '{}{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), + 'ping_failures.txt') + with open(file_path, mode='a') as f: + f.write(test_name + '\n') + + +def set_region(region=None): + """ + set global variable region. + This needs to be called after CliAuth.set_vars, since the custom region + value needs to override what is + specified in openrc file. + + local region and auth url is saved in CliAuth, while the remote region + and auth url is saved in Tenant. + + Args: + region: region to set + + """ + local_region = CliAuth.get_var('OS_REGION_NAME') + if not region: + if ProjVar.get_var('IS_DC'): + region = 'SystemController' + else: + region = local_region + Tenant.set_region(region=region) + ProjVar.set_var(REGION=region) + if re.search(SUBCLOUD_PATTERN, region): + # Distributed cloud, lab specified is a subcloud. + urls = keystone_helper.get_endpoints(region=region, field='URL', + interface='internal', + service_name='keystone') + if not urls: + raise ValueError( + "No internal endpoint found for region {}. Invalid value for " + "--region with specified lab." + "sub-cloud tests can be run on controller, but not the other " + "way round".format( + region)) + Tenant.set_platform_url(urls[0]) + + +def set_sys_type(con_ssh): + sys_type = system_helper.get_sys_type(con_ssh=con_ssh) + ProjVar.set_var(SYS_TYPE=sys_type) + + +def arp_for_fip(lab, con_ssh): + fip = lab['floating ip'] + code, output = con_ssh.exec_cmd( + 'ip addr | grep -B 4 {} | grep --color=never BROADCAST'.format(fip)) + if output: + target_str = output.splitlines()[-1] + dev = target_str.split(sep=': ')[1].split('@')[0] + con_ssh.exec_cmd('arping -c 3 -A -q -I {} {}'.format(dev, fip)) + + +def __get_ip_version(ip_addr): + try: + ip_version = ipaddress.ip_address(ip_addr).version + except ValueError: + ip_version = None + + return ip_version + + +def setup_testcase_config(testcase_config, lab=None, natbox=None): + fip_error = 'A valid IPv4 OAM floating IP has to be specified via ' \ + 'cmdline option --lab=, ' \ + 'or testcase config file has to be provided via ' \ + '--testcase-config with oam_floating_ip ' \ + 'specified under auth_platform section.' 
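+    # Note: the file parsed below is a standard configparser INI file with
+    # [auth], [natbox], [guest_image], [guest_keypair] and [guest_networks]
+    # sections; see stx-test_template.conf in this suite for a full example.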
+ if not testcase_config: + if not lab: + raise ValueError(fip_error) + return lab, natbox + + testcase_config = os.path.expanduser(testcase_config) + auth_section = 'auth' + guest_image_section = 'guest_image' + guest_networks_section = 'guest_networks' + guest_keypair_section = 'guest_keypair' + natbox_section = 'natbox' + + config = configparser.ConfigParser() + config.read(testcase_config) + + # + # Update global variables for auth section + # + # Update OAM floating IP + if lab: + fip = lab.get('floating ip') + config.set(auth_section, 'oam_floating_ip', fip) + else: + fip = config.get(auth_section, 'oam_floating_ip', fallback='').strip() + lab = get_lab_dict(fip) + + if __get_ip_version(fip) != 4: + raise ValueError(fip_error) + + # controller-0 oam ip is updated with best effort if a valid IPv4 IP is + # provided + if not lab.get('controller-0 ip') and config.get(auth_section, + 'controller0_oam_ip', + fallback='').strip(): + con0_ip = config.get(auth_section, 'controller0_oam_ip').strip() + if __get_ip_version(con0_ip) == 4: + lab['controller-0 ip'] = con0_ip + else: + LOG.info( + "controller0_oam_ip specified in testcase config file is not " + "a valid IPv4 address. Ignore.") + + # Update linux user credentials + if config.get(auth_section, 'linux_username', fallback='').strip(): + HostLinuxUser.set_user( + config.get(auth_section, 'linux_username').strip()) + if config.get(auth_section, 'linux_user_password', fallback='').strip(): + HostLinuxUser.set_password( + config.get(auth_section, 'linux_user_password').strip()) + + # Update openstack keystone user credentials + auth_dict_map = { + 'platform_admin': 'admin_platform', + 'admin': 'admin', + 'test1': 'tenant1', + 'test2': 'tenant2', + } + for conf_prefix, dict_name in auth_dict_map.items(): + kwargs = {} + default_auth = Tenant.get(dict_name) + conf_user = config.get(auth_section, '{}_username'.format(conf_prefix), + fallback='').strip() + conf_password = config.get(auth_section, + '{}_password'.format(conf_prefix), + fallback='').strip() + conf_project = config.get(auth_section, + '{}_project_name'.format(conf_prefix), + fallback='').strip() + conf_domain = config.get(auth_section, + '{}_domain_name'.format(conf_prefix), + fallback='').strip() + conf_keypair = config.get(auth_section, + '{}_nova_keypair'.format(conf_prefix), + fallback='').strip() + if conf_user and conf_user != default_auth.get('user'): + kwargs['username'] = conf_user + if conf_password and conf_password != default_auth.get('password'): + kwargs['password'] = conf_password + if conf_project and conf_project != default_auth.get('tenant'): + kwargs['tenant'] = conf_project + if conf_domain and conf_domain != default_auth.get('domain'): + kwargs['domain'] = conf_domain + if conf_keypair and conf_keypair != default_auth.get('nova_keypair'): + kwargs['nova_keypair'] = conf_keypair + + if kwargs: + Tenant.update(dict_name, **kwargs) + + # + # Update global variables for natbox section + # + natbox_host = config.get(natbox_section, 'natbox_host', fallback='').strip() + natbox_user = config.get(natbox_section, 'natbox_user', fallback='').strip() + natbox_password = config.get(natbox_section, 'natbox_password', + fallback='').strip() + natbox_prompt = config.get(natbox_section, 'natbox_prompt', + fallback='').strip() + if natbox_host and (not natbox or natbox_host != natbox['ip']): + natbox = get_natbox_dict(natbox_host, user=natbox_user, + password=natbox_password, prompt=natbox_prompt) + # + # Update global variables for guest_image section + # + img_file_dir = 
config.get(guest_image_section, 'img_file_dir', + fallback='').strip() + glance_image_name = config.get(guest_image_section, 'glance_image_name', + fallback='').strip() + img_file_name = config.get(guest_image_section, 'img_file_name', + fallback='').strip() + img_disk_format = config.get(guest_image_section, 'img_disk_format', + fallback='').strip() + min_disk_size = config.get(guest_image_section, 'min_disk_size', + fallback='').strip() + img_container_format = config.get(guest_image_section, + 'img_container_format', + fallback='').strip() + image_ssh_user = config.get(guest_image_section, 'image_ssh_user', + fallback='').strip() + image_ssh_password = config.get(guest_image_section, 'image_ssh_password', + fallback='').strip() + + if img_file_dir and img_file_dir != GuestImages.DEFAULT['image_dir']: + # Update default image file directory + img_file_dir = os.path.expanduser(img_file_dir) + if not os.path.isabs(img_file_dir): + raise ValueError( + "Please provide a valid absolute path for img_file_dir " + "under guest_image section in testcase config file") + GuestImages.DEFAULT['image_dir'] = img_file_dir + + if glance_image_name and glance_image_name != GuestImages.DEFAULT['guest']: + # Update default glance image name + GuestImages.DEFAULT['guest'] = glance_image_name + if glance_image_name not in GuestImages.IMAGE_FILES: + # Add guest image info to consts.stx.GuestImages + if not (img_file_name and img_disk_format and min_disk_size): + raise ValueError( + "img_file_name and img_disk_format under guest_image " + "section have to be " + "specified in testcase config file") + + img_container_format = img_container_format if \ + img_container_format else 'bare' + GuestImages.IMAGE_FILES[glance_image_name] = \ + (None, min_disk_size, img_file_name, img_disk_format, + img_container_format) + + # Add guest login credentials + Guest.CREDS[glance_image_name] = { + 'user': image_ssh_user if image_ssh_user else 'root', + 'password': image_ssh_password if image_ssh_password else None, + } + + # + # Update global variables for guest_keypair section + # + natbox_keypair_dir = config.get(guest_keypair_section, 'natbox_keypair_dir', + fallback='').strip() + private_key_path = config.get(guest_keypair_section, 'private_key_path', + fallback='').strip() + + if natbox_keypair_dir: + natbox_keypair_path = os.path.join(natbox_keypair_dir, + 'keyfile_{}.pem'.format( + lab['short_name'])) + ProjVar.set_var(NATBOX_KEYFILE_PATH=natbox_keypair_path) + if private_key_path: + ProjVar.set_var(STX_KEYFILE_PATH=private_key_path) + + # + # Update global variables for guest_networks section + # + net_name_patterns = { + 'mgmt': config.get(guest_networks_section, 'mgmt_net_name_pattern', + fallback='').strip(), + 'data': config.get(guest_networks_section, 'data_net_name_pattern', + fallback='').strip(), + 'internal': config.get(guest_networks_section, + 'internal_net_name_pattern', + fallback='').strip(), + 'external': config.get(guest_networks_section, + 'external_net_name_pattern', fallback='').strip() + } + + for net_type, net_name_pattern in net_name_patterns.items(): + if net_name_pattern: + Networks.set_neutron_net_patterns(net_type=net_type, + net_name_pattern=net_name_pattern) + + return lab, natbox diff --git a/automated-pytest-suite/stx-test_template.conf b/automated-pytest-suite/stx-test_template.conf new file mode 100644 index 0000000..937ccaf --- /dev/null +++ b/automated-pytest-suite/stx-test_template.conf @@ -0,0 +1,137 @@ +[auth] +# +# Auth info to ssh to active controller and run platform commands 
+# + +# Linux user info for ssh to StarlingX controller node +# controllers' OAM network floating ip and unit ip if applicable. +# controller_fip is mandatory unless --lab= is provided +# via cmdline. Only IPv4 is supported by test framework for now. +# Required by all configurations. + +oam_floating_ip = +controller0_oam_ip = +controller1_oam_ip = +linux_username = sysadmin +linux_user_password = Li69nux* + +# Platform keystone admin user and project info +platform_admin_username = admin +platform_admin_project_name = admin +platform_admin_password = Li69nux* +platform_admin_domain_name = Default + + +# Non-platform keystone info +# Required if stx-openstack is deployed + +# non-platform keystone: admin user and project info +admin_username = admin +admin_project_name = admin +admin_password = Li69nux* +admin_domain_name = Default + +# non-platform keystone: first test user and tenant. Will be used for most of +# the openstack related test cases. +test1_username = tenant1 +test1_project_name = tenant1 +test1_password = Li69nux* +test1_domain_name = Default +# nova keypair to use when create VM +test1_nova_keypair = keypair-tenant1 + +# non-platform keystone: second test user and tenant. Should be in the same +# domain as first test user and tenant. +test2_username = tenant2 +test2_project_name = tenant2 +test2_password = Li69nux* +test2_domain_name = Default +test2_nova_keypair = keypair-tenant2 + + +[natbox] +# +# NATBox will be used to ping/ssh to a guest +# Required if stx-openstack is deployed +# + +# Info to ssh to a NATBox. If NatBox is localhost from where the tests are +# executed from, set: natbox_host = localhost +natbox_host = +natbox_user = +natbox_password = + +# python regex pattern for natbox prompt, +# default prompt is natbox_user@.*[$#] when unspecified +natbox_prompt = + + +[guest_image] +# +# Glance image info +# Required if stx-openstack is deployed +# + +# Image file path on active controller. Will be used to create glance image +# in some test cases. +img_file_dir = /home/sysadmin/images +img_file_name = tis-centos-guest.img +# minimum root disk size in GiB if this image is used to launch VM +min_disk_size = 2 +img_disk_format=raw +img_container_format = bare + +# Full name of an existing glance image that will be used as default image +# to create cinder volume, VM, etc. If glance_image_name is not provided, +# an glance image will be created from above image file at the begining +# of the test session. +glance_image_name = tis-centos-guest + +# username and password that will be used to ssh to VM that is created +# from above glance image +image_ssh_user = root +image_ssh_password = root + + +[guest_keypair] +# +# Nova keypair to ssh to VM from NATBox without using password in some tests +# Required if stx-openstack is deployed +# + +# Directory to store private keyfile on natbox. +natbox_keypair_dir = ~/priv_keys/ + +# private key path on controller-0 that was used to create above nova keypair. +# If not provided or not exist, a nova keypair will be created using a key from +# ssh-keygen on controller-0. +private_key_path = /home/sysadmin/.ssh/id_rsa + + +[guest_networks] +# +# Neutron networks for openstack VM +# Required if stx-openstack is deployed +# + +# Python pattern for different types of neutron networks - +# used in re.search(, ) +# Pattern needs to be unique for each network type + +# mgmt networks - need to be reachable from above NATBox. Will always be +# used to create first nic of the vm, so that VM can be ping'd or ssh'd +# from NATBox. 
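+# For example, the default pattern below matches both tenant1-mgmt-net and
+# tenant2-mgmt-net.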
+mgmt_net_name_pattern = tenant\d-mgmt-net + +# data networks - usually un-shared. Will be used in some test cases +# that require communication between two VMs +data_net_name_pattern = tenant\d-net + +# internal network - need to be shared among tenants. Will be used in a few +# test cases to route data network traffic via internal interface between +# two VMs that belong to different tenants +internal_net_name_pattern = internal + +# external network - neutron floating ips will be created off this network. +# Needs to be reachable from NATBox. +external_net_name_pattern = external diff --git a/automated-pytest-suite/testcases/__init__.py b/automated-pytest-suite/testcases/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/conftest.py b/automated-pytest-suite/testcases/conftest.py new file mode 100755 index 0000000..7c902fb --- /dev/null +++ b/automated-pytest-suite/testcases/conftest.py @@ -0,0 +1,72 @@ +import pytest + +import setups +from consts.auth import CliAuth, Tenant +from consts.proj_vars import ProjVar +from utils.tis_log import LOG +from utils.clients.ssh import ControllerClient + +natbox_ssh = None +initialized = False + + +@pytest.fixture(scope='session', autouse=True) +def setup_test_session(global_setup): + """ + Setup primary tenant and Nax Box ssh before the first test gets executed. + STX ssh was already set up at collecting phase. + """ + LOG.fixture_step("(session) Setting up test session...") + setups.setup_primary_tenant(ProjVar.get_var('PRIMARY_TENANT')) + + global con_ssh + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + # set build id to be used to upload/write test results + setups.set_build_info(con_ssh) + + # Ensure tis and natbox (if applicable) ssh are connected + con_ssh.connect(retry=True, retry_interval=3, retry_timeout=300) + + # set up natbox connection and copy keyfile + natbox_dict = ProjVar.get_var('NATBOX') + global natbox_ssh + natbox_ssh = setups.setup_natbox_ssh(natbox_dict, con_ssh=con_ssh) + + # set global var for sys_type + setups.set_sys_type(con_ssh=con_ssh) + + # rsync files between controllers + setups.copy_test_files() + + +def pytest_collectstart(): + """ + Set up the ssh session at collectstart. Because skipif condition is + evaluated at the collecting test cases phase. 
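+    For example (illustrative; actual skip conditions vary per test module),
+    a module-level skipif that queries the lab, such as one based on
+    setups.less_than_two_controllers(), needs a working controller ssh
+    connection before any test fixture runs.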
+ """ + global initialized + if not initialized: + global con_ssh + con_ssh = setups.setup_tis_ssh(ProjVar.get_var("LAB")) + ProjVar.set_var(con_ssh=con_ssh) + CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh)) + if setups.is_https(con_ssh): + CliAuth.set_vars(HTTPS=True) + + auth_url = CliAuth.get_var('OS_AUTH_URL') + Tenant.set_platform_url(auth_url) + setups.set_region(region=None) + if ProjVar.get_var('IS_DC'): + Tenant.set_platform_url(url=auth_url, central_region=True) + initialized = True + + +def pytest_runtest_teardown(): + for con_ssh_ in ControllerClient.get_active_controllers( + current_thread_only=True): + con_ssh_.flush() + con_ssh_.connect(retry=True, retry_interval=3, retry_timeout=300) + if natbox_ssh: + natbox_ssh.flush() + natbox_ssh.connect(retry=False) diff --git a/automated-pytest-suite/testcases/functional/__init__.py b/automated-pytest-suite/testcases/functional/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/ceilometer/__init__.py b/automated-pytest-suite/testcases/functional/ceilometer/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/ceilometer/conftest.py b/automated-pytest-suite/testcases/functional/ceilometer/conftest.py new file mode 100755 index 0000000..157d7f8 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/ceilometer/conftest.py @@ -0,0 +1,3 @@ +from testfixtures.resource_mgmt import * +from testfixtures.resource_create import * +from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/ceilometer/test_ceilometer_statistics.py b/automated-pytest-suite/testcases/functional/ceilometer/test_ceilometer_statistics.py new file mode 100755 index 0000000..a8c0cc4 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/ceilometer/test_ceilometer_statistics.py @@ -0,0 +1,102 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time +import random +from datetime import datetime, timedelta +from pytest import mark, skip + +from utils.tis_log import LOG + +from consts.stx import GuestImages +from consts.auth import Tenant +from keywords import common, ceilometer_helper, network_helper, \ + glance_helper, system_helper, gnocchi_helper + + +def _wait_for_measurements(meter, resource_type, extra_query, start_time, + overlap=None, timeout=1860, + check_interval=60): + end_time = time.time() + timeout + + while time.time() < end_time: + values = gnocchi_helper.get_aggregated_measures( + metrics=meter, resource_type=resource_type, start=start_time, + overlap=overlap, extra_query=extra_query)[1] + if values: + return values + + time.sleep(check_interval) + + +@mark.cpe_sanity +@mark.sanity +@mark.sx_nightly +@mark.parametrize('meter', [ + 'image.size' +]) +def test_measurements_for_metric(meter): + """ + Validate statistics for one meter + + """ + LOG.tc_step('Get ceilometer statistics table for image.size meter') + + now = datetime.utcnow() + start = (now - timedelta(minutes=10)) + start = start.strftime("%Y-%m-%dT%H:%M:%S") + image_name = GuestImages.DEFAULT['guest'] + resource_type = 'image' + extra_query = "name='{}'".format(image_name) + overlap = None + + code, output = gnocchi_helper.get_aggregated_measures( + metrics=meter, resource_type=resource_type, start=start, + extra_query=extra_query, fail_ok=True) + if code > 0: + if "Metrics can't being aggregated" in output: + # there was another glance image that has the same + # string in its name + overlap = '0' + else: + assert False, output + + values = output + if code == 0 and values: + assert len(values) <= 4, "Incorrect count for {} {} metric via " \ + "'openstack metric measures aggregation'". 
\ + format(image_name, meter) + else: + values = _wait_for_measurements(meter=meter, + resource_type=resource_type, + extra_query=extra_query, + start_time=start, overlap=overlap) + assert values, "No measurements for image.size for 25+ minutes" + + LOG.tc_step('Check that values are larger than zero') + for val in values: + assert 0 <= float(val), "{} {} value in metric measurements " \ + "table is less than zero".format( + image_name, meter) + + +def check_event_in_tenant_or_admin(resource_id, event_type): + for auth_ in (None, Tenant.get('admin')): + traits = ceilometer_helper.get_events(event_type=event_type, + header='traits:value', + auth_info=auth_) + for trait in traits: + if resource_id in trait: + LOG.info("Resource found in ceilometer events using " + "auth: {}".format(auth_)) + break + else: + continue + break + else: + assert False, "{} event for resource {} was not found under admin or " \ + "tenant".format(event_type, resource_id) diff --git a/automated-pytest-suite/testcases/functional/common/__init__.py b/automated-pytest-suite/testcases/functional/common/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/common/conftest.py b/automated-pytest-suite/testcases/functional/common/conftest.py new file mode 100755 index 0000000..2407e08 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/common/conftest.py @@ -0,0 +1,3 @@ +from testfixtures.resource_mgmt import * +from testfixtures.config_host import * +from testfixtures.resource_create import * diff --git a/automated-pytest-suite/testcases/functional/common/test_host_connections.py b/automated-pytest-suite/testcases/functional/common/test_host_connections.py new file mode 100755 index 0000000..29cacc6 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/common/test_host_connections.py @@ -0,0 +1,66 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark + +from consts.stx import HostAvailState +from keywords import system_helper, network_helper, host_helper +from utils.clients.ssh import ControllerClient +from utils.tis_log import LOG + + +@mark.p3 +def test_ping_hosts(): + con_ssh = ControllerClient.get_active_controller() + + ping_failed_list = [] + for hostname in system_helper.get_hosts(): + LOG.tc_step( + "Send 100 pings to {} from Active Controller".format(hostname)) + ploss_rate, untran_p = network_helper.ping_server(hostname, con_ssh, + num_pings=100, + timeout=300, + fail_ok=True) + if ploss_rate > 0: + if ploss_rate == 100: + ping_failed_list.append( + "{}: Packet loss rate: {}/100\n".format(hostname, + ploss_rate)) + else: + ping_failed_list.append( + "{}: All packets dropped.\n".format(hostname)) + if untran_p > 0: + ping_failed_list.append( + "{}: {}/100 pings are untransmitted within 300 seconds".format( + hostname, untran_p)) + + LOG.tc_step("Ensure all packets are received.") + assert not ping_failed_list, "Dropped/Un-transmitted packets detected " \ + "when ping hosts. 
" \ + "Details:\n{}".format(ping_failed_list) + + +@mark.sanity +@mark.cpe_sanity +@mark.sx_sanity +def test_ssh_to_hosts(): + """ + Test ssh to every host on system from active controller + + """ + hosts_to_ssh = system_helper.get_hosts( + availability=[HostAvailState.AVAILABLE, HostAvailState.ONLINE]) + failed_list = [] + for hostname in hosts_to_ssh: + LOG.tc_step("Attempt SSH to {}".format(hostname)) + try: + with host_helper.ssh_to_host(hostname): + pass + except Exception as e: + failed_list.append("\n{}: {}".format(hostname, e.__str__())) + + assert not failed_list, "SSH to host(s) failed: {}".format(failed_list) diff --git a/automated-pytest-suite/testcases/functional/common/test_system_health.py b/automated-pytest-suite/testcases/functional/common/test_system_health.py new file mode 100755 index 0000000..178f513 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/common/test_system_health.py @@ -0,0 +1,58 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from pytest import mark, fixture + +from utils.tis_log import LOG +from keywords import host_helper, check_helper + + +# Do not check alarms for test in this module, which are read only tests. +@fixture() +def check_alarms(): + pass + + +class TestCoreDumpsAndCrashes: + @fixture(scope='class') + def post_coredumps_and_crash_reports(self): + LOG.fixture_step( + "Gather core dumps and crash reports info for all hosts") + return host_helper.get_coredumps_and_crashreports() + + @mark.abslast + @mark.sanity + @mark.cpe_sanity + @mark.sx_sanity + @mark.parametrize('report_type', [ + 'core_dumps', + 'crash_reports', + ]) + def test_system_coredumps_and_crashes(self, report_type, + post_coredumps_and_crash_reports): + + LOG.tc_step("Check {} does not exist on any host".format(report_type)) + existing_files = {} + for host in post_coredumps_and_crash_reports: + core_dumps, crash_reports = post_coredumps_and_crash_reports[host] + failures = {'core_dumps': core_dumps, + 'crash_reports': crash_reports} + + if failures[report_type]: + existing_files[host] = failures[report_type] + + assert not existing_files, "{} exist on {}".format(report_type, list( + existing_files.keys())) + + +@mark.abslast +@mark.sanity +@mark.cpe_sanity +@mark.sx_sanity +def test_system_alarms(pre_alarms_session): + LOG.tc_step("Gathering system alarms at the end of test session") + check_helper.check_alarms(before_alarms=pre_alarms_session) + LOG.info("No new alarms found after test session.") diff --git a/automated-pytest-suite/testcases/functional/conftest.py b/automated-pytest-suite/testcases/functional/conftest.py new file mode 100755 index 0000000..9408585 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/conftest.py @@ -0,0 +1,5 @@ +# Do NOT remove following imports. 
Needed for test fixture discovery purpose +from testfixtures.resource_mgmt import delete_resources_func, delete_resources_class, delete_resources_module +from testfixtures.recover_hosts import hosts_recover_func, hosts_recover_class, hosts_recover_module +from testfixtures.verify_fixtures import * +from testfixtures.pre_checks_and_configs import * diff --git a/automated-pytest-suite/testcases/functional/fault_mgmt/__init__.py b/automated-pytest-suite/testcases/functional/fault_mgmt/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/fault_mgmt/conftest.py b/automated-pytest-suite/testcases/functional/fault_mgmt/conftest.py new file mode 100755 index 0000000..157d7f8 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/fault_mgmt/conftest.py @@ -0,0 +1,3 @@ +from testfixtures.resource_mgmt import * +from testfixtures.resource_create import * +from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/fault_mgmt/test_fm_on_host_operation.py b/automated-pytest-suite/testcases/functional/fault_mgmt/test_fm_on_host_operation.py new file mode 100755 index 0000000..cb61fc1 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/fault_mgmt/test_fm_on_host_operation.py @@ -0,0 +1,110 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, skip + +from utils import table_parser, cli +from utils.tis_log import LOG + +from consts.stx import EventLogID +from keywords import system_helper, host_helper, common + +from testfixtures.recover_hosts import HostsToRecover + + +@mark.sanity +def test_system_alarms_and_events_on_lock_unlock_compute(no_simplex): + """ + Verify fm alarm-show command + + Test Steps: + - Delete active alarms + - Lock a host + - Check active alarm generated for host lock + - Check relative values are the same in fm alarm-list and fm alarm-show + + - Check host lock 'set' event logged via fm event-list + - Unlock host + - Check active alarms cleared via fm alarm-list + - Check host lock 'clear' event logged via fm event-list + """ + + # Remove following step because it's unnecessary and fails the test when + # alarm is re-generated + # # Clear the alarms currently present + # LOG.tc_step("Clear the alarms table") + # system_helper.delete_alarms() + + # Raise a new alarm by locking a compute node + # Get the compute + compute_host = host_helper.get_up_hypervisors()[0] + if compute_host == system_helper.get_active_controller_name(): + compute_host = system_helper.get_standby_controller_name() + if not compute_host: + skip('Standby controller unavailable') + + LOG.tc_step("Lock a nova hypervisor host {}".format(compute_host)) + pre_lock_time = common.get_date_in_format() + HostsToRecover.add(compute_host) + host_helper.lock_host(compute_host) + + LOG.tc_step("Check host lock alarm is generated") + post_lock_alarms = \ + system_helper.wait_for_alarm(field='UUID', entity_id=compute_host, + reason=compute_host, + alarm_id=EventLogID.HOST_LOCK, + strict=False, + fail_ok=False)[1] + + LOG.tc_step( + "Check related fields in fm alarm-list and fm alarm-show are of the " + "same values") + post_lock_alarms_tab = system_helper.get_alarms_table(uuid=True) + + alarms_l = ['Alarm ID', 'Entity ID', 'Severity', 'Reason Text'] + alarms_s = ['alarm_id', 'entity_instance_id', 'severity', 'reason_text'] + + # Only 1 alarm since we are now checking the specific alarm ID + for post_alarm in post_lock_alarms: + 
LOG.tc_step( + "Verify {} for alarm {} in alarm-list are in sync with " + "alarm-show".format( + alarms_l, post_alarm)) + + alarm_show_tab = table_parser.table(cli.fm('alarm-show', post_alarm)[1]) + alarm_list_tab = table_parser.filter_table(post_lock_alarms_tab, + UUID=post_alarm) + + for i in range(len(alarms_l)): + alarm_l_val = table_parser.get_column(alarm_list_tab, + alarms_l[i])[0] + alarm_s_val = table_parser.get_value_two_col_table(alarm_show_tab, + alarms_s[i]) + + assert alarm_l_val == alarm_s_val, \ + "{} value in alarm-list: {} is different than alarm-show: " \ + "{}".format(alarms_l[i], alarm_l_val, alarm_s_val) + + LOG.tc_step("Check host lock is logged via fm event-list") + system_helper.wait_for_events(entity_instance_id=compute_host, + start=pre_lock_time, timeout=60, + event_log_id=EventLogID.HOST_LOCK, + fail_ok=False, **{'state': 'set'}) + + pre_unlock_time = common.get_date_in_format() + LOG.tc_step("Unlock {}".format(compute_host)) + host_helper.unlock_host(compute_host) + + LOG.tc_step("Check host lock active alarm cleared") + alarm_sets = [(EventLogID.HOST_LOCK, compute_host)] + system_helper.wait_for_alarms_gone(alarm_sets, fail_ok=False) + + LOG.tc_step("Check host lock clear event logged") + system_helper.wait_for_events(event_log_id=EventLogID.HOST_LOCK, + start=pre_unlock_time, + entity_instance_id=compute_host, + fail_ok=False, **{'state': 'clear'}) diff --git a/automated-pytest-suite/testcases/functional/horizon/__init__.py b/automated-pytest-suite/testcases/functional/horizon/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/horizon/conftest.py b/automated-pytest-suite/testcases/functional/horizon/conftest.py new file mode 100644 index 0000000..150887e --- /dev/null +++ b/automated-pytest-suite/testcases/functional/horizon/conftest.py @@ -0,0 +1 @@ +from testfixtures.horizon import * diff --git a/automated-pytest-suite/testcases/functional/horizon/test_hosts.py b/automated-pytest-suite/testcases/functional/horizon/test_hosts.py new file mode 100755 index 0000000..ee85962 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/horizon/test_hosts.py @@ -0,0 +1,322 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import re + +from pytest import fixture, mark + +from consts import horizon +from utils import table_parser, cli +from utils.tis_log import LOG +from utils.horizon.pages.admin.platform import hostinventorypage +from keywords import system_helper + + +@fixture(scope='function') +def host_inventory_pg(admin_home_pg, request): + LOG.fixture_step('Go to Admin > Platform > Host Inventory') + host_inventory_pg = hostinventorypage.HostInventoryPage( + admin_home_pg.driver) + host_inventory_pg.go_to_target_page() + + def teardown(): + LOG.fixture_step('Back to Host Inventory page') + host_inventory_pg.go_to_target_page() + + request.addfinalizer(teardown) + return host_inventory_pg + + +def format_uptime(uptime): + """ + Uptime displays in horizon may display like this format: + 2 weeks, 10 hours + 2 hours, 2 minutes + 45 minutes + ... 
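+    Worked example with the thresholds below: an uptime of 3725 seconds
+    is formatted as '1 hour, 2 minute'.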
+ """ + uptime = int(uptime) + min_ = 60 + hour = min_ * 60 + day = hour * 24 + week = day * 7 + month = week * 4 + + uptime_months = uptime // month + uptime_weeks = uptime % month // week + uptime_days = uptime % month % week // day + uptime_hours = uptime % month % week % day // hour + uptime_mins = uptime % month % week % day % hour // min_ + + if uptime < min_: + return '0 minutes' + elif uptime < hour: + return '{} minute'.format(uptime_mins) + elif uptime < day: + return '{} hour, {} minute'.format(uptime_hours, uptime_mins) + elif uptime < week: + return '{} day, {} hour'.format(uptime_days, uptime_hours) + elif uptime < month: + return '{} week, {} day'.format(uptime_weeks, uptime_days) + elif uptime > week: + return '{} month'.format(uptime_months, uptime_weeks) + + +@mark.platform_sanity +def test_horizon_host_inventory_display(host_inventory_pg): + """ + Test the hosts inventory display: + + Setups: + - Login as Admin + - Go to Admin > Platform > Host Inventory + + Test Steps: + - Test host tables display + + Teardown: + - Back to Host Inventory page + - Logout + + """ + LOG.tc_step('Test host inventory display') + host_inventory_pg.go_to_hosts_tab() + host_list = system_helper.get_hosts() + for host_name in host_list: + LOG.info("Checking {}...".format(host_name)) + headers_map = host_inventory_pg.hosts_table( + host_name).get_cli_horizon_mapping() + fields = list(headers_map.keys()) + cli_values = system_helper.get_host_values(host_name, fields, + rtn_dict=True) + cli_values['uptime'] = format_uptime(cli_values['uptime']) + if cli_values.get('peers'): + cli_values['peers'] = cli_values.get('peers').get('name') + + horizon_vals = host_inventory_pg.horizon_vals(host_name) + for cli_field in fields: + cli_val = cli_values[cli_field] + horizon_field = headers_map[cli_field] + horizon_val = horizon_vals[horizon_field] + if cli_field == 'uptime': + assert re.match(r'\d+ [dhm]', horizon_val) + else: + assert str(cli_val).lower() in horizon_val.lower(), \ + '{} {} display incorrectly, expect: {} actual: {}'. 
\ + format(host_name, horizon_field, cli_val, horizon_val) + + horizon.test_result = True + + +@mark.parametrize('host_name', [ + 'controller-0' +]) +def test_horizon_host_details_display(host_inventory_pg, host_name): + """ + Test the host details display: + + Setups: + - Login as Admin + - Go to Admin > Platform > Host Inventory > Controller-0 + + Test Steps: + - Test host controller-0 overview display + - Test host controller-0 processor display + - Test host controller-0 memory display + - Test host controller-0 storage display + - Test host controller-0 ports display + - Test host controller-0 lldp display + + Teardown: + - Logout + """ + host_table = host_inventory_pg.hosts_table(host_name) + host_details_pg = host_inventory_pg.go_to_host_detail_page(host_name) + + # OVERVIEW TAB + LOG.tc_step('Test host: {} overview display'.format(host_name)) + host_details_pg.go_to_overview_tab() + horizon_vals = host_details_pg.host_detail_overview( + host_table.driver).get_content() + fields_map = host_details_pg.host_detail_overview( + host_table.driver).OVERVIEW_INFO_HEADERS_MAP + cli_host_vals = system_helper.get_host_values(host_name, fields_map.keys(), + rtn_dict=True) + for field in fields_map: + horizon_header = fields_map[field] + cli_host_val = cli_host_vals[field] + horizon_val = horizon_vals.get(horizon_header) + if horizon_val is None: + horizon_val = 'None' + assert cli_host_val == horizon_val, '{} display incorrectly'.\ + format(horizon_header) + else: + assert cli_host_val.upper() in horizon_val.upper(), \ + '{} display incorrectly'.format(horizon_header) + LOG.info('Host: {} overview display correct'.format(host_name)) + + # PROCESSOR TAB + LOG.tc_step('Test host {} processor display'.format(host_name)) + host_details_pg.go_to_processor_tab() + cpu_table = table_parser.table( + cli.system('host-cpu-list {}'.format(host_name))[1]) + expt_cpu_info = { + 'Processor Model:': + table_parser.get_values(cpu_table, 'processor_model')[0], + 'Processors:': str( + len(set(table_parser.get_values(cpu_table, 'processor'))))} + + horizon_cpu_info = host_details_pg.inventory_details_processor_info\ + .get_content() + assert horizon_cpu_info['Processor Model:'] == expt_cpu_info[ + 'Processor Model:'] + assert horizon_cpu_info['Processors:'] == expt_cpu_info['Processors:'] + + # MEMORY TABLE + LOG.tc_step('Test host {} memory display'.format(host_name)) + checking_list = ['mem_total(MiB)', 'mem_avail(MiB)'] + + host_details_pg.go_to_memory_tab() + memory_table = table_parser.table( + cli.system('host-memory-list {}'.format(host_name))[1]) + column_names = host_details_pg.memory_table.column_names + processor_list = table_parser.get_values(memory_table, column_names[0]) + cli_memory_table_dict = table_parser.row_dict_table(memory_table, + column_names[0], + lower_case=False) + + for processor in processor_list: + horizon_vm_pages_val = \ + host_details_pg.get_memory_table_info(processor, column_names[2]) + horizon_memory_val = \ + host_details_pg.get_memory_table_info(processor, 'Memory') + if cli_memory_table_dict[processor]['hugepages(hp)_configured'] == \ + 'False': + assert horizon_vm_pages_val is None, \ + 'Horizon {} display incorrectly'.format(column_names[2]) + else: + for field in checking_list: + assert cli_memory_table_dict[processor][field] in \ + horizon_memory_val, 'Memory {} display incorrectly' + + # STORAGE TABLE + # This test will loop each table and test their display + # Test may fail in following case: + # 1. disk table's Size header eg. 
Size(GiB) used different unit such as + # Size (MiB), Size (TiB) + # 2. lvg table may display different: + # Case 1: Name | State | Access | Size (GiB) | Avail Size(GiB) | + # Current Physical Volume - Current Logical Volumes + # Case 2: Name | State | Access | Size | + # Current Physical Volume - Current Logical Volumes + # Case 2 Size values in horizon are rounded by 2 digits but in CLI not + # rounded + + LOG.tc_step('Test host {} storage display'.format(host_name)) + host_details_pg.go_to_storage_tab() + + cmd_list = ['host-disk-list {}'.format(host_name), + 'host-disk-partition-list {}'.format(host_name), + 'host-lvg-list {}'.format(host_name), + 'host-pv-list {}'.format(host_name)] + table_names = ['disk table', 'disk partition table', + 'local volume groups table', 'physical volumes table'] + + horizon_storage_tables = [host_details_pg.storage_disks_table, + host_details_pg.storage_partitions_table, + host_details_pg.storage_lvg_table, + host_details_pg.storage_pv_table] + cli_storage_tables = [] + for cmd in cmd_list: + cli_storage_tables.append(table_parser.table(cli.system(cmd))[1]) + + for i in range(len(horizon_storage_tables)): + horizon_table = horizon_storage_tables[i] + unique_key = horizon_table.column_names[0] + horizon_row_dict_table = host_details_pg.get_horizon_row_dict( + horizon_table, key_header_index=0) + cli_table = cli_storage_tables[i] + table_dict_unique_key = list(horizon_table.HEADERS_MAP.keys())[ + list(horizon_table.HEADERS_MAP.values()).index(unique_key)] + + cli_row_dict_storage_table = \ + table_parser.row_dict_table(cli_table, + table_dict_unique_key, + lower_case=False) + for key_header in horizon_row_dict_table: + for cli_header in horizon_table.HEADERS_MAP: + horizon_header = horizon_table.HEADERS_MAP[cli_header] + horizon_row_dict = horizon_row_dict_table[key_header] + cli_row_dict = cli_row_dict_storage_table[key_header] + # Solve parser issue: e.g. 
Size (GiB)' should be '558.029' + # not ['5589.', '029'] + cli_val = cli_row_dict[cli_header] + if isinstance(cli_val, list): + cli_row_dict[cli_header] = ''.join(cli_val) + assert horizon_row_dict[horizon_header] == cli_row_dict[ + cli_header], \ + 'In {}: disk: {} {} display incorrectly'.format( + table_names[i], key_header, horizon_header) + LOG.info('{} display correct'.format(table_names[i])) + + # PORT TABLE + LOG.tc_step('Test host {} port display'.format(host_name)) + host_details_pg.go_to_ports_tab() + horizon_port_table = host_details_pg.ports_table() + cli_port_table = table_parser.table( + cli.system('host-ethernet-port-list {}'.format(host_name))[1]) + horizon_row_dict_port_table = host_details_pg.get_horizon_row_dict( + horizon_port_table, key_header_index=0) + + cli_row_dict_port_table = table_parser.row_dict_table(cli_port_table, + 'name', + lower_case=False) + for ethernet_name in cli_row_dict_port_table: + for cli_header in horizon_port_table.HEADERS_MAP: + horizon_header = horizon_port_table.HEADERS_MAP[cli_header] + horizon_row_dict = horizon_row_dict_port_table[ethernet_name] + cli_row_dict = cli_row_dict_port_table[ethernet_name] + if cli_header not in cli_row_dict and cli_header == 'mac address': + cli_val = cli_row_dict['macaddress'] + else: + cli_val = cli_row_dict[cli_header] + horizon_val = horizon_row_dict[horizon_header] + # Solve table parser issue: MAC Address returns list eg: [ + # 'a4:bf:01:35:4a:', '32'] + if isinstance(cli_val, list): + cli_val = ''.join(cli_val) + assert cli_val in horizon_val, '{} display incorrectly'.format( + horizon_header) + + # LLDP TABLE + LOG.tc_step('Test host {} lldp display'.format(host_name)) + host_details_pg.go_to_lldp_tab() + lldp_list_table = table_parser.table( + cli.system('host-lldp-neighbor-list {}'.format(host_name))[1]) + lldp_uuid_list = table_parser.get_values(lldp_list_table, 'uuid') + horizon_lldp_table = host_details_pg.lldp_table() + cli_row_dict_lldp_table = {} + horizon_row_dict_lldp_table = host_details_pg.get_horizon_row_dict( + horizon_lldp_table, key_header_index=1) + for uuid in lldp_uuid_list: + cli_row_dict = {} + lldp_show_table = table_parser.table( + cli.system('lldp-neighbor-show {}'.format(uuid))[1]) + row_dict_key = table_parser.get_value_two_col_table(lldp_show_table, + 'port_identifier') + for cli_header in horizon_lldp_table.HEADERS_MAP: + horizon_header = horizon_lldp_table.HEADERS_MAP[cli_header] + horizon_row_dict = horizon_row_dict_lldp_table[row_dict_key] + cli_row_dict[cli_header] = table_parser.get_value_two_col_table( + lldp_show_table, cli_header) + cli_row_dict_lldp_table[row_dict_key] = cli_row_dict + assert cli_row_dict[cli_header] == \ + horizon_row_dict[horizon_header], \ + 'lldp neighbor:{} {} display incorrectly'.\ + format(row_dict_key, horizon_header) + + horizon.test_result = True diff --git a/automated-pytest-suite/testcases/functional/horizon/test_instances.py b/automated-pytest-suite/testcases/functional/horizon/test_instances.py new file mode 100755 index 0000000..9b911fa --- /dev/null +++ b/automated-pytest-suite/testcases/functional/horizon/test_instances.py @@ -0,0 +1,86 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture, mark + +from consts import horizon +from consts.auth import Tenant +from consts.stx import GuestImages +from keywords import nova_helper +from utils.tis_log import LOG +from utils.horizon import helper +from utils.horizon.regions import messages +from utils.horizon.pages.project.compute import instancespage + + +@fixture(scope='function') +def instances_pg(tenant_home_pg_container, request): + LOG.fixture_step('Go to Project > Compute > Instance') + instance_name = helper.gen_resource_name('instance') + instances_pg = instancespage.InstancesPage( + tenant_home_pg_container.driver, port=tenant_home_pg_container.port) + instances_pg.go_to_target_page() + + def teardown(): + LOG.fixture_step('Back to instance page') + if instances_pg.is_instance_present(instance_name): + instances_pg.delete_instance_by_row(instance_name) + instances_pg.go_to_target_page() + + request.addfinalizer(teardown) + + return instances_pg, instance_name + + +@mark.sanity +@mark.cpe_sanity +@mark.sx_sanity +def test_horizon_create_delete_instance(instances_pg): + """ + Test the instance creation and deletion functionality: + + Setups: + - Login as Tenant + - Go to Project > Compute > Instance + + Teardown: + - Back to Instances page + - Logout + + Test Steps: + - Create a new instance + - Verify the instance appears in the instances table as active + - Delete the newly lunched instance + - Verify the instance does not appear in the table after deletion + """ + instances_pg, instance_name = instances_pg + + mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net']) + flavor_name = nova_helper.get_basic_flavor(rtn_id=False) + guest_img = GuestImages.DEFAULT['guest'] + + LOG.tc_step('Create new instance {}'.format(instance_name)) + instances_pg.create_instance(instance_name, + boot_source_type='Image', + source_name=guest_img, + flavor_name=flavor_name, + network_names=mgmt_net_name, + create_new_volume=False) + assert not instances_pg.find_message_and_dismiss(messages.ERROR) + + LOG.tc_step('Verify the instance appears in the instances table as active') + assert instances_pg.is_instance_active(instance_name) + + LOG.tc_step('Delete instance {}'.format(instance_name)) + instances_pg.delete_instance_by_row(instance_name) + assert instances_pg.find_message_and_dismiss(messages.INFO) + assert not instances_pg.find_message_and_dismiss(messages.ERROR) + + LOG.tc_step( + 'Verify the instance does not appear in the table after deletion') + assert instances_pg.is_instance_deleted(instance_name) + horizon.test_result = True diff --git a/automated-pytest-suite/testcases/functional/mtc/__init__.py b/automated-pytest-suite/testcases/functional/mtc/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/mtc/conftest.py b/automated-pytest-suite/testcases/functional/mtc/conftest.py new file mode 100755 index 0000000..157d7f8 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/mtc/conftest.py @@ -0,0 +1,3 @@ +from testfixtures.resource_mgmt import * +from testfixtures.resource_create import * +from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/mtc/test_evacuate.py b/automated-pytest-suite/testcases/functional/mtc/test_evacuate.py new file mode 100755 index 0000000..f150c29 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/mtc/test_evacuate.py @@ -0,0 +1,146 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture, skip, mark + +from utils.tis_log import LOG +from consts.reasons import SkipHypervisor + +from keywords import vm_helper, host_helper, nova_helper, system_helper, \ + network_helper +from testfixtures.fixture_resources import ResourceCleanup + + +@fixture(scope='module', autouse=True) +def skip_test_if_less_than_two_hosts(no_simplex): + hypervisors = host_helper.get_up_hypervisors() + if len(hypervisors) < 2: + skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS) + + LOG.fixture_step( + "Update instance and volume quota to at least 10 and 20 respectively") + vm_helper.ensure_vms_quotas(vms_num=10) + + return len(hypervisors) + + +class TestDefaultGuest: + + @fixture(scope='class') + def vms_(self, add_admin_role_class): + LOG.fixture_step("Create a flavor without ephemeral or swap disks") + flavor_1 = nova_helper.create_flavor('flv_nolocaldisk')[1] + ResourceCleanup.add('flavor', flavor_1, scope='class') + + LOG.fixture_step("Create a flavor with ephemeral and swap disks") + flavor_2 = \ + nova_helper.create_flavor('flv_localdisk', ephemeral=1, swap=512)[1] + ResourceCleanup.add('flavor', flavor_2, scope='class') + + LOG.fixture_step( + "Boot vm1 from volume with flavor flv_nolocaldisk and wait for it " + "pingable from NatBox") + vm1_name = "vol_nolocal" + vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, source='volume', + cleanup='class')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm1) + + vm_host = vm_helper.get_vm_host(vm_id=vm1) + + LOG.fixture_step( + "Boot vm2 from volume with flavor flv_localdisk and wait for it " + "pingable from NatBox") + vm2_name = "vol_local" + vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, source='volume', + cleanup='class', avail_zone='nova', + vm_host=vm_host)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm2) + + LOG.fixture_step( + "Boot vm3 from image with flavor flv_nolocaldisk and wait for it " + "pingable from NatBox") + vm3_name = "image_novol" + vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, source='image', + cleanup='class', avail_zone='nova', + vm_host=vm_host)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm3) + + LOG.fixture_step( + "Boot vm4 from image with flavor flv_nolocaldisk and wait for it " + "pingable from NatBox") + vm4_name = 'image_vol' + vm4 = vm_helper.boot_vm(vm4_name, flavor_1, source='image', + cleanup='class', avail_zone='nova', + vm_host=vm_host)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm4) + + LOG.fixture_step( + "Attach volume to vm4 which was booted from image: {}.".format(vm4)) + vm_helper.attach_vol_to_vm(vm4) + + return [vm1, vm2, vm3, vm4], vm_host + + @mark.trylast + @mark.sanity + @mark.cpe_sanity + def test_evacuate_vms(self, vms_): + """ + Test evacuated vms + Args: + vms_: (fixture to create vms) + + Pre-requisites: + - At least two up hypervisors on system + + Test Steps: + - Create vms with various options: + - vm booted from cinder volume, + - vm booted from glance image, + - vm booted from glance image, and have an extra cinder + volume attached after launch, + - vm booed from cinder volume with ephemeral and swap disks + - Move vms onto same hypervisor + - sudo reboot -f on the host + - Ensure vms are successfully evacuated to other host + - Live migrate vms back to original host + - Check vms can move back, and vms are still reachable from natbox + - Check system services are enabled and neutron agents are alive + + """ + vms, target_host = vms_ + + pre_res_sys, pre_msg_sys = 
system_helper.wait_for_services_enable( + timeout=20, fail_ok=True) + up_hypervisors = host_helper.get_up_hypervisors() + pre_res_neutron, pre_msg_neutron = \ + network_helper.wait_for_agents_healthy( + up_hypervisors, timeout=20, fail_ok=True) + + LOG.tc_step( + "reboot -f on vms host, ensure vms are successfully evacuated and " + "host is recovered after reboot") + vm_helper.evacuate_vms(host=target_host, vms_to_check=vms, + wait_for_host_up=True, ping_vms=True) + + LOG.tc_step("Check rebooted host can still host vm") + vm_helper.live_migrate_vm(vms[0], destination_host=target_host) + vm_helper.wait_for_vm_pingable_from_natbox(vms[0]) + + LOG.tc_step("Check system services and neutron agents after {} " + "reboot".format(target_host)) + post_res_sys, post_msg_sys = system_helper.wait_for_services_enable( + fail_ok=True) + post_res_neutron, post_msg_neutron = \ + network_helper.wait_for_agents_healthy(hosts=up_hypervisors, + fail_ok=True) + + assert post_res_sys, "\nPost-evac system services stats: {}" \ + "\nPre-evac system services stats: {}". \ + format(post_msg_sys, pre_msg_sys) + assert post_res_neutron, "\nPost evac neutron agents stats: {}" \ + "\nPre-evac neutron agents stats: {}". \ + format(pre_msg_neutron, post_msg_neutron) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_host_operations_negative.py b/automated-pytest-suite/testcases/functional/mtc/test_host_operations_negative.py new file mode 100755 index 0000000..1ecde76 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/mtc/test_host_operations_negative.py @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark + +from utils import cli +from utils.tis_log import LOG + + +@mark.sx_sanity +def test_add_host_simplex_negative(simplex_only): + """ + Test add second controller is rejected on simplex system + Args: + simplex_only: skip if non-sx system detected + + Test Steps: + - On simplex system, check 'system host-add -n controller-1' is rejected + + """ + LOG.tc_step("Check adding second controller is rejected on simplex system") + code, out = cli.system('host-add', '-n controller-1', fail_ok=True) + + assert 1 == code, "Unexpected exitcode for 'system host-add " \ + "controller-1': {}".format(code) + assert 'Adding a host on a simplex system is not allowed' in out, \ + "Unexpected error message: {}".format(out) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_lock_unlock_host.py b/automated-pytest-suite/testcases/functional/mtc/test_lock_unlock_host.py new file mode 100755 index 0000000..98fc7f9 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/mtc/test_lock_unlock_host.py @@ -0,0 +1,93 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time +from pytest import mark, skip, param + +from utils.tis_log import LOG +from consts.stx import HostOperState, HostAvailState +from testfixtures.recover_hosts import HostsToRecover +from keywords import host_helper, system_helper + + +@mark.platform_sanity +def test_lock_active_controller_reject(no_simplex): + """ + Verify lock unlock active controller. 
Expected it to fail + + Test Steps: + - Get active controller + - Attempt to lock active controller and ensure it's rejected + + """ + LOG.tc_step('Retrieve the active controller from the lab') + active_controller = system_helper.get_active_controller_name() + assert active_controller, "No active controller available" + + # lock standby controller node and verify it is successfully locked + LOG.tc_step("Lock active controller and ensure it fail to lock") + exit_code, cmd_output = host_helper.lock_host(active_controller, + fail_ok=True, swact=False, + check_first=False) + assert exit_code == 1, 'Expect locking active controller to ' \ + 'be rejected. Actual: {}'.format(cmd_output) + status = system_helper.get_host_values(active_controller, + 'administrative')[0] + assert status == 'unlocked', "Fail: The active controller was locked." + + +@mark.parametrize('host_type', [ + param('controller', marks=mark.priorities('platform_sanity', + 'sanity', 'cpe_sanity')), + param('compute', marks=mark.priorities('platform_sanity')), + param('storage', marks=mark.priorities('platform_sanity')), +]) +def test_lock_unlock_host(host_type): + """ + Verify lock unlock host + + Test Steps: + - Select a host per given type. If type is controller, select + standby controller. + - Lock selected host and ensure it is successfully locked + - Unlock selected host and ensure it is successfully unlocked + + """ + LOG.tc_step("Select a {} node from system if any".format(host_type)) + if host_type == 'controller': + if system_helper.is_aio_simplex(): + host = 'controller-0' + else: + host = system_helper.get_standby_controller_name() + assert host, "No standby controller available" + + else: + if host_type == 'compute' and system_helper.is_aio_system(): + skip("No compute host on AIO system") + elif host_type == 'storage' and not system_helper.is_storage_system(): + skip("System does not have storage nodes") + + hosts = system_helper.get_hosts(personality=host_type, + availability=HostAvailState.AVAILABLE, + operational=HostOperState.ENABLED) + + assert hosts, "No good {} host on system".format(host_type) + host = hosts[0] + + LOG.tc_step("Lock {} host - {} and ensure it is successfully " + "locked".format(host_type, host)) + HostsToRecover.add(host) + host_helper.lock_host(host, swact=False) + + # wait for services to stabilize before unlocking + time.sleep(20) + + # unlock standby controller node and verify controller node is + # successfully unlocked + LOG.tc_step("Unlock {} host - {} and ensure it is successfully " + "unlocked".format(host_type, host)) + host_helper.unlock_host(host) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_services_persists_over_reboot.py b/automated-pytest-suite/testcases/functional/mtc/test_services_persists_over_reboot.py new file mode 100755 index 0000000..822c2a6 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/mtc/test_services_persists_over_reboot.py @@ -0,0 +1,85 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time +from pytest import mark, skip, param + +from utils.tis_log import LOG + +from consts.stx import VMStatus +from consts.timeout import VMTimeout +from keywords import host_helper, system_helper, vm_helper, network_helper +from testfixtures.recover_hosts import HostsToRecover + + +@mark.usefixtures('check_alarms') +@mark.parametrize('host_type', [ + param('controller', marks=mark.sanity), + 'compute', + # 'storage' +]) +def test_system_persist_over_host_reboot(host_type): + """ + Validate Inventory summary over reboot of one of the controller see if + data persists over reboot + + Test Steps: + - capture Inventory summary for list of hosts on system service-list + and neutron agent-list + - reboot the current Controller-Active + - Wait for reboot to complete + - Validate key items from inventory persist over reboot + + """ + if host_type == 'controller': + host = system_helper.get_active_controller_name() + elif host_type == 'compute': + if system_helper.is_aio_system(): + skip("No compute host for AIO system") + + host = None + else: + hosts = system_helper.get_hosts(personality='storage') + if not hosts: + skip(msg="Lab has no storage nodes. Skip rebooting storage node.") + + host = hosts[0] + + LOG.tc_step("Pre-check for system status") + system_helper.wait_for_services_enable() + up_hypervisors = host_helper.get_up_hypervisors() + network_helper.wait_for_agents_healthy(hosts=up_hypervisors) + + LOG.tc_step("Launch a vm") + vm_id = vm_helper.boot_vm(cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + if host is None: + host = vm_helper.get_vm_host(vm_id) + + LOG.tc_step("Reboot a {} node and wait for reboot completes: " + "{}".format(host_type, host)) + HostsToRecover.add(host) + host_helper.reboot_hosts(host) + host_helper.wait_for_hosts_ready(host) + + LOG.tc_step("Check vm is still active and pingable after {} " + "reboot".format(host)) + vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, + timeout=VMTimeout.DHCP_RETRY) + + LOG.tc_step("Check neutron agents and system services are in good state " + "after {} reboot".format(host)) + network_helper.wait_for_agents_healthy(up_hypervisors) + system_helper.wait_for_services_enable() + + if host in up_hypervisors: + LOG.tc_step("Check {} can still host vm after reboot".format(host)) + if not vm_helper.get_vm_host(vm_id) == host: + time.sleep(30) + vm_helper.live_migrate_vm(vm_id, destination_host=host) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_swact.py b/automated-pytest-suite/testcases/functional/mtc/test_swact.py new file mode 100755 index 0000000..385954d --- /dev/null +++ b/automated-pytest-suite/testcases/functional/mtc/test_swact.py @@ -0,0 +1,123 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, skip + +from utils.tis_log import LOG +from consts.reasons import SkipSysType +from keywords import host_helper, system_helper, vm_helper, network_helper, \ + kube_helper + + +@mark.sanity +@mark.cpe_sanity +def test_swact_controllers(wait_for_con_drbd_sync_complete): + """ + Verify swact active controller + + Test Steps: + - Boot a vm on system and check ping works + - Swact active controller + - Verify standby controller and active controller are swapped + - Verify vm is still pingable + + """ + if system_helper.is_aio_simplex(): + skip("Simplex system detected") + + if not wait_for_con_drbd_sync_complete: + skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS) + + LOG.tc_step('retrieve active and available controllers') + pre_active_controller, pre_standby_controller = \ + system_helper.get_active_standby_controllers() + assert pre_standby_controller, "No standby controller available" + + pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable( + timeout=20, fail_ok=True) + up_hypervisors = host_helper.get_up_hypervisors() + pre_res_neutron, pre_msg_neutron = network_helper.wait_for_agents_healthy( + up_hypervisors, timeout=20, fail_ok=True) + + LOG.tc_step("Boot a vm from image and ping it") + vm_id_img = vm_helper.boot_vm(name='swact_img', source='image', + cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img) + + LOG.tc_step("Boot a vm from volume and ping it") + vm_id_vol = vm_helper.boot_vm(name='swact', cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol) + + LOG.tc_step("Swact active controller and ensure active controller is " + "changed") + host_helper.swact_host(hostname=pre_active_controller) + + LOG.tc_step("Verify standby controller and active controller are swapped") + post_active_controller = system_helper.get_active_controller_name() + post_standby_controller = system_helper.get_standby_controller_name() + + assert pre_standby_controller == post_active_controller, \ + "Prev standby: {}; Post active: {}".format( + pre_standby_controller, post_active_controller) + assert pre_active_controller == post_standby_controller, \ + "Prev active: {}; Post standby: {}".format( + pre_active_controller, post_standby_controller) + + LOG.tc_step("Check boot-from-image vm still pingable after swact") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img, timeout=30) + LOG.tc_step("Check boot-from-volume vm still pingable after swact") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol, timeout=30) + + LOG.tc_step("Check system services and neutron agents after swact " + "from {}".format(pre_active_controller)) + post_res_sys, post_msg_sys = \ + system_helper.wait_for_services_enable(fail_ok=True) + post_res_neutron, post_msg_neutron = \ + network_helper.wait_for_agents_healthy(hosts=up_hypervisors, + fail_ok=True) + + assert post_res_sys, "\nPost-evac system services stats: {}" \ + "\nPre-evac system services stats: {}". \ + format(post_msg_sys, pre_msg_sys) + assert post_res_neutron, "\nPost evac neutron agents stats: {}" \ + "\nPre-evac neutron agents stats: {}". 
\ + format(pre_msg_neutron, post_msg_neutron) + + LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact") + kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller, + pre_standby_controller), timeout=30) + + +@mark.platform_sanity +def test_swact_controller_platform(wait_for_con_drbd_sync_complete): + """ + Verify swact active controller + + Test Steps: + - Swact active controller + - Verify standby controller and active controller are swapped + - Verify nodes are ready in kubectl get nodes + + """ + if system_helper.is_aio_simplex(): + skip("Simplex system detected") + + if not wait_for_con_drbd_sync_complete: + skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS) + + LOG.tc_step('retrieve active and available controllers') + pre_active_controller, pre_standby_controller = \ + system_helper.get_active_standby_controllers() + assert pre_standby_controller, "No standby controller available" + + LOG.tc_step("Swact active controller and ensure active controller " + "is changed") + host_helper.swact_host(hostname=pre_active_controller) + + LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact") + kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller, + pre_standby_controller), timeout=30) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_ungraceful_reboot.py b/automated-pytest-suite/testcases/functional/mtc/test_ungraceful_reboot.py new file mode 100644 index 0000000..91a89e9 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/mtc/test_ungraceful_reboot.py @@ -0,0 +1,45 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, skip, param + +from utils.tis_log import LOG +from consts.stx import HostAvailState +from testfixtures.recover_hosts import HostsToRecover +from keywords import host_helper, system_helper + + +@mark.parametrize('host_type', [ + param('controller', marks=mark.platform), + param('compute', marks=mark.platform), + param('storage', marks=mark.platform), +]) +def test_force_reboot_host(host_type): + """ + Verify lock unlock host + + Test Steps: + - Select a host per given type. If type is controller, select standby + controller. 
+ - Lock selected host and ensure it is successfully locked + - Unlock selected host and ensure it is successfully unlocked + + """ + + LOG.tc_step("Select a {} node from system if any".format(host_type)) + hosts = system_helper.get_hosts(availability=(HostAvailState.AVAILABLE, + HostAvailState.DEGRADED), + personality=host_type) + if not hosts: + skip("No available or degraded {} host found on system".format( + host_type)) + + host = hosts[0] + LOG.tc_step("Force reboot {} host: {}".format(host_type, host)) + HostsToRecover.add(host) + host_helper.reboot_hosts(hostnames=host) + host_helper.wait_for_hosts_ready(host) diff --git a/automated-pytest-suite/testcases/functional/networking/__init__.py b/automated-pytest-suite/testcases/functional/networking/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/networking/conftest.py b/automated-pytest-suite/testcases/functional/networking/conftest.py new file mode 100755 index 0000000..157d7f8 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/networking/conftest.py @@ -0,0 +1,3 @@ +from testfixtures.resource_mgmt import * +from testfixtures.resource_create import * +from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/networking/test_dvr.py b/automated-pytest-suite/testcases/functional/networking/test_dvr.py new file mode 100755 index 0000000..ad7b093 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/networking/test_dvr.py @@ -0,0 +1,203 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time + +from pytest import mark, fixture, skip, param + +from utils.tis_log import LOG + +from consts.auth import Tenant +from consts.stx import RouterStatus +from keywords import network_helper, vm_helper, system_helper, host_helper, \ + cinder_helper +from testfixtures.fixture_resources import ResourceCleanup + +result_ = None + + +@fixture(scope='module') +def router_info(request): + global result_ + result_ = False + + LOG.fixture_step( + "Disable SNAT and update router to DVR if not already done.") + + router_id = network_helper.get_tenant_router() + network_helper.set_router_gateway(router_id, enable_snat=False) + is_dvr = network_helper.get_router_values(router_id, fields='distributed', + auth_info=Tenant.get('admin'))[0] + + def teardown(): + post_dvr = \ + network_helper.get_router_values(router_id, fields='distributed', + auth_info=Tenant.get('admin'))[0] + if post_dvr != is_dvr: + network_helper.set_router_mode(router_id, distributed=is_dvr) + + request.addfinalizer(teardown) + + if not is_dvr: + network_helper.set_router_mode(router_id, distributed=True, + enable_on_failure=False) + + result_ = True + return router_id + + +@fixture() +def _bring_up_router(request): + def _router_up(): + if result_ is False: + router_id = network_helper.get_tenant_router() + network_helper.set_router(router=router_id, fail_ok=False, + enable=True) + + request.addfinalizer(_router_up) + + +@mark.domain_sanity +def test_dvr_update_router(router_info, _bring_up_router): + """ + Test update router to distributed and non-distributed + + Args: + router_info (str): router_id (str) + + Setups: + - Get the router id and original distributed setting + + Test Steps: + - Boot a vm before updating router and ping vm from NatBox + - Change the distributed value of the router and verify it's updated + successfully + - Verify router is in ACTIVE state + - Verify vm can still be ping'd from NatBox + 
- Repeat the three steps above with the distributed value reverted to + original value + + Teardown: + - Delete vm + - Revert router to it's original distributed setting if not already + done so + + """ + global result_ + result_ = False + router_id = router_info + + LOG.tc_step("Boot a vm before updating router and ping vm from NatBox") + vm_id = vm_helper.boot_vm(name='dvr_update', reuse_vol=False, + cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) + + for update_to_val in [False, True]: + LOG.tc_step("Update router distributed to {}".format(update_to_val)) + network_helper.set_router_mode(router_id, distributed=update_to_val, + enable_on_failure=False) + + # Wait for 30 seconds to allow the router update completes + time.sleep(30) + LOG.tc_step( + "Verify router is in active state and vm can be ping'd from NatBox") + assert RouterStatus.ACTIVE == \ + network_helper.get_router_values(router_id, + fields='status')[0], \ + "Router is not in active state after updating distributed to " \ + "{}.".format(update_to_val) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) + + result_ = True + + +@mark.parametrize(('vms_num', 'srv_grp_policy'), [ + param(2, 'affinity', marks=mark.p2), + param(2, 'anti-affinity', marks=mark.nightly), + param(3, 'affinity', marks=mark.p2), + param(3, 'anti-affinity', marks=mark.p2), +]) +def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups, + router_info): + """ + Test vms East West connection by pinging vms' data network from vm + + Args: + vms_num (int): number of vms to boot + srv_grp_policy (str): affinity to boot vms on same host, + anti-affinity to boot vms on different hosts + server_groups: test fixture to return affinity and anti-affinity + server groups + router_info (str): id of tenant router + + Skip Conditions: + - Only one nova host on the system + + Setups: + - Enable DVR (module) + + Test Steps + - Update router to distributed if not already done + - Boot given number of vms with specific server group policy to + schedule vms on same or different host(s) + - Ping vms' over data and management networks from one vm to test NS + and EW traffic + + Teardown: + - Delete vms + - Revert router to + + """ + # Increase instance quota count if needed + current_vms = len(vm_helper.get_vms(strict=False)) + quota_needed = current_vms + vms_num + vm_helper.ensure_vms_quotas(quota_needed) + + if srv_grp_policy == 'anti-affinity' and len( + host_helper.get_up_hypervisors()) == 1: + skip("Only one nova host on the system.") + + LOG.tc_step("Update router to distributed if not already done") + router_id = router_info + is_dvr = network_helper.get_router_values(router_id, fields='distributed', + auth_info=Tenant.get('admin'))[0] + if not is_dvr: + network_helper.set_router_mode(router_id, distributed=True) + + LOG.tc_step("Boot {} vms with server group policy {}".format( + vms_num, srv_grp_policy)) + affinity_grp, anti_affinity_grp = server_groups(soft=True) + srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else \ + anti_affinity_grp + + vms = [] + tenant_net_id = network_helper.get_tenant_net_id() + mgmt_net_id = network_helper.get_mgmt_net_id() + internal_net_id = network_helper.get_internal_net_id() + + internal_vif = {'net-id': internal_net_id} + if system_helper.is_avs(): + internal_vif['vif-model'] = 'avp' + + nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif] + for i in range(vms_num): + vol = cinder_helper.create_volume()[1] + 
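# Register the volume for teardown so it is deleted even if the vm fails to boot +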
ResourceCleanup.add(resource_type='volume', resource_id=vol) + vm_id = \ + vm_helper.boot_vm('dvr_ew_traffic', source='volume', source_id=vol, + nics=nics, cleanup='function', + hint={'group': srv_grp_id})[1] + vms.append(vm_id) + LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id)) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) + + from_vm = vms[0] + LOG.tc_step( + "Ping vms over management and data networks from vm {}, and " + "verify ping successful.".format(from_vm)) + vm_helper.ping_vms_from_vm(to_vms=vms, from_vm=from_vm, fail_ok=False, + net_types=['data', 'mgmt', 'internal']) diff --git a/automated-pytest-suite/testcases/functional/networking/test_multiple_ports.py b/automated-pytest-suite/testcases/functional/networking/test_multiple_ports.py new file mode 100755 index 0000000..94afe2b --- /dev/null +++ b/automated-pytest-suite/testcases/functional/networking/test_multiple_ports.py @@ -0,0 +1,538 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import copy + +from pytest import fixture, mark, skip, param + +from utils.tis_log import LOG + +from consts.stx import FlavorSpec, VMStatus +from consts.reasons import SkipHostIf +from keywords import vm_helper, nova_helper, network_helper, glance_helper, \ + system_helper +from testfixtures.fixture_resources import ResourceCleanup + + +def id_params(val): + if not isinstance(val, str): + new_val = [] + for val_1 in val: + if isinstance(val_1, (tuple, list)): + val_1 = '_'.join([str(val_2).lower() for val_2 in val_1]) + new_val.append(val_1) + else: + new_val = val + + return '_'.join(new_val) + + +def _append_nics_for_net(vifs, net_id, nics): + glance_vif = None + nics = copy.deepcopy(nics) + for vif in vifs: + vif_ = vif.split(sep='_x') + vif_model = vif_[0] + if vif_model in ('e1000', 'rt18139'): + glance_vif = vif_model + iter_ = int(vif_[1]) if len(vif_) > 1 else 1 + for i in range(iter_): + nic = {'net-id': net_id, 'vif-model': vif_model} + nics.append(nic) + + return nics, glance_vif + + +def _boot_multiports_vm(flavor, mgmt_net_id, vifs, net_id, net_type, base_vm, + pcipt_seg_id=None): + nics = [{'net-id': mgmt_net_id}] + + nics, glance_vif = _append_nics_for_net(vifs, net_id=net_id, nics=nics) + img_id = None + if glance_vif: + img_id = glance_helper.create_image(name=glance_vif, + hw_vif_model=glance_vif, + cleanup='function')[1] + + LOG.tc_step("Boot a test_vm with following nics on same networks as " + "base_vm: {}".format(nics)) + vm_under_test = \ + vm_helper.boot_vm(name='multiports', nics=nics, flavor=flavor, + cleanup='function', + image_id=img_id)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False) + + if pcipt_seg_id: + LOG.tc_step("Add vlan to pci-passthrough interface for VM.") + vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, + net_seg_id=pcipt_seg_id, + init_conf=True) + + LOG.tc_step("Ping test_vm's own {} network ips".format(net_type)) + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test, + net_types=net_type) + + vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test) + + LOG.tc_step( + "Ping test_vm from base_vm to verify management and data networks " + "connection") + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm, + net_types=['mgmt', net_type]) + + return vm_under_test, nics + + +class TestMutiPortsBasic: + @fixture(scope='class') + def base_setup(self): + + flavor_id = nova_helper.create_flavor(name='dedicated')[1] + 
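# Flavor is shared by every test in this class, so defer its deletion to class teardown +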
ResourceCleanup.add('flavor', flavor_id, scope='class') + + extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'} + nova_helper.set_flavor(flavor=flavor_id, **extra_specs) + + mgmt_net_id = network_helper.get_mgmt_net_id() + tenant_net_id = network_helper.get_tenant_net_id() + internal_net_id = network_helper.get_internal_net_id() + + nics = [{'net-id': mgmt_net_id}, + {'net-id': tenant_net_id}, + {'net-id': internal_net_id}] + + LOG.fixture_step( + "(class) Boot a base vm with following nics: {}".format(nics)) + base_vm = vm_helper.boot_vm(name='multiports_base', + flavor=flavor_id, nics=nics, + cleanup='class', + reuse_vol=False)[1] + + vm_helper.wait_for_vm_pingable_from_natbox(base_vm) + vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data') + + return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id + + @mark.parametrize('vifs', [ + param(('virtio_x4',), marks=mark.priorities('nightly', 'sx_nightly')) + ], ids=id_params) + def test_multiports_on_same_network_vm_actions(self, vifs, base_setup): + """ + Test vm actions on vm with multiple ports with given vif models on + the same tenant network + + Args: + vifs (tuple): each item in the tuple is 1 nic to be added to vm + with specified (vif_mode, pci_address) + base_setup (list): test fixture to boot base vm + + Setups: + - create a flavor with dedicated cpu policy (class) + - choose one tenant network and one internal network to be used + by test (class) + - boot a base vm - vm1 with above flavor and networks, and ping + it from NatBox (class) + - Boot a vm under test - vm2 with above flavor and with multiple + ports on same tenant network with base vm, + and ping it from NatBox (class) + - Ping vm2's own data network ips (class) + - Ping vm2 from vm1 to verify management and data networks + connection (class) + + Test Steps: + - Perform given actions on vm2 (migrate, start/stop, etc) + - Verify pci_address preserves + - Verify ping from vm1 to vm2 over management and data networks + still works + + Teardown: + - Delete created vms and flavor + """ + base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = \ + base_setup + + vm_under_test, nics = _boot_multiports_vm(flavor=flavor, + mgmt_net_id=mgmt_net_id, + vifs=vifs, + net_id=tenant_net_id, + net_type='data', + base_vm=base_vm) + + for vm_actions in [['auto_recover'], + ['cold_migrate'], + ['pause', 'unpause'], + ['suspend', 'resume'], + ['hard_reboot']]: + if vm_actions[0] == 'auto_recover': + LOG.tc_step( + "Set vm to error state and wait for auto recovery " + "complete, then verify ping from " + "base vm over management and data networks") + vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True, + fail_ok=False) + vm_helper.wait_for_vm_values(vm_id=vm_under_test, + status=VMStatus.ACTIVE, + fail_ok=True, timeout=600) + else: + LOG.tc_step("Perform following action(s) on vm {}: {}".format( + vm_under_test, vm_actions)) + for action in vm_actions: + if 'migrate' in action and system_helper.is_aio_simplex(): + continue + + kwargs = {} + if action == 'hard_reboot': + action = 'reboot' + kwargs['hard'] = True + kwargs['action'] = action + + vm_helper.perform_action_on_vm(vm_under_test, **kwargs) + + vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test) + + # LOG.tc_step("Verify vm pci address preserved after {}".format( + # vm_actions)) + # check_helper.check_vm_pci_addr(vm_under_test, nics) + + LOG.tc_step( + "Verify ping from base_vm to vm_under_test over management " + "and data networks still works " + "after {}".format(vm_actions)) + 
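# Both networks are checked since all extra vifs in this test sit on the same tenant network +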
vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm, + net_types=['mgmt', 'data']) + + +class TestMutiPortsPCI: + + @fixture(scope='class') + def base_setup_pci(self): + LOG.fixture_step( + "(class) Get an internal network that supports both pci-sriov and " + "pcipt vif to boot vm") + avail_pcipt_nets, is_cx4 = network_helper.get_pci_vm_network( + pci_type='pci-passthrough', + net_name='internal0-net', rtn_all=True) + avail_sriov_nets, _ = network_helper.get_pci_vm_network( + pci_type='pci-sriov', + net_name='internal0-net', rtn_all=True) + + if not avail_pcipt_nets and not avail_sriov_nets: + skip(SkipHostIf.PCI_IF_UNAVAIL) + + avail_nets = list(set(avail_pcipt_nets) & set(avail_sriov_nets)) + extra_pcipt_net = avail_pcipt_net = avail_sriov_net = None + pcipt_seg_ids = {} + if avail_nets: + avail_net_name = avail_nets[-1] + avail_net, segment_id = network_helper.get_network_values( + network=avail_net_name, + fields=('id', 'provider:segmentation_id')) + internal_nets = [avail_net] + pcipt_seg_ids[avail_net_name] = segment_id + avail_pcipt_net = avail_sriov_net = avail_net + LOG.info( + "Internal network(s) selected for pcipt and sriov: {}".format( + avail_net_name)) + else: + LOG.info("No internal network support both sriov and pcipt") + internal_nets = [] + if avail_pcipt_nets: + avail_pcipt_net_name = avail_pcipt_nets[-1] + avail_pcipt_net, segment_id = network_helper.get_network_values( + network=avail_pcipt_net_name, + fields=('id', 'provider:segmentation_id')) + internal_nets.append(avail_pcipt_net) + pcipt_seg_ids[avail_pcipt_net_name] = segment_id + LOG.info("pci-passthrough net: {}".format(avail_pcipt_net_name)) + if avail_sriov_nets: + avail_sriov_net_name = avail_sriov_nets[-1] + avail_sriov_net = network_helper.get_net_id_from_name( + avail_sriov_net_name) + internal_nets.append(avail_sriov_net) + LOG.info("pci-sriov net: {}".format(avail_sriov_net_name)) + + mgmt_net_id = network_helper.get_mgmt_net_id() + tenant_net_id = network_helper.get_tenant_net_id() + base_nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}] + nics = base_nics + [{'net-id': net_id} for net_id in internal_nets] + + if avail_pcipt_nets and is_cx4: + extra_pcipt_net_name = avail_nets[0] if avail_nets else \ + avail_pcipt_nets[0] + extra_pcipt_net, seg_id = network_helper.get_network_values( + network=extra_pcipt_net_name, + fields=('id', 'provider:segmentation_id')) + if extra_pcipt_net not in internal_nets: + nics.append({'net-id': extra_pcipt_net}) + pcipt_seg_ids[extra_pcipt_net_name] = seg_id + + LOG.fixture_step("(class) Create a flavor with dedicated cpu policy.") + flavor_id = \ + nova_helper.create_flavor(name='dedicated', vcpus=2, ram=2048, + cleanup='class')[1] + extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated', + FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'} + nova_helper.set_flavor(flavor=flavor_id, **extra_specs) + + LOG.fixture_step( + "(class) Boot a base pci vm with following nics: {}".format(nics)) + base_vm_pci = \ + vm_helper.boot_vm(name='multiports_pci_base', flavor=flavor_id, + nics=nics, cleanup='class')[1] + + LOG.fixture_step("(class) Ping base PCI vm interfaces") + vm_helper.wait_for_vm_pingable_from_natbox(base_vm_pci) + vm_helper.ping_vms_from_vm(to_vms=base_vm_pci, from_vm=base_vm_pci, + net_types=['data', 'internal']) + + return base_vm_pci, flavor_id, base_nics, avail_sriov_net, \ + avail_pcipt_net, pcipt_seg_ids, extra_pcipt_net + + @mark.parametrize('vifs', [ + param(('virtio', 'pci-sriov', 'pci-passthrough'), marks=mark.p3), + param(('pci-passthrough',), 
marks=mark.nightly), + param(('pci-sriov',), marks=mark.nightly), + ], ids=id_params) + def test_multiports_on_same_network_pci_vm_actions(self, base_setup_pci, + vifs): + """ + Test vm actions on vm with multiple ports with given vif models on + the same tenant network + + Args: + base_setup_pci (tuple): base_vm_pci, flavor, mgmt_net_id, + tenant_net_id, internal_net_id, seg_id + vifs (list): list of vifs to add to same internal net + + Setups: + - Create a flavor with dedicated cpu policy (class) + - Choose management net, one tenant net, and internal0-net1 to be + used by test (class) + - Boot a base pci-sriov vm - vm1 with above flavor and networks, + ping it from NatBox (class) + - Ping vm1 from itself over data, and internal networks + + Test Steps: + - Boot a vm under test - vm2 with above flavor and with multiple + ports on same tenant network with vm1, + and ping it from NatBox + - Ping vm2's own data and internal network ips + - Ping vm2 from vm1 to verify management and data networks + connection + - Perform one of the following actions on vm2 + - set to error/ wait for auto recovery + - suspend/resume + - cold migration + - pause/unpause + - Update vlan interface to proper eth if pci-passthrough device + moves to different eth + - Verify ping from vm1 to vm2 over management and data networks + still works + - Repeat last 3 steps with different vm actions + + Teardown: + - Delete created vms and flavor + """ + + base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \ + pcipt_seg_ids, extra_pcipt_net = base_setup_pci + + pcipt_included = False + internal_net_id = None + for vif in vifs: + if not isinstance(vif, str): + vif = vif[0] + if 'pci-passthrough' in vif: + if not avail_pcipt_net: + skip(SkipHostIf.PCIPT_IF_UNAVAIL) + internal_net_id = avail_pcipt_net + pcipt_included = True + continue + elif 'pci-sriov' in vif: + if not avail_sriov_net: + skip(SkipHostIf.SRIOV_IF_UNAVAIL) + internal_net_id = avail_sriov_net + + assert internal_net_id, "test script error. Internal net should have " \ + "been determined." 
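+ # All requested vifs attach to the single internal network selected above; + # e.g. vifs=('virtio', 'pci-sriov', 'pci-passthrough') would yield nics like + # [..., {'net-id': <internal>, 'vif-model': 'pci-sriov'}, ...] (illustrative only)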
+ + nics, glance_vif = _append_nics_for_net(vifs, net_id=internal_net_id, + nics=base_nics) + if pcipt_included and extra_pcipt_net: + nics.append( + {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'}) + + img_id = None + if glance_vif: + img_id = glance_helper.create_image(name=glance_vif, + hw_vif_model=glance_vif, + cleanup='function')[1] + + LOG.tc_step("Boot a vm with following vifs on same internal net: " + "{}".format(vifs)) + vm_under_test = vm_helper.boot_vm(name='multiports_pci', + nics=nics, flavor=flavor, + cleanup='function', + reuse_vol=False, image_id=img_id)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False) + + if pcipt_included: + LOG.tc_step("Add vlan to pci-passthrough interface for VM.") + vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, + net_seg_id=pcipt_seg_ids, + init_conf=True) + + LOG.tc_step("Ping vm's own data and internal network ips") + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test, + net_types=['data', 'internal']) + + LOG.tc_step( + "Ping vm_under_test from base_vm over management, data, " + "and internal networks") + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci, + net_types=['mgmt', 'data', 'internal']) + + for vm_actions in [['auto_recover'], ['cold_migrate'], + ['pause', 'unpause'], ['suspend', 'resume']]: + if 'auto_recover' in vm_actions: + LOG.tc_step( + "Set vm to error state and wait for auto recovery " + "complete, " + "then verify ping from base vm over management and " + "internal networks") + vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True, + fail_ok=False) + vm_helper.wait_for_vm_values(vm_id=vm_under_test, + status=VMStatus.ACTIVE, + fail_ok=False, timeout=600) + else: + LOG.tc_step("Perform following action(s) on vm {}: {}".format( + vm_under_test, vm_actions)) + for action in vm_actions: + vm_helper.perform_action_on_vm(vm_under_test, action=action) + + vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_under_test) + if pcipt_included: + LOG.tc_step( + "Bring up vlan interface for pci-passthrough vm {}.".format( + vm_under_test)) + vm_helper.add_vlan_for_vm_pcipt_interfaces( + vm_id=vm_under_test, net_seg_id=pcipt_seg_ids) + + LOG.tc_step( + "Verify ping from base_vm to vm_under_test over management " + "and internal networks still works " + "after {}".format(vm_actions)) + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, + from_vm=base_vm_pci, + net_types=['mgmt', 'internal']) + + @mark.parametrize('vifs', [ + ('pci-sriov',), + ('pci-passthrough',), + ], ids=id_params) + def test_multiports_on_same_network_pci_evacuate_vm(self, base_setup_pci, + vifs): + """ + Test evacuate vm with multiple ports on same network + + Args: + base_setup_pci (tuple): base vm id, vm under test id, segment id + for internal0-net1 + vifs (list): list of vifs to add to same internal net + + Setups: + - create a flavor with dedicated cpu policy (module) + - choose one tenant network and one internal network to be used + by test (module) + - boot a base vm - vm1 with above flavor and networks, and ping + it from NatBox (module) + - Boot a vm under test - vm2 with above flavor and with multiple + ports on same tenant network with base vm, + and ping it from NatBox (class) + - Ping vm2's own data network ips (class) + - Ping vm2 from vm1 to verify management and internal networks + connection (class) + + Test Steps: + - Reboot vm2 host + - Wait for vm2 to be evacuated to other host + - Wait for vm2 pingable from NatBox + - Verify ping from vm1 to vm2 over 
management and internal + networks still works + + Teardown: + - Delete created vms and flavor + """ + base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \ + pcipt_seg_ids, extra_pcipt_net = base_setup_pci + + internal_net_id = None + pcipt_included = False + nics = copy.deepcopy(base_nics) + if 'pci-passthrough' in vifs: + if not avail_pcipt_net: + skip(SkipHostIf.PCIPT_IF_UNAVAIL) + pcipt_included = True + internal_net_id = avail_pcipt_net + if extra_pcipt_net: + nics.append( + {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'}) + if 'pci-sriov' in vifs: + if not avail_sriov_net: + skip(SkipHostIf.SRIOV_IF_UNAVAIL) + internal_net_id = avail_sriov_net + assert internal_net_id, "test script error. sriov or pcipt has to be " \ + "included." + + for vif in vifs: + nics.append({'net-id': internal_net_id, 'vif-model': vif}) + + LOG.tc_step( + "Boot a vm with following vifs on same network internal0-net1: " + "{}".format(vifs)) + vm_under_test = vm_helper.boot_vm(name='multiports_pci_evac', + nics=nics, flavor=flavor, + cleanup='function', + reuse_vol=False)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False) + + if pcipt_included: + LOG.tc_step("Add vlan to pci-passthrough interface.") + vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, + net_seg_id=pcipt_seg_ids, + init_conf=True) + + LOG.tc_step("Ping vm's own data and internal network ips") + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test, + net_types=['data', 'internal']) + vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test) + + LOG.tc_step( + "Ping vm_under_test from base_vm over management, data, and " + "internal networks") + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci, + net_types=['mgmt', 'data', 'internal']) + + host = vm_helper.get_vm_host(vm_under_test) + + LOG.tc_step("Reboot vm host {}".format(host)) + vm_helper.evacuate_vms(host=host, vms_to_check=vm_under_test, + ping_vms=True) + + if pcipt_included: + LOG.tc_step( + "Add/Check vlan interface is added to pci-passthrough device " + "for vm {}.".format(vm_under_test)) + vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, + net_seg_id=pcipt_seg_ids) + + LOG.tc_step( + "Verify ping from base_vm to vm_under_test over management and " + "internal networks still works after evacuation.") + vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci, + net_types=['mgmt', 'internal']) diff --git a/automated-pytest-suite/testcases/functional/networking/test_ping_vms.py b/automated-pytest-suite/testcases/functional/networking/test_ping_vms.py new file mode 100755 index 0000000..fb8eeb7 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/networking/test_ping_vms.py @@ -0,0 +1,117 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, param + +from utils.tis_log import LOG +from consts.stx import FlavorSpec, GuestImages +from keywords import vm_helper, glance_helper, nova_helper, network_helper, \ + cinder_helper + + +def id_gen(val): + if not isinstance(val, str): + new_val = [] + for val_1 in val: + if not isinstance(val_1, str): + val_1 = '_'.join([str(val_2).lower() for val_2 in val_1]) + new_val.append(val_1) + new_val = '_'.join(new_val) + else: + new_val = val + + return new_val + + +def _compose_nics(vifs, net_ids, image_id, guest_os): + nics = [] + glance_vif = None + if isinstance(vifs, str): + vifs = (vifs,) + for i in range(len(vifs)): + vif_model = vifs[i] + nic = {'net-id': net_ids[i]} + if vif_model in ('e1000', 'rt18139'): + glance_vif = vif_model + elif vif_model != 'virtio': + nic['vif-model'] = vif_model + nics.append(nic) + + if glance_vif: + glance_helper.set_image(image=image_id, hw_vif_model=glance_vif, + new_name='{}_{}'.format(guest_os, glance_vif)) + + return nics + + +@mark.parametrize(('guest_os', 'vm1_vifs', 'vm2_vifs'), [ + param('default', 'virtio', 'virtio', + marks=mark.priorities('cpe_sanity', 'sanity', 'sx_sanity')), + ('ubuntu_14', 'virtio', 'virtio'), +], ids=id_gen) +def test_ping_between_two_vms(guest_os, vm1_vifs, vm2_vifs): + """ + Ping between two vms with given vif models + + Test Steps: + - Create a favor with dedicated cpu policy and proper root disk size + - Create a volume from guest image under test with proper size + - Boot two vms with given vif models from above volume and flavor + - Ping VMs from NatBox and between two vms + + Test Teardown: + - Delete vms, volumes, flavor, glance image created + + """ + if guest_os == 'default': + guest_os = GuestImages.DEFAULT['guest'] + + reuse = False if 'e1000' in vm1_vifs or 'e1000' in vm2_vifs else True + cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None + image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup, + use_existing=reuse) + + LOG.tc_step("Create a favor dedicated cpu policy") + flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os, + cleanup='function')[1] + nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'}) + + mgmt_net_id = network_helper.get_mgmt_net_id() + tenant_net_id = network_helper.get_tenant_net_id() + internal_net_id = network_helper.get_internal_net_id() + net_ids = (mgmt_net_id, tenant_net_id, internal_net_id) + vms = [] + for vifs_for_vm in (vm1_vifs, vm2_vifs): + # compose vm nics + nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id, + guest_os=guest_os) + net_types = ['mgmt', 'data', 'internal'][:len(nics)] + LOG.tc_step("Create a volume from {} image".format(guest_os)) + vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os), + source_id=image_id, + guest_image=guest_os, + cleanup='function')[1] + + LOG.tc_step( + "Boot a {} vm with {} vifs from above flavor and volume".format( + guest_os, vifs_for_vm)) + vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id, + cleanup='function', + source='volume', source_id=vol_id, nics=nics, + guest_os=guest_os)[1] + + LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id)) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) + + vms.append(vm_id) + + LOG.tc_step( + "Ping between two vms over management, data, and internal networks") + vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1], + net_types=net_types) + 
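# Repeat in the reverse direction so connectivity is verified both ways +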
vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0], + net_types=net_types) diff --git a/automated-pytest-suite/testcases/functional/networking/test_vm_meta_data_retrieval.py b/automated-pytest-suite/testcases/functional/networking/test_vm_meta_data_retrieval.py new file mode 100755 index 0000000..3a06b8c --- /dev/null +++ b/automated-pytest-suite/testcases/functional/networking/test_vm_meta_data_retrieval.py @@ -0,0 +1,45 @@ +from pytest import mark + +from utils.tis_log import LOG +from keywords import vm_helper +from consts.stx import METADATA_SERVER + + +@mark.sanity +def test_vm_meta_data_retrieval(): + """ + VM meta-data retrieval + + Test Steps: + - Launch a boot-from-image vm + - Retrieve vm meta_data within vm from metadata server + - Ensure vm uuid from metadata server is the same as nova show + + Test Teardown: + - Delete created vm and flavor + """ + LOG.tc_step("Launch a boot-from-image vm") + vm_id = vm_helper.boot_vm(source='image', cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) + + LOG.tc_step('Retrieve vm meta_data within vm from metadata server') + # retrieve meta instance id by ssh to VM from natbox and wget to remote + # server + _access_metadata_server_from_vm(vm_id=vm_id) + + +def _access_metadata_server_from_vm(vm_id): + with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh: + vm_ssh.exec_cmd('ip route') + command = 'wget http://{}/openstack/latest/meta_data.json'.format( + METADATA_SERVER) + vm_ssh.exec_cmd(command, fail_ok=False) + metadata = vm_ssh.exec_cmd('more meta_data.json', fail_ok=False)[1] + + LOG.tc_step("Ensure vm uuid from metadata server is the same as nova show") + metadata = metadata.replace('\n', '') + LOG.info(metadata) + metadata_uuid = eval(metadata)['uuid'] + + assert vm_id == metadata_uuid, "VM UUID retrieved from metadata server " \ + "is not the same as nova show" diff --git a/automated-pytest-suite/testcases/functional/nova/__init__.py b/automated-pytest-suite/testcases/functional/nova/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/nova/conftest.py b/automated-pytest-suite/testcases/functional/nova/conftest.py new file mode 100755 index 0000000..157d7f8 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/conftest.py @@ -0,0 +1,3 @@ +from testfixtures.resource_mgmt import * +from testfixtures.resource_create import * +from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/nova/test_config_drive.py b/automated-pytest-suite/testcases/functional/nova/test_config_drive.py new file mode 100755 index 0000000..4142a7d --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_config_drive.py @@ -0,0 +1,131 @@ +from pytest import fixture, skip, mark + +from consts.timeout import VMTimeout +from keywords import vm_helper, host_helper, cinder_helper, glance_helper, \ + system_helper +from testfixtures.fixture_resources import ResourceCleanup +from testfixtures.recover_hosts import HostsToRecover +from utils.tis_log import LOG + +TEST_STRING = 'Config-drive test file content' + + +@fixture(scope='module') +def hosts_per_stor_backing(): + hosts_per_backing = host_helper.get_hosts_per_storage_backing() + LOG.fixture_step("Hosts per storage backing: {}".format(hosts_per_backing)) + + return hosts_per_backing + + +@mark.nightly +@mark.sx_nightly +def test_vm_with_config_drive(hosts_per_stor_backing): + """ + Skip Condition: + - no host with local_image backend + + Test 
Steps: + - Launch a vm using config drive + - Add test data to config drive on vm + - Do some operations (reboot vm for simplex, cold migrate and lock + host for non-simplex) and + check test data persisted in config drive after each operation + Teardown: + - Delete created vm, volume, flavor + + """ + guest_os = 'cgcs-guest' + img_id = glance_helper.get_guest_image(guest_os) + hosts_num = len(hosts_per_stor_backing.get('local_image', [])) + if hosts_num < 1: + skip("No host with local_image storage backing") + + volume_id = cinder_helper.create_volume(name='vol_inst1', source_id=img_id, + guest_image=guest_os)[1] + ResourceCleanup.add('volume', volume_id, scope='function') + + block_device = {'source': 'volume', 'dest': 'volume', 'id': volume_id, + 'device': 'vda'} + vm_id = vm_helper.boot_vm(name='config_drive', config_drive=True, + block_device=block_device, + cleanup='function', guest_os=guest_os, + meta={'foo': 'bar'})[1] + + LOG.tc_step("Confirming the config drive is set to True in vm ...") + assert str(vm_helper.get_vm_values(vm_id, "config_drive")[ + 0]) == 'True', "vm config-drive not true" + + LOG.tc_step("Add date to config drive ...") + check_vm_config_drive_data(vm_id) + + vm_host = vm_helper.get_vm_host(vm_id) + instance_name = vm_helper.get_vm_instance_name(vm_id) + LOG.tc_step("Check config_drive vm files on hypervisor after vm launch") + check_vm_files_on_hypervisor(vm_id, vm_host=vm_host, + instance_name=instance_name) + + if not system_helper.is_aio_simplex(): + LOG.tc_step("Cold migrate VM") + vm_helper.cold_migrate_vm(vm_id) + + LOG.tc_step("Check config drive after cold migrate VM...") + check_vm_config_drive_data(vm_id) + + LOG.tc_step("Lock the compute host") + compute_host = vm_helper.get_vm_host(vm_id) + HostsToRecover.add(compute_host) + host_helper.lock_host(compute_host, swact=True) + + LOG.tc_step("Check config drive after locking VM host") + check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.DHCP_RETRY) + vm_host = vm_helper.get_vm_host(vm_id) + + else: + LOG.tc_step("Reboot vm") + vm_helper.reboot_vm(vm_id) + + LOG.tc_step("Check config drive after vm rebooted") + check_vm_config_drive_data(vm_id) + + LOG.tc_step("Check vm files exist after nova operations") + check_vm_files_on_hypervisor(vm_id, vm_host=vm_host, + instance_name=instance_name) + + +def check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.PING_VM): + """ + Args: + vm_id: + ping_timeout + + Returns: + + """ + vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=ping_timeout) + dev = '/dev/hd' + with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh: + # Run mount command to determine the /dev/hdX is mount at: + cmd = """mount | grep "{}" | awk '{{print $3}} '""".format(dev) + mount = vm_ssh.exec_cmd(cmd)[1] + assert mount, "{} is not mounted".format(dev) + + file_path = '{}/openstack/latest/meta_data.json'.format(mount) + content = vm_ssh.exec_cmd('python -m json.tool {} | grep ' + 'foo'.format(file_path), fail_ok=False)[1] + assert '"foo": "bar"' in content + + +def check_vm_files_on_hypervisor(vm_id, vm_host, instance_name): + with host_helper.ssh_to_host(vm_host) as host_ssh: + cmd = " ls /var/lib/nova/instances/{}".format(vm_id) + cmd_output = host_ssh.exec_cmd(cmd)[1] + for expt_file in ('console.log', 'disk.config'): + assert expt_file in cmd_output, \ + "{} is not found for config drive vm {} on " \ + "{}".format(expt_file, vm_id, vm_host) + + output = host_ssh.exec_cmd('ls /run/libvirt/qemu')[1] + libvirt = "{}.xml".format(instance_name) + assert libvirt in output, "{} is 
not found in /run/libvirt/qemu on " \ + "{}".format(libvirt, vm_host) diff --git a/automated-pytest-suite/testcases/functional/nova/test_cpu_policy.py b/automated-pytest-suite/testcases/functional/nova/test_cpu_policy.py new file mode 100755 index 0000000..8d4c196 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_cpu_policy.py @@ -0,0 +1,185 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, param + +from utils.tis_log import LOG + +from consts.stx import FlavorSpec, ImageMetadata, GuestImages +from consts.cli_errs import CPUPolicyErr # used by eval + +from keywords import nova_helper, vm_helper, glance_helper, cinder_helper, \ + check_helper, host_helper +from testfixtures.fixture_resources import ResourceCleanup + + +@mark.parametrize( + ('flv_vcpus', 'flv_pol', 'img_pol', 'boot_source', 'expt_err'), [ + param(3, None, 'shared', 'image', None, marks=mark.p3), + param(4, 'dedicated', 'dedicated', 'volume', None, marks=mark.p3), + param(1, 'dedicated', None, 'image', None, marks=mark.p3), + param(1, 'shared', 'shared', 'volume', None, marks=mark.p3), + param(2, 'shared', None, 'image', None, marks=mark.p3), + param(3, 'dedicated', 'shared', 'volume', None, + marks=mark.domain_sanity), + param(1, 'shared', 'dedicated', 'image', + 'CPUPolicyErr.CONFLICT_FLV_IMG', marks=mark.p3), + ]) +def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, + expt_err): + LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus)) + flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), + vcpus=flv_vcpus)[1] + ResourceCleanup.add('flavor', flavor_id) + + if flv_pol is not None: + specs = {FlavorSpec.CPU_POLICY: flv_pol} + + LOG.tc_step("Set following extra specs: {}".format(specs)) + nova_helper.set_flavor(flavor_id, **specs) + + if img_pol is not None: + image_meta = {ImageMetadata.CPU_POLICY: img_pol} + LOG.tc_step( + "Create image with following metadata: {}".format(image_meta)) + image_id = glance_helper.create_image( + name='cpu_pol_{}'.format(img_pol), cleanup='function', + **image_meta)[1] + else: + image_id = glance_helper.get_image_id_from_name( + GuestImages.DEFAULT['guest'], strict=True) + + if boot_source == 'volume': + LOG.tc_step("Create a volume from image") + source_id = cinder_helper.create_volume(name='cpu_pol_img', + source_id=image_id)[1] + ResourceCleanup.add('volume', source_id) + else: + source_id = image_id + + prev_cpus = host_helper.get_vcpus_for_computes(field='used_now') + + LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format( + boot_source)) + code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, + source=boot_source, + source_id=source_id, fail_ok=True, + cleanup='function') + + # check for negative tests + if expt_err is not None: + LOG.tc_step( + "Check VM failed to boot due to conflict in flavor and image.") + assert 4 == code, "Expect boot vm cli reject and no vm booted. " \ + "Actual: {}".format(msg) + assert eval(expt_err) in msg, \ + "Expected error message is not found in cli return." + return # end the test for negative cases + + # Check for positive tests + LOG.tc_step("Check vm is successfully booted.") + assert 0 == code, "Expect vm boot successfully. 
Actual: {}".format(msg) + + # Calculate expected policy: + expt_cpu_pol = flv_pol if flv_pol else img_pol + expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared' + + vm_host = vm_helper.get_vm_host(vm_id) + check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, + cpu_pol=expt_cpu_pol, vm_host=vm_host, + prev_total_cpus=prev_cpus[vm_host]) + + +@mark.parametrize(('flv_vcpus', 'cpu_pol', 'pol_source', 'boot_source'), [ + param(4, None, 'flavor', 'image', marks=mark.p2), + param(2, 'dedicated', 'flavor', 'volume', marks=mark.domain_sanity), + param(3, 'shared', 'flavor', 'volume', marks=mark.p2), + param(1, 'dedicated', 'flavor', 'image', marks=mark.p2), + param(2, 'dedicated', 'image', 'volume', marks=mark.nightly), + param(3, 'shared', 'image', 'volume', marks=mark.p2), + param(1, 'dedicated', 'image', 'image', marks=mark.domain_sanity), +]) +def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source): + LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus)) + flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1] + ResourceCleanup.add('flavor', flavor_id) + + image_id = glance_helper.get_image_id_from_name( + GuestImages.DEFAULT['guest'], strict=True) + if cpu_pol is not None: + if pol_source == 'flavor': + specs = {FlavorSpec.CPU_POLICY: cpu_pol} + + LOG.tc_step("Set following extra specs: {}".format(specs)) + nova_helper.set_flavor(flavor_id, **specs) + else: + image_meta = {ImageMetadata.CPU_POLICY: cpu_pol} + LOG.tc_step( + "Create image with following metadata: {}".format(image_meta)) + image_id = glance_helper.create_image( + name='cpu_pol_{}'.format(cpu_pol), cleanup='function', + **image_meta)[1] + if boot_source == 'volume': + LOG.tc_step("Create a volume from image") + source_id = cinder_helper.create_volume(name='cpu_pol'.format(cpu_pol), + source_id=image_id)[1] + ResourceCleanup.add('volume', source_id) + else: + source_id = image_id + + prev_cpus = host_helper.get_vcpus_for_computes(field='used_now') + + LOG.tc_step( + "Boot a vm from {} with above flavor and check vm topology is as " + "expected".format(boot_source)) + vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus), + flavor=flavor_id, source=boot_source, + source_id=source_id, cleanup='function')[1] + + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + vm_host = vm_helper.get_vm_host(vm_id) + check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, + vm_host=vm_host, + prev_total_cpus=prev_cpus[vm_host]) + + LOG.tc_step("Suspend/Resume vm and check vm topology stays the same") + vm_helper.suspend_vm(vm_id) + vm_helper.resume_vm(vm_id) + + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, + vm_host=vm_host, + prev_total_cpus=prev_cpus[vm_host]) + + LOG.tc_step("Stop/Start vm and check vm topology stays the same") + vm_helper.stop_vms(vm_id) + vm_helper.start_vms(vm_id) + + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + prev_siblings = check_helper.check_topology_of_vm( + vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, vm_host=vm_host, + prev_total_cpus=prev_cpus[vm_host])[1] + + LOG.tc_step("Live migrate vm and check vm topology stays the same") + vm_helper.live_migrate_vm(vm_id=vm_id) + + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + vm_host = vm_helper.get_vm_host(vm_id) + prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None + check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, + vm_host=vm_host, + 
prev_total_cpus=prev_cpus[vm_host], + prev_siblings=prev_siblings) + + LOG.tc_step("Cold migrate vm and check vm topology stays the same") + vm_helper.cold_migrate_vm(vm_id=vm_id) + + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + vm_host = vm_helper.get_vm_host(vm_id) + check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, + vm_host=vm_host, + prev_total_cpus=prev_cpus[vm_host]) diff --git a/automated-pytest-suite/testcases/functional/nova/test_cpu_thread.py b/automated-pytest-suite/testcases/functional/nova/test_cpu_thread.py new file mode 100755 index 0000000..848374e --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_cpu_thread.py @@ -0,0 +1,437 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, fixture, skip, param + +from utils.tis_log import LOG + +from consts.reasons import SkipHypervisor, SkipHyperthreading +from consts.stx import FlavorSpec, ImageMetadata +# Do not remove used imports below as they are used in eval() +from consts.cli_errs import CPUThreadErr + +from keywords import nova_helper, vm_helper, host_helper, glance_helper, \ + check_helper +from testfixtures.fixture_resources import ResourceCleanup +from testfixtures.recover_hosts import HostsToRecover + + +def id_gen(val): + if isinstance(val, list): + return '-'.join(val) + + +@fixture(scope='module') +def ht_and_nonht_hosts(): + LOG.fixture_step( + "(Module) Get hyper-threading enabled and disabled hypervisors") + nova_hosts = host_helper.get_up_hypervisors() + ht_hosts = [] + non_ht_hosts = [] + for host in nova_hosts: + if host_helper.is_host_hyperthreaded(host): + ht_hosts.append(host) + else: + non_ht_hosts.append(host) + + LOG.info( + '-- Hyper-threading enabled hosts: {}; Hyper-threading disabled ' + 'hosts: {}'.format( + ht_hosts, non_ht_hosts)) + return ht_hosts, non_ht_hosts + + +class TestHTEnabled: + + @fixture(scope='class', autouse=True) + def ht_hosts_(self, ht_and_nonht_hosts): + ht_hosts, non_ht_hosts = ht_and_nonht_hosts + + if not ht_hosts: + skip("No up hypervisor found with Hyper-threading enabled.") + + return ht_hosts, non_ht_hosts + + def test_isolate_vm_on_ht_host(self, ht_hosts_, add_admin_role_func): + """ + Test isolate vms take the host log_core sibling pair for each vcpu + when HT is enabled. 
+ Args: + ht_hosts_: + add_admin_role_func: + + Pre-conditions: At least one hypervisor has HT enabled + + Test Steps: + - Launch VM with isolate thread policy and 4 vcpus, until all + Application cores on thread-0 are taken + - Attempt to launch another vm on same host, and ensure it fails + + """ + ht_hosts, non_ht_hosts = ht_hosts_ + vcpu_count = 4 + cpu_thread_policy = 'isolate' + LOG.tc_step("Create flavor with {} vcpus and {} thread policy".format( + vcpu_count, cpu_thread_policy)) + flavor_id = nova_helper.create_flavor( + name='cpu_thread_{}'.format(cpu_thread_policy), vcpus=vcpu_count, + cleanup='function')[1] + specs = {FlavorSpec.CPU_POLICY: 'dedicated', + FlavorSpec.CPU_THREAD_POLICY: cpu_thread_policy} + nova_helper.set_flavor(flavor_id, **specs) + + LOG.tc_step( + "Get used vcpus for vm host before booting vm, and ensure " + "sufficient instance and core quotas") + host = ht_hosts[0] + vms = vm_helper.get_vms_on_host(hostname=host) + vm_helper.delete_vms(vms=vms) + log_core_counts = host_helper.get_logcores_counts( + host, thread='0', functions='Applications') + max_vm_count = int(log_core_counts[0] / vcpu_count) + int( + log_core_counts[1] / vcpu_count) + vm_helper.ensure_vms_quotas(vms_num=max_vm_count + 10, + cores_num=4 * (max_vm_count + 2) + 10) + + LOG.tc_step( + "Boot {} isolate 4vcpu vms on a HT enabled host, and check " + "topology of vm on host and vms". + format(max_vm_count)) + for i in range(max_vm_count): + name = '4vcpu_isolate-{}'.format(i) + LOG.info( + "Launch VM {} on {} and check its topology".format(name, host)) + prev_cpus = host_helper.get_vcpus_for_computes( + hosts=[host], field='used_now')[host] + vm_id = vm_helper.boot_vm(name=name, flavor=flavor_id, vm_host=host, + cleanup='function')[1] + + check_helper.check_topology_of_vm(vm_id, vcpus=vcpu_count, + prev_total_cpus=prev_cpus, + cpu_pol='dedicated', + cpu_thr_pol=cpu_thread_policy, + vm_host=host) + + LOG.tc_step( + "Attempt to boot another vm on {}, and ensure it fails due to no " + "free sibling pairs".format(host)) + code = vm_helper.boot_vm(name='cpu_thread_{}'.format(cpu_thread_policy), + flavor=flavor_id, vm_host=host, + fail_ok=True, cleanup='function')[0] + assert code > 0, "VM is still scheduled even though all sibling " \ + "pairs should have been occupied" + + @mark.parametrize(('vcpus', 'cpu_thread_policy', 'min_vcpus'), [ + param(4, 'require', None), + param(3, 'require', None), + param(3, 'prefer', None), + ]) + def test_boot_vm_cpu_thread_positive(self, vcpus, cpu_thread_policy, + min_vcpus, ht_hosts_): + """ + Test boot vm with specific cpu thread policy requirement + + Args: + vcpus (int): number of vcpus to set when creating flavor + cpu_thread_policy (str): cpu thread policy to set in flavor + min_vcpus (int): min_vcpus extra spec to set + ht_hosts_ (tuple): (ht_hosts, non-ht_hosts) + + Skip condition: + - no host is hyperthreading enabled on system + + Setups: + - Find out HT hosts and non-HT_hosts on system (module) + + Test Steps: + - Create a flavor with given number of vcpus + - Set cpu policy to dedicated and extra specs as per test params + - Get the host vcpu usage before booting vm + - Boot a vm with above flavor + - Ensure vm is booted on HT host for 'require' vm + - Check vm-topology, host side vcpu usage, topology from within + the guest to ensure vm is properly booted + + Teardown: + - Delete created vm, volume, flavor + + """ + ht_hosts, non_ht_hosts = ht_hosts_ + LOG.tc_step("Create flavor with {} vcpus".format(vcpus)) + flavor_id = nova_helper.create_flavor( +
name='cpu_thread_{}'.format(cpu_thread_policy), vcpus=vcpus)[1] + ResourceCleanup.add('flavor', flavor_id) + + specs = {FlavorSpec.CPU_POLICY: 'dedicated'} + if cpu_thread_policy is not None: + specs[FlavorSpec.CPU_THREAD_POLICY] = cpu_thread_policy + + if min_vcpus is not None: + specs[FlavorSpec.MIN_VCPUS] = min_vcpus + + LOG.tc_step("Set following extra specs: {}".format(specs)) + nova_helper.set_flavor(flavor_id, **specs) + + LOG.tc_step("Get used cpus for all hosts before booting vm") + hosts_to_check = ht_hosts if cpu_thread_policy == 'require' else \ + ht_hosts + non_ht_hosts + pre_hosts_cpus = host_helper.get_vcpus_for_computes( + hosts=hosts_to_check, field='used_now') + + LOG.tc_step( + "Boot a vm with above flavor and ensure it's booted on a HT " + "enabled host.") + vm_id = vm_helper.boot_vm( + name='cpu_thread_{}'.format(cpu_thread_policy), + flavor=flavor_id, + cleanup='function')[1] + + vm_host = vm_helper.get_vm_host(vm_id) + if cpu_thread_policy == 'require': + assert vm_host in ht_hosts, "VM host {} is not hyper-threading " \ + "enabled.".format(vm_host) + + LOG.tc_step("Check topology of the {}vcpu {} vm on hypervisor and " + "on vm".format(vcpus, cpu_thread_policy)) + prev_cpus = pre_hosts_cpus[vm_host] + check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, + prev_total_cpus=prev_cpus, + cpu_pol='dedicated', + cpu_thr_pol=cpu_thread_policy, + min_vcpus=min_vcpus, vm_host=vm_host) + + @mark.parametrize(('vcpus', 'cpu_pol', 'cpu_thr_pol', 'flv_or_img', + 'vs_numa_affinity', 'boot_source', 'nova_actions'), [ + param(2, 'dedicated', 'isolate', 'image', None, 'volume', + 'live_migrate', marks=mark.priorities('domain_sanity', + 'nightly')), + param(3, 'dedicated', 'require', 'image', None, 'volume', + 'live_migrate', marks=mark.domain_sanity), + param(3, 'dedicated', 'prefer', 'flavor', None, 'volume', + 'live_migrate', marks=mark.p2), + param(3, 'dedicated', 'require', 'flavor', None, 'volume', + 'live_migrate', marks=mark.p2), + param(3, 'dedicated', 'isolate', 'flavor', None, 'volume', + 'cold_migrate', marks=mark.domain_sanity), + param(2, 'dedicated', 'require', 'image', None, 'image', + 'cold_migrate', marks=mark.domain_sanity), + param(2, 'dedicated', 'require', 'flavor', None, 'volume', + 'cold_mig_revert', marks=mark.p2), + param(5, 'dedicated', 'prefer', 'image', None, 'volume', + 'cold_mig_revert'), + param(4, 'dedicated', 'isolate', 'image', None, 'volume', + ['suspend', 'resume', 'rebuild'], marks=mark.p2), + param(6, 'dedicated', 'require', 'image', None, 'image', + ['suspend', 'resume', 'rebuild'], marks=mark.p2), + ], ids=id_gen) + def test_cpu_thread_vm_topology_nova_actions(self, vcpus, cpu_pol, + cpu_thr_pol, flv_or_img, + vs_numa_affinity, + boot_source, nova_actions, + ht_hosts_): + ht_hosts, non_ht_hosts = ht_hosts_ + if 'mig' in nova_actions: + if len(ht_hosts) + len(non_ht_hosts) < 2: + skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS) + if cpu_thr_pol in ['require', 'isolate'] and len(ht_hosts) < 2: + skip(SkipHyperthreading.LESS_THAN_TWO_HT_HOSTS) + + name_str = 'cpu_thr_{}_in_img'.format(cpu_pol) + + LOG.tc_step("Create flavor with {} vcpus".format(vcpus)) + flavor_id = nova_helper.create_flavor(name='vcpus{}'.format(vcpus), + vcpus=vcpus)[1] + ResourceCleanup.add('flavor', flavor_id) + + specs = {} + if vs_numa_affinity: + specs[FlavorSpec.VSWITCH_NUMA_AFFINITY] = vs_numa_affinity + + if flv_or_img == 'flavor': + specs[FlavorSpec.CPU_POLICY] = cpu_pol + specs[FlavorSpec.CPU_THREAD_POLICY] = cpu_thr_pol + + if specs: + LOG.tc_step("Set following extra 
specs: {}".format(specs)) + nova_helper.set_flavor(flavor_id, **specs) + + image_id = None + if flv_or_img == 'image': + image_meta = {ImageMetadata.CPU_POLICY: cpu_pol, + ImageMetadata.CPU_THREAD_POLICY: cpu_thr_pol} + LOG.tc_step( + "Create image with following metadata: {}".format(image_meta)) + image_id = glance_helper.create_image(name=name_str, + cleanup='function', + **image_meta)[1] + + LOG.tc_step("Get used cpus for all hosts before booting vm") + hosts_to_check = ht_hosts if cpu_thr_pol == 'require' else \ + ht_hosts + non_ht_hosts + pre_hosts_cpus = host_helper.get_vcpus_for_computes( + hosts=hosts_to_check, field='used_now') + + LOG.tc_step("Boot a vm from {} with above flavor".format(boot_source)) + vm_id = vm_helper.boot_vm(name=name_str, flavor=flavor_id, + source=boot_source, image_id=image_id, + cleanup='function')[1] + + vm_host = vm_helper.get_vm_host(vm_id) + + if cpu_thr_pol == 'require': + LOG.tc_step("Check vm is booted on a HT host") + assert vm_host in ht_hosts, "VM host {} is not hyper-threading " \ + "enabled.".format(vm_host) + + prev_cpus = pre_hosts_cpus[vm_host] + prev_siblings = check_helper.check_topology_of_vm( + vm_id, vcpus=vcpus, prev_total_cpus=prev_cpus, cpu_pol=cpu_pol, + cpu_thr_pol=cpu_thr_pol, vm_host=vm_host)[1] + + LOG.tc_step("Perform following nova action(s) on vm {}: " + "{}".format(vm_id, nova_actions)) + if isinstance(nova_actions, str): + nova_actions = [nova_actions] + + check_prev_siblings = False + for action in nova_actions: + kwargs = {} + if action == 'rebuild': + kwargs['image_id'] = image_id + elif action == 'live_migrate': + check_prev_siblings = True + vm_helper.perform_action_on_vm(vm_id, action=action, **kwargs) + + post_vm_host = vm_helper.get_vm_host(vm_id) + pre_action_cpus = pre_hosts_cpus[post_vm_host] + + if cpu_thr_pol == 'require': + LOG.tc_step("Check vm is still on HT host") + assert post_vm_host in ht_hosts, "VM host {} is not " \ + "hyper-threading " \ + "enabled.".format(vm_host) + + LOG.tc_step( + "Check VM topology is still correct after {}".format(nova_actions)) + if cpu_pol != 'dedicated' or not check_prev_siblings: + # Allow prev_siblings in live migration case + prev_siblings = None + check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, + prev_total_cpus=pre_action_cpus, + cpu_pol=cpu_pol, + cpu_thr_pol=cpu_thr_pol, + vm_host=post_vm_host, + prev_siblings=prev_siblings) + + @fixture(scope='class') + def _add_hosts_to_stxauto(self, request, ht_hosts_, add_stxauto_zone): + ht_hosts, non_ht_hosts = ht_hosts_ + + if not non_ht_hosts: + skip("No non-HT host available") + + LOG.fixture_step("Add one HT host and nonHT hosts to stxauto zone") + + if len(ht_hosts) > 1: + ht_hosts = [ht_hosts[0]] + + host_in_stxauto = ht_hosts + non_ht_hosts + + def _revert(): + nova_helper.remove_hosts_from_aggregate(aggregate='stxauto', + hosts=host_in_stxauto) + + request.addfinalizer(_revert) + + nova_helper.add_hosts_to_aggregate('stxauto', ht_hosts + non_ht_hosts) + + LOG.info( + "stxauto zone: HT: {}; non-HT: {}".format(ht_hosts, non_ht_hosts)) + return ht_hosts, non_ht_hosts + + +class TestHTDisabled: + + @fixture(scope='class', autouse=True) + def ensure_nonht(self, ht_and_nonht_hosts): + ht_hosts, non_ht_hosts = ht_and_nonht_hosts + if not non_ht_hosts: + skip("No host with HT disabled") + + if ht_hosts: + LOG.fixture_step( + "Locking HT hosts to ensure only non-HT hypervisors available") + HostsToRecover.add(ht_hosts, scope='class') + for host_ in ht_hosts: + host_helper.lock_host(host_, swact=True) + + 
@mark.parametrize(('vcpus', 'cpu_thread_policy', 'min_vcpus', 'expt_err'), [ + param(2, 'require', None, 'CPUThreadErr.HT_HOST_UNAVAIL'), + param(3, 'require', None, 'CPUThreadErr.HT_HOST_UNAVAIL'), + param(3, 'isolate', None, None), + param(2, 'prefer', None, None), + ]) + def test_boot_vm_cpu_thread_ht_disabled(self, vcpus, cpu_thread_policy, + min_vcpus, expt_err): + """ + Test boot vm with specified cpu thread policy when no HT host is + available on system + + Args: + vcpus (int): number of vcpus to set in flavor + cpu_thread_policy (str): cpu thread policy in flavor extra spec + min_vcpus (int): min_vcpus in flavor extra spec + expt_err (str|None): expected error message in nova show if any + + Skip condition: + - All hosts are hyperthreading enabled on system + + Setups: + - Find out HT hosts and non-HT_hosts on system (module) + - Ensure no HT hosts on system + + Test Steps: + - Create a flavor with given number of vcpus + - Set flavor extra specs as per test params + - Get the host vcpu usage before booting vm + - Attempt to boot a vm with above flavor + - if expt_err is None: + - Ensure vm is booted on non-HT host for 'isolate'/'prefer' + vm + - Check vm-topology, host side vcpu usage, topology from + within the guest to ensure vm is properly booted + - else, ensure expected error message is included in nova + show for 'require' vm + + Teardown: + - Delete created vm, volume, flavor + + """ + + LOG.tc_step("Create flavor with {} vcpus".format(vcpus)) + flavor_id = nova_helper.create_flavor(name='cpu_thread', vcpus=vcpus)[1] + ResourceCleanup.add('flavor', flavor_id) + + specs = {FlavorSpec.CPU_THREAD_POLICY: cpu_thread_policy, + FlavorSpec.CPU_POLICY: 'dedicated'} + if min_vcpus is not None: + specs[FlavorSpec.MIN_VCPUS] = min_vcpus + + LOG.tc_step("Set following extra specs: {}".format(specs)) + nova_helper.set_flavor(flavor_id, **specs) + + LOG.tc_step("Attempt to boot a vm with the above flavor.") + code, vm_id, msg = vm_helper.boot_vm( + name='cpu_thread_{}'.format(cpu_thread_policy), + flavor=flavor_id, fail_ok=True, cleanup='function') + + if expt_err: + assert 1 == code, "Boot vm cli is not rejected. Details: " \ + "{}".format(msg) + else: + assert 0 == code, "Boot vm with {} policy was unsuccessful. " \ + "Details: {}".format(cpu_thread_policy, msg) diff --git a/automated-pytest-suite/testcases/functional/nova/test_evacuate_vms.py b/automated-pytest-suite/testcases/functional/nova/test_evacuate_vms.py new file mode 100755 index 0000000..639da81 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_evacuate_vms.py @@ -0,0 +1,318 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc.
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture, skip, mark + +import keywords.host_helper +from utils.tis_log import LOG +from consts.timeout import VMTimeout +from consts.stx import VMStatus +from consts.reasons import SkipStorageBacking, SkipHypervisor + +from keywords import vm_helper, host_helper, nova_helper, cinder_helper, \ + system_helper, check_helper +from testfixtures.fixture_resources import ResourceCleanup + +from testfixtures.recover_hosts import HostsToRecover + + +@fixture(scope='module', autouse=True) +def update_quotas(add_admin_role_module): + LOG.fixture_step("Update instance and volume quota to at least 10 and " + "20 respectively") + vm_helper.ensure_vms_quotas() + + +@fixture(scope='module') +def hosts_per_backing(): + hosts_per_backend = host_helper.get_hosts_per_storage_backing() + return hosts_per_backend + + +def touch_files_under_vm_disks(vm_id, ephemeral, swap, vm_type, disks): + expt_len = 1 + int(bool(ephemeral)) + int(bool(swap)) + \ + (1 if 'with_vol' in vm_type else 0) + + LOG.info("\n--------------------------Auto mount non-root disks if any") + mounts = vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=disks) + assert expt_len == len(mounts) + + if bool(swap): + mounts.remove('none') + + LOG.info("\n--------------------------Create files under vm disks: " + "{}".format(mounts)) + file_paths, content = vm_helper.touch_files(vm_id=vm_id, file_dirs=mounts) + return file_paths, content + + +class TestDefaultGuest: + + @fixture(scope='class', autouse=True) + def skip_test_if_less_than_two_hosts(self): + if len(host_helper.get_up_hypervisors()) < 2: + skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS) + + @mark.parametrize('storage_backing', [ + 'local_image', + 'remote', + ]) + def test_evacuate_vms_with_inst_backing(self, hosts_per_backing, + storage_backing): + """ + Test evacuate vms with various vm storage configs and host instance + backing configs + + Args: + storage_backing: storage backing under test + + Skip conditions: + - Less than two hosts configured with storage backing under test + + Setups: + - Add admin role to primary tenant (module) + + Test Steps: + - Create flv_rootdisk without ephemeral or swap disks, and set + storage backing extra spec + - Create flv_ephemswap with ephemeral AND swap disks, and set + storage backing extra spec + - Boot following vms on same host and wait for them to be + pingable from NatBox: + - Boot vm1 from volume with flavor flv_rootdisk + - Boot vm2 from volume with flavor flv_localdisk + - Boot vm3 from image with flavor flv_rootdisk + - Boot vm4 from image with flavor flv_rootdisk, and attach a + volume to it + - Boot vm5 from image with flavor flv_localdisk + - sudo reboot -f on vms host + - Ensure evacuation for all 5 vms are successful (vm host + changed, active state, pingable from NatBox) + + Teardown: + - Delete created vms, volumes, flavors + - Remove admin role from primary tenant (module) + + """ + hosts = hosts_per_backing.get(storage_backing, []) + if len(hosts) < 2: + skip(SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format( + storage_backing)) + + target_host = hosts[0] + + LOG.tc_step("Create a flavor without ephemeral or swap disks") + flavor_1 = nova_helper.create_flavor('flv_rootdisk', + storage_backing=storage_backing)[1] + ResourceCleanup.add('flavor', flavor_1, scope='function') + + LOG.tc_step("Create another flavor with ephemeral and swap disks") + flavor_2 = nova_helper.create_flavor('flv_ephemswap', ephemeral=1, + swap=512, + 
storage_backing=storage_backing)[1] + ResourceCleanup.add('flavor', flavor_2, scope='function') + + LOG.tc_step("Boot vm1 from volume with flavor flv_rootdisk and wait " + "for it pingable from NatBox") + vm1_name = "vol_root" + vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, source='volume', + avail_zone='nova', vm_host=target_host, + cleanup='function')[1] + + vms_info = {vm1: {'ephemeral': 0, + 'swap': 0, + 'vm_type': 'volume', + 'disks': vm_helper.get_vm_devices_via_virsh(vm1)}} + vm_helper.wait_for_vm_pingable_from_natbox(vm1) + + LOG.tc_step("Boot vm2 from volume with flavor flv_localdisk and wait " + "for it pingable from NatBox") + vm2_name = "vol_ephemswap" + vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, source='volume', + avail_zone='nova', vm_host=target_host, + cleanup='function')[1] + + vm_helper.wait_for_vm_pingable_from_natbox(vm2) + vms_info[vm2] = {'ephemeral': 1, + 'swap': 512, + 'vm_type': 'volume', + 'disks': vm_helper.get_vm_devices_via_virsh(vm2)} + + LOG.tc_step("Boot vm3 from image with flavor flv_rootdisk and wait for " + "it pingable from NatBox") + vm3_name = "image_root" + vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, source='image', + avail_zone='nova', vm_host=target_host, + cleanup='function')[1] + + vm_helper.wait_for_vm_pingable_from_natbox(vm3) + vms_info[vm3] = {'ephemeral': 0, + 'swap': 0, + 'vm_type': 'image', + 'disks': vm_helper.get_vm_devices_via_virsh(vm3)} + + LOG.tc_step("Boot vm4 from image with flavor flv_rootdisk, attach a " + "volume to it and wait for it " + "pingable from NatBox") + vm4_name = 'image_root_attachvol' + vm4 = vm_helper.boot_vm(vm4_name, flavor_1, source='image', + avail_zone='nova', + vm_host=target_host, + cleanup='function')[1] + + vol = cinder_helper.create_volume(bootable=False)[1] + ResourceCleanup.add('volume', vol, scope='function') + vm_helper.attach_vol_to_vm(vm4, vol_id=vol, mount=False) + + vm_helper.wait_for_vm_pingable_from_natbox(vm4) + vms_info[vm4] = {'ephemeral': 0, + 'swap': 0, + 'vm_type': 'image_with_vol', + 'disks': vm_helper.get_vm_devices_via_virsh(vm4)} + + LOG.tc_step("Boot vm5 from image with flavor flv_localdisk and wait " + "for it pingable from NatBox") + vm5_name = 'image_ephemswap' + vm5 = vm_helper.boot_vm(vm5_name, flavor_2, source='image', + avail_zone='nova', vm_host=target_host, + cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm5) + vms_info[vm5] = {'ephemeral': 1, + 'swap': 512, + 'vm_type': 'image', + 'disks': vm_helper.get_vm_devices_via_virsh(vm5)} + + LOG.tc_step("Check all VMs are booted on {}".format(target_host)) + vms_on_host = vm_helper.get_vms_on_host(hostname=target_host) + vms = [vm1, vm2, vm3, vm4, vm5] + assert set(vms) <= set(vms_on_host), "VMs booted on host: {}. " \ + "Current vms on host: {}". 
\ + format(vms, vms_on_host) + + for vm_ in vms: + LOG.tc_step("Touch files under vm disks {}: " + "{}".format(vm_, vms_info[vm_])) + file_paths, content = touch_files_under_vm_disks(vm_, + **vms_info[vm_]) + vms_info[vm_]['file_paths'] = file_paths + vms_info[vm_]['content'] = content + + LOG.tc_step("Reboot target host {}".format(target_host)) + vm_helper.evacuate_vms(host=target_host, vms_to_check=vms, + ping_vms=True) + + LOG.tc_step("Check files after evacuation") + for vm_ in vms: + LOG.info("--------------------Check files for vm {}".format(vm_)) + check_helper.check_vm_files(vm_id=vm_, vm_action='evacuate', + storage_backing=storage_backing, + prev_host=target_host, **vms_info[vm_]) + vm_helper.ping_vms_from_natbox(vms) + + @fixture(scope='function') + def check_hosts(self): + storage_backing, hosts = \ + keywords.host_helper.get_storage_backing_with_max_hosts() + if len(hosts) < 2: + skip("at least two hosts with the same storage backing are " + "required") + + acceptable_hosts = [] + for host in hosts: + numa_num = len(host_helper.get_host_procs(host)) + if numa_num > 1: + acceptable_hosts.append(host) + if len(acceptable_hosts) == 2: + break + else: + skip("at least two hosts with multiple numa nodes are required") + + target_host = acceptable_hosts[0] + return target_host + + +class TestOneHostAvail: + @fixture(scope='class') + def get_zone(self, request, add_stxauto_zone): + if system_helper.is_aio_simplex(): + zone = 'nova' + return zone + + zone = 'stxauto' + storage_backing, hosts = \ + keywords.host_helper.get_storage_backing_with_max_hosts() + host = hosts[0] + LOG.fixture_step('Select host {} with backing ' + '{}'.format(host, storage_backing)) + nova_helper.add_hosts_to_aggregate(aggregate='stxauto', hosts=[host]) + + def remove_hosts_from_zone(): + nova_helper.remove_hosts_from_aggregate(aggregate='stxauto', + check_first=False) + + request.addfinalizer(remove_hosts_from_zone) + return zone + + @mark.sx_sanity + def test_reboot_only_host(self, get_zone): + """ + Test reboot only hypervisor on the system + + Args: + get_zone: fixture to create stxauto aggregate, to ensure vms can + only on one host + + Setups: + - If more than 1 hypervisor: Create stxauto aggregate and add + one host to the aggregate + + Test Steps: + - Launch various vms on target host + - vm booted from cinder volume, + - vm booted from glance image, + - vm booted from glance image, and have an extra cinder + volume attached after launch, + - vm booed from cinder volume with ephemeral and swap disks + - sudo reboot -f only host + - Check host is recovered + - Check vms are recovered and reachable from NatBox + + """ + zone = get_zone + + LOG.tc_step("Launch 5 vms in {} zone".format(zone)) + vms = vm_helper.boot_vms_various_types(avail_zone=zone, + cleanup='function') + target_host = vm_helper.get_vm_host(vm_id=vms[0]) + for vm in vms[1:]: + vm_host = vm_helper.get_vm_host(vm) + assert target_host == vm_host, "VMs are not booted on same host" + + LOG.tc_step("Reboot -f from target host {}".format(target_host)) + HostsToRecover.add(target_host) + host_helper.reboot_hosts(target_host) + + LOG.tc_step("Check vms are in Active state after host come back up") + res, active_vms, inactive_vms = vm_helper.wait_for_vms_values( + vms=vms, value=VMStatus.ACTIVE, timeout=600) + + vms_host_err = [] + for vm in vms: + if vm_helper.get_vm_host(vm) != target_host: + vms_host_err.append(vm) + + assert not vms_host_err, "Following VMs are not on the same host {}: " \ + "{}\nVMs did not reach Active state: {}". 
\ + format(target_host, vms_host_err, inactive_vms) + + assert not inactive_vms, "VMs did not reach Active state after " \ + "evacuated to other host: " \ + "{}".format(inactive_vms) + + LOG.tc_step("Check VMs are pingable from NatBox after evacuation") + vm_helper.wait_for_vm_pingable_from_natbox(vms, + timeout=VMTimeout.DHCP_RETRY) diff --git a/automated-pytest-suite/testcases/functional/nova/test_lock_with_vms.py b/automated-pytest-suite/testcases/functional/nova/test_lock_with_vms.py new file mode 100755 index 0000000..dc5bb79 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_lock_with_vms.py @@ -0,0 +1,183 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import random + +from pytest import fixture, mark, skip + +import keywords.host_helper +from utils.tis_log import LOG +from consts.reasons import SkipStorageBacking +from consts.stx import VMStatus, SysType +from consts.timeout import VMTimeout +from testfixtures.recover_hosts import HostsToRecover +from keywords import vm_helper, nova_helper, host_helper, system_helper + + +@fixture(scope='module', autouse=True) +def update_instances_quota(): + vm_helper.ensure_vms_quotas() + + +def _boot_migrable_vms(storage_backing): + """ + Create vms with specific storage backing that can be live migrated + + Args: + storage_backing: 'local_image' or 'remote' + + Returns: (vms_info (list), flavors_created (list)) + vms_info : [(vm_id1, block_mig1), (vm_id2, block_mig2), ...] + + """ + vms_to_test = [] + flavors_created = [] + flavor_no_localdisk = nova_helper.create_flavor( + ephemeral=0, swap=0, storage_backing=storage_backing)[1] + flavors_created.append(flavor_no_localdisk) + + vm_1 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='volume')[1] + + block_mig_1 = False + vms_to_test.append((vm_1, block_mig_1)) + + LOG.info("Boot a VM from image if host storage backing is local_image or " + "remote...") + vm_2 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='image')[1] + block_mig_2 = True + vms_to_test.append((vm_2, block_mig_2)) + if storage_backing == 'remote': + LOG.info("Boot a VM from volume with local disks if storage backing " + "is remote...") + ephemeral_swap = random.choice([[0, 512], [1, 512], [1, 0]]) + flavor_with_localdisk = nova_helper.create_flavor( + ephemeral=ephemeral_swap[0], swap=ephemeral_swap[1])[1] + flavors_created.append(flavor_with_localdisk) + vm_3 = vm_helper.boot_vm(flavor=flavor_with_localdisk, + source='volume')[1] + block_mig_3 = False + vms_to_test.append((vm_3, block_mig_3)) + LOG.info("Boot a VM from image with volume attached if " + "storage backing is remote...") + vm_4 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='image')[1] + vm_helper.attach_vol_to_vm(vm_id=vm_4) + block_mig_4 = False + vms_to_test.append((vm_4, block_mig_4)) + + return vms_to_test, flavors_created + + +class TestLockWithVMs: + @fixture() + def target_hosts(self): + """ + Test fixture for test_lock_with_vms(). + Calculate target host(s) to perform lock based on storage backing of + vms_to_test, and live migrate suitable vms + to target host before test start. + """ + + storage_backing, target_hosts = \ + keywords.host_helper.get_storage_backing_with_max_hosts() + if len(target_hosts) < 2: + skip(SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING. 
+ format(storage_backing)) + + target_host = target_hosts[0] + if SysType.AIO_DX == system_helper.get_sys_type(): + target_host = system_helper.get_standby_controller_name() + + return storage_backing, target_host + + @mark.nightly + def test_lock_with_vms(self, target_hosts, no_simplex, add_admin_role_func): + """ + Test lock host with vms on it. + + Args: + target_hosts (list): targeted host(s) to lock that was prepared + by the target_hosts test fixture. + + Skip Conditions: + - Less than 2 hypervisor hosts on the system + + Prerequisites: + - Hosts storage backing are pre-configured to storage backing + under test + ie., 2 or more hosts should support the storage backing under + test. + Test Setups: + - Set instances quota to 10 if it was less than 8 + - Determine storage backing(s) under test. i.e.,storage backings + supported by at least 2 hosts on the system + - Create flavors with storage extra specs set based on storage + backings under test + - Create vms_to_test that can be live migrated using created flavors + - Determine target host(s) to perform lock based on which host(s) + have the most vms_to_test + - Live migrate vms to target host(s) + Test Steps: + - Lock target host + - Verify lock succeeded and vms status unchanged + - Repeat above steps if more than one target host + Test Teardown: + - Delete created vms and volumes + - Delete created flavors + - Unlock locked target host(s) + + """ + storage_backing, host = target_hosts + vms_num = 5 + vm_helper.ensure_vms_quotas(vms_num=vms_num) + + LOG.tc_step("Boot {} vms with various storage settings".format(vms_num)) + vms = vm_helper.boot_vms_various_types(cleanup='function', + vms_num=vms_num, + storage_backing=storage_backing, + target_host=host) + + LOG.tc_step("Attempt to lock target host {}...".format(host)) + HostsToRecover.add(host) + host_helper.lock_host(host=host, check_first=False, fail_ok=False, + swact=True) + + LOG.tc_step("Verify lock succeeded and vms still in good state") + vm_helper.wait_for_vms_values(vms=vms, fail_ok=False) + for vm in vms: + vm_host = vm_helper.get_vm_host(vm_id=vm) + assert vm_host != host, "VM is still on {} after lock".format(host) + + vm_helper.wait_for_vm_pingable_from_natbox( + vm_id=vm, timeout=VMTimeout.DHCP_RETRY) + + @mark.sx_nightly + def test_lock_with_max_vms_simplex(self, simplex_only): + vms_num = host_helper.get_max_vms_supported(host='controller-0') + vm_helper.ensure_vms_quotas(vms_num=vms_num) + + LOG.tc_step("Boot {} vms with various storage settings".format(vms_num)) + vms = vm_helper.boot_vms_various_types(cleanup='function', + vms_num=vms_num) + + LOG.tc_step("Lock vm host on simplex system") + HostsToRecover.add('controller-0') + host_helper.lock_host('controller-0') + + LOG.tc_step("Ensure vms are in {} state after locked host come " + "online".format(VMStatus.STOPPED)) + vm_helper.wait_for_vms_values(vms, value=VMStatus.STOPPED, + fail_ok=False) + + LOG.tc_step("Unlock host on simplex system") + host_helper.unlock_host(host='controller-0') + + LOG.tc_step("Ensure vms are Active and Pingable from NatBox") + vm_helper.wait_for_vms_values(vms, value=VMStatus.ACTIVE, + fail_ok=False, timeout=600) + for vm in vms: + vm_helper.wait_for_vm_pingable_from_natbox( + vm, timeout=VMTimeout.DHCP_RETRY) diff --git a/automated-pytest-suite/testcases/functional/nova/test_mempage_size.py b/automated-pytest-suite/testcases/functional/nova/test_mempage_size.py new file mode 100755 index 0000000..39ec35e --- /dev/null +++ 
b/automated-pytest-suite/testcases/functional/nova/test_mempage_size.py @@ -0,0 +1,501 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import re +import random + +from pytest import fixture, mark, skip, param + +import keywords.host_helper +from utils.tis_log import LOG +from consts.stx import FlavorSpec, ImageMetadata, NovaCLIOutput +from keywords import nova_helper, vm_helper, system_helper, cinder_helper, \ + host_helper, glance_helper + +MEMPAGE_HEADERS = ('app_total_4K', 'app_hp_avail_2M', 'app_hp_avail_1G') + + +def skip_4k_for_ovs(mempage_size): + if mempage_size in (None, 'any', 'small') and not system_helper.is_avs(): + skip("4K VM is unsupported by OVS by default") + + +@fixture(scope='module') +def prepare_resource(add_admin_role_module): + hypervisor = random.choice(host_helper.get_up_hypervisors()) + flavor = nova_helper.create_flavor(name='flavor-1g', ram=1024, + cleanup='module')[1] + vol_id = cinder_helper.create_volume('vol-mem_page_size', + cleanup='module')[1] + return hypervisor, flavor, vol_id + + +def _get_expt_indices(mempage_size): + if mempage_size in ('small', None): + expt_mempage_indices = (0,) + elif str(mempage_size) == '2048': + expt_mempage_indices = (1,) + elif str(mempage_size) == '1048576': + expt_mempage_indices = (2,) + elif mempage_size == 'large': + expt_mempage_indices = (1, 2) + else: + expt_mempage_indices = (0, 1, 2) + return expt_mempage_indices + + +def is_host_mem_sufficient(host, mempage_size=None, mem_gib=1): + host_mems_per_proc = host_helper.get_host_memories(host, + headers=MEMPAGE_HEADERS) + mempage_size = 'small' if not mempage_size else mempage_size + expt_mempage_indices = _get_expt_indices(mempage_size) + + for proc, mems_for_proc in host_mems_per_proc.items(): + pages_4k, pages_2m, pages_1g = mems_for_proc + mems_for_proc = (int(pages_4k * 4 / 1048576), + int(pages_2m * 2 / 1024), int(pages_1g)) + for index in expt_mempage_indices: + avail_g_for_memsize = mems_for_proc[index] + if avail_g_for_memsize >= mem_gib: + LOG.info("{} has sufficient {} mempages to launch {}G " + "vm".format(host, mempage_size, mem_gib)) + return True, host_mems_per_proc + + LOG.info("{} does not have sufficient {} mempages to launch {}G " + "vm".format(host, mempage_size, mem_gib)) + return False, host_mems_per_proc + + +def check_mempage_change(vm, host, prev_host_mems, mempage_size=None, + mem_gib=1, numa_node=None): + expt_mempage_indics = _get_expt_indices(mempage_size) + if numa_node is None: + numa_node = vm_helper.get_vm_numa_nodes_via_ps(vm_id=vm, host=host)[0] + + prev_host_mems = prev_host_mems[numa_node] + current_host_mems = host_helper.get_host_memories( + host, headers=MEMPAGE_HEADERS)[numa_node] + + if 0 in expt_mempage_indics: + if current_host_mems[1:] == prev_host_mems[1:] and \ + abs(prev_host_mems[0] - current_host_mems[ + 0]) <= mem_gib * 512 * 1024 / 4: + return + + for i in expt_mempage_indics: + if i == 0: + continue + + expt_pagecount = 1 if i == 2 else 1024 + if prev_host_mems[i] - expt_pagecount == current_host_mems[i]: + LOG.info("{} {} memory page reduced by {}GiB as " + "expected".format(host, MEMPAGE_HEADERS[i], mem_gib)) + return + + LOG.info("{} {} memory pages - Previous: {}, current: " + "{}".format(host, MEMPAGE_HEADERS[i], + prev_host_mems[i], current_host_mems[i])) + + assert 0, "{} available vm {} memory page count did not change as " \ + "expected".format(host, mempage_size) + + +@mark.parametrize('mem_page_size', [ + param('2048', marks=mark.domain_sanity), + 
param('large', marks=mark.p1), + param('small', marks=mark.domain_sanity), + param('1048576', marks=mark.p3), +]) +def test_vm_mem_pool_default_config(prepare_resource, mem_page_size): + """ + Test memory used by vm is taken from the expected memory pool + + Args: + prepare_resource (tuple): test fixture + mem_page_size (str): mem page size setting in flavor + + Setup: + - Create a flavor with 1G RAM (module) + - Create a volume with default values (module) + - Select a hypervisor to launch vm on + + Test Steps: + - Set memory page size flavor spec to given value + - Attempt to boot a vm with above flavor and a basic volume + - Verify the system is taking memory from the expected memory pool: + - If boot vm succeeded: + - Calculate the available/used memory change on the vm host + - Verify the memory is taken from memory pool specified via + mem_page_size + - If boot vm failed: + - Verify system attempted to take memory from expected pool, + but insufficient memory is available + + Teardown: + - Delete created vm + - Delete created volume and flavor (module) + + """ + hypervisor, flavor_1g, volume_ = prepare_resource + + LOG.tc_step("Set memory page size extra spec in flavor") + nova_helper.set_flavor(flavor_1g, + **{FlavorSpec.CPU_POLICY: 'dedicated', + FlavorSpec.MEM_PAGE_SIZE: mem_page_size}) + + LOG.tc_step("Check system host-memory-list before launch vm") + is_sufficient, prev_host_mems = is_host_mem_sufficient( + host=hypervisor, mempage_size=mem_page_size) + + LOG.tc_step("Boot a vm with mem page size spec - {}".format(mem_page_size)) + code, vm_id, msg = vm_helper.boot_vm('mempool_' + mem_page_size, flavor_1g, + source='volume', fail_ok=True, + vm_host=hypervisor, source_id=volume_, + cleanup='function') + + if not is_sufficient: + LOG.tc_step("Check boot vm rejected due to insufficient memory from " + "{} pool".format(mem_page_size)) + assert 1 == code, "{} vm launched successfully when insufficient " \ + "mempage configured on {}". 
\ + format(mem_page_size, hypervisor) + else: + LOG.tc_step("Check vm launches successfully and {} available mempages " + "change accordingly".format(hypervisor)) + assert 0 == code, "VM failed to launch with '{}' " \ + "mempages".format(mem_page_size) + check_mempage_change(vm_id, host=hypervisor, + prev_host_mems=prev_host_mems, + mempage_size=mem_page_size) + + +def get_hosts_to_configure(candidates): + hosts_selected = [None, None] + hosts_to_configure = [None, None] + max_4k, expt_p1_4k, max_1g, expt_p1_1g = \ + 1.5 * 1048576 / 4, 2.5 * 1048576 / 4, 1, 2 + for host in candidates: + host_mems = host_helper.get_host_memories(host, headers=MEMPAGE_HEADERS) + if 1 not in host_mems: + LOG.info("{} has only 1 processor".format(host)) + continue + + proc0_mems, proc1_mems = host_mems[0], host_mems[1] + p0_4k, p1_4k, p0_1g, p1_1g = \ + proc0_mems[0], proc1_mems[0], proc0_mems[2], proc1_mems[2] + + if p0_4k <= max_4k and p0_1g <= max_1g: + if not hosts_selected[1] and p1_4k >= expt_p1_4k and \ + p1_1g <= max_1g: + hosts_selected[1] = host + elif not hosts_selected[0] and p1_4k <= max_4k and \ + p1_1g >= expt_p1_1g: + hosts_selected[0] = host + + if None not in hosts_selected: + LOG.info("1G and 4k hosts already configured and selected: " + "{}".format(hosts_selected)) + break + else: + for i in range(len(hosts_selected)): + if hosts_selected[i] is None: + hosts_selected[i] = hosts_to_configure[i] = \ + list(set(candidates) - set(hosts_selected))[0] + LOG.info("Hosts selected: {}; To be configured: " + "{}".format(hosts_selected, hosts_to_configure)) + + return hosts_selected, hosts_to_configure + + +class TestConfigMempage: + MEM_CONFIGS = [None, 'any', 'large', 'small', '2048', '1048576'] + + @fixture(scope='class') + def add_1g_and_4k_pages(self, request, config_host_class, + skip_for_one_proc, add_stxauto_zone, + add_admin_role_module): + storage_backing, candidate_hosts = \ + keywords.host_helper.get_storage_backing_with_max_hosts() + + if len(candidate_hosts) < 2: + skip("Less than two up hosts have same storage backing") + + LOG.fixture_step("Check mempage configs for hypervisors and select " + "host to use or configure") + hosts_selected, hosts_to_configure = get_hosts_to_configure( + candidate_hosts) + + if set(hosts_to_configure) != {None}: + def _modify(host): + is_1g = True if hosts_selected.index(host) == 0 else False + proc1_kwargs = {'gib_1g': 2, 'gib_4k_range': (None, 2)} if \ + is_1g else {'gib_1g': 0, 'gib_4k_range': (2, None)} + kwargs = {'gib_1g': 0, 'gib_4k_range': (None, 2)}, proc1_kwargs + + actual_mems = host_helper._get_actual_mems(host=host) + LOG.fixture_step("Modify {} proc0 to have 0 of 1G pages and " + "<2GiB of 4K pages".format(host)) + host_helper.modify_host_memory(host, proc=0, + actual_mems=actual_mems, + **kwargs[0]) + LOG.fixture_step("Modify {} proc1 to have >=2GiB of {} " + "pages".format(host, '1G' if is_1g else '4k')) + host_helper.modify_host_memory(host, proc=1, + actual_mems=actual_mems, + **kwargs[1]) + + for host_to_config in hosts_to_configure: + if host_to_config: + config_host_class(host=host_to_config, modify_func=_modify) + LOG.fixture_step("Check mem pages for {} are modified " + "and updated successfully". + format(host_to_config)) + host_helper.wait_for_memory_update(host=host_to_config) + + LOG.fixture_step("Check host memories for {} after mem config " + "completed".format(hosts_selected)) + _, hosts_unconfigured = get_hosts_to_configure(hosts_selected) + assert not hosts_unconfigured[0], \ + "Failed to configure {}. 
Expt: proc0:1g<2,4k<2gib;" \ + "proc1:1g>=2,4k<2gib".format(hosts_unconfigured[0]) + assert not hosts_unconfigured[1], \ + "Failed to configure {}. Expt: proc0:1g<2,4k<2gib;" \ + "proc1:1g<2,4k>=2gib".format(hosts_unconfigured[1]) + + LOG.fixture_step('(class) Add hosts to stxauto aggregate: ' + '{}'.format(hosts_selected)) + nova_helper.add_hosts_to_aggregate(aggregate='stxauto', + hosts=hosts_selected) + + def remove_host_from_zone(): + LOG.fixture_step('(class) Remove hosts from stxauto aggregate: ' + '{}'.format(hosts_selected)) + nova_helper.remove_hosts_from_aggregate(aggregate='stxauto', + check_first=False) + + request.addfinalizer(remove_host_from_zone) + + return hosts_selected, storage_backing + + @fixture(scope='class') + def flavor_2g(self, add_1g_and_4k_pages): + hosts, storage_backing = add_1g_and_4k_pages + LOG.fixture_step("Create a 2G memory flavor to be used by mempage " + "testcases") + flavor = nova_helper.create_flavor(name='flavor-2g', ram=2048, + storage_backing=storage_backing, + cleanup='class')[1] + return flavor, hosts, storage_backing + + @fixture(scope='class') + def image_mempage(self): + LOG.fixture_step("(class) Create a glance image for mempage testcases") + image_id = glance_helper.create_image(name='mempage', + cleanup='class')[1] + return image_id + + @fixture() + def check_alarms(self, add_1g_and_4k_pages): + hosts, storage_backing = add_1g_and_4k_pages + host_helper.get_hypervisor_info(hosts=hosts) + for host in hosts: + host_helper.get_host_memories(host, wait_for_update=False) + + @fixture(params=MEM_CONFIGS) + def flavor_mem_page_size(self, request, flavor_2g): + flavor_id = flavor_2g[0] + mem_page_size = request.param + skip_4k_for_ovs(mem_page_size) + + if mem_page_size is None: + nova_helper.unset_flavor(flavor_id, FlavorSpec.MEM_PAGE_SIZE) + else: + nova_helper.set_flavor(flavor_id, + **{FlavorSpec.MEM_PAGE_SIZE: mem_page_size}) + + return mem_page_size + + @mark.parametrize('image_mem_page_size', MEM_CONFIGS) + def test_boot_vm_mem_page_size(self, flavor_2g, flavor_mem_page_size, + image_mempage, image_mem_page_size): + """ + Test boot vm with various memory page size setting in flavor and image. 
+ + Args: + flavor_2g (tuple): flavor id of a flavor with ram set to 2G, + hosts configured and storage_backing + flavor_mem_page_size (str): memory page size extra spec value to + set in flavor + image_mempage (str): image id for tis image + image_mem_page_size (str): memory page metadata value to set in + image + + Setup: + - Create a flavor with 2G RAM (module) + - Get image id of tis image (module) + + Test Steps: + - Set/Unset flavor memory page size extra spec with given value ( + unset if None is given) + - Set/Unset image memory page size metadata with given value ( + unset if None if given) + - Attempt to boot a vm with above flavor and image + - Verify boot result based on the mem page size values in the + flavor and image + + Teardown: + - Delete vm if booted + - Delete created flavor (module) + + """ + skip_4k_for_ovs(image_mem_page_size) + + flavor_id, hosts, storage_backing = flavor_2g + + if image_mem_page_size is None: + glance_helper.unset_image(image_mempage, + properties=ImageMetadata.MEM_PAGE_SIZE) + expt_code = 0 + else: + glance_helper.set_image(image=image_mempage, + properties={ImageMetadata.MEM_PAGE_SIZE: + image_mem_page_size}) + if flavor_mem_page_size is None: + expt_code = 4 + elif flavor_mem_page_size.lower() in ['any', 'large']: + expt_code = 0 + else: + expt_code = 0 if flavor_mem_page_size.lower() == \ + image_mem_page_size.lower() else 4 + + LOG.tc_step("Attempt to boot a vm with flavor_mem_page_size: {}, and " + "image_mem_page_size: {}. And check return " + "code is {}.".format(flavor_mem_page_size, + image_mem_page_size, expt_code)) + + actual_code, vm_id, msg = vm_helper.boot_vm(name='mem_page_size', + flavor=flavor_id, + source='image', + source_id=image_mempage, + fail_ok=True, + avail_zone='stxauto', + cleanup='function') + + assert expt_code == actual_code, "Expect boot vm to return {}; " \ + "Actual result: {} with msg: " \ + "{}".format(expt_code, actual_code, + msg) + + if expt_code != 0: + assert re.search( + NovaCLIOutput.VM_BOOT_REJECT_MEM_PAGE_SIZE_FORBIDDEN, msg) + else: + assert vm_helper.get_vm_host(vm_id) in hosts, \ + "VM is not booted on hosts in stxauto zone" + LOG.tc_step("Ensure VM is pingable from NatBox") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + @mark.parametrize('mem_page_size', [ + param('1048576', marks=mark.priorities('domain_sanity', 'nightly')), + param('large'), + param('small', marks=mark.nightly), + ]) + def test_schedule_vm_mempage_config(self, flavor_2g, mem_page_size): + """ + Test memory used by vm is taken from the expected memory pool and the + vm was scheduled on the correct + host/processor + + Args: + flavor_2g (tuple): flavor id of a flavor with ram set to 2G, + hosts, storage_backing + mem_page_size (str): mem page size setting in flavor + + Setup: + - Create host aggregate + - Add two hypervisors to the host aggregate + - Host-0 configuration: + - Processor-0: + - Insufficient 1g pages to boot vm that requires 2g + - Insufficient 4k pages to boot vm that requires 2g + - Processor-1: + - Sufficient 1g pages to boot vm that requires 2g + - Insufficient 4k pages to boot vm that requires 2g + - Host-1 configuration: + - Processor-0: + - Insufficient 1g pages to boot vm that requires 2g + - Insufficient 4k pages to boot vm that requires 2g + - Processor-1: + - Insufficient 1g pages to boot vm that requires 2g + - Sufficient 4k pages to boot vm that requires 2g + - Configure a compute to have 4 1G hugepages (module) + - Create a flavor with 2G RAM (module) + - Create a volume with default values 
(module) + + Test Steps: + - Set memory page size flavor spec to given value + - Boot a vm with above flavor and a basic volume + - Calculate the available/used memory change on the vm host + - Verify the memory is taken from 1G hugepage memory pool + - Verify the vm was booted on a supporting host + + Teardown: + - Delete created vm + - Delete created volume and flavor (module) + - Re-Configure the compute to have 0 hugepages (module) + - Revert host mem pages back to original + """ + skip_4k_for_ovs(mem_page_size) + + flavor_id, hosts_configured, storage_backing = flavor_2g + LOG.tc_step("Set memory page size extra spec in flavor") + nova_helper.set_flavor(flavor_id, + **{FlavorSpec.CPU_POLICY: 'dedicated', + FlavorSpec.MEM_PAGE_SIZE: mem_page_size}) + + host_helper.wait_for_hypervisors_up(hosts_configured) + prev_computes_mems = {} + for host in hosts_configured: + prev_computes_mems[host] = host_helper.get_host_memories( + host=host, headers=MEMPAGE_HEADERS) + + LOG.tc_step( + "Boot a vm with mem page size spec - {}".format(mem_page_size)) + + host_1g, host_4k = hosts_configured + code, vm_id, msg = vm_helper.boot_vm('mempool_configured', flavor_id, + fail_ok=True, + avail_zone='stxauto', + cleanup='function') + assert 0 == code, "VM is not successfully booted." + + instance_name, vm_host = vm_helper.get_vm_values( + vm_id, fields=[":instance_name", ":host"], strict=False) + vm_node = vm_helper.get_vm_numa_nodes_via_ps( + vm_id=vm_id, instance_name=instance_name, host=vm_host) + if mem_page_size == '1048576': + assert host_1g == vm_host, \ + "VM is not created on the configured host " \ + "{}".format(hosts_configured[0]) + assert vm_node == [1], "VM (huge) did not boot on the correct " \ + "processor" + elif mem_page_size == 'small': + assert host_4k == vm_host, "VM is not created on the configured " \ + "host {}".format(hosts_configured[1]) + assert vm_node == [1], "VM (small) did not boot on the correct " \ + "processor" + else: + assert vm_host in hosts_configured + + LOG.tc_step("Calculate memory change on vm host - {}".format(vm_host)) + check_mempage_change(vm_id, vm_host, + prev_host_mems=prev_computes_mems[vm_host], + mempage_size=mem_page_size, mem_gib=2, + numa_node=vm_node[0]) + + LOG.tc_step("Ensure vm is pingable from NatBox") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) diff --git a/automated-pytest-suite/testcases/functional/nova/test_migrate_vms.py b/automated-pytest-suite/testcases/functional/nova/test_migrate_vms.py new file mode 100755 index 0000000..89c7069 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_migrate_vms.py @@ -0,0 +1,412 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture, mark, skip, param + +from utils.tis_log import LOG +from consts.stx import FlavorSpec, EventLogID +# Don't remove this import, used by eval() +from consts.cli_errs import LiveMigErr +from keywords import vm_helper, nova_helper, host_helper, cinder_helper, \ + glance_helper, check_helper, system_helper +from testfixtures.fixture_resources import ResourceCleanup + + +@fixture(scope='module') +def check_system(): + up_hypervisors = host_helper.get_up_hypervisors() + if len(up_hypervisors) < 2: + skip("Less than two up hypervisors") + + +@fixture(scope='module') +def hosts_per_stor_backing(check_system): + hosts_per_backing = host_helper.get_hosts_per_storage_backing() + LOG.fixture_step("Hosts per storage backing: {}".format(hosts_per_backing)) + + return hosts_per_backing + + +def touch_files_under_vm_disks(vm_id, ephemeral=0, swap=0, vm_type='volume', + disks=None): + expt_len = 1 + int(bool(ephemeral)) + int(bool(swap)) + ( + 1 if 'with_vol' in vm_type else 0) + + LOG.tc_step("Auto mount ephemeral, swap, and attached volume if any") + mounts = vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=disks) + assert expt_len == len(mounts) + + LOG.tc_step("Create files under vm disks: {}".format(mounts)) + file_paths, content = vm_helper.touch_files(vm_id=vm_id, file_dirs=mounts) + return file_paths, content + + +@mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'cpu_pol', 'vcpus', + 'vm_type', 'block_mig'), [ + param('local_image', 0, 0, None, 1, 'volume', False, + marks=mark.p1), + param('local_image', 0, 0, 'dedicated', 2, 'volume', + False, marks=mark.p1), + ('local_image', 1, 0, 'dedicated', 2, 'volume', False), + ('local_image', 0, 512, 'shared', 1, 'volume', False), + ('local_image', 1, 512, 'dedicated', 2, 'volume', True), + # Supported from Newton + param('local_image', 0, 0, 'shared', 2, 'image', True, + marks=mark.domain_sanity), + param('local_image', 1, 512, 'dedicated', 1, 'image', + False, marks=mark.domain_sanity), + ('local_image', 0, 0, None, 2, 'image_with_vol', False), + ('local_image', 0, 0, 'dedicated', 1, 'image_with_vol', + True), + ('local_image', 1, 512, 'dedicated', 2, 'image_with_vol', + True), + ('local_image', 1, 512, 'dedicated', 1, 'image_with_vol', + False), + param('remote', 0, 0, None, 2, 'volume', False, + marks=mark.p1), + param('remote', 1, 0, 'dedicated', 1, 'volume', False, + marks=mark.p1), + param('remote', 1, 512, None, 1, 'image', False, + marks=mark.domain_sanity), + param('remote', 0, 512, 'dedicated', 2, 'image_with_vol', + False, marks=mark.domain_sanity), + ]) +def test_live_migrate_vm_positive(hosts_per_stor_backing, storage_backing, + ephemeral, swap, cpu_pol, vcpus, vm_type, + block_mig): + """ + Skip Condition: + - Less than two hosts have specified storage backing + + Test Steps: + - create flavor with specified vcpus, cpu_policy, ephemeral, swap, + storage_backing + - boot vm from specified boot source with above flavor + - (attach volume to vm if 'image_with_vol', specified in vm_type) + - Live migrate the vm with specified block_migration flag + - Verify VM is successfully live migrated to different host + + Teardown: + - Delete created vm, volume, flavor + + """ + if len(hosts_per_stor_backing.get(storage_backing, [])) < 2: + skip("Less than two hosts have {} storage backing".format( + storage_backing)) + + vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, + vcpus, vm_type) + + prev_vm_host = vm_helper.get_vm_host(vm_id) + 
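+    # prev_vm_host recorded above is compared with the post-migration host, and
+    # the marker files created below are re-checked after the live migration to
+    # confirm the disk contents survived the move.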
vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) + file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, + ephemeral=ephemeral, + swap=swap, vm_type=vm_type, + disks=vm_disks) + + LOG.tc_step("Live migrate VM and ensure it succeeded") + # block_mig = True if boot_source == 'image' else False + code, output = vm_helper.live_migrate_vm(vm_id, block_migrate=block_mig) + assert 0 == code, "Live migrate is not successful. Details: {}".format( + output) + + post_vm_host = vm_helper.get_vm_host(vm_id) + assert prev_vm_host != post_vm_host + + LOG.tc_step("Ensure vm is pingable from NatBox after live migration") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + LOG.tc_step("Check files after live migrate") + check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing, + ephemeral=ephemeral, swap=swap, + vm_type=vm_type, vm_action='live_migrate', + file_paths=file_paths, content=content, + disks=vm_disks, prev_host=prev_vm_host, + post_host=post_vm_host) + + +@mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'vm_type', + 'block_mig', 'expt_err'), [ + param('local_image', 0, 0, 'volume', True, + 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), + param('remote', 0, 0, 'volume', True, + 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), + param('remote', 1, 0, 'volume', True, + 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), + param('remote', 0, 512, 'volume', True, + 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), + param('remote', 0, 512, 'image', True, + 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), + param('remote', 0, 0, 'image_with_vol', True, + 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), + ]) +def test_live_migrate_vm_negative(storage_backing, ephemeral, swap, vm_type, + block_mig, expt_err, + hosts_per_stor_backing, no_simplex): + """ + Skip Condition: + - Less than two hosts have specified storage backing + + Test Steps: + - create flavor with specified vcpus, cpu_policy, ephemeral, swap, + storage_backing + - boot vm from specified boot source with above flavor + - (attach volume to vm if 'image_with_vol', specified in vm_type) + - Live migrate the vm with specified block_migration flag + - Verify VM is successfully live migrated to different host + + Teardown: + - Delete created vm, volume, flavor + + """ + if len(hosts_per_stor_backing.get(storage_backing, [])) < 2: + skip("Less than two hosts have {} storage backing".format( + storage_backing)) + + vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, None, 1, + vm_type) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + prev_vm_host = vm_helper.get_vm_host(vm_id) + vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) + file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, + ephemeral=ephemeral, + swap=swap, vm_type=vm_type, + disks=vm_disks) + + LOG.tc_step( + "Live migrate VM and ensure it's rejected with proper error message") + # block_mig = True if boot_source == 'image' else False + code, output = vm_helper.live_migrate_vm(vm_id, block_migrate=block_mig) + assert 2 == code, "Expect live migration to have expected fail. Actual: " \ + "{}".format(output) + + # Remove below code due to live-migration is async in newton + # assert 'Unexpected API Error'.lower() not in output.lower(), + # "'Unexpected API Error' returned." 
+ # + # # remove extra spaces in error message + # output = re.sub(r'\s\s+', " ", output) + # assert eval(expt_err) in output, "Expected error message {} is not in + # actual error message: {}".\ + # format(eval(expt_err), output) + + post_vm_host = vm_helper.get_vm_host(vm_id) + assert prev_vm_host == post_vm_host, "VM host changed even though live " \ + "migration request rejected." + + LOG.tc_step( + "Ensure vm is pingable from NatBox after live migration rejected") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + LOG.tc_step("Check files after live migrate attempt") + check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing, + ephemeral=ephemeral, swap=swap, + vm_type=vm_type, vm_action='live_migrate', + file_paths=file_paths, content=content, + disks=vm_disks, prev_host=prev_vm_host, + post_host=post_vm_host) + + +@mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'cpu_pol', + 'vcpus', 'vm_type', 'resize'), [ + param('local_image', 0, 0, None, 1, 'volume', 'confirm'), + param('local_image', 0, 0, 'dedicated', 2, 'volume', 'confirm'), + param('local_image', 1, 0, 'shared', 2, 'image', 'confirm'), + param('local_image', 0, 512, 'dedicated', 1, 'image', 'confirm'), + param('local_image', 0, 0, None, 1, 'image_with_vol', 'confirm'), + param('remote', 0, 0, None, 2, 'volume', 'confirm'), + param('remote', 1, 0, None, 1, 'volume', 'confirm'), + param('remote', 1, 512, None, 1, 'image', 'confirm'), + param('remote', 0, 0, None, 2, 'image_with_vol', 'confirm'), + param('local_image', 0, 0, None, 2, 'volume', 'revert'), + param('local_image', 0, 0, 'dedicated', 1, 'volume', 'revert'), + param('local_image', 1, 0, 'shared', 2, 'image', 'revert'), + param('local_image', 0, 512, 'dedicated', 1, 'image', 'revert'), + param('local_image', 0, 0, 'dedicated', 2, 'image_with_vol', 'revert'), + param('remote', 0, 0, None, 2, 'volume', 'revert'), + param('remote', 1, 512, None, 1, 'volume', 'revert'), + param('remote', 0, 0, None, 1, 'image', 'revert'), + param('remote', 1, 0, None, 2, 'image_with_vol', 'revert'), +]) +def test_cold_migrate_vm(storage_backing, ephemeral, swap, cpu_pol, vcpus, + vm_type, resize, hosts_per_stor_backing, + no_simplex): + """ + Skip Condition: + - Less than two hosts have specified storage backing + + Test Steps: + - create flavor with specified vcpus, cpu_policy, ephemeral, swap, + storage_backing + - boot vm from specified boot source with above flavor + - (attach volume to vm if 'image_with_vol', specified in vm_type) + - Cold migrate vm + - Confirm/Revert resize as specified + - Verify VM is successfully cold migrated and confirmed/reverted resize + - Verify that instance files are not found on original host. (TC6621) + + Teardown: + - Delete created vm, volume, flavor + + """ + if len(hosts_per_stor_backing.get(storage_backing, [])) < 2: + skip("Less than two hosts have {} storage backing".format( + storage_backing)) + + vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, + vcpus, vm_type) + prev_vm_host = vm_helper.get_vm_host(vm_id) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) + file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, + ephemeral=ephemeral, + swap=swap, vm_type=vm_type, + disks=vm_disks) + + LOG.tc_step("Cold migrate VM and {} resize".format(resize)) + revert = True if resize == 'revert' else False + code, output = vm_helper.cold_migrate_vm(vm_id, revert=revert) + assert 0 == code, "Cold migrate {} is not successful. 
Details: {}".format( + resize, output) + + # Below steps are unnecessary as host is already checked in + # cold_migrate_vm keyword. Add steps below just in case. + LOG.tc_step( + "Check VM host is as expected after cold migrate {}".format(resize)) + post_vm_host = vm_helper.get_vm_host(vm_id) + if revert: + assert prev_vm_host == post_vm_host, "vm host changed after cold " \ + "migrate revert" + else: + assert prev_vm_host != post_vm_host, "vm host did not change after " \ + "cold migrate" + LOG.tc_step("Check that source host no longer has instance files") + with host_helper.ssh_to_host(prev_vm_host) as prev_ssh: + assert not prev_ssh.file_exists( + '/var/lib/nova/instances/{}'.format(vm_id)), \ + "Instance files found on previous host {} after cold migrate " \ + "to {}".format(prev_vm_host, post_vm_host) + + LOG.tc_step("Ensure vm is pingable from NatBox after cold migration " + "{}".format(resize)) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + LOG.tc_step("Check files after cold migrate {}".format(resize)) + action = None if revert else 'cold_migrate' + check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing, + ephemeral=ephemeral, swap=swap, + vm_type=vm_type, vm_action=action, + file_paths=file_paths, content=content, + disks=vm_disks, prev_host=prev_vm_host, + post_host=post_vm_host) + + +def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus, + vm_type): + LOG.tc_step( + "Create a flavor with {} vcpus, {}G ephemeral disk, {}M swap " + "disk".format(vcpus, ephemeral, swap)) + flavor_id = nova_helper.create_flavor( + name='migration_test', ephemeral=ephemeral, swap=swap, vcpus=vcpus, + storage_backing=storage_backing, cleanup='function')[1] + + if cpu_pol is not None: + specs = {FlavorSpec.CPU_POLICY: cpu_pol} + + LOG.tc_step("Add following extra specs: {}".format(specs)) + nova_helper.set_flavor(flavor=flavor_id, **specs) + + boot_source = 'volume' if vm_type == 'volume' else 'image' + LOG.tc_step("Boot a vm from {}".format(boot_source)) + vm_id = vm_helper.boot_vm('migration_test', + flavor=flavor_id, source=boot_source, + reuse_vol=False, + cleanup='function')[1] + + if vm_type == 'image_with_vol': + LOG.tc_step("Attach volume to vm") + vm_helper.attach_vol_to_vm(vm_id=vm_id, mount=False) + + return vm_id + + +@mark.parametrize(('guest_os', 'mig_type', 'cpu_pol'), [ + ('ubuntu_14', 'live', 'dedicated'), + # Live migration with pinned VM may not be supported + param('ubuntu_14', 'cold', 'dedicated', + marks=mark.priorities('sanity', 'cpe_sanity')), + param('tis-centos-guest', 'live', None, + marks=mark.priorities('sanity', 'cpe_sanity')), + ('tis-centos-guest', 'cold', None), +]) +def test_migrate_vm(check_system, guest_os, mig_type, cpu_pol): + """ + Test migrate vms for given guest type + Args: + check_system: + guest_os: + mig_type: + cpu_pol: + + Test Steps: + - Create a glance image from given guest type + - Create a vm from cinder volume using above image + - Live/cold migrate the vm + - Ensure vm moved to other host and in good state (active and + reachable from NatBox) + + """ + LOG.tc_step("Create a flavor with 1 vcpu") + flavor_id = \ + nova_helper.create_flavor(name='{}-mig'.format(mig_type), vcpus=1, + root_disk=9, cleanup='function')[1] + + if cpu_pol is not None: + specs = {FlavorSpec.CPU_POLICY: cpu_pol} + LOG.tc_step("Add following extra specs: {}".format(specs)) + nova_helper.set_flavor(flavor=flavor_id, **specs) + + LOG.tc_step("Create a volume from {} image".format(guest_os)) + image_id =
glance_helper.get_guest_image(guest_os=guest_os) + + vol_id = cinder_helper.create_volume(source_id=image_id, size=9, + guest_image=guest_os)[1] + ResourceCleanup.add('volume', vol_id) + + LOG.tc_step("Boot a vm from above flavor and volume") + vm_id = vm_helper.boot_vm(guest_os, flavor=flavor_id, source='volume', + source_id=vol_id, cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + if guest_os == 'ubuntu_14': + system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CINDER_IO_CONGEST, + entity_id='cinder_io_monitor', + strict=False, timeout=300, + fail_ok=False) + + LOG.tc_step("{} migrate vm and check vm is moved to different host".format( + mig_type)) + prev_vm_host = vm_helper.get_vm_host(vm_id) + + if mig_type == 'live': + code, output = vm_helper.live_migrate_vm(vm_id) + if code == 1: + assert False, "No host to live migrate to. System may not be in " \ + "good state." + else: + vm_helper.cold_migrate_vm(vm_id) + + vm_host = vm_helper.get_vm_host(vm_id) + assert prev_vm_host != vm_host, "vm host did not change after {} " \ + "migration".format(mig_type) + + LOG.tc_step("Ping vm from NatBox after {} migration".format(mig_type)) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) diff --git a/automated-pytest-suite/testcases/functional/nova/test_nova_actions.py b/automated-pytest-suite/testcases/functional/nova/test_nova_actions.py new file mode 100755 index 0000000..05bf517 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_nova_actions.py @@ -0,0 +1,91 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, skip, param + +from utils.tis_log import LOG +from consts.stx import FlavorSpec, VMStatus +from consts.reasons import SkipStorageSpace + +from keywords import vm_helper, nova_helper, glance_helper, cinder_helper +from testfixtures.fixture_resources import ResourceCleanup + + +def id_gen(val): + if isinstance(val, list): + return '-'.join(val) + + +@mark.parametrize(('guest_os', 'cpu_pol', 'actions'), [ + param('tis-centos-guest', 'dedicated', ['pause', 'unpause'], + marks=mark.priorities('sanity', 'cpe_sanity', 'sx_sanity')), + param('ubuntu_14', 'shared', ['stop', 'start'], marks=mark.sanity), + param('ubuntu_14', 'dedicated', ['auto_recover'], marks=mark.sanity), + param('tis-centos-guest', 'dedicated', ['suspend', 'resume'], + marks=mark.priorities('sanity', 'cpe_sanity', 'sx_sanity')), +], ids=id_gen) +def test_nova_actions(guest_os, cpu_pol, actions): + """ + + Args: + guest_os: + cpu_pol: + actions: + + Test Steps: + - Create a glance image from given guest type + - Create a vm from cinder volume using above image with specified cpu + policy + - Perform given nova actions on vm + - Ensure nova operation succeeded and vm still in good state (active + and reachable from NatBox) + + """ + if guest_os == 'opensuse_12': + if not cinder_helper.is_volumes_pool_sufficient(min_size=40): + skip(SkipStorageSpace.SMALL_CINDER_VOLUMES_POOL) + + img_id = glance_helper.get_guest_image(guest_os=guest_os) + + LOG.tc_step("Create a flavor with 1 vcpu") + flavor_id = nova_helper.create_flavor(name=cpu_pol, vcpus=1, root_disk=9)[1] + ResourceCleanup.add('flavor', flavor_id) + + if cpu_pol is not None: + specs = {FlavorSpec.CPU_POLICY: cpu_pol} + LOG.tc_step("Add following extra specs: {}".format(specs)) + nova_helper.set_flavor(flavor=flavor_id, **specs) + + LOG.tc_step("Create a volume from {} image".format(guest_os)) + vol_id = \ + cinder_helper.create_volume(name='vol-' + 
guest_os, source_id=img_id, + guest_image=guest_os)[1] + ResourceCleanup.add('volume', vol_id) + + LOG.tc_step("Boot a vm from above flavor and volume") + vm_id = vm_helper.boot_vm('nova_actions', flavor=flavor_id, source='volume', + source_id=vol_id, + cleanup='function')[1] + + LOG.tc_step("Wait for VM pingable from NATBOX") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + for action in actions: + if action == 'auto_recover': + LOG.tc_step( + "Set vm to error state and wait for auto recovery complete, " + "then verify ping from base vm over " + "management and data networks") + vm_helper.set_vm_state(vm_id=vm_id, error_state=True, fail_ok=False) + vm_helper.wait_for_vm_values(vm_id=vm_id, status=VMStatus.ACTIVE, + fail_ok=True, timeout=600) + else: + LOG.tc_step( + "Perform following action on vm {}: {}".format(vm_id, action)) + vm_helper.perform_action_on_vm(vm_id, action=action) + + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) diff --git a/automated-pytest-suite/testcases/functional/nova/test_resize_vm.py b/automated-pytest-suite/testcases/functional/nova/test_resize_vm.py new file mode 100755 index 0000000..744a563 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_resize_vm.py @@ -0,0 +1,508 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time +import math + +from pytest import fixture, mark, skip, param + +from utils.tis_log import LOG + +from keywords import vm_helper, nova_helper, host_helper, check_helper, \ + glance_helper +from testfixtures.fixture_resources import ResourceCleanup +from consts.stx import FlavorSpec, GuestImages +from consts.reasons import SkipStorageBacking + + +def id_gen(val): + if isinstance(val, (tuple, list)): + val = '_'.join([str(val_) for val_ in val]) + return val + + +def touch_files_under_vm_disks(vm_id, ephemeral=0, swap=0, vm_type='volume', + disks=None): + expt_len = 1 + int(bool(ephemeral)) + int(bool(swap)) + ( + 1 if 'with_vol' in vm_type else 0) + + LOG.tc_step("Auto mount non-root disk(s)") + mounts = vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=disks) + assert expt_len == len(mounts) + + if bool(swap): + mounts.remove('none') + + LOG.tc_step("Create files under vm disks: {}".format(mounts)) + file_paths, content = vm_helper.touch_files(vm_id=vm_id, file_dirs=mounts) + return file_paths, content + + +def get_expt_disk_increase(origin_flavor, dest_flavor, boot_source, + storage_backing): + root_diff = dest_flavor[0] - origin_flavor[0] + ephemeral_diff = dest_flavor[1] - origin_flavor[1] + swap_diff = (dest_flavor[2] - origin_flavor[2]) / 1024 + + if storage_backing == 'remote': + expected_increase = 0 + expect_to_check = True + else: + if boot_source == 'volume': + expected_increase = ephemeral_diff + swap_diff + expect_to_check = False + else: + expected_increase = root_diff + ephemeral_diff + swap_diff + expect_to_check = expected_increase >= 2 + + return expected_increase, expect_to_check + + +def get_disk_avail_least(host): + return \ + host_helper.get_hypervisor_info(hosts=host, + field='disk_available_least')[host] + + +def check_correct_post_resize_value(original_disk_value, expected_increase, + host, sleep=True): + if sleep: + time.sleep(65) + + post_resize_value = get_disk_avail_least(host) + LOG.info( + "{} original_disk_value: {}. post_resize_value: {}. 
" + "expected_increase: {}".format( + host, original_disk_value, post_resize_value, expected_increase)) + expt_post = original_disk_value + expected_increase + + if expected_increase < 0: + # vm is on this host, backup image files may be created if not + # already existed + backup_val = math.ceil( + glance_helper.get_image_size(guest_os=GuestImages.DEFAULT['guest'], + virtual_size=False)) + assert expt_post - backup_val <= post_resize_value <= expt_post + elif expected_increase > 0: + # vm moved away from this host, or resized to smaller disk on same + # host, backup files will stay + assert expt_post - 1 <= post_resize_value <= expt_post + 1, \ + "disk_available_least on {} expected: {}+-1, actual: {}".format( + host, expt_post, post_resize_value) + else: + assert expt_post == post_resize_value, \ + "{} disk_available_least value changed to {} unexpectedly".format( + host, post_resize_value) + + return post_resize_value + + +@fixture(scope='module') +def get_hosts_per_backing(add_admin_role_module): + return host_helper.get_hosts_per_storage_backing() + + +class TestResizeSameHost: + @fixture(scope='class') + def add_hosts_to_zone(self, request, add_stxauto_zone, + get_hosts_per_backing): + hosts_per_backing = get_hosts_per_backing + avail_hosts = {key: vals[0] for key, vals in hosts_per_backing.items() + if vals} + + if not avail_hosts: + skip("No host in any storage aggregate") + + nova_helper.add_hosts_to_aggregate(aggregate='stxauto', + hosts=list(avail_hosts.values())) + + def remove_hosts_from_zone(): + nova_helper.remove_hosts_from_aggregate(aggregate='stxauto', + check_first=False) + + request.addfinalizer(remove_hosts_from_zone) + return avail_hosts + + @mark.parametrize(('storage_backing', 'origin_flavor', 'dest_flavor', + 'boot_source'), [ + ('remote', (4, 0, 0), (5, 1, 512), 'image'), + ('remote', (4, 1, 512), (5, 2, 1024), 'image'), + ('remote', (4, 1, 512), (4, 1, 0), 'image'), + # LP1762423 + param('remote', (4, 0, 0), (1, 1, 512), 'volume', + marks=mark.priorities('nightly', 'sx_nightly')), + ('remote', (4, 1, 512), (8, 2, 1024), 'volume'), + ('remote', (4, 1, 512), (0, 1, 0), 'volume'), + ('local_image', (4, 0, 0), (5, 1, 512), 'image'), + param('local_image', (4, 1, 512), (5, 2, 1024), + 'image', + marks=mark.priorities('nightly', 'sx_nightly')), + ('local_image', (5, 1, 512), (5, 1, 0), 'image'), + ('local_image', (4, 0, 0), (5, 1, 512), 'volume'), + ('local_image', (4, 1, 512), (0, 2, 1024), 'volume'), + ('local_image', (4, 1, 512), (1, 1, 0), 'volume'), + # LP1762423 + ], ids=id_gen) + def test_resize_vm_positive(self, add_hosts_to_zone, storage_backing, + origin_flavor, dest_flavor, boot_source): + """ + Test resizing disks of a vm + - Resize root disk is allowed except 0 & boot-from-image + - Resize to larger or same ephemeral is allowed + - Resize swap to any size is allowed including removing + + Args: + storage_backing: The host storage backing required + origin_flavor: The flavor to boot the vm from, listed by GBs for + root, ephemeral, and swap disks, i.e. for a + system with a 2GB root disk, a 1GB ephemeral disk, + and no swap disk: (2, 1, 0) + boot_source: Which source to boot the vm from, either 'volume' or + 'image' + add_hosts_to_zone + dest_flavor + + Skip Conditions: + - No hosts exist with required storage backing. + Test setup: + - Put a single host of each backing in stxautozone to prevent + migration and instead force resize. + - Create two flavors based on origin_flavor and dest_flavor + - Create a volume or image to boot from. 
+ - Boot VM with origin_flavor + Test Steps: + - Resize VM to dest_flavor with revert + - If vm is booted from image and has a non-remote backing, + check that the amount of disk space post-revert + is around the same pre-revert # TC5155 + - Resize VM to dest_flavor with confirm + - If vm is booted from image and has a non-remote backing, + check that the amount of disk space post-confirm + is reflects the increase in disk-space taken up # TC5155 + Test Teardown: + - Delete created VM + - Delete created volume or image + - Delete created flavors + - Remove hosts from stxautozone + - Delete stxautozone + + """ + vm_host = add_hosts_to_zone.get(storage_backing, None) + + if not vm_host: + skip( + SkipStorageBacking.NO_HOST_WITH_BACKING.format(storage_backing)) + + expected_increase, expect_to_check = get_expt_disk_increase( + origin_flavor, dest_flavor, + boot_source, storage_backing) + LOG.info("Expected_increase of vm compute occupancy is {}".format( + expected_increase)) + + LOG.tc_step('Create origin flavor') + origin_flavor_id = _create_flavor(origin_flavor, storage_backing) + vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) + root, ephemeral, swap = origin_flavor + if boot_source == 'volume': + root = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1] + file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, + ephemeral=ephemeral, + swap=swap, + vm_type=boot_source, + disks=vm_disks) + + if expect_to_check: + LOG.tc_step('Check initial disk usage') + original_disk_value = get_disk_avail_least(vm_host) + LOG.info("{} space left on compute".format(original_disk_value)) + + LOG.tc_step('Create destination flavor') + dest_flavor_id = _create_flavor(dest_flavor, storage_backing) + LOG.tc_step('Resize vm to dest flavor and revert') + vm_helper.resize_vm(vm_id, dest_flavor_id, revert=True, fail_ok=False) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + swap_size = swap + LOG.tc_step("Check files after resize revert") + if storage_backing == 'remote' and swap and dest_flavor[2]: + swap_size = dest_flavor[2] + + time.sleep(30) + prev_host = vm_helper.get_vm_host(vm_id) + check_helper.check_vm_files(vm_id=vm_id, + storage_backing=storage_backing, root=root, + ephemeral=ephemeral, + swap=swap_size, vm_type=boot_source, + vm_action=None, file_paths=file_paths, + content=content, disks=vm_disks, + check_volume_root=True) + + LOG.tc_step('Resize vm to dest flavor and confirm') + vm_helper.resize_vm(vm_id, dest_flavor_id, revert=False, fail_ok=False) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + post_host = vm_helper.get_vm_host(vm_id) + post_root, post_ephemeral, post_swap = dest_flavor + if boot_source == 'volume': + post_root = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1] + post_ephemeral = ephemeral if ephemeral else post_ephemeral + LOG.tc_step("Check files after resize attempt") + check_helper.check_vm_files( + vm_id=vm_id, storage_backing=storage_backing, + ephemeral=post_ephemeral, + swap=post_swap, vm_type=boot_source, + vm_action='resize', file_paths=file_paths, + content=content, prev_host=prev_host, + post_host=post_host, root=post_root, + disks=vm_disks, + post_disks=vm_helper.get_vm_devices_via_virsh(vm_id), + check_volume_root=True) + + @mark.parametrize( + ('storage_backing', 'origin_flavor', 'dest_flavor', 'boot_source'), [ + # Root disk can be resized, but cannot be 0 + ('remote', (5, 0, 0), (0, 0, 0), 'image'), + # check 
ephemeral disk cannot be smaller than origin + ('remote', (5, 2, 512), (5, 1, 512), 'image'), + # check ephemeral disk cannot be smaller than origin + ('remote', (1, 1, 512), (1, 0, 512), 'volume'), + # Root disk can be resized, but cannot be 0 + ('local_image', (5, 0, 0), (0, 0, 0), 'image'), + ('local_image', (5, 2, 512), (5, 1, 512), 'image'), + ('local_image', (5, 1, 512), (4, 1, 512), 'image'), + ('local_image', (5, 1, 512), (4, 1, 0), 'image'), + ('local_image', (1, 1, 512), (1, 0, 512), 'volume'), + ], ids=id_gen) + def test_resize_vm_negative(self, add_hosts_to_zone, storage_backing, + origin_flavor, dest_flavor, boot_source): + """ + Test resizing disks of a vm not allowed: + - Resize to smaller ephemeral flavor is not allowed + - Resize to zero disk flavor is not allowed (boot from image only) + + Args: + storage_backing: The host storage backing required + origin_flavor: The flavor to boot the vm from, listed by GBs for + root, ephemeral, and swap disks, i.e. for a + system with a 2GB root disk, a 1GB ephemeral disk, + and no swap disk: (2, 1, 0) + boot_source: Which source to boot the vm from, either 'volume' or + 'image' + Skip Conditions: + - No hosts exist with required storage backing. + Test setup: + - Put a single host of each backing in stxautozone to prevent + migration and instead force resize. + - Create two flavors based on origin_flavor and dest_flavor + - Create a volume or image to boot from. + - Boot VM with origin_flavor + Test Steps: + - Resize VM to dest_flavor with revert + - Resize VM to dest_flavor with confirm + Test Teardown: + - Delete created VM + - Delete created volume or image + - Delete created flavors + - Remove hosts from stxauto zone + - Delete stxauto zone + + """ + vm_host = add_hosts_to_zone.get(storage_backing, None) + + if not vm_host: + skip("No available host with {} storage backing".format( + storage_backing)) + + LOG.tc_step('Create origin flavor') + origin_flavor_id = _create_flavor(origin_flavor, storage_backing) + LOG.tc_step('Create destination flavor') + dest_flavor_id = _create_flavor(dest_flavor, storage_backing) + vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) + root, ephemeral, swap = origin_flavor + file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, + ephemeral=ephemeral, + swap=swap, + vm_type=boot_source, + disks=vm_disks) + + LOG.tc_step('Resize vm to dest flavor') + code, output = vm_helper.resize_vm(vm_id, dest_flavor_id, fail_ok=True) + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + assert vm_helper.get_vm_flavor( + vm_id) == origin_flavor_id, 'VM did not keep origin flavor' + assert code > 0, "Resize VM CLI is not rejected" + + LOG.tc_step("Check files after resize attempt") + check_helper.check_vm_files(vm_id=vm_id, + storage_backing=storage_backing, root=root, + ephemeral=ephemeral, + swap=swap, vm_type=boot_source, + vm_action=None, file_paths=file_paths, + content=content, disks=vm_disks) + + +def _create_flavor(flavor_info, storage_backing): + root_disk = flavor_info[0] + ephemeral = flavor_info[1] + swap = flavor_info[2] + + flavor_id = nova_helper.create_flavor(ephemeral=ephemeral, swap=swap, + root_disk=root_disk, + storage_backing=storage_backing)[1] + ResourceCleanup.add('flavor', flavor_id) + return flavor_id + + +def _boot_vm_to_test(boot_source, vm_host, flavor_id): + LOG.tc_step('Boot a vm with given flavor') + vm_id = vm_helper.boot_vm(flavor=flavor_id, 
avail_zone='stxauto', + vm_host=vm_host, source=boot_source, + cleanup='function')[1] + return vm_id + + +def get_cpu_count(hosts_with_backing): + LOG.fixture_step("Find suitable vm host and cpu count and backing of host") + compute_space_dict = {} + + vm_host = hosts_with_backing[0] + numa0_used_cpus, numa0_total_cpus = \ + host_helper.get_vcpus_per_proc(vm_host)[vm_host][0] + numa0_avail_cpus = len(numa0_total_cpus) - len(numa0_used_cpus) + for host in hosts_with_backing: + free_space = get_disk_avail_least(host) + compute_space_dict[host] = free_space + LOG.info("{} space on {}".format(free_space, host)) + + # increase quota + LOG.fixture_step("Increase quota of allotted cores") + vm_helper.ensure_vms_quotas(cores_num=int(numa0_avail_cpus + 30)) + + return vm_host, numa0_avail_cpus, compute_space_dict + + +class TestResizeDiffHost: + @mark.parametrize('storage_backing', [ + 'local_image', + 'remote', + ]) + def test_resize_different_comp_node(self, storage_backing, + get_hosts_per_backing): + """ + Test resizing disks of a larger vm onto a different compute node and + check hypervisor statistics to + make sure difference in disk usage of both nodes involved is + correctly reflected + + Args: + storage_backing: The host storage backing required + Skip Conditions: + - 2 hosts must exist with required storage backing. + Test setup: + - For each of the two backings tested, the setup will return the + number of nodes for each backing, + the vm host that the vm will initially be created on and the + number of hosts for that backing. + Test Steps: + - Create a flavor with a root disk size that is slightly larger + than the default image used to boot up + the VM + - Create a VM with the aforementioned flavor + - Create a flavor will enough cpus to occupy the rest of the cpus + on the same host as the first VM + - Create another VM on the same host as the first VM + - Create a similar flavor to the first one, except that it has + one more vcpu + - Resize the first VM and confirm that it is on a different host + - Check hypervisor-show on both computes to make sure that disk + usage goes down on the original host and + goes up on the new host + Test Teardown: + - Delete created VMs + - Delete created flavors + + """ + hosts_with_backing = get_hosts_per_backing.get(storage_backing, []) + if len(hosts_with_backing) < 2: + skip(SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format( + storage_backing)) + + origin_host, cpu_count, compute_space_dict = get_cpu_count( + hosts_with_backing) + + root_disk_size = \ + GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1] + 5 + + # make vm (1 cpu) + LOG.tc_step("Create flavor with 1 cpu") + numa0_specs = {FlavorSpec.CPU_POLICY: 'dedicated', FlavorSpec.NUMA_0: 0} + flavor_1 = \ + nova_helper.create_flavor(ephemeral=0, swap=0, + root_disk=root_disk_size, vcpus=1, + storage_backing=storage_backing)[1] + ResourceCleanup.add('flavor', flavor_1) + nova_helper.set_flavor(flavor_1, **numa0_specs) + + LOG.tc_step("Boot a vm with above flavor") + vm_to_resize = \ + vm_helper.boot_vm(flavor=flavor_1, source='image', + cleanup='function', vm_host=origin_host)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_to_resize) + + # launch another vm + LOG.tc_step("Create a flavor to occupy vcpus") + occupy_amount = int(cpu_count) - 1 + second_specs = {FlavorSpec.CPU_POLICY: 'dedicated', + FlavorSpec.NUMA_0: 0} + flavor_2 = nova_helper.create_flavor(vcpus=occupy_amount, + storage_backing=storage_backing)[1] + ResourceCleanup.add('flavor', flavor_2) + 
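+        # flavor_2 is sized to consume the remaining dedicated vcpus on NUMA node 0
+        # of the origin host, so the later resize to a flavor with one extra vcpu
+        # cannot be satisfied locally and must move the VM to a different compute.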
nova_helper.set_flavor(flavor_2, **second_specs) + + LOG.tc_step("Boot a vm with above flavor to occupy remaining vcpus") + vm_2 = vm_helper.boot_vm(flavor=flavor_2, source='image', + cleanup='function', vm_host=origin_host)[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_2) + + LOG.tc_step('Check disk usage before resize') + prev_val_origin_host = get_disk_avail_least(origin_host) + LOG.info("{} space left on compute".format(prev_val_origin_host)) + + # create a larger flavor and resize + LOG.tc_step("Create a flavor that has an extra vcpu to force resize " + "to a different node") + resize_flavor = nova_helper.create_flavor( + ephemeral=0, swap=0, root_disk=root_disk_size, vcpus=2, + storage_backing=storage_backing)[1] + ResourceCleanup.add('flavor', resize_flavor) + nova_helper.set_flavor(resize_flavor, **numa0_specs) + + LOG.tc_step("Resize the vm and verify if it is on a different host") + vm_helper.resize_vm(vm_to_resize, resize_flavor) + new_host = vm_helper.get_vm_host(vm_to_resize) + assert new_host != origin_host, "vm did not change hosts " \ + "following resize" + + LOG.tc_step('Check disk usage on computes after resize') + if storage_backing == 'remote': + LOG.info("Compute disk usage change should be minimal for " + "remote storage backing") + root_disk_size = 0 + + check_correct_post_resize_value(prev_val_origin_host, root_disk_size, + origin_host) + + prev_val_new_host = compute_space_dict[new_host] + check_correct_post_resize_value(prev_val_new_host, -root_disk_size, + new_host, sleep=False) + vm_helper.wait_for_vm_pingable_from_natbox(vm_to_resize) diff --git a/automated-pytest-suite/testcases/functional/nova/test_vm_recovery.py b/automated-pytest-suite/testcases/functional/nova/test_vm_recovery.py new file mode 100755 index 0000000..36b22bd --- /dev/null +++ b/automated-pytest-suite/testcases/functional/nova/test_vm_recovery.py @@ -0,0 +1,105 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, param + +from consts.stx import FlavorSpec, ImageMetadata, VMStatus +from keywords import nova_helper, vm_helper, glance_helper +from utils.tis_log import LOG + + +# Note auto recovery metadata in image will not be passed to vm if vm is booted +# from Volume + + +@mark.parametrize(('cpu_policy', 'flavor_auto_recovery', 'image_auto_recovery', + 'disk_format', 'container_format', 'expt_result'), [ + param(None, None, None, 'raw', 'bare', True, marks=mark.p1), + param(None, 'false', 'true', 'qcow2', 'bare', False, marks=mark.p3), + param(None, 'true', 'false', 'raw', 'bare', True, marks=mark.p3), + param('dedicated', 'false', None, 'raw', 'bare', False, marks=mark.p3), + param('dedicated', None, 'false', 'qcow2', 'bare', False, + marks=mark.domain_sanity), + param('shared', None, 'true', 'raw', 'bare', True, marks=mark.p3), + param('shared', 'false', None, 'raw', 'bare', False, marks=mark.p3), +]) +def test_vm_autorecovery(cpu_policy, flavor_auto_recovery, image_auto_recovery, + disk_format, container_format, expt_result): + """ + Test auto recovery setting in vm with various auto recovery settings in + flavor and image. + + Args: + cpu_policy (str|None): cpu policy to set in flavor + flavor_auto_recovery (str|None): None (unset) or true or false + image_auto_recovery (str|None): None (unset) or true or false + disk_format (str): + container_format (str): + expt_result (bool): Expected vm auto recovery behavior. + False > disabled, True > enabled. 
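+            Based on the parametrized expectations: a value set in the flavor
+            extra spec takes precedence over the image metadata, the image value
+            applies when the flavor spec is unset, and auto recovery defaults to
+            enabled when neither is set.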
+ + Test Steps: + - Create a flavor with auto recovery and cpu policy set to given + values in extra spec + - Create an image with auto recovery set to given value in metadata + - Boot a vm with the flavor and from the image + - Set vm state to error via nova reset-state + - Verify vm auto recovery behavior is as expected + + Teardown: + - Delete created vm, volume, image, flavor + + """ + + LOG.tc_step("Create a flavor with cpu_policy set to {} and auto_recovery " + "set to {} in extra spec".format(cpu_policy, + flavor_auto_recovery)) + flavor_id = nova_helper.create_flavor( + name='auto_recover_'+str(flavor_auto_recovery), cleanup='function')[1] + + # Add extra specs as specified + extra_specs = {} + if cpu_policy is not None: + extra_specs[FlavorSpec.CPU_POLICY] = cpu_policy + if flavor_auto_recovery is not None: + extra_specs[FlavorSpec.AUTO_RECOVERY] = flavor_auto_recovery + + if extra_specs: + nova_helper.set_flavor(flavor=flavor_id, **extra_specs) + + property_key = ImageMetadata.AUTO_RECOVERY + LOG.tc_step("Create an image with property auto_recovery={}, " + "disk_format={}, container_format={}". + format(image_auto_recovery, disk_format, container_format)) + if image_auto_recovery is None: + image_id = glance_helper.create_image(disk_format=disk_format, + container_format=container_format, + cleanup='function')[1] + else: + image_id = glance_helper.create_image( + disk_format=disk_format, container_format=container_format, + cleanup='function', **{property_key: image_auto_recovery})[1] + + LOG.tc_step("Boot a vm from image with auto recovery - {} and " + "using the flavor with auto recovery - " + "{}".format(image_auto_recovery, flavor_auto_recovery)) + vm_id = vm_helper.boot_vm(name='auto_recov', flavor=flavor_id, + source='image', source_id=image_id, + cleanup='function')[1] + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) + + LOG.tc_step("Verify vm auto recovery is {} by setting vm to error " + "state.".format(expt_result)) + vm_helper.set_vm_state(vm_id=vm_id, error_state=True, fail_ok=False) + res_bool, actual_val = vm_helper.wait_for_vm_values( + vm_id=vm_id, status=VMStatus.ACTIVE, fail_ok=True, timeout=600) + + assert expt_result == res_bool, "Expected auto_recovery: {}. Actual vm " \ + "status: {}".format(expt_result, actual_val) + + LOG.tc_step("Ensure vm is pingable after auto recovery") + vm_helper.wait_for_vm_pingable_from_natbox(vm_id) diff --git a/automated-pytest-suite/testcases/functional/security/__init__.py b/automated-pytest-suite/testcases/functional/security/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/security/test_ima.py b/automated-pytest-suite/testcases/functional/security/test_ima.py new file mode 100755 index 0000000..de050ea --- /dev/null +++ b/automated-pytest-suite/testcases/functional/security/test_ima.py @@ -0,0 +1,412 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, fixture, skip + +from consts.auth import HostLinuxUser +from consts.stx import EventLogID +from keywords import system_helper, common +from utils.clients.ssh import ControllerClient +from utils.tis_log import LOG + +files_to_delete = [] + + +@fixture(scope='module', autouse=True) +def ima_precheck(): + """ + This tests if the system is enabled with IMA. If not, we + should skip IMA-related tests. 
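+    IMA is considered enabled when the kernel boot command line
+    (/proc/cmdline) contains the extended IMA policy option.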
+ """ + + LOG.info("Checking if IMA is enabled") + con_ssh = ControllerClient.get_active_controller() + + exitcode, output = con_ssh.exec_cmd("cat /proc/cmdline") + if "extended" not in output: + skip("IMA must be enabled in order to run this test") + else: + LOG.info("IMA is enabled") + + +@fixture(autouse=True) +def delete_files(request): + global files_to_delete + files_to_delete = [] + + def teardown(): + """ + Delete any created files on teardown. + """ + for filename in files_to_delete: + delete_file(filename) + + request.addfinalizer(teardown) + + +def checksum_compare(source_file, dest_file): + """ + This does a checksum comparison of two files. It returns True if the + checksum matches, and False if it doesn't. + """ + + con_ssh = ControllerClient.get_active_controller() + + LOG.info("Compare checksums on source file and destination file") + cmd = "getfattr -m . -d {}" + + exitcode, source_sha = con_ssh.exec_cmd(cmd.format(source_file)) + LOG.info("Raw source file checksum is: {}".format(source_sha)) + source_sha2 = source_sha.split("\n") + print("This is source_sha2: {}".format(source_sha2)) + assert source_sha2 != [''], "No signature on source file" + + if source_file.startswith("/"): + source_sha = source_sha2[2] + " " + source_sha2[3] + else: + source_sha = source_sha2[1] + " " + source_sha2[2] + + LOG.info("Extracted source file checksum: {}".format(source_sha)) + + exitcode, dest_sha = con_ssh.exec_cmd(cmd.format(dest_file)) + LOG.info("Raw symlink checksum is: {}".format(dest_sha)) + dest_sha2 = dest_sha.split("\n") + + if dest_file.startswith("/"): + dest_sha = dest_sha2[2] + " " + dest_sha2[3] + else: + dest_sha = dest_sha2[1] + " " + dest_sha2[2] + + LOG.info("Extracted destination file checksum: {}".format(dest_sha)) + + if source_sha == dest_sha: + return True + else: + return False + + +def create_symlink(source_file, dest_file, sudo=True): + """ + This creates a symlink given a source filename and a destination filename. + """ + LOG.info("Creating symlink to {} called {}".format(source_file, dest_file)) + cmd = "ln -sf {} {}".format(source_file, dest_file) + _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) + + +def delete_file(filename, sudo=True): + """ + This deletes a file. + """ + LOG.info("Deleting file {}".format(filename)) + cmd = "rm {}".format(filename) + _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) + + +def chmod_file(filename, permissions, sudo=True): + """ + This modifies permissions of a file + """ + LOG.info("Changing file permissions for {}".format(filename)) + cmd = "chmod {} {}".format(permissions, filename) + _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) + + +def chgrp_file(filename, group, sudo=True): + """ + This modifies the group ownership of a file + """ + LOG.info("Changing file permissions for {}".format(filename)) + cmd = "chgrp {} {}".format(group, filename) + _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) + + +def chown_file(filename, file_owner, sudo=True): + """ + This modifies the user that owns the file + """ + LOG.info("Changing the user that owns {}".format(filename)) + cmd = "chown {} {}".format(file_owner, filename) + _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) + + +def copy_file(source_file, dest_file, sudo=True, preserve=True, cleanup=None): + """ + This creates a copy of a file + + Args: + source_file: + dest_file: + sudo (bool): whether to copy with sudo + cleanup (None|str): source or dest. 
Add source or dest file to files to + delete list + preserve (bool): whether to preserve attributes of source file + + Returns: + + """ + LOG.info("Copy file {} preserve attributes".format('and' if preserve + else 'without')) + preserve_str = '--preserve=all ' if preserve else '' + cmd = "cp {} {}{}".format(source_file, preserve_str, dest_file) + _exec_cmd(cmd, sudo=sudo, fail_ok=False) + + if cleanup: + file_path = source_file if cleanup == 'source' else dest_file + files_to_delete.append(file_path) + + +def move_file(source_file, dest_file, sudo=True): + """ + This moves a file from source to destination + """ + LOG.info("Copy file and preserve attributes") + cmd = "mv {} {}".format(source_file, dest_file) + _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) + + +def create_and_execute(file_path, sudo=True): + LOG.tc_step("Create a new {} file and execute it".format('root' if sudo + else 'non-root')) + cmd = "touch {}".format(file_path) + _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) + files_to_delete.append(file_path) + + LOG.info("Set file to be executable") + chmod_file(file_path, "755", sudo=sudo) + + LOG.info("Append to copy of monitored file") + cmd = 'echo "ls" | {}tee -a {}'.format('sudo -S ' if sudo else '', + file_path) + _exec_cmd(cmd=cmd, sudo=False, fail_ok=False) + + LOG.info("Execute created file") + _exec_cmd(file_path, sudo=sudo, fail_ok=False) + + +@mark.priorities('nightly', 'sx_nightly') +@mark.parametrize(('operation', 'file_path'), [ + ('create_symlink', '/usr/sbin/ntpq'), + ('copy_and_execute', '/usr/sbin/ntpq'), + ('change_file_attributes', '/usr/sbin/ntpq'), + ('create_and_execute', 'new_nonroot_file') +]) +def test_ima_no_event(operation, file_path): + """ + This test validates following scenarios will not generate IMA event: + - create symlink of a monitored file + - copy a root file with the proper IMA signature, the nexcute it + - make file attribute changes, include: chgrp, chown, chmod + - create and execute a files as sysadmin + + Test Steps: + - Perform specified operation on given file + - Confirm IMA violation event is not triggered + + Teardown: + - Delete created test file + + Maps to TC_17684/TC_17644/TC_17640/TC_17902 from US105523 + This test also covers TC_17665/T_16397 from US105523 (FM Event Log Updates) + + """ + + + global files_to_delete + start_time = common.get_date_in_format() + source_file = file_path + con_ssh = ControllerClient.get_active_controller() + + LOG.tc_step("{} for {}".format(operation, source_file)) + if operation == 'create_symlink': + dest_file = "my_symlink" + create_symlink(source_file, dest_file) + files_to_delete.append(dest_file) + + checksum_match = checksum_compare(source_file, dest_file) + assert checksum_match, "SHA256 checksum should match source file and " \ + "the symlink but didn't" + + elif operation == 'copy_and_execute': + dest_file = "/usr/sbin/TEMP" + copy_file(source_file, dest_file) + files_to_delete.append(dest_file) + + LOG.info("Execute the copied file") + con_ssh.exec_sudo_cmd("{} -p".format(dest_file)) + + elif operation == 'change_file_attributes': + if HostLinuxUser.get_home() != 'sysadmin': + skip('sysadmin user is required to run this test') + dest_file = "/usr/sbin/TEMP" + copy_file(source_file, dest_file) + files_to_delete.append(dest_file) + + LOG.info("Change permission of copy") + chmod_file(dest_file, "777") + LOG.info("Changing group ownership of file") + chgrp_file(dest_file, "sys_protected") + LOG.info("Changing file ownership") + chown_file(dest_file, "sysadmin:sys_protected") + + elif 
operation == 'create_and_execute': + dest_file = "{}/TEMP".format(HostLinuxUser.get_home()) + create_and_execute(file_path=dest_file, sudo=False) + + LOG.tc_step("Ensure no IMA events are raised") + events_found = system_helper.wait_for_events(start=start_time, + timeout=60, num=10, + event_log_id=EventLogID.IMA, + fail_ok=True, strict=False) + + assert not events_found, "Unexpected IMA events found" + + +def _exec_cmd(cmd, con_ssh=None, sudo=False, fail_ok=True): + if not con_ssh: + con_ssh = ControllerClient.get_active_controller() + + if sudo: + return con_ssh.exec_sudo_cmd(cmd, fail_ok=fail_ok) + else: + return con_ssh.exec_cmd(cmd, fail_ok=fail_ok) + + +@mark.priorities('nightly', 'sx_nightly') +@mark.parametrize(('operation', 'file_path'), [ + ('edit_and_execute', '/usr/sbin/ntpq'), + ('append_and_execute', '/usr/sbin/logrotate'), + ('replace_library', '/lib64/libcrypt.so.1'), + ('create_and_execute', 'new_root_file') +]) +def test_ima_event_generation(operation, file_path): + """ + Following IMA violation scenarios are covered: + - append/edit data to/of a monitored file, result in changing of the + hash + - dynamic library changes + - create and execute a files as sysadmin + + Test Steps: + - Perform specified file operations + - Check IMA violation event is logged + + """ + global files_to_delete + + con_ssh = ControllerClient.get_active_controller() + start_time = common.get_date_in_format() + + source_file = file_path + backup_file = None + + if operation in ('edit_and_execute', 'append_and_execute'): + dest_file = "/usr/sbin/TEMP" + copy_file(source_file, dest_file, cleanup='dest') + + if operation == 'edit_and_execute': + LOG.tc_step("Open copy of monitored file and save") + cmd = "vim {} '+:wq!'".format(dest_file) + con_ssh.exec_sudo_cmd(cmd, fail_ok=False) + execute_cmd = "{} -p".format(dest_file) + else: + LOG.tc_step("Append to copy of monitored file") + cmd = 'echo "output" | sudo -S tee -a /usr/sbin/TEMP'.format( + HostLinuxUser.get_password()) + con_ssh.exec_cmd(cmd, fail_ok=False) + LOG.tc_step("Execute modified file") + con_ssh.exec_sudo_cmd(dest_file) + execute_cmd = "{}".format(dest_file) + + LOG.tc_step("Execute modified file") + con_ssh.exec_sudo_cmd(execute_cmd) + + elif operation == 'replace_library': + backup_file = "/root/{}".format(source_file.split('/')[-1]) + dest_file_nocsum = "/root/TEMP" + + LOG.info("Backup source file {} to {}".format(source_file, backup_file)) + copy_file(source_file, backup_file) + LOG.info("Copy the library without the checksum") + copy_file(source_file, dest_file_nocsum, preserve=False) + LOG.info("Replace the library with the unsigned one") + move_file(dest_file_nocsum, source_file) + + elif operation == 'create_and_execute': + dest_file = "{}/TEMP".format(HostLinuxUser.get_home()) + create_and_execute(file_path=dest_file, sudo=True) + + LOG.tc_step("Check for IMA event") + ima_events = system_helper.wait_for_events(start=start_time, + timeout=60, num=10, + event_log_id=EventLogID.IMA, + state='log', severity='major', + fail_ok=True, strict=False) + + if backup_file: + LOG.info("Restore backup file {} to {}".format(backup_file, + source_file)) + move_file(backup_file, source_file) + + assert ima_events, "IMA event is not generated after {} on " \ + "{}".format(operation, file_path) + + +# CHECK TEST PROCEDURE - FAILS in the middle + + +@mark.priorities('nightly', 'sx_nightly') +def test_ima_keyring_protection(): + """ + This test validates that the IMA keyring is safe from user space attacks. 
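+    Every keyctl operation attempted against the IMA keyring below is expected
+    to be rejected with a non-zero exit code.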
+ + Test Steps: + - Attempt to add new keys to the keyring + - Extract key ID and save + - Attempt to change the key timeout + - Attempt to change the group and ownership of the key + - Attempt to delete the key + + This test maps to TC_17667/T_16387 from US105523 (IMA keyring is safe from + user space attacks) + + """ + + con_ssh = ControllerClient.get_active_controller() + + LOG.info("Extract ima key ID") + exitcode, msg = con_ssh.exec_sudo_cmd("cat /proc/keys | grep _ima") + raw_key_id = msg.split(" ", maxsplit=1)[0] + key_id = "0x{}".format(raw_key_id) + LOG.info("Extracted key is: {}".format(key_id)) + + LOG.info("Attempting to add new keys to keyring") + exitcode, msg = con_ssh.exec_sudo_cmd("keyctl add keyring TEST stuff " + "{}".format(key_id)) + assert exitcode != 0, \ + "Key addition should have failed but instead succeeded" + + LOG.info("Attempt to change the timeout on a key") + exitcode, msg = con_ssh.exec_sudo_cmd("keyctl timeout {} " + "3600".format(key_id)) + assert exitcode != 0, \ + "Key timeout modification should be rejected but instead succeeded" + + LOG.info("Attempt to change the group of a key") + exitcode, msg = con_ssh.exec_sudo_cmd("keyctl chgrp {} 0".format(key_id)) + assert exitcode != 0, \ + "Key group modification should be rejected but instead succeeded" + + LOG.info("Attempt to change the ownership of a key") + exitcode, msg = con_ssh.exec_sudo_cmd("keyctl chown {} 1875".format(key_id)) + assert exitcode != 0, \ + "Key ownership modification should be rejected but instead succeeded" + + LOG.info("Attempt to delete a key") + exitcode, msg = con_ssh.exec_sudo_cmd("keyctl clear {}".format(key_id)) + assert exitcode != 0, \ + "Key ownership deletion should be rejected but instead succeeded" diff --git a/automated-pytest-suite/testcases/functional/security/test_kernel_modules.py b/automated-pytest-suite/testcases/functional/security/test_kernel_modules.py new file mode 100755 index 0000000..a48e69f --- /dev/null +++ b/automated-pytest-suite/testcases/functional/security/test_kernel_modules.py @@ -0,0 +1,71 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import re +from pytest import mark + +from keywords import system_helper, host_helper +from utils.tis_log import LOG + + +@mark.nightly +def test_kernel_module_signatures(): + """ + Test kernel modules are properly signed on all stx hosts. + + Steps on each host: + - 'cat /proc/sys/kernel/tainted', ensure value is 4096. + If not, do following steps: + - 'grep --color=never -i "module verification failed" + /var/log/kern.log' to find out failed modules + - 'modinfo | grep --color=never -E "sig|filename" + to display signing info for each module + + """ + hosts = system_helper.get_hosts() + failed_hosts = {} + + for host in hosts: + with host_helper.ssh_to_host(host) as host_ssh: + LOG.tc_step( + "Check for unassigned kernel modules on {}".format(host)) + output = host_ssh.exec_cmd('cat /proc/sys/kernel/tainted', + fail_ok=False)[1] + output_binary = '{0:b}'.format(int(output)) + unassigned_module_bit = '0' + # 14th bit is to flag unassigned module + if len(output_binary) >= 14: + unassigned_module_bit = output_binary[-14] + if unassigned_module_bit != '0': + LOG.error( + "Kernel module verification(s) failed on {}. 
Collecting " + "more info".format( + host)) + + LOG.tc_step( + "Check kern.log for modules with failed verification") + failed_modules = [] + err_out = host_ssh.exec_cmd( + 'grep --color=never -i "module verification failed" ' + '/var/log/kern.log')[ + 1] + for line in err_out.splitlines(): + module = re.findall(r'\] (.*): module verification failed', + line)[0].strip() + if module not in failed_modules: + failed_modules.append(module) + + failed_hosts[host] = failed_modules + LOG.tc_step("Display signing info for {} failed kernel " + "modules: {}".format(host, failed_modules)) + for module in failed_modules: + host_ssh.exec_cmd( + 'modinfo {} | grep --color=never -E ' + '"sig|filename"'.format(module)) + + assert not failed_hosts, "Kernel module signature verification " \ + "failed on: {}".format(failed_hosts) diff --git a/automated-pytest-suite/testcases/functional/storage/__init__.py b/automated-pytest-suite/testcases/functional/storage/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/storage/ceph/test_ceph.py b/automated-pytest-suite/testcases/functional/storage/ceph/test_ceph.py new file mode 100755 index 0000000..f15ff61 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/storage/ceph/test_ceph.py @@ -0,0 +1,115 @@ +""" +This file contains CEPH-related storage test cases. +""" + +import time + +from pytest import mark, param + +from consts.stx import EventLogID +from keywords import host_helper, system_helper, storage_helper +from utils.tis_log import LOG + +PROC_RESTART_TIME = 30 # number of seconds between process restarts + + +# Tested on PV1. Runtime: 278.40 Date: Aug 2nd, 2017. Status: Pass + + +@mark.parametrize('monitor', [ + param('controller-0', marks=mark.nightly), + 'controller-1', + 'storage-0']) +# Tested on PV0. Runtime: 222.34 seconds. Date: Aug 4, 2017 Status: Pass +@mark.usefixtures('ceph_precheck') +def test_ceph_mon_process_kill(monitor): + """ + us69932_tc2_ceph_mon_process_kill from us69932_ceph_monitoring.odt + + Verify that ceph mon processes recover when they are killed. + + Args: + - Nothing + + Setup: + - Requires system with storage nodes + + Test Steps: + 1. Run CEPH pre-check fixture to check: + - system has storage nodes + - health of the ceph cluster is okay + - that we have OSDs provisioned + 2. Pick one ceph monitor and remove it from the quorum + 3. Kill the monitor process + 4. Check that the appropriate alarms are raised + 5. Restore the monitor to the quorum + 6. Check that the alarms clear + 7. Ensure the ceph monitor is restarted under a different pid + + Potential flaws: + 1. 
We're not checking if unexpected alarms are raised (TODO) + + Teardown: + - None + + """ + LOG.tc_step('Get process ID of ceph monitor') + mon_pid = storage_helper.get_mon_pid(monitor) + + with host_helper.ssh_to_host(monitor) as host_ssh: + with host_ssh.login_as_root() as root_ssh: + LOG.tc_step('Remove the monitor') + cmd = 'ceph mon remove {}'.format(monitor) + root_ssh.exec_cmd(cmd) + + LOG.tc_step('Stop the ceph monitor') + cmd = 'service ceph stop mon.{}'.format(monitor) + root_ssh.exec_cmd(cmd) + + LOG.tc_step('Check that ceph monitor failure alarm is raised') + system_helper.wait_for_alarm(alarm_id=EventLogID.STORAGE_DEGRADE, timeout=300) + + with host_helper.ssh_to_host(monitor) as host_ssh: + with host_ssh.login_as_root() as root_ssh: + LOG.tc_step('Get cluster fsid') + cmd = 'ceph fsid' + fsid = host_ssh.exec_cmd(cmd)[0] + ceph_conf = '/etc/ceph/ceph.conf' + + LOG.tc_step('Remove old ceph monitor directory') + cmd = 'rm -rf /var/lib/ceph/mon/ceph-{}'.format(monitor) + root_ssh.exec_cmd(cmd) + + LOG.tc_step('Re-add the monitor') + cmd = 'ceph-mon -i {} -c {} --mkfs --fsid {}'.format(monitor, ceph_conf, fsid) + root_ssh.exec_cmd(cmd) + + LOG.tc_step('Check the ceph storage alarm condition clears') + system_helper.wait_for_alarm_gone(alarm_id=EventLogID.STORAGE_DEGRADE, timeout=360) + + LOG.tc_step('Check the ceph-mon process is restarted with a different pid') + mon_pid2 = None + for i in range(0, PROC_RESTART_TIME): + mon_pid2 = storage_helper.get_mon_pid(monitor, fail_ok=True) + if mon_pid2 and mon_pid2 != mon_pid: + break + time.sleep(5) + + LOG.info('Old pid is {} and new pid is {}'.format(mon_pid, mon_pid2)) + msg = 'Process did not restart in time' + assert mon_pid2 and mon_pid2 != mon_pid, msg + + +# Testd on PV0. Ruentime: 1899.93 seconds. Date: Aug 4, 2017. Status: Pass + + +# Tested on PV0. Runtime: 2770.23 seconds sec. Date: Aug 4, 2017 Status: # Pass + + +# Tested on PV1. Runtime: 762.41 secs Date: Aug 2nd, 2017. Status: Pass + + +# Tested on PV1. Runtime: 1212.55 secs Date: Aug 2nd, 2017. Status: Pass + + +# Tested on PV0. Runtime: 58.82 seconds. Status: Pass Date: Aug 8, 2017 diff --git a/automated-pytest-suite/testcases/functional/storage/conftest.py b/automated-pytest-suite/testcases/functional/storage/conftest.py new file mode 100755 index 0000000..157d7f8 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/storage/conftest.py @@ -0,0 +1,3 @@ +from testfixtures.resource_mgmt import * +from testfixtures.resource_create import * +from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/storage/test_storage_vm_migration.py b/automated-pytest-suite/testcases/functional/storage/test_storage_vm_migration.py new file mode 100755 index 0000000..a27b7b5 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/storage/test_storage_vm_migration.py @@ -0,0 +1,521 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time + +from pytest import fixture, skip, mark + +from consts.stx import VMStatus, GuestImages +from keywords import host_helper, vm_helper, cinder_helper, glance_helper, \ + system_helper, network_helper +from testfixtures.fixture_resources import ResourceCleanup +from utils import table_parser, exceptions +from utils.tis_log import LOG + + +@fixture(scope='module', autouse=True) +def check_system(): + if not cinder_helper.is_volumes_pool_sufficient(min_size=80): + skip("Cinder volume pool size is smaller than 80G") + + if len(host_helper.get_up_hypervisors()) < 2: + skip("at least two computes are required") + + if len(host_helper.get_storage_backing_with_max_hosts()[1]) < 2: + skip("at least two hosts with the same storage backing are required") + + +@fixture(scope='function', autouse=True) +def pre_alarm_(): + """ + Text fixture to get pre-test existing alarm list. + Args:None + + Returns: list of alarms + + """ + pre_alarms = system_helper.get_alarms_table() + pre_list = table_parser.get_all_rows(pre_alarms) + # Time stamps are removed before comparing alarms with post test alarms. + # The time stamp is the last item in each alarm row. + for n in pre_list: + n.pop() + return pre_list + + +@fixture(scope='module') +def image_(): + """ + Text fixture to get guest image + Args: + + Returns: the guest image id + + """ + return glance_helper.get_image_id_from_name() + + +@fixture(scope='function') +def volumes_(image_): + """ + Text fixture to create two large cinder volumes with size of 20 and 40 GB. + Args: + image_: the guest image_id + + Returns: list of volume dict as following: + {'id': , + 'display_name': , + 'size': <20 or 40> + } + """ + + volumes = [] + cinder_params = [{'name': 'vol_inst1', + 'size': 20}, + {'name': 'vol_inst2', + 'size': 40}] + + for param in cinder_params: + volume_id = \ + cinder_helper.create_volume(name=param['name'], source_id=image_, + size=param['size'])[1] + volume = { + 'id': volume_id, + 'display_name': param['name'], + 'size': param['size'] + } + volumes.append(volume) + ResourceCleanup.add('volume', volume['id'], scope='function') + + return volumes + + +@fixture(scope='function') +def vms_(volumes_): + """ + Text fixture to create cinder volume with specific 'display-name', + and 'size' + Args: + volumes_: list of two large volumes dict created by volumes_ fixture + + Returns: volume dict as following: + {'id': , + 'display_name': , + 'size': <20 or 40> + } + """ + vms = [] + vm_names = ['test_inst1', 'test_inst2'] + index = 0 + for vol_params in volumes_: + instance_name = vm_names[index] + vm_id = vm_helper.boot_vm(name=instance_name, source='volume', + source_id=vol_params['id'], + cleanup='function')[ + 1] # , user_data=get_user_data_file())[1] + vm = { + 'id': vm_id, + 'display_name': instance_name, + } + vms.append(vm) + index += 1 + return vms + + +@mark.storage_sanity +def test_vm_with_a_large_volume_live_migrate(vms_, pre_alarm_): + """ + Test instantiate a vm with a large volume ( 20 GB and 40 GB) and live + migrate: + Args: + vms_ (dict): vms created by vms_ fixture + pre_alarm_ (list): alarm lists obtained by pre_alarm_ fixture + + Test Setups: + - get tenant1 and management networks which are already created for lab + setup + - get or create a "small" flavor + - get the guest image id + - create two large volumes (20 GB and 40 GB) in cinder + - boot two vms ( test_inst1, test_inst2) using volumes 20 GB and 40 GB + respectively + + + Test Steps: + - Verify VM status is ACTIVE + - 
Validate that VMs boot, and that no timeouts or error status occur. + - Verify the VM can be pinged from NATBOX + - Verify login to VM and rootfs (dev/vda) filesystem is rw mode + - Attempt to live migrate of VMs + - Validate that the VMs migrated and no errors or alarms are present + - Log into both VMs and validate that file systems are read-write + - Terminate VMs + + Skip conditions: + - less than two computes + - no storage node + + """ + for vm in vms_: + vm_id = vm['id'] + + LOG.tc_step( + "Checking VM status; VM Instance id is: {}......".format(vm_id)) + vm_state = vm_helper.get_vm_status(vm_id) + + assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; Not in ' \ + 'ACTIVATE state as expected' \ + .format(vm_id, vm_state) + + LOG.tc_step("Verify VM can be pinged from NAT box...") + rc, boot_time = check_vm_boot_time(vm_id) + assert rc, "VM is not pingable after {} seconds ".format(boot_time) + + LOG.tc_step("Verify Login to VM and check filesystem is rw mode....") + assert is_vm_filesystem_rw( + vm_id), 'rootfs filesystem is not RW as expected for VM {}' \ + .format(vm['display_name']) + + LOG.tc_step( + "Attempting live migration; vm id = {}; vm_name = {} ....".format( + vm_id, vm['display_name'])) + + code, msg = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=False) + LOG.tc_step("Verify live migration succeeded...") + assert code == 0, "Expected return code 0. Actual return code: {}; " \ + "details: {}".format(code, msg) + + LOG.tc_step("Verifying filesystem is rw mode after live migration....") + assert is_vm_filesystem_rw( + vm_id), 'After live migration rootfs filesystem is not RW as ' \ + 'expected for VM {}'. \ + format(vm['display_name']) + + +@mark.domain_sanity +def test_vm_with_large_volume_and_evacuation(vms_, pre_alarm_): + """ + Test instantiate a vm with a large volume ( 20 GB and 40 GB) and evacuate: + + Args: + vms_ (dict): vms created by vms_ fixture + pre_alarm_ (list): alarm lists obtained by pre_alarm_ fixture + + Test Setups: + - get tenant1 and management networks which are already created for lab + setup + - get or create a "small" flavor + - get the guest image id + - create two large volumes (20 GB and 40 GB) in cinder + - boot two vms ( test_inst1, test_inst2) using volumes 20 GB and 40 GB + respectively + + + Test Steps: + - Verify VM status is ACTIVE + - Validate that VMs boot, and that no timeouts or error status occur. 
+ - Verify the VM can be pinged from NATBOX + - Verify login to VM and rootfs (dev/vda) filesystem is rw mode + - live migrate, if required, to bring both VMs to the same compute + - Validate migrated VM and no errors or alarms are present + - Reboot compute host to initiate evacuation + - Verify VMs are evacuated + - Check for any system alarms + - Verify login to VM and rootfs (dev/vda) filesystem is still rw mode + after evacuation + - Terminate VMs + + Skip conditions: + - less that two computes + - no storage node + + """ + vm_ids = [] + for vm in vms_: + vm_id = vm['id'] + vm_ids.append(vm_id) + LOG.tc_step( + "Checking VM status; VM Instance id is: {}......".format(vm_id)) + vm_state = vm_helper.get_vm_status(vm_id) + assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; Not in ' \ + 'ACTIVATE state as expected' \ + .format(vm_id, vm_state) + + LOG.tc_step("Verify VM can be pinged from NAT box...") + rc, boot_time = check_vm_boot_time(vm_id) + assert rc, "VM is not pingable after {} seconds ".format(boot_time) + + LOG.tc_step("Verify Login to VM and check filesystem is rw mode....") + assert is_vm_filesystem_rw( + vm_id), 'rootfs filesystem is not RW as expected for VM {}' \ + .format(vm['display_name']) + + LOG.tc_step( + "Checking if live migration is required to put the vms to a single " + "compute....") + host_0 = vm_helper.get_vm_host(vm_ids[0]) + host_1 = vm_helper.get_vm_host(vm_ids[1]) + + if host_0 != host_1: + LOG.tc_step("Attempting to live migrate vm {} to host {} ....".format( + (vms_[1])['display_name'], host_0)) + code, msg = vm_helper.live_migrate_vm(vm_ids[1], + destination_host=host_0) + LOG.tc_step("Verify live migration succeeded...") + assert code == 0, "Live migration of vm {} to host {} did not " \ + "success".format((vms_[1])['display_name'], host_0) + + LOG.tc_step("Verify both VMs are in same host....") + assert host_0 == vm_helper.get_vm_host( + vm_ids[1]), "VMs are not in the same compute host" + + LOG.tc_step( + "Rebooting compute {} to initiate vm evacuation .....".format(host_0)) + vm_helper.evacuate_vms(host=host_0, vms_to_check=vm_ids, ping_vms=True) + + LOG.tc_step("Login to VM and to check filesystem is rw mode....") + assert is_vm_filesystem_rw((vms_[0])[ + 'id']), 'After evacuation the rootfs ' \ + 'filesystem is not RW as expected ' \ + 'for VM {}'.format( + (vms_[0])['display_name']) + + LOG.tc_step("Login to VM and to check filesystem is rw mode....") + assert is_vm_filesystem_rw((vms_[1])['id']), \ + 'After evacuation the rootfs filesystem is not RW as expected ' \ + 'for VM {}'.format((vms_[1])['display_name']) + + +@mark.domain_sanity +def test_instantiate_a_vm_with_a_large_volume_and_cold_migrate(vms_, + pre_alarm_): + """ + Test instantiate a vm with a large volume ( 20 GB and 40 GB) and cold + migrate: + Args: + vms_ (dict): vms created by vms_ fixture + pre_alarm_ (list): alarm lists obtained by pre_alarm_ fixture + + Test Setups: + - get tenant1 and management networks which are already created for lab + setup + - get or create a "small" flavor + - get the guest image id + - create two large volumes (20 GB and 40 GB) in cinder + - boot two vms ( test_inst1, test_inst2) using volumes 20 GB and 40 GB + respectively + + + Test Steps: + - Verify VM status is ACTIVE + - Validate that VMs boot, and that no timeouts or error status occur. 
+ - Verify the VM can be pinged from NATBOX + - Verify login to VM and rootfs (dev/vda) filesystem is rw mode + - Attempt to cold migrate of VMs + - Validate that the VMs migrated and no errors or alarms are present + - Log into both VMs and validate that file systems are read-write + - Terminate VMs + + Skip conditions: + - less than two hosts with the same storage backing + - less than two computes + - no storage node + + """ + LOG.tc_step("Instantiate a vm with large volume.....") + + vms = vms_ + + for vm in vms: + vm_id = vm['id'] + + LOG.tc_step( + "Checking VM status; VM Instance id is: {}......".format(vm_id)) + vm_state = vm_helper.get_vm_status(vm_id) + + assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; Not in ' \ + 'ACTIVATE state as expected' \ + .format(vm_id, vm_state) + + LOG.tc_step("Verify VM can be pinged from NAT box...") + rc, boot_time = check_vm_boot_time(vm_id) + assert rc, "VM is not pingable after {} seconds ".format(boot_time) + + LOG.tc_step("Verify Login to VM and check filesystem is rw mode....") + assert is_vm_filesystem_rw( + vm_id), 'rootfs filesystem is not RW as expected for VM {}' \ + .format(vm['display_name']) + + LOG.tc_step( + "Attempting cold migration; vm id = {}; vm_name = {} ....".format( + vm_id, vm['display_name'])) + + code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id, fail_ok=True) + LOG.tc_step("Verify cold migration succeeded...") + assert code == 0, "Expected return code 0. Actual return code: {}; " \ + "details: {}".format(code, msg) + + LOG.tc_step("Verifying filesystem is rw mode after cold migration....") + assert is_vm_filesystem_rw( + vm_id), 'After cold migration rootfs filesystem is not RW as ' \ + 'expected for ' \ + 'VM {}'.format(vm['display_name']) + + # LOG.tc_step("Checking for any system alarm ....") + # rc, new_alarm = is_new_alarm_raised(pre_alarms) + # assert not rc, " alarm(s) found: {}".format(new_alarm) + + +def test_instantiate_a_vm_with_multiple_volumes_and_migrate(): + """ + Test a vm with a multiple volumes live, cold migration and evacuation: + + Test Setups: + - get guest image_id + - get or create 'small' flavor_id + - get tenenat and managment network ids + + Test Steps: + - create volume for boot and another extra size 8GB + - boot vms from the created volume + - Validate that VMs boot, and that no timeouts or error status occur. + - Verify VM status is ACTIVE + - Attach the second volume to VM + - Attempt live migrate VM + - Login to VM and verify the filesystem is rw mode on both volumes + - Attempt cold migrate VM + - Login to VM and verify the filesystem is rw mode on both volumes + - Reboot the compute host to initiate evacuation + - Login to VM and verify the filesystem is rw mode on both volumes + - Terminate VMs + + Skip conditions: + - less than two computes + - less than one storage + + """ + # skip("Currently not working. 
Centos image doesn't see both volumes") + LOG.tc_step("Creating a volume size=8GB.....") + vol_id_0 = cinder_helper.create_volume(size=8)[1] + ResourceCleanup.add('volume', vol_id_0, scope='function') + + LOG.tc_step("Creating a second volume size=8GB.....") + vol_id_1 = cinder_helper.create_volume(size=8, bootable=False)[1] + LOG.tc_step("Volume id is: {}".format(vol_id_1)) + ResourceCleanup.add('volume', vol_id_1, scope='function') + + LOG.tc_step("Booting instance vm_0...") + + vm_id = vm_helper.boot_vm(name='vm_0', source='volume', source_id=vol_id_0, + cleanup='function')[1] + time.sleep(5) + + LOG.tc_step("Verify VM can be pinged from NAT box...") + rc, boot_time = check_vm_boot_time(vm_id) + assert rc, "VM is not pingable after {} seconds ".format(boot_time) + + LOG.tc_step("Login to VM and to check filesystem is rw mode....") + assert is_vm_filesystem_rw( + vm_id), 'vol_0 rootfs filesystem is not RW as expected.' + + LOG.tc_step("Attemping to attach a second volume to VM...") + vm_helper.attach_vol_to_vm(vm_id, vol_id_1) + + LOG.tc_step( + "Login to VM and to check filesystem is rw mode for both volumes....") + assert is_vm_filesystem_rw(vm_id, rootfs=['vda', + 'vdb']), 'volumes rootfs ' \ + 'filesystem is not RW ' \ + 'as expected.' + + LOG.tc_step("Attemping live migrate VM...") + vm_helper.live_migrate_vm(vm_id=vm_id) + + LOG.tc_step( + "Login to VM and to check filesystem is rw mode after live " + "migration....") + assert is_vm_filesystem_rw(vm_id, rootfs=['vda', + 'vdb']), 'After live migration ' \ + 'rootfs filesystem is ' \ + 'not RW' + + LOG.tc_step("Attempting cold migrate VM...") + vm_helper.cold_migrate_vm(vm_id) + + LOG.tc_step( + "Login to VM and to check filesystem is rw mode after live " + "migration....") + assert is_vm_filesystem_rw(vm_id, rootfs=['vda', + 'vdb']), 'After cold migration ' \ + 'rootfs filesystem is ' \ + 'not RW' + LOG.tc_step("Testing VM evacuation.....") + before_host_0 = vm_helper.get_vm_host(vm_id) + + LOG.tc_step("Rebooting compute {} to initiate vm evacuation .....".format( + before_host_0)) + vm_helper.evacuate_vms(host=before_host_0, vms_to_check=vm_id, + ping_vms=True) + + LOG.tc_step( + "Login to VM and to check filesystem is rw mode after live " + "migration....") + assert is_vm_filesystem_rw(vm_id, rootfs=['vda', + 'vdb']), 'After evacuation ' \ + 'filesystem is not RW' + + +def check_vm_boot_time(vm_id): + start_time = time.time() + output = vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) + elapsed_time = time.time() - start_time + return output, elapsed_time + + +def is_vm_filesystem_rw(vm_id, rootfs='vda', vm_image_name=None): + """ + + Args: + vm_id: + rootfs (str|list): + vm_image_name (None|str): + + Returns: + + """ + vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=240) + + if vm_image_name is None: + vm_image_name = GuestImages.DEFAULT['guest'] + + router_host = dhcp_host = None + try: + LOG.info( + "---------Collecting router and dhcp agent host info-----------") + router_host = network_helper.get_router_host() + mgmt_net = network_helper.get_mgmt_net_id() + dhcp_host = network_helper.get_network_agents(field='Host', + network=mgmt_net) + + with vm_helper.ssh_to_vm_from_natbox(vm_id, vm_image_name=vm_image_name, + retry_timeout=300) as vm_ssh: + if isinstance(rootfs, str): + rootfs = [rootfs] + for fs in rootfs: + cmd = "mount | grep {} | grep rw | wc -l".format(fs) + cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1] + if cmd_output != '1': + LOG.info("Filesystem /dev/{} is not rw for VM: " + "{}".format(fs, 
vm_id)) + return False + return True + except exceptions.SSHRetryTimeout: + LOG.error("Failed to ssh, collecting vm console log.") + vm_helper.get_console_logs(vm_ids=vm_id) + LOG.info("Router host: {}. dhcp agent host: {}".format(router_host, + dhcp_host)) + raise diff --git a/automated-pytest-suite/testcases/functional/z_containers/__init__.py b/automated-pytest-suite/testcases/functional/z_containers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/functional/z_containers/test_custom_containers.py b/automated-pytest-suite/testcases/functional/z_containers/test_custom_containers.py new file mode 100644 index 0000000..f9e49e0 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/z_containers/test_custom_containers.py @@ -0,0 +1,389 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import os +import re +import time + +from pytest import fixture, mark, skip + +from utils.tis_log import LOG +from utils.clients.ssh import ControllerClient +from utils.clients.local import LocalHostClient + +from keywords import common, kube_helper, host_helper, system_helper, \ + container_helper, keystone_helper +from consts.filepaths import TestServerPath, StxPath +from consts.stx import HostAvailState, Container +from consts.proj_vars import ProjVar +from consts.auth import HostLinuxUser +from testfixtures.recover_hosts import HostsToRecover + + +POD_YAML = 'hellokitty.yaml' +POD_NAME = 'hellokitty' + +HELM_TAR = 'hello-kitty.tgz' +HELM_APP_NAME = 'hello-kitty' +HELM_POD_FULL_NAME = 'hk-hello-kitty-hello-kit' +HELM_MSG = '

Hello Kitty World!

' + + +def controller_precheck(controller): + host = system_helper.get_active_controller_name() + if controller == 'standby': + controllers = system_helper.get_controllers( + availability=(HostAvailState.AVAILABLE, HostAvailState.DEGRADED, + HostAvailState.ONLINE)) + controllers.remove(host) + if not controllers: + skip('Standby controller does not exist or not in good state') + host = controllers[0] + + return host + + +@fixture(scope='module') +def copy_test_apps(): + skip('Shared Test File Server is not ready') + stx_home = HostLinuxUser.get_home() + con_ssh = ControllerClient.get_active_controller() + app_dir = os.path.join(stx_home, 'custom_apps/') + if not con_ssh.file_exists(app_dir + POD_YAML): + common.scp_from_test_server_to_active_controller( + source_path=TestServerPath.CUSTOM_APPS, con_ssh=con_ssh, + dest_dir=stx_home, timeout=60, is_dir=True) + + if not system_helper.is_aio_simplex(): + dest_host = 'controller-1' if con_ssh.get_hostname() == \ + 'controller-0' else 'controller-0' + con_ssh.rsync(source=app_dir, dest_server=dest_host, dest=app_dir, + timeout=60) + + return app_dir + + +@fixture() +def delete_test_pod(): + LOG.info("Delete {} pod if exists".format(POD_NAME)) + kube_helper.delete_resources(resource_names=POD_NAME, fail_ok=True) + + +@mark.platform_sanity +@mark.parametrize('controller', [ + 'active', + 'standby' +]) +def test_launch_pod_via_kubectl(copy_test_apps, delete_test_pod, controller): + """ + Test custom pod apply and delete + Args: + copy_test_apps (str): module fixture + delete_test_pod: fixture + controller: test param + + Setups: + - Copy test files from test server to stx system (module) + - Delete test pod if already exists on system + + Test Steps: + - ssh to given controller + - kubectl apply custom pod yaml and verify custom pod is added to + both controllers (if applicable) + - kubectl delete custom pod and verify it is removed from both + controllers (if applicable) + + """ + host = controller_precheck(controller) + + with host_helper.ssh_to_host(hostname=host) as con_ssh: + app_path = os.path.join(copy_test_apps, POD_YAML) + LOG.tc_step('kubectl apply {}, and check {} pod is created and ' + 'running'.format(POD_YAML, POD_NAME)) + kube_helper.apply_pod(file_path=app_path, pod_name=POD_NAME, + check_both_controllers=True, con_ssh=con_ssh) + + LOG.tc_step("Delete {} pod and check it's removed from both " + "controllers if applicable".format(POD_NAME)) + kube_helper.delete_resources(resource_names=POD_NAME, con_ssh=con_ssh) + + +@fixture() +def cleanup_app(): + if container_helper.get_apps(application=HELM_APP_NAME): + LOG.fixture_step("Remove {} app if applied".format(HELM_APP_NAME)) + container_helper.remove_app(app_name=HELM_APP_NAME) + + LOG.fixture_step("Delete {} app".format(HELM_APP_NAME)) + container_helper.delete_app(app_name=HELM_APP_NAME) + + +@mark.platform_sanity +def test_launch_app_via_sysinv(copy_test_apps, cleanup_app): + """ + Test upload, apply, remove, delete custom app via system cmd + Args: + copy_test_apps (str): module fixture + cleanup_app: fixture + + Setups: + - Copy test files from test server to stx system (module) + - Remove and delete test app if exists + + Test Steps: + - system application-upload test app tar file and wait for it to be + uploaded + - system application-apply test app and wait for it to be applied + - wget : from remote host + - Verify app contains expected content + - system application-remove test app and wait for it to be uninstalled + - system application-delete test app from system + + """ + 
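+    # Clarifying note: the wget step below looks up the service nodePort via a
+    # kubectl jsonpath query, then fetches the page from
+    # <http|https>://<OAM floating IP>:<nodePort> and compares the returned
+    # content against HELM_MSG.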
app_dir = copy_test_apps + app_name = HELM_APP_NAME + + LOG.tc_step("Upload {} helm charts".format(app_name)) + container_helper.upload_app(app_name=app_name, app_version='1.0', + tar_file=os.path.join(app_dir, HELM_TAR)) + + LOG.tc_step("Apply {}".format(app_name)) + container_helper.apply_app(app_name=app_name) + + LOG.tc_step("wget app via :") + json_path = '{.spec.ports[0].nodePort}' + node_port = kube_helper.get_pod_value_jsonpath( + type_name='service/{}'.format(HELM_POD_FULL_NAME), jsonpath=json_path) + assert re.match(r'\d+', node_port), "Unable to get nodePort via " \ + "jsonpath '{}'".format(json_path) + + localhost = LocalHostClient(connect=True) + prefix = 'https' if keystone_helper.is_https_enabled() else 'http' + oam_ip = ProjVar.get_var('LAB')['floating ip'] + output_file = '{}/{}.html'.format(ProjVar.get_var('TEMP_DIR'), + HELM_APP_NAME) + localhost.exec_cmd('wget {}://{}:{} -O {}'.format( + prefix, oam_ip, node_port, output_file), fail_ok=False) + + LOG.tc_step("Verify app contains expected content") + app_content = localhost.exec_cmd('cat {}; echo'.format(output_file), + get_exit_code=False)[1] + assert app_content.startswith(HELM_MSG), \ + "App does not start with expected message." + + LOG.tc_step("Remove applied app") + container_helper.remove_app(app_name=app_name) + + LOG.tc_step("Delete uninstalled app") + container_helper.delete_app(app_name=app_name) + + LOG.tc_step("Wait for pod terminate") + kube_helper.wait_for_resources_gone(resource_names=HELM_POD_FULL_NAME, + check_interval=10, namespace='default') + + +def remove_cache_and_pull(con_ssh, name, test_image, fail_ok=False): + container_helper.remove_docker_images(images=(test_image, name), + con_ssh=con_ssh, fail_ok=fail_ok) + container_helper.pull_docker_image(name=name, con_ssh=con_ssh) + + +@mark.platform_sanity +@mark.parametrize('controller', [ + 'active', + 'standby' +]) +def test_push_docker_image_to_local_registry(controller): + """ + Test push a docker image to local docker registry + Args: + controller: + + Setup: + - Copy test files from test server to stx system (module) + + Test Steps: + On specified controller (active or standby): + - Pull test image busybox and get its ID + - Remove busybox repo from local registry if exists + - Tag image with local registry + - Push test image to local registry + - Remove cached test images + - Pull test image from local registry + On the other controller if exists, verify local registry is synced: + - Remove cached test images + - Pull test image from local registry + + """ + test_image = 'busybox' + reg_addr = Container.LOCAL_DOCKER_REG + host = controller_precheck(controller) + controllers = system_helper.get_controllers( + availability=(HostAvailState.AVAILABLE, HostAvailState.DEGRADED, + HostAvailState.ONLINE)) + controllers.remove(host) + + with host_helper.ssh_to_host(hostname=host) as con_ssh: + + LOG.tc_step("Pull {} image from external on {} controller " + "{}".format(test_image, controller, host)) + image_id = container_helper.pull_docker_image(name=test_image, + con_ssh=con_ssh)[1] + + LOG.tc_step("Remove {} from local registry if" + " exists".format(test_image)) + con_ssh.exec_sudo_cmd('rm -rf {}/{}'.format(StxPath.DOCKER_REPO, + test_image)) + + LOG.tc_step("Tag image with local registry") + target_name = '{}/{}'.format(reg_addr, test_image) + container_helper.tag_docker_image(source_image=image_id, + target_name=target_name, + con_ssh=con_ssh) + + LOG.tc_step("Login to local docker registry and push test image from " + "{} controller 
{}".format(controller, host)) + container_helper.login_to_docker(registry=reg_addr, con_ssh=con_ssh) + container_helper.push_docker_image(target_name, con_ssh=con_ssh) + + LOG.tc_step("Remove cached test images and pull from local " + "registry on {}".format(host)) + remove_cache_and_pull(con_ssh=con_ssh, name=target_name, + test_image=test_image) + container_helper.remove_docker_images(images=(target_name, ), + con_ssh=con_ssh) + + if controllers: + other_host = controllers[0] + with host_helper.ssh_to_host(other_host, con_ssh=con_ssh) as \ + other_ssh: + LOG.tc_step("Remove cached test images on the other " + "controller {} if exists and pull from local " + "registry".format(other_host)) + container_helper.login_to_docker(registry=reg_addr, + con_ssh=other_ssh) + remove_cache_and_pull(con_ssh=other_ssh, name=target_name, + fail_ok=True, test_image=test_image) + container_helper.remove_docker_images(images=(target_name,), + con_ssh=other_ssh) + + LOG.tc_step("Cleanup {} from local docker registry after " + "test".format(test_image)) + con_ssh.exec_sudo_cmd('rm -rf {}/{}'.format(StxPath.DOCKER_REPO, + test_image)) + + +# Taking out following test case until a shared file server is available for +# community and test charts are available to public +@mark.platform_sanity +def test_upload_charts_via_helm_upload(copy_test_apps): + """ + Test upload helm charts via helm-upload cmd directly. i.e., without + using sysinv cmd. + Args: + copy_test_apps: + + Setups: + - Copy test files from test server to stx system (module) + + Test Steps: + - Upload helm charts from given controller via 'helm-upload ' + - Verify the charts appear at /www/pages/helm_charts/ on both + controllers (if applicable) + + """ + app_dir = copy_test_apps + + LOG.tc_step("Upload helm charts via helm-upload cmd from active controller " + "and check charts are in /www/pages/") + file_path = container_helper.upload_helm_charts( + tar_file=os.path.join(app_dir, HELM_TAR), delete_first=True)[1] + + if system_helper.get_standby_controller_name(): + LOG.tc_step("Swact active controller and verify uploaded charts " + "are synced over") + host_helper.swact_host() + con_ssh = ControllerClient.get_active_controller() + charts_exist = con_ssh.file_exists(file_path) + assert charts_exist, "{} does not exist after swact to {}".format( + file_path, con_ssh.get_hostname()) + LOG.info("{} successfully synced after swact".format(file_path)) + + +@fixture() +def deploy_delete_kubectl_app(request): + app_name = 'resource-consumer' + app_params = \ + '--image=gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4' \ + + ' --expose' \ + + ' --service-overrides=' \ + + "'{ " + '"spec": { "type": "LoadBalancer" } }' \ + + "' --port 8080 --requests='cpu=1000m,memory=1024Mi'" + + LOG.fixture_step("Create {} test app by kubectl run".format(app_name)) + sub_cmd = "run {}".format(app_name) + kube_helper.exec_kube_cmd(sub_cmd=sub_cmd, args=app_params, fail_ok=False) + + LOG.fixture_step("Check {} test app is created ".format(app_name)) + pod_name = kube_helper.get_pods(field='NAME', namespace='default', + name=app_name, strict=False)[0] + + def delete_app(): + LOG.fixture_step("Delete {} pod if exists after test " + "run".format(app_name)) + kube_helper.delete_resources(resource_names=app_name, + resource_types=('deployment', 'service'), + namespace='default', post_check=False) + kube_helper.wait_for_resources_gone(resource_names=pod_name, + namespace='default') + request.addfinalizer(delete_app) + + kube_helper.wait_for_pods_status(pod_names=pod_name, 
namespace='default', + fail_ok=False) + return app_name, pod_name + + +@mark.platform_sanity +def test_host_operations_with_custom_kubectl_app(deploy_delete_kubectl_app): + """ + Test create, delete custom app via kubectl run cmd + Args: + deploy_delete_kubectl_app: fixture + + Setups: + - Create kubectl app via kubectl run + + Test Steps: + - If duplex: swact and verify pod still Running + - Lock/unlock controller and verify pod still Running + + Teardown: + - Delete kubectl deployment and service + - Verify pod is removed + + """ + app_name, pod_name = deploy_delete_kubectl_app + active, standby = system_helper.get_active_standby_controllers() + + if standby: + LOG.tc_step("Swact active controller and verify {} test app is " + "running ".format(pod_name)) + host_helper.swact_host() + kube_helper.wait_for_pods_status(pod_names=pod_name, + namespace='default', fail_ok=False) + + LOG.tc_step("Lock/unlock {} and verify {} test app is " + "running.".format(active, pod_name)) + HostsToRecover.add(active) + host_helper.lock_host(active, swact=False) + + # wait for services to stabilize before unlocking + time.sleep(20) + + host_helper.unlock_host(active) + kube_helper.wait_for_pods_status(pod_names=pod_name, namespace=None, + fail_ok=False) diff --git a/automated-pytest-suite/testcases/functional/z_containers/test_kube_edgex_services.py b/automated-pytest-suite/testcases/functional/z_containers/test_kube_edgex_services.py new file mode 100644 index 0000000..80137eb --- /dev/null +++ b/automated-pytest-suite/testcases/functional/z_containers/test_kube_edgex_services.py @@ -0,0 +1,117 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +from pytest import fixture, mark, skip + +from keywords import kube_helper, system_helper, host_helper +from consts.stx import PodStatus, HostAvailState +from utils.tis_log import LOG +from utils.clients.ssh import ControllerClient + +EDGEX_URL = \ + 'https://github.com/rohitsardesai83/edgex-on-kubernetes/archive/master.zip' +EDGEX_ARCHIVE = '~/master.zip' +EDGEX_DIR = '~/edgex-on-kubernetes-master' +EDGEX_START = '{}/hack/edgex-up.sh'.format(EDGEX_DIR) +EDGEX_STOP = '{}/hack/edgex-down.sh'.format(EDGEX_DIR) + + +@fixture(scope='module') +def deploy_edgex(request): + con_ssh = ControllerClient.get_active_controller() + + LOG.fixture_step("Downloading EdgeX-on-Kubernetes") + con_ssh.exec_cmd('wget {}'.format(EDGEX_URL), fail_ok=False) + charts_exist = con_ssh.file_exists(EDGEX_ARCHIVE) + assert charts_exist, '{} does not exist'.format(EDGEX_ARCHIVE) + + LOG.fixture_step("Extracting EdgeX-on-Kubernetes") + con_ssh.exec_cmd('unzip {}'.format(EDGEX_ARCHIVE), fail_ok=False) + + LOG.fixture_step("Deploying EdgeX-on-Kubernetes") + con_ssh.exec_cmd(EDGEX_START, 300, fail_ok=False) + + def delete_edgex(): + LOG.fixture_step("Destroying EdgeX-on-Kubernetes") + con_ssh.exec_cmd(EDGEX_STOP, 300, fail_ok=False) + + LOG.fixture_step("Removing EdgeX-on-Kubernetes") + con_ssh.exec_cmd('rm -rf {} {}'.format(EDGEX_ARCHIVE, EDGEX_DIR)) + request.addfinalizer(delete_edgex) + + return + + +def check_host(controller): + host = system_helper.get_active_controller_name() + if controller == 'standby': + controllers = system_helper.get_controllers( + availability=(HostAvailState.AVAILABLE, HostAvailState.DEGRADED, + HostAvailState.ONLINE)) + controllers.remove(host) + if not controllers: + skip('Standby controller does not exist or not in good state') + host = controllers[0] + return host + + +@mark.platform +@mark.parametrize('controller', [ 
+ 'active', + 'standby' +]) +def test_kube_edgex_services(deploy_edgex, controller): + """ + Test edgex pods are deployed and running + Args: + deploy_edgex (str): module fixture + controller: test param + Test Steps: + - ssh to given controller + - Wait for EdgeX pods deployment + - Check all EdgeX pods are running + - Check EdgeX services displayed: + 'edgex-core-command', 'edgex-core-consul', + 'edgex-core-data', 'edgex-core-metadata' + - Check EdgeX deployments displayed: + 'edgex-core-command', 'edgex-core-consul', + 'edgex-core-data', 'edgex-core-metadata' + + """ + pods = ('edgex-core-command', 'edgex-core-consul', + 'edgex-core-data', 'edgex-core-metadata') + services = ('edgex-core-command', 'edgex-core-consul', + 'edgex-core-data', 'edgex-core-metadata') + deployments = ('edgex-core-command', 'edgex-core-consul', + 'edgex-core-data', 'edgex-core-metadata') + + host = check_host(controller=controller) + with host_helper.ssh_to_host(hostname=host) as con_ssh: + LOG.tc_step("Check EdgeX pods on {}: {}".format(controller, pods)) + edgex_services = kube_helper.get_resources(resource_type='service', + namespace='default', + con_ssh=con_ssh) + edgex_deployments = kube_helper.get_resources( + resource_type='deployment.apps', namespace='default', + con_ssh=con_ssh) + + LOG.tc_step("Wait for EdgeX pods Running") + kube_helper.wait_for_pods_status(partial_names=pods, + namespace='default', + status=PodStatus.RUNNING, + con_ssh=con_ssh, fail_ok=False) + + LOG.tc_step("Check EdgeX services on {}: {}".format(controller, + services)) + for service in services: + assert service in edgex_services, "{} not in kube-system " \ + "service table".format(service) + + LOG.tc_step("Check EdgeX deployments on {}: {}".format(controller, + deployments)) + for deployment in deployments: + assert deployment in edgex_deployments, \ + "{} not in kube-system deployment.apps table".format(deployment) diff --git a/automated-pytest-suite/testcases/functional/z_containers/test_kube_system_services.py b/automated-pytest-suite/testcases/functional/z_containers/test_kube_system_services.py new file mode 100644 index 0000000..6a5b997 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/z_containers/test_kube_system_services.py @@ -0,0 +1,93 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import re + +from pytest import mark, skip + +from keywords import kube_helper, system_helper, host_helper +from consts.stx import PodStatus, HostAvailState +from utils.tis_log import LOG + + +def check_host(controller): + host = system_helper.get_active_controller_name() + if controller == 'standby': + controllers = system_helper.get_controllers( + availability=(HostAvailState.AVAILABLE, HostAvailState.DEGRADED, + HostAvailState.ONLINE)) + controllers.remove(host) + if not controllers: + skip('Standby controller does not exist or not in good state') + host = controllers[0] + return host + + +@mark.platform_sanity +@mark.parametrize('controller', [ + 'active', + 'standby' +]) +def test_kube_system_services(controller): + """ + Test kube-system pods are deployed and running + + Test Steps: + - ssh to given controller + - Check all kube-system pods are running + - Check kube-system services displayed: 'calico-typha', + 'kube-dns', 'tiller-deploy' + - Check kube-system deployments displayed: 'calico-typha', + 'coredns', 'tiller-deploy' + + """ + host = check_host(controller=controller) + + with host_helper.ssh_to_host(hostname=host) as con_ssh: + + kube_sys_pods_values = kube_helper.get_resources( + field=('NAME', 'STATUS'), resource_type='pod', + namespace='kube-system', con_ssh=con_ssh) + kube_sys_services = kube_helper.get_resources( + resource_type='service', namespace='kube-system', con_ssh=con_ssh) + kube_sys_deployments = kube_helper.get_resources( + resource_type='deployment.apps', namespace='kube-system', + con_ssh=con_ssh) + + LOG.tc_step("Check kube-system pods status on {}".format(controller)) + # allow max 1 coredns pending on aio-sx + coredns_pending = False if system_helper.is_aio_simplex() else True + for pod_info in kube_sys_pods_values: + pod_name, pod_status = pod_info + if not coredns_pending and 'coredns-' in pod_name and \ + pod_status == PodStatus.PENDING: + coredns_pending = True + continue + + valid_status = PodStatus.RUNNING + if re.search('audit-|init-', pod_name): + valid_status = PodStatus.COMPLETED + + if pod_status not in valid_status: + kube_helper.wait_for_pods_status(pod_names=pod_name, + status=valid_status, + namespace='kube-system', + con_ssh=con_ssh, timeout=300) + + services = ('kube-dns', 'tiller-deploy') + LOG.tc_step("Check kube-system services on {}: {}".format(controller, + services)) + for service in services: + assert service in kube_sys_services, \ + "{} not in kube-system service table".format(service) + + deployments = ('calico-kube-controllers', 'coredns', 'tiller-deploy') + LOG.tc_step("Check kube-system deployments on {}: " + "{}".format(controller, deployments)) + for deployment in deployments: + assert deployment in kube_sys_deployments, \ + "{} not in kube-system deployment.apps table".format(deployment) diff --git a/automated-pytest-suite/testcases/functional/z_containers/test_openstack_services.py b/automated-pytest-suite/testcases/functional/z_containers/test_openstack_services.py new file mode 100644 index 0000000..74b0532 --- /dev/null +++ b/automated-pytest-suite/testcases/functional/z_containers/test_openstack_services.py @@ -0,0 +1,292 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time + +from pytest import skip, mark, fixture + +from keywords import container_helper, system_helper, host_helper, kube_helper +from consts.stx import HostAvailState, PodStatus, AppStatus +from utils.tis_log import LOG + + +def get_valid_controllers(): + controllers = system_helper.get_controllers( + availability=(HostAvailState.AVAILABLE, HostAvailState.DEGRADED, + HostAvailState.ONLINE)) + return controllers + + +def check_openstack_pods_healthy(host, timeout): + with host_helper.ssh_to_host(hostname=host) as con_ssh: + kube_helper.wait_for_pods_healthy(namespace='stx-openstack', + con_ssh=con_ssh, timeout=timeout) + + +@mark.sanity +@mark.sx_sanity +@mark.cpe_sanity +def test_openstack_services_healthy(): + """ + Pre-requisite: + - stx-openstack application exists + + Test steps: + - Check stx-openstack application in applied state via system + application-list + - Check all openstack pods in running or completed state via kubectl get + + """ + LOG.tc_step("Check stx-openstack application is applied") + status = container_helper.get_apps(application='stx-openstack')[0] + if not status: + skip('Openstack application is not uploaded.') + assert status == AppStatus.APPLIED, "stx-openstack is in {} status " \ + "instead of applied".format(status) + + LOG.tc_step("Check openstack pods are in running or completed status via " + "kubectl get on all controllers") + controllers = get_valid_controllers() + for host in controllers: + check_openstack_pods_healthy(host=host, timeout=60) + + +@mark.trylast +@mark.sanity +@mark.sx_sanity +@mark.cpe_sanity +@mark.parametrize('controller', [ + 'controller-0', + 'controller-1' +]) +def test_reapply_stx_openstack_no_change(stx_openstack_required, controller): + """ + Args: + stx_openstack_required: + + Pre-requisite: + - stx-openstack application in applied state + + Test Steps: + - Re-apply stx-openstack application + - Check openstack pods healthy + + """ + if system_helper.is_aio_simplex() and controller != 'controller-0': + skip('Simplex system only has controller-0') + + active, standby = system_helper.get_active_standby_controllers() + if active != controller: + if not standby: + skip('{} is not ready to take over'.format(controller)) + + LOG.tc_step("Swact active controller to test reapply from " + "{}".format(controller)) + host_helper.swact_host() + time.sleep(60) + + LOG.info("helm list before reapply after swact") + from utils.clients.ssh import ControllerClient + con_ssh = ControllerClient.get_active_controller() + end_time = time.time() + 180 + while time.time() < end_time: + code = con_ssh.exec_cmd('helm list', expect_timeout=60)[0] + if code == 0: + break + time.sleep(30) + + LOG.tc_step("Re-apply stx-openstack application") + container_helper.apply_app(app_name='stx-openstack') + + LOG.tc_step("Check openstack pods in good state on all controllers " + "after stx-openstack re-applied") + for host in get_valid_controllers(): + check_openstack_pods_healthy(host=host, timeout=120) + + +NEW_NOVA_COMPUTE_PODS = None + + +@fixture() +def reset_if_modified(request): + if not container_helper.is_stx_openstack_deployed(applied_only=True): + skip('stx-openstack application is not in Applied status. 
Skip test.') + + valid_hosts = get_valid_controllers() + conf_path = '/etc/nova/nova.conf' + + def reset(): + app_name = 'stx-openstack' + post_status = container_helper.get_apps(application=app_name, + field='status')[0] + if not post_status.endswith('ed'): + LOG.fixture_step("Wait for application apply finish") + container_helper.wait_for_apps_status(apps=app_name, + status=AppStatus.APPLIED, + timeout=1800, + check_interval=15, + fail_ok=False) + + user_overrides = container_helper.get_helm_override_values( + chart='nova', namespace='openstack', fields='user_overrides')[0] + if not user_overrides or user_overrides == 'None': + LOG.info("No change in nova user_overrides. Do nothing.") + return + + LOG.fixture_step("Update nova helm-override to reset values") + container_helper.update_helm_override(chart='nova', + namespace='openstack', + reset_vals=True) + user_overrides = container_helper.get_helm_override_values( + chart='nova', namespace='openstack', fields='user_overrides')[0] + assert not user_overrides, "nova helm user_overrides still exist " \ + "after reset-values" + + LOG.fixture_step("Re-apply stx-openstack application and ensure " + "it is applied") + container_helper.apply_app(app_name='stx-openstack', check_first=False, + applied_timeout=1800) + + check_cmd = 'grep foo {}'.format(conf_path) + LOG.fixture_step("Ensure user_override is removed from {} in " + "nova-compute containers".format(conf_path)) + for host in valid_hosts: + with host_helper.ssh_to_host(host) as host_ssh: + LOG.info( + "Wait for nova-cell-setup completed on {}".format(host)) + kube_helper.wait_for_openstack_pods_status( + application='nova', component='cell-setup', + con_ssh=host_ssh, status=PodStatus.COMPLETED) + + LOG.info("Check new release generated for nova compute " + "pods on {}".format(host)) + nova_compute_pods = kube_helper.get_openstack_pods( + field='NAME', application='nova', component='compute', + con_ssh=host_ssh)[0] + nova_compute_pods = sorted(nova_compute_pods) + if NEW_NOVA_COMPUTE_PODS: + assert NEW_NOVA_COMPUTE_PODS != nova_compute_pods, \ + "No new release generated after reset values" + + LOG.info("Check custom conf is removed from {} in nova " + "compute container on {}".format(conf_path, host)) + for nova_compute_pod in nova_compute_pods: + code, output = kube_helper.exec_cmd_in_container( + cmd=check_cmd, pod=nova_compute_pod, fail_ok=True, + con_ssh=host_ssh, namespace='openstack', + container_name='nova-compute') + assert code == 1, \ + "{} on {} still contains user override info after " \ + "reset nova helm-override values and reapply " \ + "stx-openstack app: {}".format(conf_path, host, output) + + request.addfinalizer(reset) + + return valid_hosts, conf_path + + +@mark.trylast +@mark.sanity +@mark.sx_sanity +@mark.cpe_sanity +def test_stx_openstack_helm_override_update_and_reset(reset_if_modified): + """ + Test helm override for openstack nova chart and reset + Args: + reset_if_modified: + + Pre-requisite: + - stx-openstack application in applied state + + Test Steps: + - Update nova helm-override default conf + - Check nova helm-override is updated in system helm-override-show + - Re-apply stx-openstack application and ensure it is applied (in + applied status and alarm cleared) + - On all controller(s): + - Check nova compute pods names are changed in kubectl get + - Check actual nova-compute.conf is updated in all nova-compute + containers + + Teardown: + - Update nova helm-override to reset values + - Re-apply stx-openstack application and ensure it is applied + + """ + 
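+    # Clarifying note: 'conf.nova.DEFAULT.foo=bar' is only a marker override;
+    # the test later greps for 'foo' in /etc/nova/nova.conf inside the
+    # nova-compute containers to confirm the helm override was rendered after
+    # the application re-apply.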
valid_hosts, conf_path = reset_if_modified + new_conf = 'conf.nova.DEFAULT.foo=bar' + + LOG.tc_step("Update nova helm-override: {}".format(new_conf)) + container_helper.update_helm_override( + chart='nova', namespace='openstack', + kv_pairs={'conf.nova.DEFAULT.foo': 'bar'}) + + LOG.tc_step("Check nova helm-override is updated in system " + "helm-override-show") + fields = ('combined_overrides', 'system_overrides', 'user_overrides') + combined_overrides, system_overrides, user_overrides = \ + container_helper.get_helm_override_values(chart='nova', + namespace='openstack', + fields=fields) + + assert 'bar' == \ + user_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \ + "{} is not shown in user overrides".format(new_conf) + assert 'bar' == \ + combined_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \ + "{} is not shown in combined overrides".format(new_conf) + assert not system_overrides['conf']['nova'].get('DEFAULT', {}).get('foo'), \ + "User override {} listed in system overrides " \ + "unexpectedly".format(new_conf) + + prev_nova_cell_setup_pods = kube_helper.get_openstack_pods( + application='nova', component='cell-setup', fail_ok=False) + prev_count = len(prev_nova_cell_setup_pods) + prev_nova_compute_pods = sorted(kube_helper.get_openstack_pods( + application='nova', component='compute')) + + LOG.tc_step("Re-apply stx-openstack application and ensure it is applied") + container_helper.apply_app(app_name='stx-openstack', check_first=False, + applied_timeout=1800, fail_ok=False, + check_interval=10) + + post_names = None + for host in valid_hosts: + with host_helper.ssh_to_host(hostname=host) as host_ssh: + LOG.tc_step("Wait for all nova-cell-setup pods reach completed " + "status on {}".format(host)) + kube_helper.wait_for_openstack_pods_status( + application='nova', component='cell-setup', + status=PodStatus.COMPLETED, con_ssh=host_ssh) + + LOG.tc_step("Check nova compute pods names are changed in kubectl " + "get on {}".format(host)) + post_nova_cell_setup_pods = kube_helper.get_openstack_pods( + application='nova', component='cell-setup', con_ssh=host_ssh) + post_nova_compute_pods = sorted(kube_helper.get_openstack_pods( + application='nova', component='compute', con_ssh=host_ssh)) + + assert prev_count + 1 == len(post_nova_cell_setup_pods), \ + "No new nova cell setup pod created" + if post_names: + assert post_nova_compute_pods == post_names, \ + "nova compute pods names differ on two controllers" + else: + post_names = post_nova_compute_pods + assert prev_nova_compute_pods != post_names, \ + "No new release generated for nova compute pods" + + LOG.tc_step("Check actual {} is updated in nova-compute " + "containers on {}".format(conf_path, host)) + check_cmd = 'grep foo {}'.format(conf_path) + for nova_compute_pod in post_nova_compute_pods: + kube_helper.exec_cmd_in_container(cmd=check_cmd, + pod=nova_compute_pod, + fail_ok=False, + con_ssh=host_ssh, + namespace='openstack', + container_name='nova-compute') diff --git a/automated-pytest-suite/testcases/rest/rest_test_helper.py b/automated-pytest-suite/testcases/rest/rest_test_helper.py new file mode 100644 index 0000000..7f62582 --- /dev/null +++ b/automated-pytest-suite/testcases/rest/rest_test_helper.py @@ -0,0 +1,41 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import skip + +from utils.tis_log import LOG + + +def get(rest_client, resource, auth=True): + """ + Test GET of with valid authentication. 
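+    Behaviour (per the implementation below): a 404 response causes the
+    calling test to be skipped as unsupported in this configuration; any
+    other non-200 status code fails the assertion.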
+ + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + message = "Using requests GET {} with proper authentication" + LOG.info(message.format(resource)) + + status_code, text = rest_client.get(resource=resource, auth=auth) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + + if status_code == 404: + skip("Unsupported resource in this configuration.") + else: + LOG.info("Determine if expected status_code of 200 is received") + message = "Expected status_code of 200 - received {} and message {}" + assert status_code == 200, message.format(status_code, text) diff --git a/automated-pytest-suite/testcases/rest/test_GET_adversarial.py b/automated-pytest-suite/testcases/rest/test_GET_adversarial.py new file mode 100755 index 0000000..e43c7c8 --- /dev/null +++ b/automated-pytest-suite/testcases/rest/test_GET_adversarial.py @@ -0,0 +1,98 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import pytest + +from utils.tis_log import LOG +from utils.rest import Rest +from keywords import system_helper +import string + + +@pytest.fixture(scope='module') +def sysinv_rest(): + r = Rest('sysinv', platform=True) + return r + + +def test_GET_ihosts_host_id_shortUUID(sysinv_rest): + """ + Test GET of with valid authentication and upper + case UUID values. + RFC 4122 covers the need for uppercase UUID values + + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + path = "/ihosts/{}/addresses" + r = sysinv_rest + LOG.info(path) + LOG.info(system_helper.get_hosts()) + for host in system_helper.get_hosts(): + uuid = system_helper.get_host_values(host, 'uuid')[0] + LOG.info("host: {} uuid: {}".format(host, uuid)) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(path)) + + short_uuid = uuid[:-1] + status_code, text = r.get(resource=path.format(short_uuid), + auth=True) + message = "Retrieved: status_code: {} message: {}" + LOG.info(message.format(status_code, text)) + LOG.tc_step("Determine if expected code of 400 is received") + message = "Expected code of 400 - received {} and message {}" + assert status_code == 400, message.format(status_code, text) + + +def test_GET_ihosts_host_id_invalidUUID(sysinv_rest): + """ + Test GET of with valid authentication and upper + case UUID values. 
+ RFC 4122 covers the need for uppercase UUID values + + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + path = "/ihosts/{}/addresses" + r = sysinv_rest + LOG.info(path) + LOG.info(system_helper.get_hosts()) + for host in system_helper.get_hosts(): + uuid = system_helper.get_host_values(host, 'uuid')[0] + LOG.info("host: {} uuid: {}".format(host, uuid)) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(path)) + + # shift a->g, b->h, etc - all to generate invalid uuid + shifted_uuid = \ + ''.join(map(lambda x: chr((ord(x) - ord('a') + 6) % 26 + ord( + 'a')) if x in string.ascii_lowercase else x, uuid.lower())) + status_code, text = r.get(resource=path.format(shifted_uuid), + auth=True) + message = "Retrieved: status_code: {} message: {}" + LOG.info(message.format(status_code, text)) + LOG.tc_step("Determine if expected code of 400 is received") + message = "Expected code of 400 - received {} and message {}" + assert status_code == 400, message.format(status_code, text) diff --git a/automated-pytest-suite/testcases/rest/test_GET_good_authentication.py b/automated-pytest-suite/testcases/rest/test_GET_good_authentication.py new file mode 100755 index 0000000..44138d6 --- /dev/null +++ b/automated-pytest-suite/testcases/rest/test_GET_good_authentication.py @@ -0,0 +1,67 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import pytest +from utils.tis_log import LOG +from utils.rest import Rest + +from testcases.rest import rest_test_helper + + +@pytest.fixture(scope='module') +def sysinv_rest(): + r = Rest('sysinv', platform=True) + return r + + +@pytest.mark.parametrize( + 'operation,resource', [ + ('GET', '/addrpools'), + ('GET', '/ceph_mon'), + ('GET', '/clusters'), + ('GET', '/controller_fs'), + ('GET', '/drbdconfig'), + ('GET', '/event_log'), + ('GET', '/event_suppression'), + ('GET', '/health'), + ('GET', '/health/upgrade'), + ('GET', '/ialarms'), + ('GET', '/icommunity'), + ('GET', '/idns'), + ('GET', '/iextoam'), + ('GET', '/ihosts'), + ('GET', '/ihosts/bulk_export'), + ('GET', '/iinfra'), + ('GET', '/intp'), + ('GET', '/ipm'), + ('GET', '/iprofiles'), + ('GET', '/istorconfig'), + ('GET', '/isystems'), + ('GET', '/itrapdest'), + ('GET', '/lldp_agents'), + ('GET', '/lldp_neighbors'), + ('GET', '/loads'), + ('GET', '/networks'), + ('GET', '/remotelogging'), + ('GET', '/sdn_controller'), + ('GET', '/servicegroup'), + ('GET', '/servicenodes'), + ('GET', '/service_parameter'), + ('GET', '/services'), + ('GET', '/storage_backend'), + ('GET', '/storage_backend/usage'), + ('GET', '/storage_ceph'), + ('GET', '/storage_lvm'), + # ('GET', '/tpmconfig'), + ('GET', '/upgrade'), + ('GET', '/') + ] +) +def test_good_authentication(sysinv_rest, operation, resource): + if operation == "GET": + LOG.info("getting... {}".format(resource)) + rest_test_helper.get(sysinv_rest, resource=resource) diff --git a/automated-pytest-suite/testcases/rest/test_GET_ihosts_host_id_valid.py b/automated-pytest-suite/testcases/rest/test_GET_ihosts_host_id_valid.py new file mode 100755 index 0000000..b8a1871 --- /dev/null +++ b/automated-pytest-suite/testcases/rest/test_GET_ihosts_host_id_valid.py @@ -0,0 +1,72 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import re +import pytest + +from utils.tis_log import LOG +from utils.rest import Rest +from keywords import system_helper + + +@pytest.fixture(scope='module') +def sysinv_rest(): + r = Rest('sysinv', platform=True) + return r + + +@pytest.mark.parametrize( + 'path', [ + '/ihosts/-/addresses', + '/ihosts/-/idisks', + '/ihosts/-/ilvgs', + '/ihosts/-/imemories', + '/ihosts/-/ipvs', + '/ihosts/-/isensors', + '/ihosts/-/isensorgroups', + '/ihosts/-/istors', + '/ihosts/-/pci_devices', + '/ihosts/-/routes', + '/ihosts/-', + ] +) +def test_GET_various_host_id_valid(sysinv_rest, path): + """ + Test GET of with valid authentication. + + Args: + sysinv_rest + path + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = re.sub("-", "{}", path) + LOG.info(path) + LOG.info(system_helper.get_hosts()) + for host in system_helper.get_hosts(): + uuid = system_helper.get_host_values(host, 'uuid')[0] + res = path.format(uuid) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=True) + message = "Retrieved: status_code: {} message: {}" + LOG.info(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of 200 is received" + LOG.tc_step(message) + message = "Expected code of 200 - received {} and message {}" + assert status_code == 200, message.format(status_code, text) diff --git a/automated-pytest-suite/testcases/rest/test_GET_ports_valid.py b/automated-pytest-suite/testcases/rest/test_GET_ports_valid.py new file mode 100755 index 0000000..3b1e263 --- /dev/null +++ b/automated-pytest-suite/testcases/rest/test_GET_ports_valid.py @@ -0,0 +1,535 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import pytest + +from utils.tis_log import LOG +from utils.rest import Rest +from keywords import system_helper, host_helper, storage_helper + + +@pytest.fixture(scope='module') +def sysinv_rest(): + r = Rest('sysinv', platform=True) + return r + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_networks_valid(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. 
+ + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/networks/{}" + if resource_valid: + network_list = system_helper.get_system_networks() + else: + network_list = ['ffffffffffff-ffff-ffff-ffff-ffffffffffff'] + + for network in network_list: + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(path)) + res = path.format(network) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_clusters_valid(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. + + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/clusters/{}" + if resource_valid: + cluster_list = system_helper.get_clusters() + else: + cluster_list = ['ffffffff-ffff-ffff-ffff-ffffffffffff'] + + for cluster in cluster_list: + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(path)) + res = path.format(cluster) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_ialarms_valid(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. 
+ + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/ialarms/{}" + if resource_valid: + alarm_list = system_helper.get_alarms_table() + else: + alarm_list = {'values': [['ffffffff-ffff-ffff-ffff-ffffffffffff']]} + + for alarm in alarm_list['values']: + alarm_uuid = alarm + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(path)) + res = path.format(alarm_uuid) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = \ + "Expected code of expected_status - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_devices(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. + + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/devices/{}" + + hostnames = system_helper.get_hosts() + for host in hostnames: + res = path.format(host) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, message.format( + expected_status, status_code, text) + + +def test_GET_idisks(sysinv_rest): + """ + Test GET of with valid authentication. 
+ + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/idisks/{}" + hostnames = system_helper.get_hosts() + for host in hostnames: + disk_uuids = storage_helper.get_host_disks(host) + for disk_uuid in disk_uuids: + res = path.format(disk_uuid) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=True) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of 200 is received" + LOG.tc_step(message) + message = "Expected code of 200 - received {} and message {}" + assert status_code == 200, message.format(status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_lldp_agents(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. + + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/lldp_agents/{}" + hostnames = system_helper.get_hosts() + for host in hostnames: + LOG.info(host) + if resource_valid: + lldp_table = host_helper.get_host_lldp_agents(host) + else: + lldp_table = ['ffffffff-ffff-ffff-ffff-ffffffffffff'] + LOG.info(lldp_table) + for lldp_uuid in lldp_table: + res = path.format(lldp_uuid) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, message.format( + expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_lldp_neighbors(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. 
+ + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/lldp_neighbors/{}" + hostnames = system_helper.get_hosts() + for host in hostnames: + LOG.info(host) + if resource_valid: + lldp_table = host_helper.get_host_lldp_neighbors(host) + else: + lldp_table = ['ffffffff-ffff-ffff-ffff-ffffffffffff'] + LOG.info(lldp_table) + for lldp_uuid in lldp_table: + res = path.format(lldp_uuid) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_services(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. + + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/services/{}" + if resource_valid: + service_list = system_helper.get_services() + else: + service_list = ['ffffffff-ffff-ffff-ffff-ffffffffffff'] + for service in service_list: + LOG.info(service) + res = path.format(service) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +] + ) +def test_GET_servicenodes(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. 
+ + Args: + n/a + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/servicenodes/{}" + if resource_valid: + service_list = system_helper.get_servicenodes() + else: + service_list = ['ffffffff-ffff-ffff-ffff-ffffffffffff'] + for service in service_list: + LOG.info(service) + res = path.format(service) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_servicegroup(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. + + Args: + authorize_valid - whether to use authentication or not + resource_valid - whether the pathvariable is valid or not + expected_status - what status is expected + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/servicegroup/{}" + if resource_valid: + service_list = system_helper.get_servicegroups() + else: + service_list = ['ffffffff-ffff-ffff-ffff-ffffffffffff'] + for service in service_list: + LOG.info(service) + res = path.format(service) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) + + +@pytest.mark.parametrize(('authorize_valid', 'resource_valid', + 'expected_status'), [ + (True, True, 200), + (True, False, 400), + (False, True, 401) +]) +def test_GET_service_parameter(sysinv_rest, authorize_valid, resource_valid, + expected_status): + """ + Test GET of with valid authentication. 
+ + Args: + authorize_valid - whether to use authentication or not + resource_valid - whether the pathvariable is valid or not + expected_status - what status is expected + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET with proper authentication + - Determine if expected status_code of 200 is received + Test Teardown: + n/a + """ + r = sysinv_rest + path = "/service_parameter/{}" + if resource_valid: + service_list = system_helper.get_service_parameter_values(field='uuid') + else: + service_list = ['ffffffff-ffff-ffff-ffff-ffffffffffff'] + for service in service_list: + LOG.info(service) + res = path.format(service) + message = "Using requests GET {} with proper authentication" + LOG.tc_step(message.format(res)) + status_code, text = r.get(resource=res, auth=authorize_valid) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + if status_code == 404: + pytest.skip("Unsupported resource in this configuration.") + else: + message = "Determine if expected code of {} is received" + LOG.tc_step(message.format(expected_status)) + message = "Expected code of {} - received {} and message {}" + assert status_code == expected_status, \ + message.format(expected_status, status_code, text) diff --git a/automated-pytest-suite/testcases/rest/test_bad_authentication.py b/automated-pytest-suite/testcases/rest/test_bad_authentication.py new file mode 100755 index 0000000..541a817 --- /dev/null +++ b/automated-pytest-suite/testcases/rest/test_bad_authentication.py @@ -0,0 +1,349 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import pytest +from utils.tis_log import LOG +from utils.rest import Rest + + +@pytest.fixture(scope='module') +def sysinv_rest(): + r = Rest('sysinv', platform=True) + return r + + +def normalize(resource_name): + import re + if resource_name: + return re.sub(r'__((\w|_)+)__', r'{\1}', resource_name) + return '' + + +def attempt(operation, resource): + print("Testing {} with {}".format(operation, resource)) + if operation == operation: + return True + else: + return False + + +def get(sysinv_rest, resource): + """ + Test GET of with invalid authentication. + + Args: + sysinv_rest + resource + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests GET without proper authentication + - Determine if expected status_code of 401 is received + Test Teardown: + n/a + """ + message = "Using requests GET {} without proper authentication" + LOG.tc_step(message.format(resource)) + + status_code, text = sysinv_rest.get(resource=resource, auth=False) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + + LOG.tc_step("Determine if expected status_code of 401 is received") + message = "Expected status_code of 401 - received {} and message {}" + assert status_code == 401, message.format(status_code, text) + + +def delete(sysinv_rest, resource): + """ + Test DELETE of with invalid authentication. 
+ + Args: + sysinv_rest + resource + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests DELETE without proper authentication + - Determine if expected status_code of 401 is received + Test Teardown: + n/a + """ + r = sysinv_rest + LOG.tc_step("DELETE without proper authentication") + status_code, text = r.delete(resource=resource, auth=False) + LOG.info("Retrieved: status_code: {} message: {}".format(status_code, text)) + LOG.tc_step("Determine if expected status_code of 401 is received") + + message = "Expected status_code of 401 - received {} and message {}" + assert status_code == 401, message.format(status_code, text) + + +def post(sysinv_rest, resource): + """ + Test POST of with invalid authentication. + + Args: + sysinv_rest + resource + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests POST + - Determine if expected status_code of 401 is received + Test Teardown: + n/a + """ + LOG.tc_step("POST {}".format(resource)) + status_code, text = sysinv_rest.post(resource=resource, json_data={}, + auth=False) + message = "Retrieved: status_code: {} message: {}" + LOG.info(message.format(status_code, text)) + LOG.tc_step("Determine if expected_code of 401 is received") + message = "Expected code of 401 - received {} and message {}" + assert status_code == 401, \ + message.format(status_code, text) + + +def patch(sysinv_rest, resource): + """ + Test PATCH of with invalid authentication. + + Args: + sysinv_rest + resource + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests PATCH without proper authentication + - Determine if expected status_code of 401 is received + Test Teardown: + n/a + """ + LOG.tc_step("PATCH {} with bad authentication".format(resource)) + status_code, text = sysinv_rest.patch(resource=resource, json_data={}, + auth=False) + + message = "Retrieved: status_code: {} message: {}" + LOG.info(message.format(status_code, text)) + LOG.tc_step("Determine if expected status_code of 401 is received") + message = "Expected code of 401 - received {} and message {}" + assert status_code == 401, message.format(status_code, text) + + +def put(sysinv_rest, resource): + """ + Test PUT of with invalid authentication. 
+ + Args: + sysinv_rest + resource + + Prerequisites: system is running + Test Setups: + n/a + Test Steps: + - Using requests PUT without proper authentication + - Determine if expected status_code of 401 is received + Test Teardown: + n/a + """ + r = sysinv_rest + LOG.tc_step("PUT {} with bad authentication".format(resource)) + status_code, text = r.put(resource=resource, + json_data={}, auth=False) + message = "Retrieved: status_code: {} message: {}" + LOG.debug(message.format(status_code, text)) + + LOG.tc_step("Determine if expected status_code of 401 is received") + message = "Expected code of 401 - received {} and message {}" + assert status_code == 401, message.format(status_code, text) + + +@pytest.mark.parametrize( + 'operation,resource', [ + ('DELETE', '/addrpools/__pool_id__'), + ('DELETE', '/ialarms/__alarm_uuid__'), + ('DELETE', '/icommunity/__community_id__'), + ('DELETE', '/ihosts/__host_id__/addresses/__address_id__'), + ('DELETE', '/ihosts/__host_id__'), + ('DELETE', '/ihosts/__host_id__/routes/__route_id__'), + ('DELETE', '/iinterfaces/__interface_id__'), + ('DELETE', '/ilvgs/__volumegroup_id__'), + ('DELETE', '/iprofiles/__profile_id__'), + ('DELETE', '/ipvs/__physicalvolume_id__'), + ('DELETE', '/istors/__stor_id__'), + ('DELETE', '/itrapdest/__trapdest_id__'), + ('DELETE', '/loads/__load_id__'), + ('DELETE', '/sdn_controller/__controller_id__'), + ('DELETE', '/service_parameter/__parameter_id__'), + ('DELETE', '/tpmconfig/__tpmconfig_id__'), + ('DELETE', '/upgrade'), + ('GET', '/addrpools'), + ('GET', '/addrpools/__pool_id__'), + ('GET', '/ceph_mon'), + ('GET', '/ceph_mon/__ceph_mon_id__'), + ('GET', '/clusters'), + ('GET', '/clusters/__uuid__'), + ('GET', '/controller_fs'), + ('GET', '/devices/__device_id__'), + ('GET', '/drbdconfig'), + ('GET', '/event_log'), + ('GET', '/event_log/__log_uuid__'), + ('GET', '/event_suppression'), + ('GET', '/health'), + ('GET', '/health/upgrade'), + ('GET', '/ialarms/__alarm_uuid__'), + ('GET', '/ialarms'), + ('GET', '/icommunity'), + ('GET', '/icommunity/__community_id__'), + ('GET', '/icpus/__cpu_id__'), + ('GET', '/idisks/__disk_id__'), + ('GET', '/idns'), + ('GET', '/iextoam'), + ('GET', '/ihosts'), + ('GET', '/ihosts/bulk_export'), + ('GET', '/ihosts/__host_id__/addresses/__address_id__'), + ('GET', '/ihosts/__host_id__/addresses'), + ('GET', '/ihosts/__host_id__'), + ('GET', '/ihosts/__host_id__/idisks'), + ('GET', '/ihosts/__host_id__/ilvgs'), + ('GET', '/ihosts/__host_id__/imemorys'), + ('GET', '/ihosts/__host_id__/ipvs'), + ('GET', '/ihosts/__host_id__/isensorgroups'), + ('GET', '/ihosts/__host_id__/isensors'), + ('GET', '/ihosts/__host_id__/istors'), + ('GET', '/ihosts/__host_id__/pci_devices'), + ('GET', '/ihosts/__host_id__/routes'), + ('GET', '/ihosts/__host_id__/routes/__route_id__'), + ('GET', '/iinfra'), + ('GET', '/iinterfaces/__interface_id__'), + ('GET', '/ilvgs/__volumegroup_id__'), + ('GET', '/imemorys/__memory_id__'), + ('GET', '/intp'), + ('GET', '/ipm'), + ('GET', '/iprofiles'), + ('GET', '/iprofiles/__profile_id__'), + ('GET', '/iprofiles/__profile_id__/icpus'), + ('GET', '/iprofiles/__profile_id__/iinterfaces'), + ('GET', '/iprofiles/__profile_id__/ports'), + ('GET', '/ipvs/__physicalvolume_id__'), + ('GET', '/isensorgroups/__sensorgroup_id__'), + ('GET', '/isensors/__sensor_id__'), + ('GET', '/istorconfig'), + ('GET', '/istors/__stor_id__'), + ('GET', '/isystems'), + ('GET', '/itrapdest'), + ('GET', '/itrapdest/__trapdest_id__'), + ('GET', '/lldp_agents'), + ('GET', '/lldp_agents/__lldp_agent_id__'), + 
('GET', '/lldp_neighbors'), + ('GET', '/lldp_neighbors/__lldp_neighbor_id__'), + ('GET', '/loads'), + ('GET', '/loads/__load_id__'), + ('GET', '/networks'), + ('GET', '/networks/__network_id__'), + ('GET', '/ports/__port_id__'), + ('GET', '/remotelogging'), + ('GET', '/sdn_controller'), + ('GET', '/sdn_controller/__controller_id__'), + ('GET', '/servicegroup'), + ('GET', '/servicegroup/__servicegroup_id__'), + ('GET', '/servicenodes'), + ('GET', '/servicenodes/__node_id__'), + ('GET', '/service_parameter'), + ('GET', '/service_parameter/__parameter_id__'), + ('GET', '/services'), + ('GET', '/services/__service_id__'), + ('GET', '/storage_backend'), + ('GET', '/storage_backend/usage'), + ('GET', '/storage_ceph'), + ('GET', '/storage_lvm'), + ('GET', '/tpmconfig'), + ('GET', '/upgrade'), + ('PATCH', '/addrpools/__pool_id__'), + ('PATCH', '/ceph_mon/__ceph_mon_id__'), + ('PATCH', '/controller_fs/__controller_fs_id__'), + ('PATCH', '/devices/__device_id__'), + ('PATCH', '/drbdconfig/__drbdconfig_id__'), + ('PATCH', '/event_suppression/__event_suppression_uuid__'), + ('PATCH', '/icommunity/__community_id__'), + ('PATCH', '/idns/__dns_id__'), + ('PATCH', '/iextoam/__extoam_id__'), + ('PATCH', '/ihosts/__host_id__'), + ('PATCH', '/ihosts/__host_id__'), + ('PATCH', '/iinfra/__infra_id__'), + ('PATCH', '/iinterfaces/__interface_id__'), + ('PATCH', '/ilvgs/__volumegroup_id__'), + ('PATCH', '/imemorys/__memory_id__'), + ('PATCH', '/intp/__ntp_id__'), + ('PATCH', '/ipm/__pm_id__'), + ('PATCH', '/isensorgroups/__sensorgroup_id__'), + ('PATCH', '/isensors/__sensor_id__'), + ('PATCH', '/istors/__stor_id__'), + ('PATCH', '/isystems'), + ('PATCH', '/itrapdest/__trapdest_id__'), + ('PATCH', '/remotelogging/__remotelogging_id__'), + ('PATCH', '/sdn_controller/__controller_id__'), + ('PATCH', '/service_parameter/__parameter_id__'), + ('PATCH', '/services/__service_name__'), + ('PATCH', '/storage_ceph/__storage_ceph_id__'), + ('PATCH', '/storage_lvm/__storage_lvm_id__'), + ('PATCH', '/tpmconfig/__tpmconfig_id__'), + ('PATCH', '/upgrade'), + ('POST', '/addrpools'), + ('POST', '/firewallrules/import_firewall_rules'), + ('POST', '/icommunity'), + ('POST', '/ihosts'), + ('POST', '/ihosts/bulk_add'), + ('POST', '/ihosts/__host_id__/addresses'), + ('POST', '/ihosts/__host_id__/downgrade'), + ('POST', '/ihosts/__host_id__/iinterfaces'), + ('POST', '/ihosts/__host_id__/istors'), + ('POST', '/ihosts/__host_id__/routes'), + ('POST', '/ihosts/__host_id__/upgrade'), + ('POST', '/iinfra'), + ('POST', '/ilvgs'), + ('POST', '/iprofiles'), + ('POST', '/ipvs'), + ('POST', '/itrapdest'), + ('POST', '/loads/import_load'), + ('POST', '/sdn_controller'), + ('POST', '/service_parameter/apply'), + ('POST', '/service_parameter'), + ('POST', '/storage_ceph'), + ('POST', '/tpmconfig'), + ('POST', '/upgrade'), + ('PUT', '/ihosts/__host_id__/state/host_cpus_modify') + ] +) +def test_bad_authentication(sysinv_rest, operation, resource): + resource = normalize(resource) + + if operation == "GET": + LOG.info("getting... {}".format(resource)) + get(sysinv_rest, resource) + elif operation == "DELETE": + LOG.info("deleting... {}".format(resource)) + delete(sysinv_rest, resource) + elif operation == "PATCH": + LOG.info("patching... {} {}".format(operation, resource)) + patch(sysinv_rest, resource) + elif operation == "POST": + LOG.info("posting... {} {}".format(operation, resource)) + post(sysinv_rest, resource) + elif operation == "PUT": + LOG.info("putting... 
{} {}".format(operation, resource)) + put(sysinv_rest, resource) diff --git a/automated-pytest-suite/testcases/rest/test_rest_fm.py b/automated-pytest-suite/testcases/rest/test_rest_fm.py new file mode 100644 index 0000000..94f33d7 --- /dev/null +++ b/automated-pytest-suite/testcases/rest/test_rest_fm.py @@ -0,0 +1,28 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture, mark +from utils.tis_log import LOG +from utils.rest import Rest + +from testcases.rest import rest_test_helper + + +@fixture(scope='module') +def fm_rest(): + r = Rest('fm', platform=True) + return r + + +@mark.parametrize('resource', ( + '/alarms', + '/event_suppression', + '/invalid_resource' +)) +def test_rest_fm_get(resource, fm_rest): + LOG.tc_step("Get fm resource {}".format(resource)) + rest_test_helper.get(rest_client=fm_rest, resource=resource) diff --git a/automated-pytest-suite/testcases/rest/test_rest_gnocchi.py b/automated-pytest-suite/testcases/rest/test_rest_gnocchi.py new file mode 100755 index 0000000..7664f7a --- /dev/null +++ b/automated-pytest-suite/testcases/rest/test_rest_gnocchi.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import pytest +from utils.tis_log import LOG +from utils.rest import Rest + +from testcases.rest import rest_test_helper + + +@pytest.fixture(scope='module') +def gnocchi_rest(): + r = Rest('gnocchi', platform=False) + return r + + +@pytest.mark.parametrize(('operation', 'resource'), [ + ('GET', '/v1/metric?limit=2'), + ('GET', '/v1/resource'), + ('GET', '/v1/resource_type'), + ('GET', '/') +]) +def test_rest_gnocchi(gnocchi_rest, operation, resource): + if operation == "GET": + LOG.info("getting... {}".format(resource)) + rest_test_helper.get(gnocchi_rest, resource=resource) diff --git a/automated-pytest-suite/testcases/system_config/__init__.py b/automated-pytest-suite/testcases/system_config/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/automated-pytest-suite/testcases/system_config/conftest.py b/automated-pytest-suite/testcases/system_config/conftest.py new file mode 100755 index 0000000..dca9e9c --- /dev/null +++ b/automated-pytest-suite/testcases/system_config/conftest.py @@ -0,0 +1,6 @@ +# Do NOT remove following imports. Needed for test fixture discovery purpose +from testfixtures.resource_mgmt import delete_resources_func, \ + delete_resources_class, delete_resources_module +from testfixtures.recover_hosts import hosts_recover_func, \ + hosts_recover_class, hosts_recover_module +from testfixtures.verify_fixtures import * diff --git a/automated-pytest-suite/testcases/system_config/test_config_host_storage_backing.py b/automated-pytest-suite/testcases/system_config/test_config_host_storage_backing.py new file mode 100755 index 0000000..6377f1e --- /dev/null +++ b/automated-pytest-suite/testcases/system_config/test_config_host_storage_backing.py @@ -0,0 +1,194 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import mark, fixture + +from utils.tis_log import LOG +from testfixtures.recover_hosts import HostsToRecover +from keywords import host_helper, vm_helper + + +@fixture(autouse=True) +def check_alarms(): + pass + + +@mark.parametrize(('instance_backing', 'number_of_hosts'), [ + ('image', 'two'), + ('image', 'one'), + ('image', 'all'), + ('remote', 'two'), + ('remote', 'one'), + ('remote', 'all'), +]) +def test_set_hosts_storage_backing_min(instance_backing, number_of_hosts): + """ + Modify hosts storage backing if needed so that system has minimal number + of hosts in given instance backing + + Args: + instance_backing: + number_of_hosts: + + Test Steps: + - Calculate the hosts to be configured based on test params + - Configure hosts to meet given criteria + - Check number of hosts in given instance backing is as specified + + """ + LOG.tc_step("Determine the hosts to configure") + hosts = host_helper.get_up_hypervisors() + hosts_len = len(hosts) + host_num_mapping = { + 'all': hosts_len, + 'two': 2, + 'one': 1 + } + number_of_hosts = host_num_mapping[number_of_hosts] + + hosts_with_backing = \ + host_helper.get_hosts_in_storage_backing(instance_backing) + if len(hosts_with_backing) >= number_of_hosts: + LOG.info("Already have {} hosts in {} backing. Do " + "nothing".format(len(hosts_with_backing), instance_backing)) + return + + candidate_hosts = get_candidate_hosts(number_of_hosts=number_of_hosts) + + number_to_config = number_of_hosts - len(hosts_with_backing) + hosts_to_config = list(set(candidate_hosts) - + set(hosts_with_backing))[0:number_to_config] + + LOG.tc_step("Delete vms if any to prepare for system configuration " + "change with best effort") + vm_helper.delete_vms(fail_ok=True) + + LOG.tc_step("Configure following hosts to {} backing: " + "{}".format(hosts_to_config, instance_backing)) + for host in hosts_to_config: + HostsToRecover.add(host) + host_helper.set_host_storage_backing(host=host, + inst_backing=instance_backing, + unlock=False, + wait_for_configured=False) + + host_helper.unlock_hosts(hosts_to_config, check_hypervisor_up=True, + fail_ok=False) + + LOG.tc_step("Waiting for hosts in {} aggregate".format(instance_backing)) + for host in hosts_to_config: + host_helper.wait_for_host_in_instance_backing( + host, storage_backing=instance_backing) + + LOG.tc_step("Check number of {} hosts is at least " + "{}".format(instance_backing, number_of_hosts)) + assert number_of_hosts <= \ + len(host_helper.get_hosts_in_storage_backing(instance_backing)), \ + "Number of {} hosts is less than {} after " \ + "configuration".format(instance_backing, number_of_hosts) + + +def get_candidate_hosts(number_of_hosts): + + candidate_hosts = host_helper.get_up_hypervisors() + hosts_len = len(candidate_hosts) + + if hosts_len < number_of_hosts: + # configure down hosts as well in case not enought up hosts available + extra_num = number_of_hosts - hosts_len + down_hosts = host_helper.get_hypervisors(state='down') + assert len(down_hosts) >= extra_num, \ + "Less than {} hypervisors on system to" \ + " configure".format(number_of_hosts) + candidate_hosts += down_hosts[:extra_num] + + # Following assert should never fail, otherwise automation code needs + # to be checked + assert len(candidate_hosts) >= number_of_hosts, \ + "Not enough hosts available for configuration." 
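+    # candidate_hosts now contains at least number_of_hosts hypervisors
+    # (up hosts first, padded with down hosts when necessary) for the
+    # caller to configure.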
+ + return candidate_hosts + + +@mark.parametrize(('instance_backing', 'number_of_hosts'), [ + ('image', 'two'), + ('image', 'one'), + ('image', 'zero'), + ('remote', 'zero'), + ('remote', 'one'), + ('remote', 'two') +]) +def test_set_hosts_storage_backing_equal(instance_backing, number_of_hosts): + """ + Modify hosts storage backing if needed so that system has exact number + of hosts in given instance backing + + Args: + instance_backing: + number_of_hosts: + + Test Steps: + - Calculate the hosts to be configured based on test params + - Configure hosts to meet given criteria + - Check number of hosts in given instance backing is as specified + + """ + host_num_mapping = { + 'zero': 0, + 'one': 1, + 'two': 2 + } + number_of_hosts = host_num_mapping[number_of_hosts] + LOG.tc_step("Calculate the hosts to be configured based on test params") + candidate_hosts = get_candidate_hosts(number_of_hosts=number_of_hosts) + + hosts_with_backing = \ + host_helper.get_hosts_in_storage_backing(instance_backing) + if len(hosts_with_backing) == number_of_hosts: + LOG.info("Already have {} hosts in {} backing. Do " + "nothing".format(number_of_hosts, instance_backing)) + return + + elif len(hosts_with_backing) < number_of_hosts: + backing_to_config = instance_backing + number_to_config = number_of_hosts - len(hosts_with_backing) + hosts_pool = list(set(candidate_hosts) - set(hosts_with_backing)) + else: + backing_to_config = 'remote' if 'image' in instance_backing else \ + 'local_image' + number_to_config = len(hosts_with_backing) - number_of_hosts + hosts_pool = hosts_with_backing + + LOG.tc_step("Delete vms if any to prepare for system configuration " + "change with best effort") + vm_helper.delete_vms(fail_ok=True) + + hosts_to_config = hosts_pool[0:number_to_config] + LOG.tc_step("Configure following hosts to {} backing: " + "{}".format(hosts_to_config, backing_to_config)) + + for host in hosts_to_config: + host_helper.set_host_storage_backing(host=host, + inst_backing=backing_to_config, + unlock=False, + wait_for_configured=False) + HostsToRecover.add(host) + + host_helper.unlock_hosts(hosts_to_config, check_hypervisor_up=True, + fail_ok=False) + + LOG.tc_step("Waiting for hosts in {} aggregate".format(backing_to_config)) + for host in hosts_to_config: + host_helper.wait_for_host_in_instance_backing( + host, storage_backing=backing_to_config) + + LOG.tc_step("Check number of {} hosts is {}".format(instance_backing, + number_of_hosts)) + assert number_of_hosts == \ + len(host_helper.get_hosts_in_storage_backing(instance_backing)), \ + "Number of {} hosts is not {} after " \ + "configuration".format(instance_backing, number_of_hosts) diff --git a/automated-pytest-suite/testcases/system_config/test_system_cleanup.py b/automated-pytest-suite/testcases/system_config/test_system_cleanup.py new file mode 100755 index 0000000..a1a77f1 --- /dev/null +++ b/automated-pytest-suite/testcases/system_config/test_system_cleanup.py @@ -0,0 +1,19 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from keywords import vm_helper, cinder_helper + + +def test_delete_vms_and_vols(): + """ + Delete vms and volumes on the system. 
+ Usage: normally run before a formal test session (sanity, regression, etc) + starts to ensure a clean system + + """ + vm_helper.delete_vms() + cinder_helper.delete_volumes() diff --git a/automated-pytest-suite/testfixtures/__init__.py b/automated-pytest-suite/testfixtures/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/testfixtures/config_host.py b/automated-pytest-suite/testfixtures/config_host.py new file mode 100644 index 0000000..a953bdd --- /dev/null +++ b/automated-pytest-suite/testfixtures/config_host.py @@ -0,0 +1,101 @@ +from pytest import fixture, mark + +from utils.tis_log import LOG +from keywords import host_helper +from testfixtures.recover_hosts import HostsToRecover + + +@mark.tryfirst +@fixture(scope='module') +def config_host_module(request): + """ + Module level fixture to configure a host. + + Setup: + - Lock a host + - Configure host + - Unlock host + + Teardown (if revert_func is given): + - Lock host + - Run revert_func + - Unlock host + + Args: + request: pytest param. caller of this func. + + Returns (function): config_host_func. + Test or another fixture can execute it to pass the hostname, + modify_func, and revert_func + + Examples: + see 'add_shared_cpu' fixture in nova/test_shared_cpu.py for usage. + + """ + return __config_host_base(scope='module', request=request) + + +@mark.tryfirst +@fixture(scope='class') +def config_host_class(request): + """ + Class level fixture to configure a host. + + Setup: + - Lock a host + - Configure host + - Unlock host + + Teardown (if revert_func is given): + - Lock host + - Run revert_func + - Unlock host + + Args: + request: pytest param. caller of this func. + + Returns (function): config_host_func. + Test or another fixture can execute it to pass the hostname, + modify_func, and revert_func + + Examples: + see 'add_shared_cpu' fixture in nova/test_shared_cpu.py for usage. + + """ + return __config_host_base(scope='class', request=request) + + +def __config_host_base(scope, request): + + def config_host_func(host, modify_func, revert_func=None, *args, **kwargs): + + HostsToRecover.add(host, scope=scope) + LOG.fixture_step("({}) Lock host: {}".format(scope, host)) + host_helper.lock_host(host=host, swact=True) + + # add teardown before running modify (as long as host is locked + # successfully) in case modify or unlock fails. + if revert_func is not None: + def revert_host(): + LOG.fixture_step("({}) Lock host: {}".format(scope, host)) + host_helper.lock_host(host=host, swact=True) + try: + LOG.fixture_step("({}) Execute revert function: {}".format( + scope, revert_func)) + revert_func(host) + finally: + LOG.fixture_step("({}) Unlock host: {}".format(scope, host)) + # Put it in finally block in case revert_func fails - + # host will still be unlocked for other tests. + host_helper.unlock_host(host=host) + + request.addfinalizer(revert_host) + + LOG.fixture_step("({}) Execute modify function: {}".format( + scope, modify_func)) + modify_func(host, *args, **kwargs) + + LOG.fixture_step("({}) Unlock host: {}".format(scope, host)) + host_helper.unlock_host(host=host) + + return config_host_func diff --git a/automated-pytest-suite/testfixtures/fixture_resources.py b/automated-pytest-suite/testfixtures/fixture_resources.py new file mode 100755 index 0000000..bdf68bc --- /dev/null +++ b/automated-pytest-suite/testfixtures/fixture_resources.py @@ -0,0 +1,184 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from copy import deepcopy + +VALID_SCOPES = ['function', 'class', 'module', 'session'] +_RESOURCE_TYPES = ['vm', 'volume', 'volume_type', 'volume_qos', + 'flavor', 'image', 'server_group', 'router', + 'subnet', 'floating_ip', 'heat_stack', 'port', + 'trunk', 'network', 'security_group_rule' + 'security_group', 'network_qos', 'vol_snapshot', 'aggregate', + 'port_pair', 'port_pair_group', 'flow_classifier', + 'port_chain', 'datanetwork', 'providernet'] + +_RESOURCE_DICT = {key: [] for key in _RESOURCE_TYPES+['vm_with_vol']} + + +def _check_values(value, val_type='scope', valid_vals=None): + value = value.lower() + if not valid_vals: + valid_vals = VALID_SCOPES + if value not in valid_vals: + raise ValueError("'{}' param value has to be one of the: " + "{}".format(val_type, valid_vals)) + + +class ResourceCleanup: + """ + Class to hold the cleanup list and related functions. + """ + + __resources_to_cleanup = \ + {key_: deepcopy(_RESOURCE_DICT) for key_ in VALID_SCOPES} + + @classmethod + def _get_resources(cls, scope): + return cls.__resources_to_cleanup[scope] + + @classmethod + def _reset(cls, scope): + for key in cls.__resources_to_cleanup[scope]: + cls.__resources_to_cleanup[scope][key] = [] + + @classmethod + def add(cls, resource_type, resource_id, scope='function', + del_vm_vols=True): + """ + Add resource to cleanup list. + + Args: + resource_type (str): one of these: 'vm', 'volume', 'flavor + resource_id (str|list): id(s) of the resource to add to cleanup list + scope (str): when the cleanup should be done. Valid value is one of + these: 'function', 'class', 'module' + del_vm_vols (bool): whether to delete attached volume(s) if given + resource is vm. + + """ + _check_values(scope) + _check_values(resource_type, val_type='resource_type', + valid_vals=_RESOURCE_TYPES) + + if resource_type == 'vm' and del_vm_vols: + resource_type = 'vm_with_vol' + + if not isinstance(resource_id, (list, tuple)): + resource_id = [resource_id] + + for res_id in resource_id: + cls.__resources_to_cleanup[scope][resource_type].append(res_id) + + @classmethod + def remove(cls, resource_type, resource_id, scope='function', + del_vm_vols=True): + """ + Add resource to cleanup list. + + Args: + resource_type (str): one of these: 'vm', 'volume', 'flavor + resource_id (str|list): id(s) of the resource to add to cleanup list + scope (str): when the cleanup should be done. Valid value is one of + these: 'function', 'class', 'module' + del_vm_vols (bool): whether to delete attached volume(s) if given + resource is vm. + + """ + if scope is None: + return + + _check_values(scope) + _check_values(resource_type, val_type='resource_type', + valid_vals=_RESOURCE_TYPES) + + if resource_type == 'vm' and del_vm_vols: + resource_type = 'vm_with_vol' + + if not isinstance(resource_id, (list, tuple)): + resource_id = [resource_id] + + existing_list = cls.__resources_to_cleanup[scope][resource_type] + for res_id in resource_id: + if res_id in existing_list: + existing_list.remove(res_id) + + +class VlmHostsReserved: + __hosts_reserved_dict = {key: [] for key in VALID_SCOPES} + + @classmethod + def _reset(cls, scope): + cls.__hosts_reserved_dict[scope] = [] + + @classmethod + def _get_hosts_reserved(cls, scope): + return list(cls.__hosts_reserved_dict[scope]) + + @classmethod + def add(cls, hosts, scope='session'): + """ + Add resource to cleanup list. 
+ + Args: + hosts (str|list): hostname(s) + scope (str): one of these: 'function', 'class', 'module', 'session' + + """ + _check_values(scope) + + if not isinstance(hosts, (list, tuple)): + hosts = [hosts] + + for host in hosts: + cls.__hosts_reserved_dict[scope].append(host) + + +class GuestLogs: + __guests_to_collect = {key: [] for key in VALID_SCOPES} + + @classmethod + def _reset(cls, scope): + cls.__guests_to_collect[scope] = [] + + @classmethod + def remove(cls, vm_id): + """ + Remove a guest from collect log list. Call this if test passed. + + Args: + vm_id (str): vm to remove from collection list + + """ + for scope in VALID_SCOPES: + try: + cls.__guests_to_collect[scope].remove(vm_id) + except ValueError: + continue + + @classmethod + def _get_guests(cls, scope): + return list(cls.__guests_to_collect[scope]) + + @classmethod + def add(cls, vm_id, scope='function'): + """ + Add a guest to collect log list. Applicable to guest heartbeat, + server group, vm scaling test cases. + - Use fixture_resources.GuestLogs.add() to add a guest to collect + list + - Use fixture_resources.GuestLogs.remove() to remove a guest + from collect list if test passed + + Args: + vm_id (str): vm to add to collection list + scope (str): one of these: 'function', 'class', 'module', 'session' + + """ + _check_values(scope) + + if vm_id not in cls.__guests_to_collect[scope]: + cls.__guests_to_collect[scope].append(vm_id) diff --git a/automated-pytest-suite/testfixtures/horizon.py b/automated-pytest-suite/testfixtures/horizon.py new file mode 100644 index 0000000..858f8a7 --- /dev/null +++ b/automated-pytest-suite/testfixtures/horizon.py @@ -0,0 +1,87 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import os +import datetime +from pytest import fixture + +from utils.horizon.pages import loginpage +from utils.horizon import video_recorder +from utils.horizon.helper import HorizonDriver +from utils.tis_log import LOG + +from consts import horizon +from consts.auth import Tenant +from consts.proj_vars import ProjVar + + +@fixture(scope="session") +def driver(request): + driver_ = HorizonDriver.get_driver() + + def teardown(): + HorizonDriver.quit_driver() + request.addfinalizer(teardown) + return driver_ + + +@fixture(scope='function') +def admin_home_pg(driver, request): + return __login_base(request=request, driver=driver, + auth_info=Tenant.get('admin_platform')) + + +@fixture(scope='function') +def admin_home_pg_container(driver, request): + return __login_base(request=request, driver=driver, + auth_info=Tenant.get('admin')) + + +@fixture(scope='function') +def tenant_home_pg_container(driver, request): + return __login_base(request=request, driver=driver, + auth_info=Tenant.get_primary()) + + +def __login_base(request, driver, auth_info, port=None): + + horizon.test_result = False + if not auth_info: + auth_info = Tenant.get_primary() + + user = auth_info['user'] + password = auth_info['password'] + project = auth_info['tenant'] + if not port and not auth_info.get('platform'): + port = 31000 + + gmttime = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S") + video_path = ProjVar.get_var('LOG_DIR') + '/horizon/' + \ + str(gmttime) + '.mp4' + recorder = video_recorder.VideoRecorder(1920, 1080, os.environ['DISPLAY'], + video_path) + recorder.start() + home_pg = None + + try: + LOG.fixture_step('Login as {}'.format(user)) + login_pg = loginpage.LoginPage(driver, port=port) + login_pg.go_to_target_page() + home_pg = login_pg.login(user=user, 
password=password) + home_pg.change_project(name=project) + finally: + def teardown(): + if home_pg: + LOG.fixture_step('Logout') + home_pg.log_out() + recorder.stop() + if horizon.test_result: + recorder.clear() + + request.addfinalizer(teardown) + + return home_pg diff --git a/automated-pytest-suite/testfixtures/pre_checks_and_configs.py b/automated-pytest-suite/testfixtures/pre_checks_and_configs.py new file mode 100755 index 0000000..7ec7834 --- /dev/null +++ b/automated-pytest-suite/testfixtures/pre_checks_and_configs.py @@ -0,0 +1,173 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import time + +from pytest import fixture, skip + +from consts.auth import Tenant +from consts.stx import EventLogID, HostAvailState, AppStatus +from consts.reasons import SkipSysType +from keywords import system_helper, host_helper, keystone_helper, \ + security_helper, container_helper, kube_helper +from utils.tis_log import LOG + + +@fixture(scope='function') +def stx_openstack_required(request): + app_name = 'stx-openstack' + if not container_helper.is_stx_openstack_deployed(applied_only=True): + skip('stx-openstack application is not applied') + + def wait_for_recover(): + + post_status = container_helper.get_apps(application=app_name)[0] + if not post_status == AppStatus.APPLIED: + LOG.info("Dump info for unhealthy pods") + kube_helper.dump_pods_info() + + if not post_status.endswith('ed'): + LOG.fixture_step("Wait for application apply finish") + container_helper.wait_for_apps_status(apps=app_name, + status=AppStatus.APPLIED, + timeout=3600, + check_interval=15, + fail_ok=False) + + request.addfinalizer(wait_for_recover) + + +@fixture(scope='session') +def skip_for_one_proc(): + hypervisor = host_helper.get_up_hypervisors() + if not hypervisor: + skip("No up hypervisor on system.") + + if len(host_helper.get_host_procs(hostname=hypervisor[0])) < 2: + skip( + 'At least two processor per compute host is required for this ' + 'test.') + + +@fixture(scope='session') +def no_simplex(): + LOG.fixture_step("(Session) Skip if Simplex") + if system_helper.is_aio_simplex(): + skip(SkipSysType.SIMPLEX_SYSTEM) + + +@fixture(scope='session') +def simplex_only(): + LOG.fixture_step("(Session) Skip if not Simplex") + if not system_helper.is_aio_simplex(): + skip(SkipSysType.SIMPLEX_ONLY) + + +@fixture(scope='session') +def check_numa_num(): + hypervisor = host_helper.get_up_hypervisors() + if not hypervisor: + skip("No up hypervisor on system.") + + return len(host_helper.get_host_procs(hostname=hypervisor[0])) + + +@fixture(scope='session') +def wait_for_con_drbd_sync_complete(): + if len(system_helper.get_controllers()) < 2: + LOG.info( + "Less than two controllers on system. 
Do not wait for drbd sync") + return False + + host = 'controller-1' + LOG.fixture_step("Waiting for controller-1 drbd sync alarm gone if present") + end_time = time.time() + 1200 + while time.time() < end_time: + drbd_alarms = system_helper.get_alarms( + alarm_id=EventLogID.CON_DRBD_SYNC, reason_text='drbd-', + entity_id=host, strict=False) + + if not drbd_alarms: + LOG.info("{} drbd sync alarm is cleared".format(host)) + break + time.sleep(10) + + else: + assert False, "drbd sync alarm {} is not cleared within timeout".format( + EventLogID.CON_DRBD_SYNC) + + LOG.fixture_step( + "Wait for {} becomes available in system host-list".format(host)) + system_helper.wait_for_host_values(host, + availability=HostAvailState.AVAILABLE, + timeout=120, fail_ok=False, + check_interval=10) + + LOG.fixture_step( + "Wait for {} drbd-cinder in sm-dump to reach desired state".format( + host)) + host_helper.wait_for_sm_dump_desired_states(host, 'drbd-', strict=False, + timeout=30, fail_ok=False) + return True + + +@fixture(scope='session') +def change_admin_password_session(request, wait_for_con_drbd_sync_complete): + more_than_one_controllers = wait_for_con_drbd_sync_complete + prev_pswd = Tenant.get('admin')['password'] + post_pswd = '!{}9'.format(prev_pswd) + + LOG.fixture_step( + '(Session) Changing admin password to {}'.format(post_pswd)) + keystone_helper.set_user('admin', password=post_pswd) + + def _lock_unlock_controllers(): + LOG.fixture_step("Sleep for 300 seconds after admin password change") + time.sleep(300) + if more_than_one_controllers: + active, standby = system_helper.get_active_standby_controllers() + if standby: + LOG.fixture_step( + "(Session) Locking unlocking controllers to complete " + "action") + host_helper.lock_host(standby) + host_helper.unlock_host(standby) + + host_helper.lock_host(active, swact=True) + host_helper.unlock_host(active) + else: + LOG.warning( + "Standby controller unavailable. Skip lock unlock " + "controllers post admin password change.") + elif system_helper.is_aio_simplex(): + LOG.fixture_step( + "(Session) Simplex lab - lock/unlock controller to complete " + "action") + host_helper.lock_host('controller-0', swact=False) + host_helper.unlock_host('controller-0') + + def revert_pswd(): + LOG.fixture_step( + "(Session) Reverting admin password to {}".format(prev_pswd)) + keystone_helper.set_user('admin', password=prev_pswd) + _lock_unlock_controllers() + + LOG.fixture_step( + "(Session) Check admin password is reverted to {} in " + "keyring".format(prev_pswd)) + assert prev_pswd == security_helper.get_admin_password_in_keyring() + + request.addfinalizer(revert_pswd) + + _lock_unlock_controllers() + + LOG.fixture_step( + "(Session) Check admin password is changed to {} in keyring".format( + post_pswd)) + assert post_pswd == security_helper.get_admin_password_in_keyring() + + return post_pswd diff --git a/automated-pytest-suite/testfixtures/recover_hosts.py b/automated-pytest-suite/testfixtures/recover_hosts.py new file mode 100644 index 0000000..8a8099a --- /dev/null +++ b/automated-pytest-suite/testfixtures/recover_hosts.py @@ -0,0 +1,143 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture + +from utils import table_parser, cli +from utils.tis_log import LOG +from keywords import host_helper, system_helper + + +@fixture(scope='function', autouse=True) +def hosts_recover_func(request): + def recover_(): + hosts = HostsToRecover._get_hosts_to_recover(scope='function') + if hosts: + HostsToRecover._reset('function') + HostsToRecover._recover_hosts(hosts, 'function') + + request.addfinalizer(recover_) + + +@fixture(scope='class', autouse=True) +def hosts_recover_class(request): + def recover_hosts(): + hosts = HostsToRecover._get_hosts_to_recover(scope='class') + if hosts: + HostsToRecover._reset('class') + HostsToRecover._recover_hosts(hosts, 'class') + + request.addfinalizer(recover_hosts) + + +@fixture(scope='module', autouse=True) +def hosts_recover_module(request): + def recover_hosts(): + hosts = HostsToRecover._get_hosts_to_recover(scope='module') + if hosts: + HostsToRecover._reset('module') + HostsToRecover._recover_hosts(hosts, 'module') + + request.addfinalizer(recover_hosts) + + +class HostsToRecover(): + __hosts_to_recover = { + 'function': [], + 'class': [], + 'module': [], + } + + @classmethod + def __check_scope(cls, scope): + valid_scope = cls.__hosts_to_recover.keys() + if scope not in valid_scope: + raise ValueError( + "scope has to be one of the following: {}".format(valid_scope)) + + @classmethod + def add(cls, hostnames, scope='function'): + """ + Add host(s) to recover list. Will wait for host(s) to recover as test + teardown. + + Args: + hostnames (str|list): + scope + + """ + if scope is None: + return + + cls.__check_scope(scope) + if isinstance(hostnames, str): + hostnames = [hostnames] + + cls.__hosts_to_recover[scope] += hostnames + + @classmethod + def remove(cls, hostnames, scope='function'): + """ + Remove host(s) from recover list. Only remove one instance if host + has multiple occurances in the recover list. + + Args: + hostnames (str|list|tuple): + scope: + + """ + if scope is None: + return + + cls.__check_scope(scope) + + if isinstance(hostnames, str): + hostnames = [hostnames] + + for host in hostnames: + cls.__hosts_to_recover[scope].remove(host) + + @classmethod + def _reset(cls, scope): + cls.__hosts_to_recover[scope] = [] + + @classmethod + def _get_hosts_to_recover(cls, scope): + return list(cls.__hosts_to_recover[scope]) + + @staticmethod + def _recover_hosts(hostnames, scope): + if system_helper.is_aio_simplex(): + LOG.fixture_step('{} Recover simplex host'.format(scope)) + host_helper.recover_simplex(fail_ok=False) + return + + # Recover hosts for non-simplex system + hostnames = sorted(set(hostnames)) + table_ = table_parser.table(cli.system('host-list')[1]) + table_ = table_parser.filter_table(table_, hostname=hostnames) + + # unlocked_hosts = table_parser.get_values(table_, 'hostname', + # administrative='unlocked') + locked_hosts = table_parser.get_values(table_, 'hostname', + administrative='locked') + + err_msg = [] + if locked_hosts: + LOG.fixture_step( + "({}) Unlock hosts: {}".format(scope, locked_hosts)) + # Hypervisor state will be checked later in wait_for_hosts_ready + # which handles platform only deployment + res1 = host_helper.unlock_hosts(hosts=locked_hosts, fail_ok=True, + check_hypervisor_up=False) + for host in res1: + if res1[host][0] not in [0, 4]: + err_msg.append( + "Not all host(s) unlocked successfully. 
Detail: " + "{}".format(res1)) + + host_helper.wait_for_hosts_ready(hostnames) diff --git a/automated-pytest-suite/testfixtures/resource_create.py b/automated-pytest-suite/testfixtures/resource_create.py new file mode 100644 index 0000000..b52c89e --- /dev/null +++ b/automated-pytest-suite/testfixtures/resource_create.py @@ -0,0 +1,172 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture + +from consts.stx import GuestImages +from keywords import nova_helper, glance_helper, keystone_helper, \ + container_helper +from utils.tis_log import LOG + + +# Session fixture to add affinitiy and anti-affinity server group +@fixture(scope='session') +def server_groups(): + def create_server_groups(soft=True, auth_info=None): + srv_grps_tenant = [] + + LOG.fixture_step( + '(session) Creating affinity and anti-affinity server groups with ' + 'best_effort set to {}'. + format(soft)) + for policy in ['affinity', 'anti_affinity']: + if soft: + policy = 'soft_{}'.format(policy) + + name = 'srv_group_{}'.format(policy) + srv_grp_id = \ + nova_helper.create_server_group(name=name, policy=policy, + auth_info=auth_info, + rtn_exist=True)[1] + srv_grps_tenant.append(srv_grp_id) + return srv_grps_tenant + + return create_server_groups + + +# Session fixture to add stxauto aggregate with stxauto availability zone +@fixture(scope='session') +def add_stxauto_zone(request): + LOG.fixture_step( + "(session) Add stxauto aggregate and stxauto availability zone") + nova_helper.create_aggregate(name='stxauto', avail_zone='stxauto', + check_first=True) + + def remove_aggregate(): + LOG.fixture_step("(session) Delete stxauto aggregate") + nova_helper.delete_aggregates('stxauto') + + request.addfinalizer(remove_aggregate) + + # return name of aggregate/availability zone + return 'stxauto' + + +# Fixtures to add admin role to primary tenant +@fixture(scope='module') +def add_admin_role_module(request): + __add_admin_role(scope='module', request=request) + + +@fixture(scope='class') +def add_admin_role_class(request): + __add_admin_role(scope='class', request=request) + + +@fixture(scope='function') +def add_admin_role_func(request): + __add_admin_role(scope='function', request=request) + + +def __add_admin_role(scope, request): + LOG.fixture_step( + "({}) Add admin role to user under primary tenant".format(scope)) + code = keystone_helper.add_or_remove_role(add_=True, role='admin')[0] + + def remove_admin(): + if code != -1: + LOG.fixture_step( + "({}) Remove admin role from user under primary tenant".format( + scope)) + keystone_helper.add_or_remove_role(add_=False, role='admin') + + request.addfinalizer(remove_admin) + + +@fixture(scope='session') +def ubuntu14_image(): + return __create_image('ubuntu_14', 'session') + + +@fixture(scope='session') +def ubuntu12_image(): + return __create_image('ubuntu_12', 'session') + + +@fixture(scope='session') +def centos7_image(): + return __create_image('centos_7', 'session') + + +@fixture(scope='session') +def centos6_image(): + return __create_image('centos_6', 'session') + + +@fixture(scope='session') +def opensuse11_image(): + return __create_image('opensuse_11', 'session') + + +@fixture(scope='session') +def opensuse12_image(): + return __create_image('opensuse_12', 'session') + + +@fixture(scope='session') +def opensuse13_image(): + return __create_image('opensuse_13', 'session') + + +@fixture(scope='session') +def rhel6_image(): + return __create_image('rhel_6', 'session') + + 
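+# Usage sketch (illustrative, not executed): a test that needs a particular
+# guest OS requests the matching session-scoped fixture and receives the
+# glance image id. The test name and vm_helper calls below are assumptions
+# used for illustration only:
+#
+#     def test_boot_ubuntu_vm(ubuntu14_image):
+#         vm_id = vm_helper.boot_vm(source='image',
+#                                   source_id=ubuntu14_image)[1]
+#         vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
+
+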
+@fixture(scope='session') +def rhel7_image(): + return __create_image('rhel_7', 'session') + + +@fixture(scope='session', autouse=True) +def default_glance_image(): + if not container_helper.is_stx_openstack_deployed(): + return None + return __create_image(None, 'session') + + +@fixture(scope='session', autouse=False) +def cgcs_guest_image(): + return __create_image('cgcs-guest', 'session') + + +def __create_image(img_os, scope): + if not img_os: + img_os = GuestImages.DEFAULT['guest'] + + LOG.fixture_step( + "({}) Get or create a glance image with {} guest OS".format(scope, + img_os)) + img_info = GuestImages.IMAGE_FILES[img_os] + img_id = glance_helper.get_image_id_from_name(img_os, strict=True) + if not img_id: + if img_info[0] is not None: + image_path = glance_helper.scp_guest_image(img_os=img_os) + else: + img_dir = GuestImages.DEFAULT['image_dir'] + image_path = "{}/{}".format(img_dir, img_info[2]) + + disk_format = 'raw' if img_os in ['cgcs-guest', 'tis-centos-guest', + 'vxworks'] else 'qcow2' + img_id = \ + glance_helper.create_image(name=img_os, + source_image_file=image_path, + disk_format=disk_format, + container_format='bare', + cleanup=scope)[1] + + return img_id diff --git a/automated-pytest-suite/testfixtures/resource_mgmt.py b/automated-pytest-suite/testfixtures/resource_mgmt.py new file mode 100755 index 0000000..e180abc --- /dev/null +++ b/automated-pytest-suite/testfixtures/resource_mgmt.py @@ -0,0 +1,267 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture + +from utils.tis_log import LOG +from utils import exceptions + +from consts.auth import Tenant +from keywords import nova_helper, vm_helper, cinder_helper, glance_helper, \ + network_helper, system_helper +from testfixtures.fixture_resources import ResourceCleanup, GuestLogs + + +# SIMPLEX_RECOVERED = False + + +@fixture(scope='function', autouse=True) +def delete_resources_func(request): + """ + Function level fixture to delete created resources after each caller + testcase. + + Notes: Auto used fixture - import it to a conftest.py file under a + feature directory to auto use it on all children testcases. + + Examples: + - see nova/conftest.py for importing + - see ResourceCleanup.add function usages in nova/test_shared_cpu.py + for adding resources to cleanups + + Args: + request: pytest param present caller test function + + """ + + def delete_(): + _delete_resources(ResourceCleanup._get_resources('function'), + scope='function') + ResourceCleanup._reset('function') + + request.addfinalizer(delete_) + + +@fixture(scope='class', autouse=True) +def delete_resources_class(request): + """ + Class level fixture to delete created resources after each caller testcase. + + Notes: Auto used fixture - import it to a conftest.py file under a + feature directory to auto use it on all children + testcases. + + Examples: + - see nova/conftest.py for importing + - see ResourceCleanup.add function usages in nova/test_shared_cpu.py + for adding resources to cleanups + + Args: + request: pytest param present caller test function + + """ + + def delete_(): + _delete_resources(ResourceCleanup._get_resources('class'), + scope='class') + ResourceCleanup._reset('class') + + request.addfinalizer(delete_) + + +@fixture(scope='module', autouse=True) +def delete_resources_module(request): + """ + Module level fixture to delete created resources after each caller testcase. 
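+
+    Usage sketch (illustrative): inside a test function, register a created
+    resource for automatic deletion in this module-scoped teardown, e.g.
+
+        vol_id = cinder_helper.create_volume()[1]
+        ResourceCleanup.add('volume', resource_id=vol_id, scope='module')
+
+    The cinder_helper call above is only an example; any supported resource
+    type created during a test can be registered this way.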
+
+    Notes: Auto used fixture - import it to a conftest.py file under a
+        feature directory to auto use it on all children
+        testcases.
+
+    Examples:
+        - see nova/conftest.py for importing
+        - see ResourceCleanup.add function usages in nova/test_shared_cpu.py
+          for adding resources to cleanups
+
+    Args:
+        request: pytest param representing the caller test function
+
+    """
+
+    def delete_():
+        _delete_resources(ResourceCleanup._get_resources('module'),
+                          scope='module')
+        ResourceCleanup._reset('module')
+
+    request.addfinalizer(delete_)
+
+
+@fixture(scope='session', autouse=True)
+def delete_resources_session(request):
+    """
+    Session level fixture to delete created resources at the end of the
+    test session.
+
+    Notes: Auto used fixture - import it to a conftest.py file under a
+        feature directory to auto use it on all children
+        testcases.
+
+    Examples:
+        - see nova/conftest.py for importing
+        - see ResourceCleanup.add function usages in nova/test_shared_cpu.py
+          for adding resources to cleanups
+
+    Args:
+        request: pytest param representing the caller test function
+
+    """
+
+    def delete_():
+        _delete_resources(ResourceCleanup._get_resources('session'),
+                          scope='session')
+        ResourceCleanup._reset('session')
+
+    request.addfinalizer(delete_)
+
+
+@fixture(scope='module')
+def flavor_id_module():
+    """
+    Create a basic flavor to be used by test cases as test setup, at the
+    beginning of the test module.
+    Delete the created flavor as test teardown, at the end of the test
+    module.
+    """
+    flavor = nova_helper.create_flavor()[1]
+    ResourceCleanup.add('flavor', resource_id=flavor, scope='module')
+
+    return flavor
+
+
+def _delete_resources(resources, scope):
+    # global SIMPLEX_RECOVERED
+    # if not SIMPLEX_RECOVERED and system_helper.is_simplex():
+    #     LOG.fixture_step('{} Ensure simplex host is up before cleaning
+    #     up'.format(scope))
+    #     host_helper.recover_simplex(fail_ok=True)
+    #     SIMPLEX_RECOVERED = True
+
+    def __del_aggregate(aggregate_, **kwargs):
+        nova_helper.remove_hosts_from_aggregate(aggregate=aggregate_,
+                                                check_first=False, **kwargs)
+        return nova_helper.delete_aggregates(names=aggregate_, **kwargs)
+
+    # List resources in proper order if there are dependencies!
+    del_list = [
+        # resource, del_fun, fun_params, whether to delete all resources
+        # together.
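+        # For example, the ('vm', vm_helper.delete_vms,
+        # {'delete_volumes': False}, True) entry below means: all registered
+        # 'vm' resources are passed to vm_helper.delete_vms() in one call
+        # (del_all=True) with delete_volumes=False, and admin credentials are
+        # used unless fun_params already carries an auth_info.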
+ ('port_chain', network_helper.delete_sfc_port_chain, + {'check_first': True}, False), + ('flow_classifier', network_helper.delete_flow_classifier, + {'check_first': True}, False), + ('vm', vm_helper.delete_vms, {'delete_volumes': False}, True), + ('vm_with_vol', vm_helper.delete_vms, {'delete_volumes': True}, True), + ('vol_snapshot', cinder_helper.delete_volume_snapshots, {}, True), + ('volume', cinder_helper.delete_volumes, {}, True), + ('volume_type', cinder_helper.delete_volume_types, {}, True), + ('volume_qos', cinder_helper.delete_volume_qos, {}, True), + ('flavor', nova_helper.delete_flavors, {}, True), + ('image', glance_helper.delete_images, {}, True), + ('server_group', nova_helper.delete_server_groups, {}, True), + ('floating_ip', network_helper.delete_floating_ips, {}, True), + ('trunk', network_helper.delete_trunks, {}, True), + ('port_pair_group', network_helper.delete_sfc_port_pair_group, + {'check_first': True}, False), + ('port_pair', network_helper.delete_sfc_port_pairs, + {'check_first': True}, True), + ('port', network_helper.delete_port, {}, False), + ('router', network_helper.delete_router, {}, False), + ('subnet', network_helper.delete_subnets, {}, True), + ('network_qos', network_helper.delete_qos, {}, False), + ('network', network_helper.delete_network, {}, False), + ('security_group_rule', network_helper.delete_security_group_rules, {}, + True), + ('security_group', network_helper.delete_security_group, {}, False), + ('aggregate', __del_aggregate, {}, False), + ('datanetwork', system_helper.delete_data_network, {}, False), + ] + + err_msgs = [] + for item in del_list: + resource_type, del_fun, fun_kwargs, del_all = item + resource_ids = resources.get(resource_type, []) + if not resource_ids: + continue + + LOG.fixture_step("({}) Attempt to delete following {}: " + "{}".format(scope, resource_type, resource_ids)) + if 'auth_info' not in fun_kwargs: + fun_kwargs['auth_info'] = Tenant.get('admin') + + if del_all: + resource_ids = [resource_ids] + for resource_id in resource_ids: + try: + code, msg = del_fun(resource_id, fail_ok=True, **fun_kwargs)[ + 0:2] + if code > 0: + err_msgs.append(msg) + except exceptions.TiSError as e: + err_msgs.append(e.__str__()) + + # Attempt all deletions before raising exception. + if err_msgs: + LOG.error("ERROR: Failed to delete resource(s). \nDetails: {}".format( + err_msgs)) + # raise exceptions.CommonError("Failed to delete resource(s). + # Details: {}".format(err_msgs)) + + +@fixture(scope='function', autouse=True) +def guest_logs_func(request): + """ + Collect guest logs for guests in collect list. Applicable to guest + heartbeat, server group, vm scaling test cases. 
+ - Use fixture_resources.GuestLogs.add() to add a guest to collect list + - Use fixture_resources.GuestLogs.remove() to remove a guest from + collect list if test passed + + Examples: + see testcases/functional/mtc/guest_heartbeat/test_vm_voting + .py for usage + + """ + + def _collect(): + _collect_guest_logs(scope='function') + + request.addfinalizer(_collect) + + +@fixture(scope='class', autouse=True) +def guest_logs_class(request): + def _collect(): + _collect_guest_logs(scope='class') + + request.addfinalizer(_collect) + + +@fixture(scope='module', autouse=True) +def guest_logs_module(request): + def _collect(): + _collect_guest_logs(scope='module') + + request.addfinalizer(_collect) + + +def _collect_guest_logs(scope): + guests = GuestLogs._get_guests(scope=scope) + if guests: + LOG.fixture_step( + "({}) Attempt to collect guest logs for: {}".format(scope, guests)) + for guest in guests: + vm_helper.collect_guest_logs(vm_id=guest) + GuestLogs._reset(scope=scope) diff --git a/automated-pytest-suite/testfixtures/verify_fixtures.py b/automated-pytest-suite/testfixtures/verify_fixtures.py new file mode 100644 index 0000000..2fe01e3 --- /dev/null +++ b/automated-pytest-suite/testfixtures/verify_fixtures.py @@ -0,0 +1,226 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +from pytest import fixture + +from consts.auth import Tenant +from consts.stx import AppStatus +from keywords import system_helper, vm_helper, storage_helper, host_helper, \ + common, check_helper, kube_helper, container_helper +from utils.tis_log import LOG + + +######################## +# Test Fixtures Module # +######################## + +@fixture(scope='function') +def check_alarms(request): + """ + Check system alarms before and after test case. + + Args: + request: caller of this fixture. i.e., test func. + """ + __verify_alarms(request=request, scope='function') + + +@fixture(scope='module') +def check_alarms_module(request): + """ + Check system alarms before and after test session. + + Args: + request: caller of this fixture. i.e., test func. + """ + __verify_alarms(request=request, scope='module') + + +def __verify_alarms(request, scope): + before_alarms = __get_alarms(scope=scope) + prev_bad_pods = kube_helper.get_unhealthy_pods(all_namespaces=True) + prev_applied_apps = container_helper.get_apps(field='application', + status=AppStatus.APPLIED) + + def verify_(): + LOG.fixture_step( + "({}) Verify system alarms, applications and pods status after " + "test {} ended...". 
+ format(scope, scope)) + alarm_res, new_alarms = check_helper.check_alarms( + before_alarms=before_alarms, fail_ok=True) + post_apps_status = container_helper.get_apps( + field=('application', 'status'), application=prev_applied_apps) + kube_helper.wait_for_pods_healthy(timeout=120, all_namespaces=True, + name=prev_bad_pods, exclude=True, + strict=True) + + # check no new bad application + LOG.info("Check applications status...") + new_bad_apps = [item for item in post_apps_status if + item[1] != AppStatus.APPLIED] + assert not new_bad_apps, "Applications no longer applied after test " \ + "{}: {}".format(scope, new_bad_apps) + # check no new alarms + assert alarm_res, "New alarm(s) appeared within test {}: {}".format( + scope, new_alarms) + + request.addfinalizer(verify_) + + return + + +@fixture(scope='session', autouse=True) +def pre_alarms_session(): + if container_helper.is_stx_openstack_deployed(): + from keywords import network_helper + for auth_info in (Tenant.get_primary(), Tenant.get_secondary()): + project = auth_info['tenant'] + default_group = network_helper.get_security_groups( + auth_info=auth_info, name='default', strict=True) + if not default_group: + LOG.info( + "No default security group for {}. Skip security group " + "rule config.".format( + project)) + continue + + default_group = default_group[0] + security_rules = network_helper.get_security_group_rules( + auth_info=auth_info, **{'IP Protocol': ('tcp', 'icmp'), + 'Security Group': default_group}) + if len(security_rules) >= 2: + LOG.info( + "Default security group rules for {} already configured " + "to allow ping and ssh".format( + project)) + continue + + LOG.info( + "Create icmp and ssh security group rules for {} with best " + "effort".format( + project)) + for rules in (('icmp', None), ('tcp', 22)): + protocol, dst_port = rules + network_helper.create_security_group_rule(group=default_group, + protocol=protocol, + dst_port=dst_port, + fail_ok=True, + auth_info=auth_info) + + return __get_alarms('session') + + +@fixture(scope='function') +def pre_alarms_function(): + return __get_alarms('function') + + +def __get_alarms(scope): + LOG.fixture_step("({}) Gathering system health info before test {} " + "begins.".format(scope, scope)) + alarms = system_helper.get_alarms() + return alarms + + +@fixture(scope='session') +def pre_coredumps_and_crash_reports_session(): + return __get_system_crash_and_coredumps('session') + + +def __get_system_crash_and_coredumps(scope): + LOG.fixture_step( + "({}) Getting existing system crash reports and coredumps before test " + "{} begins.".format(scope, scope)) + + core_dumps_and_reports = host_helper.get_coredumps_and_crashreports() + return core_dumps_and_reports + + +@fixture() +def check_vms(request): + """ + Check Status of the VMs before and after test run. + + Args: + request: caller of this fixture. i.e., test func. 
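+
+    Usage sketch (illustrative): a test that may disturb running VMs can
+    request this fixture so that VM status is captured before the test and
+    verified again in teardown, e.g.
+
+        def test_reboot_compute_host(check_vms):
+            ...
+
+    The test name above is an assumption used for illustration only.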
+ """ + LOG.fixture_step("Gathering system VMs info before test begins.") + before_vms_status = vm_helper.get_vms_info(fields=['status'], long=False, + all_projects=True, + auth_info=Tenant.get('admin')) + + def verify_vms(): + LOG.fixture_step("Verifying system VMs after test ended...") + after_vms_status = vm_helper.get_vms_info(fields=['status'], long=False, + all_projects=True, + auth_info=Tenant.get('admin')) + + # compare status between the status of each VMs before/after the test + common_vms = set(before_vms_status) & set(after_vms_status) + LOG.debug("VMs to verify: {}".format(common_vms)) + failure_msgs = [] + for vm in common_vms: + before_status = before_vms_status[vm][0] + post_status = after_vms_status[vm][0] + + if post_status.lower() != 'active' and post_status != before_status: + msg = "VM {} is not in good state. Previous status: {}. " \ + "Current status: {}". \ + format(vm, before_status, post_status) + failure_msgs.append(msg) + + assert not failure_msgs, '\n'.join(failure_msgs) + LOG.info("VMs status verified.") + + request.addfinalizer(verify_vms) + return + + +@fixture() +def ping_vms_from_nat(request): + """ + TODO: - should only compare common vms + - should pass as long as after test ping results are good regardless + of the pre test results + - if post test ping failed, then compare it with pre test ping to see + if it's a okay failure. + - better to re-utilize the check vm fixture so that we don't need to + retrieving the info again. + i.e., use fixture inside a fixture. + + Args: + request: + + Returns: + + """ + LOG.info("Gathering VMs ping to NAT before test begins.") + + before_ping_result = vm_helper.ping_vms_from_natbox() + + def verify_nat_ping(): + after_ping_result = vm_helper.ping_vms_from_natbox() + + assert before_ping_result == after_ping_result + + LOG.info("Ping from NAT Box to VMs verified.") + + request.addfinalizer(verify_nat_ping) + return + + +@fixture() +def ceph_precheck(): + """ + Run test pre-checks before running CEPH tests. + + """ + + LOG.info('Verify the health of the CEPH cluster') + rtn, msg = storage_helper.is_ceph_healthy() + LOG.info('{}'.format(msg)) diff --git a/automated-pytest-suite/utils/__init__.py b/automated-pytest-suite/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/cli.py b/automated-pytest-suite/utils/cli.py new file mode 100644 index 0000000..c8ad27e --- /dev/null +++ b/automated-pytest-suite/utils/cli.py @@ -0,0 +1,327 @@ +import os + +from pytest import skip + +from consts.auth import Tenant +from consts.stx import Prompt +from consts.proj_vars import ProjVar +from consts.timeout import CLI_TIMEOUT +from utils import exceptions +from utils.clients.ssh import ControllerClient +from utils.clients.telnet import TelnetClient + + +def exec_cli(cmd, sub_cmd, positional_args='', client=None, flags='', + fail_ok=False, cli_dir='', auth_info=None, + source_openrc=None, timeout=CLI_TIMEOUT): + """ + + Args: + cmd: such as 'neutron' + sub_cmd: such as 'net-show' + client: SSHClient, TelnetClient or LocalHostClient + positional_args: string or list. + Single arg examples: 'arg0' or ['arg0'] + Multiple args string example: 'arg1 arg2' + Multiple args list example: ['arg1','arg2'] + flags: string or list. + Single arg examples: 'arg0 value0' or ['arg0 value'] + Multiple args string example: 'arg1 value1 arg2 value2 arg3' + Multiple args list example: ['arg1 value1','arg2 value2', 'arg3'] + auth_info: (dict) authorization information to run cli commands. 
+ source_openrc (None|bool): In general this should NOT be set unless + necessary. + fail_ok: + cli_dir: + timeout: + + Returns: + if command executed successfully: return command_output + if command failed to execute such as authentication failure: + if fail_ok: return exit_code, command_output + if not fail_ok: raise exception + """ + use_telnet = True if isinstance(client, TelnetClient) else False + + # Determine region and auth_url + raw_cmd = cmd.strip().split()[0] + is_dc = ProjVar.get_var('IS_DC') + platform_cmds = ('system', 'fm') + + if auth_info is None: + auth_info = Tenant.get_primary() + + platform = True if auth_info.get('platform') else False + + if not platform and ProjVar.get_var('OPENSTACK_DEPLOYED') is False: + skip('stx-openstack application is not applied.') + + region = auth_info.get('region') + dc_region = region if region and is_dc else None + default_region_and_url = Tenant.get_region_and_url(platform=platform, + dc_region=dc_region) + + region = region if region else default_region_and_url['region'] + auth_url = auth_info.get('auth_url', default_region_and_url['auth_url']) + + if is_dc: + # Set proper region when cmd is against DC central cloud. This is + # needed due to the same auth_info may be + # passed to different keywords that require different region + if region in ('RegionOne', 'SystemController'): + region = 'RegionOne' if raw_cmd in platform_cmds else \ + 'SystemController' + + # # Reset auth_url if cmd is against DC central cloud RegionOne + # containerized services. This is needed due to + # # the default auth_url for central controller RegionOne is platform + # auth_url + # if region == 'RegionOne' and not platform: + # auth_url = default_region_and_url['auth_url'] + + positional_args = __convert_args(positional_args) + flags = __convert_args(flags) + + if not use_telnet and not client: + if is_dc: + # This may not exist if cli cmd used before DC vars are initialized + client = ControllerClient.get_active_controller(name=region, + fail_ok=True) + + if not client: + client = ControllerClient.get_active_controller() + + if source_openrc is None: + source_openrc = ProjVar.get_var('SOURCE_OPENRC') + + if source_openrc: + source_file = _get_rc_path(user=auth_info['user'], platform=platform) + if use_telnet: + cmd = 'source {}; {}'.format(source_file, cmd) + else: + source_openrc_file(ssh_client=client, auth_info=auth_info, + rc_file=source_file, fail_ok=fail_ok) + flags = '' + elif auth_info: + # auth params + auth_args = ( + "--os-username '{}' --os-password '{}' --os-project-name {} " + "--os-auth-url {} " + "--os-user-domain-name Default --os-project-domain-name Default". 
+ format(auth_info['user'], auth_info['password'], + auth_info['tenant'], auth_url)) + + flags = '{} {}'.format(auth_args.strip(), flags.strip()) + + # internal URL handling + if raw_cmd in ('openstack', 'sw-manager'): + flags += ' --os-interface internal' + else: + flags += ' --os-endpoint-type internalURL' + + # region handling + if raw_cmd != 'dcmanager': + if raw_cmd == 'cinder': + flags += ' --os_region_name {}'.format(region) + else: + flags += ' --os-region-name {}'.format(region) + + complete_cmd = ' '.join([os.path.join(cli_dir, cmd), flags.strip(), sub_cmd, + positional_args]).strip() + + # workaround for dcmanager cmd not supporting --os-project-name + if complete_cmd.startswith('dcmanager'): + complete_cmd = complete_cmd.replace('--os-project-name', + '--os-tenant-name') + + kwargs = {'searchwindowsize': 100} if not use_telnet else {} + exit_code, cmd_output = client.exec_cmd(complete_cmd, + expect_timeout=timeout, **kwargs) + + if exit_code == 0: + return 0, cmd_output + + if fail_ok and exit_code in [1, 2]: + return 1, cmd_output + + raise exceptions.CLIRejected( + "CLI '{}' failed to execute. Output: {}".format(complete_cmd, + cmd_output)) + + +def __convert_args(args): + if args is None: + args = '' + elif isinstance(args, list): + args = ' '.join(str(arg) for arg in args) + else: + args = str(args) + + return args.strip() + + +def _get_rc_path(user, remote_cli=False, platform=None): + if remote_cli: + openrc_path = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon', + '{}-openrc.sh'.format(user)) + else: + openrc_path = '/etc/platform/openrc' if platform and user == 'admin' \ + else '~/openrc.{}'.format(user) + + return openrc_path + + +def source_openrc_file(ssh_client, auth_info, rc_file, fail_ok=False, + force=True): + """ + Source to the given openrc file on the ssh client. + Args: + ssh_client: + auth_info: + rc_file: + fail_ok: + force (bool): Whether to source even if already sourced. + + Returns: + (-1, None) # Already sourced, no action done + (0, ) # sourced to openrc file successfully + (1, ) # Failed to source + + """ + exit_code, cmd_output = -1, None + user = auth_info['user'] + if force or 'keystone_{}'.format(user) not in ssh_client.prompt: + password = auth_info['password'] + new_prompt = Prompt.TENANT_PROMPT.format(user) + + cmd = 'source {}'.format(rc_file) + ssh_client.send(cmd) + prompts = [new_prompt] + index = ssh_client.expect(prompts, fail_ok=False) + + if index == 2: + ssh_client.send() + index = ssh_client.expect(prompts[0:3]) + if index == 1: + ssh_client.send(password) + index = ssh_client.expect(prompts[0:2]) + + if index == 0: + ssh_client.set_prompt(new_prompt) + exit_code = ssh_client.get_exit_code() + else: + cmd_output = ssh_client.cmd_output + ssh_client.send_control() + ssh_client.expect() + exit_code = 1 + + if exit_code != 0: + if not fail_ok: + raise exceptions.SSHExecCommandFailed( + "Failed to Source. 
Output: {}".format(cmd_output)) + + return exit_code, cmd_output + + +def openstack(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', auth_info=None, + timeout=CLI_TIMEOUT, source_openrc=False): + flags += ' --os-identity-api-version 3' + + return exec_cli('openstack', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + source_openrc=source_openrc, timeout=timeout) + + +def nova(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', auth_info=None, + timeout=CLI_TIMEOUT): + return exec_cli('nova', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + timeout=timeout) + + +def heat(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', auth_info=Tenant.get('admin'), + timeout=CLI_TIMEOUT): + return exec_cli('heat', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + timeout=timeout) + + +def neutron(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', auth_info=None, + timeout=CLI_TIMEOUT): + return exec_cli('neutron', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + timeout=timeout) + + +def cinder(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', auth_info=None, + timeout=CLI_TIMEOUT): + return exec_cli('cinder', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + timeout=timeout) + + +def swift(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', auth_info=Tenant.get('admin'), + source_openrc=None, timeout=CLI_TIMEOUT): + return exec_cli('swift', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + source_openrc=source_openrc, timeout=timeout) + + +def sw_manager(cmd, positional_args='', ssh_client=None, flags='', + fail_ok=False, cli_dir='', + auth_info=Tenant.get('admin_platform'), source_openrc=None, + timeout=CLI_TIMEOUT): + return exec_cli('sw-manager', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + source_openrc=source_openrc, timeout=timeout) + + +def system(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', auth_info=Tenant.get('admin_platform'), + source_openrc=None, timeout=CLI_TIMEOUT): + return exec_cli('system', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + source_openrc=source_openrc, + timeout=timeout) + + +def fm(cmd, positional_args='', ssh_client=None, flags='', fail_ok=False, + cli_dir='', + auth_info=Tenant.get('admin_platform'), source_openrc=None, + timeout=CLI_TIMEOUT): + build = ProjVar.get_var('BUILD_INFO').get('BUILD_ID') + cmd_ = 'fm' + if build and build != 'n/a' and build < '2018-08-19': + cmd_ = 'system' + + return exec_cli(cmd_, sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, fail_ok=fail_ok, + cli_dir=cli_dir, auth_info=auth_info, + source_openrc=source_openrc, timeout=timeout) + + +def dcmanager(cmd, positional_args='', 
ssh_client=None, flags='', fail_ok=False, + cli_dir='', + auth_info=Tenant.get('admin_platform', dc_region='RegionOne'), + timeout=CLI_TIMEOUT, + source_openrc=None): + if ssh_client is None: + ssh_client = ControllerClient.get_active_controller('RegionOne') + return exec_cli('dcmanager', sub_cmd=cmd, positional_args=positional_args, + client=ssh_client, flags=flags, + fail_ok=fail_ok, cli_dir=cli_dir, auth_info=auth_info, + source_openrc=source_openrc, timeout=timeout) diff --git a/automated-pytest-suite/utils/clients/__init__.py b/automated-pytest-suite/utils/clients/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/clients/local.py b/automated-pytest-suite/utils/clients/local.py new file mode 100644 index 0000000..336049d --- /dev/null +++ b/automated-pytest-suite/utils/clients/local.py @@ -0,0 +1,299 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import getpass +import os +import re +import socket +import sys +import time + +import pexpect + +from consts.proj_vars import ProjVar +from consts.stx import PING_LOSS_RATE +from utils import exceptions +from utils.clients.ssh import SSHClient +from utils.tis_log import LOG + +LOCAL_HOST = socket.gethostname() +LOCAL_USER = getpass.getuser() +LOCAL_PROMPT = re.escape('{}@{}$ '.format( + LOCAL_USER, LOCAL_HOST.split(sep='.wrs.com')[0])).replace(r'\$ ', r'.*\$') +COUNT = 0 + + +def get_unique_name(name_str): + global COUNT + COUNT += 1 + return '{}-{}'.format(name_str, COUNT) + + +class LocalHostClient(SSHClient): + def __init__(self, initial_prompt=None, timeout=60, + session=None, searchwindowsisze=None, name=None, + connect=False): + """ + + Args: + initial_prompt + timeout + session + searchwindowsisze + connect (bool) + + Returns: + + """ + if not initial_prompt: + initial_prompt = LOCAL_PROMPT + if not name: + name = 'localclient' + self.name = get_unique_name(name) + super(LocalHostClient, self).__init__( + host=LOCAL_HOST, user=LOCAL_USER, + password=None, + force_password=False, + initial_prompt=initial_prompt, + timeout=timeout, session=session, + searchwindownsize=searchwindowsisze) + + if connect: + self.connect() + + def connect(self, retry=False, retry_interval=3, retry_timeout=300, + prompt=None, + use_current=True, timeout=None): + # Do nothing if current session is connected and force_close is False: + if use_current and self.is_connected(): + LOG.debug("Already connected to {}. 
Do nothing.".format(self.host)) + # LOG.debug("ID of the session: {}".format(id(self))) + return + + # use original prompt instead of self.prompt when connecting in case + # of prompt change during a session + if not prompt: + prompt = self.initial_prompt + if timeout is None: + timeout = self.timeout + + # Connect to host + end_time = time.time() + retry_timeout + while time.time() < end_time: + try: + LOG.debug( + "Attempt to connect to localhost - {}".format(self.host)) + self.session = pexpect.spawnu(command='bash', timeout=timeout, + maxread=100000) + + self.logpath = self._get_logpath() + if self.logpath: + self.session.logfile = open(self.logpath, 'w+') + + # Set prompt for matching + self.set_prompt(prompt) + self.send(r'export PS1="\u@\h\$ "') + self.expect() + LOG.debug("Connected to localhost!") + return + + except (OSError, pexpect.TIMEOUT, pexpect.EOF): + if not retry: + raise + + self.close() + LOG.debug("Retry in {} seconds".format(retry_interval)) + time.sleep(retry_interval) + + else: + raise exceptions.LocalHostError( + "Unable to spawn pexpect object on {}. Expected prompt: " + "{}".format(self.host, self.prompt)) + + def remove_virtualenv(self, venv_name=None, venv_dir=None, fail_ok=False, + deactivate_first=True, + python_executable=None): + + if not python_executable: + python_executable = sys.executable + + if not venv_name: + venv_name = ProjVar.get_var('RELEASE') + venv_dir = _get_virtualenv_dir(venv_dir) + + if deactivate_first: + self.deactivate_virtualenv(venv_name=venv_name) + + LOG.info("Removing virtualenv {}/{}".format(venv_dir, venv_name)) + cmd = "export WORKON_HOME={}; export VIRTUALENVWRAPPER_PYTHON={}; " \ + "source virtualenvwrapper.sh". \ + format(venv_dir, python_executable) + code, output = self.exec_cmd(cmd=cmd, fail_ok=fail_ok) + if code == 0: + code = self.exec_cmd("rmvirtualenv {}".format(venv_name), + fail_ok=fail_ok)[0] + if code == 0: + # Remove files generated by virtualwrapper + for line in output.splitlines(): + if 'user_scripts creating ' in line: + new_file = output.split('user_scripts creating ')[-1].\ + strip() + self.exec_cmd('rm -f {}'.format(new_file)) + LOG.info('virtualenv {} removed successfully'.format(venv_name)) + return True + + return False + + def create_virtualenv(self, venv_name=None, venv_dir=None, activate=True, + fail_ok=False, check_first=True, + python_executable=None): + if not venv_name: + venv_name = ProjVar.get_var('RELEASE') + venv_dir = _get_virtualenv_dir(venv_dir) + + if check_first: + if self.file_exists( + os.path.join(venv_dir, venv_name, 'bin', 'activate')): + if activate: + self.activate_virtualenv(venv_name=venv_name, + venv_dir=venv_dir, fail_ok=fail_ok) + return + + if not python_executable: + python_executable = sys.executable + + LOG.info("Creating virtualenv {}/{}".format(venv_dir, venv_name)) + os.makedirs(venv_dir, exist_ok=True) + cmd = "cd {}; virtualenv --python={} {}".format(venv_dir, + python_executable, + venv_name) + code = self.exec_cmd(cmd=cmd, fail_ok=fail_ok)[0] + if code == 0: + LOG.info('virtualenv {} created successfully'.format(venv_name)) + if activate: + self.activate_virtualenv(venv_name=venv_name, venv_dir=venv_dir, + fail_ok=fail_ok) + + return venv_name, venv_dir, python_executable + + def activate_virtualenv(self, venv_name=None, venv_dir=None, fail_ok=False): + if not venv_name: + venv_name = ProjVar.get_var('RELEASE') + venv_dir = _get_virtualenv_dir(venv_dir) + assert os.path.exists(venv_dir) + + LOG.info("Activating virtualenv {}/{}".format(venv_dir, venv_name)) + code = 
self.exec_cmd( + 'cd {}; source {}/bin/activate'.format(venv_dir, venv_name), + fail_ok=fail_ok)[0] + if code == 0: + new_prompt = r'\({}\) {}'.format(venv_name, self.get_prompt()) + self.set_prompt(prompt=new_prompt) + LOG.info('virtualenv {} activated successfully'.format(venv_name)) + + time.sleep(3) + code, output = self.exec_cmd('pip -V') + if code != 0: + LOG.warning('pip is not working properly. Listing env variables.') + all_env = self.exec_cmd('declare -p')[1] + LOG.info("declare -p: \n{}".format(all_env)) + + def deactivate_virtualenv(self, venv_name, new_prompt=None): + # determine on the new prompt + if not new_prompt: + if venv_name in self.prompt: + new_prompt = self.prompt.split(r'\({}\) '.format(venv_name))[-1] + else: + new_prompt = self.initial_prompt + + LOG.info("Deactivating virtualenv {}".format(venv_name)) + self.set_prompt(new_prompt) + code, output = self.exec_cmd('deactivate', fail_ok=True) + if code == 0 or 'command not found' in output: + LOG.info('virtualenv {} deactivated successfully'.format(venv_name)) + else: + raise exceptions.LocalHostError( + "Unable to deactivate venv. Output: {}".format(output)) + + def get_ssh_key(self, ssh_key_path=None): + if not ssh_key_path: + ssh_key_path = os.path.expanduser('~/.ssh/id_rsa_stxauto') + # KNOWN_HOSTS_PATH = SSH_DIR + "/known_hosts" + # REMOVE_HOSTS_SSH_KEY_CMD = "ssh-keygen -f {} -R {}" + if not self.file_exists(ssh_key_path): + self.exec_cmd("ssh-keygen -f {} -t rsa -N ''".format(ssh_key_path), + fail_ok=False) + ssh_key = self.exec_cmd( + "ssh-keygen -y -f {} -P ''".format(ssh_key_path), fail_ok=False) + + return ssh_key + + def ping_server(self, server, ping_count=5, timeout=60, fail_ok=False, + retry=0): + """ + + Args: + server (str): server ip to ping + ping_count (int): + timeout (int): max time to wait for ping response in seconds + fail_ok (bool): whether to raise exception if packet loss rate is + 100% + retry (int): + + Returns (int): packet loss percentile, such as 100, 0, 25 + + """ + output = packet_loss_rate = None + for i in range(max(retry + 1, 1)): + cmd = 'ping -c {} {}'.format(ping_count, server) + code, output = self.exec_cmd(cmd=cmd, expect_timeout=timeout, + fail_ok=True) + if code != 0: + packet_loss_rate = 100 + else: + packet_loss_rate = re.findall(PING_LOSS_RATE, output)[-1] + + packet_loss_rate = int(packet_loss_rate) + if packet_loss_rate < 100: + if packet_loss_rate > 0: + LOG.warning( + "Some packets dropped when ping from {} ssh session " + "to {}. Packet loss rate: {}%". 
+ format(self.host, server, packet_loss_rate)) + else: + LOG.info("All packets received by {}".format(server)) + break + + LOG.info("retry in 3 seconds") + time.sleep(3) + else: + msg = "Ping from {} to {} failed.".format(self.host, server) + if not fail_ok: + raise exceptions.LocalHostError(msg) + else: + LOG.warning(msg) + + untransmitted_packets = re.findall(r"(\d+) packets transmitted,", + output) + if untransmitted_packets: + untransmitted_packets = int(ping_count) - int( + untransmitted_packets[0]) + else: + untransmitted_packets = ping_count + + return packet_loss_rate, untransmitted_packets + + +def _get_virtualenv_dir(venv_dir=None): + if not venv_dir: + if ProjVar.get_var('LOG_DIR'): + lab_logs_dir = os.path.dirname(ProjVar.get_var( + 'LOG_DIR')) # e.g., .../AUTOMATION_LOGS/ip_18_19/ + venv_dir = os.path.join(lab_logs_dir, '.virtualenvs') + else: + venv_dir = os.path.expanduser('~') + return venv_dir diff --git a/automated-pytest-suite/utils/clients/ssh.py b/automated-pytest-suite/utils/clients/ssh.py new file mode 100644 index 0000000..65539fb --- /dev/null +++ b/automated-pytest-suite/utils/clients/ssh.py @@ -0,0 +1,1688 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + + +import os +import re +import threading +import time +from contextlib import contextmanager + +import pexpect +from pexpect import pxssh + +from consts.auth import Guest, HostLinuxUser +from consts.stx import Prompt, DATE_OUTPUT +from consts.lab import Labs, NatBoxes +from consts.proj_vars import ProjVar +from utils import exceptions, local_host +from utils.tis_log import LOG + +# setup color.format strings +colorred = "\033[1;31m{0}\033[00m" +colorgrn = "\033[1;32m{0}\033[00m" +colorblue = "\033[1;34m{0}\033[00m" +coloryel = "\033[1;34m{0}\033[00m" + +CONTROLLER_PROMPT = Prompt.CONTROLLER_PROMPT +ADMIN_PROMPT = Prompt.ADMIN_PROMPT +TENANT1_PROMPT = Prompt.TENANT1_PROMPT +TENANT2_PROMPT = Prompt.TENANT2_PROMPT +COMPUTE_PROMPT = Prompt.COMPUTE_PROMPT +PASSWORD_PROMPT = Prompt.PASSWORD_PROMPT +ROOT_PROMPT = Prompt.ROOT_PROMPT +CONNECTION_REFUSED = '.*Connection refused.*' +AUTHORIZED_KEYS_FPATH = "~/.ssh/authorized_keys" + +_SSH_OPTS = (' -o RSAAuthentication=no' + + ' -o PubkeyAuthentication=no' + + ' -o StrictHostKeyChecking=no' + + ' -o UserKnownHostsFile=/dev/null') + +_SSH_OPTS_UBUNTU_VM = (' -o RSAAuthentication=no' + + ' -o StrictHostKeyChecking=no' + + ' -o UserKnownHostsFile=/dev/null') + +EXIT_CODE_CMD = 'echo $?' +TIMEOUT_EXPECT = 10 + +RSYNC_SSH_OPTIONS = ['-o StrictHostKeyChecking=no', + '-o UserKnownHostsFile=/dev/null'] + + +class SSHClient: + """ + Base SSH Class that uses pexpect and pexpect.pxssh + + Supports: + Multiple sessions, via instanciation of session objects + connect connects a session + send sends string to remote host + expect() waits for prompt + expect('value') expects 'value' + expect('value', show_exp=yes) expects 'value' and prints + value found + expect(var) expects python variable + expect('\w+::\w+') expect short IPv6 address like 2001::0001 + close() disconnects session + reconnect() reconnects to session + """ + + def __init__(self, host, user=HostLinuxUser.get_user(), + password=HostLinuxUser.get_password(), + force_password=True, initial_prompt=CONTROLLER_PROMPT, + timeout=60, session=None, + searchwindownsize=None, port=None): + """ + Initiate an object for connecting to remote host + Args: + host: hostname or ip. such as "yow-cgcs-ironpass-1.wrs.com" or + "128.224.151.212" + user: linux username for login to host. 
such as "sysadmin" + password: password for given user. such as "Li69nux*" + + Returns: + + """ + + self.host = host + self.user = user + self.password = password + self.initial_prompt = initial_prompt + self.prompt = initial_prompt + self.session = session + self.cmd_sent = '' + self.cmd_output = '' + self.force_password = force_password + self.timeout = timeout + self.searchwindowsize = searchwindownsize + self.logpath = None + self.port = port + + def _get_logpath(self): + lab_list = [getattr(Labs, attr) for attr in dir(Labs) if + not attr.startswith('__')] + lab_list = [lab_ for lab_ in lab_list if isinstance(lab_, dict)] + for lab in lab_list: + if lab.get('floating ip') == self.host or \ + lab.get('controller-0 ip') == self.host \ + or lab.get('external_ip') == self.host: + lab_name = lab.get('short_name') + break + else: + lab_name = self.host + + log_dir = ProjVar.get_var('LOG_DIR') + if log_dir: + current_thread = threading.current_thread() + if current_thread is threading.main_thread(): + logpath = log_dir + '/ssh_' + lab_name + ".log" + else: + log_dir += '/threads/' + logpath = log_dir + current_thread.name + '_ssh_' + lab_name \ + + ".log" + os.makedirs(log_dir, exist_ok=True) + else: + logpath = None + + return logpath + + def connect(self, retry=False, retry_interval=3, retry_timeout=300, + prompt=None, + use_current=True, timeout=None): + + # Do nothing if current session is connected and force_close is False: + if self._is_alive() and use_current and self.is_connected(): + LOG.debug("Already connected to {}. Do nothing.".format(self.host)) + # LOG.debug("ID of the session: {}".format(id(self))) + return + + # use original prompt instead of self.prompt when connecting in case + # of prompt change during a session + if not prompt: + prompt = self.initial_prompt + if timeout is None: + timeout = self.timeout + + # Connect to host + LOG.info("Attempt to connect to host - {}".format(self.host)) + end_time = time.time() + retry_timeout + while time.time() < end_time: + # LOG into remote host + # print(str(self.searchwindowsize)) + try: + self.session = pxssh.pxssh( + encoding='utf-8', searchwindowsize=self.searchwindowsize) + + # set to ignore ssh host fingerprinting + self.session.SSH_OPTS = _SSH_OPTS + self.session.force_password = self.force_password + self.session.maxread = 100000 + self.logpath = self._get_logpath() + + if self.logpath: + self.session.logfile = open(self.logpath, 'a+') + + # Login + self.session.login(self.host, self.user, self.password, + login_timeout=timeout, + port=self.port, auto_prompt_reset=False, + quiet=False) + + # Set prompt for matching + self.set_prompt(prompt) + + # try to goto next line to ensure login really succeeded. + # pxssh login method has a bug where it + # almost won't report any login failures. + # Login successful if prompt matching is found + if self.is_connected(): + LOG.info("Login successful!") + # LOG.debug(self.session) + # next 5 lines change ssh window size and flush its buffer + self.session.setwinsize(150, 250) + # self.session.maxread = 100000 + self.send() + + end_time = time.time() + 20 + while time.time() < end_time: + index = self.expect(timeout=3, fail_ok=True) + if index != 0: + break + else: + LOG.warning( + "Still getting prompt from the buffer. Buffer " + "might not be cleared yet.") + + self.exec_cmd('unset PROMPT_COMMAND', get_exit_code=False) + self.exec_cmd('export TMOUT=0', get_exit_code=False) + return + + # retry if this line is reached. it would've returned if + # login succeeded. 
+ LOG.debug("Login failed although no exception caught.") + if not retry: + raise exceptions.SSHException("Unable to connect to host") + + # pxssh has a bug where the TIMEOUT exception during pxssh.login + # is completely eaten. i.e., it will still + # pretend login passed even if timeout exception was thrown. So + # below exceptions are unlikely to be received + # at all. But leave as is in case pxssh fix it in future releases. + except (OSError, pexpect.TIMEOUT, pxssh.TIMEOUT, pexpect.EOF, + pxssh.ExceptionPxssh) as e: + # fail login if retry=False + # LOG.debug("Reset session.after upon ssh error") + # self.session.after = '' + if not retry: + raise + + # don't retry if login credentials incorrect + if "permission denied" in e.__str__(): + LOG.error( + "Login credentials denied by {}. User: {} Password: " + "{}".format(self.host, self.user, self.password)) + raise + + # print out error for more info before retrying + LOG.debug("Login failed due to error: {}".format(e.__str__())) + + if 'password refused' in e.__str__(): + if self.searchwindowsize is None: + before_str = self._parse_output(self.session.before) + after_str = self._parse_output(self.session.after) + output = before_str + after_str + if 'your password' in output: + LOG.warning( + "Login failed possibly due to password expire " + "warning. " + "Retry with small searchwindowsize") + self.searchwindowsize = 50 + else: + raise + else: + self.searchwindowsize = None + raise + + self.close() + LOG.debug("Retry in {} seconds".format(retry_interval)) + time.sleep(retry_interval) + + else: + raise exceptions.SSHRetryTimeout("Host: {}, User: {}, Password: {}". + format(self.host, self.user, + self.password)) + + def _is_alive(self): + return self.session is not None and self.session.isalive() + + def is_connected(self): + if not self.session: + return False + + # Connection is good if send and expect commands can be executed + try: + self.send() + except OSError: + return False + return self.expect(timeout=3, fail_ok=True) == 0 + + def wait_for_disconnect(self, timeout=120, check_interval=5, fail_ok=False): + """ Wait for ssh connection disconnect """ + end_time = time.time() + timeout + while time.time() < end_time: + if not self.is_connected(): + LOG.info("ssh session to {} disconnected".format(self.host)) + return True + time.sleep(check_interval) + + msg = "Did not disconnect to {} within {}s".format(self.host, timeout) + LOG.warning(msg) + if not fail_ok: + raise exceptions.SSHException(msg) + return False + + def send(self, cmd='', reconnect=False, reconnect_timeout=300, flush=False): + """ + goto next line if no cmd is specified + Args: + cmd: + reconnect: + reconnect_timeout: + flush: whether to flush out the expect buffer before sending a + new command + + Returns:number of bytes sent + + """ + if flush: + self.flush() + + LOG.debug("Send '{}'".format(cmd)) + + try: + rtn = self.session.sendline(cmd) + except Exception as e: + if not reconnect: + raise + else: + LOG.exception("Failed to send line. 
{}".format(e.__str__())) + self.close() + self.connect(retry_timeout=reconnect_timeout) + rtn = self.session.sendline(cmd) + + # LOG.debug("Command sent successfully") + self.cmd_sent = cmd + + return str(rtn) + + def send_sudo(self, cmd='', reconnect=False, expt_pswd_timeout=60, + reconnect_timeout=300, flush=False): + cmd = 'sudo ' + cmd + self.send(cmd, reconnect=reconnect, reconnect_timeout=reconnect_timeout, + flush=flush) + pw_prompt = Prompt.PASSWORD_PROMPT + + index = self.expect(pw_prompt, timeout=expt_pswd_timeout, + searchwindowsize=100, fail_ok=True) + if index == 0: + self.send(self.password) + + def flush(self, timeout=3): + """ + flush before sending the next command. + Returns: + + """ + self.expect(fail_ok=True, timeout=timeout) + LOG.debug("Buffer is flushed by reading out the rest of the output") + + def expect(self, blob_list=None, timeout=60, fail_ok=False, rm_date=False, + searchwindowsize=None): + """ + Look for match in the output. Stop if 1) match is found, 2) match is + not found and prompt is reached, 3) match + is not found and timeout is reached. For scenario 2 and 3, either + throw timeout exception or return False based + on the 'fail' argument. + Args: + blob_list: pattern(s) to find match for + timeout: max timeout value to wait for pattern(s) + fail_ok: True or False. When False: throws exception if match not + found. When True: return -1 when match not + found. + rm_date (bool): Whether to remove the date output before expecting + searchwindowsize (int|None): number of chars from end of the + buffer to search for the strings in blob_list + + Returns: the index of the pattern matched in the output, assuming + that blob can be a list. + + Examples: + expect(): to wait for prompt + expect('good'): to wait for a match starts with 'good' + expect(['good', 'bad'], 10, False): to wait for a match start + with 'good' or 'bad' with 10seconds timeout + + """ + + if blob_list is None: + blob_list = self.prompt + + if not isinstance(blob_list, (list, tuple)): + blob_list = [blob_list] + + kwargs = {} + if searchwindowsize is not None: + kwargs['searchwindowsize'] = searchwindowsize + elif blob_list == [self.prompt]: + kwargs['searchwindowsize'] = 100 + + try: + index = self.session.expect(blob_list, timeout=timeout, **kwargs) + except pexpect.EOF: + if fail_ok: + return -1 + else: + LOG.warning("EOF caught.") + raise + except pexpect.TIMEOUT: + if fail_ok: + return -2 + else: + LOG.warning("No match found for {}. \nexpect timeout.".format( + blob_list)) + raise + except Exception as e: + if fail_ok: + return -100 + else: + LOG.warning( + "Exception occurred when expecting {}. 
" + "{}".format(blob_list, e.__str__())) + raise + + # Match found, reformat the outputs + before_str = self._parse_output(self.session.before) + after_str = self._parse_output(self.session.after) + output = before_str + after_str + if not self.cmd_sent == '': + output_list = output.split('\r\n') + output_list[0] = '' # do not display the sent command + + if rm_date: # remove date output if any + if re.search(DATE_OUTPUT, output_list[-1]): + output_list = output_list[:-1] + + output = '\n'.join(output_list) + self.cmd_sent = '' # Make sure sent line is only removed once + + self.cmd_output = output + extra_str = '' # extra logging info + + LOG.debug("Output{}: {}".format(extra_str, output)) + return index + + def __force_end(self, force): + if force: + self.flush(3) + self.send_control('c') + self.flush(10) + + def exec_cmd(self, cmd, expect_timeout=60, reconnect=False, + reconnect_timeout=300, err_only=False, rm_date=False, + fail_ok=True, get_exit_code=True, blob=None, force_end=False, + searchwindowsize=None, + prefix_space=False): + """ + + Args: + cmd: + expect_timeout: + reconnect: + reconnect_timeout: + err_only: if true, stdout will not be included in output + rm_date (bool): weather to remove date output from cmd output + before returning + fail_ok (bool): whether to raise exception when non-zero + exit-code is returned + get_exit_code + blob + force_end + searchwindowsize (int): max chars to look for match from the end + of the output. + Usage: when expecting a prompt, set this to slightly larger + than the number of chars of the prompt, + to speed up the search, and to avoid matching in the + middle of the output. + prefix_space + + Returns (tuple): (exit code (int), command output (str)) + + """ + if blob is None: + blob = self.prompt + + LOG.debug("Executing command...") + if err_only: + cmd += ' 1> /dev/null' # discard stdout + + if prefix_space: + cmd = ' {}'.format(cmd) + + self.send(cmd, reconnect, reconnect_timeout) + code_force = 0 + try: + self.expect(blob_list=blob, timeout=expect_timeout, + searchwindowsize=searchwindowsize) + except pexpect.TIMEOUT as e: + code_force = 130 + self.send_control('c') + self.flush(timeout=10) + if fail_ok: + LOG.warning(e.__str__()) + else: + raise + + code, output = self._process_exec_result(rm_date, + get_exit_code=get_exit_code) + if code_force != 0: + code = code_force + + self.__force_end(force_end) + + if code > 0 and not fail_ok: + raise exceptions.SSHExecCommandFailed( + "Non-zero return code for cmd: {}. 
Output: {}".format(cmd, + output)) + + return code, output + + def _process_exec_result(self, rm_date=True, get_exit_code=True): + cmd_output_list = self.cmd_output.split('\n')[0:-1] # exclude prompt + # LOG.info("cmd output list: {}".format(cmd_output_list)) + # cmd_output_list[0] = '' # + # exclude command, already done in expect + + if rm_date: # remove date output if any + if re.search(DATE_OUTPUT, cmd_output_list[-1]): + cmd_output_list = cmd_output_list[:-1] + + cmd_output = '\n'.join(cmd_output_list) + + if get_exit_code: + exit_code = self.get_exit_code() + else: + exit_code = -1 + + cmd_output = cmd_output.strip() + return exit_code, cmd_output + + def process_cmd_result(self, cmd, rm_date=True, get_exit_code=True): + return self._process_exec_result(rm_date=rm_date, + get_exit_code=get_exit_code) + + @staticmethod + def _parse_output(output): + if type(output) is bytes: + output = output.decode("utf-8") + return str(output) + + def set_prompt(self, prompt=CONTROLLER_PROMPT): + self.prompt = prompt + + def get_prompt(self): + return self.prompt + + def get_exit_code(self): + self.send(EXIT_CODE_CMD) + self.expect(timeout=30, fail_ok=False) + matches = re.findall("\n([-+]?[0-9]+)\n", self.cmd_output) + return int(matches[-1]) + + def get_hostname(self): + return self.exec_cmd('hostname', get_exit_code=False)[1].splitlines()[0] + + def rsync(self, source, dest_server, dest, dest_user=None, + dest_password=None, ssh_port=None, extra_opts=None, + pre_opts=None, timeout=120, fail_ok=False): + + dest_user = dest_user or HostLinuxUser.get_user() + dest_password = dest_password or HostLinuxUser.get_password() + if extra_opts: + extra_opts_str = ' '.join(extra_opts) + ' ' + else: + extra_opts_str = '' + + if not pre_opts: + pre_opts = '' + + ssh_opts = 'ssh {}'.format(' '.join(RSYNC_SSH_OPTIONS)) + if ssh_port: + ssh_opts += ' -p {}'.format(ssh_port) + + cmd = "{} rsync -are \"{}\" {} {} ".format(pre_opts, ssh_opts, + extra_opts_str, source) + cmd += "{}@{}:{}".format(dest_user, dest_server, dest) + + LOG.info( + "Rsyncing file(s) from {} to {}: {}".format(self.host, dest_server, + cmd)) + self.send(cmd) + index = self.expect(blob_list=[self.prompt, PASSWORD_PROMPT], + timeout=timeout) + + if index == 1: + self.send(dest_password) + self.expect(timeout=timeout, searchwindowsize=100, fail_ok=fail_ok) + + code, output = self._process_exec_result(rm_date=True) + if code != 0 and not fail_ok: + raise exceptions.SSHExecCommandFailed( + "Non-zero return code for rsync cmd: {}. Output: {}". 
+ format(cmd, output)) + + return code, output + + def scp_on_source_to_localhost(self, source_file, dest_password, + dest_user=None, dest_path=None, timeout=120): + + if not dest_path: + dest_path = ProjVar.get_var('TEMP_DIR') + '/' + + to_host = local_host.get_host_ip() + ':' + to_user = ( + dest_user if dest_user is not None else + local_host.get_user()) + '@' + + destination = to_user + to_host + dest_path + scp_cmd = ' '.join([ + 'scp -o StrictHostKeyChecking=no -o ' + 'UserKnownHostsFile=/dev/null -r', + source_file, + destination]).strip() + LOG.info( + "Copying files from ssh client to {}: {}".format(to_host, scp_cmd)) + self.send(scp_cmd) + index = self.expect([self.prompt, PASSWORD_PROMPT, Prompt.ADD_HOST], + timeout=timeout) + if index == 2: + self.send('yes') + index = self.expect([self.prompt, PASSWORD_PROMPT], timeout=timeout) + if index == 1: + self.send(dest_password) + index = self.expect() + if not index == 0: + raise exceptions.SSHException("Failed to scp files") + + def scp_on_dest(self, source_user, source_ip, source_path, dest_path, + source_pswd, timeout=3600, cleanup=True, + is_dir=False): + source = source_path + if source_ip: + source = '{}:{}'.format(source_ip, source) + if source_user: + source = '{}@{}'.format(source_user, source) + + option = '-r ' if is_dir else '' + scp_cmd = 'scp -o StrictHostKeyChecking=no -o ' \ + 'UserKnownHostsFile=/dev/null {}{} ' \ + '{}'.format(option, source, dest_path) + + try: + self.send(scp_cmd) + index = self.expect( + [self.prompt, Prompt.PASSWORD_PROMPT, Prompt.ADD_HOST], + timeout=timeout) + if index == 2: + self.send('yes') + index = self.expect([self.prompt, Prompt.PASSWORD_PROMPT], + timeout=timeout) + if index == 1: + self.send(source_pswd) + index = self.expect(timeout=timeout) + if index != 0: + raise exceptions.SSHException("Failed to scp files") + + exit_code = self.get_exit_code() + if not exit_code == 0: + raise exceptions.CommonError("scp unsuccessfully") + + except: + if cleanup: + LOG.info( + "Attempt to remove {} to cleanup the system due to scp " + "failed".format( + dest_path)) + self.exec_cmd('rm -f {}'.format(dest_path), fail_ok=True, + get_exit_code=False) + raise + + def scp_on_source(self, source_path, dest_user, dest_ip, dest_path, + dest_password, timeout=3600, is_dir=False): + dest = dest_path + if dest_ip: + dest = '{}:{}'.format(dest_ip, dest) + if dest_user: + dest = '{}@{}'.format(dest_user, dest) + + if is_dir: + if not source_path.endswith('/'): + source_path += '/' + source_path = '-r {}'.format(source_path) + + scp_cmd = 'scp -o StrictHostKeyChecking=no -o ' \ + 'UserKnownHostsFile=/dev/null {} {}'. 
\ + format(source_path, dest) + + self.send(scp_cmd) + index = self.expect( + [self.prompt, Prompt.PASSWORD_PROMPT, Prompt.ADD_HOST], + timeout=timeout) + if index == 2: + self.send('yes') + index = self.expect([self.prompt, Prompt.PASSWORD_PROMPT], + timeout=timeout) + if index == 1: + self.send(dest_password) + index = self.expect(timeout=timeout) + if index != 0: + raise exceptions.SSHException("Failed to scp files") + + exit_code = self.get_exit_code() + if not exit_code == 0: + raise exceptions.CommonError("scp unsuccessfully") + + def file_exists(self, file_path): + return self.exec_cmd('stat {}'.format(file_path), fail_ok=True)[0] == 0 + + @contextmanager + def login_as_root(self, timeout=10): + self.send('sudo su -') + index = self.expect([ROOT_PROMPT, PASSWORD_PROMPT], timeout=timeout) + if index == 1: + self.send(self.password) + self.expect(ROOT_PROMPT) + original_prompt = self.get_prompt() + self.set_prompt(ROOT_PROMPT) + self.set_session_timeout(timeout=0) + try: + yield self + finally: + try: + current_user = self.get_current_user( + prompt=[ROOT_PROMPT, original_prompt]) + except: + current_user = None + if current_user == 'root': + self.set_prompt(original_prompt) + self.send('exit') + self.expect() + + def exec_sudo_cmd(self, cmd, expect_timeout=60, rm_date=True, fail_ok=True, + get_exit_code=True, + searchwindowsize=None, strict_passwd_prompt=False, + extra_prompt=None, prefix_space=False): + """ + Execute a command with sudo. + + Args: + cmd (str): command to execute. such as 'ifconfig' + expect_timeout (int): timeout waiting for command to return + rm_date (bool): whether to remove date info at the end of the output + fail_ok (bool): whether to raise exception when non-zero exit + code is returned + get_exit_code + searchwindowsize (int): max chars to look for match from the end + of the output. + Usage: when expecting a prompt, set this to slightly larger + than the number of chars of the prompt, + to speed up the search, and to avoid matching in the + middle of the output. + strict_passwd_prompt (bool): whether to search output with strict + password prompt (Not recommended. Use + searchwindowsize instead) + extra_prompt (str|None) + prefix_space (bool): prefix ' ' to cmd, so that it will not go + into bash history if HISTCONTROL=ignorespace + + Returns (tuple): (exit code (int), command output (str)) + + """ + cmd = 'sudo ' + cmd + if prefix_space: + cmd = ' {}'.format(cmd) + LOG.debug("Executing sudo command...") + self.send(cmd) + pw_prompt = Prompt.PASSWORD_PROMPT if not strict_passwd_prompt else \ + Prompt.SUDO_PASSWORD_PROMPT + prompts = [self.prompt] + if extra_prompt is not None: + prompts.append(extra_prompt) + prompts.append(pw_prompt) + + index = self.expect(prompts, timeout=expect_timeout, + searchwindowsize=searchwindowsize, fail_ok=fail_ok) + if index == prompts.index(pw_prompt): + self.send(self.password) + prompts.remove(pw_prompt) + self.expect(prompts, timeout=expect_timeout, + searchwindowsize=searchwindowsize, fail_ok=fail_ok) + + code, output = self._process_exec_result(rm_date, + get_exit_code=get_exit_code) + if code != 0 and not fail_ok: + raise exceptions.SSHExecCommandFailed( + "Non-zero return code for sudo cmd: {}. Output: {}". 
+ format(cmd, output)) + + return code, output + + def send_control(self, char='c'): + LOG.debug("Sending ctrl+{}".format(char)) + self.session.sendcontrol(char=char) + + def get_current_user(self, prompt=None): + output = self.exec_cmd('whoami', blob=prompt, expect_timeout=10, + get_exit_code=False)[1] + if output: + output = output.splitlines()[0] + + return output + + def close(self): + self.session.close(True) + LOG.debug("connection closed. host: {}, user: {}. Object ID: {}".format( + self.host, self.user, id(self))) + + def set_session_timeout(self, timeout=0): + self.send('TMOUT={}'.format(timeout)) + self.expect() + + def wait_for_cmd_output(self, cmd, content, timeout, strict=False, + regex=False, expt_timeout=10, + check_interval=3, disappear=False, + non_zero_rtn_ok=False, blob=None): + """ + Wait for given content to appear or disappear in cmd output. + + Args: + cmd (str): cmd to run repeatedly until given content + appears|disappears or timeout reaches + content (str): string expected to appear|disappear in cmd output + timeout (int): max seconds to wait for the expected content + strict (bool): whether to perform strict search (search is NOT + case sensitive even if strict=True) + regex (bool): whether given content is regex pattern + expt_timeout (int): max time to wait for cmd to return + check_interval (int): how long to wait to execute the cmd again + in seconds. + disappear (bool): whether to wait for content appear or disappear + non_zero_rtn_ok (bool): whether it's okay for cmd to have + none-zero return code. Raise exception if False. + blob (str): string to wait for + + Returns (bool): True if content appears in cmd output within max wait + time. + + """ + end_time = time.time() + timeout + while time.time() < end_time: + code, output = self.exec_cmd(cmd, expect_timeout=expt_timeout, + blob=blob) + if not non_zero_rtn_ok and code > 0: + raise exceptions.SSHExecCommandFailed( + "Get non-zero return code for command: {}".format(cmd)) + + content_exists = False + if regex: + if strict: + if re.match(content, output): + content_exists = True + else: + if re.search(content, output): + content_exists = True + else: + if strict: + if content.lower() == output.lower(): + content_exists = True + else: + if content.lower() in output.lower(): + content_exists = True + + if (content_exists and not disappear) or ( + not content_exists and disappear): + return True + + time.sleep(check_interval) + + else: + return False + + def wait_for_cmd_output_persists(self, cmd, content, timeout=60, + time_to_stay=10, strict=False, regex=False, + expt_timeout=10, check_interval=1, + exclude=False, non_zero_rtn_ok=False, + sudo=False, fail_ok=True): + """ + Wait for given content to be included/excluded in cmd output for more + than seconds. + + Args: + cmd (str): cmd to run repeatedly until given content + appears|disappears or timeout reaches + content (str): string expected to appear|disappear in cmd output + time_to_stay (int): how long the expected content be + included/excluded from cmd output to return True + timeout (int): max seconds to wait for content to consistently be + included/excluded from cmd output + strict (bool): whether to perform strict search (search is NOT + case sensitive even if strict=True) + regex (bool): whether given content is regex pattern + expt_timeout (int): max time to wait for cmd to return + check_interval (int): how long to wait to execute the cmd again + in seconds. 
+ exclude (bool): whether to wait for content be consistently + included or excluded from cmd output + non_zero_rtn_ok (bool): whether it's okay for cmd to have + none-zero return code. Raise exception if False. + sudo (bool): whether to run cmd using sudo + fail_ok (bool): whether to raise exception when False + + Returns (bool): True if content appears in cmd output within max wait + time. + + """ + end_time = time.time() + timeout + while time.time() < end_time: + + stay_end_time = time.time() + time_to_stay + while time.time() < stay_end_time: + if sudo: + code, output = self.exec_sudo_cmd( + cmd, expect_timeout=expt_timeout) + else: + code, output = self.exec_cmd( + cmd, expect_timeout=expt_timeout) + if not non_zero_rtn_ok and code > 0: + raise exceptions.SSHExecCommandFailed( + "Get non-zero return code for command: {}".format(cmd)) + + content_exists = False + if regex: + if strict: + if re.match(content, output): + content_exists = True + else: + if re.search(content, output): + content_exists = True + else: + if strict: + if content.lower() == output.lower(): + content_exists = True + else: + if content.lower() in output.lower(): + content_exists = True + + if (content_exists and not exclude) or ( + not content_exists and exclude): + time.sleep(check_interval) + continue + else: + LOG.debug("Reset stay start time") + break + else: + # Did not break - meaning time to stay has reached + return True + + else: + if fail_ok: + return False + extra_str = 'is not excluded' if exclude else 'did not persist' + raise exceptions.SSHException( + "Expected output from {} - '{}' {} for {} seconds within {} " + "seconds". + format(cmd, content, extra_str, time_to_stay, timeout)) + + def deploy_ssh_key(self, ssh_key=None): + if ssh_key: + self.exec_cmd("mkdir -p ~/.ssh/") + cmd = 'grep -q "{}" {}'.format(ssh_key, AUTHORIZED_KEYS_FPATH) + if self.exec_cmd(cmd) != 0: + LOG.info( + "Adding public key to {}".format(AUTHORIZED_KEYS_FPATH)) + self.exec_cmd(r'echo -e "{}\n" >> {}'.format( + ssh_key, AUTHORIZED_KEYS_FPATH)) + self.exec_cmd("chmod 700 ~/.ssh/ && chmod 644 {}".format( + AUTHORIZED_KEYS_FPATH)) + + def get_host(self): + return self.host + + def update_host(self, new_host): + self.host = new_host + + +class ContainerClient(SSHClient): + """ + Base class for Starting Docker Container + """ + + def __init__(self, ssh_client, entry_cmd, user='root', host=None, + password=None, initial_prompt=None, + timeout=60): + """ + Instantiate a container client + Args: + ssh_client: SSH Client object that's currently connected + entry_cmd: cmd to run to enter the container shell, e.g., + docker run -it -e /bin/bash + docker start ; docker attach + host: host to connect to from the existing ssh session + user: default user in container shell + password: password for given user + initial_prompt: prompt for container shell + + """ + if not initial_prompt: + initial_prompt = '.*{}@.*# .*'.format(user) + if not host: + host = ssh_client.host + if not password: + password = ssh_client.password + + super(ContainerClient, self).__init__(host=host, user=user, + password=password, + initial_prompt=initial_prompt, + timeout=timeout, + session=ssh_client.session) + self.parent = ssh_client + self.docker_cmd = entry_cmd + self.timeout = timeout + + def connect(self, retry=False, retry_interval=1, retry_timeout=60, + prompt=None, + use_current=True, use_password=False, timeout=30): + """ + Enter interactive mode for a container + Args: + retry: + retry_interval: + retry_timeout: + prompt: + use_current: + 
use_password: + timeout: + + Returns: + + """ + docker_cmd = self.docker_cmd + if prompt: + self.prompt = prompt + self.exec_sudo_cmd(docker_cmd, expect_timeout=timeout, + get_exit_code=False) + + # Known issue with docker where an extra ENTER is needed to show prompt + self.send() + self.expect(timeout=5) + + # Ensure exec_cmd works after above workaround + self.exec_cmd(cmd='', expect_timeout=5) + + def exec_cmd(self, cmd, expect_timeout=60, reconnect=False, + reconnect_timeout=300, err_only=False, rm_date=True, + fail_ok=True, get_exit_code=True, blob=None, force_end=False, + searchwindowsize=None, + prefix_space=False): + """ + + Args: + cmd: + expect_timeout: + reconnect: + reconnect_timeout: + err_only: if true, stdout will not be included in output + rm_date (bool): weather to remove date output from cmd output + before returning + fail_ok (bool): whether to raise exception when non-zero + exit-code is returned + get_exit_code + blob + force_end + searchwindowsize (int): max chars to look for match from the end + of the output. + Usage: when expecting a prompt, set this to slightly larger + than the number of chars of the prompt, + to speed up the search, and to avoid matching in the + middle of the output. + prefix_space + + Returns (tuple): (exit code (int), command output (str)) + + """ + if blob is None: + blob = [self.prompt, self.parent.prompt] + elif not isinstance(blob, (tuple, list)): + blob = [blob] + + LOG.debug("Executing command...") + if err_only: + cmd += ' 1> /dev/null' # discard stdout + + if prefix_space: + cmd = ' {}'.format(cmd) + + self.send(cmd, reconnect, reconnect_timeout) + code_force = 0 + try: + index = self.expect(blob_list=blob, timeout=expect_timeout, + searchwindowsize=searchwindowsize) + except pexpect.TIMEOUT as e: + code_force = 130 + index = 0 + self.send_control('c') + self.flush(timeout=10) + if fail_ok: + LOG.warning(e.__str__()) + else: + raise + + if blob[index] == self.parent.prompt: + # Connection lost. Returned to parent session. + _, output = self._process_exec_result(get_exit_code=False) + code = 100 + else: + code, output = self._process_exec_result( + get_exit_code=get_exit_code) + + if code_force != 0: + code = code_force + + self.__force_end(force_end) + + if code > 0 and not fail_ok: + raise exceptions.SSHExecCommandFailed( + "Non-zero return code for cmd: {}. Output: {}".format(cmd, + output)) + + return code, output + + def close(self, force=False): + if force or self.is_connected(): + self.send('exit') + self.parent.expect() + LOG.info( + "ssh session to {} is closed and returned to parent session {}". + format(self.host, self.parent.host)) + else: + LOG.info( + "ssh session to {} is not open. 
Flushing the buffer for " + "parent session.".format( + self.host)) + self.parent.flush() + + +class SSHFromSSH(SSHClient): + """ + Base class for ssh to another node from an existing ssh session + """ + + def __init__(self, ssh_client, host, user, password, force_password=True, + initial_prompt=COMPUTE_PROMPT, + timeout=60): + """ + + Args: + ssh_client: SSH Client object that's currently connected + host: host to connect to from the existing ssh session + user: username + password: password for given user + + Returns: + + """ + super(SSHFromSSH, self).__init__(host=host, user=user, + password=password, + force_password=force_password, + initial_prompt=initial_prompt, + timeout=timeout, + session=ssh_client.session) + self.parent = ssh_client + self.ssh_cmd = '/usr/bin/ssh{} {}@{}'.format(_SSH_OPTS, self.user, + self.host) + self.timeout = timeout + + def connect(self, retry=False, retry_interval=10, retry_timeout=300, + prompt=None, + use_current=True, use_password=True, timeout=None): + """ + + Args: + retry: + retry_interval: + retry_timeout: + timeout: + prompt: + use_current: + use_password + + Returns: + return the ssh client + + """ + self.logpath = self.parent.logpath + self.session.logfile = self.parent.session.logfile + + if timeout is None: + timeout = self.timeout + if prompt is None: + prompt = self.initial_prompt + + if use_current and self.is_connected(): + LOG.info( + "Already connected to {} from {}. Do " + "nothing.".format(self.host, self.parent.host)) + return + + LOG.info("Attempt to connect to {} from {}...".format(self.host, + self.parent.host)) + start_time = time.time() + end_time = start_time + retry_timeout + while time.time() < end_time: + self.send(self.ssh_cmd) + try: + res_index = self.expect( + [prompt, PASSWORD_PROMPT, Prompt.ADD_HOST, + self.parent.get_prompt()], + timeout=timeout, fail_ok=False, searchwindowsize=100) + if res_index == 3: + raise exceptions.SSHException( + "Unable to login to {}. \nOutput: " + "{}".format(self.host, self.cmd_output)) + + if res_index == 2: + self.send('yes') + self.expect([prompt, PASSWORD_PROMPT]) + + if res_index == 1: + if not use_password: + retry = False + raise exceptions.SSHException( + 'password prompt appeared. Non-password auth ' + 'failed.') + + self.send(self.password) + self.expect(prompt, timeout=timeout) + + # Set prompt for matching + self.set_prompt(prompt) + LOG.info( + "Successfully connected to {} from " + "{}!".format(self.host, self.parent.host)) + self.exec_cmd('export TMOUT=0') + return + + except (OSError, pxssh.TIMEOUT, pexpect.EOF, pxssh.ExceptionPxssh, + exceptions.SSHException) as e: + LOG.info("Unable to ssh to {}".format(self.host)) + if isinstance(e, pexpect.TIMEOUT): + self.parent.send_control('c') + self.parent.flush(timeout=3) + # fail login if retry=False + if not retry: + raise + # don't retry if login credentials incorrect + if "permission denied" in e.__str__().lower(): + LOG.error( + "Login credentials denied by {}. User: {} Password: " + "{}".format(self.host, self.user, self.password)) + raise + + LOG.info("Retry in {} seconds".format(retry_interval)) + time.sleep(retry_interval) + else: + try: + self.parent.flush() + except: + pass + raise exceptions.SSHRetryTimeout("Host: {}, User: {}, Password: {}". + format(self.host, self.user, + self.password)) + + def close(self, force=False): + if force or self.is_connected(): + self.send('exit') + self.parent.expect() + LOG.info( + "ssh session to {} is closed and returned to parent session {}". 
+ format(self.host, self.parent.host)) + else: + LOG.info( + "ssh session to {} is not open. Flushing the buffer for " + "parent session.".format(self.host)) + self.parent.flush() + + def is_connected(self): + # Connection is good if send and expect commands can be executed + try: + self.send() + except OSError: + return False + + index = self.expect( + blob_list=[self.prompt, self.parent.get_prompt(), pexpect.TIMEOUT], + timeout=3, + fail_ok=True) + if 2 == index: + self.send_control('c') + index = self.expect( + blob_list=[self.prompt, self.parent.get_prompt()], timeout=3, + fail_ok=True) + return 0 == index + + +class VMSSHClient(SSHFromSSH): + + def __init__(self, vm_ip, vm_img_name=Guest, user=None, password=None, + vm_ext_port=None, + natbox_client=None, prompt=None, timeout=60, retry=True, + retry_timeout=120): + """ + + Args: + vm_ip: + vm_img_name: + user: + password: + natbox_client: + prompt: + retry + retry_timeout + + Returns: + + """ + LOG.debug("vm_image_name: {}".format(vm_img_name)) + if vm_img_name is None: + vm_img_name = '' + + vm_img_name = vm_img_name.strip().lower() + + if not natbox_client: + natbox_client = NATBoxClient.get_natbox_client() + + if user: + if not password: + password = None + else: + for image_name in Guest.CREDS: + if image_name.lower() in vm_img_name.lower(): + vm_creds = Guest.CREDS[image_name] + user = vm_creds['user'] + password = vm_creds['password'] + break + else: + user = 'root' + password = 'root' + known_guests = list(Guest.CREDS.keys()) + + LOG.warning( + "User/password are not provided, and VM image type is not " + "in the list: {}. " + "Use root/root to login.".format(known_guests)) + + if prompt is None: + # prompt = r'.*{}\@{}.*\~.*[$#]'.format(user, + # str(vm_name).replace('_', '-')) + prompt = r'.*{}\@.*\~.*[$#]'.format(user) + super(VMSSHClient, self).__init__(ssh_client=natbox_client, host=vm_ip, + user=user, password=password, + initial_prompt=prompt, + timeout=timeout) + + # This needs to be modified in centos case. + if not password: + ssh_options = " -i {}{}".format( + ProjVar.get_var('NATBOX_KEYFILE_PATH'), _SSH_OPTS_UBUNTU_VM) + else: + ssh_options = _SSH_OPTS + + # Check if connecting to vm through port forwarding rule + if vm_ext_port: + self.ssh_cmd = 'ssh -vvv {} -p {} {}@{}'.format(ssh_options, + vm_ext_port, + self.user, + self.host) + else: + self.ssh_cmd = 'ssh -vvv {} {}@{}'.format(ssh_options, self.user, + self.host) + + self.connect(use_password=password, retry=retry, + retry_timeout=retry_timeout) + + +class FloatingClient(SSHClient): + def __init__(self, floating_ip, user=HostLinuxUser.get_user(), + password=HostLinuxUser.get_password(), + initial_prompt=CONTROLLER_PROMPT): + + # get a list of floating ips for all known labs + __lab_list = [getattr(Labs, attr) for attr in dir(Labs) if + not attr.startswith(r'__')] + __lab_list = [lab_ for lab_ in __lab_list if isinstance(lab_, dict)] + ips = [] + for lab in __lab_list: + ip = lab.get('floating ip') + ips.append(ip) + if not floating_ip.strip() in ips: + raise ValueError( + "Invalid input. 
No matching floating ips found in lab.Labs " + "class") + super(FloatingClient, self).__init__(host=floating_ip, user=user, + password=password, + initial_prompt=initial_prompt) + + +class NATBoxClient: + # a list of natbox dicts from lab.NatBox class + @classmethod + def _get_natbox_list(cls): + return [getattr(NatBoxes, attr) for attr in dir(NatBoxes) if + attr.startswith('NAT_')] + + # internal dict that holds the natbox client if set_natbox_client was called + __natbox_ssh_map = {} + + _PROMPT = r'\@.*[$#]' # use user+_PROMPT to differentiate before + # and after ssh to vm + + @classmethod + def get_natbox_client(cls, natbox_ip=None): + """ + + Args: + natbox_ip (str): natbox ip + + Returns (SSHClient): natbox ssh client + + """ + curr_thread = threading.current_thread() + idx = 0 if curr_thread is threading.main_thread() else int( + curr_thread.name.split('-')[-1]) + if not natbox_ip: + natbox_ip = ProjVar.get_var('NATBOX').get('ip') + num_natbox = len(cls.__natbox_ssh_map) + if num_natbox == 0: + raise exceptions.NatBoxClientUnsetException + + if len(cls.__natbox_ssh_map[natbox_ip]) > idx: + nat_client = cls.__natbox_ssh_map[natbox_ip][ + idx] # KeyError will be thrown if not exist + LOG.info("Getting NatBox Client...") + return nat_client + + LOG.warning('No NatBox client set for Thread-{}'.format(idx)) + return None + + @classmethod + def set_natbox_client(cls, natbox_ip=None): + if not natbox_ip: + natbox_dict = ProjVar.get_var('NATBOX') + if not natbox_dict: + natbox_dict = NatBoxes.NAT_BOX_HW_EXAMPLE + ProjVar.set_var(NATBOX=natbox_dict) + natbox_ip = natbox_dict['ip'] + + for natbox in cls._get_natbox_list(): + ip = natbox.get('ip') + if ip == natbox_ip.strip(): + curr_thread = threading.current_thread() + idx = 0 if curr_thread is threading.main_thread() else int( + curr_thread.name.split('-')[-1]) + user = natbox.get('user') + if ip == 'localhost': + # use localhost as natbox + from utils.clients.local import LocalHostClient + nat_ssh = LocalHostClient() + else: + nat_prompt = natbox.get('prompt', user+cls._PROMPT) + nat_ssh = SSHClient(ip, user, natbox.get('password'), + initial_prompt=nat_prompt) + nat_ssh.connect(use_current=False) + + if ip not in cls.__natbox_ssh_map: + cls.__natbox_ssh_map[ip] = [] + + if len(cls.__natbox_ssh_map[ip]) == idx: + cls.__natbox_ssh_map[ip].append(nat_ssh) + elif len(cls.__natbox_ssh_map[ip]) > idx: + cls.__natbox_ssh_map[ip][idx] = nat_ssh + else: + if ip == 'localhost': + from utils.clients.local import LocalHostClient + new_ssh = LocalHostClient() + else: + new_ssh = SSHClient(ip, user, natbox.get('password'), + initial_prompt=user + cls._PROMPT) + + new_ssh.connect(use_current=False) + while len(cls.__natbox_ssh_map[ip]) < idx: + cls.__natbox_ssh_map[ip].append(new_ssh) + cls.__natbox_ssh_map[ip].append(nat_ssh) + + LOG.info("NatBox {} ssh client is set".format(ip)) + return nat_ssh + + raise ValueError( + ("No matching natbox ip found from natbox list. IP provided: {}\n" + "List of natbox(es) available: {}").format(natbox_ip, + cls._get_natbox_list())) + + +class ControllerClient: + # Each entry is a lab dictionary such as Labs.VBOX. For newly created + # dict entry, 'name' must be provided. + __lab_attr_list = [attr for attr in dir(Labs) if not attr.startswith('__')] + __lab_list = [getattr(Labs, attr) for attr in __lab_attr_list] + __lab_list = [lab for lab in __lab_list if isinstance(lab, dict)] + __lab_ssh_map = {} # item such as 'PV0': [con_ssh, ...] 
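    # A minimal usage sketch of the NATBoxClient and ControllerClient
    # registries (illustrative only, not part of this module; 'lab' is
    # assumed to be the lab dict from consts.lab / ProjVar):
    #
    #   con_ssh = SSHClient(lab['floating ip'],
    #                       user=HostLinuxUser.get_user(),
    #                       password=HostLinuxUser.get_password(),
    #                       initial_prompt=CONTROLLER_PROMPT)
    #   con_ssh.connect()
    #   ControllerClient.set_active_controller(con_ssh)
    #   NATBoxClient.set_natbox_client()   # defaults to ProjVar 'NATBOX' entry
    #
    #   # later, from a keyword or testcase:
    #   con_ssh = ControllerClient.get_active_controller()
    #   code, out = con_ssh.exec_cmd('hostname', fail_ok=False)
    #   nat_ssh = NATBoxClient.get_natbox_client()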
+ + __default_name = None + __prev_client = None + __prev_idx = None + + @classmethod + def get_active_controller(cls, name=None, fail_ok=False): + """ + Attempt to match given lab or current lab, otherwise return first ssh + Args: + name: The lab dictionary name in Labs class, such as 'PV0', 'HP380' + fail_ok: when True: return None if no active controller was set + + Returns: + + """ + if not name: + if cls.__default_name: + name = cls.__default_name + else: + lab_dict = ProjVar.get_var('lab') + if lab_dict is None: + return None + + for lab_ in cls.__lab_list: + if lab_dict['floating ip'] == lab_.get('floating ip'): + name = lab_.get('short_name') + break + else: + name = 'no_name' + + if name in ('SystemController', 'central_region'): + name = 'RegionOne' + + curr_thread = threading.current_thread() + idx = 0 if curr_thread is threading.main_thread() else int( + curr_thread.name.split('-')[-1]) + for lab_ in cls.__lab_ssh_map: + if lab_ == name: + controller_ssh = cls.__lab_ssh_map[lab_][idx] + if isinstance(controller_ssh, SSHClient): + msg = "Getting active controller client for {}".format(lab_) + if name != cls.__prev_client or idx != cls.__prev_idx: + LOG.info(msg) + cls.__prev_client = name + cls.__prev_idx = idx + else: + LOG.debug(msg) + return controller_ssh + + if fail_ok: + LOG.warning('No ssh client found for {}'.format(name)) + return None + raise exceptions.ActiveControllerUnsetException( + ("The name - {} does not have a corresponding " + "controller ssh session set. ssh_map: {}"). + format(name, cls.__lab_ssh_map)) + + @classmethod + def get_active_controllers(cls, fail_ok=True, current_thread_only=True): + """ Get all the active controllers ssh sessions. + + Used when running tests in multiple labs in parallel. i.e.,get all + the active controllers' ssh sessions, and + execute cli commands on all these controllers + + Returns: list of active controllers ssh clients. + + """ + controllers = [] + idx = 0 + if current_thread_only: + curr_thread = threading.current_thread() + idx = 0 if curr_thread is threading.main_thread() else int( + curr_thread.name.split('-')[-1]) + for value in cls.__lab_ssh_map.values(): + if value: + if current_thread_only: + if len(value) > idx: + controllers.append(value[idx]) + else: + controllers += value + + if len(controllers) == 0 and not fail_ok: + raise exceptions.ActiveControllerUnsetException + + return controllers + + @classmethod + def get_active_controllers_map(cls): + return cls.__lab_ssh_map + + @classmethod + def set_active_controller(cls, ssh_client, name=None): + """ + lab_name for new entry + + Args: + ssh_client: + name: used in distributed cloud, when ssh for multiple systems + need to be stored. 
e.g., name='subcloud-1' + + Returns: + + """ + if not isinstance(ssh_client, SSHClient): + raise TypeError("ssh_client has to be an instance of SSHClient!") + + if not name: + for lab_ in cls.__lab_list: + if ssh_client.host == lab_.get( + 'floating ip') or ssh_client.host == lab_.get( + 'controller-0 ip') \ + or ssh_client.host == lab_.get('external_ip'): + name = lab_.get('short_name') + break + else: + name = 'no_name' + + # new lab or ip address + if name not in cls.__lab_ssh_map: + cls.__lab_ssh_map[name] = [] + + curr_thread = threading.current_thread() + idx = 0 if curr_thread is threading.main_thread() else int( + curr_thread.name.split('-')[-1]) + # set ssh for new lab + if len(cls.__lab_ssh_map[name]) == idx: + cls.__lab_ssh_map[name].append(ssh_client) + # change existing ssh + elif len(cls.__lab_ssh_map[name]) > idx: + cls.__lab_ssh_map[name][idx] = ssh_client + # fill with copy of new ssh session until list is correct length + # (only when a different lab or ip address has also been added) + else: + new_ssh = SSHClient(ssh_client.host, ssh_client.user, + ssh_client.password) + new_ssh.connect(use_current=False) + while len(cls.__lab_ssh_map[name]) < idx: + cls.__lab_ssh_map[name].append(new_ssh) + cls.__lab_ssh_map[name].append(ssh_client) + + LOG.info( + "Active controller client for {} is set. Host ip/name: {}".format( + name, ssh_client.host)) + + @classmethod + def set_active_controllers(cls, *args): + """ + Set active controller(s) for lab(s). + + Args: + *args:ssh clients for lab(s) + e.g.,ip_1-4_ssh , hp380_ssh + + """ + for lab_ssh in args: + cls.set_active_controller(ssh_client=lab_ssh) + + @classmethod + def set_default_ssh(cls, name=None): + """ + Set ssh client to be used by default. This is usually used by + distributed cloud. + Unset if name=None. + Args: + name (str|None): + """ + if not name: + cls.__default_name = None + elif name in cls.__lab_ssh_map: + cls.__default_name = name + else: + raise ValueError( + '{} is not in lab_ssh_map: {}. Please add the ssh client to ' + 'lab_ssh_map via set_active_controller() before setting it ' + 'to default'.format(name, cls.__lab_ssh_map)) + + +def ssh_to_controller0(ssh_client=None): + if ssh_client is None: + ssh_client = ControllerClient.get_active_controller() + if ssh_client.get_hostname() == 'controller-0': + LOG.info("Already on controller-0. Do nothing.") + return ssh_client + con_0_ssh = SSHFromSSH(ssh_client=ssh_client, host='controller-0', + user=HostLinuxUser.get_user(), + password=HostLinuxUser.get_password(), + initial_prompt=Prompt.CONTROLLER_0) + con_0_ssh.connect() + return con_0_ssh + + +def get_cli_client(central_region=False): + name = 'RegionOne' if central_region and ProjVar.get_var('IS_DC') else None + return ControllerClient.get_active_controller(name=name) diff --git a/automated-pytest-suite/utils/clients/telnet.py b/automated-pytest-suite/utils/clients/telnet.py new file mode 100644 index 0000000..fe91f6b --- /dev/null +++ b/automated-pytest-suite/utils/clients/telnet.py @@ -0,0 +1,549 @@ +# +# Copyright (c) 2019 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + + +import os +import re +import time +from telnetlib import Telnet, theNULL, DO, DONT, WILL, WONT, NOOPT, IAC, \ + SGA, ECHO, SE, SB + +from consts.auth import HostLinuxUser +from consts.stx import DATE_OUTPUT, Prompt +from consts.proj_vars import ProjVar +from utils import exceptions +from utils.clients.ssh import PASSWORD_PROMPT, EXIT_CODE_CMD +from utils.tis_log import get_tis_logger, LOG + + +def telnet_logger(host): + log_dir = ProjVar.get_var('LOG_DIR') + if log_dir: + log_dir = '{}/telnet'.format(log_dir) + os.makedirs(log_dir, exist_ok=True) + logpath = log_dir + '/telnet_' + host + ".log" + else: + logpath = None + + logger = get_tis_logger(logger_name='telnet_{}'.format(host), + log_path=logpath) + + return logger + + +LOGIN_REGEX = re.compile(r'^(.*[\w]+-[\d]+)( login:|:~\$)'.encode(), + re.MULTILINE) +TELNET_LOGIN_PROMPT = re.compile(r'^(?![L|l]ast).*[L|l]ogin:[ ]?$'.encode(), + re.MULTILINE) +NEWPASSWORD_PROMPT = '' +LOGGED_IN_REGEX = re.compile(r'^(.*-[\d]+):~\$ '.encode(), re.MULTILINE) + +# VT100 values +ESC = bytes([27]) # Escape character +VT100_DEVICE_STATUS = bytes([27, 91, 53, 110]) # Device Status Query +VT100_DEVICE_OK = bytes([27, 91, 48, 110]) # Device OK + + +class TelnetClient(Telnet): + + def __init__(self, host, prompt=None, port=0, timeout=30, hostname=None, + user=HostLinuxUser.get_user(), + password=HostLinuxUser.get_password(), negotiate=False, + vt100query=False, console_log_file=None): + + self.logger = LOG + super(TelnetClient, self).__init__(host=host, port=port, + timeout=timeout) + + if not hostname: + self.send('\r\n\r\n') + prompts = [LOGIN_REGEX, LOGGED_IN_REGEX] + index, re_obj, matched_text = super().expect(prompts, timeout=10) + if index in (0, 1): + hostname = prompts[index].search(matched_text).group(1).decode( + errors='ignore') + + if not prompt: + prompt = r':~\$ ' + + # -- mod begins + self.console_log_file = self.get_log_file(console_log_file) + self.negotiate = negotiate + self.vt100query = vt100query + if self.vt100query: + self.vt100querybuffer = b'' # Buffer for VT100 queries + # -- mod ends + + self.flush(timeout=1) + self.logger = telnet_logger(hostname) if hostname else telnet_logger( + host + ":" + str(port)) + self.hostname = hostname + self.prompt = prompt + self.cmd_output = '' + self.cmd_sent = '' + self.timeout = timeout + self.user = user + self.password = password + + self.logger.info( + 'Telnet connection to {}:{} ({}) is established'.format(host, port, + hostname)) + + def connect(self, timeout=None, login=True, login_timeout=10, + fail_ok=False): + timeout_arg = {'timeout': timeout} if timeout else {} + if self.eof: + self.logger.info( + "Re-open telnet connection to {}:{}".format(self.host, + self.port)) + self.open(host=self.host, port=self.port, **timeout_arg) + + if login: + self.login(fail_ok=fail_ok, expect_prompt_timeout=login_timeout) + + return self.sock + + def login(self, expect_prompt_timeout=10, fail_ok=False, + handle_init_login=False): + self.write(b'\r\n') + index = self.expect(blob_list=[TELNET_LOGIN_PROMPT, self.prompt], + timeout=expect_prompt_timeout, + fail_ok=fail_ok, searchwindowsize=50) + self.flush() + code = 0 + if index == 0: + self.send(self.user) + self.expect(PASSWORD_PROMPT, searchwindowsize=50, + timeout=expect_prompt_timeout) + self.send(self.password) + index = self.expect([self.prompt, TELNET_LOGIN_PROMPT], + searchwindowsize=50, + timeout=expect_prompt_timeout) + if index == 1: + if not handle_init_login: + raise exceptions.TelnetError( + 
'Unable to login to {} with credential {}/{}'. + format(self.hostname, self.user, self.password)) + self.send(self.user) + self.expect(PASSWORD_PROMPT, searchwindowsize=50, + timeout=expect_prompt_timeout) + self.send( + self.user) # in initial login, assume password=username + self.expect(PASSWORD_PROMPT, searchwindowsize=50, + timeout=expect_prompt_timeout) + self.send(self.user) # enter original password + self.expect(PASSWORD_PROMPT, searchwindowsize=50, + timeout=expect_prompt_timeout) + self.send(self.password) # enter new password + self.expect(PASSWORD_PROMPT, searchwindowsize=50, + timeout=expect_prompt_timeout) + self.send(self.password) # confirm new password + self.expect(searchwindowsize=50, timeout=expect_prompt_timeout) + + elif index < 0: + self.logger.warning( + "System is not in login page and default prompt is not found " + "either") + code = 1 + + return code + + def write(self, buffer, log=True): + if log: + self.logger.debug( + 'Write: {}'.format(buffer.decode(errors='ignore'))) + super(TelnetClient, self).write(buffer=buffer) + + def send(self, cmd='', reconnect=False, reconnect_timeout=300, flush=False): + if reconnect: + self.connect(timeout=reconnect_timeout) + if flush: + self.flush(timeout=1) + + cmd_for_exitcode = (cmd == EXIT_CODE_CMD) + is_read_only_cmd = (not cmd) or re.search('show|list|cat', cmd) + + if cmd_for_exitcode or is_read_only_cmd: + self.logger.debug("Send: {}".format(cmd)) + else: + self.logger.info("Send: {}".format(cmd)) + + self.cmd_sent = cmd + if not cmd.endswith('\n'): + cmd = '{}\n'.format(cmd) + + cmd = cmd.replace('\r\n', '\n') + # cmd = cmd.replace('\n', '\r\n') + self.write(cmd.encode(), log=False) + + def send_control(self, char='c'): + valid_chars = ["[", "\\", "]", "^", "_"] + if char.isalpha() or char in valid_chars: + code = chr(ord(char.upper()) - 64) + else: + raise NotImplemented("ctrl+{} is not supported".format(char)) + self.logger.info("Send: ctrl+{}".format(char)) + self.write(code.encode()) + + def _process_output(self, output, rm_date=False): + output_list = output.splitlines() + if isinstance(output, bytes): + output_list = [line.decode(errors='ignore') for line in output_list] + + if self.cmd_sent != '': + output_list[0] = '' # do not display the sent command + if rm_date: # remove date output if any + if re.search(DATE_OUTPUT, output_list[-1]): + output_list = output_list[:-1] + + output = '\n'.join(output_list) + self.cmd_sent = '' # Make sure sent line is only removed once + + self.cmd_output = output + return output + + def expect(self, blob_list=None, timeout=None, fail_ok=False, rm_date=False, + searchwindowsize=None): + if timeout is None: + timeout = self.timeout + if not blob_list: + blob_list = self.prompt + if isinstance(blob_list, (str, bytes)): + blob_list = [blob_list] + + blobs = [] + for blob in blob_list: + if isinstance(blob, str): + blob = blob.encode() + blobs.append(blob) + + try: + # index, re_obj, matched_text = super(TelnetClient, self).expect( + # list=blobs, timeout=timeout) + index, re_obj, matched_text = super(TelnetClient, self).expect( + blobs, timeout=timeout) + # Reformat the output + output = self._process_output(output=matched_text, rm_date=rm_date) + if index >= 0: + # Match found + self.logger.debug("Found: {}".format(output)) + return index + + # Error handling + self.logger.debug( + "No match found for: {}. Actual output: {}".format(blob_list, + output)) + if self.eof: + err_msg = 'EOF encountered before {} appear. 
'.format(blob_list) + index = -1 + else: + err_msg = "Timed out waiting for {} to appear. ".format( + blob_list) + index = -2 + + except EOFError: + err_msg = 'EOF encountered and before receiving anything. ' + index = -1 + + if fail_ok: + self.logger.warning(err_msg) + return index + + if index == -1: + raise exceptions.TelnetEOF(err_msg) + elif index == -2: + raise exceptions.TelnetTimeout(err_msg) + else: + raise exceptions.TelnetError( + "Unknown error! Please update telnet expect method") + + def flush(self, timeout=3): + time.sleep(timeout) # Wait for given time before reading. + buffer = self.read_very_eager() + if buffer: + output = '\n'.join( + [line.decode(errors='ignore') for line in buffer.splitlines()]) + self.logger.debug("Flushed: \n{}".format(output)) + return buffer + + def exec_cmd(self, cmd, expect_timeout=None, reconnect=False, + reconnect_timeout=300, err_only=False, rm_date=False, + fail_ok=True, get_exit_code=True, blob=None, force_end=False, + searchwindowsize=None): + if blob is None: + blob = self.prompt + if expect_timeout is None: + expect_timeout = self.timeout + + self.logger.debug("Executing command...") + if err_only: + cmd += ' 1> /dev/null' + self.send(cmd, reconnect, reconnect_timeout) + try: + self.expect(blob_list=blob, timeout=expect_timeout, + searchwindowsize=searchwindowsize) + except exceptions.TelnetTimeout as e: + self.send_control() + self.expect(fail_ok=True, timeout=5) + self.flush(timeout=1) + if fail_ok: + self.logger.warning(e) + else: + raise + + code, output = self._process_exec_result(rm_date, + get_exit_code=get_exit_code) + + self.__force_end(force_end) + + if code > 0 and not fail_ok: + raise exceptions.SSHExecCommandFailed( + "Non-zero return code for cmd: {}".format(cmd)) + + return code, output + + def exec_sudo_cmd(self, cmd, expect_timeout=60, rm_date=True, fail_ok=True, + get_exit_code=True, + searchwindowsize=None, strict_passwd_prompt=False, + extra_prompt=None, prefix_space=False): + """ + Execute a command with sudo. + + Args: + cmd (str): command to execute. such as 'ifconfig' + expect_timeout (int): timeout waiting for command to return + rm_date (bool): whether to remove date info at the end of the output + fail_ok (bool): whether to raise exception when non-zero exit + code is returned + get_exit_code + searchwindowsize (int): max chars to look for match from the end + of the output. + Usage: when expecting a prompt, set this to slightly larger + than the number of chars of the prompt, + to speed up the search, and to avoid matching in the + middle of the output. + strict_passwd_prompt (bool): whether to search output with strict + password prompt (Not recommended. 
Use + searchwindowsize instead) + extra_prompt (str|None) + prefix_space (bool): prefix ' ' to cmd, so that it will not go + into bash history if HISTCONTROL=ignorespace + + Returns (tuple): (exit code (int), command output (str)) + + """ + cmd = 'sudo ' + cmd + if prefix_space: + cmd = ' {}'.format(cmd) + LOG.debug("Executing sudo command...") + self.send(cmd) + pw_prompt = Prompt.PASSWORD_PROMPT if not strict_passwd_prompt else \ + Prompt.SUDO_PASSWORD_PROMPT + prompts = [self.prompt] + if extra_prompt is not None: + prompts.append(extra_prompt) + prompts.append(pw_prompt) + + index = self.expect(prompts, timeout=expect_timeout, + searchwindowsize=searchwindowsize, fail_ok=fail_ok) + if index == prompts.index(pw_prompt): + self.send(self.password) + prompts.remove(pw_prompt) + self.expect(prompts, timeout=expect_timeout, + searchwindowsize=searchwindowsize, fail_ok=fail_ok) + + code, output = self._process_exec_result(rm_date, + get_exit_code=get_exit_code) + if code != 0 and not fail_ok: + raise exceptions.TelnetError( + "Non-zero return code for sudo cmd: {}. Output: " + "{}".format(cmd, output)) + + return code, output + + def msg(self, msg, *args): + return + + def _process_exec_result(self, rm_date=False, get_exit_code=True): + + cmd_output_list = self.cmd_output.splitlines()[0:-1] # exclude prompt + if rm_date: # remove date output if any + if re.search(DATE_OUTPUT, cmd_output_list[-1]): + cmd_output_list = cmd_output_list[:-1] + + cmd_output = '\n'.join(cmd_output_list) + + if get_exit_code: + exit_code = self.get_exit_code() + else: + exit_code = -1 + self.logger.debug("Actual exit code is unknown") + + cmd_output = cmd_output.strip() + return exit_code, cmd_output + + def get_exit_code(self): + self.flush(timeout=1) + self.send(EXIT_CODE_CMD) + self.expect(timeout=10) + # LOG.debug("echo output: {}".format(self.cmd_output)) + matches = re.findall("\n([-+]?[0-9]+)\n", self.cmd_output) + # LOG.debug("matches: {}".format(matches)) + return int(matches[-1]) + + def __force_end(self, force): + if force: + self.flush(timeout=1) + self.send_control('c') + self.flush() + + def set_prompt(self, prompt): + self.prompt = prompt + + def get_hostname(self): + return self.exec_cmd('hostname')[1].splitlines()[0] + + def process_rawq(self): + """Transfer from raw queue to cooked queue. + + Set self.eof when connection is closed. Don't block unless in + the midst of an IAC sequence. + + """ + buf = [b'', b''] + try: + while self.rawq: + c = self.rawq_getchar() + if not self.iacseq: + if c == theNULL: + continue + if c == b"\021": + continue + # -- mod begins + # deal with vt100 escape sequences + if self.vt100query: + if self.vt100querybuffer: + self.vt100querybuffer += c + if len(self.vt100querybuffer) > 10: + self.vt100querybuffer = b'' # too long, ignore + elif self.vt100querybuffer == VT100_DEVICE_STATUS: + self.sock.sendall(VT100_DEVICE_OK) + self.vt100querybuffer = b'' + if not self.vt100querybuffer and c == ESC: + self.vt100querybuffer += c + # deal with IAC sequences + # -- mod ends + if c != IAC: + buf[self.sb] = buf[self.sb] + c + continue + else: + self.iacseq += c + elif len(self.iacseq) == 1: + # 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]' + if c in (DO, DONT, WILL, WONT): + self.iacseq += c + continue + + self.iacseq = b'' + if c == IAC: + buf[self.sb] = buf[self.sb] + c + else: + if c == SB: # SB ... SE start. 
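                            # Start of a telnet subnegotiation (IAC SB ... IAC SE):
                            # subsequent bytes are buffered in buf[1] and moved to
                            # self.sbdataq once SE is received, then passed to
                            # option_callback if one is registered.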
+ self.sb = 1 + self.sbdataq = b'' + elif c == SE: + self.sb = 0 + self.sbdataq = self.sbdataq + buf[1] + buf[1] = b'' + if self.option_callback: + # Callback is supposed to look into + # the sbdataq + self.option_callback(self.sock, c, NOOPT) + else: + # We can't offer automatic processing of + # suboptions. Alas, we should not get any + # unless we did a WILL/DO before. + self.msg('IAC %d not recognized' % ord(c)) + elif len(self.iacseq) == 2: + cmd = self.iacseq[1:2] + self.iacseq = b'' + opt = c + if cmd in (DO, DONT): + self.msg('IAC %s %d', cmd == DO and 'DO' or 'DONT', + ord(opt)) + if self.option_callback: + self.option_callback(self.sock, cmd, opt) + else: + # -- mod begins + if self.negotiate: + # do some limited logic to use SGA if asked + if cmd == DONT and opt == SGA: + self.sock.sendall(IAC + WILL + opt) + elif cmd == DO and opt == SGA: + self.sock.sendall(IAC + WILL + opt) + else: + self.sock.sendall(IAC + WONT + opt) + else: + # -- mod ends + self.sock.sendall(IAC + WONT + opt) + elif cmd in (WILL, WONT): + self.msg('IAC %s %d', cmd == WILL and 'WILL' or 'WONT', + ord(opt)) + if self.option_callback: + self.option_callback(self.sock, cmd, opt) + else: + # -- mod begins + if self.negotiate: + # do some limited logic to use SGA if asked + if cmd == WONT and opt == SGA: + self.sock.sendall(IAC + DO + opt) + elif cmd == WILL and opt == SGA: + self.sock.sendall(IAC + DO + opt) + elif cmd == WILL and opt == ECHO: + self.sock.sendall(IAC + DO + opt) + else: + self.sock.sendall(IAC + DONT + opt) + else: + # -- mod ends + self.sock.sendall(IAC + DONT + opt) + except EOFError: # raised by self.rawq_getchar() + self.iacseq = b'' # Reset on EOF + self.sb = 0 + pass + self.cookedq = self.cookedq + buf[0] + # -- mod begins + self.log_write(buf[0]) + # -- mod ends + self.sbdataq = self.sbdataq + buf[1] + + def log_write(self, text): + if not text: + return + + try: + if not isinstance(text, str): + text = text.decode('utf-8', 'ignore') + except AttributeError as e: + print('log_write exception: ', e) + pass + + if self.console_log_file: + try: + self.console_log_file.write(text) + self.console_log_file.flush() + + except UnicodeEncodeError: + pass + # -- mod ends + + def get_log_file(self, log_dir): + + if log_dir: + logfile = open(log_dir, 'a') + else: + logfile = None + + return logfile diff --git a/automated-pytest-suite/utils/exceptions.py b/automated-pytest-suite/utils/exceptions.py new file mode 100644 index 0000000..63fae85 --- /dev/null +++ b/automated-pytest-suite/utils/exceptions.py @@ -0,0 +1,261 @@ +class TiSError(Exception): + """ + Base class for TiS test automation exceptions. + + Notes: + Each module (or package depends on which makes more sense) should + have its own sub-base-class that + inherits this class.Then the specific exception for that module/package + should inherit the sub-base-class. + + Examples: + sub-base-class for ssh.py: SSHException(TiSError); ssh retry timeout + exception: SSHRetryTimeout(SSHException) + """ + message = "An unknown exception occurred" + + def __init__(self, detailed_message="No details provided"): + super(TiSError, self).__init__() + self._error_string = self.message + "\nDetails: " + detailed_message + + def __str__(self): + return self._error_string + + +class NoMatchFoundError(TiSError): + message = "No match found." + + +class InvalidStructure(TiSError): + message = "Invalid cli output table structure." + + +class SSHException(TiSError): + """ + Base class for SSH Exceptions. 
All SSH exceptions thrown from utils > + ssh.py module should inherit this class. + Examples: SSHRetryTimeout(SSHException) + """ + message = "SSH error." + + +class TelnetError(TiSError): + message = "Telnet Error" + + +class TelnetTimeout(TelnetError): + message = 'Telnet timeout' + + +class TelnetEOF(TelnetError): + message = 'Telnet EOF.' + + +class LocalHostError(TiSError): + message = 'Localhost error.' + + +class SSHRetryTimeout(SSHException): + message = "Timed out to connect to host." + + +class IncorrectCredential(SSHException): + message = "Login credential rejected by host." + + +class SSHExecCommandFailed(SSHException): + """Raised when remotely executed command returns nonzero status.""" + message = "Failed to execute command via SSH." + + +class TimeoutException(SSHException): + message = "Request(s) timed out" + + +class ImproperUsage(SSHException): + message = "Improper use of test framework" + + +class ActiveControllerUnsetException(SSHException): + message = ("Active controller ssh client is not set! " + "Please use ControllerClient.set_active_controller(ssh_client) " + "to set an active controller client.") + + +class NatBoxClientUnsetException(SSHException): + message = "NatBox ssh client it not set! Please use " \ + "NATBoxClient.set_natbox_client(ip) to set an natbox client" + + +class CLIRejected(TiSError): + """Throw when cli command is rejected due to unexpected reasons, such as + missing arguments""" + message = "CLI command is rejected." + + +class HostError(TiSError): + """Generic Host error""" + message = "Host error." + + +class HostPostCheckFailed(HostError): + """Throws when expected host status is not reached after running certain + host action cli command.""" + message = "Check failed post host operation." + + +class HostPreCheckFailed(HostError): + message = "Check failed pre host operation." + + +class HostTimeout(HostError): + message = "Host operation timed out." + + +class VMError(TiSError): + message = "VM error." + + +class VMPostCheckFailed(VMError): + message = "Check failed post VM operation." + + +class VMNetworkError(VMError): + message = "VM network error." + + +class VMTimeout(VMError): + message = "VM operation timed out." + + +class VMOperationFailed(VMError): + """Failure indicated by CLI output""" + message = "VM operation failed." + + +class VolumeError(TiSError): + message = "Volume error." + + +class ImageError(TiSError): + message = "Image error." + + +class FlavorError(TiSError): + message = "Flavor error." + + +class CommonError(TiSError): + message = "Setup/Teardown error." + + +class NovaError(TiSError): + message = "Nova error." + + +class NeutronError(TiSError): + message = "Neutron error." + + +class HeatError(TiSError): + message = "Heat error." + + +class CeilometerError(TiSError): + message = "Ceilometer error." + + +class SysinvError(TiSError): + message = 'Sysinv error.' + + +class ContainerError(SysinvError): + message = 'Container error.' + + +class CinderError(TiSError): + message = 'Cinder error.' + + +class KeystoneError(TiSError): + message = 'Keystone error.' + + +class BuildServerError(TiSError): + message = "Build Server error." + + +class ThreadingError(TiSError): + message = "Multi threading error." + + +class VLMError(TiSError): + message = "VLM Operation Error." + + +class SwiftError(TiSError): + message = "Swift error." + + +class OrchestrationError(TiSError): + message = 'Orchestration error.' + + +class UpgradeError(TiSError): + message = 'Upgrade error.' 
+ + +class BackupSystem(TiSError): + message = 'System Backup error.' + + +class RestoreSystem(TiSError): + message = 'System Restore error.' + + +class StorageError(TiSError): + message = 'Storage error.' + + +class HorizonError(TiSError): + message = 'Horizon error.' + + +class IxiaError(TiSError): + message = 'Ixia error.' + + +class RefStackError(TiSError): + message = 'RefStack test(s) failed.' + + +class DovetailError(TiSError): + message = 'Dovetail test(s) failed.' + + +class MuranoError(TiSError): + message = 'Murano error.' + + +class DCError(TiSError): + message = 'DC error.' + + +class PatchError(TiSError): + message = 'Patch error.' + + +class KubeError(TiSError): + message = 'Kubernetes error.' + + +class KubeCmdError(KubeError): + message = 'Kubernetes cmd failed.' + + +class InstallError(TiSError): + message = 'Install error' + + +class K8sError(TiSError): + message = 'K8s error' diff --git a/automated-pytest-suite/utils/guest_scripts/dpdk_pktgen.sh b/automated-pytest-suite/utils/guest_scripts/dpdk_pktgen.sh new file mode 100644 index 0000000..19ecac3 --- /dev/null +++ b/automated-pytest-suite/utils/guest_scripts/dpdk_pktgen.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# the original version from /usr/local/bin/launch_pktgen.sh +# written by Allain Legacy + +sleep 60 + +PKTGEN=${PKTGEN:-/usr/local/bin/pktgen} +HUGETLBMNT=${HUGETLBMNT:-/dev/hugepages} + +# Assumes 3 nic guest, with first nic virtio +# using only the first nic +DEVICES=("0000:00:04.0") + +## Automatically unbind/rebind PCI devices +modprobe igb_uio +USEDEVICES="" +for DEVICE in ${DEVICES[@]}; do + UIO_DRIVER=/sys/bus/pci/drivers/igb_uio + SYSFS=/sys/bus/pci/devices/${DEVICE} + + if [ ! -d ${SYSFS} ]; then + echo "Unable to find device directory: ${SYSFS}" + exit 1 + fi + + # Add the device to the list of supported devices of the UIO driver + UEVENT=${SYSFS}/uevent + PCI_ID=($(cat ${UEVENT} | grep PCI_ID | sed -e 's/^.*=//' | tr ":" " ")) + echo "${PCI_ID[0]} ${PCI_ID[1]}" > ${UIO_DRIVER}/new_id + + # Unbind from the old driver and bind to the new driver + echo -n ${DEVICE} > ${SYSFS}/driver/unbind + echo -n ${DEVICE} > ${UIO_DRIVER}/bind + USEDEVICES+=" --pci-whitelist ${DEVICE}" +done + +# cpu mask cannot be set for lcores not exist on the system +# 2vcpus setup +# ${PKTGEN} -c 0x3 -n 1 --huge-dir ${HUGETLBMNT} --proc-type primary +# --socket-mem 512 ${USEDEVICES} --file-prefix pg -- -p 0xFFFF -P -N +# -m "[1:1].0" -f /root/dpdk_pktgen.config + +# 3vcpus setup +${PKTGEN} -c 0x7 -n 2 --huge-dir ${HUGETLBMNT} --proc-type primary \ +--socket-mem 512 ${USEDEVICES} --file-prefix pg -- -p 0xFFFF -P -N \ +-m "[1:1-2].0" -f /root/dpdk_pktgen.config + +exit $? diff --git a/automated-pytest-suite/utils/guest_scripts/kpktgen.sh b/automated-pytest-suite/utils/guest_scripts/kpktgen.sh new file mode 100644 index 0000000..7b96bb4 --- /dev/null +++ b/automated-pytest-suite/utils/guest_scripts/kpktgen.sh @@ -0,0 +1,81 @@ +#! /bin/sh + +modprobe pktgen + +function pgset() { + local result + + echo $1 > $PGDEV + + result=`cat $PGDEV | fgrep "Result: OK:"` + if [ "$result" = "" ]; then + cat $PGDEV | fgrep Result: + fi +} + +function pg() { + echo inject > $PGDEV + cat $PGDEV +} + +# Config Start Here ----------------------------------------------------------- + + +# thread config +# Each CPU has own thread. Two CPU exammple. We add eth1, eth2 respectivly. 
+ +# setup_netif_multiqueue "eth1" +ethtool -G eth1 tx 1024 +PGDEV=/proc/net/pktgen/kpktgend_1 + echo "Removing all devices" + pgset "rem_device_all" + echo "Adding eth1" + pgset "add_device eth1" + echo "Setting max_before_softirq 10000" + pgset "max_before_softirq 10000" + +# We need to remove old config since we dont use this thread. We can only +# one NIC on one CPU due to affinity reasons. + +# Guest might be launched with less than 2 vcpus, ignore other threads +# PGDEV=/proc/net/pktgen/kpktgend_1 +# echo "Removing all devices" +# pgset "rem_device_all" + +# device config +# ipg is inter packet gap. 0 means maximum speed. + +CLONE_SKB="clone_skb 0" +# NIC adds 4 bytes CRC +PKT_SIZE="pkt_size 60" + +# COUNT 0 means forever +#COUNT="count 0" +COUNT="count 0" + +# rate 300M means 300Mb/s +RATE="" + +DST_IP="dst 10.10.11.2" +DST_MAC="dst_mac 00:04:23:08:91:dc" +source /root/kpktgen.config + +PGDEV=/proc/net/pktgen/eth1 + echo "Configuring $PGDEV" + pgset "$COUNT" + pgset "$CLONE_SKB" + pgset "$PKT_SIZE" + pgset "$DST_IP" + pgset "$DST_MAC" + if [ -n "$RATE" ]; then + pgset "$RATE" + fi + +# Time to run +PGDEV=/proc/net/pktgen/pgctrl + + echo "Running... ctrl^C to stop" + pgset "start" + echo "Done" + +# Result can be vieved in /proc/net/pktgen/eth1 diff --git a/automated-pytest-suite/utils/guest_scripts/scripts.py b/automated-pytest-suite/utils/guest_scripts/scripts.py new file mode 100644 index 0000000..fde118b --- /dev/null +++ b/automated-pytest-suite/utils/guest_scripts/scripts.py @@ -0,0 +1,111 @@ +import os + + +class TisInitServiceScript(object): + script_path = "/etc/init.d/tis_automation_init.sh" + configuration_path = "/etc/init.d/tis_automation_init.config" + service_name = "tis_automation_init.service" + service_path = "/etc/systemd/system/{}".format(service_name) + service = """ +[Unit] +Description=TiS Automation Initialization +After=NetworkManager.service network.service wrs-guest-setup.service + +[Service] +Type=simple +RemainAfterExit=yes +ExecStart=/bin/bash {} start +ExecStop=/bin/bash {} stop + +[Install] +WantedBy=multi-user.target +""".format(script_path, script_path) + + @classmethod + def configure(cls, vm_ssh, **kwargs): + cfg = "\n".join(["{}={}".format(*kv) for kv in kwargs.items()]) + vm_ssh.exec_sudo_cmd( + "cat > {} << 'EOT'\n{}\nEOT".format(cls.configuration_path, cfg), + fail_ok=False) + vm_ssh.exec_sudo_cmd( + "cat > %s << 'EOT'\n%s\nEOT" % (cls.service_path, cls.service), + fail_ok=False) + + @classmethod + def enable(cls, vm_ssh): + vm_ssh.exec_sudo_cmd( + "systemctl daemon-reload", fail_ok=False) + vm_ssh.exec_sudo_cmd( + "systemctl enable %s" % cls.service_name, fail_ok=False) + + @classmethod + def start(cls, vm_ssh): + vm_ssh.exec_sudo_cmd( + "systemctl daemon-reload", fail_ok=False) + vm_ssh.exec_sudo_cmd( + "systemctl start %s" % cls.service_name, fail_ok=False) + + @classmethod + def src(cls): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), + "tis_automation_init.sh") + + @classmethod + def dst(cls): + return cls.script_path + + +class KPktgen(object): + script_path = "/root/kpktgen.sh" + configuration_path = "/root/kpktgen.config" + + @classmethod + def src(cls): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), + "kpktgen.sh") + + @classmethod + def dst(cls): + return cls.script_path + + @classmethod + def configure(cls, vm_ssh, **kwargs): + cfg = "\n".join(["{}={}".format(*kv) for kv in kwargs.items()]) + vm_ssh.exec_sudo_cmd( + "cat > {} << 'EOT'\n{}\nEOT".format(cls.configuration_path, cfg), + 
fail_ok=False) + + @classmethod + def start(cls, vm_ssh): + vm_ssh.exec_sudo_cmd("nohup bash {} &>/dev/null &".format( + cls.script_path)) + + +class DPDKPktgen(object): + script_path = "/root/dpdk_pktgen.sh" + configuration_path = "/root/dpdk_pktgen.config" + + @classmethod + def src(cls): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), + "dpdk_pktgen.sh") + + @classmethod + def dst(cls): + return cls.script_path + + @classmethod + def configure(cls, vm_ssh, *cmds): + cfg = "\n".join(cmds) + vm_ssh.exec_sudo_cmd( + "cat > {} << 'EOT'\n{}\nEOT".format(cls.configuration_path, cfg), + fail_ok=False) + + @classmethod + def start(cls, vm_ssh): + # dpdk pktgen REQUIRES a tty (fails during initialization otherwise) + # echo-ing 'quit' will not work, use 'kill' with 'ps aux | grep nohup' + # to terminate + vm_ssh.exec_sudo_cmd( + "nohup socat EXEC:{},pty PTY,link=pktgen.pty,echo=0,icanon=0 " + "&>/dev/null &".format(cls.script_path)) diff --git a/automated-pytest-suite/utils/guest_scripts/tis_automation_init.sh b/automated-pytest-suite/utils/guest_scripts/tis_automation_init.sh new file mode 100644 index 0000000..d98a51c --- /dev/null +++ b/automated-pytest-suite/utils/guest_scripts/tis_automation_init.sh @@ -0,0 +1,604 @@ +#!/bin/bash +################################################################################ +# Copyright (c) 2014-2015 Wind River Systems, Inc. +# +# The right to copy, distribute, modify, or otherwise make use of this +# software may be licensed only pursuant to the terms of an applicable Wind +# River license agreement. +# +################################################################################ +# chkconfig: 2345 10 99 +PROGRAM="tis_automation_init.sh" + +# Default configurations +FUNCTIONS=routing, +LOW_LATENCY='no' +VSWITCH_CMDFILE=/etc/vswitch/vswitch.cmds.default +VSWITCH_INIFILE=/etc/vswitch/vswitch.ini +VSWITCH_CONFIG=/etc/vswitch/vswitch.conf +BRIDGE_PORTS="eth1,eth2.2" +BRIDGE_MTU=1500 + +# PCI vendor/device IDs +PCI_VENDOR_VIRTIO="0x1af4" +PCI_DEVICE_VIRTIO="0x1000" +PCI_DEVICE_MEMORY="0x1110" +PCI_SUBDEVICE_NET="0x0001" +PCI_SUBDEVICE_AVP="0x1104" + +# Default NIC device type +NIC_DEVICE_DEFAULT="${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_MEMORY}:${PCI_SUBDEVICE_AVP}" +NIC_COUNT_DEFAULT=2 + +################################################################################ +# Generate a log to the syslog stream and stdout +################################################################################ +function log() +{ + local MESSAGE=$1 + local TEXT="${PROGRAM}: ${MESSAGE}" + logger ${TEXT} + echo ${TEXT} +} + +function setup_netif_multiqueue() +{ + local IFNAME=$1 + + DRIVER=$(basename $(readlink /sys/class/net/${IFNAME}/device/driver)) + if [ "$DRIVER" == "virtio_net" ]; then + CPU_COUNT=$(cat /proc/cpuinfo |grep "^processor"|wc -l) + + CPU_START=0 + CPU_END=$((CPU_COUNT-1)) + + if [ "$LOW_LATENCY" == "yes" ]; then + # CPU 0 should not be used when configured for low latency + # since VCPU0 does not run as a realtime thread + CPU_START=1 + CPU_COUNT=$((CPU_COUNT-1)) + fi + + ethtool -L ${IFNAME} combined $CPU_COUNT + + QUEUE=0 + for ((CPUID=$CPU_START; CPUID <= $CPU_END; CPUID++)) + do + CPUMASK=$(echo "(2^${CPUID})" | bc -l) + IFNUMBER=${IFNAME#eth} + IRQ=$(cat /proc/interrupts | grep "virtio${IFNUMBER}-input.${QUEUE}" \ + | awk '{print $1}' | sed 's/://') + echo ${CPUMASK} > /proc/irq/${IRQ}/smp_affinity + QUEUE=$((QUEUE+1)) + done + fi + + return 0 +} + +function setup_kernel_routing() +{ + echo 1 > /proc/sys/net/ipv4/ip_forward + echo 0 > 
/proc/sys/net/ipv4/conf/default/rp_filter + echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter + echo 1 > /proc/sys/net/ipv6/conf/default/forwarding + echo 1 > /proc/sys/net/ipv6/conf/all/forwarding + modprobe 8021q + for IFNAME in $(find /sys/class/net -maxdepth 1 -type l -exec basename {} \\;); do + if [[ $IFNAME != "lo" ]]; then + echo "${IFNAME}" | grep -q "\\." + if [ $? -eq 0 ]; then + # VLAN is being created, create interface and setup underlying interface + UIFNAME=$(echo ${IFNAME}|awk -F '.' '{print $1}') + VLANID=$(echo ${IFNAME}|awk -F '.' '{print $2}') + + # enable multiqueue support if using the virtio-net driver + setup_netif_multiqueue ${UIFNAME} + else + setup_netif_multiqueue ${IFNAME} + fi + echo 0 > /proc/sys/net/ipv4/conf/${IFNAME}/rp_filter + echo 1 > /proc/sys/net/ipv6/conf/${IFNAME}/forwarding + fi + done + return 0 +} + +## Reload the wrs-avp driver while taking care to only shutdown eth0 if it is +## driven by this driver. This exception is made because we rarely use an AVP +## device for eth0 and shutting down that interface can interfere with +## automated testing if they were able to SSH in before this step and then get +## kicked out of the SSH session. +## +function reload_wrs_avp_driver() +{ + local ETH0_DRIVER=$(cat /sys/class/net/eth0/device/uevent | grep DRIVER | sed 's#DRIVER=##') + + if [ "${ETH0_DRIVER}" == "wrs_avp" ]; then + ## Bring down the interface gracefully before unloading the module + ifdown eth0 + fi + + rmmod wrs_avp + modprobe wrs_avp + + local RET=0 + if [ "${ETH0_DRIVER}" == "wrs_avp" ]; then + ## Bring up the interface again + ifup eth0 + RET=$? + fi + + return ${RET} +} + +## Count the number of 1 bits that are set in an IPv4 byte value. This +## function assumes that the input value is a proper IPv4 mask byte; meaning +## that all upper bits are 1's and all lower bits are 0's. +## +function count_1bits() +{ + local VALUE=$1 + local COUNT=0 + + while [ $((VALUE & 0x80)) -ne 0 ]; do + VALUE=$((VALUE * 2)) + COUNT=$((COUNT + 1)) + done + + return $COUNT +} + +## Convert an IPv4 mask value to a CIDR prefix length. +## +function convert_ipv4_mask_to_length() +{ + local MASK=(${1//./ }) + + local LENGTH=0 + for I in $(seq 0 3); do + local BYTE=${MASK[${I}]} + if [ ${BYTE} -eq 255 ]; then + BITS=8 + elif [ ${BYTE} -eq 0 ]; then + break + else + count_1bits ${BYTE} + BITS=$? + fi + LENGTH=$((LENGTH + $BITS)) + done + + return ${LENGTH} +} + +## Setup the vswitch offline CLI commands file which is consumed directly by the vswitch +## process to load the logical interface objects +## +function setup_vswitch_layer2_commands_file() +{ + local BRIDGE_PORTS=$1 + local BRIDGE_MTU=$2 + local PCI_UUIDS=("${!3}") + local NETUUID=$(uuidgen -r) + + echo "## AVS bridge configuration" > ${VSWITCH_CMDFILE} + echo "##" >> ${VSWITCH_CMDFILE} + + ## Setup a network to connect the bridge ports + echo "network add default ${NETUUID}" >> ${VSWITCH_CMDFILE} + + PORTS=(${BRIDGE_PORTS//,/ }) + for I in ${!PORTS[@]}; do + local PORT=${PORTS[${I}]} + local UUID=${PCI_UUIDS[${I}]} + local DATA=(${PORT//./ }) + local IFNAME=${DATA[0]} + local VLANID=${DATA[1]} + local VLAN_MTU=$((BRIDGE_MTU - 4)) + + ## Setup logical interface. + ## Note: we use the same UUID for the port and interface because + ## since there is no agents or management software it does not + ## matter. 
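+ ## Illustrative example (hypothetical UUIDs): with the default + ## BRIDGE_PORTS="eth1,eth2.2" and BRIDGE_MTU=1500 the generated commands file + ## ends up with lines such as "ethernet add <uuid> <uuid> 1500", + ## "bridge attach eth1 default", "vlan add eth2 2 <uuid> 1496" and + ## "bridge attach eth2.2 default".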
+ echo "ethernet add ${UUID} ${UUID} ${BRIDGE_MTU}" >> ${VSWITCH_CMDFILE} + + if [ "0${VLANID}" -ne 0 ]; then + ## Setup a VLAN interface (if necessary) + local IFUUID=$(uuidgen -r) + echo "vlan add ${IFNAME} ${VLANID} ${IFUUID} ${VLAN_MTU}" >> ${VSWITCH_CMDFILE} + else + ## Drop the ".0" from the name + PORT=${IFNAME} + fi + + ## Attach the logical interface + echo "bridge attach ${PORT} default" >> ${VSWITCH_CMDFILE} + done + + echo "quit" >> ${VSWITCH_CMDFILE} + return 0 +} + +## Setup the vswitch offline CLI commands file which is consumed directly by the vswitch +## process to load the logical interface objects and setup a layer3 routed environment +## +function setup_vswitch_layer3_commands_file() +{ + local ADDRESSES=("${!1}") + local ROUTES=("${!2}") + local PCI_UUIDS=("${!3}") + local NATARGS="" + + echo "## AVR router configuration" > ${VSWITCH_CMDFILE} + echo "##" >> ${VSWITCH_CMDFILE} + + ## Enable forwarding on the default router context + echo "router enable forwarding default" >> ${VSWITCH_CMDFILE} + + for ADDRESS in ${ADDRESSES[@]}; do + DATA=(${ADDRESS//,/ }) + IPADDR=${DATA[0]} + IPMASK=${DATA[1]} + IFNAME=${DATA[2]} + IFMTU=${DATA[3]} + IFDATA=(${IFNAME//./ }) + IFNAME=${IFDATA[0]} + VLANID=${IFDATA[1]} + IFNUMBER=${IFNAME#eth} + UUID=${PCI_UUIDS[${IFNUMBER}]} + + ## Shift the array so that the processed elements are removed + for I in $(seq 0 3); do + unset DATA[0] + DATA=(${DATA[@]}) + done + + convert_ipv4_mask_to_length ${IPMASK} + LENGTH=$? + + ## Setup logical interface. + ## Note: we use the same UUID for the port and interface because + ## since there is no agents or management software it does not + ## matter. + echo "ethernet add ${UUID} ${UUID} ${IFMTU}" >> ${VSWITCH_CMDFILE} + + if [ "0${VLANID}" -ne 0 ]; then + ## Setup a VLAN interface (if necessary) + local IFUUID=$(uuidgen -r) + echo "vlan add ${IFNAME} ${VLANID} ${IFUUID} ${IFMTU}" >> ${VSWITCH_CMDFILE} + IFNAME=${IFNAME}.${VLANID} + fi + + if [ "${DATA[0]}" == "nat" ]; then + ## Setup a NAT translation from this address to an internal + ## address. The CIDR length is irrelevant but required at the CLI + INTERNAL_ADDRESS=${DATA[1]} + NATARGS="nat ${INTERNAL_ADDRESS}" + + elif [ "${DATA[0]}" == "snat" ]; then + ## Mark this interface as an external gateway interface and enable + ## SNAT on the router. This is because AVS is optimized to only + ## enter the SNAT code path on external interfaces. 
+ echo "interface set flag ${IFNAME} external" >> ${VSWITCH_CMDFILE} + echo "router enable snat default" >> ${VSWITCH_CMDFILE} + fi + + ## Add address to the interface + echo "interface add addr ${IFNAME} ${IPADDR}/${LENGTH} ${NATARGS}" >> ${VSWITCH_CMDFILE} + done + + for ROUTE in ${ROUTES[@]}; do + DATA=(${ROUTE//,/ }) + SUBNET=${DATA[0]} + GWIP=${DATA[1]} + IFNAME=${DATA[2]} + echo "route add ${SUBNET} ${IFNAME} ${GWIP} 1" >> ${VSWITCH_CMDFILE} + done + + echo "quit" >> ${VSWITCH_CMDFILE} + return 0 +} + + +## Calculate the vswitch CPU list based on the CPU count +## +function get_vswitch_cpu_list() +{ + local CPU_COUNT=$1 + local CPU_LIST="" + + if [ ${CPU_COUNT} -gt 2 ]; then + ## Limit to N-1 processors starting at 1 + CPU_LIST="1-$((CPU_COUNT-1))" + else + CPU_LIST="1" + fi + + echo ${CPU_LIST} +} + +## Build a vswitch engine-map string based on the CPU count +## +function get_vswitch_engine_map() +{ + local CPU_COUNT=$1 + local ENGINE_MAP="" + local SEPARATOR="" + + for I in $(seq 1 $CPU_COUNT); do + ENGINE_MAP="${ENGINE_MAP}${SEPARATOR}$(uuidgen -r)=${I}" + SEPARATOR="," + done + + echo "${ENGINE_MAP}" +} + +## Build a vswitch pci-map based on the list of PCI devices and pre-generated +## UUID values +## +function get_vswitch_pci_map() +{ + local PCI_LIST=("${!1}") + local PCI_UUIDS=("${!2}") + local PCI_MAP="" + local SEPARATOR="" + + for I in ${!PCI_LIST[@]}; do + DEVICE=${PCI_LIST[$I]} + UUID=${PCI_UUIDS[$I]} + PCI_MAP="${PCI_MAP}${SEPARATOR}${UUID}=${DEVICE}" + SEPARATOR="," + done + + echo "${PCI_MAP}" +} + +## Generate a list of UUID values that will later be used to map to the +## vswitch PCI device list +## +function get_vswitch_pci_uuids() +{ + local COUNT=$1 + local UUIDS="" + + for I in $(seq 1 ${COUNT}); do + UUIDS="${UUIDS} $(uuidgen -r)" + done + + echo ${UUIDS} +} + +## Auto-detect the list of PCI devices for vswitch. This selects the last 2 AVP +## devices in the PCI list +## +function get_vswitch_pci_devices() +{ + local DEVICE_TYPE=$1 + local DEVICE_COUNT=$2 + + # split the device type into the PCI IDs + local DEVICE=(${DEVICE_TYPE//:/ }) + + echo $(pci_device_list ${DEVICE[0]} ${DEVICE[1]} ${DEVICE[2]} ${DEVICE_COUNT}) +} + +## Generate a mapping between port indexes and cpu indexes for vswitch engines. +## +function get_vswitch_port_map() +{ + local PCI_COUNT=$1 + local CPU_COUNT=$2 + local PCI_INDEXES=$(expand_sequence "0,$((PCI_COUNT-1))") + + if [ ${CPU_COUNT} -lt 2 ]; then + echo "${PCI_INDEXES}:1" + else + local CPU_INDEXES=$(expand_sequence "1-${CPU_COUNT}") + echo "${PCI_INDEXES}:${CPU_INDEXES}" + fi +} + +## Setup the vswitch init.d configuration file which is used to launch the vswitch +## process with the correct DPDK parameters +## +function setup_vswitch_config_file() +{ + local CPU_LIST=$(get_vswitch_cpu_list $1) + local PCI_LIST="${!2}" + + ## Set the CPU list and master core + sed -i -e "s#^\(VSWITCH_CPU_LIST\)=.*#\1=${CPU_LIST}#g" ${VSWITCH_CONFIG} + sed -i -e "s#^\(VSWITCH_MASTER_CPUID\)=.*#\1=0#g" ${VSWITCH_CONFIG} + + ## Set the PCI device list + sed -i -e "s#^\(VSWITCH_PCI_DEVICES\)=.*#\1=\"${PCI_LIST}\"#g" ${VSWITCH_CONFIG} + + ## Point vswitch to this custom commands files + sed -i -e "s#^\(VSWITCH_CMDFILE\)=.*#\1=${VSWITCH_CMDFILE}#g" ${VSWITCH_CONFIG} + + if [ ! -z "${VSWITCH_MEM_SIZES}" ]; then + ## Override the AVS memory settings if set + sed -i -e "s#^\(VSWITCH_MEM_SIZES\)=.*#\1=${VSWITCH_MEM_SIZES}#g" ${VSWITCH_CONFIG} + fi + + if [ ! 
-z ${VSWITCH_TEST_MODE} ]; then + ## Enable chaining if we are trying simulate an L3 environment + echo "VSWITCH_TEST_MODE=${VSWITCH_TESTMODE}" >> ${VSWITCH_CONFIG} + fi +} + +## Setup the vswitch ini file which is consumed directly by the vswitch process +## +function setup_vswitch_ini_file() +{ + local NUMA_COUNT=$1 + local CPU_COUNT=$2 + local PCI_LIST=("${!3}") + local PCI_UUIDS=("${!4}") + local ENGINE_MAP=$(get_vswitch_engine_map $((CPU_COUNT-1))) + local PORT_MAP=$(get_vswitch_port_map ${#PCI_LIST[@]} $((CPU_COUNT-1))) + local PCI_MAP=$(get_vswitch_pci_map PCI_LIST[@] PCI_UUIDS[@]) + local POOL_SIZE=${VSWITCH_MBUF_POOL_SIZE:-"16384"} + local IDLE_DELAY=${VSWITCH_ENGINE_IDLE_DELAY:-"1-10000"} + + cat << EOF > ${VSWITCH_INIFILE} +[DEFAULT] +master-core=0 +numa-nodes=1 +mbuf-pool-size=${POOL_SIZE} +avp-guest-desc=1024 +avp-host-desc=128 +command-file=${VSWITCH_CMDFILE} +command-logfile=/var/log/vswitch.cmds.log +[ENGINE] +engine-map=${ENGINE_MAP} +idle-delay=${IDLE_DELAY} +port-map=${PORT_MAP} +[PCI] +device-map=${PCI_MAP} +EOF +} + +function setup_vswitch() +{ + local MODE=$1 + local PCI_LIST="" + local PCI_UUIDS="" + + if [ "x${MODE}" == "xlayer3" ]; then + log "Setting up vswitch layer3 routing" + else + log "Setting up vswitch layer2 bridging on ${BRIDGE_PORTS}" + fi + + ## Source the vswitch functions to determine the VSWITCH_PCI_DEVICES list for later. + source /etc/vswitch/vswitch_functions.sh + + ## Auto detect CPU, NUMA, and PCI devices + NUMA_COUNT=$(numa_count) + CPU_COUNT=$(cpu_count) + PCI_LIST=($(get_vswitch_pci_devices ${NIC_DEVICE:-$NIC_DEVICE_DEFAULT} \ + ${NIC_COUNT:-$NIC_COUNT_DEFAULT})) + PCI_UUIDS=($(get_vswitch_pci_uuids ${#PCI_LIST[@]})) + + setup_vswitch_ini_file ${NUMA_COUNT} ${CPU_COUNT} PCI_LIST[@] PCI_UUIDS[@] + setup_vswitch_config_file ${CPU_COUNT} PCI_LIST[@] + + if [ "x${MODE}" == "xlayer3" ]; then + setup_vswitch_layer3_commands_file ADDRESSES[@] ROUTES[@] PCI_UUIDS[@] + else + setup_vswitch_layer2_commands_file ${BRIDGE_PORTS} ${BRIDGE_MTU} PCI_UUIDS[@] + fi + + ## Reload the wrs-avp driver to activate the options change + reload_wrs_avp_driver + RET=$? + if [ ${RET} -ne 0 ]; then + log "Failed to re-enable AVP driver, ret=${RET}" + return ${RET} + fi + + ## Start services + /etc/init.d/dpdk restart + RET=$? + if [ ${RET} -ne 0 ]; then + log "Failed to start dpdk, ret=${RET}" + return ${RET} + fi + + /etc/init.d/vswitch restart + RET=$? + if [ ${RET} -ne 0 ]; then + log "Failed to start vswitch, ret=${RET}" + return ${RET} + fi + + return 0 +} + +################################################################################ +# Start Action +################################################################################ +function start() +{ + log "waiting for wrs-guest-setup to finish" + # must wait for wrs-guest-setup to avoid race conditions + while ! systemctl show -p SubState wrs-guest-setup | grep 'exited\|dead'; do + sleep 1 + done + + log "stopping dpdk and vswitch" + /etc/init.d/dpdk stop + /etc/init.d/vswitch stop + + log "loading configurations" + source /etc/init.d/tis_automation_init.config + + FUNCTIONS=(${FUNCTIONS//,/ }) + for FUNCTION in ${FUNCTIONS[@]}; do + case ${FUNCTION} in + "vswitch") + setup_vswitch "layer2" + RET=$? + ;; + "avr") + setup_vswitch "layer3" + RET=$? + ;; + "routing") + setup_kernel_routing + RET=$? 
+ ;; + *) + log "Unknown function '${FUNCTION}'; ignoring" + RET=0 + esac + if [ ${RET} -ne 0 ]; then + log "Failed to setup function '${FUNCTION}'; stopping" + return ${RET} + fi + done + return 0 +} + +################################################################################ +# Stop Action +################################################################################ +function stop() +{ + return 0 +} + +################################################################################ +# Status Action +################################################################################ +function status() +{ + return 0 +} + +################################################################################ +# Main Entry +################################################################################ + +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart) + stop + start + ;; + status) + status + ;; + *) + echo $"Usage: $0 {start|stop|restart|status}" + exit 1 +esac + +exit 0 diff --git a/automated-pytest-suite/utils/horizon/__init__.py b/automated-pytest-suite/utils/horizon/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/basewebobject.py b/automated-pytest-suite/utils/horizon/basewebobject.py new file mode 100644 index 0000000..c0b20d0 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/basewebobject.py @@ -0,0 +1,169 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
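+# Shared Selenium wait/lookup helpers used by the Horizon page objects and regions in this suite.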
+ +import contextlib + +import selenium.common.exceptions as Exceptions +from selenium.webdriver.common import by +from selenium.webdriver.remote import webelement +import selenium.webdriver.support.ui as Support +from selenium.webdriver.support import wait + + +class BaseWebObject: + """Base class for all web objects.""" + _spinner_locator = (by.By.CSS_SELECTOR, '.modal-body > .loader') + + def __init__(self, driver): + self.driver = driver + self.explicit_wait = 400 # unlock a host usually need more than 300 seconds + + def _is_element_present(self, *locator): + with self.waits_disabled(): + try: + self._get_element(*locator) + return True + except Exceptions.NoSuchElementException: + return False + + def _is_element_visible(self, *locator): + try: + return self._get_element(*locator).is_displayed() + except (Exceptions.NoSuchElementException, + Exceptions.ElementNotVisibleException): + return False + + def _is_element_displayed(self, element): + if element is None: + return False + try: + if isinstance(element, webelement.WebElement): + return element.is_displayed() + else: + return element.src_elem.is_displayed() + except (Exceptions.ElementNotVisibleException, + Exceptions.StaleElementReferenceException): + return False + + def _is_text_visible(self, element, text, strict=True): + if not self._is_element_displayed(element): + return False + # if not hasattr(element, 'text'): + # return False + if strict: + if not self._is_element_displayed(element): + return False + try: + return element.text == text + except Exceptions.StaleElementReferenceException: + return False + else: + return text in element.text + + def _get_element(self, *locator): + return self.driver.find_element(*locator) + + def _get_elements(self, *locator): + return self.driver.find_elements(*locator) + + def _fill_field_element(self, data, field_element): + field_element.clear() + field_element.send_keys(data) + return field_element + + def _select_dropdown(self, value, element): + select = Support.Select(element) + select.select_by_visible_text(value) + + def _select_dropdown_by_value(self, value, element): + select = Support.Select(element) + select.select_by_value(value) + + def _get_dropdown_options(self, element): + select = Support.Select(element) + return select.options + + def _turn_off_implicit_wait(self): + self.driver.implicitly_wait(0) + + def _turn_on_implicit_wait(self): + self.driver.implicitly_wait(10) + + def _wait_until(self, predicate, timeout=None, poll_frequency=0.001): + """Wait until the value returned by predicate is not False. + + It also returns when the timeout is elapsed. + 'predicate' takes the driver as argument. + """ + if not timeout: + timeout = self.explicit_wait + return wait.WebDriverWait(self.driver, timeout, poll_frequency).until( + predicate) + + def _wait_till_text_present_in_element(self, element, texts, timeout=None): + """Waiting for a text to appear in a certain element. + + Most frequent usage is actually to wait for a _different_ element + with a different text to appear in place of an old element. + So a way to avoid capturing stale element reference should be provided + for this use case. 
+ + Better to wrap getting entity status cell in a lambda + to avoid problems with cell being replaced with totally different + element by Javascript + """ + if not isinstance(texts, (list, tuple)): + texts = (texts,) + + def predicate(_): + try: + elt = element() if hasattr(element, '__call__') else element + except: + return False + for text in texts: + if self._is_text_visible(elt, 'Error'): + # Fail fast when the element reports an error instead of + # one of the expected texts + raise RuntimeError("'Error' is displayed in the watched element") + if self._is_text_visible(elt, text): + return text + return False + + return self._wait_until(predicate, timeout) + + def _wait_till_element_visible(self, locator, timeout=None): + self._wait_until(lambda x: self._is_element_visible(*locator), timeout) + + def _wait_till_element_disappears(self, element, timeout=None): + self._wait_until(lambda x: not self._is_element_displayed(element), + timeout) + + @contextlib.contextmanager + def waits_disabled(self): + try: + self._turn_off_implicit_wait() + yield + finally: + self._turn_on_implicit_wait() + + def wait_till_element_disappears(self, element_getter): + with self.waits_disabled(): + try: + self._wait_till_element_disappears(element_getter()) + except Exceptions.NoSuchElementException: + # NOTE(mpavlase): This is valid state. When request completes + # even before Selenium get a chance to get the spinner element, + # it will raise the NoSuchElementException exception. + pass + + def wait_till_spinner_disappears(self): + getter = lambda: self.driver.find_element(*self._spinner_locator) + self.wait_till_element_disappears(getter) diff --git a/automated-pytest-suite/utils/horizon/helper.py b/automated-pytest-suite/utils/horizon/helper.py new file mode 100644 index 0000000..f2e07a7 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/helper.py @@ -0,0 +1,93 @@ +import contextlib +import tempfile +import os +import time + +from selenium import webdriver + +try: + from pyvirtualdisplay import Display +except ImportError: + Display = None + +from consts.proj_vars import ProjVar +from utils.tis_log import LOG + + +@contextlib.contextmanager +def gen_temporary_file(name='', suffix='.qcow2', size=10485760): + """Generate temporary file with provided parameters. + + :param name: file name except the extension /suffix + :param suffix: file extension/suffix + :param size: size of the file to create, bytes are generated randomly + :return: path to the generated file + """ + with tempfile.NamedTemporaryFile(prefix=name, suffix=suffix) as tmp_file: + tmp_file.write(os.urandom(size)) + yield tmp_file.name + + +def gen_resource_name(resource="", timestamp=True): + """Generate random resource name using uuid and timestamp. + + Input fields are usually limited to 255 or 80 characters, hence they + provide enough space for quite long resource names; but if the maximum + field length is more restricted, consider using a shorter resource + argument or omitting the timestamp by setting the timestamp argument + to False.
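+ + For example (illustrative timestamp), gen_resource_name("flavor") + produces a name of the form "test_flavor_13-06-21-58-52", i.e. + "test_<resource>_<dd-mm-HH-MM-SS>".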
+ """ + fields = ['test'] + if resource: + fields.append(resource) + if timestamp: + tstamp = time.strftime("%d-%m-%H-%M-%S") + fields.append(tstamp) + return "_".join(fields) + + +class HorizonDriver: + driver_info = [] + + @classmethod + def get_driver(cls): + if cls.driver_info: + return cls.driver_info[0][0] + + LOG.info("Setting Firefox download preferences") + profile = webdriver.FirefoxProfile() + # Change default download directory to automation logs dir + # 2 - download to custom folder + horizon_dir = ProjVar.get_var('LOG_DIR') + '/horizon' + os.makedirs(horizon_dir, exist_ok=True) + profile.set_preference("browser.download.folderList", 2) + profile.set_preference("browser.download.manager.showWhenStarting", + False) + profile.set_preference("browser.download.dir", horizon_dir) + profile.set_preference("browser.helperApps.neverAsk.saveToDisk", + "text/plain,application/x-shellscript") + # profile.update_preferences() + display = None + if Display is not None: + display = Display(visible=ProjVar.get_var('HORIZON_VISIBLE'), + size=(1920, 1080)) + display.start() + + driver_ = webdriver.Firefox(firefox_profile=profile) + # driver_.maximize_window() + cls.driver_info.append((driver_, display)) + LOG.info("Web driver created with download preference set") + return driver_ + + @classmethod + def quit_driver(cls, *driver_display): + if cls.driver_info: + driver_, display_ = cls.driver_info[0] + driver_.quit() + if display_: + display_.stop() + cls.driver_info = [] + profile = webdriver.FirefoxProfile() + profile.set_preference("browser.download.folderList", 1) + LOG.info( + "Quit web driver and reset Firefox download folder to default") diff --git a/automated-pytest-suite/utils/horizon/pages/__init__.py b/automated-pytest-suite/utils/horizon/pages/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/__init__.py b/automated-pytest-suite/utils/horizon/pages/admin/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/compute/__init__.py b/automated-pytest-suite/utils/horizon/pages/admin/compute/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/compute/flavorspage.py b/automated-pytest-suite/utils/horizon/pages/admin/compute/flavorspage.py new file mode 100644 index 0000000..6b9f603 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/compute/flavorspage.py @@ -0,0 +1,174 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import menus +from utils.horizon.regions import tables +from time import sleep + + +class FlavorsTable(tables.TableRegion): + name = "flavors" + + CREATE_FLAVOR_FORM_FIELDS = (("name", "flavor_id", "vcpus", "memory_mb", + "disk_gb", "eph_gb", + "swap_mb", + "rxtx_factor"), + {"members": menus.MembershipMenuRegion}) + + EDIT_FLAVOR_FORM_FIELDS = (("name", "vcpus", "memory_mb", + "disk_gb", "eph_gb", "swap_mb", + "rxtx_factor"), + {"members": menus.MembershipMenuRegion}) + UPDATE_METADATA_FORM_FIELD = "customItem" + + @tables.bind_table_action('create') + def create_flavor(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, + field_mappings=self.CREATE_FLAVOR_FORM_FIELDS + ) + + @tables.bind_row_action('update') + def edit_flavor(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return 
forms.TabbedFormRegion( + self.driver, + field_mappings=self.EDIT_FLAVOR_FORM_FIELDS + ) + + @tables.bind_row_action('update_metadata') + def update_metadata(self, metadata_button, row): + metadata_button.click() + return forms.MetadataFormRegion(self.driver) + + @tables.bind_row_action('projects') + def modify_access(self, modify_button, row): + modify_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, + field_mappings=self.EDIT_FLAVOR_FORM_FIELDS, + default_tab=1 + ) + + @tables.bind_row_action('delete') + def delete_flavor_by_row(self, delete_button, row): + delete_button.click() + sleep(0.5) + return forms.BaseFormRegion(self.driver) + + @tables.bind_table_action('delete') + def delete_flavor(self, delete_button): + delete_button.click() + sleep(0.5) + return forms.BaseFormRegion(self.driver) + + +class FlavorsPage(basepage.BasePage): + PARTIAL_URL = 'admin/flavors' + + FLAVOR_INFORMATION_TAB_INDEX = 0 + FLAVOR_ACCESS_TAB_INDEX = 1 + FLAVORS_TABLE_NAME_COLUMN = 'Flavor Name' + + @property + def flavors_table(self): + return FlavorsTable(self.driver) + + def _get_row_by_flavor_name(self, name): + return self.flavors_table.get_row(self.FLAVORS_TABLE_NAME_COLUMN, name) + + def create_flavor(self, name, flavor_id=None, vcpus=1, ram=1024, + root_disk=20, ephemeral_disk=None, + swap_disk=None, rxtx_factor=None, + allocate_projects=None): + create_flavor_form = self.flavors_table.create_flavor() + create_flavor_form.name.text = name + if flavor_id is not None: + create_flavor_form.flavor_id.text = flavor_id + if vcpus is not None: + create_flavor_form.vcpus.value = vcpus + if ram is not None: + create_flavor_form.memory_mb.value = ram + if root_disk is not None: + create_flavor_form.disk_gb.value = root_disk + if ephemeral_disk is not None: + create_flavor_form.eph_gb.value = ephemeral_disk + if swap_disk is not None: + create_flavor_form.swap_mb.value = swap_disk + if rxtx_factor is not None: + create_flavor_form.rxtx_factor = rxtx_factor + create_flavor_form.switch_to(self.FLAVOR_ACCESS_TAB_INDEX) + if allocate_projects is not None: + for project in allocate_projects: + create_flavor_form.members.allocate_member(project) + create_flavor_form.submit() + + def is_flavor_present(self, name): + return bool(self._get_row_by_flavor_name(name)) + + def get_flavor_info(self, name, header): + row = self._get_row_by_flavor_name(name) + return row.cells[header].text + + def edit_flavor(self, name, newname=None, flavor_id=None, vcpus=None, ram=None, + root_disk=None, ephemeral_disk=None, + swap_disk=None, rxtx_factor=None, + allocate_projects=None, deallocate_projects=None): + row = self._get_row_by_flavor_name(name) + edit_flavor_form = self.flavors_table.edit_flavor(row) + if newname is not None: + edit_flavor_form.name.text = newname + if flavor_id is not None: + edit_flavor_form.flavor_id.text = flavor_id + if vcpus is not None: + edit_flavor_form.vcpus.value = vcpus + if ram is not None: + edit_flavor_form.memory_mb.value = ram + if root_disk is not None: + edit_flavor_form.disk_gb.value = root_disk + if ephemeral_disk is not None: + edit_flavor_form.eph_gb.value = ephemeral_disk + if swap_disk is not None: + edit_flavor_form.swap_mb.value = swap_disk + if rxtx_factor is not None: + edit_flavor_form.rxtx_factor = rxtx_factor + edit_flavor_form.switch_to(self.FLAVOR_ACCESS_TAB_INDEX) + if allocate_projects is not None: + for project in allocate_projects: + edit_flavor_form.members.allocate_member(project) + if deallocate_projects is not None: + for 
project in deallocate_projects: + edit_flavor_form.members.deallocate_member(project) + edit_flavor_form.submit() + + def modify_access(self, name, allocate_projects=None, deallocate_projects=None): + row = self._get_row_by_flavor_name(name) + edit_flavor_form = self.flavors_table.modify_access(row) + if allocate_projects is not None: + for project in allocate_projects: + edit_flavor_form.members.allocate_member(project) + if deallocate_projects is not None: + for project in deallocate_projects: + edit_flavor_form.members.deallocate_member(project) + edit_flavor_form.submit() + + def add_custom_metadata(self, name, metadata): + row = self._get_row_by_flavor_name(name) + update_metadata_form = self.flavors_table.update_metadata(row) + for field_name, value in metadata.items(): + update_metadata_form.add_custom_field(field_name, value) + update_metadata_form.submit() + + def delete_flavor_by_row(self, name): + row = self._get_row_by_flavor_name(name) + confirm_delete_form = self.flavors_table.delete_flavor_by_row(row) + confirm_delete_form.submit() + + def delete_flavor(self, name): + row = self._get_row_by_flavor_name(name) + row.mark() + confirm_delete_form = self.flavors_table.delete_flavor() + confirm_delete_form.submit() diff --git a/automated-pytest-suite/utils/horizon/pages/admin/compute/hostaggregatespage.py b/automated-pytest-suite/utils/horizon/pages/admin/compute/hostaggregatespage.py new file mode 100644 index 0000000..6209360 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/compute/hostaggregatespage.py @@ -0,0 +1,93 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables +from utils.horizon.regions import menus +from utils.horizon import helper + + +class HostAggregatesTable(tables.TableRegion): + name = "host_aggregates" + + CREATE_HOST_AGGREGATE_FORM_FIELDS = (("name", "availability_zone"), + {"members": menus.MembershipMenuRegion}) + MANAGE_HOSTS_FORM_FIELDS = ({"members": menus.MembershipMenuRegion}) + + @tables.bind_table_action('create') + def create_host_aggregate(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.CREATE_HOST_AGGREGATE_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_host_aggregate(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_host_aggregate_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('manage') + def manage_hosts(self, manage_button, row): + manage_button.click() + return forms.FormRegion(self.driver, field_mappings=self.MANAGE_HOSTS_FORM_FIELDS) + + +class AvilabilityZoneTable(tables.TableRegion): + name = "availability_zones" + pass + + +class HostaggregatesPage(basepage.BasePage): + + PARTIAL_URL = 'admin/aggregates' + HOST_AGGREGATES_TABLE_NAME_COLUMN = 'Name' + AVAILABILITY_ZONES_TABLE_NAME_COLUMN = 'Availability Zone Name' + + @property + def host_aggregates_table(self): + return HostAggregatesTable(self.driver) + + @property + def availability_zons_table(self): + return AvilabilityZoneTable(self.driver) + + def _get_host_aggregate_row_by_name(self, name): + return self.host_aggregates_table.get_row( + self.HOST_AGGREGATES_TABLE_NAME_COLUMN, name) + + def _get_availability_zone_row_by_name(self, name): + return self.availability_zons_table.get_row( + 
self.AVAILABILITY_ZONES_TABLE_NAME_COLUMN, name) + + def create_host_aggregate(self, name=None, availability_zone=None): + create_host_aggregate_form = self.host_aggregates_table.create_host_aggregate() + if name is None: + name = helper.gen_resource_name('aggregate') + create_host_aggregate_form.name.text = name + if availability_zone is not None: + create_host_aggregate_form.availability_zone.text = availability_zone + create_host_aggregate_form.submit() + return name + + def delete_host_aggregate(self, name): + row = self._get_host_aggregate_row_by_name(name) + row.mark() + confirmation_form = self.host_aggregates_table.delete_host_aggregate() + confirmation_form.submit() + + def is_host_aggregate_present(self, name): + return bool(self._get_host_aggregate_row_by_name(name)) + + def is_availability_zones_present(self, name): + return bool(self._get_availability_zone_row_by_name(name)) + + def get_host_aggregate_info(self, name, header): + row = self._get_host_aggregate_row_by_name(name) + return row.cells[header].text + + def get_availability_zone_info(self, name, header): + row = self._get_availability_zone_row_by_name(name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/admin/compute/hypervisorspage.py b/automated-pytest-suite/utils/horizon/pages/admin/compute/hypervisorspage.py new file mode 100644 index 0000000..3ce92db --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/compute/hypervisorspage.py @@ -0,0 +1,81 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class HypervisorTable(tables.TableRegion): + name = "hypervisors" + pass + + +class ComputeHostTable(tables.TableRegion): + name = "compute_host" + + DISABLE_SERVICE_FORM_FIELDS = ('host', 'reason') + + @tables.bind_row_action('disable') + def disable_service(self, disable_button, row): + disable_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.DISABLE_SERVICE_FORM_FIELDS) + + @tables.bind_row_action('enable') + def enable_service(self, enable_button, row): + enable_button.click() + self.wait_till_spinner_disappears() + + +class HypervisorsPage(basepage.BasePage): + + PARTIAL_URL = 'admin/hypervisors' + + HYPERVISOR_TAB_INDEX = 0 + COMPUTEHOST_TAB_INDEX = 1 + HYPERVISOR_TABLE_NAME_COLUMN = 'Hostname' + COMPUTE_HOST_TABLE_NAME_COLUMN = 'Host' + + @property + def hypervisor_table(self): + return HypervisorTable(self.driver) + + @property + def compute_host_table(self): + return ComputeHostTable(self.driver) + + def _get_row_with_hypervisor_name(self, name): + return self.hypervisor_table.get_row(self.HYPERVISOR_TABLE_NAME_COLUMN, name) + + def _get_row_with_compute_host_name(self, name): + return self.compute_host_table.get_row(self.COMPUTE_HOST_TABLE_NAME_COLUMN, name) + + def is_hypervisor_present(self, name): + return bool(self._get_row_with_hypervisor_name(name)) + + def get_hypervisor_info(self, name, header): + row = self._get_row_with_hypervisor_name(name) + return row.cells[header].text + + def is_compute_host_present(self, name): + return bool(self._get_row_with_compute_host_name(name)) + + def get_compute_host_info(self, name, header): + row = self._get_row_with_compute_host_name(name) + return row.cells[header].text + + def disable_service(self, name, reason=None): + row = self._get_row_with_compute_host_name(name) + disable_service_form = self.compute_host_table.disable_service(row) + if reason is not None: + 
disable_service_form.reason.text = reason + disable_service_form.submit() + + def enable_service(self, name, reason=None): + row = self._get_row_with_compute_host_name(name) + self.compute_host_table.enable_service(row) + + def go_to_hypervisor_tab(self): + self.go_to_tab(self.HYPERVISOR_TAB_INDEX) + + def go_to_compute_host_tab(self): + self.go_to_tab(self.COMPUTEHOST_TAB_INDEX) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/compute/imagespage.py b/automated-pytest-suite/utils/horizon/pages/admin/compute/imagespage.py new file mode 100644 index 0000000..4e94564 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/compute/imagespage.py @@ -0,0 +1,6 @@ +from utils.horizon.pages.project.compute import imagespage + + +class ImagesPage(imagespage.ImagesPage): + PARTIAL_URL = 'admin/images' + pass diff --git a/automated-pytest-suite/utils/horizon/pages/admin/compute/instancespage.py b/automated-pytest-suite/utils/horizon/pages/admin/compute/instancespage.py new file mode 100644 index 0000000..f244108 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/compute/instancespage.py @@ -0,0 +1,6 @@ +from utils.horizon.pages.project.compute import instancespage + + +class InstancesPage(instancespage.InstancesPage): + PARTIAL_URL = 'admin/instances' + pass diff --git a/automated-pytest-suite/utils/horizon/pages/admin/compute/servergroupspage.py b/automated-pytest-suite/utils/horizon/pages/admin/compute/servergroupspage.py new file mode 100644 index 0000000..003c698 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/compute/servergroupspage.py @@ -0,0 +1,63 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class ServerGroupsTable(tables.TableRegion): + + name = "server_groups" + + CREATE_SERVER_GROUP_FORM_FIELDS = ("tenantP", "name", "policy", "is_best_effort", "group_size") + + @tables.bind_table_action('create') + def create_group(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.CREATE_SERVER_GROUP_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_group(self, delete_button): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + +class ServerGroupsPage(basepage.BasePage): + + PARTIAL_URL = 'admin/server_groups' + + SERVER_GROUPS_TABLE_NAME_COLUMN = 'Group Name' + + @property + def server_groups_table(self): + return ServerGroupsTable(self.driver) + + def create_group(self, name, project=None, policy=None, is_best_effort=False, group_size=None): + create_form = self.server_groups_table.create_group() + create_form.name.text = name + if project is not None: + create_form.tenantP.text = project + create_form.tenantP.text = project + if policy is not None: + create_form.policy.text = policy + if is_best_effort: + create_form.is_best_effort.mark() + if group_size is not None: + create_form.group_size.text = group_size + create_form.submit() + + def _get_row_with_server_group_name(self, name): + return self.server_groups_table.get_row(self.SERVER_GROUPS_TABLE_NAME_COLUMN, name) + + def delete_group(self, name): + row = self._get_row_with_server_group_name(name) + row.mark() + confirm_delete_form = self.server_groups_table.delete_group() + confirm_delete_form.submit() + + def is_server_group_present(self, name): + return bool(self._get_row_with_server_group_name(name)) + + def 
get_server_group_info(self, server_group_name, header): + row = self._get_row_with_server_group_name(server_group_name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/admin/fault_management/__init__.py b/automated-pytest-suite/utils/horizon/pages/admin/fault_management/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/fault_management/activealarmspage.py b/automated-pytest-suite/utils/horizon/pages/admin/fault_management/activealarmspage.py new file mode 100644 index 0000000..f6efa70 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/fault_management/activealarmspage.py @@ -0,0 +1,24 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import tables +from utils.horizon.regions import forms + + +class AlarmsTable(tables.TableRegion): + name = "alarms" + pass + + +class ActiveAlarmsPage(basepage.BasePage): + + PARTIAL_URL = 'admin/active_alarms' + ACTIVE_ALARMS_TABLE_NAME_COLUMN = 'Timestamp' + + @property + def alarms_table(self): + return AlarmsTable(self.driver) + + def _get_row_with_alarm_timestamp(self, timestamp): + return self.alarms_table.get_row( + self.ACTIVE_ALARMS_TABLE_NAME_COLUMN, timestamp) + + def is_active_alarm_present(self, timestamp): + return bool(self._get_row_with_alarm_timestamp(timestamp)) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventspage.py b/automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventspage.py new file mode 100644 index 0000000..a277e8e --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventspage.py @@ -0,0 +1,24 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import tables +from utils.horizon.regions import forms + + +class EventLogsTable(tables.TableRegion): + name = "eventlogs" + pass + + +class EventLogsPage(basepage.BasePage): + + PARTIAL_URL = 'admin/events' + EVENT_LOGS_TABLE_NAME_COLUMN = 'Timestamp' + + @property + def event_logs_table(self): + return EventLogsTable(self.driver) + + def _get_row_with_event_log_timestamp(self, timestamp): + return self.event_logs_table.get_row(self.EVENT_LOGS_TABLE_NAME_COLUMN, timestamp) + + def is_event_log_present(self, timestamp): + return bool(self._get_row_with_event_log_timestamp(timestamp)) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventssuppressionpage.py b/automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventssuppressionpage.py new file mode 100644 index 0000000..0c387a5 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/fault_management/eventssuppressionpage.py @@ -0,0 +1,49 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables +from selenium.webdriver.common import by + + +class EventsSuppressionTable(tables.TableRegion): + name = "OS::StarlingX::EventsSuppression" + + @tables.bind_row_action('danger', attribute_search='class') + def suppress_event(self, suppress_button, row): + suppress_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('danger', attribute_search='class') + def unsuppress_event(self, unsuppress_button, row): + unsuppress_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + def _table_locator(self, table_name): + return by.By.CSS_SELECTOR, \\ + 
'hz-resource-table[resource-type-name="%s"]' % table_name + + +class EventsSuppressionPage(basepage.BasePage): + + PARTIAL_URL = 'admin/events_suppression' + EVENTS_SUPPRESSION_TABLE_NAME_COLUMN = 'Event ID' + # EVENTS_SUPPRESSION_TAB = 2 + + @property + def events_suppression_table(self): + return EventsSuppressionTable(self.driver) + + def _get_row_with_event_id(self, event_id): + return self.events_suppression_table.get_row( + self.EVENTS_SUPPRESSION_TABLE_NAME_COLUMN, event_id) + + def suppress_event(self, event_id): + row = self._get_row_with_event_id(event_id) + confirm_form = self.events_suppression_table.suppress_event(row) + confirm_form.submit() + + def unsuppress_event(self, event_id): + row = self._get_row_with_event_id(event_id) + confirm_form = self.events_suppression_table.unsuppress_event(row) + confirm_form.submit() diff --git a/automated-pytest-suite/utils/horizon/pages/admin/network/__init__.py b/automated-pytest-suite/utils/horizon/pages/admin/network/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/network/floatingipspage.py b/automated-pytest-suite/utils/horizon/pages/admin/network/floatingipspage.py new file mode 100644 index 0000000..b536911 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/network/floatingipspage.py @@ -0,0 +1,25 @@ +import re + +from utils.horizon.pages.project.network import floatingipspage + + +class FloatingipsPage(floatingipspage.FloatingipsPage): + + PARTIAL_URL = 'admin/floating_ips' + + def allocate_floatingip(self, pool=None, tenant=None, floating_ip_address=None): + floatingip_form = self.floatingips_table.allocate_ip() + if pool is not None: + floatingip_form.pool.text = pool + if tenant is not None: + floatingip_form.tenant.text = tenant + if floating_ip_address is not None: + floatingip_form.floating_ip_address.text = floating_ip_address + floatingip_form.submit() + ip = re.compile('(([2][5][0-5]\.)|([2][0-4][0-9]\.)' + + '|([0-1]?[0-9]?[0-9]\.)){3}(([2][5][0-5])|' + '([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))') + match = ip.search((self._get_element( + *self._floatingips_fadein_popup_locator)).text) + floatingip = str(match.group()) + return floatingip \ No newline at end of file diff --git a/automated-pytest-suite/utils/horizon/pages/admin/network/networkspage.py b/automated-pytest-suite/utils/horizon/pages/admin/network/networkspage.py new file mode 100644 index 0000000..4576013 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/network/networkspage.py @@ -0,0 +1,173 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class NetworksTable(tables.TableRegion): + name = "networks" + + CREATE_NETWORK_FORM_FIELDS = (("name", "tenant_id", "network_type", + "physical_network", "segmentation_id", + "admin_state", "shared", "external", + "with_subnet"), + ("subnet_name", "cidr", "ip_version", + "gateway_ip", "no_gateway"), + ("enable_dhcp", "allocation_pools", + "dns_nameservers", "host_routes")) + + EDIT_NETWORK_FORM_FIELDS = ("name", "admin_state", "shared", "external", + "qos", "vlan_transparent", "providernet_type", + "providernet", "segmentation_id") + + @tables.bind_table_action('create') + def create_network(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, field_mappings=self.CREATE_NETWORK_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_network(self, 
delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action("delete") + def delete_network_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action("update") + def edit_network(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_NETWORK_FORM_FIELDS) + + +class NetworksPage(basepage.BasePage): + PARTIAL_URL = 'admin/networks' + + NETWORKS_TABLE_NAME_COLUMN = 'Network Name' + + def _get_row_with_network_name(self, name): + return self.networks_table.get_row( + self.NETWORKS_TABLE_NAME_COLUMN, name) + + @property + def networks_table(self): + return NetworksTable(self.driver) + + def create_network(self, network_name, project, provider_network_type, + physical_network, segmentation_id=None, enable_admin_state=None, + qos=None, shared=None, external=None, vlan_transparent=None, + create_subnet=None, subnet_name=None, network_address=None, + ip_version=None, gateway_ip=None, disable_gateway=None, + enable_dhcp=None, allocation_pools=None, dns_name_servers=None, + host_routes=None): + + create_network_form = self.networks_table.create_network() + create_network_form.name.text = network_name + create_network_form.tenant_id.text = project + create_network_form.network_type.value = provider_network_type + if provider_network_type == 'vlan': + create_network_form.physical_network.text = physical_network + if segmentation_id is not None: + create_network_form.segmentation_id.text = segmentation_id + if provider_network_type == 'vxlan': + create_network_form.physical_network.value = physical_network + if provider_network_type == 'flat': + create_network_form.physical_network.value = physical_network + if enable_admin_state is True: + create_network_form.admin_state.mark() + if enable_admin_state is False: + create_network_form.admin_state.unmark() + if qos is not None: + create_network_form.qos.value = qos + if shared is True: + create_network_form.shared.mark() + if shared is False: + create_network_form.shared.unmark() + if external is True: + create_network_form.external.mark() + if external is False: + create_network_form.external.unmark() + if vlan_transparent is True: + create_network_form.vlan_transparent.mark() + if vlan_transparent is False: + create_network_form.vlan_transparent.unmark() + if create_subnet is False: + create_network_form.with_subnet.unmark() + create_network_form.switch_to(2) + else: + create_network_form.with_subnet.mark() + create_network_form.switch_to(1) + create_network_form.subnet_name.text = subnet_name + create_network_form.cidr.text = network_address + if ip_version is not None: + create_network_form.ip_version.value = ip_version + if gateway_ip is not None: + create_network_form.gateway_ip.text = gateway_ip + if disable_gateway is True: + create_network_form.disable_gateway.mark() + if disable_gateway is False: + create_network_form.disable_gateway.unmark() + create_network_form.switch_to(2) + if enable_dhcp is False: + create_network_form.enable_dhcp.unmark() + if enable_dhcp is True: + create_network_form.enable_dhcp.mark() + if allocation_pools is not None: + create_network_form.allocation_pools.text = allocation_pools + if dns_name_servers is not None: + create_network_form.dns_nameservers.text = dns_name_servers + if host_routes is not None: + create_network_form.host_routes.text = host_routes + create_network_form.submit() + + def 
edit_network(self, name, new_name=None, enable_admin_state=None, is_shared=None, + is_external_network=None, qos_policy=None, vlan_transparent=None): + row = self._get_row_with_network_name(name) + edit_network_form = self.networks_table.edit_network(row) + if new_name is not None: + edit_network_form.name.text = new_name + if enable_admin_state is True: + edit_network_form.admin_state.mark() + if enable_admin_state is False: + edit_network_form.admin_state.unmark() + if is_shared is True: + edit_network_form.shared.mark() + if is_shared is False: + edit_network_form.shared.unmark() + if is_external_network is True: + edit_network_form.external.mark() + if is_external_network is False: + edit_network_form.external.unmark() + if qos_policy is not None: + edit_network_form.qos.text = qos_policy + if vlan_transparent is True: + edit_network_form.vlan_transparent.mark() + if vlan_transparent is False: + edit_network_form.vlan_transparent.unmark() + edit_network_form.submit() + + def delete_network(self, name): + row = self._get_row_with_network_name(name) + row.mark() + confirm_delete_networks_form = self.networks_table.delete_network() + confirm_delete_networks_form.submit() + + def delete_network_by_row(self, name): + row = self._get_row_with_network_name(name) + confirm_delete_networks_form = self.networks_table.delete_network_by_row(row) + confirm_delete_networks_form.submit() + + def is_network_present(self, name): + return bool(self._get_row_with_network_name(name)) + + def get_network_info(self, network_name, header): + row = self._get_row_with_network_name(network_name) + return row.cells[header].text + + def go_to_networks_tab(self): + self.go_to_tab(0) + + def go_to_qos_policies_tab(self): + self.go_to_tab(1) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/network/routerspage.py b/automated-pytest-suite/utils/horizon/pages/admin/network/routerspage.py new file mode 100644 index 0000000..474a886 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/network/routerspage.py @@ -0,0 +1,6 @@ +from utils.horizon.pages.project.network import routerspage + + +class RoutersPage(routerspage.RoutersPage): + PARTIAL_URL = 'admin/routers' + pass diff --git a/automated-pytest-suite/utils/horizon/pages/admin/overviewpage.py b/automated-pytest-suite/utils/horizon/pages/admin/overviewpage.py new file mode 100644 index 0000000..a168349 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/overviewpage.py @@ -0,0 +1,7 @@ +from utils.horizon.pages import basepage + + +class OverviewPage(basepage.BasePage): + def __init__(self, driver): + super(OverviewPage, self).__init__(driver) + self._page_title = "Usage Overview" diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/__init__.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/hostinventorypage.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/hostinventorypage.py new file mode 100644 index 0000000..9fa18dc --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/platform/hostinventorypage.py @@ -0,0 +1,472 @@ +import re +import copy + +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms, tables +from utils.horizon.helper import HorizonDriver +from keywords import system_helper + + +class HostsTable(tables.TableRegion): + _cli_horizon_fields_map = { + 
'hostname': 'Host Name', + 'personality': 'Personality', + 'administrative': 'Admin State', + 'operational': 'Operational State', + 'availability': 'Availability State', + 'uptime': 'Uptime', + 'task': 'Status' + } + + def get_cli_horizon_mapping(self): + return self._cli_horizon_fields_map + + @tables.bind_row_action('update') + def edit_host(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.EDIT_HOST_FORM_FIELDS) + + @tables.bind_row_action('lock') + def lock_host(self, lock_button, row): + lock_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('unlock') + def unlock_host(self, unlock_button, row): + unlock_button.click() + + @tables.bind_row_anchor_column('Host Name') + def go_to_host_detail_page(self, row_link, row): + row_link.click() + + @tables.bind_row_anchor_column('Host Name') + def host_detail_overview_description(self): + self.go_to_host_detail_page() + return HostDetailOverviewDescription(self.driver) + + +class ControllerHostsTable(HostsTable): + name = 'hostscontroller' + EDIT_HOST_FORM_FIELDS = ( + ("personality", "subfunctions", "hostname", "location", "cpuProfile", + "interfaceProfile", "diskProfile", "memoryProfile", "ttys_dcd"), + ("boot_device", "rootfs_device", "install_output", "console"), + ("bm_type", "bm_ip", "bm_username", "bm_password", "bm_confirm_password")) + + @tables.bind_row_action('swact') + def swact_host(self, swact_button, row): + swact_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_table_action('create') + def add_host(self, add_button): + add_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.ADD_HOST_FORM_FIELDS) + + +class StorageHostsTable(HostsTable): + name = 'hostsstorage' + + def get_cli_horizon_mapping(self): + map_ = copy.deepcopy(self._cli_horizon_fields_map) + map_.pop('personality') + return map_ + + @tables.bind_table_action('install-async') + def install_paches(self, install_button): + install_button.click() + + +class ComputeHostsTable(HostsTable): + name = 'hostsworker' + + EDIT_HOST_FORM_FIELDS = ( + ("personality", "location", "cpuProfile", "interfaceProfile", "ttys_dcd"), + ("boot_device", "rootfs_device", "install_output", "console"), + ("bm_type", "bm_ip", "bm_username", "bm_password", "bm_confirm_password")) + + @tables.bind_table_action('install-async') + def install_paches(self, install_button): + install_button.click() + + +class HostInventoryPage(basepage.BasePage): + + PARTIAL_URL = 'admin' + + HOSTS_TABLE_NAME_COLUMN = 'Host Name' + HOSTS_TABLE_ADMIN_STATE_COLUMN = 'Admin State' + HOSTS_TABLE_AVAILABILITY_STATE_COLUMN = 'Availability State' + HOSTS_TAB_INDEX = 0 + DEVICE_USAGE_TAB_INDEX = 5 + + def _get_row_with_host_name(self, name): + return self.hosts_table(name).get_row( + self.HOSTS_TABLE_NAME_COLUMN, name) + + def hosts_table(self, name): + if 'controller' in name: + return ControllerHostsTable(self.driver) + elif 'storage' in name: + return StorageHostsTable(self.driver) + elif re.search('compute|worker', name): + return ComputeHostsTable(self.driver) + + def edit_host(self, name): + row = self._get_row_with_host_name(name) + host_edit_form = self.hosts_table(name).edit_host(row) + # ... 
+ host_edit_form.submit() + + def lock_host(self, name): + row = self._get_row_with_host_name(name) + confirm_form = self.hosts_table(name).lock_host(row) + confirm_form.submit() + + def unlock_host(self, name): + row = self._get_row_with_host_name(name) + self.hosts_table(name).unlock_host(row) + + def is_host_present(self, name): + return bool(self._get_row_with_host_name(name)) + + def is_host_admin_state(self, name, state): + def cell_getter(): + row = self._get_row_with_host_name(name) + return row and row.cells[self.HOSTS_TABLE_ADMIN_STATE_COLUMN] + + return bool(self.hosts_table(name).wait_cell_status(cell_getter, state)) + + def is_host_availability_state(self, name, state): + def cell_getter(): + row = self._get_row_with_host_name(name) + return row and row.cells[self.HOSTS_TABLE_AVAILABILITY_STATE_COLUMN] + + return bool(self.hosts_table(name).wait_cell_status(cell_getter, state)) + + def get_host_info(self, host_name, header): + row = self._get_row_with_host_name(host_name) + return row.cells[header].text + + def go_to_hosts_tab(self): + self.go_to_tab(self.HOSTS_TAB_INDEX) + + def go_to_host_detail_page(self, host_name): + row = self._get_row_with_host_name(host_name) + self.hosts_table(host_name).go_to_host_detail_page(row) + return HostInventoryDetailPage(HorizonDriver.get_driver(), host_name) + + def go_to_device_usage_tab(self): + if system_helper.is_aio_simplex(): + self.go_to_tab(1) + else: + self.go_to_tab(self.DEVICE_USAGE_TAB_INDEX) + + def horizon_vals(self, host_name): + horizon_headers = self.hosts_table(host_name).\ + get_cli_horizon_mapping().values() + horizon_vals = {} + for horizon_header in horizon_headers: + horizon_val = self.get_host_info(host_name, horizon_header) + if horizon_val == '' or horizon_val == '-': + horizon_val = 'None' + horizon_vals[horizon_header] = horizon_val + return horizon_vals + + +class HostDetailOverviewDescription(forms.ItemTextDescription): + _separator_locator = (by.By.CSS_SELECTOR, 'dl.dl-horizontal-wide') + OVERVIEW_INFO_HEADERS_MAP = { + 'hostname': 'Host Name', + 'personality': 'Personality', + 'uuid': 'Host UUID', + 'id': 'Host ID', + 'mgmt_mac': 'Management MAC', + 'mgmt_ip': 'Management IP', + 'serialid': 'Serial ID', + 'administrative': 'Administrative State', + 'operational': 'Operational State', + 'availability': 'Availability State', + 'boot_device': 'Boot Device', + 'rootfs_device': 'Rootfs Device', + 'install_output': 'Installation Output', + 'console': 'Console', + 'bm_ip': 'Board Management Controller IP Address', + 'bm_username': 'Board Management Controller User Name' + } + + +class HostDetailProcessorDescription(forms.ItemTextDescription): + name = 'inventory_details__cpufunctions' + _separator_locator = (by.By.CSS_SELECTOR, 'div#cpufunctions') + + +class MemoryTable(tables.TableRegion): + name = "memorys" + MEMORY_TABLE_HEADERS_MAP = { + 'processor': 'Processor', + 'mem_total(MiB)': 'Memory', + 'mem_avail(MiB)': 'Memory', + + } + CREATE_MEMORY_PROFILE_FORM_FIELDS = 'profilename' + + @tables.bind_table_action('createMemoryProfile') + def create_memory_profile(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, field_mappings=self.CREATE_MEMORY_PROFILE_FORM_FIELDS) + + +class StorageDisksTable(tables.TableRegion): + name = "disks" + HEADERS_MAP = { + # 'Model' not found from cli table + 'uuid': 'UUID', + 'device_path': 'Disk info', + 'device_type': 'Type', + 'size_gib': 'Size (GiB)', + 'available_gib': 'Available Size (GiB)', + 'rpm': 
'RPM', + 'serial_id': 'Serial ID' + } + + +class StoragePartitionsTable(tables.TableRegion): + name = "partitions" + HEADERS_MAP = { + 'uuid': 'UUID', + 'device_path': 'Partition Device Path', + 'size_gib': 'Size (GiB)', + 'type_name': 'Partition Type', + 'status': 'Status' + } + CREATE_PARTITION_FORM_FIELDS = ("hostname", "disks", "size_gib", "type_guid") + + @tables.bind_table_action('createpartition') + def create_new_partition(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, field_mappings=self.CREATE_PARTITION_FORM_FIELDS) + + +class StorageLocalVolumeGroupTable(tables.TableRegion): + name = "localvolumegroups" + HEADERS_MAP = { + 'LVG Name': 'Name', + 'State': 'State', + 'Access': 'Access', + 'Total Size (GiB)': 'Size (GiB)', + 'Avail Size (GiB)': 'Avail Size (GiB)', + 'Current PVs': 'Current Physical Volumes', + 'Current LVs': 'Current Logical Volumes' + } + + CREATE_STORAGE_PROFILE_FORM_FIELDS = "profilename" + + @tables.bind_table_action('creatediskprofile') + def create_storage_profie(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, field_mappings=self.CREATE_STORAGE_PROFILE_FORM_FIELDS) + + @tables.bind_table_action('addlocalvolumegroup') + def add_lvg(self, add_button): + add_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver) + + +class StoragePhysicalVolumeTable(tables.TableRegion): + name = "physicalvolumes" + HEADERS_MAP = { + 'lvm_pv_name': 'Name', + 'pv_state': 'State', + 'pv_type': 'Type', + 'disk_or_part_uuid': 'Disk or Partition UUID', + 'disk_or_part_device_path': 'Disk or Partition Device Path', + 'lvm_vg_name': 'LVM Volume Group Name' + } + + ADD_PHYSICL_VOLUME_FORM_FIELDS = ("hostname", 'lvg', 'pv_type', 'disks') + + @tables.bind_table_action('addphysicakvolume') + def add_physical_volume(self, add_button): + add_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, field_mappings=self.ADD_PHYSICL_VOLUME_FORM_FIELDS) + + +class PortsTable(tables.TableRegion): + name = "ports" + HEADERS_MAP = { + # Accelerate not in the cli table + 'name': 'Name', + 'mac address': 'MAC Address', + 'pci address': 'PCI Address', + 'processor': 'Processor', + 'auto neg': 'Auto Negotiation', + 'boot i/f': 'Boot Interface', + 'device type': 'Device Type', + } + + +class InterfaceTable(tables.TableRegion): + name = "interfaces" + + CREATE_INTERFACE_PROFILE_FORM_FIELDS = "profilename" + + @tables.bind_table_action('createprofile') + def create_inferface_profile(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, field_mappings=self.CREATE_INTERFACE_PROFILE_FORM_FIELDS) + + +class LLDPTable(tables.TableRegion): + name = "neighbours" + HEADERS_MAP = { + 'local_port': 'Name', + 'port_identifier': 'Neighbor', + 'port_description': 'Port Description', + # 'ttl': 'Time To Live (Rx)', This one is dynamic, horizon exists delay + 'system_name': 'System Name', + 'dot3_max_frame': 'Max Frame Size' + } + + +class HostInventoryDetailPage(basepage.BasePage): + OVERVIEW_TAB_INDEX = 0 + PROCESSOR_TAB_INDEX = 1 + MEMORY_TAB_INDEX = 2 + SOTRAGE_TAB_INDEX = 3 + PORTS_TAB_INDEX = 4 + INTEFACES_TAB_INDEX = 5 + LLDP_TAB_INDEX = 6 + SENSORS_TAB_INDEX = 7 + DEVICES_TAB_INDEX = 8 + MEMORYTABLE_PROCESSOR_COL = 'Processor' + + def __init__(self, driver, host_name): + 
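+        # This detail page is normally reached through the hosts inventory page
+        # object defined earlier, e.g. (host name and 'inventory_pg' instance are
+        # illustrative assumptions only):
+        #     detail_pg = inventory_pg.go_to_host_detail_page('controller-0')
+        #     detail_pg.go_to_memory_tab()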
super(HostInventoryDetailPage, self).__init__(driver) + self._page_title = 'Host Detail: {}'.format(host_name) + + def _get_memory_table_row_with_processor(self, processor): + return self.memory_table.get_row(self.MEMORYTABLE_PROCESSOR_COL, processor) + + def get_memory_table_info(self, processor, header): + row = self._get_memory_table_row_with_processor(processor) + if row.cells[header].text == '': + return None + else: + return row.cells[header].text + + def get_horizon_row_dict(self, table_, key_header_index): + """ + In a table, each row as a dict, horizon headers as key, cells as value + Args: + table_ (table): table object + key_header_index(int): The unique column header index of table + Return: + A row dict table, the unique column value as a key, usually are + uuid or name + Examples: + { + '53194b0f-543c-4b33-9b2e-276ab9c70671': + {'uuid': 53194b0f-543c-4b33-9b2e-276ab9c70671, 'type': 'SSD'...} + 'ea89031c-8f60-41b0-ad14-c5fcf5df96eb': + {'uuid': ea89031c-8f60-41b0-ad14-c5fcf5df96eb, 'type': 'SSD'...} + } + """ + rtn_dict = {} + keys = table_.HEADERS_MAP.values() + for row in table_.rows: + row_dict = {} + key_header = row.cells[table_.column_names[key_header_index]].text + for key in keys: + val = row.cells[key].text + if val == '' or val == '-': + val = 'None' + row_dict[key] = val + rtn_dict[key_header] = row_dict + return rtn_dict + + def get_storage_partitons_table_rows(self): + return self.storage_partitions_table.rows + + def get_storage_lvg_table_rows(self): + return self.storage_lvg_table.rows + + def get_storage_pv_table_rows(self): + return self.storage_pv_table.rows + + def host_detail_overview(self, driver): + return HostDetailOverviewDescription(driver) + + @property + def inventory_details_processor_info(self): + return HostDetailProcessorDescription(self.driver) + + @property + def memory_table(self): + return MemoryTable(self.driver) + + @property + def storage_disks_table(self): + return StorageDisksTable(self.driver) + + @property + def storage_partitions_table(self): + return StoragePartitionsTable(self.driver) + + @property + def storage_lvg_table(self): + return StorageLocalVolumeGroupTable(self.driver) + + @property + def storage_pv_table(self): + return StoragePhysicalVolumeTable(self.driver) + + def ports_table(self): + return PortsTable(self.driver) + + def lldp_table(self): + return LLDPTable(self.driver) + + def go_to_overview_tab(self): + self.go_to_tab(self.OVERVIEW_TAB_INDEX) + + def go_to_processor_tab(self): + self.go_to_tab(self.PROCESSOR_TAB_INDEX) + + def go_to_memory_tab(self): + self.go_to_tab(self.MEMORY_TAB_INDEX) + + def go_to_storage_tab(self): + self.go_to_tab(self.SOTRAGE_TAB_INDEX) + + def go_to_ports_tab(self): + self.go_to_tab(self.PORTS_TAB_INDEX) + + def go_to_interfaces_tab(self): + self.go_to_tab(self.SOTRAGE_TAB_INDEX) + + def go_to_lldp_tab(self): + self.go_to_tab(self.LLDP_TAB_INDEX) + + def go_to_sensors_tab(self): + self.go_to_tab(self.SENSORS_TAB_INDEX) + + def go_to_devices_tab(self): + self.go_to_tab(self.DEVICES_TAB_INDEX) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkoverviewpage.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkoverviewpage.py new file mode 100644 index 0000000..60972c8 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkoverviewpage.py @@ -0,0 +1,13 @@ +from selenium.webdriver.common import by +from utils.horizon.regions import forms +from utils.horizon.pages import basepage + + +class 
ProvidernetOverviewPage(basepage.BasePage): + + def __init__(self, driver, pnet_name): + super(ProvidernetOverviewPage, self).__init__(driver) + self._page_title = 'Provider Network Detail: {}'.format(pnet_name) + + def pnet_overview_info_dict(self): + return forms.ItemTextDescription(self.driver).get_content() diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworks.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworks.py new file mode 100644 index 0000000..d0b89b5 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworks.py @@ -0,0 +1,123 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class ProviderNetworksTable(tables.TableRegion): + name = "provider_networks" + + CREATE_PROVIDER_NETWORK_FORM_FIELDS = ("name", "description", "type", "mtu", "vlan_transparent") + EDIT_PROVIDER_NETWORK_FORM_FIELDS = ("name", "type", "description", "mtu", "vlan_transparent") + CREATE_SEGMENTATION_RANGE_FORM_FIELDS = ("name", "description", "shared", "tenant_id", + "minimum", "maximum", "mode", "group", "id_port_0", + "id_port_1", "id_port_2", "ttl") + + @tables.bind_table_action('create') + def create_provider_network(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, self.CREATE_PROVIDER_NETWORK_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_provider_network(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('update') + def edit_provider_network(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, self.EDIT_PROVIDER_NETWORK_FORM_FIELDS) + + @tables.bind_row_action('addrange') + def create_segmentation_range(self, create_button, row): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, self.CREATE_SEGMENTATION_RANGE_FORM_FIELDS) + + +class ProviderNetworksPage(basepage.BasePage): + PARTIAL_URL = 'admin/providernets' + + PROVIDER_NETWORKS_TABLE_NAME_COLUMN = "Network Name" + + def _get_row_with_provier_network_name(self, name): + return self.provider_networks_table.get_row( + self.PROVIDER_NETWORKS_TABLE_NAME_COLUMN, name) + + @property + def provider_networks_table(self): + return ProviderNetworksTable(self.driver) + + def create_provider_network(self, name, description=None, type=None, + mtu=None, is_vlan_transparent=None): + + create_provier_network_form = self.provider_networks_table.create_network() + create_provier_network_form.name.text = name + if description is not None: + create_provier_network_form.description.text = description + if type is not None: + create_provier_network_form.type.text = type + if mtu is not None: + create_provier_network_form.mtu.value = mtu + if is_vlan_transparent: + create_provier_network_form.vlan_transparent.mark() + create_provier_network_form.submit() + + def edit_provider_network(self, description=None, mtu=None, is_vlan_transparent=None): + edit_provider_network_form = self.provider_networks_table.edit_provider_network() + if description is not None: + edit_provider_network_form.description.text = description + if mtu is not None: + edit_provider_network_form.mtu.value = mtu + if is_vlan_transparent is True: + edit_provider_network_form.vlan_transparent.mark() + if is_vlan_transparent is False: + 
edit_provider_network_form.vlan_transparent.unmark() + edit_provider_network_form.submit() + + def create_segmentation_range(self, name=None, description=None, + is_shared=None, project=None, + minimum=None, maximum=None, mode=None, + multicase_group_address=None, + port_0=None, port_1=None, + port_2=None, ttl=None): + create_segmentation_range_form = self.provider_networks_table.create_segmentation_range() + if name is not None: + create_segmentation_range_form.name.text = name + if description is not None: + create_segmentation_range_form.description.text = description + if is_shared: + create_segmentation_range_form.shared.mark() + if project is not None: + create_segmentation_range_form.tenant_id.text = project + if minimum is not None: + create_segmentation_range_form.minimum.value = minimum + if maximum is not None: + create_segmentation_range_form.maximum.value = maximum + if mode is not None: + create_segmentation_range_form.mode.text = mode + if multicase_group_address is not None: + create_segmentation_range_form.group.value = multicase_group_address + if port_0: + create_segmentation_range_form.id_port_0.mark() + if port_1: + create_segmentation_range_form.id_port_1.mark() + if port_2: + create_segmentation_range_form.id_port_2.mark() + if ttl is not None: + create_segmentation_range_form.ttl.value = ttl + create_segmentation_range_form.submit() + + def delete_provider_network(self, name): + row = self._get_row_with_provier_network_name(name) + row.mark() + confirm_form = self.provider_networks_table.delete_provider_network() + confirm_form.submit() + + def is_provider_network_present(self, name): + return bool(self._get_row_with_provier_network_name(name)) + + def get_provider_network_info(self, name, header): + row = self._get_row_with_provier_network_name(name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkstopology.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkstopology.py new file mode 100644 index 0000000..217ee2a --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/platform/providernetworkstopology.py @@ -0,0 +1,87 @@ +from selenium.webdriver.common import by +from utils.horizon.pages import basepage +from utils.horizon.helper import HorizonDriver +from utils.horizon.pages.admin.platform.providernetworkoverviewpage import ProvidernetOverviewPage +from utils.horizon.regions import baseregion, tables, forms +from utils import exceptions + + +class ContainerRegion(baseregion.BaseRegion): + name = None + _element_locator = (by.By.CSS_SELECTOR, 'a') + _text_fluid_locator = (by.By.CSS_SELECTOR, 'detail_view') + + def _container_locator(self, container_name): + return by.By.CSS_SELECTOR, 'div#%s' % container_name + + def __init__(self, driver, src_element=None): + if not src_element: + self._default_src_locator = self._container_locator(self.__class__.name) + super(ContainerRegion, self).__init__(driver) + else: + super(ContainerRegion, self).__init__(driver, src_elem=src_element) + + def select_element_by_name(self, name): + for element in self._get_elements(*self._element_locator): + if name == element.text: + element.click() + return + else: + raise exceptions.HorizonError('{} not found'.format(name)) + + +# class PnetDetail(forms.ItemTextDescription): +# name = None +# _detail_view_locator = (by.By.CSS_SELECTOR, 'div#detail_view') +# _row_fluid_locator = (by.By.CSS_SELECTOR, 'dl > dt') +# def _container_locator(self, container_name): +# return by.By.CSS_SELECTOR, 
'div#%s' % container_name +# +# def __init__(self, driver, src_element=None): +# if not src_element: +# self._default_src_locator = self._detail_view_locator +# super(PnetDetail, self).__init__(driver) +# else: +# super(PnetDetail, self).__init__(driver, src_elem=src_element) +# +# def get_detail_view(self): +# return self._get_element(*self._detail_view_locator) + + +class ProviderNetworkList(ContainerRegion): + name = "network_list" + + +class HostList(ContainerRegion): + name = "host_list" + + +class ProviderNetworkDetail(forms.ItemTextDescription): + _separator_locator = (by.By.CSS_SELECTOR, 'div#info detail') + + +class ProviderNetworkTopologyPage(basepage.BasePage): + + PARTIAL_URL = 'admin/host_topology' + # SERVICES_TAB_INDEX = 0 + # USAGE_TAB_INDEX = 1) + + @property + def host_list(self): + return HostList(self.driver) + + @property + def providernet_list(self): + return ProviderNetworkList(self.driver) + + def providernet_detail(self): + return ProviderNetworkDetail(self.driver).get_content() + + def go_to_topology_tab(self): + self.go_to_tab(0) + + def go_to_pnet_overview(self, name): + link = self._get_element(by.By.LINK_TEXT, name) + link.click() + + return ProvidernetOverviewPage(HorizonDriver.get_driver(), name) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/softwaremanagementpage.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/softwaremanagementpage.py new file mode 100644 index 0000000..9a82225 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/platform/softwaremanagementpage.py @@ -0,0 +1,100 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import tables +from utils.horizon.regions import forms + + +class PatchesTable(tables.TableRegion): + name = "patches" + + UPLOAD_PATCHES_FORM_FIELDS = ("patch_files",) + + @tables.bind_table_action('patchupload') + def upload_patches(self, upload_button): + upload_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.form_fields) + + +class PatchStagesTable(tables.TableRegion): + name = "patchstages" + + CREATE_STRATEGY_FORM_FIELDS = ("controller_apply_type", "compute_apply_type", + "default_instance_action", "alarm_restrictions") + + @tables.bind_table_action('createpatchstrategy') + def create_strategy(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.CREATE_STRATEGY_FORM_FIELDS) + + @tables.bind_table_action('delete_patch_strategy') + def delete_strategy(self, delete_button): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + +class UpgradeStagesTable(PatchStagesTable): + name = "upgradestages" + + CREATE_STRATEGY_FORM_FIELDS = ("compute_apply_type", "alarm_restrictions") + + +class SoftwareManagementPage(basepage.BasePage): + + PARTIAL_URL = 'admin/software_management' + PATCHES_TAB_INDEX = 0 + PATCH_ORCHESTRATION_TAB_INDEX = 1 + UPGRADE_ORCHESTRATION_TAB_INDEX = 2 + + @property + def patches_table(self): + return PatchesTable(self.driver) + + @property + def patch_stages_table(self): + return PatchStagesTable(self.driver) + + @property + def upgrade_stages_table(self): + return UpgradeStagesTable(self.driver) + + def create_patch_strategy(self, controller_apply_type=None, + compute_apply_type=None, + default_instance_action=None, + alarm_restrictions=None): + create_strategy_form = self.patch_stages_table.create_strategy() + if 
controller_apply_type is not None: + create_strategy_form.controller_apply_type.text = controller_apply_type + if compute_apply_type is not None: + create_strategy_form.compute_apply_type.text = compute_apply_type + if default_instance_action is not None: + create_strategy_form.default_instance_action.text = default_instance_action + if alarm_restrictions is not None: + create_strategy_form.alarm_restrictions.text = alarm_restrictions + create_strategy_form.submit() + + def delete_patch_strategy(self): + confirm_form = self.patch_stages_table.delete_strategy() + confirm_form.submit() + + def create_upgrade_strategy(self, compute_apply_type=None, alarm_restrictions=None): + create_strategy_form = self.upgrade_stages_table.create_strategy() + if compute_apply_type is not None: + create_strategy_form.compute_apply_type.text = compute_apply_type + if alarm_restrictions is not None: + create_strategy_form.alarm_restrictions.text = alarm_restrictions + create_strategy_form.submit() + + def delete_upgrade_strategy(self): + confirm_form = self.upgrade_stages_table.delete_strategy() + confirm_form.submit() + + def go_to_patches_tab(self): + self.go_to_tab(self.PATCHES_TAB_INDEX) + + def go_to_patch_orchestration_tab(self): + self.go_to_tab(self.PATCH_ORCHESTRATION_TAB_INDEX) + + def go_to_upgrade_orchestration_tab(self): + self.go_to_tab(self.UPGRADE_ORCHESTRATION_TAB_INDEX) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/storageoverviewpage.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/storageoverviewpage.py new file mode 100644 index 0000000..169fe2e --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/platform/storageoverviewpage.py @@ -0,0 +1,83 @@ +from selenium.webdriver.common import by +from utils.horizon.pages import basepage +from utils.horizon.regions import tables, forms + + +class StorageSeviceDescription(forms.ItemTextDescription): + _separator_locator = (by.By.CSS_SELECTOR, 'div#storageservice') + +class MonitorsTable(tables.TableRegion): + name = "monitors" + + +class OsdsTable(tables.TableRegion): + name = "osds" + + +class UsageTable(tables.TableRegion): + name = "usage" + + +class StorageOverviewPage(basepage.BasePage): + PARTIAL_URL = 'admin/storage_overview' + SERVICES_TAB_INDEX = 0 + SERVICES_MONITOR_HOST_COL = 'Host' + SERVICES_OSD_NAME_COL = 'Name' + USAGE_TAB_INDEX = 1 + USAGE_BACKEND_TYPE_COL = 'Backend type' + USAGE_BACKEND_SERVICE_NAME_COLUMN = 'Service name' + + usage_headers_map = { + 'Backend type': 'backend type', + 'Backend name': 'backend name', + 'Service name': 'service', + 'Total Capacity (GiB)': 'total capacity (GiB)', + 'Free Capacity (GiB)': 'free capacity (GiB)' + } + + @property + def storage_service_info(self): + return StorageSeviceDescription(self.driver) + + @property + def monitors_table(self): + return MonitorsTable(self.driver) + + @property + def osds_table(self): + return OsdsTable(self.driver) + + @property + def usage_table(self): + return UsageTable(self.driver) + + def _get_row_with_service_name(self, name): + return self.usage_table.get_row(self.USAGE_BACKEND_SERVICE_NAME_COLUMN, name) + + def get_rows_from_usage_table(self): + return self.usage_table.rows + + def _get_monitor_table_row_with_host(self, host_name): + return self.monitors_table.get_row(self.SERVICES_MONITOR_HOST_COL, host_name) + + def _get_osd_table_row_with_osd_name(self, osd_name): + return self.osds_table.get_row(self. 
SERVICES_OSD_NAME_COL, osd_name) + + def get_storage_overview_monitor_info(self, host_name, header): + row = self._get_monitor_table_row_with_host(host_name) + return row.cells[header].text + + def get_storage_overview_osd_info(self, name, header): + row = self._get_osd_table_row_with_osd_name(name) + return row.cells[header].text + + def get_storage_overview_usage_info(self, name, header): + row = self._get_row_with_service_name(name) + return row.cells[header].text + + def go_to_services_tab(self): + self.go_to_tab(self.SERVICES_TAB_INDEX) + + def go_to_usage_tab(self): + self.go_to_tab(self.USAGE_TAB_INDEX) + diff --git a/automated-pytest-suite/utils/horizon/pages/admin/platform/systemconfigurationpage.py b/automated-pytest-suite/utils/horizon/pages/admin/platform/systemconfigurationpage.py new file mode 100644 index 0000000..9ec06ee --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/platform/systemconfigurationpage.py @@ -0,0 +1,494 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms, tables, messages +from utils import exceptions +from utils.tis_log import LOG + + +class SystemsTable(tables.TableRegion): + name = "systems" + + EDIT_SYSTEM_FORM_FIELDS = ("name", "description") + SYSTEMS_MAP = { + 'name': 'Name', + 'system_type': 'System Type', + 'system_mode': 'System Mode', + 'description': 'Description', + 'software_version': 'Version' + } + + @tables.bind_row_action('update') + def edit_system(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_SYSTEM_FORM_FIELDS) + + +class AddressPoolsTable(tables.TableRegion): + name = "address_pools" + + ADDRESS_POOL_FORM_FIELDS = ("name", "network", "order", "ranges") + + @tables.bind_table_action('create') + def create_address_pool(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.ADDRESS_POOL_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_address_pool(self, delete_button): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('update') + def update_address_pool(self, update_button, row): + update_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.ADDRESS_POOL_FORM_FIELDS) + + +class DNSTable(tables.TableRegion): + name = "cdns_table" + + EDIT_DNS_FORM_FIELDS = ("NAMESERVER_1", "NAMESERVER_2", "NAMESERVER_3") + PTP_MAP = { + 'enabled': 'PTP Enabled', + 'mode': 'PTP Time Stamping Mode', + 'transport': 'PTP Network Transport', + 'mechanism': 'PTP Delay Mechanism' + } + + @tables.bind_table_action('update_cdns') + def edit_dns(self, edit_button): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_DNS_FORM_FIELDS) + + +class NTPTable(tables.TableRegion): + name = "cntp_table" + + EDIT_NTP_FORM_FIELDS = ("NTP_SERVER_1", "NTP_SERVER_2", "NTP_SERVER_3") + + @tables.bind_table_action('update_cntp') + def edit_ntp(self, edit_button): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_NTP_FORM_FIELDS) + + +class PTPTable(tables.TableRegion): + name = "cptp_table" + + EDIT_PTP_FORM_FIELDS = ("mode", "transport", "mechanism") + PTP_MAP = { + 'enabled': 'PTP Enabled', + 'mode': 'PTP Time Stamping Mode', + 'transport': 
'PTP Network Transport', + 'mechanism': 'PTP Delay Mechanism', + } + + @tables.bind_table_action('update_cptp') + def edit_ptp(self, edit_button): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_PTP_FORM_FIELDS) + + +class OAMTable(tables.TableRegion): + name = "coam_table" + + EDIT_OAM_FORM_FIELDS = ("EXTERNAL_OAM_SUBNET", "EXTERNAL_OAM_GATEWAY_ADDRESS", + "EXTERNAL_OAM_FLOATING_ADDRESS", "EXTERNAL_OAM_0_ADDRESS", + "EXTERNAL_OAM_1_ADDRESS") + SIMPLEX_OAM_MAP = { + 'oam_subnet': 'OAM Subnet', + 'oam_ip': 'OAM IP', + 'oam_gateway_ip': 'OAM Gateway IP' + } + OAM_MAP = { + 'oam_subnet': 'OAM Subnet', + 'oam_floating_ip': 'OAM Floating IP', + 'oam_gateway_ip': 'OAM Gateway IP', + 'oam_c0_ip': 'OAM controller-0 IP', + 'oam_c1_ip': 'OAM controller-1 IP' + } + + @tables.bind_table_action('update_coam') + def edit_oam(self, edit_button): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_OAM_FORM_FIELDS) + + +class ControlerfsTable(tables.TableRegion): + name = "storage_table" + + EDIT_FILESYSTEM_FORM_FIELDS = ("database", "glance", "backup", "scratch", + "extension", "img_converstions", "ceph_mon") + CONTROLERFS_MAP = { + 'name': 'Storage Name', + 'size': 'Size (GiB)' + } + + @tables.bind_table_action('update_storage') + def edit_filesystem(self, edit_button): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_FILESYSTEM_FORM_FIELDS) + + +class CephStoragePoolsTable(tables.TableRegion): + name = "storage_pools_table" + + EDIT_POOL_QUOTAS_FIELDS = ("cinder_pool_gib", "glance_pool_gib", + "ephemeral_pool_gib", "object_pool_gib") + CEPH_STORAGE_POOLS_MAP = { + 'tier_name': 'Ceph Storage Tier', + 'cinder_pool_gib': 'Cinder Volume Storage (GiB)', + 'glance_pool_gib': 'Glance Image Storage (GiB)', + 'ephemeral_pool_gib': 'Nova Ephemeral Disk Storage (GiB)', + 'object_pool_gib': 'Object Storage (GiB)', + 'ceph_total_space_gib': 'Ceph total space (GiB)' + } + + @tables.bind_row_action('update_storage_pools') + def edit_ceph_storage_pools(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_POOL_QUOTAS_FIELDS) + + +class SystemConfigurationPage(basepage.BasePage): + + PARTIAL_URL = 'admin/system_config' + SYSTEMS_TAB_INDEX = 0 + ADDRESS_POOLS_TAB_INDEX = 1 + DNS_TAB_INDEX = 2 + NTP_TAB_INDEX = 3 + PTP_TAB_INDEX = 4 + OAM_IP_TAB_INDEX = 5 + CONTROLLER_FILESYSTEM_TAB_INDEX = 6 + CEPH_STORAGE_POOLS_INDEX = 7 + SYSTEMS_TABLE_NAME_COLUMN = 'Name' + ADDRESS_POOLS_TABLE_NAME_COLUMN = 'Name' + DNS_TABLE_SERVER1_IP = 'DNS Server 1 IP' + NTP_TABLE_SERVER1_ADDR = 'NTP Server 1 Address' + PTP_TABLE_ENABLED = 'PTP Enabled' + OAM_TABLE_SUBNET = 'OAM Subnet' + CONTROLLERFS_TABLE_STORAGE_NAME = 'Storage Name' + CEPH_STORAGE_TABLE_TIER_COLUMN = 'Ceph Storage Tier' + + @property + def systems_table(self): + return SystemsTable(self.driver) + + @property + def address_pools_table(self): + return AddressPoolsTable(self.driver) + + @property + def dns_table(self): + return DNSTable(self.driver) + + @property + def ntp_table(self): + return NTPTable(self.driver) + + @property + def ptp_table(self): + return PTPTable(self.driver) + + @property + def oam_table(self): + return OAMTable(self.driver) + + @property + def controllerfs_table(self): + return ControlerfsTable(self.driver) + + @property + def 
ceph_storage_pools_table(self): + return CephStoragePoolsTable(self.driver) + + def _get_row_with_system_name(self, name): + return self.systems_table.get_row(self.SYSTEMS_TABLE_NAME_COLUMN, name) + + def get_system_info(self, name, header): + row = self._get_row_with_system_name(name) + return row.cells[header].text + + def _get_row_with_address_pool_name(self, name): + return self.address_pools_table.get_row(self.ADDRESS_POOLS_TABLE_NAME_COLUMN, name) + + def get_address_pool_info(self, name, header): + row = self._get_row_with_address_pool_name(name) + return row.cells[header].text + + def _get_row_with_dns_server_ip(self, ip): + return self.dns_table.get_row(self.DNS_TABLE_SERVER1_IP, ip) + + def get_dns_info(self, ip, header): + row = self._get_row_with_dns_server_ip(ip) + return row.cells[header].text + + def _get_row_with_ntp_server_addr(self, addr): + return self.ntp_table.get_row(self.NTP_TABLE_SERVER1_ADDR, addr) + + def get_ntp_info(self, addr, header): + row = self._get_row_with_ntp_server_addr(addr) + return row.cells[header].text + + def _get_row_with_ptp_enabled(self, enabled): + return self.ptp_table.get_row(self.PTP_TABLE_ENABLED, enabled) + + def get_ptp_info(self, enabled, header): + row = self._get_row_with_ptp_enabled(enabled) + return row.cells[header].text + + def _get_row_with_oam_subnet(self, subnet): + return self.oam_table.get_row(self.OAM_TABLE_SUBNET, subnet) + + def get_oam_info(self, subnet, header): + row = self._get_row_with_oam_subnet(subnet) + return row.cells[header].text + + def _get_row_with_controllerfs_storage_name(self, storage_name): + return self.controllerfs_table.get_row(self.CONTROLLERFS_TABLE_STORAGE_NAME, storage_name) + + def get_controllerfs_info(self, storage_name, header): + row = self._get_row_with_controllerfs_storage_name(storage_name) + return row.cells[header].text + + def _get_row_with_ceph_tier_name(self, tier_name): + return self.ceph_storage_pools_table.get_row(self.CEPH_STORAGE_TABLE_TIER_COLUMN, tier_name) + + def get_ceph_storage_pools_info(self, tier_name, header): + row = self._get_row_with_ceph_tier_name(tier_name) + return row.cells[header].text + + def go_to_systems_tab(self): + self.go_to_tab(self.SYSTEMS_TAB_INDEX) + + def go_to_address_pools_tab(self): + self.go_to_tab(self.ADDRESS_POOLS_TAB_INDEX) + + def go_to_dns_tab(self): + self.go_to_tab(self.DNS_TAB_INDEX) + + def go_to_ntp_tab(self): + self.go_to_tab(self.NTP_TAB_INDEX) + + def go_to_ptp_tab(self): + self.go_to_tab(self.PTP_TAB_INDEX) + + def go_to_oam_ip_tab(self): + self.go_to_tab(self.OAM_IP_TAB_INDEX) + + def go_to_controller_filesystem_tab(self): + self.go_to_tab(self.CONTROLLER_FILESYSTEM_TAB_INDEX) + + def go_to_ceph_storage_pools_tab(self): + self.go_to_tab(self.CEPH_STORAGE_POOLS_INDEX) + + def is_systems_present(self, name): + return bool(self._get_row_with_system_name(name)) + + def create_address_pool(self, name, network, order=None, ranges=None, fail_ok=False): + create_form = self.address_pools_table.create_address_pool() + create_form.name.text = name + create_form.network.text = network + if order is not None: + create_form.order.text = order + if ranges is not None: + create_form.ranges.text = ranges + create_form.submit() + if not self.find_message_and_dismiss(messages.SUCCESS): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to create address pool {}".format(name) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + 
"creating address pool {}".format(name)) + succ_msg = "Address pool {} is successfully created.".format(name) + LOG.info(succ_msg) + return 0, succ_msg + + def delete_address_pool(self, name, fail_ok=False): + row = self._get_row_with_address_pool_name(name) + row.mark() + confirm_delete_form = self.address_pools_table.delete_address_pool() + confirm_delete_form.submit() + if not self.find_message_and_dismiss(messages.SUCCESS): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to delete address pool {}".format(name) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + "deleting address pool {}".format(name)) + succ_msg = "Address pool {} is successfully deleted.".format(name) + LOG.info(succ_msg) + return 0, succ_msg + + def update_address_pool(self, name, new_name=None, new_order=None, new_ranges=None, fail_ok=False): + row = self._get_row_with_address_pool_name(name) + edit_form = self.address_pools_table.update_address_pool(row) + if new_name is not None: + edit_form.name.text = new_name + if new_order is not None: + edit_form.order.text = new_order + if new_ranges is not None: + edit_form.ranges.text = new_ranges + edit_form.submit() + if not self.find_message_and_dismiss(messages.INFO): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to update address pool {}".format(name) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + "updating address pool {}".format(name)) + succ_msg = "Address pool {} is successfully updated.".format(name) + LOG.info(succ_msg) + return 0, succ_msg + + def is_address_present(self, name): + return bool(self._get_row_with_address_pool_name(name)) + + def edit_dns(self, server1=None, server2=None, server3=None, cancel=False, fail_ok=False): + edit_form = self.dns_table.edit_dns() + if server1 is not None: + edit_form.NAMESERVER_1.text = server1 + if server2 is not None: + edit_form.NAMESERVER_2.text = server2 + if server3 is not None: + edit_form.NAMESERVER_3.text = server3 + if cancel: + edit_form.cancel() + else: + edit_form.submit() + if not self.find_message_and_dismiss(messages.SUCCESS): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to edit DNS" + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after editing DNS") + succ_msg = "DNS is successfully updated." 
+ LOG.info(succ_msg) + return 0, succ_msg + + def edit_ntp(self, server1=None, server2=None, server3=None, cancel=False): + edit_form = self.ntp_table.edit_ntp() + if server1 is not None: + edit_form.NTP_SERVER_1.text = server1 + if server2 is not None: + edit_form.NTP_SERVER_2.text = server2 + if server3 is not None: + edit_form.NTP_SERVER_3.text = server3 + if cancel: + edit_form.cancel() + else: + edit_form.submit() + + def edit_ptp(self, mode=None, transport=None, mechanism=None, cancel=False): + edit_form = self.ptp_table.edit_ptp() + if mode is not None: + edit_form.mode.value = mode + if transport is not None: + edit_form.transport.value = transport + if mechanism is not None: + edit_form.mechanism.value = mechanism + if cancel: + edit_form.cancel() + else: + edit_form.submit() + + def edit_oam(self, subnet=None, gateway=None, floating=None, + controller0=None, controller1=None, cancel=False): + edit_form = self.oam_table.edit_oam() + if subnet is not None: + edit_form.EXTERNAL_OAM_SUBNET.text = subnet + if gateway is not None: + edit_form.EXTERNAL_OAM_GATEWAY_ADDRESS.text = gateway + if floating is not None: + edit_form.EXTERNAL_OAM_FLOATING_ADDRESS.text = floating + if controller0 is not None: + edit_form.EXTERNAL_OAM_0_ADDRESS.text = controller0 + if controller1 is not None: + edit_form.EXTERNAL_OAM_1_ADDRESS.text = controller1 + if cancel: + edit_form.cancel() + else: + edit_form.submit() + + def edit_filesystem(self, database=None, glance=None, backup=None, + scratch=None, extension=None, img_conversions=None, + cancel=False): + edit_form = self.controllerfs_table.edit_filesystem() + if database is not None: + edit_form.database.value = database + if glance is not None: + edit_form.glance.value = glance + if backup is not None: + edit_form.backup.value = backup + if scratch is not None: + edit_form.scratch.value = scratch + if extension is not None: + edit_form.extension.value = extension + if img_conversions is not None: + edit_form.img_conversions.value = img_conversions + if cancel: + edit_form.cancel() + else: + edit_form.submit() + + def edit_storage_pool(self, tier_name, cinder_pool=None, glance_pool=None, + ephemeral_pool=None, object_pool=None, cancel=False): + row = self._get_row_with_ceph_tier_name(tier_name) + edit_form = self.ceph_storage_pools_table.edit_ceph_storage_pools(row) + if cinder_pool is not None: + edit_form.cinder_pool_gib.value = cinder_pool + if glance_pool is False: + edit_form.glance_pool_gib.value = glance_pool + if ephemeral_pool is True: + edit_form.ephemeral_pool_gib = ephemeral_pool + if object_pool is not None: + edit_form.object_pool_gib.value = object_pool + if cancel: + edit_form.cancel() + else: + edit_form.submit() + + def check_horizon_displays(self, expt_horizon, table_name): + horizon_value = None + for horizon_header in expt_horizon: + if table_name == self.systems_table.name: + horizon_value = self.get_system_info(name=expt_horizon['Name'], + header=horizon_header) + elif table_name == self.address_pools_table.name: + horizon_value = self.get_address_pool_info( + name=expt_horizon['Name'], header=horizon_header) + elif table_name == self.ptp_table.name: + horizon_value = self.get_ptp_info( + enabled=expt_horizon['PTP Enabled'], header=horizon_header) + elif table_name == self.oam_table.name: + horizon_value = self.get_oam_info( + subnet=expt_horizon['OAM Subnet'], header=horizon_header) + elif table_name == self.controllerfs_table.name: + horizon_value = self.get_controllerfs_info( + storage_name=expt_horizon['Storage Name'], 
header=horizon_header) + elif table_name == self.ceph_storage_pools_table.name: + horizon_value = self.get_ceph_storage_pools_info( + tier_name=expt_horizon['Ceph Storage Tier'], header=horizon_header) + if str(expt_horizon[horizon_header]) != horizon_value: + err_msg = '{} display incorrectly'.format(horizon_header) + raise exceptions.HorizonError(err_msg) + succ_msg = 'All content display correctly' + LOG.info(succ_msg) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/system/__init__.py b/automated-pytest-suite/utils/horizon/pages/admin/system/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/system/defaultspage.py b/automated-pytest-suite/utils/horizon/pages/admin/system/defaultspage.py new file mode 100644 index 0000000..1d40a15 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/system/defaultspage.py @@ -0,0 +1,158 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class DefaultComputeQuotasTable(tables.TableRegion): + name = "compute_quotas" + + UPDATE_DEFAULTS_FORM_FIELDS = ( + ("instances", + "cores", + "ram", + "metadata_items", + "key_pairs", + "server_groups", + "server_group_members", + "injected_files", + "injected_file_content_bytes", + "injected_file_path_bytes"), + ("volumes", + "gigabytes", + "snapshots") + ) + + @tables.bind_table_action('update_compute_defaults') + def update(self, update_button): + update_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, + self.UPDATE_DEFAULTS_FORM_FIELDS + ) + + +class DefaultVolumeQuotasTable(DefaultComputeQuotasTable): + name = "volume_quotas" + + @tables.bind_table_action('update_volume_defaults') + def update(self, update_button): + update_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion( + self.driver, + self.UPDATE_DEFAULTS_FORM_FIELDS + ) + + +class DefaultNetworkQuotasTable(tables.TableRegion): + name = "network_quotas" + + +class DefaultsPage(basepage.BasePage): + PARTIAL_URL = 'admin/defaults' + COMPUTE_QUOTAS_TAB = 0 + VOLUME_QUOTAS_TAB = 1 + NETWORK_QUOTAS_TAB = 2 + QUOTAS_TABLE_NAME_COLUMN = 'Quota Name' + QUOTAS_TABLE_LIMIT_COLUMN = 'Limit' + DEFAULT_COMPUTE_QUOTA_NAMES = [ + 'Instances', + 'VCPUs', + 'RAM', + 'Metadata Items', + 'Key Pairs', + 'Server Groups', + 'Server Group Members', + 'Injected Files', + 'Injected File Content Bytes', + 'Length of Injected File Path' + ] + DEFAULT_VOLUME_QUOTA_NAMES = [ + 'Volumes', + 'Total Size of Volumes and Snapshots (GiB)', + 'Volume Snapshots' + ] + + def _get_compute_quota_row(self, name): + return self.default_compute_quotas_table.get_row( + self.QUOTAS_TABLE_NAME_COLUMN, name) + + def _get_volume_quota_row(self, name): + return self.default_volume_quotas_table.get_row( + self.QUOTAS_TABLE_NAME_COLUMN, name) + + @property + def default_compute_quotas_table(self): + return DefaultComputeQuotasTable(self.driver) + + @property + def default_volume_quotas_table(self): + return DefaultVolumeQuotasTable(self.driver) + + @property + def default_network_quotas_table(self): + return DefaultNetworkQuotasTable(self.driver) + + @property + def compute_quota_values(self): + quota_dict = {} + for row in self.default_compute_quotas_table.rows: + if row.cells[self.QUOTAS_TABLE_NAME_COLUMN].text in \ + self.DEFAULT_COMPUTE_QUOTA_NAMES: + quota_dict[row.cells[self.QUOTAS_TABLE_NAME_COLUMN].text] = \ + 
int(row.cells[self.QUOTAS_TABLE_LIMIT_COLUMN].text) + return quota_dict + + @property + def volume_quota_values(self): + quota_dict = {} + for row in self.default_volume_quotas_table.rows: + if row.cells[self.QUOTAS_TABLE_NAME_COLUMN].text in \ + self.DEFAULT_VOLUME_QUOTA_NAMES: + quota_dict[row.cells[self.QUOTAS_TABLE_NAME_COLUMN].text] = \ + int(row.cells[self.QUOTAS_TABLE_LIMIT_COLUMN].text) + return quota_dict + + def update_compute_defaults(self, add_up): + update_form = self.default_compute_quotas_table.update() + update_form.instances.value = int(update_form.instances.value) + add_up + update_form.cores.value = int(update_form.cores.value) + add_up + update_form.ram.value = int(update_form.ram.value) + add_up + update_form.metadata_items.value = \ + int(update_form.metadata_items.value) + add_up + update_form.key_pairs.value = int(update_form.key_pairs.value) + add_up + update_form.server_groups.value = int(update_form.server_groups.value) + add_up + update_form.server_group_members.value = int(update_form.server_group_members.value) + add_up + update_form.injected_files.value = int( + update_form.injected_files.value) + add_up + update_form.injected_file_content_bytes.value = \ + int(update_form.injected_file_content_bytes.value) + add_up + update_form.injected_file_path_bytes.value = \ + int(update_form.injected_file_path_bytes.value) + add_up + update_form.submit() + + def update_volume_defaults(self, add_up): + update_form = self.default_volume_quotas_table.update() + update_form.switch_to(self.VOLUME_QUOTAS_TAB) + update_form.volumes.value = int(update_form.volumes.value) + add_up + update_form.gigabytes.value = int(update_form.gigabytes.value) + add_up + update_form.snapshots.value = int(update_form.snapshots.value) + add_up + update_form.submit() + + def is_compute_quota_a_match(self, quota_name, limit): + row = self._get_compute_quota_row(quota_name) + return row.cells[self.QUOTAS_TABLE_LIMIT_COLUMN].text == str(limit) + + def is_volume_quota_a_match(self, quota_name, limit): + row = self._get_volume_quota_row(quota_name) + return row.cells[self.QUOTAS_TABLE_LIMIT_COLUMN].text == str(limit) + + def go_to_compute_quotas_tab(self): + self.go_to_tab(self.COMPUTE_QUOTAS_TAB) + + def go_to_volume_quotas_tab(self): + self.go_to_tab(self.VOLUME_QUOTAS_TAB) + + def go_to_network_quotas_tab(self): + self.go_to_tab(self.NETWORK_QUOTAS_TAB) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/system/metadatadefinitionspage.py b/automated-pytest-suite/utils/horizon/pages/admin/system/metadatadefinitionspage.py new file mode 100644 index 0000000..c18197c --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/system/metadatadefinitionspage.py @@ -0,0 +1,128 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
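+# A minimal, illustrative namespace template of the kind json_load_template()
+# reads and import_namespace() submits below. The keys follow the Glance
+# metadata-definitions schema; this sample is an assumption for reference only,
+# not data shipped with the suite:
+#     {
+#         "namespace": "OS::Example::HypotheticalNamespace",
+#         "display_name": "Hypothetical Namespace",
+#         "description": "Illustration only",
+#         "resource_type_associations": [{"name": "OS::Nova::Flavor"}],
+#         "properties": {"example_key": {"title": "Example", "type": "string"}}
+#     }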
+ +import json + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class MetadatadefinitionsTable(tables.TableRegion): + name = "namespaces" + + CREATE_NAMESPACE_FORM_FIELDS = ( + "source_type", "direct_input", "metadef_file", "public", "protected") + + @tables.bind_table_action('import') + def import_namespace(self, create_button): + create_button.click() + return forms.FormRegion( + self.driver, + field_mappings=self.CREATE_NAMESPACE_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_namespace(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + +class MetadatadefinitionsPage(basepage.BasePage): + + NAMESPACE_TABLE_NAME_COLUMN = 'Name' + NAMESPACE_TABLE_DESCRIPTION_COLUMN = 'Description' + NAMESPACE_TABLE_RESOURCE_TYPES_COLUMN = 'Resource Types' + NAMESPACE_TABLE_PUBLIC_COLUMN = 'Public' + NAMESPACE_TABLE_PROTECTED_COLUMN = 'Protected' + + boolean_mapping = {True: 'Yes', False: 'No'} + + def __init__(self, driver): + super(MetadatadefinitionsPage, self).__init__(driver) + self._page_title = "Metadata Definitions" + + def _get_row_with_namespace_name(self, name): + return self.namespaces_table.get_row( + self.NAMESPACE_TABLE_NAME_COLUMN, + name) + + @property + def namespaces_table(self): + return MetadatadefinitionsTable(self.driver) + + def json_load_template(self, namespace_template_name): + """Read template for namespace creation + + :param namespace_template_name: Path to template + :return = json data container + """ + try: + with open(namespace_template_name, 'r') as template: + json_template = json.load(template) + except Exception: + raise EOFError("Can not read template file: [{0}]".format( + namespace_template_name)) + return json_template + + def import_namespace( + self, namespace_source_type, namespace_json_container, + is_public=True, is_protected=False): + + create_namespace_form = self.namespaces_table.import_namespace() + create_namespace_form.source_type.value = namespace_source_type + if namespace_source_type == 'raw': + json_template_dump = json.dumps(namespace_json_container) + create_namespace_form.direct_input.text = json_template_dump + elif namespace_source_type == 'file': + metadeffile = namespace_json_container + create_namespace_form.metadef_file.choose(metadeffile) + + if is_public: + create_namespace_form.public.mark() + if is_protected: + create_namespace_form.protected.mark() + + create_namespace_form.submit() + + def delete_namespace(self, name): + row = self._get_row_with_namespace_name(name) + row.mark() + confirm_delete_namespaces_form = \ + self.namespaces_table.delete_namespace() + confirm_delete_namespaces_form.submit() + + def is_namespace_present(self, name): + return bool(self._get_row_with_namespace_name(name)) + + def is_public_set_correct(self, name, exp_value, row=None): + if type(exp_value) != bool: + raise ValueError('Expected value "exp_value" is not boolean') + if not row: + row = self._get_row_with_namespace_name(name) + cell = row.cells[self.NAMESPACE_TABLE_PUBLIC_COLUMN] + return self._is_text_visible(cell, self.boolean_mapping[exp_value]) + + def is_protected_set_correct(self, name, exp_value, row=None): + if type(exp_value) != bool: + raise ValueError('Expected value "exp_value" is not boolean') + if not row: + row = self._get_row_with_namespace_name(name) + cell = row.cells[self.NAMESPACE_TABLE_PROTECTED_COLUMN] + return self._is_text_visible(cell, self.boolean_mapping[exp_value]) + + def 
is_resource_type_set_correct(self, name, expected_resources, row=None): + if not row: + row = self._get_row_with_namespace_name(name) + cell = row.cells[self.NAMESPACE_TABLE_RESOURCE_TYPES_COLUMN] + return all( + [self._is_text_visible(cell, res, strict=False) + for res in expected_resources]) diff --git a/automated-pytest-suite/utils/horizon/pages/admin/system/systeminformationpage.py b/automated-pytest-suite/utils/horizon/pages/admin/system/systeminformationpage.py new file mode 100644 index 0000000..ddf8a18 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/system/systeminformationpage.py @@ -0,0 +1,87 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import tables + + +class ServicesTable(tables.TableRegion): + name = "services" + pass + + +class ControllerServicesTable(tables.TableRegion): + name = "controller_services" + pass + + +class ComputeServicesTable(tables.TableRegion): + name = "nova_services" + pass + + +class BlockStorageServicesTable(tables.TableRegion): + name = "cinder_services" + pass + + +class NetworkAgentsTable(tables.TableRegion): + name = "network_agents" + pass + + +class OrchestrationServicesTable(tables.TableRegion): + name = "heat_services" + pass + + +class SystemInformationPage(basepage.BasePage): + + PARTIAL_URL = 'admin/info' + SERVICES_TAB = 0 + CONTROLLER_SERVICES_TAB = 1 + COMPUTE_SERVICES_TAB = 2 + BLOCK_STORAGE_SERVICES_TAB = 3 + NETWORK_AGENTS_TAB = 4 + ORCHESTRATION_SERVICES_TAB = 5 + + @property + def services_table(self): + return ServicesTable(self.driver) + + @property + def controller_services_table(self): + return ControllerServicesTable(self.driver) + + @property + def compute_services_table(self): + return ComputeServicesTable(self.driver) + + @property + def block_torage_services_table(self): + return BlockStorageServicesTable(self.driver) + + @property + def network_agents_table(self): + return NetworkAgentsTable(self.driver) + + @property + def orchestration_services_table(self): + return OrchestrationServicesTable(self.driver) + + def go_to_services_tab(self): + self.go_to_tab(self.SERVICES_TAB) + + def go_to_controller_services_tab(self): + self.go_to_tab(self.CONTROLLER_SERVICES_TAB) + + def go_to_compute_services_tab(self): + self.go_to_tab(self.COMPUTE_SERVICES_TAB) + + def go_to_block_storage_services_tab(self): + self.go_to_tab(self.BLOCK_STORAGE_SERVICES_TAB) + + def go_to_network_agents_tab(self): + self.go_to_tab(self.NETWORK_AGENTS_TAB) + + def go_to_orchestration_services_tab(self): + self.go_to_tab(self.ORCHESTRATION_SERVICES_TAB) + + diff --git a/automated-pytest-suite/utils/horizon/pages/admin/volume/__init__.py b/automated-pytest-suite/utils/horizon/pages/admin/volume/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/admin/volume/volumesnapshotspage.py b/automated-pytest-suite/utils/horizon/pages/admin/volume/volumesnapshotspage.py new file mode 100644 index 0000000..d3f5a12 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/volume/volumesnapshotspage.py @@ -0,0 +1 @@ + diff --git a/automated-pytest-suite/utils/horizon/pages/admin/volume/volumespage.py b/automated-pytest-suite/utils/horizon/pages/admin/volume/volumespage.py new file mode 100644 index 0000000..d3f5a12 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/volume/volumespage.py @@ -0,0 +1 @@ + diff --git a/automated-pytest-suite/utils/horizon/pages/admin/volume/volumetypespage.py 
b/automated-pytest-suite/utils/horizon/pages/admin/volume/volumetypespage.py new file mode 100644 index 0000000..278f838 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/admin/volume/volumetypespage.py @@ -0,0 +1,171 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class QosSpecsTable(tables.TableRegion): + name = 'qos_specs' + CREATE_QOS_SPEC_FORM_FIELDS = ("name", "consumer") + EDIT_CONSUMER_FORM_FIELDS = ("consumer_choice", ) + + @tables.bind_table_action('create') + def create_qos_spec(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, + field_mappings=self.CREATE_QOS_SPEC_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_qos_spec(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_qos_spec_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('edit_consumer') + def edit_consumer(self, edit_consumer_button, row): + edit_consumer_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, + field_mappings=self.EDIT_CONSUMER_FORM_FIELDS) + + +class VolumeTypesTable(tables.TableRegion): + name = 'volume_types' + + CREATE_VOLUME_TYPE_FORM_FIELDS = ("name", "vol_type_description", "is_public") + CREATE_ECRYPTION_FORM_FIELDS = ("name", "provider", "control_location", "cipher", "key_size") + MANAGE_QOS_SPEC_ASSOCIATION_FORM_FIELDS = ("qos_spec_choice",) + + @tables.bind_table_action('create') + def create_volume_type(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, + field_mappings=self.CREATE_VOLUME_TYPE_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_volume_type(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_volume_type_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('create_encryption') + def create_encryption(self, create_button, row): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, field_mappings=self.CREATE_QOS_SPEC_FORM_FIELDS) + + @tables.bind_row_action('associate') + def manage_qos_spec_association(self, manage_button, row): + manage_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, field_mappings=self.MANAGE_QOS_SPEC_ASSOCIATION_FORM_FIELDS) + + +class VolumetypesPage(basepage.BasePage): + PARTIAL_URL = 'admin/volume_types' + QOS_SPECS_TABLE_NAME_COLUMN = 'Name' + VOLUME_TYPES_TABLE_NAME_COLUMN = 'Name' + + @property + def qos_specs_table(self): + return QosSpecsTable(self.driver) + + @property + def volume_types_table(self): + return VolumeTypesTable(self.driver) + + def _get_row_with_qos_spec_name(self, name): + return self.qos_specs_table.get_row( + self.QOS_SPECS_TABLE_NAME_COLUMN, name) + + def _get_row_with_volume_type_name(self, name): + return self.volume_types_table.get_row( + self.VOLUME_TYPES_TABLE_NAME_COLUMN, name) + + def create_qos_spec(self, qos_spec_name, consumer=None): + create_qos_spec_form = self.qos_specs_table.create_qos_spec() + create_qos_spec_form.name.text = qos_spec_name + if consumer is not None: + 
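+            # Typical values are the standard Cinder QoS consumers
+            # ('front-end', 'back-end' or 'both'); listed here for reference
+            # only, as the page object itself does not restrict the value.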
create_qos_spec_form.consumer.text = consumer + create_qos_spec_form.submit() + + def create_volume_type(self, volume_type_name, description=None): + volume_type_form = self.volume_types_table.create_volume_type() + volume_type_form.name.text = volume_type_name + if description is not None: + volume_type_form.description.text = description + volume_type_form.submit() + + def delete_qos_spec(self, name): + row = self._get_row_with_qos_spec_name(name) + row.mark() + confirm_delete_qos_spec_form = self.qos_specs_table.delete_qos_spec() + confirm_delete_qos_spec_form.submit() + + def delete_qos_spec_by_row(self, name): + row = self._get_row_with_qos_spec_name(name) + confirm_delete_qos_spec_form = self.qos_specs_table.delete_qos_spec_by_row(row) + confirm_delete_qos_spec_form.submit() + + def delete_volume_type(self, name): + row = self._get_row_with_volume_type_name(name) + row.mark() + confirm_delete_volume_types_form = self.volume_types_table.delete_volume_type() + confirm_delete_volume_types_form.submit() + + def delete_volume_type_by_row(self, name): + row = self._get_row_with_volume_type_name(name) + confirm_delete_volume_types_form = self.volume_types_table.delete_volume_type_by_row(row) + confirm_delete_volume_types_form.submit() + + def edit_consumer(self, name, consumer_choice): + row = self._get_row_with_qos_spec_name(name) + edit_consumer_form = self.qos_specs_table.edit_consumer(row) + edit_consumer_form.consumer_choice.value = consumer_choice + edit_consumer_form.submit() + + def create_encryption(self, name, provider, control_location=None, cipher=None, key_size=None): + row = self._get_row_with_volume_type_name(name) + create_encrypted_form = self.volume_types_table.create_encryption(row) + create_encrypted_form.provider.tesxt = provider + if control_location is not None: + create_encrypted_form.control_location.text = control_location + if cipher is not None: + create_encrypted_form.cipher.text = cipher + if key_size is not None: + create_encrypted_form.key_size = key_size + create_encrypted_form.submit() + + def manage_qos_spec_association(self, name, associated_qos_spec): + row = self._get_row_with_volume_type_name(name) + associate_qos_spec_form = self.volume_types_table.manage_qos_spec_association(row) + associate_qos_spec_form.qos_spec_choice.text = associated_qos_spec + associate_qos_spec_form.submit() + + def is_qos_spec_present(self, name): + return bool(self._get_row_with_qos_spec_name(name)) + + def is_volume_type_present(self, name): + return bool(self._get_row_with_volume_type_name(name)) + + def get_volume_type_info(self, name, header): + row = self._get_row_with_volume_type_name(name) + return row.cells[header].text + + def get_qos_spec_info(self, name, header): + row = self._get_row_with_qos_spec_name(name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/basepage.py b/automated-pytest-suite/utils/horizon/pages/basepage.py new file mode 100644 index 0000000..6082c4f --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/basepage.py @@ -0,0 +1,55 @@ +from selenium.webdriver.common import by +from utils.horizon.pages import pageobject +from utils.horizon.regions import bars +from utils.horizon.regions import messages + + +class BasePage(pageobject.PageObject): + """Base class for all dashboard page objects.""" + + _heading_locator = (by.By.CSS_SELECTOR, 'div.page-header') + _nav_tabs_locator = (by.By.CSS_SELECTOR, 'ul>li>a[data-toggle="tab"]') + + @property + def heading(self): + return 
self._get_element(*self._heading_locator) + + @property + def topbar(self): + return bars.TopBarRegion(self.driver) + + @property + def tabs(self): + return self._get_elements(*self._nav_tabs_locator) + + @property + def is_logged_in(self): + return self.topbar.is_logged_in + + def go_to_home_page(self): + self.topbar.brand.click() + + def go_to_tab(self, index): + self.tabs[index].click() + + def log_out(self): + self.topbar.user_dropdown_menu.click_on_logout() + + def download_rc_v2(self): + self.topbar.user_dropdown_menu.click_on_rc_v2() + + def download_rc_v3(self): + self.topbar.user_dropdown_menu.click_on_rc_v3() + + def go_to_help_page(self): + self.topbar.user_dropdown_menu.click_on_help() + + def find_message_and_dismiss(self, message_level=messages.SUCCESS): + message = messages.MessageRegion(self.driver, message_level) + is_message_present = message.exists() + if is_message_present: + message.close() + return is_message_present + + def change_project(self, name): + self.topbar.user_dropdown_project.click_on_project(name) diff --git a/automated-pytest-suite/utils/horizon/pages/identity/__init__.py b/automated-pytest-suite/utils/horizon/pages/identity/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/identity/groupspage.py b/automated-pytest-suite/utils/horizon/pages/identity/groupspage.py new file mode 100644 index 0000000..a310a42 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/identity/groupspage.py @@ -0,0 +1,82 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class GroupsTable(tables.TableRegion): + + name = "groups" + + MODIFY_GROUP_FORM_FIELDS = ("name", "description") + + @tables.bind_table_action('create') + def create_group(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.MODIFY_GROUP_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_group(self, delete_button): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_group_by_row(self, delete_button, row): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('edit') + def edit_group(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.MODIFY_GROUP_FORM_FIELDS) + + +class GroupsPage(basepage.BasePage): + + PARTIAL_URL = 'identity/groups' + + GROUPS_TABLE_NAME_COLUMN = 'Name' + + @property + def groups_table(self): + return GroupsTable(self.driver) + + def _get_row_with_group_name(self, name, strict=True): + return self.groups_table.get_row(self.GROUPS_TABLE_NAME_COLUMN, name, exact_match=strict) + + def create_group(self, name, description=None): + create_form = self.groups_table.create_group() + create_form.name.text = name + if description is not None: + create_form.description.text = description + create_form.submit() + + def delete_group(self, name, strict=True): + row = self._get_row_with_group_name(name, strict=strict) + row.mark() + confirm_delete_form = self.groups_table.delete_group() + confirm_delete_form.submit() + + def delete_group_by_row(self, name): + row = self._get_row_with_group_name(name) + confirm_delete_form = self.groups_table.delete_group_by_row(row) + 
confirm_delete_form.submit() + + def edit_group(self, name, new_name=None, new_description=None): + row = self._get_row_with_group_name(name) + edit_form = self.groups_table.edit_group(row) + if new_name is not None: + edit_form.name.text = new_name + if new_description is not None: + edit_form.description.text = new_description + edit_form.submit() + + def is_group_present(self, name): + return bool(self._get_row_with_group_name(name)) + + def get_group_info(self, group_name, header): + row = self._get_row_with_group_name(group_name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/identity/projectspage.py b/automated-pytest-suite/utils/horizon/pages/identity/projectspage.py new file mode 100644 index 0000000..3a12488 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/identity/projectspage.py @@ -0,0 +1,226 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import menus +from utils.horizon.regions import tables + +# name link +# view usage +# edit_quotas + + +class ProjectsTable(tables.TableRegion): + name = 'tenants' + + MODIFY_PROJECT_FORM_FIELDS = ( + ("domain_id", "domain_name", "name", "description", "enabled"), + {'members': menus.MembershipMenuRegion}, + {'groups': menus.MembershipMenuRegion}, + ("metadata_items", "cores", "instances", + "injected_files", "injected_file_content_bytes", + "key_pairs", "injected_file_path_bytes", "volumes", + "snapshots", "gigabytes", "ram", "security_group", + "security_group_rule", + "floatingip", "network", "port", "router", "subnet"), + ("mac_filtering",)) + + @tables.bind_table_action('create') + def create_project(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.MODIFY_PROJECT_FORM_FIELDS, + default_tab=0) + + @tables.bind_row_action('users') + def manage_members(self, manage_button, row): + manage_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.MODIFY_PROJECT_FORM_FIELDS, + default_tab=1) + + @tables.bind_row_action('groups') + def modify_groups(self, modify_button, row): + modify_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.MODIFY_PROJECT_FORM_FIELDS, + default_tab=2) + + @tables.bind_row_action('update') + def edit_project(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.MODIFY_PROJECT_FORM_FIELDS, + default_tab=3) + + @tables.bind_row_action('quotas') + def modify_quotas(self, modify_button, row): + modify_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.MODIFY_PROJECT_FORM_FIELDS, + default_tab=3) + + @tables.bind_table_action('delete') + def delete_project(self, delete_button): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_project_by_row(self, delete_button, row): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + +class ProjectsPage(basepage.BasePage): + + PARTIAL_URL = 'identity' + PROJECTS_TABLE_NAME_COLUMN = 'Name' + + @property + def projects_table(self): + return ProjectsTable(self.driver) + + def _get_row_with_project_name(self, name): + 
return self.projects_table.get_row( + self.PROJECTS_TABLE_NAME_COLUMN, name) + + def is_project_present(self, project_name): + return bool(self._get_row_with_project_name(project_name)) + + def get_project_info(self, project_name, header): + row = self._get_row_with_project_name(project_name) + return row.cells[header].text + + def create_project(self, project_name, description=None, is_enabled=None): + create_project_form = self.projects_table.create_project() + create_project_form.name.text = project_name + if description is not None: + create_project_form.description.text = description + if is_enabled is True: + create_project_form.enabled.mark() + if is_enabled is False: + create_project_form.enabled.unmark() + create_project_form.submit() + + def delete_project(self, project_name): + row = self._get_row_with_project_name(project_name) + row.mark() + modal_confirmation_form = self.projects_table.delete_project() + modal_confirmation_form.submit() + + def delete_project_by_row(self, project_name): + row = self._get_row_with_project_name(project_name) + confirm_form = self.projects_table.delete_project_by_row(row) + confirm_form.submit() + + def manage_members(self, project_name, users2allocate=None, members2deallocate=None): + row = self._get_row_with_project_name(project_name) + members_form = self.projects_table.manage_members(row) + if users2allocate is not None: + for user in users2allocate: + members_form.members.allocate_member(user) + if members2deallocate is not None: + for member in members2deallocate: + members_form.members.deallocate_member(member) + members_form.submit() + + def manage_member_roles(self, project_name, member_name=None, + roles2allocate=None, roles2deallocate=None): + row = self._get_row_with_project_name(project_name) + members_form = self.projects_table.manage_members(row) + if member_name is not None: + if roles2allocate is not None: + for role in roles2allocate: + members_form.members.allocate_member_roles(member_name, role) + if roles2deallocate is not None: + for role in roles2deallocate: + members_form.members.deallocate_member_roles(member_name, role) + members_form.submit() + + def modify_groups(self, project_name, groups2allocate=None, groups2deallocate=None): + row = self._get_row_with_project_name(project_name) + groups_form = self.projects_table.modify_groups(row) + if groups2allocate is not None: + for group in groups2allocate: + groups_form.groups.allocate_member(group) + if groups2deallocate is not None: + for group in groups2deallocate: + groups_form.groups.deallocate_member(group) + groups_form.submit() + + def modify_group_roles(self, project_name, group_name=None, + roles2allocate=None, roles2deallocate=None): + row = self._get_row_with_project_name(project_name) + groups_form = self.projects_table.modify_groups(row) + if group_name is not None: + if roles2allocate is not None: + for role in roles2allocate: + groups_form.groups.allocate_member_roles(group_name, role) + if roles2deallocate is not None: + for role in roles2deallocate: + groups_form.groups.deallocate_member_roles(group_name, role) + groups_form.submit() + + def get_member_roles_at_project(self, project_name, member_name): + row = self._get_row_with_project_name(project_name) + members_form = self.projects_table.manage_members(row) + roles = members_form.members.get_member_allocated_roles(member_name) + members_form.cancel() + return set(roles) + + def get_group_roles_at_project(self, project_name, group_name): + row = self._get_row_with_project_name(project_name) + 
groups_form = self.projects_table.modify_groups(row) + roles = groups_form.groups.get_member_allocated_roles(group_name) + groups_form.cancel() + return set(roles) + + def modify_quotas(self, project_name, metadata_items=None, vcpus=None, instances=None, + injected_files=None, injected_file_content=None, key_pairs=None, + length_of_injected_file_path=None, volumes=None, volume_snapshots=None, + total_size_of_volumes_and_snapshots=None, ram=None, security_groups=None, + security_group_rules=None, floating_ips=None, networks=None, + ports=None, routers=None, subnets=None): + row = self._get_row_with_project_name(project_name) + quotas_form = self.projects_table.modify_quotas(row) + if metadata_items is not None: + quotas_form.metadata_items.value = metadata_items + if vcpus is not None: + quotas_form.cores.value = vcpus + if instances is not None: + quotas_form.instances.value = instances + if injected_files is not None: + quotas_form.injected_files.value = injected_files + if injected_file_content is not None: + quotas_form.injected_file_content_bytes.value = injected_file_content + if key_pairs is not None: + quotas_form.key_pairs.value = key_pairs + if length_of_injected_file_path is not None: + quotas_form.injected_file_path_bytes.value = length_of_injected_file_path + if volumes is not None: + quotas_form.volumes.value = volumes + if volume_snapshots is not None: + quotas_form.snapshots.value = volume_snapshots + if total_size_of_volumes_and_snapshots is not None: + quotas_form.gigabytes.value = total_size_of_volumes_and_snapshots + if ram is not None: + quotas_form.ram.value = ram + if security_groups is not None: + quotas_form.security_group.value = security_groups + if security_group_rules is not None: + quotas_form.security_group_rule.value = security_group_rules + if floating_ips is not None: + quotas_form.floatingip.value = floating_ips + if networks is not None: + quotas_form.network.value = networks + if ports is not None: + quotas_form.port.value = ports + if routers is not None: + quotas_form.router.value = routers + if subnets is not None: + quotas_form.subnet.value = subnets diff --git a/automated-pytest-suite/utils/horizon/pages/identity/rolespage.py b/automated-pytest-suite/utils/horizon/pages/identity/rolespage.py new file mode 100644 index 0000000..915584a --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/identity/rolespage.py @@ -0,0 +1,86 @@ +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class RolesForm(forms.FormRegion): + _fields_locator = (by.By.CSS_SELECTOR, 'form') + + +class RolesTable(tables.TableRegion): + name = "OS::Keystone::Role" + _rows_locator = (by.By.CSS_SELECTOR, 'tbody > tr[class="ng-scope"]') + MODIFY_ROLE_FORM_FIELDS = ('name',) + + @tables.bind_table_action('btn-default', attribute_search='class') + def create_role(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return RolesForm(self.driver, field_mappings=self.MODIFY_ROLE_FORM_FIELDS) + + @tables.bind_table_action('btn-danger', attribute_search='class') + def delete_role(self, delete_button): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('danger', attribute_search='class') + def delete_role_by_row(self, delete_button, row): + delete_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + 
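+    # Note (descriptive comment, not in the original patch): this roles table is
+    # an Angular hz-resource-table (see _table_locator below), so the table and
+    # row actions in this class are located by button CSS class ('btn-default',
+    # 'btn-danger') via attribute_search='class' rather than by action name.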
@tables.bind_row_action('btn-default', attribute_search='class') + def edit_role(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return RolesForm(self.driver, field_mappings=self.MODIFY_ROLE_FORM_FIELDS) + + def _table_locator(self, table_name): + return by.By.CSS_SELECTOR, 'hz-resource-table[resource-type-name="%s"]' % table_name + + +class RolesPage(basepage.BasePage): + + PARTIAL_URL = 'identity/roles' + + ROLES_TABLE_NAME_COLUMN = "Name" + + @property + def roles_table(self): + return RolesTable(self.driver) + + def _get_row_with_role_name(self, name): + return self.roles_table.get_row(self.ROLES_TABLE_NAME_COLUMN, name) + + def create_role(self, name): + create_form = self.roles_table.create_role() + create_form.name.text = name + create_form.submit() + + def delete_role(self, name): + row = self._get_row_with_role_name(name) + row.mark() + confirm_delete_form = self.roles_table.delete_role() + confirm_delete_form.submit() + + def delete_role_by_row(self, name): + row = self._get_row_with_role_name(name) + confirm_delete_form = self.roles_table.delete_role_by_row(row) + confirm_delete_form.submit() + + def edit_role(self, name, new_name=None): + row = self._get_row_with_role_name(name) + edit_form = self.roles_table.edit_role(row) + if new_name is not None: + edit_form.name.text = new_name + edit_form.submit() + + def is_role_present(self, name): + return bool(self._get_row_with_role_name(name)) + + def get_role_info(self, name, header): + row = self._get_row_with_role_name(name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/identity/userspage.py b/automated-pytest-suite/utils/horizon/pages/identity/userspage.py new file mode 100644 index 0000000..17b384a --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/identity/userspage.py @@ -0,0 +1,105 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class UsersTable(tables.TableRegion): + name = 'users' + + CREATE_USER_FORM_FIELDS = ("name", "description", "email", "password", + "confirm_password", "project", "role_id", "enabled") + + EDIT_USER_FORM_FIELDS = ("name", "description", "email", "project") + + CHANGE_PASSWORD_FORM_FIELDS = ("password", "confirm_password", "name") + + @tables.bind_table_action('create') + def create_user(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.CREATE_USER_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_user(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_user_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('edit') + def edit_user(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.EDIT_USER_FORM_FIELDS) + + @tables.bind_row_action('change_password') + def change_password(self, change_password_button, row): + change_password_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.CHANGE_PASSWORD_FORM_FIELDS) + + @tables.bind_row_action('toggle') + def disable_user(self, disable_button, row): + disable_button.click() + + @tables.bind_row_action('toggle') + def enable_user(self, enable_button, row): + 
enable_button.click() + + +class UsersPage(basepage.BasePage): + PARTIAL_URL = 'identity/users' + + USERS_TABLE_NAME_COLUMN = 'User Name' + + def _get_row_with_user_name(self, name): + return self.users_table.get_row(self.USERS_TABLE_NAME_COLUMN, name) + + @property + def users_table(self): + return UsersTable(self.driver) + + def create_user(self, name, password, + project, role, email=None): + create_user_form = self.users_table.create_user() + create_user_form.name.text = name + if email is not None: + create_user_form.email.text = email + create_user_form.password.text = password + create_user_form.confirm_password.text = password + create_user_form.src_elem.click() # Workaround for firefox insecure warning msg + create_user_form.project.text = project + create_user_form.role_id.text = role + create_user_form.submit() + + def delete_user(self, name): + row = self._get_row_with_user_name(name) + row.mark() + confirm_delete_users_form = self.users_table.delete_user() + confirm_delete_users_form.submit() + + def delete_user_by_row(self, name): + row = self._get_row_with_user_name(name) + confirm_delete_users_form = self.users_table.delete_user_by_row(row) + confirm_delete_users_form.submit() + + def disable_user(self, name): + row = self._get_row_with_user_name(name) + self.users_table.disable_user(row) + + def enable_user(self, name): + row = self._get_row_with_user_name(name) + self.users_table.enable_user(row) + + def is_user_present(self, name): + return bool(self._get_row_with_user_name(name)) + + def get_user_info(self, user_name, header): + row = self._get_row_with_user_name(user_name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/loginpage.py b/automated-pytest-suite/utils/horizon/pages/loginpage.py new file mode 100644 index 0000000..0fccd7a --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/loginpage.py @@ -0,0 +1,44 @@ +import time +from selenium.webdriver.common import by +from utils.horizon.pages import pageobject +from utils.horizon.pages.project.compute import overviewpage +from utils.exceptions import HorizonError + + +class LoginPage(pageobject.PageObject): + + PARTIAL_URL = 'auth/login' + + _login_username_field_locator = (by.By.ID, 'id_username') + _login_password_field_locator = (by.By.ID, 'id_password') + _login_submit_button_locator = (by.By.ID, 'loginBtn') + _login_logout_reason_locator = (by.By.ID, 'logout_reason') + + @property + def username(self): + return self._get_element(*self._login_username_field_locator) + + @property + def password(self): + return self._get_element(*self._login_password_field_locator) + + @property + def login_button(self): + return self._get_element(*self._login_submit_button_locator) + + def is_logout_reason_displayed(self): + return self._get_element(*self._login_logout_reason_locator) + + def login(self, user=None, password=None): + self.username.send_keys(user) + self.password.send_keys(password) + self.login_button.click() + time.sleep(1) + + timeout = 30 + end_time = time.time() + timeout + while time.time() < end_time: + page = overviewpage.OverviewPage(self.driver, self.port) + if page.is_logged_in: + return page + raise HorizonError("Did not log in within 30 seconds.") diff --git a/automated-pytest-suite/utils/horizon/pages/pageobject.py b/automated-pytest-suite/utils/horizon/pages/pageobject.py new file mode 100644 index 0000000..e3684ec --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/pageobject.py @@ -0,0 +1,88 @@ +from utils.horizon import basewebobject 
+from utils.horizon.helper import HorizonDriver +from consts.proj_vars import ProjVar +from time import sleep + + +class PageObject(basewebobject.BaseWebObject): + """Base class for page objects.""" + # BASE_URL = 'http://' + ProjVar.get_var("LAB")['floating ip'] + PARTIAL_URL = None + + def __init__(self, driver=None, port=None): + if not driver: + driver = HorizonDriver.get_driver() + super(PageObject, self).__init__(driver) + self._page_title = None + self.port = port + + @property + def page_title(self): + return self.driver.title + + @property + def base_url(self): + from consts.auth import CliAuth + prefix = 'http' + if CliAuth.get_var('HTTPS'): + prefix = 'https' + oam_ip = ProjVar.get_var("LAB")['floating ip'] + + if not self.port: + self.port = 8080 if prefix == 'http' else 8443 + base_url = '{}://{}:{}'.format(prefix, oam_ip, self.port) # horizon url matt + if not base_url.endswith('/'): + base_url += '/' + return base_url + + @property + def target_url(self): + return self.base_url + self.PARTIAL_URL + + def get_current_page_url(self): + return self.driver.current_url + + def close_window(self): + return self.driver.close() + + def is_nth_window_opened(self, n): + return len(self.driver.window_handles) == n + + def switch_window(self, window_name=None, window_index=None): + """Switches focus between the webdriver windows. + + Args: + - window_name: The name of the window to switch to. + - window_index: The index of the window handle to switch to. + If the method is called without arguments it switches to the + last window in the driver window_handles list. + In case only one window exists nothing effectively happens. + Usage: + page.switch_window('_new') + page.switch_window(2) + page.switch_window() + """ + + if window_name is not None and window_index is not None: + raise ValueError("switch_window receives the window's name or " + "the window's index, not both.") + if window_name is not None: + self.driver.switch_to.window(window_name) + elif window_index is not None: + self.driver.switch_to.window( + self.driver.window_handles[window_index]) + else: + self.driver.switch_to.window(self.driver.window_handles[-1]) + + def go_to_previous_page(self): + self.driver.back() + + def go_to_next_page(self): + self.driver.forward() + + def refresh_page(self): + self.driver.refresh() + + def go_to_target_page(self): + self.driver.get(self.target_url) + sleep(1) diff --git a/automated-pytest-suite/utils/horizon/pages/project/__init__.py b/automated-pytest-suite/utils/horizon/pages/project/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/project/apiaccesspage.py b/automated-pytest-suite/utils/horizon/pages/project/apiaccesspage.py new file mode 100644 index 0000000..40a8636 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/apiaccesspage.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
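+
+# Descriptive comment (not in the original patch): page object for the
+# Project > API Access panel ('project/api_access'). It wraps the 'endpoints'
+# table, exposes the openrc v2/v3 download actions (see the FIXME below), and
+# get_credentials_from_file() parses OS_USERNAME plus the tenant/project name
+# and id out of a downloaded rc file.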
+ +from os import listdir +from os.path import isfile +from os.path import join +from re import search + +from utils.horizon.pages import basepage +from utils.horizon.regions import tables + + +class ApiAccessTable(tables.TableRegion): + name = "endpoints" + + # FIXME: following methods do not work. Only view credentials button can be + # found in bind_table_action + + @tables.bind_table_action('download_openrc_v2') + def download_openstack_rc_v2(self, download_button): + download_button.contextClick() + + @tables.bind_table_action('download_openrc') + def download_openstack_rc_v3(self, download_button): + download_button.contextclick() + + +class ApiAccessPage(basepage.BasePage): + PARTIAL_URL = 'project/api_access' + + @property + def apiaccess_table(self): + return ApiAccessTable(self.driver) + + def download_openstack_rc_file(self, version=3): + if version == 2: + self.apiaccess_table.download_openstack_rc_v2() + elif version == 3: + self.apiaccess_table.download_openstack_rc_v3() + + def list_of_files(self, directory, template): + return [f for f in listdir(directory) if isfile(join(directory, f)) + and f.endswith(template)] + + def get_credentials_from_file(self, version, directory, template): + self._wait_until( + lambda _: len(self.list_of_files(directory, template)) > 0) + file_name = self.list_of_files(directory, template)[0] + with open(join(directory, file_name)) as cred_file: + content = cred_file.read() + username_re = r'export OS_USERNAME="([^"]+)"' + if version == 2: + tenant_name_re = r'export OS_TENANT_NAME="([^"]+)"' + tenant_id_re = r'export OS_TENANT_ID=(.+)' + elif version == 3: + tenant_name_re = r'export OS_PROJECT_NAME="([^"]+)"' + tenant_id_re = r'export OS_PROJECT_ID=(.+)' + else: + raise ValueError("Unknown version: {}. Valid versions: 2, 3 ".format(version)) + username = search(username_re, content).group(1) + tenant_name = search(tenant_name_re, content).group(1) + tenant_id = search(tenant_id_re, content).group(1) + cred_dict = {'OS_USERNAME': username, + 'OS_TENANT_NAME': tenant_name, + 'OS_TENANT_ID': tenant_id} + return cred_dict diff --git a/automated-pytest-suite/utils/horizon/pages/project/compute/__init__.py b/automated-pytest-suite/utils/horizon/pages/project/compute/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/project/compute/imagespage.py b/automated-pytest-suite/utils/horizon/pages/project/compute/imagespage.py new file mode 100644 index 0000000..3a74443 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/compute/imagespage.py @@ -0,0 +1,278 @@ +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables +from utils.horizon.pages.project.compute import instancespage +from utils.horizon.pages.project.volumes.volumespage import VolumesPage + + +class ImagesForm(forms.FormRegion): + _fields_locator = (by.By.CSS_SELECTOR, 'ng-include') + _submit_locator = (by.By.CSS_SELECTOR, '*.btn.btn-primary.finish') + + +class ImagesClickVisibility(forms.YesOrNoFormFieldRegion): + _buttons_locator = (by.By.CSS_SELECTOR, 'label') + + +class ImagesTable(tables.TableRegion): + name = "OS::Glance::Image" + _rows_locator = (by.By.CSS_SELECTOR, 'tbody > tr[class="ng-scope"]') + _search_field_locator = (by.By.CSS_SELECTOR, 'div.search-bar input.search-input') + _clear_btn_locator = (by.By.CSS_SELECTOR, 'div.search-bar a.magic-search-clear') + + CREATE_IMAGE_FORM_FIELDS = ( + 
"name", "description", "image_file", "kernel", "ramdisk", + "format", "architecture", "min_disk", "min_ram", + "is_public", "protected" + ) + + CREATE_VOLUME_FROM_IMAGE_FORM_FIELDS = ( + "name", "description", "image_source", + "type", "volume-size", "availability-zone") + + EDIT_IMAGE_FORM_FIELDS = ( + "name", "description", "format", "min_disk", + "min_ram", "public", "protected" + ) + + def filter(self, value): + self._set_search_field(value) + + def clear(self): + btn = self._get_element(*self._clear_btn_locator) + btn.click() + + @tables.bind_table_action('btn-default', attribute_search='class') + def create_image(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return ImagesForm(self.driver, + field_mappings=self.CREATE_IMAGE_FORM_FIELDS) + + @tables.bind_table_action('btn-danger', attribute_search='class') + def delete_image(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('text-danger', attribute_search='class') + def delete_image_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('create_volume_from_image', secondary_locator_index=1) + def create_volume(self, create_volume, row): + """ + Create volume must be referenced by index using the secondary_locator_index + since the a tag does not have defining attributes. The create volume button + is under the first li tag under ul.dropdown-menu for the specified row. + The parameter is explained in the tables.bind_row_action docstring. + """ + create_volume.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, + field_mappings=self.CREATE_VOLUME_FROM_IMAGE_FORM_FIELDS) + + @tables.bind_row_action('btn-default', attribute_search='class') + def launch_instance(self, launch_button, row): + launch_button.click() + return instancespage.LaunchInstanceForm(self.driver) + + @tables.bind_row_action('update_metadata', secondary_locator_index=3) + def update_metadata(self, metadata_button, row): + """ + Metadata must be referenced by index using the secondary_locator_index + since the a tag does not have defining attributes. The update metadata + button is under the third li under ul.dropdown-menu for the specified row. + The parameter is explained in the tables.bind_row_action docstring. + """ + metadata_button.click() + self.wait_till_spinner_disappears() + return forms.MetadataFormRegion(self.driver) + + @tables.bind_row_action('edit', secondary_locator_index=2) + def edit_image(self, edit_button, row): + """ + Edit Image must be referenced by index using the secondary_locator_index + since the a tag does not have defining attributes. The edit image button + is under the second li under ul.dropdown-menu for the specified row. + The parameter is explained in the tables.bind_row_action docstring. 
+ """ + edit_button.click() + self.wait_till_spinner_disappears() + return ImagesForm(self.driver, + field_mappings=self.EDIT_IMAGE_FORM_FIELDS) + + @tables.bind_row_anchor_column('Image Name') + def go_to_image_description_page(self, row_link, row): + row_link.click() + return forms.ItemTextDescription(self.driver) + + def _table_locator(self, table_name): + return by.By.CSS_SELECTOR, 'hz-resource-table[resource-type-name="%s"]' % table_name + + +class ImagesPage(basepage.BasePage): + + PARTIAL_URL = 'project/images' + + IMAGES_TABLE_NAME_COLUMN = 'Name' + IMAGES_TABLE_STATUS_COLUMN = 'Status' + + def _get_row_with_image_name(self, name): + return self.images_table.get_row(self.IMAGES_TABLE_NAME_COLUMN, name) + + @property + def images_table(self): + return ImagesTable(self.driver) + + def create_image(self, name, description=None, image_file=None, + image_format=None, architecture=None, + minimum_disk=None, minimum_ram=None, + is_public=None, is_protected=None): + create_image_form = self.images_table.create_image() + create_image_form.name.text = name + if description is not None: + create_image_form.description.text = description + create_image_form.image_file.choose(image_file) + if image_format is not None: + create_image_form.disk_format.value = image_format + if architecture is not None: + create_image_form.architecture.text = architecture + if minimum_disk is not None: + create_image_form.minimum_disk.value = minimum_disk + if minimum_ram is not None: + create_image_form.minimum_disk.value = minimum_ram + if is_public is True: + create_image_form.is_public.mark() + if is_public is False: + create_image_form.is_public.unmark() + if is_protected is True: + create_image_form.protected.mark() + if is_protected is False: + create_image_form.protected.unmark() + create_image_form.submit() + + def delete_image(self, name): + row = self._get_row_with_image_name(name) + row.mark() + confirm_delete_images_form = self.images_table.delete_image() + confirm_delete_images_form.submit() + + def delete_image_by_row(self, name): + row = self._get_row_with_image_name(name) + delete_image_form = self.images_table.delete_image_by_row(row) + delete_image_form.submit() + + def add_custom_metadata(self, name, metadata): + row = self._get_row_with_image_name(name) + update_metadata_form = self.images_table.update_metadata(row) + for field_name, value in metadata.items(): + update_metadata_form.add_custom_field(field_name, value) + update_metadata_form.submit() + + def check_image_details(self, name, dict_with_details): + row = self._get_row_with_image_name(name) + matches = [] + description_page = self.images_table.go_to_image_description_page(row) + content = description_page.get_content() + + for name, value in content.items(): + if name in dict_with_details: + if dict_with_details[name] in value: + matches.append(True) + return matches + + def edit_image(self, name, new_name=None, description=None, + disk_format=None, minimum_disk=None, + minimum_ram=None, public=None, protected=None): + row = self._get_row_with_image_name(name) + confirm_edit_images_form = self.images_table.edit_image(row) + if new_name is not None: + confirm_edit_images_form.name.text = new_name + if description is not None: + confirm_edit_images_form.description.text = description + if disk_format is not None: + confirm_edit_images_form.disk_format = disk_format + if minimum_disk is not None: + confirm_edit_images_form.minimum_disk.value = minimum_disk + if minimum_ram is not None: + confirm_edit_images_form.minimum_ram.value = 
minimum_ram + if public is True: + confirm_edit_images_form.public.mark() + if public is False: + confirm_edit_images_form.public.unmark() + if protected is True: + confirm_edit_images_form.protected.mark() + if protected is False: + confirm_edit_images_form.protected.unmark() + confirm_edit_images_form.submit() + + def is_image_present(self, name): + return bool(self._get_row_with_image_name(name)) + + def is_image_active(self, name): + def cell_getter(): + row = self._get_row_with_image_name(name) + return row and row.cells[self.IMAGES_TABLE_STATUS_COLUMN] + return bool(self.images_table.wait_cell_status(cell_getter, 'Active')) + + def wait_until_image_active(self, name): + self._wait_until(lambda x: self.is_image_active(name)) + + def get_image_info(self, image_name, header): + row = self._get_row_with_image_name(image_name) + return row.cells[header].text + + def create_volume_from_image(self, image_name, volume_name=None, + description=None, type=None, + volume_size=None, availability_zone=None): + row = self._get_row_with_image_name(image_name) + create_volume_form = self.images_table.create_volume(row) + if volume_name is not None: + create_volume_form.name.text = volume_name + if description is not None: + create_volume_form.description.text = description + if type is not None: + create_volume_form.type.text = type + if volume_size is not None: + create_volume_form.size.value = volume_size + if availability_zone is not None: + create_volume_form.availability_zone.text = availability_zone + create_volume_form.submit() + return VolumesPage(self.driver, self.port) + + def launch_instance_from_image(self, name, instance_name, availability_zone=None, count=None, + boot_source_type=None, create_new_volume=None, + delete_volume_on_instance_delete=None, volume_size=None, + source_name=None, flavor_name=None, network_names=None): + row = self._get_row_with_image_name(name) + instance_form = self.images_table.launch_instance(row) + instance_form.fields['name'].text = instance_name + if availability_zone is not None: + instance_form.fields['availability-zone'].text = availability_zone + if count is not None: + instance_form.fields['instance-count'].value = count + instance_form.switch_to(1) + if boot_source_type is not None: + instance_form.fields['boot-source-type'].text = boot_source_type + instance_form._init_tab_fields(1) + if create_new_volume is True: + instance_form.fields['Create New Volume'].click_yes() + if delete_volume_on_instance_delete is True: + instance_form.fields['Delete Volume on Instance Delete'].click_yes() + if delete_volume_on_instance_delete is False: + instance_form.fields['Delete Volume on Instance Delete'].click_no() + if create_new_volume is False: + instance_form.fields['Create New Volume'].click_no() + if volume_size is not None: + instance_form.fields['volume-size'].value = volume_size + if source_name is not None: + instance_form.addelement('Name', source_name) + instance_form.switch_to(2) + instance_form.addelement('Name', flavor_name) + instance_form.switch_to(3) + instance_form.addelements('Network', network_names) + instance_form.submit() diff --git a/automated-pytest-suite/utils/horizon/pages/project/compute/instancespage.py b/automated-pytest-suite/utils/horizon/pages/project/compute/instancespage.py new file mode 100644 index 0000000..050ecaa --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/compute/instancespage.py @@ -0,0 +1,406 @@ +import time +import re + +from selenium.common import exceptions +from selenium.webdriver.common 
import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables +from utils.horizon.regions import menus +from consts.stx import Networks + + +class LaunchInstanceForm(forms.TabbedFormRegion): + _submit_locator = (by.By.XPATH, '//button[@class="btn btn-primary finish"]') + _fields_locator = (by.By.XPATH, "//div[starts-with(@class,'step ng-scope')]") + _tables_locator = (by.By.XPATH, ".//table") + + field_mappings = ( + ("name", "availability-zone", "count"), + ("boot-source-type", "volume-size", "Create New Volume", + "Delete Volume on Instance Delete"), + (), + (), + (), + (), + (), + ("customization-script", "load-script", "disk-partition", "config-drive"), + (), + (), + (), + ("min-inst-count",) + ) + + def _init_tab_fields(self, tab_index): + self.src_elem = self.driver + fieldsets = self._get_elements(*self._fields_locator) + self.fields_src_elem = fieldsets[tab_index] + self.src_elem = fieldsets[tab_index] + + @property + def tabs(self): + return menus.InstancesTabbedMenuRegion(self.driver, + src_elem=self.src_elem) + + @property + def contained_tables(self): + return self._get_elements(*self._tables_locator) + + class AllocatedTable(tables.TableRegion): + _rows_locator = (by.By.CSS_SELECTOR, 'tbody>tr[class="ng-scope"]') + + class AvailableTable(tables.TableRegion): + _rows_locator = (by.By.CSS_SELECTOR, 'tbody>tr[class="ng-scope"]') + + # server group's available table contains a inner table so use a different column names locator + class ServerGrpAvailableTable(tables.TableRegion): + _rows_locator = (by.By.CSS_SELECTOR, 'tbody>tr[class="ng-scope"]') + _columns_names_locator = (by.By.CSS_SELECTOR, 'thead > tr:nth-child(2) > th') + + @property + def allocated_table(self): + return self.AllocatedTable(self.driver, self.contained_tables[0]) + + @property + def available_table(self): + return self.AvailableTable(self.driver, self.contained_tables[1]) + + @property + def server_grp_available_table(self): + return self.ServerGrpAvailableTable(self.driver, self.contained_tables[1]) + + def __init__(self, driver): + super(LaunchInstanceForm, self).__init__( + driver, field_mappings=()) + + def addservergrp(self, column_name, name): + self.server_grp_available_table.get_row(column_name, name).add() + + def addelement(self, column_name, name): + self.available_table.get_row(column_name, name).add() + + def addelements(self, column_name, names): + for name in names: + self.available_table.get_row(column_name, name).add() + + +class InstancesTable(tables.TableRegion): + name = "instances" + + CREATE_SNAPSHOT_FORM_FIELDS = ("name",) + ASSOCIATE_FLOATING_IP_FORM_FIELDS = ("ip_id", "instance_id") + EDIT_INSTANCE_FORM_FIELDS = (("name",), + {"groups": menus.MembershipMenuRegion}) + ATTACH_VOLUME_FORM_FIELDS = ("volume",) + RESIZE_INSTANCE_FORM_FIELDS = (("old_flavor_name", "flavor"), + ("disk_config", "min_count", "server_group")) + REBUILD_INSTANCE_FORM_FIELDS = ("image", "disk_config") + + @tables.bind_table_action('launch-ng') + def launch_instance(self, launch_button): + launch_button.click() + self.wait_till_spinner_disappears() + return LaunchInstanceForm(self.driver) + + @tables.bind_table_action('delete') + def delete_instance(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_instance_by_row(self, delete_instance, row): + delete_instance.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('snapshot') + 
def create_snapshot(self, create_button, row): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.CREATE_SNAPSHOT_FORM_FIELDS) + + @tables.bind_row_action('associate') + def associate_floating_ip(self, associate_button, row): + associate_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.ASSOCIATE_FLOATING_IP_FORM_FIELDS) + + @tables.bind_row_action('edit') + def edit_instance(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, field_mappings=self.EDIT_INSTANCE_FORM_FIELDS) + + @tables.bind_row_action('attach_volume') + def attach_volume(self, attach_button, row): + attach_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.ATTACH_VOLUME_FORM_FIELDS) + + @tables.bind_row_action('detach_volume') + def detach_volume(self, detach_button, row): + detach_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.ATTACH_VOLUME_FORM_FIELDS) + + @tables.bind_row_action('edit') + def edit_security_groups(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.EDIT_INSTANCE_FORM_FIELDS, + default_tab=1) + + @tables.bind_row_action('pause') + def pause_instance(self, pause_button, row): + pause_button.click() + + @tables.bind_row_action('resume') + def resume_instance(self, resume_button, row): + resume_button.click() + + @tables.bind_row_action('suspend') + def suspend_instance(self, suspend_button, row): + suspend_button.click() + + @tables.bind_row_action('resize') + def resize_instance(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, + field_mappings=self.RESIZE_INSTANCE_FORM_FIELDS) + + @tables.bind_row_action('lock') + def lock_instance(self, lock_button, row): + lock_button.click() + + @tables.bind_row_action('unlock') + def lock_instance(self, unlock_button, row): + unlock_button.click() + + @tables.bind_row_action('soft_reboot') + def soft_reboot_instance(self, soft_reboot_button, row): + soft_reboot_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('reboot') + def hard_reboot_instance(self, hard_reboot_button, row): + hard_reboot_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('stop') + def shut_off_instance(self, shut_off_button, row): + shut_off_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('start') + def start_instance(self, start_button, row): + start_button.click() + + @tables.bind_row_action('rebuild') + def rebuild_instance(self, rebuild_button, row): + rebuild_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.REBUILD_INSTANCE_FORM_FIELDS) + + +class InstancesPage(basepage.BasePage): + PARTIAL_URL = 'project/instances' + + INSTANCES_TABLE_NAME_COLUMN = 'Instance Name' + INSTANCES_TABLE_STATUS_COLUMN = 'Status' + INSTANCES_TABLE_IP_COLUMN = 'IP Address' + + def _get_row_with_instance_name(self, name): + return self.instances_table.get_row(self.INSTANCES_TABLE_NAME_COLUMN, name) + + @property + def instances_table(self): + return InstancesTable(self.driver) + + def is_instance_present(self, name): + return 
bool(self._get_row_with_instance_name(name)) + + def create_instance(self, instance_name, availability_zone=None, count=None, + boot_source_type='Image', create_new_volume=False, + delete_volume_on_instance_delete=None, volume_size=None, + source_name=None, flavor_name=None, network_names=None, + server_group_name=None): + instance_form = self.instances_table.launch_instance() + instance_form.fields['name'].text = instance_name + if availability_zone is not None: + instance_form.fields['availability-zone'].text = availability_zone + if count is not None: + instance_form.fields['instance-count'].value = count + instance_form.switch_to(1) + if boot_source_type is not None: + instance_form.fields['boot-source-type'].text = boot_source_type + time.sleep(1) + instance_form._init_tab_fields(1) + if create_new_volume is True: + instance_form.fields['Create New Volume'].click_yes() + if delete_volume_on_instance_delete is True: + instance_form.fields['Delete Volume on Instance Delete'].click_yes() + if delete_volume_on_instance_delete is False: + instance_form.fields['Delete Volume on Instance Delete'].click_no() + if create_new_volume is False: + instance_form.fields['Create New Volume'].click_no() + if volume_size is not None: + instance_form.fields['volume-size'].value = volume_size + instance_form.addelement('Name', source_name) + instance_form.switch_to(2) + instance_form.addelement('Name', flavor_name) + instance_form.switch_to(3) + + if isinstance(network_names, str): + network_names = [network_names] + instance_form.addelements('Network', network_names) + if server_group_name is not None: + instance_form.switch_to(8) + instance_form.addservergrp('Name', server_group_name) + instance_form.submit() + + def delete_instance_by_row(self, name): + row = self._get_row_with_instance_name(name) + confirm_delete_instances_form = self.instances_table.delete_instance_by_row(row) + confirm_delete_instances_form.submit() + + def delete_instance(self, name): + row = self._get_row_with_instance_name(name) + row.mark() + confirm_form = self.instances_table.delete_instance() + confirm_form.submit() + + def is_instance_deleted(self, name): + return self.instances_table.is_row_deleted( + lambda: self._get_row_with_instance_name(name)) + + def is_instance_active(self, name): + def cell_getter(): + row = self._get_row_with_instance_name(name) + try: + return row and row.cells[self.INSTANCES_TABLE_STATUS_COLUMN] + except exceptions.StaleElementReferenceException: + raise + + status = self.instances_table.wait_cell_status(cell_getter, + ('Active', 'Error')) + return status == 'Active' + + def get_fixed_ipv4(self, name): + row = self._get_row_with_instance_name(name) + ips = row.cells[self.INSTANCES_TABLE_IP_COLUMN].text + for ip in ips.split(): + if re.match(Networks.IPV4_IP, ip): + return ip + + def get_instance_info(self, name, header): + row = self._get_row_with_instance_name(name) + return row.cells[header].text + + def create_snapshot(self, name, snapshot_name): + row = self._get_row_with_instance_name(name) + create_snapshot_form = self.instances_table.create_snapshot(row) + create_snapshot_form.name.text = snapshot_name + create_snapshot_form.submit() + + def associate_floating_ip(self, name, ip_address=None, port=None): + row = self._get_row_with_instance_name(name) + associate_floating_ip_form = self.instances_table.associate_floating_ip(row) + if ip_address is not None: + associate_floating_ip_form.ip_id.text = ip_address + if port is not None: + associate_floating_ip_form.instance_id.text = port + 
associate_floating_ip_form.submit() + + def edit_instance(self, name, newname=None): + row = self._get_row_with_instance_name(name) + edit_instance_form = self.instances_table.edit_instance(row) + if newname is not None: + edit_instance_form.name.text = newname + edit_instance_form.submit() + + def attach_volume(self, name, volume_id): + row = self._get_row_with_instance_name(name) + attach_volume_form = self.instances_table.attach_volume(row) + attach_volume_form.volume.text = volume_id + attach_volume_form.submit() + + def detach_volume(self, name, volume_id): + row = self._get_row_with_instance_name(name) + attach_volume_form = self.instances_table.detach_volume(row) + attach_volume_form.volume.text = volume_id + attach_volume_form.submit() + + def edit_security_groups(self, name, security_groups_to_allocate=None, + securtiy_groups_to_deallocate=None): + row = self._get_row_with_instance_name(name) + edit_form = self.instances_table.edit_security_groups(row) + if security_groups_to_allocate is not None: + for security_group in security_groups_to_allocate: + edit_form.groups.allocate_member(security_group) + if securtiy_groups_to_deallocate is not None: + for security_group in securtiy_groups_to_deallocate: + edit_form.groups.deallocate_member(security_group) + edit_form.submit() + + def pause_instance(self, name): + row = self._get_row_with_instance_name(name) + self.instances_table.pause_instance(row) + + def resume_instance(self, name): + row = self._get_row_with_instance_name(name) + self.instances_table.resume_instance(row) + + def suspend_instance(self, name): + row = self._get_row_with_instance_name(name) + self.instances_table.suspend_instance(row) + + def resize_instance(self, name, new_flavor, disk_partition=None, + min_instance_count=None, server_group=None): + row = self._get_row_with_instance_name(name) + resize_instance_form = self.instances_table.resize_instance(row) + resize_instance_form.flavor.text = new_flavor + resize_instance_form.switch_to(1) + if disk_partition is not None: + resize_instance_form.disk_config.text = disk_partition + if min_instance_count is not None: + resize_instance_form.min_count.text = min_instance_count + if server_group is not None: + resize_instance_form.server_group.text = server_group + resize_instance_form.submit() + + def lock_instance(self, name): + row = self._get_row_with_instance_name(name) + self.instances_table.lock_instance(row) + + def unlock_instance(self, name): + row = self._get_row_with_instance_name(name) + self.instances_table.unlock_instance(row) + + def soft_reboot_instance(self, name): + row = self._get_row_with_instance_name(name) + confirm_form = self.instances_table.soft_reboot_instance(row) + confirm_form.submit() + + def hard_reboot_instance(self, name): + row = self._get_row_with_instance_name(name) + confirm_form = self.instances_table.hard_reboot_instance(row) + confirm_form.submit() + + def shut_off_instance(self, name): + row = self._get_row_with_instance_name(name) + confirm_form = self.instances_table.shut_off_instance(row) + confirm_form.submit() + + def start_instance(self, name): + row = self._get_row_with_instance_name(name) + self.instances_table.start_instance(row) + + def rebuild_instance(self, name, image_name, disk_partition=None): + row = self._get_row_with_instance_name(name) + rebuild_instance_form = self.instances_table.rebuild_instance(row) + rebuild_instance_form.image.text = image_name + if disk_partition is not None: + rebuild_instance_form.disk_config.text = disk_partition + 
rebuild_instance_form.submit() diff --git a/automated-pytest-suite/utils/horizon/pages/project/compute/keypairspage.py b/automated-pytest-suite/utils/horizon/pages/project/compute/keypairspage.py new file mode 100644 index 0000000..bddce47 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/compute/keypairspage.py @@ -0,0 +1,94 @@ +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class KeypairForm: + + def setname(self, name): + name_element = self.driver.find_element_by_css_selector("div.modal-body input") + name_element.send_keys(name) + + def submit(self): + submit_btn = self.driver.find_elements_by_css_selector("button.btn.btn-primary")[0] + submit_btn.click() + + def done(self): + submit_btn = self.driver.find_elements_by_css_selector( + "button[class='btn btn-primary ng-binding']") + submit_btn.click() + + def __init__(self, driver): + self.driver = driver + + +class KeypairsTable(tables.TableRegion): + name = "OS::Nova::Keypair" + CREATE_KEY_PAIR_FORM_FIELDS = 'name' + _rows_locator = (by.By.CSS_SELECTOR, 'tbody > tr[class="ng-scope"]') + + @tables.bind_table_action('btn-default', attribute_search='class') + def create_keypair(self, create_button): + create_button.click() + return KeypairForm(self.driver) + + @tables.bind_row_action('btn-danger', attribute_search='class') + def delete_keypair_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_table_action('btn-danger', attribute_search='class') + def delete_keypair(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + def _table_locator(self, table_name): + return by.By.CSS_SELECTOR, 'hz-resource-table[resource-type-name="%s"]' % table_name + + +class KeypairsPage(basepage.BasePage): + PARTIAL_URL = 'project/key_pairs' + + KEY_PAIRS_TABLE_ACTIONS = ("create", "import", "delete") + KEY_PAIRS_TABLE_ROW_ACTION = "delete" + KEY_PAIRS_TABLE_NAME_COLUMN = 'Name' + + def __init__(self, driver, port=None): + super(KeypairsPage, self).__init__(driver, port=port) + self._page_title = "Key Pairs" + + def _get_row_with_keypair_name(self, name): + return self.keypairs_table.get_row(self.KEY_PAIRS_TABLE_NAME_COLUMN, name) + + @property + def keypairs_table(self): + return KeypairsTable(self.driver) + + @property + def delete_keypair_form(self): + return forms.BaseFormRegion(self.driver) + + def is_keypair_present(self, name): + return bool(self._get_row_with_keypair_name(name)) + + def get_keypair_info(self, name, header): + row = self._get_row_with_keypair_name(name) + return row.cells[header].text + + def create_keypair(self, keypair_name): + create_keypair_form = self.keypairs_table.create_keypair() + create_keypair_form.setname(keypair_name) + create_keypair_form.submit() + + def delete_keypair_by_row(self, name): + row = self._get_row_with_keypair_name(name) + delete_keypair_form = self.keypairs_table.delete_keypair_by_row(row) + delete_keypair_form.submit() + + def delete_keypair(self, name): + row = self._get_row_with_keypair_name(name) + row.mark() + delete_keypair_form = self.keypairs_table.delete_keypair() + delete_keypair_form.submit() diff --git a/automated-pytest-suite/utils/horizon/pages/project/compute/overviewpage.py b/automated-pytest-suite/utils/horizon/pages/project/compute/overviewpage.py new file mode 100644 index 0000000..9a8908c --- /dev/null +++ 
b/automated-pytest-suite/utils/horizon/pages/project/compute/overviewpage.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class UsageTable(tables.TableRegion): + name = 'project_usage' + + +class OverviewPage(basepage.BasePage): + _date_form_locator = (by.By.ID, 'date_form') + + USAGE_TABLE_NAME_COLUMN = 'Instance Name' + + @property + def usage_table(self): + return UsageTable(self.driver, self) + + def _get_row_with_instance_name(self, name): + return self.usage_table.get_row(self.USAGE_TABLE_NAME_COLUMN, name) + + def is_instance_present(self, name): + return bool(self._get_row_with_instance_name(name)) + + def get_instance_info(self, instance_name, header): + row = self._get_row_with_instance_name(instance_name) + return row.cells[header].text + + @property + def date_form(self): + src_elem = self._get_element(*self._date_form_locator) + return forms.DateFormRegion(self.driver, src_elem) + + def set_usage_query_time_period(self, start_date, end_date): + self.date_form.query(start_date, end_date) diff --git a/automated-pytest-suite/utils/horizon/pages/project/compute/servergroupspage.py b/automated-pytest-suite/utils/horizon/pages/project/compute/servergroupspage.py new file mode 100644 index 0000000..0a4826a --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/compute/servergroupspage.py @@ -0,0 +1,81 @@ +import time + +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class ServerGroupsForm(forms.FormRegion): + _fields_locator = (by.By.CSS_SELECTOR, 'form') + + +class ServerGroupsTable(tables.TableRegion): + + name = "OS::Nova::ServerGroup" + + CREATE_SERVER_GROUP_FORM_FIELDS = ("name", "policy") + + @tables.bind_table_action('btn-default', attribute_search='class') + def create_server_group(self, create_button): + create_button.click() + time.sleep(5) + self.wait_till_spinner_disappears() + + return ServerGroupsForm(self.driver, field_mappings=self.CREATE_SERVER_GROUP_FORM_FIELDS) + + @tables.bind_table_action('btn-danger', attribute_search='class') + def delete_server_group(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_table_action('delete') + def delete_server_group_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + def _table_locator(self, table_name): + return by.By.CSS_SELECTOR, 'hz-resource-table[resource-type-name="%s"]' % table_name + + +class ServerGroupsPage(basepage.BasePage): + + PARTIAL_URL = 'project/server_groups' + GROUP_TABLE_NAME_COLUMN = 'Name' + + @property + def server_groups_table(self): + return ServerGroupsTable(self.driver) + + def _get_row_with_server_group_name(self, name): + return 
self.server_groups_table.get_row(self.GROUP_TABLE_NAME_COLUMN, name) + + def create_server_group(self, name, policy=None, is_best_effort=False, group_size=None): + create_form = self.server_groups_table.create_server_group() + create_form.name.text = name + if policy is not None: + create_form.policy.value = policy + if is_best_effort: + create_form.is_best_effort.mark() + if group_size is not None: + create_form.group_size.text = group_size + create_form.submit() + + def delete_server_group(self, name): + row = self._get_row_with_server_group_name(name) + row.mark() + confirm_delete_form = self.server_groups_table.delete_server_group() + confirm_delete_form.submit() + + def delete_server_group_by_row(self, name): + row = self._get_row_with_server_group_name(name) + confirm_delete_form = self.server_groups_table.delete_server_group_by_row(row) + confirm_delete_form.submit() + + def is_server_group_present(self, name): + return bool(self._get_row_with_server_group_name(name)) + + def get_server_group_info(self, name, header): + row = self._get_row_with_server_group_name(name) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/__init__.py b/automated-pytest-suite/utils/horizon/pages/project/network/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/floatingipspage.py b/automated-pytest-suite/utils/horizon/pages/project/network/floatingipspage.py new file mode 100644 index 0000000..920f0fe --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/network/floatingipspage.py @@ -0,0 +1,103 @@ +import re + +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class FloatingIPTable(tables.TableRegion): + name = 'floating_ips' + ALLOCATE_IP_FORM_FIELDS = ("pool", "tenant", "floating_ip_address") + FLOATING_IP_ASSOCIATIONS_FORM_FIELDS = ("ip_id", "instance_id") + + @tables.bind_table_action('allocate') + def allocate_ip(self, allocate_button): + allocate_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.ALLOCATE_IP_FORM_FIELDS) + + @tables.bind_table_action('release') + def release_ip(self, release_button): + release_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('release') + def release_ip_by_row(self, release_button, row): + release_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('associate') + def associate_ip(self, associate_button, row): + associate_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.FLOATING_IP_ASSOCIATIONS_FORM_FIELDS) + + @tables.bind_row_action('disassociate') + def disassociate_ip(self, disassociate_button, row): + disassociate_button.click() + self.wait_till_spinner_disappears() + return forms.BaseFormRegion(self.driver) + + +class FloatingipsPage(basepage.BasePage): + PARTIAL_URL = 'project/floating_ips' + FLOATING_IPS_TABLE_IP_COLUMN = 'IP Address' + + _floatingips_fadein_popup_locator = ( + by.By.CSS_SELECTOR, '.alert.alert-success.alert-dismissable.fade.in>p') + + def _get_row_with_floatingip(self, floatingip): + return self.floatingips_table.get_row( + self.FLOATING_IPS_TABLE_IP_COLUMN, floatingip) + + @property + def floatingips_table(self): + return FloatingIPTable(self.driver) + + def 
get_floatingip_info(self, floatingip, header):
+        row = self._get_row_with_floatingip(floatingip)
+        return row.cells[header].text
+
+    def allocate_floatingip(self, pool=None):
+        floatingip_form = self.floatingips_table.allocate_ip()
+        if pool is not None:
+            floatingip_form.pool.text = pool
+        floatingip_form.submit()
+        ip = re.compile(r'(([2][5][0-5]\.)|([2][0-4][0-9]\.)' +
+                        r'|([0-1]?[0-9]?[0-9]\.)){3}(([2][5][0-5])|'
+                        r'([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))')
+        match = ip.search((self._get_element(
+            *self._floatingips_fadein_popup_locator)).text)
+        floatingip = str(match.group())
+        return floatingip
+
+    def release_floatingip(self, floatingip):
+        row = self._get_row_with_floatingip(floatingip)
+        row.mark()
+        confirm_form = self.floatingips_table.release_ip()
+        confirm_form.submit()
+
+    def release_floatingip_by_row(self, floatingip):
+        row = self._get_row_with_floatingip(floatingip)
+        confirm_form = self.floatingips_table.release_ip_by_row(row)
+        confirm_form.submit()
+
+    def is_floatingip_present(self, floatingip):
+        return bool(self._get_row_with_floatingip(floatingip))
+
+    def associate_floatingip(self, floatingip, instance_name=None,
+                             instance_ip=None):
+        row = self._get_row_with_floatingip(floatingip)
+        floatingip_form = self.floatingips_table.associate_ip(row)
+        floatingip_form.instance_id.text = "{}: {}".format(instance_name,
+                                                           instance_ip)
+        floatingip_form.submit()
+
+    def disassociate_floatingip(self, floatingip):
+        row = self._get_row_with_floatingip(floatingip)
+        floatingip_form = self.floatingips_table.disassociate_ip(row)
+        floatingip_form.submit()
diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/managerulespage.py b/automated-pytest-suite/utils/horizon/pages/project/network/managerulespage.py
new file mode 100644
index 0000000..ca9b716
--- /dev/null
+++ b/automated-pytest-suite/utils/horizon/pages/project/network/managerulespage.py
@@ -0,0 +1,63 @@
+from utils.horizon.pages import basepage
+from utils.horizon.regions import forms
+from utils.horizon.regions import tables
+
+
+class RulesTable(tables.TableRegion):
+    name = 'rules'
+    ADD_RULE_FORM_FIELDS = ("rule_menu", "direction", "port_or_range", "port",
+                            "remote", "cidr")
+
+    @tables.bind_table_action('add_rule')
+    def create_rule(self, create_button):
+        create_button.click()
+        self.wait_till_spinner_disappears()
+        return forms.FormRegion(
+            self.driver,
+            field_mappings=self.ADD_RULE_FORM_FIELDS)
+
+    @tables.bind_table_action('delete')
+    def delete_rules(self, delete_button):
+        delete_button.click()
+        return forms.BaseFormRegion(self.driver, None)
+
+    @tables.bind_row_action('delete')
+    def delete_rule_by_row(self, delete_button, row):
+        delete_button.click()
+        return forms.BaseFormRegion(self.driver, None)
+
+
+class ManageRulesPage(basepage.BasePage):
+
+    RULES_TABLE_PORT_RANGE_COLUMN = 'Port Range'
+
+    def _get_row_with_port_range(self, port):
+        return self.rules_table.get_row(
+            self.RULES_TABLE_PORT_RANGE_COLUMN, port)
+
+    @property
+    def rules_table(self):
+        return RulesTable(self.driver)
+
+    def create_rule(self, port):
+        create_rule_form = self.rules_table.create_rule()
+        create_rule_form.port.text = port
+        create_rule_form.submit()
+
+    def delete_rule(self, port):
+        row = self._get_row_with_port_range(port)
+        modal_confirmation_form = self.rules_table.delete_rule_by_row(row)
+        modal_confirmation_form.submit()
+
+    def delete_rule_by_table(self, port):
+        row = self._get_row_with_port_range(port)
+        row.mark()
+        modal_confirmation_form = self.rules_table.delete_rules()
+
modal_confirmation_form.submit() + + def is_rule_present(self, port): + return bool(self._get_row_with_port_range(port)) + + def get_rule_info(self, port, header): + row = self._get_row_with_port_range(port) + return row.cells[header].text diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/networkoverviewpage.py b/automated-pytest-suite/utils/horizon/pages/project/network/networkoverviewpage.py new file mode 100644 index 0000000..0bbe9c8 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/network/networkoverviewpage.py @@ -0,0 +1,18 @@ +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage + + +class NetworkOverviewPage(basepage.BasePage): + + DEFAULT_NETWORK_NAME = 'external-net0' + + def is_network_name_present(self, network_name=DEFAULT_NETWORK_NAME): + dd_text = self._get_element(by.By.XPATH, + "//dd[.='{0}']".format(network_name)).text + return dd_text == network_name + + def is_network_status(self, status): + dd_text = self._get_element(by.By.XPATH, + "//dd[.='{0}']".format(status)).text + return dd_text == status diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/networkspage.py b/automated-pytest-suite/utils/horizon/pages/project/network/networkspage.py new file mode 100644 index 0000000..998a4ab --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/network/networkspage.py @@ -0,0 +1,177 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class NetworksTable(tables.TableRegion): + name = "networks" + CREATE_NETWORK_FORM_FIELDS = (("net_name", "admin_state", + "with_subnet", "az_hints"), + ("subnet_name", "cidr", "ip_version", + "gateway_ip", "no_gateway"), + ("enable_dhcp", "allocation_pools", + "dns_nameservers", "host_routes")) + + EDIT_NETWORK_FORM_FIELDS = ("name", "admin_state", "shared") + CREATE_SUBNET_FORM_FIELDS = (("subnet_name", "cidr", "ip_version", + "gateway_ip", "no_gateway"), + ("enable_dhcp", "allocation_pools", + "dns_nameservers", "host_routes")) + + @tables.bind_table_action('create') + def create_network(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.TabbedFormRegion(self.driver, field_mappings=self.CREATE_NETWORK_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_network(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action("delete") + def delete_network_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action("update") + def edit_network(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.EDIT_NETWORK_FORM_FIELDS) + + @tables.bind_row_action("subnet") + def create_subnet(self, create_button, row): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, field_mappings=self.CREATE_SUBNET_FORM_FIELDS) + + +class NetworksPage(basepage.BasePage): + PARTIAL_URL = 'project/networks' + NETWORKS_TABLE_NAME_COLUMN = 'Name' + NETWORKS_TABLE_STATUS_COLUMN = 'Status' + + def _get_row_with_network_name(self, name): + return self.networks_table.get_row( + self.NETWORKS_TABLE_NAME_COLUMN, name) + + @property + def networks_table(self): + return NetworksTable(self.driver) + + def create_network(self, network_name, enable_admin_state=None, qos_policy=None, + 
vlan_transparent=None, create_subnet=None, + subnet_name=None, network_address=None, + ip_version=None, gateway_ip=None, disable_gateway=None, + enable_dhcp=None, allocation_pools=None, + dns_name_servers=None, host_routes=None): + create_network_form = self.networks_table.create_network() + create_network_form.net_name.text = network_name + if enable_admin_state is True: + create_network_form.admin_state.mark() + if enable_admin_state is False: + create_network_form.admin_state.unmark() + if qos_policy is not None: + create_network_form.qos.text = qos_policy + if vlan_transparent is True: + create_network_form.vlan_transparent.mark() + if vlan_transparent is False: + create_network_form.vlan_transparent.unmark() + if create_subnet is True: + create_network_form.with_subnet.mark() + if create_subnet is False: + create_network_form.with_subnet.unmark() + create_network_form.switch_to(1) + if subnet_name is not None: + create_network_form.subnet_name.text = subnet_name + if network_address is not None: + create_network_form.cidr.text = network_address + if ip_version is not None: + create_network_form.ip_version.value = ip_version + if gateway_ip is not None: + create_network_form.gateway_ip.text = gateway_ip + if disable_gateway is True: + create_network_form.disable_gateway.mark() + if disable_gateway is False: + create_network_form.disable_gateway.unmark() + create_network_form.switch_to(2) + if enable_dhcp is True: + create_network_form.enable_dhcp.mark() + if enable_dhcp is False: + create_network_form.enable_dhcp.unmark() + if allocation_pools is not None: + create_network_form.allocation_pools.text = allocation_pools + if dns_name_servers is not None: + create_network_form.dns_nameservers.text = dns_name_servers + if host_routes is not None: + create_network_form.host_routes.text = host_routes + create_network_form.submit() + + def edit_network(self, name, new_name=None, enable_admin_state=None, is_shared=None): + row = self._get_row_with_network_name(name) + edit_network_form = self.networks_table.edit_network(row) + if new_name is not None: + edit_network_form.name.text = new_name + if enable_admin_state is True: + edit_network_form.admin_state.mark() + if enable_admin_state is False: + edit_network_form.admin_state.unmark() + if is_shared is True: + edit_network_form.shared.mark() + if is_shared is False: + edit_network_form.shared.unmark() + edit_network_form.submit() + + def create_subnet(self, name, subnet_name, network_address, + ip_version=None, gateway_ip=None, disable_gateway=None, + enable_dhcp=None, allocation_pools=None, + dns_name_servers=None, host_routes=None): + row = self._get_row_with_network_name(name) + create_subnet_form = self.networks_table.create_subnet(row) + create_subnet_form.subnet_name.text = subnet_name + create_subnet_form.cidr.text = network_address + if ip_version is not None: + create_subnet_form.ip_version.text = ip_version + if gateway_ip is not None: + create_subnet_form.gateway_ip.text = gateway_ip + if disable_gateway is True: + create_subnet_form.no_gateway.mark() + if disable_gateway is False: + create_subnet_form.no_gateway.unmark() + create_subnet_form.switch_to(1) + if enable_dhcp is True: + create_subnet_form.enable_dhcp.mark() + if enable_dhcp is False: + create_subnet_form.enable_dhcp.unmark() + if allocation_pools is not None: + create_subnet_form.allocation_pools.text = allocation_pools + if dns_name_servers is not None: + create_subnet_form.dns_nameservers.text = dns_name_servers + if host_routes is not None: + 
create_subnet_form.host_routes.text = host_routes + create_subnet_form.submit() + + def delete_network(self, name): + row = self._get_row_with_network_name(name) + row.mark() + confirm_delete_networks_form = self.networks_table.delete_network() + confirm_delete_networks_form.submit() + + def delete_network_by_row(self, name): + row = self._get_row_with_network_name(name) + confirm_delete_networks_form = self.networks_table.delete_network_by_row(row) + confirm_delete_networks_form.submit() + + def is_network_present(self, name): + return bool(self._get_row_with_network_name(name)) + + def get_network_info(self, network_name, header): + row = self._get_row_with_network_name(network_name) + return row.cells[header].text + + def go_to_networks_tab(self): + self.go_to_tab(0) + + def go_to_qos_policies_tab(self): + self.go_to_tab(1) diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/routerinterfacespage.py b/automated-pytest-suite/utils/horizon/pages/project/network/routerinterfacespage.py new file mode 100644 index 0000000..de7b61e --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/network/routerinterfacespage.py @@ -0,0 +1,76 @@ +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables + + +class InterfacesTable(tables.TableRegion): + name = "interfaces" + CREATE_INTERFACE_FORM_FIELDS = ("subnet_id", "ip_address") + + @tables.bind_table_action('create') + def create_interface(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, + field_mappings=self.CREATE_INTERFACE_FORM_FIELDS + ) + + @tables.bind_table_action('delete') + def delete_interface(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_interface_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + +class RouterInterfacesPage(basepage.BasePage): + + INTERFACES_TABLE_STATUS_COLUMN = 'Status' + INTERFACES_TABLE_NAME_COLUMN = 'Name' + DEFAULT_SUBNET = 'external-net0' + + def _get_row_with_interface_name(self, name): + return self.interfaces_table.get_row( + self.INTERFACES_TABLE_NAME_COLUMN, name) + + @property + def interfaces_table(self): + return InterfacesTable(self.driver) + + @property + def interfaces_names(self): + return list(map(lambda row: row.cells[self. 
+                        INTERFACES_TABLE_NAME_COLUMN].text,
+                        self.interfaces_table.rows))
+
+    def create_interface(self):
+        interface_form = self.interfaces_table.create_interface()
+        interface_form.subnet_id.text = self.DEFAULT_SUBNET
+        # interface_form.ip_address.text = self.DEFAULT_IPv4_ADDRESS
+        interface_form.submit()
+
+    def delete_interface(self, interface_name):
+        row = self._get_row_with_interface_name(interface_name)
+        row.mark()
+        confirm_delete_interface_form = self.interfaces_table.\
+            delete_interface()
+        confirm_delete_interface_form.submit()
+
+    def delete_interface_by_row_action(self, interface_name):
+        row = self._get_row_with_interface_name(interface_name)
+        confirm_delete_interface = self.interfaces_table.\
+            delete_interface_by_row(row)
+        confirm_delete_interface.submit()
+
+    def is_interface_present(self, interface_name):
+        return bool(self._get_row_with_interface_name(interface_name))
+
+    def is_interface_status(self, interface_name, status):
+        row = self._get_row_with_interface_name(interface_name)
+        return row.cells[self.INTERFACES_TABLE_STATUS_COLUMN].text == status
diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/routeroverviewpage.py b/automated-pytest-suite/utils/horizon/pages/project/network/routeroverviewpage.py
new file mode 100644
index 0000000..508b839
--- /dev/null
+++ b/automated-pytest-suite/utils/horizon/pages/project/network/routeroverviewpage.py
@@ -0,0 +1,28 @@
+from selenium.webdriver.common import by
+
+from utils.horizon.pages import basepage
+from utils.horizon.pages.project.network.networkoverviewpage import NetworkOverviewPage
+
+
+class RouterOverviewPage(basepage.BasePage):
+
+    _network_link_locator = (by.By.CSS_SELECTOR,
+                             'hr+dl.dl-horizontal>dt:nth-child(3)+dd>a')
+
+    def __init__(self, driver, router_name):
+        super(RouterOverviewPage, self).__init__(driver)
+        self._page_title = router_name
+
+    def is_router_name_present(self, router_name):
+        dd_text = self._get_element(by.By.XPATH,
+                                    "//dd[.='{0}']".format(router_name)).text
+        return dd_text == router_name
+
+    def is_router_status(self, status):
+        dd_text = self._get_element(by.By.XPATH,
+                                    "//dd[.='{0}']".format(status)).text
+        return dd_text == status
+
+    def go_to_router_network(self):
+        self._get_element(*self._network_link_locator).click()
+        return NetworkOverviewPage(self.driver)
diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/routerspage.py b/automated-pytest-suite/utils/horizon/pages/project/network/routerspage.py
new file mode 100644
index 0000000..b30566c
--- /dev/null
+++ b/automated-pytest-suite/utils/horizon/pages/project/network/routerspage.py
@@ -0,0 +1,133 @@
+from selenium.common import exceptions
+
+from selenium.webdriver.common import by
+
+from utils.horizon.pages import basepage
+from utils.horizon.pages.project.network.routerinterfacespage import RouterInterfacesPage
+from utils.horizon.pages.project.network.routeroverviewpage import RouterOverviewPage
+from utils.horizon.regions import forms
+from utils.horizon.regions import tables
+from time import sleep
+
+
+class RoutersTable(tables.TableRegion):
+    name = "routers"
+    CREATE_ROUTER_FORM_FIELDS = ("name", "admin_state_up", "external_network", "mode")
+    SET_GATEWAY_FORM_FIELDS = ("network_id",)
+    EDIT_ROUTER_FORM_FIELDS = ("name", "admin_state", "mode")
+
+    @tables.bind_table_action('create')
+    def create_router(self, create_button):
+        create_button.click()
+        self.wait_till_spinner_disappears()
+        return forms.FormRegion(self.driver, field_mappings=self.CREATE_ROUTER_FORM_FIELDS)
+
+    @tables.bind_table_action('delete')
+    def delete_router(self, delete_button):
+        delete_button.click()
+        return forms.BaseFormRegion(self.driver)
+
+    @tables.bind_row_action('delete')
+    def delete_router_by_row(self, delete_button, row):
+        delete_button.click()
+        return forms.BaseFormRegion(self.driver)
+
+    @tables.bind_row_action('clear')
+    def clear_gateway(self, clear_gateway_button, row):
+        clear_gateway_button.click()
+        return forms.BaseFormRegion(self.driver)
+
+    @tables.bind_row_action('setgateway')
+    def set_gateway(self, set_gateway_button, row):
+        set_gateway_button.click()
+        self.wait_till_spinner_disappears()
+        return forms.FormRegion(self.driver, field_mappings=self.SET_GATEWAY_FORM_FIELDS)
+
+    @tables.bind_row_action('update')
+    def edit_router(self, edit_button, row):
+        edit_button.click()
+        self.wait_till_spinner_disappears()
+        return forms.FormRegion(self.driver, field_mappings=self.EDIT_ROUTER_FORM_FIELDS)
+
+
+class RoutersPage(basepage.BasePage):
+
+    PARTIAL_URL = 'project/routers'
+    ROUTERS_TABLE_NAME_COLUMN = 'Name'
+
+    _interfaces_tab_locator = (by.By.CSS_SELECTOR,
+                               'a[href*="tab=router_details__interfaces"]')
+
+    def _get_row_with_router_name(self, name):
+        return self.routers_table.get_row(
+            self.ROUTERS_TABLE_NAME_COLUMN, name)
+
+    @property
+    def routers_table(self):
+        return RoutersTable(self.driver)
+
+    def create_router(self, name, admin_state_up=None,
+                      external_network=None, router_type=None):
+        create_router_form = self.routers_table.create_router()
+        create_router_form.name.text = name
+        if admin_state_up is True:
+            create_router_form.admin_state_up.mark()
+        if admin_state_up is False:
+            create_router_form.admin_state_up.unmark()
+        if external_network is not None:
+            create_router_form.external_network.text = external_network
+        if router_type is not None:
+            create_router_form.mode.text = router_type
+        create_router_form.submit()
+
+    def edit_router(self, name, new_name=None,
+                    enable_admin_state=None, router_type=None):
+        row = self._get_row_with_router_name(name)
+        edit_router_form = self.routers_table.edit_router(row)
+        if new_name is not None:
+            edit_router_form.name.text = new_name
+        if enable_admin_state is True:
+            edit_router_form.admin_state.mark()
+        if enable_admin_state is False:
+            edit_router_form.admin_state.unmark()
+        if router_type is not None:
+            edit_router_form.mode.text = router_type
+        edit_router_form.submit()
+
+    def set_gateway(self, router_name, external_network):
+        row = self._get_row_with_router_name(router_name)
+        set_gateway_form = self.routers_table.set_gateway(row)
+        set_gateway_form.network_id.text = external_network
+        set_gateway_form.submit()
+
+    def clear_gateway(self, name):
+        row = self._get_row_with_router_name(name)
+        confirm_clear_gateway_form = self.routers_table.clear_gateway(row)
+        confirm_clear_gateway_form.submit()
+
+    def delete_router(self, name):
+        row = self._get_row_with_router_name(name)
+        row.mark()
+        confirm_delete_routers_form = self.routers_table.delete_router()
+        confirm_delete_routers_form.submit()
+
+    def delete_router_by_row(self, name):
+        row = self._get_row_with_router_name(name)
+        confirm_delete_routers_form = self.routers_table.delete_router_by_row(row)
+        confirm_delete_routers_form.submit()
+
+    def is_router_present(self, name):
+        return bool(self._get_row_with_router_name(name))
+
+    def get_router_info(self, router_name, header):
+        row = self._get_row_with_router_name(router_name)
+        return row.cells[header].text
+
+    def go_to_interfaces_page(self, name):
+        self._get_element(by.By.LINK_TEXT,
name).click() + self._get_element(*self._interfaces_tab_locator).click() + return RouterInterfacesPage(self.driver) + + def go_to_overview_page(self, name): + self._get_element(by.By.LINK_TEXT, name).click() + return RouterOverviewPage(self.driver, name) diff --git a/automated-pytest-suite/utils/horizon/pages/project/network/securitygroupspage.py b/automated-pytest-suite/utils/horizon/pages/project/network/securitygroupspage.py new file mode 100644 index 0000000..c783ac4 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/network/securitygroupspage.py @@ -0,0 +1,64 @@ +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from utils.horizon.regions import tables +from utils.horizon.pages.project.network.managerulespage import ManageRulesPage + + +class SecurityGroupsTable(tables.TableRegion): + name = "security_groups" + CREATE_SECURITYGROUP_FORM_FIELDS = ("name", "description") + + @tables.bind_table_action('create') + def create_group(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, field_mappings=self.CREATE_SECURITYGROUP_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_group(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver, None) + + @tables.bind_row_action('manage_rules') + def manage_rules(self, manage_rules_button, row): + manage_rules_button.click() + return ManageRulesPage(self.driver) + + +class SecuritygroupsPage(basepage.BasePage): + PARTIAL_URL = 'project/security_groups' + + SECURITYGROUPS_TABLE_NAME_COLUMN = 'Name' + + def _get_row_with_securitygroup_name(self, name): + return self.securitygroups_table.get_row( + self.SECURITYGROUPS_TABLE_NAME_COLUMN, name) + + @property + def securitygroups_table(self): + return SecurityGroupsTable(self.driver) + + def create_securitygroup(self, name, description=None): + create_securitygroups_form = self.securitygroups_table.create_group() + create_securitygroups_form.name.text = name + if description is not None: + create_securitygroups_form.description.text = description + create_securitygroups_form.submit() + + def delete_securitygroup(self, name): + row = self._get_row_with_securitygroup_name(name) + row.mark() + modal_confirmation_form = self.securitygroups_table.delete_group() + modal_confirmation_form.submit() + + def is_securitygroup_present(self, name): + return bool(self._get_row_with_securitygroup_name(name)) + + def get_security_group_info(self, name, header): + row = self._get_row_with_securitygroup_name(name) + return row.cells[header].text + + def go_to_manage_rules(self, name): + row = self._get_row_with_securitygroup_name(name) + return self.securitygroups_table.manage_rules(row) diff --git a/automated-pytest-suite/utils/horizon/pages/project/orchestration/__init__.py b/automated-pytest-suite/utils/horizon/pages/project/orchestration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/project/orchestration/resourcestypespage.py b/automated-pytest-suite/utils/horizon/pages/project/orchestration/resourcestypespage.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/project/orchestration/stackspage.py b/automated-pytest-suite/utils/horizon/pages/project/orchestration/stackspage.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/project/orchestration/templateversionspage.py 
b/automated-pytest-suite/utils/horizon/pages/project/orchestration/templateversionspage.py
new file mode 100644
index 0000000..e69de29
diff --git a/automated-pytest-suite/utils/horizon/pages/project/volumes/__init__.py b/automated-pytest-suite/utils/horizon/pages/project/volumes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/automated-pytest-suite/utils/horizon/pages/project/volumes/shotspage.py b/automated-pytest-suite/utils/horizon/pages/project/volumes/shotspage.py
new file mode 100644
index 0000000..9c5654c
--- /dev/null
+++ b/automated-pytest-suite/utils/horizon/pages/project/volumes/shotspage.py
@@ -0,0 +1,143 @@
+from utils.horizon.pages import basepage
+from utils.horizon.regions import forms
+from utils.horizon.regions import tables
+from utils.horizon.pages.project.compute import instancespage
+from time import sleep
+
+
+class VolumesnapshotsTable(tables.TableRegion):
+    name = 'volume_snapshots'
+
+    EDIT_SNAPSHOT_FORM_FIELDS = ("name", "description")
+
+    CREATE_VOLUME_FORM_FIELDS = ("name", "description", "snapshot_source", "size")
+
+    @tables.bind_table_action('delete')
+    def delete_volume_snapshot(self, delete_button):
+        delete_button.click()
+        return forms.BaseFormRegion(self.driver)
+
+    @tables.bind_row_action('delete')
+    def delete_volume_snapshot_by_row(self, delete_button, row):
+        delete_button.click()
+        return forms.BaseFormRegion(self.driver)
+
+    @tables.bind_row_action('edit')
+    def edit_snapshot(self, edit_button, row):
+        edit_button.click()
+        self.wait_till_spinner_disappears()
+        return forms.FormRegion(self.driver,
+                                field_mappings=self.EDIT_SNAPSHOT_FORM_FIELDS)
+
+    @tables.bind_row_action('create_from_snapshot')
+    def create_volume(self, create_volume_button, row):
+        create_volume_button.click()
+        self.wait_till_spinner_disappears()
+        return forms.FormRegion(self.driver,
+                                field_mappings=self.CREATE_VOLUME_FORM_FIELDS)
+
+    @tables.bind_row_action('launch_snapshot_ng')
+    def launch_as_instance(self, launch_button, row):
+        launch_button.click()
+        self.wait_till_spinner_disappears()
+        return instancespage.LaunchInstanceForm(self.driver)
+
+
+class VolumesnapshotsPage(basepage.BasePage):
+    PARTIAL_URL = 'project/snapshots'
+    SNAPSHOT_TABLE_NAME_COLUMN = 'Name'
+    SNAPSHOT_TABLE_STATUS_COLUMN = 'Status'
+
+    @property
+    def volumes_napshots_table(self):
+        return VolumesnapshotsTable(self.driver)
+
+    def _get_row_with_volume_snapshot_name(self, name):
+        return self.volumes_napshots_table.get_row(
+            self.SNAPSHOT_TABLE_NAME_COLUMN,
+            name)
+
+    def is_snapshot_present(self, name):
+        return bool(self._get_row_with_volume_snapshot_name(name))
+
+    def get_snapshot_info(self, name, header):
+        row = self._get_row_with_volume_snapshot_name(name)
+        return row.cells[header].text
+
+    def delete_volume_snapshot_by_row(self, name):
+        row = self._get_row_with_volume_snapshot_name(name)
+        confirm_form = self.volumes_napshots_table.delete_volume_snapshot_by_row(row)
+        confirm_form.submit()
+
+    def delete_volume_snapshots(self, names):
+        for name in names:
+            row = self._get_row_with_volume_snapshot_name(name)
+            row.mark()
+        confirm_form = self.volumes_napshots_table.delete_volume_snapshot()
+        confirm_form.submit()
+
+    def is_volume_snapshot_deleted(self, name):
+        return self.volumes_napshots_table.is_row_deleted(
+            lambda: self._get_row_with_volume_snapshot_name(name))
+
+    def is_volume_snapshot_available(self, name):
+        def cell_getter():
+            row = self._get_row_with_volume_snapshot_name(name)
+            return row and row.cells[self.SNAPSHOT_TABLE_STATUS_COLUMN]
+
+        return
bool(self.volumes_napshots_table.wait_cell_status(cell_getter, + 'Available')) + + def edit_snapshot(self, name, new_name=None, description=None): + row = self._get_row_with_volume_snapshot_name(name) + snapshot_edit_form = self.volumes_napshots_table.edit_snapshot(row) + if new_name: + snapshot_edit_form.name.text = new_name + if description: + snapshot_edit_form.description.text = description + snapshot_edit_form.submit() + + def create_volume_from_snapshot(self, snapshot_name, volume_name=None, + description=None, volume_size=None): + row = self._get_row_with_volume_snapshot_name(snapshot_name) + volume_form = self.volumes_napshots_table.create_volume(row) + if volume_name: + volume_form.name.text = volume_name + if description: + volume_form.description.text = description + if volume_size is not None: + volume_form.size.value = volume_size + volume_form.submit() + + def launch_as_instance(self, name, instance_name, availability_zone=None, count=None, + boot_source_type=None, create_new_volume=None, + delete_volume_on_instance_delete=None, volume_size=None, + source_name=None, flavor_name=None, network_names=None): + row = self._get_row_with_volume_snapshot_name(name) + instance_form = self.volumes_napshots_table.launch_as_instance(row) + instance_form.fields['name'].text = instance_name + if availability_zone is not None: + instance_form.fields['availability-zone'].text = availability_zone + if count is not None: + instance_form.fields['instance-count'].value = count + instance_form.switch_to(1) + if boot_source_type is not None: + instance_form.fields['boot-source-type'].text = boot_source_type + sleep(1) + instance_form._init_tab_fields(1) + if create_new_volume is True: + instance_form.fields['Create New Volume'].click_yes() + if delete_volume_on_instance_delete is True: + instance_form.fields['Delete Volume on Instance Delete'].click_yes() + if delete_volume_on_instance_delete is False: + instance_form.fields['Delete Volume on Instance Delete'].click_no() + if create_new_volume is False: + instance_form.fields['Create New Volume'].click_no() + if volume_size is not None: + instance_form.fields['volume-size'].value = volume_size + instance_form.addelement('Name', source_name) + instance_form.switch_to(2) + instance_form.addelement('Name', flavor_name) + instance_form.switch_to(3) + instance_form.addelements('Network', network_names) + instance_form.submit() diff --git a/automated-pytest-suite/utils/horizon/pages/project/volumes/volumespage.py b/automated-pytest-suite/utils/horizon/pages/project/volumes/volumespage.py new file mode 100644 index 0000000..ea90a9a --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/project/volumes/volumespage.py @@ -0,0 +1,363 @@ +from time import sleep + +from selenium.webdriver.common.by import By + +from utils.horizon.pages import basepage +from utils.horizon.pages.project.compute import instancespage +from utils.horizon.regions import forms, tables, messages +from utils import exceptions +from utils.tis_log import LOG + +from consts.timeout import VolumeTimeout + + +class VolumesTable(tables.TableRegion): + name = 'volumes' + + CREATE_VOLUME_FORM_FIELDS = ( + "name", "description", "volume_source_type", "image_source", "volume_source", + "type", "size", "availability_zone") + + EDIT_VOLUME_FORM_FIELDS = ("name", "description", 'bootable') + + CREATE_VOLUME_SNAPSHOT_FORM_FIELDS = ("name", "description") + + EXTEND_VOLUME_FORM_FIELDS = ("new_size",) + + UPLOAD_VOLUME_FORM_FIELDS = ("image_name", "disk_format") + + 
CHANGE_VOLUME_TYPE_FORM_FIELDS = ("name", "volume_type", "migration_policy") + + @tables.bind_table_action('create') + def create_volume(self, create_button): + create_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, field_mappings=self.CREATE_VOLUME_FORM_FIELDS) + + @tables.bind_table_action('delete') + def delete_volume(self, delete_button): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('delete') + def delete_volume_by_row(self, delete_button, row): + delete_button.click() + return forms.BaseFormRegion(self.driver) + + @tables.bind_row_action('edit') + def edit_volume(self, edit_button, row): + edit_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.EDIT_VOLUME_FORM_FIELDS) + + @tables.bind_row_action('retype') + def change_volume_type(self, change_button, row): + change_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.CHANGE_VOLUME_TYPE_FORM_FIELDS) + + @tables.bind_row_action('snapshots') + def create_snapshot(self, create_snapshot_button, row): + create_snapshot_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion( + self.driver, + field_mappings=self.CREATE_VOLUME_SNAPSHOT_FORM_FIELDS) + + @tables.bind_row_action('extend') + def extend_volume(self, extend_button, row): + extend_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.EXTEND_VOLUME_FORM_FIELDS) + + @tables.bind_row_action('launch_volume_ng') + def launch_as_instance(self, launch_volume_button, row): + launch_volume_button.click() + self.wait_till_spinner_disappears() + return instancespage.LaunchInstanceForm(self.driver) + + @tables.bind_row_action('upload_to_image') + def upload_to_image(self, upload_button, row): + upload_button.click() + self.wait_till_spinner_disappears() + return forms.FormRegion(self.driver, + field_mappings=self.UPLOAD_VOLUME_FORM_FIELDS) + + @tables.bind_row_action('attachments') + def manage_attachments(self, manage_attachments, row): + manage_attachments.click() + self.wait_till_spinner_disappears() + return VolumeAttachForm(self.driver) + + +class VolumesPage(basepage.BasePage): + PARTIAL_URL = 'project/volumes' + VOLUMES_TABLE_NAME_COLUMN = 'Name' + VOLUMES_TABLE_STATUS_COLUMN = 'Status' + + def _get_row_with_volume_name(self, name): + return self.volumes_table.get_row( + self.VOLUMES_TABLE_NAME_COLUMN, name) + + def _get_rows_with_volumes_names(self, names): + return [self.volumes_table.get_row(self.VOLUMES_TABLE_NAME_COLUMN, n) + for n in names] + + @property + def volumes_table(self): + return VolumesTable(self.driver) + + def create_volume(self, volume_name, description=None, + volume_source_type=None, source_name=None, + type=None, volume_size=None, availability_zone=None, + fail_ok=False): + volume_form = self.volumes_table.create_volume() + volume_form.name.text = volume_name + if description is not None: + volume_form.description.text = description + if volume_source_type is not None: + volume_form.volume_source_type.text = volume_source_type + if volume_source_type == 'Image': + volume_form.image_source.text = source_name + if type is not None: + volume_form.type.text = type + if volume_source_type == 'Volume': + volume_form.volume_source.text = source_name + if volume_size is not None: + volume_form.size.value = volume_size + if availability_zone is not None: + 
volume_form.availability_zone.text = availability_zone + volume_form.submit() + if not self.find_message_and_dismiss(messages.INFO): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to create volume {}".format(volume_name) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + "creating volume {}".format(volume_name)) + succ_msg = "Volume {} is successfully created.".format(volume_name) + LOG.info(succ_msg) + return 0, succ_msg + + def delete_volume(self, name, fail_ok=False): + row = self._get_row_with_volume_name(name) + row.mark() + confirm_delete_volumes_form = self.volumes_table.delete_volume() + confirm_delete_volumes_form.submit() + if not self.find_message_and_dismiss(messages.INFO): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to delete volume {}".format(name) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No success message found after " + "deleting volume {}".format(name)) + succ_msg = "Volume {} is successfully deleted.".format(name) + LOG.info(succ_msg) + return 0, succ_msg + + def delete_volume_by_row(self, name): + row = self._get_row_with_volume_name(name) + confirm_delete_volumes_form = self.volumes_table.delete_volume_by_row(row) + confirm_delete_volumes_form.submit() + + def delete_volumes(self, volumes_names): + for volume_name in volumes_names: + self._get_row_with_volume_name(volume_name).mark() + confirm_delete_volumes_form = self.volumes_table.delete_volume() + confirm_delete_volumes_form.submit() + + def edit_volume(self, name, new_name=None, description=None, bootable=None, + fail_ok=False): + row = self._get_row_with_volume_name(name) + volume_edit_form = self.volumes_table.edit_volume(row) + if new_name is not None: + volume_edit_form.name.text = new_name + if description is not None: + volume_edit_form.description.text = description + if bootable is True: + volume_edit_form.bootable.mark() + if bootable is False: + volume_edit_form.bootable.unmark() + volume_edit_form.submit() + if not self.find_message_and_dismiss(messages.INFO): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to edit volume {}".format(name) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + "editing volume {}".format(name)) + succ_msg = "Volume {} is successfully edited.".format(name) + LOG.info(succ_msg) + return 0, succ_msg + + def is_volume_present(self, name): + return bool(self._get_row_with_volume_name(name)) + + def is_volume_status(self, name, status, timeout=VolumeTimeout.STATUS_CHANGE): + def cell_getter(): + row = self._get_row_with_volume_name(name) + return row and row.cells[self.VOLUMES_TABLE_STATUS_COLUMN] + return bool(self.volumes_table.wait_cell_status(cell_getter, status, timeout=timeout)) + + def is_volume_deleted(self, name): + return self.volumes_table.is_row_deleted( + lambda: self._get_row_with_volume_name(name)) + + def are_volumes_deleted(self, volumes_names): + return self.volumes_table.are_rows_deleted( + lambda: self._get_rows_with_volumes_names(volumes_names)) + + def create_volume_snapshot(self, volume_name, snapshot_name, description=None): + from utils.horizon.pages.project.volumes.shotspage import VolumesnapshotsPage + row = self._get_row_with_volume_name(volume_name) + snapshot_form = self.volumes_table.create_snapshot(row) + 
snapshot_form.name.text = snapshot_name + if description is not None: + snapshot_form.description.text = description + snapshot_form.submit() + return VolumesnapshotsPage(self.driver, port=self.port) + + def extend_volume(self, name, new_size, fail_ok=False): + row = self._get_row_with_volume_name(name) + extend_volume_form = self.volumes_table.extend_volume(row) + extend_volume_form.new_size.value = new_size + extend_volume_form.submit() + if not self.find_message_and_dismiss(messages.INFO): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to extend volume {}".format(name) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + "extending volume {}".format(name)) + succ_msg = "Volume {} is successfully extended.".format(name) + LOG.info(succ_msg) + return 0, succ_msg + + def upload_to_image(self, volume_name, image_name, disk_format=None): + row = self._get_row_with_volume_name(volume_name) + upload_volume_form = self.volumes_table.upload_to_image(row) + upload_volume_form.image_name.text = image_name + if disk_format is not None: + upload_volume_form.disk_format.value = disk_format + upload_volume_form.submit() + + def change_volume_type(self, volume_name, type=None, migration_policy=None): + row = self._get_row_with_volume_name(volume_name) + change_volume_type_form = self.volumes_table.change_volume_type(row) + if type is not None: + change_volume_type_form.type.text = type + if migration_policy is not None: + change_volume_type_form.migration_policy.text = migration_policy + change_volume_type_form.submit() + + def launch_as_instance(self, name, instance_name, availability_zone=None, count=None, + boot_source_type=None, create_new_volume=None, + delete_volume_on_instance_delete=None, volume_size=None, + source_name=None, flavor_name=None, network_names=None): + row = self._get_row_with_volume_name(name) + instance_form = self.volumes_table.launch_as_instance(row) + instance_form.fields['name'].text = instance_name + if availability_zone is not None: + instance_form.fields['availability-zone'].text = availability_zone + if count is not None: + instance_form.fields['instance-count'].value = count + instance_form.switch_to(1) + if boot_source_type is not None: + instance_form.fields['boot-source-type'].text = boot_source_type + instance_form._init_tab_fields(1) + if create_new_volume is True: + instance_form.fields['Create New Volume'].click_yes() + if delete_volume_on_instance_delete is True: + instance_form.fields['Delete Volume on Instance Delete'].click_yes() + if delete_volume_on_instance_delete is False: + instance_form.fields['Delete Volume on Instance Delete'].click_no() + if create_new_volume is False: + instance_form.fields['Create New Volume'].click_no() + if volume_size is not None: + instance_form.fields['volume-size'].value = volume_size + if source_name is not None: + instance_form.addelement('Name', source_name) + instance_form.switch_to(2) + instance_form.addelement('Name', flavor_name) + instance_form.switch_to(3) + instance_form.addelements('Network', network_names) + instance_form.submit() + + def attach_volume_to_instance(self, volume, instance, fail_ok=False): + row = self._get_row_with_volume_name(volume) + attach_form = self.volumes_table.manage_attachments(row) + attach_form.attach_instance(instance) + if not self.find_message_and_dismiss(messages.INFO): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + 
err_msg = "Failed to attach volume {}".format(volume) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + "attaching volume {}".format(volume)) + succ_msg = "Volume {} is successfully attached.".format(volume) + LOG.info(succ_msg) + return 0, succ_msg + + def detach_volume_from_instance(self, volume, instance, fail_ok=False): + row = self._get_row_with_volume_name(volume) + attachment_form = self.volumes_table.manage_attachments(row) + detach_form = attachment_form.detach(volume, instance) + detach_form.submit() + if not self.find_message_and_dismiss(messages.SUCCESS): + found_err = self.find_message_and_dismiss(messages.ERROR) + if fail_ok and found_err: + err_msg = "Failed to detach volume {}".format(volume) + LOG.info(err_msg) + return 1, err_msg + else: + raise exceptions.HorizonError("No info message found after " + "detaching volume {}".format(volume)) + succ_msg = "Volume {} is successfully detached.".format(volume) + LOG.info(succ_msg) + return 0, succ_msg + + def get_volume_info(self, volume_name, header): + row = self._get_row_with_volume_name(volume_name) + return row.cells[header].text + + +class VolumeAttachForm(forms.BaseFormRegion): + _attach_to_instance_selector = (By.CSS_SELECTOR, 'div > .themable-select') + _attachments_table_selector = (By.CSS_SELECTOR, 'table[id="attachments"]') + _detach_template = 'tr[data-display="Volume {0} on instance {1}"] button' + + @property + def attachments_table(self): + return self._get_element(*self._attachments_table_selector) + + @property + def instance_selector(self): + src_elem = self._get_element(*self._attach_to_instance_selector) + return forms.ThemableSelectFormFieldRegion( + self.driver, src_elem=src_elem, + strict_options_match=False) + + def detach(self, volume, instance): + detach_button = self.attachments_table.find_element( + By.CSS_SELECTOR, self._detach_template.format(volume, instance)) + detach_button.click() + sleep(2) + return forms.BaseFormRegion(self.driver) + + def attach_instance(self, instance_name): + self.instance_selector.text = instance_name + self.submit() diff --git a/automated-pytest-suite/utils/horizon/pages/settings/__init__.py b/automated-pytest-suite/utils/horizon/pages/settings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/pages/settings/changepasswordpage.py b/automated-pytest-suite/utils/horizon/pages/settings/changepasswordpage.py new file mode 100644 index 0000000..8b43d1f --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/settings/changepasswordpage.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time + +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.regions import forms +from consts.auth import Tenant + + +class ChangepasswordPage(basepage.BasePage): + PARTIAL_URL = 'settings/password' + + _password_form_locator = (by.By.ID, 'change_password_modal') + + CHANGE_PASSWORD_FORM_FIELDS = ("current_password", "new_password", + "confirm_password") + + @property + def password_form(self): + src_elem = self._get_element(*self._password_form_locator) + return forms.FormRegion( + self.driver, src_elem=src_elem, + field_mappings=self.CHANGE_PASSWORD_FORM_FIELDS) + + def change_password(self, current, new): + self.password_form.current_password.text = current + self.password_form.new_password.text = new + self.password_form.confirm_password.text = new + self.password_form.submit() + # NOTE(tsufiev): try to apply the same fix as Tempest did for the + # issue of Keystone Fernet tokens lacking sub-second precision + # (in which case it's possible to log in the same second that + # token was revoked due to password change), see bug 1473567 + time.sleep(1) + + def reset_to_default_password(self, current): + password = Tenant.get('admin_platform')['password'] + if self.topbar.user.text == 'admin': + return self.change_password(current, password) + else: + return self.change_password(current, password) diff --git a/automated-pytest-suite/utils/horizon/pages/settings/usersettingspage.py b/automated-pytest-suite/utils/horizon/pages/settings/usersettingspage.py new file mode 100644 index 0000000..24ffd24 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/pages/settings/usersettingspage.py @@ -0,0 +1,71 @@ +from selenium.webdriver.common import by + +from utils.horizon.pages import basepage +from utils.horizon.pages.settings import changepasswordpage +from utils.horizon.regions import forms + + +class UsersettingsPage(basepage.BasePage): + PARTIAL_URL = 'settings' + DEFAULT_LANGUAGE = "en" + DEFAULT_TIMEZONE = "UTC" + DEFAULT_PAGESIZE = "20" + DEFAULT_LOGLINES = "35" + DEFAULT_SETTINGS = { + "language": DEFAULT_LANGUAGE, + "timezone": DEFAULT_TIMEZONE, + "pagesize": DEFAULT_PAGESIZE, + "loglines": DEFAULT_LOGLINES + } + + SETTINGS_FORM_FIELDS = ( + "language", "timezone", "pagesize", "instance_log_length") + + _settings_form_locator = (by.By.ID, 'user_settings_modal') + _change_password_tab_locator = (by.By.CSS_SELECTOR, + 'a[href*="/settings/password/"]') + + def __init__(self, driver, port=None): + super(UsersettingsPage, self).__init__(driver, port=port) + self._page_title = "User Settings" + + @property + def settings_form(self): + src_elem = self._get_element(*self._settings_form_locator) + return forms.FormRegion( + self.driver, src_elem=src_elem, + field_mappings=self.SETTINGS_FORM_FIELDS) + + @property + def changepassword(self): + return changepasswordpage.ChangepasswordPage(self.driver) + + @property + def change_password_tab(self): + return self._get_element(*self._change_password_tab_locator) + + def change_language(self, lang=DEFAULT_LANGUAGE): + self.settings_form.language.value = lang + self.settings_form.submit() + + def change_timezone(self, timezone=DEFAULT_TIMEZONE): + self.settings_form.timezone.value = timezone + self.settings_form.submit() + + def change_pagesize(self, size=DEFAULT_PAGESIZE): + self.settings_form.pagesize.value = size + self.settings_form.submit() + + def change_loglines(self, lines=DEFAULT_LOGLINES): + self.settings_form.instance_log_length.value = lines + self.settings_form.submit() + + def 
return_to_default_settings(self): + self.change_language() + self.change_timezone() + self.change_pagesize() + self.change_loglines() + + def go_to_change_password_page(self): + self.change_password_tab.click() + return changepasswordpage.ChangepasswordPage(self.driver) diff --git a/automated-pytest-suite/utils/horizon/regions/__init__.py b/automated-pytest-suite/utils/horizon/regions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated-pytest-suite/utils/horizon/regions/bars.py b/automated-pytest-suite/utils/horizon/regions/bars.py new file mode 100644 index 0000000..cb3d5c1 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/regions/bars.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from selenium.webdriver.common import by + +from utils.horizon.regions import baseregion +from utils.horizon.regions import menus + + +class TopBarRegion(baseregion.BaseRegion): + _user_dropdown_menu_locator = (by.By.XPATH, + '//li[@class="dropdown user-menu"]') + _openstack_brand_locator = (by.By.CSS_SELECTOR, 'a[href*="/home/"]') + + _user_dropdown_project_locator = ( + by.By.CSS_SELECTOR, '.nav.navbar-nav') + _header_locator = (by.By.CSS_SELECTOR, 'nav.navbar-fixed-top') + + @property + def user(self): + return self._get_element(*self._user_dropdown_menu_locator) + + @property + def brand(self): + return self._get_element(*self._openstack_brand_locator) + + @property + def header(self): + return self._get_element(*self._header_locator) + + @property + def user_dropdown_menu(self): + src_elem = self._get_element(*self._user_dropdown_menu_locator) + return menus.UserDropDownMenuRegion(self.driver, src_elem) + + @property + def is_logged_in(self): + return self._is_element_visible(*self._user_dropdown_menu_locator) + + @property + def user_dropdown_project(self): + src_elem = self._get_element(*self._user_dropdown_project_locator) + return menus.ProjectDropDownRegion(self.driver, src_elem) diff --git a/automated-pytest-suite/utils/horizon/regions/baseregion.py b/automated-pytest-suite/utils/horizon/regions/baseregion.py new file mode 100644 index 0000000..1828c4f --- /dev/null +++ b/automated-pytest-suite/utils/horizon/regions/baseregion.py @@ -0,0 +1,68 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from utils.horizon import basewebobject +from selenium.common.exceptions import StaleElementReferenceException +from time import sleep + + +class BaseRegion(basewebobject.BaseWebObject): + """Base class for region module + + * there is necessity to override some basic methods for obtaining elements + as in content of regions it is required to do relative searches + + * self.driver cannot be easily replaced with self.src_elem because that + would result in functionality loss, self.driver is WebDriver and + src_elem is WebElement its usage is different. + + * this does not mean that self.src_elem cannot be self.driver + """ + + _default_src_locator = None + + # private methods + def __init__(self, driver, src_elem=None): + super(BaseRegion, self).__init__(driver) + if self._default_src_locator: + root = src_elem or driver + src_elem = root.find_element(*self._default_src_locator) + + self.src_elem = src_elem or driver + + # variable for storing names of dynamic properties and + # associated 'getters' - meaning method that are supplying + # regions or web elements + self._dynamic_properties = {} + + def __getattr__(self, name): + # It is not possible to create property bounded just to object + # and not class at runtime, therefore it is necessary to + # override __getattr__ and make fake 'properties' by storing them in + # the protected attribute _dynamic_attributes and returning result + # of the method associated with the specified attribute. + + # This way the feeling of having regions accessed as 'properties' + # is created, which is one of the requirement of page object pattern. + try: + return self._dynamic_properties[name] + except KeyError: + msg = "'{0}' object has no attribute '{1}'" + raise AttributeError(msg.format(type(self).__name__, name)) + + def _get_element(self, *locator): + return self.src_elem.find_element(*locator) + + def _get_elements(self, *locator): + try: + return self.src_elem.find_elements(*locator) + except StaleElementReferenceException: + raise diff --git a/automated-pytest-suite/utils/horizon/regions/exceptions.py b/automated-pytest-suite/utils/horizon/regions/exceptions.py new file mode 100644 index 0000000..4951f9d --- /dev/null +++ b/automated-pytest-suite/utils/horizon/regions/exceptions.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class BaseRegionException(Exception): + """Base exception class for region module.""" + pass + + +class UnknownFormFieldTypeException(BaseRegionException): + + def __str__(self): + return "No FormField class matched the scope of web content." diff --git a/automated-pytest-suite/utils/horizon/regions/forms.py b/automated-pytest-suite/utils/horizon/regions/forms.py new file mode 100644 index 0000000..1a27826 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/regions/forms.py @@ -0,0 +1,588 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +from selenium.common import exceptions +from selenium.webdriver.common import by +import selenium.webdriver.support.ui as Support +import six + +from utils.horizon.regions import baseregion +from utils.horizon.regions import menus +from time import sleep + + +class FieldFactory(baseregion.BaseRegion): + """Factory for creating form field objects.""" + + FORM_FIELDS_TYPES = set() + _element_locator_str_prefix = 'div.form-group' + + def __init__(self, driver, src_elem=None): + super(FieldFactory, self).__init__(driver, src_elem) + + def fields(self): + for field_cls in self.FORM_FIELDS_TYPES: + locator = (by.By.CSS_SELECTOR, + '%s %s' % (self._element_locator_str_prefix, + field_cls._element_locator_str_suffix)) + elements = super(FieldFactory, self)._get_elements(*locator) + for element in elements: + yield field_cls(self.driver, src_elem=element) + + @classmethod + def register_field_cls(cls, field_class, base_classes=None): + """Register new field class. + + Add new field class and remove all base classes from the set of + registered classes as they should not be in. + """ + cls.FORM_FIELDS_TYPES.add(field_class) + cls.FORM_FIELDS_TYPES -= set(base_classes) + + +class MetaBaseFormFieldRegion(type): + """Register form field class in FieldFactory.""" + + def __init__(cls, name, bases, dct): + FieldFactory.register_field_cls(cls, bases) + super(MetaBaseFormFieldRegion, cls).__init__(name, bases, dct) + + +@six.add_metaclass(MetaBaseFormFieldRegion) +class BaseFormFieldRegion(baseregion.BaseRegion): + """Base class for form fields classes.""" + + _label_locator = None + _element_locator = None + + @property + def label(self): + return self._get_element(*self._label_locator) + + @property + def element(self): + return self.src_elem + + @property + def name(self): + return self.element.get_attribute('name') \ + or self.element.get_attribute('id') + # some region in create instance form don't have attribute `name`, use `id` instead + + def is_required(self): + classes = self.driver.get_attribute('class') + return 'required' in classes + + def is_displayed(self): + return self.element.is_displayed() + + +class CheckBoxMixin(object): + + @property + def label(self): + id_attribute = self.element.get_attribute('id') + return self.element.find_element( + by.By.XPATH, '../..//label[@for="{}"]'.format(id_attribute)) + + def is_marked(self): + return self.element.is_selected() + + def mark(self): + if not self.is_marked(): + self.label.click() + + def unmark(self): + if self.is_marked(): + self.label.click() + + +class CheckBoxFormFieldRegion(CheckBoxMixin, BaseFormFieldRegion): + """Checkbox field.""" + + _element_locator_str_suffix = 'input[type=checkbox]' + + +class RadioCheckBoxFormFieldRegion(CheckBoxMixin, BaseFormFieldRegion): + """Checkbox field.""" + + _element_locator_str_suffix = 'input[type=radio]' + + +class ChooseFileFormFieldRegion(BaseFormFieldRegion): + """Choose file field.""" + + _element_locator_str_suffix = 'input[type=file]' + + def choose(self, path): + self.element.send_keys(path) + + +class BaseTextFormFieldRegion(BaseFormFieldRegion): + + 
_element_locator = None + + @property + def text(self): + return self.element.text + + @text.setter + def text(self, text): + self._fill_field_element(text, self.element) + + +class TextInputFormFieldRegion(BaseTextFormFieldRegion): + """Text input box.""" + + _element_locator_str_suffix = \ + 'input[type=text], input[type=None]' # 'div > input[type=text], div > input[type=None]' + + +class PasswordInputFormFieldRegion(BaseTextFormFieldRegion): + """Password text input box.""" + + _element_locator_str_suffix = 'input[type=password]' + + +class EmailInputFormFieldRegion(BaseTextFormFieldRegion): + """Email text input box.""" + + _element_locator_str_suffix = 'input[type=email]' + + +class TextAreaFormFieldRegion(BaseTextFormFieldRegion): + """Multi-line text input box.""" + + _element_locator_str_suffix = 'textarea' + + +class IntegerFormFieldRegion(BaseFormFieldRegion): + """Integer input box.""" + + _element_locator_str_suffix = 'input[type=number]' + + @property + def value(self): + return self.element.get_attribute("value") + + @value.setter + def value(self, value): + self._fill_field_element(value, self.element) + + +class SelectFormFieldRegion(BaseFormFieldRegion): + """Select box field.""" + + _element_locator_str_suffix = 'select.form-control' + + def is_displayed(self): + return self.element._el.is_displayed() + + @property + def element(self): + return Support.Select(self.src_elem) + + @property + def values(self): + results = [] + for option in self.element.all_selected_options: + results.append(option.get_attribute('value')) + return results + + @property + def options(self): + results = collections.OrderedDict() + for option in self.element.options: + results[option.get_attribute('value')] = option.text + return results + + @property + def name(self): + return self.element._el.get_attribute('name') \ + or self.element._el.get_attribute('id') + + @property + def text(self): + return self.element.first_selected_option.text + + @text.setter + def text(self, text): + self.element.select_by_visible_text(text) + + @property + def value(self): + return self.element.first_selected_option.get_attribute('value') + + @value.setter + def value(self, value): + self.element.select_by_value(value) + + +class ThemableSelectFormFieldRegion(BaseFormFieldRegion): + """Select box field.""" + + _element_locator_str_suffix = 'div > .themable-select' + _raw_select_locator = (by.By.CSS_SELECTOR, 'select') + _selected_label_locator = (by.By.CSS_SELECTOR, '.dropdown-title') + _dropdown_menu_locator = (by.By.CSS_SELECTOR, 'ul.dropdown-menu > li > a') + + def __init__(self, driver, strict_options_match=False, **kwargs): + super(ThemableSelectFormFieldRegion, self).__init__( + driver, **kwargs) + self.strict_options_match = strict_options_match + + @property + def hidden_element(self): + elem = self._get_element(*self._raw_select_locator) + return SelectFormFieldRegion(self.driver, src_elem=elem) + + @property + def name(self): + return self.hidden_element.name + + @property + def text(self): + return self._get_element(*self._selected_label_locator).text.strip() + + @property + def value(self): + return self.hidden_element.value + + @property + def options(self): + return self._get_elements(*self._dropdown_menu_locator) + + @text.setter + def text(self, text): + if text != self.text: + self.src_elem.click() + for option in self.options: + if self.strict_options_match: + match = text == option.text.strip() + else: + match = option.text.startswith(text) + if match: + option.click() + return + raise 
ValueError('Widget "%s" does have an option with text "%s"' + % (self.name, text)) + + @value.setter + def value(self, value): + if value != self.value: + self.src_elem.click() + for option in self.options: + if value == option.get_attribute('data-select-value'): + option.click() + return + raise ValueError('Widget "%s" does have an option with value "%s"' + % (self.name, value)) + + +class YesOrNoFormFieldRegion(BaseFormFieldRegion): + _element_locator_str_suffix = 'div.btn-group' + _buttons_locator = (by.By.CSS_SELECTOR, 'label') + _name_locator = (by.By.XPATH, '..//label/span') + + def click_yes(self): + button = self._get_elements(*self._buttons_locator)[0] + button.click() + + def click_no(self): + button = self._get_elements(*self._buttons_locator)[1] + button.click() + + @property + def name(self): + name_element = self._get_element(*self._name_locator) + return name_element.text + + +class BaseFormRegion(baseregion.BaseRegion): + """Base class for forms.""" + + _submit_locator = (by.By.CSS_SELECTOR, '*.btn.btn-primary') + _submit_danger_locator = (by.By.CSS_SELECTOR, '*.btn.btn-danger') + _cancel_locator = (by.By.CSS_SELECTOR, '*.btn.cancel') + _default_form_locator = (by.By.CSS_SELECTOR, 'div.modal-dialog') + + def __init__(self, driver, src_elem=None): + # In most cases forms can be located through _default_form_locator, + # so specifying source element can be skipped. + if src_elem is None: + # fake self.src_elem must be set up in order self._get_element work + self.src_elem = driver + # bind the topmost modal form in a modal stack + src_elem = self._get_elements(*self._default_form_locator)[-1] + super(BaseFormRegion, self).__init__(driver, src_elem) + + @property + def _submit_element(self): + try: + submit_element = self._get_element(*self._submit_locator) + except exceptions.NoSuchElementException: + submit_element = self._get_element(*self._submit_danger_locator) + return submit_element + + def submit(self): + self._submit_element.click() + self.wait_till_spinner_disappears() + + @property + def _cancel_element(self): + return self._get_element(*self._cancel_locator) + + def cancel(self): + self._cancel_element.click() + + +class FormRegion(BaseFormRegion): + """Standard form.""" + FIELDS = None + _header_locator = (by.By.CSS_SELECTOR, 'div.modal-header > h3') + _side_info_locator = (by.By.CSS_SELECTOR, 'div.right') + _fields_locator = (by.By.CSS_SELECTOR, 'fieldset') + + # private methods + def __init__(self, driver, src_elem=None, field_mappings=None): + super(FormRegion, self).__init__(driver, src_elem) + self.field_mappings = self._prepare_mappings(field_mappings) + self._init_form_fields() + + def _prepare_mappings(self, field_mappings): + if isinstance(field_mappings, tuple): + return {item: item for item in field_mappings} + else: + return field_mappings + + # protected methods + def _init_form_fields(self): + self.fields_src_elem = self._get_element(*self._fields_locator) + self.FIELDS = self._get_form_fields() + for accessor_name, accessor_expr in self.field_mappings.items(): + if isinstance(accessor_expr, six.string_types): + try: + self._dynamic_properties[accessor_name] = self.FIELDS[accessor_expr] + except: + self._dynamic_properties[accessor_name] = None + else: # it is a class + self._dynamic_properties[accessor_name] = accessor_expr( + self.driver) + + def _get_form_fields(self): + factory = FieldFactory(self.driver, self.fields_src_elem) + try: + self._turn_off_implicit_wait() + form_fields = {} + for field in factory.fields(): + if 
'ThemableSelectFormFieldRegion' in str(field): + # ThemableSelectFormFieldRegion is a special SelectFormFieldRegion + form_fields[field.name] = field + elif not field.name in form_fields: + form_fields[field.name] = field + return form_fields + finally: + self._turn_on_implicit_wait() + + def set_field_values(self, data): + """Set fields values + + data - {field_name: field_value, field_name: field_value ...} + """ + for field_name in data: + field = getattr(self, field_name, None) + # Field form does not exist + if field is None: + raise AttributeError("Unknown form field name.") + value = data[field_name] + # if None - default value is left in field + if value is not None: + # all text fields + if hasattr(field, "text"): + field.text = value + # file upload field + elif hasattr(field, "path"): + field.path = value + # integers fields + elif hasattr(field, "value"): + field.value = value + + # properties + @property + def header(self): + """Form header.""" + return self._get_element(*self._header_locator) + + @property + def sideinfo(self): + """Right part of form, usually contains description.""" + return self._get_element(*self._side_info_locator) + + @property + def fields(self): + """List of all fields that form contains.""" + return self._get_form_fields() + + +class TabbedFormRegion(FormRegion): + """Forms that are divided with tabs. + + As example is taken form under the + Project/Network/Networks/Create Network, on initialization form needs + to have form field names divided into tuples, that represents the tabs + and the fields located under them. + + Usage: + + form_field_names = (("network_name", "admin_state"), + ("create_subnet", "subnet_name", "network_address", + "ip_version", "gateway_ip", "disable_gateway"), + ("enable_dhcp", "allocation_pools", "dns_name_servers", + "host_routes")) + form = TabbedFormRegion(self.driver, None, form_field_names) + form.network_name.text = "test_network_name" + """ + _submit_locator = (by.By.CSS_SELECTOR, '*.btn.btn-primary[type=submit]') + _side_info_locator = (by.By.CSS_SELECTOR, "td.help_text") + + def __init__(self, driver, field_mappings=None, default_tab=0): + self.current_tab = default_tab + super(TabbedFormRegion, self).__init__( + driver, field_mappings=field_mappings) + + def _prepare_mappings(self, field_mappings): + return [super(TabbedFormRegion, self)._prepare_mappings(tab_mappings) + for tab_mappings in field_mappings] + + def _init_form_fields(self): + self.switch_to(self.current_tab) + + def _init_tab_fields(self, tab_index): + self.src_elem = self.driver + fieldsets = self._get_elements(*self._fields_locator) + self.fields_src_elem = fieldsets[tab_index] + # self.src_elem = fieldsets[tab_index] + self.FIELDS = self._get_form_fields() + current_tab_mappings = self.field_mappings[tab_index] + for accessor_name, accessor_expr in current_tab_mappings.items(): + if isinstance(accessor_expr, six.string_types): + self._dynamic_properties[accessor_name] = self.FIELDS[accessor_expr] + else: # it is a class + self._dynamic_properties[accessor_name] = accessor_expr( + self.driver) + + def switch_to(self, tab_index=0): + self.tabs.switch_to(index=tab_index) + self._init_tab_fields(tab_index) + + @property + def tabs(self): + return menus.TabbedMenuRegion(self.driver, + src_elem=self.src_elem) + + +class DateFormRegion(BaseFormRegion): + """Form that queries data to table that is regularly below the form. + + A typical example is located on Project/Compute/Overview page. 
+ """ + + _from_field_locator = (by.By.CSS_SELECTOR, 'input#id_start') + _to_field_locator = (by.By.CSS_SELECTOR, 'input#id_end') + + @property + def from_date(self): + return self._get_element(*self._from_field_locator) + + @property + def to_date(self): + return self._get_element(*self._to_field_locator) + + def query(self, start, end): + self._set_from_field(start) + self._set_to_field(end) + self.submit() + + def _set_from_field(self, value): + self._fill_field_element(value, self.from_date) + + def _set_to_field(self, value): + self._fill_field_element(value, self.to_date) + + +class MetadataFormRegion(BaseFormRegion): + + _input_fields = (by.By.CSS_SELECTOR, 'div.input-group') + _custom_input_field = (by.By.XPATH, "//input[@name='customItem']") + _custom_input_button = (by.By.CSS_SELECTOR, 'span.input-group-btn > .btn') + _submit_locator = (by.By.CSS_SELECTOR, '.modal-footer > .btn.btn-primary') + _cancel_locator = (by.By.CSS_SELECTOR, '.modal-footer > .btn.btn-default') + + def _form_getter(self): + return self.driver.find_element(*self._default_form_locator) + + @property + def custom_field_value(self): + return self._get_element(*self._custom_input_field) + + @property + def add_button(self): + return self._get_element(*self._custom_input_button) + + def add_custom_field(self, field_name, field_value): + self.custom_field_value.send_keys(field_name) + sleep(5) + self.add_button.click() + for div in self._get_elements(*self._input_fields): + if div.text in field_name: + field = div.find_element(by.By.CSS_SELECTOR, 'input') + if not hasattr(self, field_name): + self._dynamic_properties[field_name] = field + self.set_field_value(field_name, field_value) + + def set_field_value(self, field_name, field_value): + if hasattr(self, field_name): + field = getattr(self, field_name) + field.send_keys(field_value) + else: + raise AttributeError("Unknown form field '{}'.".format(field_name)) + + def wait_till_spinner_disappears(self): + # No spinner is invoked after the 'Save' button click + # Will wait till the form itself disappears + try: + self.wait_till_element_disappears(self._form_getter) + except exceptions.StaleElementReferenceException: + # The form might be absent already by the time the first check + # occurs. So just suppress the exception here. + pass + + +class ItemTextDescription(baseregion.BaseRegion): + + _separator_locator = (by.By.CSS_SELECTOR, 'dl.dl-horizontal') + _key_locator = (by.By.CSS_SELECTOR, 'dt') + _value_locator = (by.By.CSS_SELECTOR, 'dd') + + def __init__(self, driver, src=None): + super(ItemTextDescription, self).__init__(driver, src) + + def get_content(self): + keys = [] + values = [] + for section in self._get_elements(*self._separator_locator): + keys.extend([x.text for x in + section.find_elements(*self._key_locator)]) + values.extend([x.text for x in + section.find_elements(*self._value_locator)]) + return dict(zip(keys, values)) diff --git a/automated-pytest-suite/utils/horizon/regions/menus.py b/automated-pytest-suite/utils/horizon/regions/menus.py new file mode 100644 index 0000000..e1bef8a --- /dev/null +++ b/automated-pytest-suite/utils/horizon/regions/menus.py @@ -0,0 +1,236 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from selenium.common import exceptions +from selenium.webdriver.common import by +from utils.horizon.regions import baseregion +import time + + +class DropDownMenuRegion(baseregion.BaseRegion): + """Drop down menu region.""" + + _menu_container_locator = (by.By.CSS_SELECTOR, 'ul.dropdown-menu') + _menu_items_locator = (by.By.CSS_SELECTOR, + 'ul.dropdown-menu > li > *') + _dropdown_locator = (by.By.CSS_SELECTOR, '.dropdown') + _active_cls = 'selenium-active' + + @property + def menu_items(self): + self.src_elem.click() + menu_items = self._get_elements(*self._menu_items_locator) + return menu_items + + +class UserDropDownMenuRegion(DropDownMenuRegion): + """Drop down menu located in the right side of the topbar. + + This menu contains links to settings and help. + """ + _settings_link_locator = (by.By.CSS_SELECTOR, + 'a[href*="/settings/"]') + _help_link_locator = (by.By.CSS_SELECTOR, + 'ul#editor_list li:nth-of-type(2) > a') + _logout_link_locator = (by.By.CSS_SELECTOR, + 'a[href*="/auth/logout/"]') + _rc_v3_link_locator = (by.By.CSS_SELECTOR, + 'a[href*="api_access/openrc/"]') + + def _theme_picker_locator(self, theme_name): + return (by.By.CSS_SELECTOR, + '.theme-picker-item[data-theme="%s"]' % theme_name) + + @property + def settings_link(self): + return self._get_element(*self._settings_link_locator) + + @property + def help_link(self): + return self._get_element(*self._help_link_locator) + + @property + def logout_link(self): + return self._get_element(*self._logout_link_locator) + + @property + def rc_v2_link(self): + return self._get_element(*self._rc_v2_link_locator) + + @property + def rc_v3_link(self): + return self._get_element(*self._rc_v3_link_locator) + + def click_on_settings(self): + self.src_elem.click() + self.settings_link.click() + + def click_on_help(self): + self.src_elem.click() + self.help_link.click() + + def click_on_rc_v2(self): + self.src_elem.click() + self.rc_v2_link.click() + + def click_on_rc_v3(self): + self.src_elem.click() + self.rc_v3_link.click() + + def choose_theme(self, theme_name): + self.open() + self.theme_picker_link(theme_name).click() + + def click_on_logout(self): + self.src_elem.click() + self.logout_link.click() + + +class TabbedMenuRegion(baseregion.BaseRegion): + + _tab_locator = (by.By.CSS_SELECTOR, 'a') + _default_src_locator = (by.By.XPATH, '//ul[@role="tablist"]') + + def switch_to(self, index=0): + self._get_elements(*self._tab_locator)[index].click() + + +class InstancesTabbedMenuRegion(TabbedMenuRegion): + + _tab_locator = (by.By.XPATH, '//li[starts-with(@class,"nav-item ng-scope")]') + _default_src_locator = (by.By.XPATH, '//ul[@class="nav nav-pills nav-stacked"]') + + +class ProjectDropDownRegion(DropDownMenuRegion): + _menu_items_locator = ( + by.By.CSS_SELECTOR, 'ul.context-selection li > a') + + def click_on_project(self, name): + for item in self.menu_items: + if item.text == name: + item.click() + break + else: + raise exceptions.NoSuchElementException( + "Not found element with text: %s" % name) + + +class MembershipMenuRegion(baseregion.BaseRegion): + _available_members_locator = ( + by.By.CSS_SELECTOR, 
'ul.available_members > ul.btn-group') + + _allocated_members_locator = ( + by.By.CSS_SELECTOR, 'ul.members > ul.btn-group') + + _add_remove_member_sublocator = ( + by.By.CSS_SELECTOR, 'li > a[href="#add_remove"]') + + _member_name_sublocator = ( + by.By.CSS_SELECTOR, 'li.member > span.display_name') + + _member_roles_widget_sublocator = (by.By.CSS_SELECTOR, 'li.role_options') + + _member_roles_widget_open_subsublocator = (by.By.CSS_SELECTOR, 'a.btn') + + _member_roles_widget_roles_subsublocator = ( + by.By.CSS_SELECTOR, 'ul.role_dropdown > li') + + def _get_member_name(self, element): + return element.find_element(*self._member_name_sublocator).text + + @property + def available_members(self): + return {self._get_member_name(el): el for el in + self._get_elements(*self._available_members_locator)} + + @property + def allocated_members(self): + return {self._get_member_name(el): el for el in + self._get_elements(*self._allocated_members_locator)} + + def allocate_member(self, name, available_members=None): + # NOTE(tsufiev): available_members here (and allocated_members below) + # are meant to be used for performance optimization to reduce the + # amount of calls to selenium by reusing still valid element reference + if available_members is None: + available_members = self.available_members + + available_members[name].find_element( + *self._add_remove_member_sublocator).click() + + def deallocate_member(self, name, allocated_members=None): + if allocated_members is None: + allocated_members = self.allocated_members + + allocated_members[name].find_element( + *self._add_remove_member_sublocator).click() + + def _get_member_roles_widget(self, name, allocated_members=None): + if allocated_members is None: + allocated_members = self.allocated_members + + return allocated_members[name].find_element( + *self._member_roles_widget_sublocator) + + def _get_member_all_roles(self, name, allocated_members=None): + roles_widget = self._get_member_roles_widget(name, allocated_members) + return roles_widget.find_elements( + *self._member_roles_widget_roles_subsublocator) + + @staticmethod + def _is_role_selected(role): + return 'selected' in role.get_attribute('class').split() + + @staticmethod + def _get_hidden_text(role): + return role.get_attribute('textContent') + + def get_member_available_roles(self, name, allocated_members=None, + strip=True): + roles = self._get_member_all_roles(name, allocated_members) + return [(self._get_hidden_text(role).strip() if strip else role) + for role in roles if not self._is_role_selected(role)] + + def get_member_allocated_roles(self, name, allocated_members=None, + strip=True): + roles = self._get_member_all_roles(name, allocated_members) + return [(self._get_hidden_text(role).strip() if strip else role) + for role in roles if self._is_role_selected(role)] + + def open_member_roles_dropdown(self, name, allocated_members=None): + widget = self._get_member_roles_widget(name, allocated_members) + button = widget.find_element( + *self._member_roles_widget_open_subsublocator) + button.click() + + def _switch_member_roles(self, name, roles2toggle, method, + allocated_members=None): + self.open_member_roles_dropdown(name, allocated_members) + roles = method(name, allocated_members, False) + roles2toggle = set(roles2toggle) + for role in roles: + role_name = role.text.strip() + if role_name in roles2toggle: + role.click() + roles2toggle.remove(role_name) + if not roles2toggle: + break + + def allocate_member_roles(self, name, roles2add, allocated_members=None): + 
self._switch_member_roles( + name, roles2add, self.get_member_available_roles, + allocated_members=allocated_members) + + def deallocate_member_roles(self, name, roles2remove, + allocated_members=None): + self._switch_member_roles( + name, roles2remove, self.get_member_allocated_roles, + allocated_members=allocated_members) diff --git a/automated-pytest-suite/utils/horizon/regions/messages.py b/automated-pytest-suite/utils/horizon/regions/messages.py new file mode 100644 index 0000000..3d5df4e --- /dev/null +++ b/automated-pytest-suite/utils/horizon/regions/messages.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from selenium.common.exceptions import NoSuchElementException +from selenium.webdriver.common import by + +from utils.horizon.regions import baseregion + +ERROR = 'alert-danger' +INFO = 'alert-info' +SUCCESS = 'alert-success' + + +class MessageRegion(baseregion.BaseRegion): + _close_locator = (by.By.CSS_SELECTOR, 'a.close') + + def _msg_locator(self, level): + return (by.By.CSS_SELECTOR, 'div.alert.%s' % level) + + def __init__(self, driver, level=SUCCESS): + self._default_src_locator = self._msg_locator(level) + # NOTE(tsufiev): we cannot use self._turn_off_implicit_wait() at this + # point, because the instance is not initialized by ancestor's __init__ + driver.implicitly_wait(0.5) + try: + super(MessageRegion, self).__init__(driver) + except NoSuchElementException: + self.src_elem = None + finally: + self._turn_on_implicit_wait() + + def exists(self): + return self._is_element_displayed(self.src_elem) + + def close(self): + self._get_element(*self._close_locator).click() diff --git a/automated-pytest-suite/utils/horizon/regions/tables.py b/automated-pytest-suite/utils/horizon/regions/tables.py new file mode 100644 index 0000000..3b773e1 --- /dev/null +++ b/automated-pytest-suite/utils/horizon/regions/tables.py @@ -0,0 +1,368 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
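The MessageRegion defined above is how test cases verify that a Horizon operation produced the expected alert banner. A minimal sketch of that pattern follows (illustrative only, not part of the patch; it assumes driver is an authenticated WebDriver already on the relevant Horizon page):

    from utils.horizon.regions import messages

    def assert_operation_succeeded(driver):
        # Look for an 'alert-success' banner; MessageRegion leaves src_elem
        # as None when no matching alert appears within its short wait.
        banner = messages.MessageRegion(driver, level=messages.SUCCESS)
        assert banner.exists(), 'expected a success message after submit'
        banner.close()  # dismiss it so later checks see fresh messages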
+ +import functools + +from selenium.common import exceptions +from selenium.webdriver.common import by + +from utils.horizon.regions import baseregion +NORMAL_COLUMN_CLASS = 'normal_column' + + +class RowRegion(baseregion.BaseRegion): + """Classic table row.""" + + _add_locator = (by.By.XPATH, ".//action-list") + _cell_locator = (by.By.CSS_SELECTOR, 'td') + _row_checkbox_locator = ( + by.By.CSS_SELECTOR, + 'td .themable-checkbox [type="checkbox"] + label' + ) + + def __init__(self, driver, src_elem, column_names): + self.column_names = column_names + super(RowRegion, self).__init__(driver, src_elem) + + @property + def cells(self): + try: + elements = self._get_elements(*self._cell_locator) + except exceptions.StaleElementReferenceException: + raise + return {column_name: elements[i] + for i, column_name in enumerate(self.column_names)} + + def mark(self): + chck_box = self._get_element(*self._row_checkbox_locator) + chck_box.click() + + def add(self): + add_btn = self._get_element(*self._add_locator) + add_btn.click() + + +class TableRegion(baseregion.BaseRegion): + """Basic class representing table object.""" + name = None + + _heading_locator = (by.By.CSS_SELECTOR, 'h3.table_title') + _columns_names_locator = (by.By.CSS_SELECTOR, 'thead > tr > th') + _footer_locator = (by.By.CSS_SELECTOR, 'tfoot > tr > td > span') + _rows_locator = (by.By.CSS_SELECTOR, 'tbody > tr') + _empty_table_locator = (by.By.CSS_SELECTOR, 'tbody > tr.empty') + _search_field_locator = (by.By.CSS_SELECTOR, + 'div.table_search input.form-control') + _search_button_locator = (by.By.CSS_SELECTOR, + 'div.table_search > button') + _search_option_locator = (by.By.CSS_SELECTOR, + 'div.table_search > .themable-select') + marker_name = 'marker' + prev_marker_name = 'prev_marker' + + def _table_locator(self, table_name): + return by.By.CSS_SELECTOR, 'table#%s' % table_name + + @property + def _next_locator(self): + return by.By.CSS_SELECTOR, 'a[href^="?%s"]' % self.marker_name + + @property + def _prev_locator(self): + return by.By.CSS_SELECTOR, 'a[href^="?%s"]' % self.prev_marker_name + + def _search_menu_value_locator(self, value): + return (by.By.CSS_SELECTOR, + 'ul.dropdown-menu a[data-select-value="%s"]' % value) + + def __init__(self, driver, src_element=None): + if not src_element: + self._default_src_locator = self._table_locator(self.__class__.name) + super(TableRegion, self).__init__(driver) + else: + super(TableRegion, self).__init__(driver, src_elem=src_element) + + @property + def heading(self): + return self._get_element(*self._heading_locator) + + @property + def rows(self): + if self._is_element_present(*self._empty_table_locator): + return [] + else: + return self._get_rows() + + @property + def column_names(self): + names = [] + for element in self._get_elements(*self._columns_names_locator): + names.append(element.text) + return names + + @property + def footer(self): + return self._get_element(*self._footer_locator) + + def filter(self, value): + self._set_search_field(value) + self._click_search_btn() + + def set_filter_value(self, value): + search_menu = self._get_element(*self._search_option_locator) + search_menu.click() + item_locator = self._search_menu_value_locator(value) + search_menu.find_element(*item_locator).click() + + def get_row(self, column_name, text, exact_match=True): + """Get row that contains specified text in specified column. 
+ + In case exact_match is set to True, text contained in row must equal + searched text, otherwise occurrence of searched text in the column + text will result in row match. + """ + def get_text(element): + # print(element.text) + # text = element.text('data-selenium') + return element.text + + for row in self.rows: + try: + cell = row.cells[column_name] + if exact_match and text == get_text(cell): + return row + if not exact_match and text in get_text(cell): + return row + # NOTE(tsufiev): if a row was deleted during iteration + except exceptions.StaleElementReferenceException: + pass + return None + + def _set_search_field(self, value): + srch_field = self._get_element(*self._search_field_locator) + srch_field.clear() + srch_field.send_keys(value) + + def _click_search_btn(self): + btn = self._get_element(*self._search_button_locator) + btn.click() + + def _get_rows(self, *args): + return [RowRegion(self.driver, elem, self.column_names) + for elem in self._get_elements(*self._rows_locator)] + + def _is_row_deleted(self, evaluator): + def predicate(driver): + if self._is_element_present(*self._empty_table_locator): + return True + with self.waits_disabled(): + return evaluator() + try: + self._wait_until(predicate) + except exceptions.TimeoutException: + return False + except IndexError: + return True + return True + + def is_row_deleted(self, row_getter): + return self._is_row_deleted( + lambda: not self._is_element_displayed(row_getter())) + + def are_rows_deleted(self, rows_getter): + return self._is_row_deleted( + lambda: all([not self._is_element_displayed(row) for row + in rows_getter()])) + + def wait_cell_status(self, cell_getter, statuses, timeout=None): + if not isinstance(statuses, (list, tuple)): + statuses = (statuses,) + try: + return self._wait_till_text_present_in_element(cell_getter, + statuses, timeout=timeout) + except: # exceptions.TimeoutException: + return False + + def is_next_link_available(self): + try: + self._turn_off_implicit_wait() + return self._is_element_visible(*self._next_locator) + finally: + self._turn_on_implicit_wait() + + def is_prev_link_available(self): + try: + self._turn_off_implicit_wait() + return self._is_element_visible(*self._prev_locator) + finally: + self._turn_on_implicit_wait() + + def turn_next_page(self): + if self.is_next_link_available(): + lnk = self._get_element(*self._next_locator) + lnk.click() + + def turn_prev_page(self): + if self.is_prev_link_available(): + lnk = self._get_element(*self._prev_locator) + lnk.click() + + def assert_definition(self, expected_table_definition, sorting=False): + """Checks that actual table is expected one. + + Items to compare: 'next' and 'prev' links, count of rows and names of + elements in list + :param expected_table_definition: expected values (dictionary) + :param sorting: boolean arg specifying whether to sort actual names + :return: + """ + names = [row.cells['name'].text for row in self.rows] + if sorting: + names.sort() + actual_table = {'Next': self.is_next_link_available(), + 'Prev': self.is_prev_link_available(), + 'Count': len(self.rows), + 'Names': names} + self.assertDictEqual(actual_table, expected_table_definition) + + +def bind_table_action(action_name, attribute_search='id'): + """Decorator to bind table region method to an actual table action button. + + Many table actions when started (by clicking a corresponding button + in UI) lead to some form showing up. To further interact with this form, + a Python/ Selenium wrapper needs to be created for it. 
It is very + convenient to return this newly created wrapper in the same method that + initiates clicking an actual table action button. Binding the method to a + button is performed behind the scenes in this decorator. + + .. param:: action_name + + Part of the action button id which is specific to action itself. It + is safe to use action `name` attribute from the dashboard tables.py + code. + """ + _actions_locator = (by.By.CSS_SELECTOR, 'div.table_actions > button,' + 'delete-image-selected > button,' + 'actions > action-list > button,' + 'div.table_actions > a') + + def decorator(method): + @functools.wraps(method) + def wrapper(table): + actions = table._get_elements(*_actions_locator) + action_element = None + for action in actions: + target_action_id = '%s__action_%s' % (table.name, action_name) + if action.get_attribute(attribute_search).endswith(action_name): + action_element = action + break + if action_element is None: + msg = "Could not bind method '%s' to action control '%s'" % ( + method.__name__, action_name) + raise ValueError(msg) + return method(table, action_element) + return wrapper + return decorator + + +def bind_row_action(action_name, attribute_search='id', secondary_locator_index=None): + """A decorator to bind table region method to an actual row action button. + + Many table actions when started (by clicking a corresponding button + in UI) lead to some form showing up. To further interact with this form, + a Python/ Selenium wrapper needs to be created for it. It is very + convenient to return this newly created wrapper in the same method that + initiates clicking an actual action button. Row action could be + either primary (if its name is written right away on row action + button) or secondary (if its name is inside of a button drop-down). Binding + the method to a button and toggling the button drop-down open (in case + a row action is secondary) is performed behind the scenes in this + decorator. + + .. param:: action_name + + Part of the action button id which is specific to action itself. It + is safe to use action `name` attribute from the dashboard tables.py + code. + + .. param:: attribute_search + + Attribute that is searched for to find action element. By default it + looks for id but another identifying attribute can be specified. + + .. param:: secondary_locator_index + + Used to look for nth child of 'ul' when all children have the + identical attributes. + """ + # NOTE(tsufiev): button tag could be either or