From 120bd48006418ff7aa52ce7624fcdeb2bb8df505 Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Sat, 24 Feb 2024 11:32:46 -0800
Subject: [PATCH] Retire TripleO: remove repo content

The TripleO project is retiring
- https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the content of this project repo.

Change-Id: I2fcd63ee46cf8e3651fb997e414a1a556f2b2455
---
 .coveragerc | 7 -
 .gitignore | 58 -
 .mailmap | 3 -
 .pre-commit-config.yaml | 33 -
 .pylintrc | 55 -
 .stestr.conf | 3 -
 CONTRIBUTING.rst | 16 -
 HACKING.rst | 4 -
 LICENSE | 176 -
 README.rst | 26 +-
 bindep.txt | 38 -
 container-images/ceph.j2 | 55 -
 .../container_image_prepare_defaults.yaml | 61 -
 container-images/kolla/barbican-base/sudoers | 1 -
 container-images/kolla/base/httpd_setup.sh | 20 -
 container-images/kolla/base/set_configs.py | 434 ---
 container-images/kolla/base/start.sh | 19 -
 container-images/kolla/base/sudoers | 18 -
 container-images/kolla/base/uid_gid_manage.sh | 126 -
 .../kolla/cinder-backup/cinder-backup-sudoers | 1 -
 .../kolla/cinder-backup/extend_start.sh | 5 -
 .../kolla/cinder-volume/cinder-volume-sudoers | 1 -
 .../kolla/cinder-volume/extend_start.sh | 5 -
 .../kolla/glance-api/extend_start.sh | 9 -
 .../kolla/horizon/extend_start.sh | 119 -
 container-images/kolla/iscsid/extend_start.sh | 7 -
 .../kolla/keystone/extend_start.sh | 31 -
 .../kolla/mariadb/extend_start.sh | 35 -
 .../kolla/mariadb/security_reset.expect | 58 -
 .../kolla/mariadb/security_reset.expect.10.5 | 67 -
 .../kolla/neutron-base/neutron_sudoers | 4 -
 .../ovn-nb-db-server/start_nb_db_server.sh | 28 -
 .../ovn-sb-db-server/start_sb_db_server.sh | 29 -
 .../kolla/rabbitmq/extend_start.sh | 16 -
 .../kolla/swift-base/swift-rootwrap | 10 -
 .../kolla/swift-base/swift-sudoers | 2 -
 .../tripleo-ansible-ee/requirements.yaml | 19 -
 .../kolla/tripleo-ansible-ee/settings | 26 -
 .../tripleo-ansible-ee/tripleo_entrypoint.sh | 17 -
 .../kolla/tripleoclient/create_super_user.sh | 18 -
 .../kolla/tripleoclient/tripleoclient_sudoers | 1 -
 container-images/tcib/base/base.yaml | 102 -
 .../tcib/base/collectd/collectd.yaml | 59 -
 container-images/tcib/base/cron/cron.yaml | 6 -
 container-images/tcib/base/etcd/etcd.yaml | 8 -
 container-images/tcib/base/frr/frr.yaml | 9 -
 .../tcib/base/haproxy/haproxy.yaml | 10 -
 .../tcib/base/mariadb/mariadb.yaml | 31 -
 .../tcib/base/memcached/memcached.yaml | 8 -
 .../tcib/base/multipathd/multipathd.yaml | 6 -
 .../base/os/aodh-base/aodh-api/aodh-api.yaml | 11 -
 .../tcib/base/os/aodh-base/aodh-base.yaml | 6 -
 .../aodh-evaluator/aodh-evaluator.yaml | 7 -
 .../aodh-listener/aodh-listener.yaml | 7 -
 .../aodh-notifier/aodh-notifier.yaml | 7 -
 .../barbican-api/barbican-api.yaml | 11 -
 .../base/os/barbican-base/barbican-base.yaml | 8 -
 .../barbican-keystone-listener.yaml | 7 -
 .../barbican-worker/barbican-worker.yaml | 7 -
 .../os/ceilometer-base/ceilometer-base.yaml | 8 -
 .../ceilometer-central.yaml | 7 -
 .../ceilometer-compute.yaml | 6 -
 .../ceilometer-ipmi/ceilometer-ipmi.yaml | 7 -
 .../ceilometer-notification.yaml | 8 -
 .../os/cinder-base/cinder-api/cinder-api.yaml | 11 -
 .../cinder-backup/cinder-backup.yaml | 16 -
 .../tcib/base/os/cinder-base/cinder-base.yaml | 7 -
 .../cinder-scheduler/cinder-scheduler.yaml | 3 -
 .../cinder-volume/cinder-volume.yaml | 17 -
 .../designate-api/designate-api.yaml | 9 -
 .../designate-backend-bind9.yaml | 6 -
 .../os/designate-base/designate-base.yaml | 9 -
 .../designate-central/designate-central.yaml | 6 -
 .../designate-mdns/designate-mdns.yaml | 6 -
 .../designate-producer.yaml | 6 -
 .../designate-sink/designate-sink.yaml | 6 -
 .../designate-worker/designate-worker.yaml | 7 -
 .../tcib/base/os/glance-api/glance-api.yaml | 17 -
 .../gnocchi-base/gnocchi-api/gnocchi-api.yaml | 7 -
 .../base/os/gnocchi-base/gnocchi-base.yaml | 14 -
 .../gnocchi-metricd/gnocchi-metricd.yaml | 7 -
 .../gnocchi-statsd/gnocchi-statsd.yaml | 7 -
 .../base/os/heat-base/heat-all/heat-all.yaml | 8 -
 .../heat-base/heat-api-cfn/heat-api-cfn.yaml | 7 -
 .../base/os/heat-base/heat-api/heat-api.yaml | 7 -
 .../tcib/base/os/heat-base/heat-base.yaml | 10 -
 .../os/heat-base/heat-engine/heat-engine.yaml | 7 -
 .../tcib/base/os/horizon/horizon.yaml | 20 -
 .../os/ironic-base/ironic-api/ironic-api.yaml | 12 -
 .../tcib/base/os/ironic-base/ironic-base.yaml | 6 -
 .../ironic-conductor/ironic-conductor.yaml | 26 -
 .../ironic-inspector/ironic-inspector.yaml | 9 -
 .../os/ironic-base/ironic-pxe/ironic-pxe.yaml | 14 -
 .../tcib/base/os/iscsid/iscsid.yaml | 11 -
 .../tcib/base/os/keystone/keystone.yaml | 25 -
 .../os/manila-base/manila-api/manila-api.yaml | 11 -
 .../tcib/base/os/manila-base/manila-base.yaml | 6 -
 .../manila-scheduler/manila-scheduler.yaml | 3 -
 .../manila-share/manila-share.yaml | 16 -
 .../ironic-neutron-agent.yaml | 8 -
 .../neutron-agent-base.yaml | 6 -
 .../neutron-dhcp-agent.yaml | 3 -
 .../neutron-l3-agent/neutron-l3-agent.yaml | 3 -
 .../neutron-metadata-agent-ovn.yaml | 7 -
 .../base/os/neutron-base/neutron-base.yaml | 16 -
 .../neutron-metadata-agent.yaml | 3 -
 .../neutron-mlnx-agent.yaml | 8 -
 .../neutron-openvswitch-agent.yaml | 7 -
 .../neutron-server-ovn.yaml | 11 -
 .../neutron-server/neutron-server.yaml | 12 -
 .../neutron-sriov-agent.yaml | 7 -
 .../base/os/nova-base/nova-api/nova-api.yaml | 11 -
 .../tcib/base/os/nova-base/nova-base.yaml | 6 -
 .../nova-compute-ironic.yaml | 11 -
 .../nova-base/nova-compute/nova-compute.yaml | 38 -
 .../nova-conductor/nova-conductor.yaml | 7 -
 .../nova-base/nova-libvirt/nova-libvirt.yaml | 29 -
 .../nova-novncproxy/nova-novncproxy.yaml | 8 -
 .../nova-scheduler/nova-scheduler.yaml | 7 -
 .../octavia-base/octavia-api/octavia-api.yaml | 12 -
 .../base/os/octavia-base/octavia-base.yaml | 6 -
 .../octavia-health-manager.yaml | 7 -
 .../octavia-housekeeping.yaml | 7 -
 .../octavia-worker/octavia-worker.yaml | 7 -
 container-images/tcib/base/os/os.yaml | 20 -
 .../base/os/placement-api/placement-api.yaml | 11 -
 .../swift-account/swift-account.yaml | 7 -
 .../tcib/base/os/swift-base/swift-base.yaml | 12 -
 .../swift-container/swift-container.yaml | 7 -
 .../swift-base/swift-object/swift-object.yaml | 7 -
 .../swift-proxy-server.yaml | 11 -
 .../tcib/base/os/tempest/tempest.yaml | 11 -
 .../tcib/base/os/tempest/tempest_sudoers | 1 -
 .../tcib/base/ovn-base/ovn-base.yaml | 9 -
 .../ovn-controller/ovn-controller.yaml | 6 -
 .../ovn-nb-db-server/ovn-nb-db-server.yaml | 7 -
 .../base/ovn-base/ovn-northd/ovn-northd.yaml | 12 -
 .../ovn-sb-db-server/ovn-sb-db-server.yaml | 7 -
 .../base/ovn-bgp-agent/ovn_bgp_agent.yaml | 8 -
 .../tcib/base/qdrouterd/qdrouterd.yaml | 11 -
 .../tcib/base/rabbitmq/rabbitmq.yaml | 16 -
 container-images/tcib/base/redis/redis.yaml | 16 -
 .../tcib/base/rsyslog/rsyslog.yaml | 9 -
 .../base/tripleoclient/tripleoclient.yaml | 28 -
 .../tcib/base/unbound/unbound.yaml | 5 -
 .../tripleo-ansible-ee.yaml | 45 -
 container-images/tripleo_containers.yaml | 193 --
 container-images/tripleo_containers.yaml.j2 | 743 ----
 contrib/tripleo_kolla_template_overrides.j2 | 1 -
 doc/requirements.txt | 4 -
 doc/source/_exts/workbooks.py | 129 -
 doc/source/conf.py | 83 -
 doc/source/contributing.rst | 4 -
 doc/source/image/build.rst | 20 -
 doc/source/image/upload.rst | 19 -
 doc/source/images.rst | 29 -
 doc/source/index.rst | 26 -
 doc/source/installation.rst | 13 -
 doc/source/readme.rst | 1 -
 doc/source/uploads.rst | 22 -
 doc/source/usage.rst | 8 -
 healthcheck/README.md | 27 -
 healthcheck/aodh-api | 6 -
 healthcheck/aodh-evaluator | 14 -
 healthcheck/aodh-listener | 14 -
 healthcheck/aodh-notifier | 14 -
 healthcheck/barbican-api | 6 -
 healthcheck/barbican-keystone-listener | 14 -
 healthcheck/barbican-worker | 14 -
 healthcheck/ceilometer-agent-central | 14 -
 healthcheck/ceilometer-agent-compute | 14 -
 healthcheck/ceilometer-agent-ipmi | 14 -
 healthcheck/ceilometer-agent-notification | 14 -
 healthcheck/cinder-api | 6 -
 healthcheck/cinder-backup | 14 -
 healthcheck/cinder-scheduler | 14 -
 healthcheck/cinder-volume | 14 -
 healthcheck/collectd | 4 -
 healthcheck/common.sh | 173 -
 healthcheck/cron | 10 -
 healthcheck/etcd | 6 -
 healthcheck/frr | 10 -
 healthcheck/glance-api | 9 -
 healthcheck/gnocchi-api | 6 -
 healthcheck/gnocchi-metricd | 14 -
 healthcheck/gnocchi-statsd | 11 -
 healthcheck/heat-api | 6 -
 healthcheck/heat-api-cfn | 6 -
 healthcheck/heat-engine | 14 -
 healthcheck/horizon | 10 -
 healthcheck/http-healthcheck.py | 44 -
 healthcheck/ironic-api | 6 -
 healthcheck/ironic-conductor | 14 -
 healthcheck/ironic-inspector | 12 -
 healthcheck/ironic-neutron-agent | 14 -
 healthcheck/ironic-pxe | 13 -
 healthcheck/iscsid | 4 -
 healthcheck/keystone | 6 -
 healthcheck/manila-api | 6 -
 healthcheck/manila-scheduler | 14 -
 healthcheck/manila-share | 14 -
 healthcheck/mariadb | 24 -
 healthcheck/memcached | 21 -
 healthcheck/multipathd | 6 -
 healthcheck/neutron-api | 8 -
 healthcheck/neutron-dhcp | 14 -
 healthcheck/neutron-l3 | 14 -
 healthcheck/neutron-metadata | 14 -
 healthcheck/neutron-ovs-agent | 14 -
 healthcheck/neutron-sriov-agent | 14 -
 healthcheck/nova-api | 12 -
 healthcheck/nova-compute | 22 -
 healthcheck/nova-conductor | 14 -
 healthcheck/nova-ironic | 14 -
 healthcheck/nova-libvirt | 12 -
 healthcheck/nova-metadata | 6 -
 healthcheck/nova-scheduler | 14 -
 healthcheck/nova-vnc-proxy | 15 -
 healthcheck/octavia-api | 5 -
 healthcheck/octavia-health-manager | 14 -
 healthcheck/octavia-housekeeping | 14 -
 healthcheck/octavia-worker | 14 -
 healthcheck/ovn-bgp-agent | 12 -
 healthcheck/ovn-controller | 14 -
 healthcheck/ovn-dbs | 4 -
 healthcheck/ovn-metadata | 14 -
 healthcheck/placement-api | 6 -
 healthcheck/qdrouterd | 18 -
 healthcheck/rabbitmq | 3 -
 healthcheck/redis | 27 -
 ...-proxy-ipv6-handling-5d0625f1ab10d13f.yaml | 5 -
 ...ift-fix-healthchecks-b3a02139230f4258.yaml | 6 -
 healthcheck/swift-account-server | 20 -
 healthcheck/swift-container-server | 20 -
 healthcheck/swift-object-server | 31 -
 healthcheck/swift-proxy | 17 -
 healthcheck/swift-rsync | 14 -
 ...vercloud-hardened-images-uefi-centos9.yaml | 17 -
 ...vercloud-hardened-images-uefi-python3.yaml | 56 -
 .../overcloud-hardened-images-uefi-rhel9.yaml | 18 -
 image-yaml/overcloud-images-centos9.yaml | 21 -
 image-yaml/overcloud-images-python3.yaml | 49 -
 image-yaml/overcloud-images-rhel9.yaml | 22 -
 .../notes/5.8.0-d1ca2298ba598431.yaml | 46 -
 ...e-fernet-keys-action-a1080bf5fb18413f.yaml | 8 -
 ...ifactURLs-Simplified-e3993493022653ab.yaml | 9 -
 ...cept-glance-image-id-6e8bf439d93b3fb5.yaml | 3 -
 ...de-support-for-hiera-f15fed971d4397f8.yaml | 4 -
 ...an-simple-crypto-kek-507fd2f21cdcf21b.yaml | 4 -
 ...ph-image-build-yamls-8ad8fe8b013a314e.yaml | 8 -
 ...l-time-kernel-images-cc790c6d7b6229da.yaml | 4 -
 ...tstackname-on-update-258dbf091fea497e.yaml | 5 -
 ...adds-ansible-actions-4da45efa8a98cade.yaml | 10 -
 ...e-container-workflow-77ee4557779563c0.yaml | 5 -
 ...delete-plan-workflow-d625682fdddd3f48.yaml | 5 -
 ...e-fencing-parameters-e2ea121247779db3.yaml | 5 -
 ...s-list-plan-workflow-c0c6f91c9460a09a.yaml | 5 -
 ...low-upload-big-files-f67ff35fcd166612.yaml | 12 -
 .../ansible-action-log-20904253f962557f.yaml | 4 -
 ...onic-staging-drivers-d278905bb1ec0683.yaml | 9 -
 ...lable-roles-workflow-fe81806915124cb6.yaml | 4 -
 .../notes/bindep-tox-606dbe4ddf68f7a6.yaml | 12 -
 ...cklisted_ips_support-f362e008ae1af210.yaml | 6 -
 ...sted_serverid_config-e079e64e8a04cdb4.yaml | 7 -
 .../break-up-messages-0c438e658ce32892.yaml | 6 -
 .../notes/buildah_build-727eb0f35f819731.yaml | 4 -
 .../notes/caps-fix-f6f8817a48fa5c25.yaml | 4 -
 ...ment-status-and-logs-3462d6ebbc9ecf2e.yaml | 5 -
 ...-odl-healthcheck-uri-84d6dea51b110772.yaml | 5 -
 .../check-boot-action-548e38d17cf1ad96.yaml | 4 -
 ...check-flavors-action-59b7f2dd5103ad9d.yaml | 4 -
 .../check-node-counts-bb80a5cdd8d10475.yaml | 4 -
 ...heck_for_default_tag-09fe34d2ac434890.yaml | 6 -
 .../check_ovs_upgrade-99cecd6b7bfdcf83.yaml | 8 -
 ...d-glance-api-version-bca6acd809d4151c.yaml | 7 -
 ...-consistent-work-dir-b8a37550c3970722.yaml | 5 -
 ...load-deploy-workflow-55f26302a42cf379.yaml | 5 -
 ...oad-dont-use-tmpdirs-3641db9fd687f85e.yaml | 9 -
 ...ig-download-git-repo-9a18681afbfb9136.yaml | 5 -
 ...-git-repo-commit-msg-9a550daaae1fc55e.yaml | 10 -
 ...fig-download-timeout-7296683716f78022.yaml | 5 -
 ...g-download-verbosity-ab2e89e169c208a7.yaml | 7 -
 ...ify_only_with_source-d9be8cc7236e7c94.yaml | 6 -
 ...overcloudrc-workflow-e5150b6b0af462f0.yaml | 6 -
 ..._parameters_workflow-8c35b813289a5479.yaml | 5 -
 ...apabilities_workbook-091f0ce2ab5fff3a.yaml | 5 -
 ...fault-arch-selection-d5fd2fcdba725dd4.yaml | 6 -
 ...-prefix-is-openstack-3cd42220d6cdfed0.yaml | 5 -
 .../notes/delete-stack-a3c0951d9af04a0f.yaml | 4 -
 ...ver-clear-breakpoint-ee1a984f3366598a.yaml | 6 -
 ...ent-status-workflows-7f6ba3b69f805f06.yaml | 6 -
 ...ployments-per-server-ea747fcff19c884d.yaml | 7 -
 ...te-list-roles-action-12744cee0e6d70e5.yaml | 6 -
 ...kopeo-image-uploader-a8b8b4b46d7be706.yaml | 5 -
 ...precated-params-list-a4edf6e341520ead.yaml | 3 -
 ...eployment-parameters-c5e97d3df9bfc114.yaml | 13 -
 ...sing-scheduler-hints-5bb65bc78c1f6f91.yaml | 7 -
 .../disable_nouveau-bbaf1263fe43821a.yaml | 6 -
 .../discover_hosts-f1733234ba32a909.yaml | 5 -
 ...p-on-package-install-a00cd921b0af7168.yaml | 8 -
 .../notes/drac-address-d835a529a7c17242.yaml | 5 -
 ...ntory-config-support-c2132b897da2d290.yaml | 6 -
 ...dmin-honor-blacklist-f1371554ab1b38f6.yaml | 4 -
 ...son-ironic-port-data-0905da3f7b13d149.yaml | 38 -
 .../enroll-root-device-a172f98f50706a12.yaml | 9 -
 ...ror-msg-no-baremetal-a583117ecd9836dc.yaml | 7 -
 .../notes/etcdtoken-4c46bdfac940acda.yaml | 7 -
 ..._os_nova_api_version-d5d1501306f8013b.yaml | 8 -
 ...-download-executions-bf1f0984cd8af5f0.yaml | 7 -
 .../fencing-hw-types-fddcdb6bf6d79414.yaml | 11 -
 ...pi-network-rendering-5a65009051a0f464.yaml | 7 -
 ...-to-git-clone-action-d13942fc07e8e089.yaml | 5 -
 ...de-server-deployment-098bcae9e0227c57.yaml | 5 -
 ...certs_key_passphrase-60cba4653109992c.yaml | 5 -
 ...-install-permissions-846cd6780a527084.yaml | 5 -
 .../fix-octavia-pub-key-d195fbf1976a8d36.yaml | 3 -
 ...daylight-healthcheck-f9bc1d2e067c4680.yaml | 4 -
 ...avia-undercloud-role-c02b0c5b0f1ece34.yaml | 3 -
 ...stom-overcloud-names-35404ceae3ac380e.yaml | 7 -
 .../flatten_parameters-b37065a0f0071523.yaml | 3 -
 ...-ansible-deployments-8bc0de3b4dbfa69c.yaml | 6 -
 ...ate-roles-with-colon-c903826db084b8a6.yaml | 9 -
 ...for-deployment-plans-cac4d3746689cbda.yaml | 13 -
 ...os-apply-config-warn-beece0b9fcff74b7.yaml | 4 -
 .../notes/gui-logging-5413d0d86e618c59.yaml | 5 -
 ...no-deployment-status-a70a4b950171afbe.yaml | 5 -
 .../healthcheck-cron-37de4a861e1a1cbf.yaml | 3 -
 ...when_not_using_queue-f7c5a3051f5b90cc.yaml | 6 -
 ...rity-hardened-images-3fc4df73a48d4a91.yaml | 7 -
 ...mphora-image-red-hat-bc8545e36d88f951.yaml | 3 -
 .../integrate-skydive-b3b569d996c24cc5.yaml | 5 -
 .../notes/interfaces-cd94c12dd4744c50.yaml | 11 -
 ...ospection-batch-size-47723bceb0281baf.yaml | 5 -
 .../ipmi-discovery-72f93156bcaf461d.yaml | 12 -
 .../ironic-api-version-d2b4ec1474918f12.yaml | 6 -
 ...c-api-version-latest-328a5894677f801d.yaml | 8 -
 ...nic-api-version-pike-4264d815385cba7a.yaml | 13 -
 .../ironic-boot-config-77addfde192cee0f.yaml | 4 -
 ...ronic-hardware-types-791dad3f75a67454.yaml | 4 -
 .../notes/ironic-rescue-ce08f432ccdcece4.yaml | 8 -
 ...-node-uniqueness-fix-c74110a9728d1023.yaml | 11 -
 ...nder-raise-extension-87c7ed150a252ff5.yaml | 12 -
 .../jinja2_include-8bef46285f25ddea.yaml | 3 -
 ...limit_over_blacklist-3ce81ecf04b09997.yaml | 10 -
 ...from-roles-data-yaml-7ca573169f888bd7.yaml | 6 -
 .../manage-workflow-2668b50940c10d97.yaml | 4 -
 .../migration_ssh_key-6e772d18d4d24485.yaml | 4 -
 ...inor-update-workflow-6106c1a91cb6d029.yaml | 19 -
 ...swift-tempurl-action-ce4946a0b76db53c.yaml | 6 -
 .../notes/more-hw-types-a837145e41409382.yaml | 19 -
 ...dis_vip_to_all_nodes-bdd1c96438d6ed91.yaml | 7 -
 .../multi_arch_image-3c3730cbba95be19.yaml | 9 -
 .../notes/no-cisco-46992167cd0ab6d0.yaml | 6 -
 .../no-classic-drivers-d56f8c3ff15af2c3.yaml | 7 -
 ...no-verify-registries-215e4df615e441ff.yaml | 3 -
 .../notes/no_ss-368721c3af17b782.yaml | 12 -
 ...ield-no-longer-valid-6ed035c442c4fc68.yaml | 7 -
 .../nodes-with-profile-662f2c6cb61c4ac4.yaml | 5 -
 ...metadata_wsgi_change-4a191009d7ef9963.yaml | 8 -
 ...etadata_config_image-26e727263be52408.yaml | 7 -
 ...metadata_healthcheck-44a9b0a1f436826a.yaml | 5 -
 ...roxy_ssl_healthcheck-f9ad0dffb459ef4b.yaml | 7 -
 ...ove_nova-consoleauth-95df6d63822ef787.yaml | 15 -
 .../octavia-amphora-ssh-5dee3678d7b66476.yaml | 3 -
 ...tes-path-and-content-e8acf1e859e75135.yaml | 6 -
 ...internal-tls-support-f595ed1c3a1f3353.yaml | 5 -
 .../octavia-passphrase-285a06885ac735df.yaml | 7 -
 ...a-set-image-owner-id-adb197d5daae54f1.yaml | 10 -
 ...penstack-heat-agents-31a1a2908745f3bc.yaml | 5 -
 ...net_config_packaging-36b94a08bbb3e11d.yaml | 17 -
 ...ss-keystone-endpoint-9cdc1a4e1341a933.yaml | 7 -
 .../notes/ovirt-driver-77af6913e280a19e.yaml | 4 -
 ...epare-neutron-server-abb60292341b5782.yaml | 5 -
 .../notes/passwords-79661a3f27a33528.yaml | 8 -
 ...ployment-validations-1e8eacd36571d5c9.yaml | 4 -
 .../prepare-includes-0c9a077369e99619.yaml | 6 -
 .../notes/profile-17e2650c8da9e8b5.yaml | 9 -
 .../notes/provide-name-f75b6b61d3d8d693.yaml | 4 -
 .../notes/qemu_user_id-32d8f17099a6f002.yaml | 10 -
 .../notes/quote-$@-a3d47106c9b7eeb6.yaml | 5 -
 .../notes/redfish-550a0e0f0fd4ea41.yaml | 6 -
 ...e-memory-consumption-d7effb68ab63b8c5.yaml | 5 -
 ...mt-subnet-to-class-b-1cd832ef08a30c85.yaml | 5 -
 .../notes/remove-abort-7214f376c9672644.yaml | 8 -
 ...ing-drivers-override-ce9776ec030ec02a.yaml | 6 -
 ...emove-overcloudrc.v3-2118c053035c1439.yaml | 6 -
 ...kopeo-image-uploader-51e7574cc386a3e9.yaml | 5 -
 ...move-skydive-support-1cea22a7419a3b13.yaml | 5 -
 ...st-container-support-596426994bbb5c9d.yaml | 8 -
 .../resource-class-6fffaaf3b39b36c5.yaml | 6 -
 ...ault_deployment_plan-397b259f6f641ab9.yaml | 7 -
 .../role-config-none-d440bd0dcbb63534.yaml | 5 -
 ...-specific-validation-5ea0a31711ced6fe.yaml | 3 -
 ...elect-roles-workflow-01860e1ba8b7b86c.yaml | 6 -
 ...r-keep-alive-options-071e1b3b570e78a7.yaml | 5 -
 ...ip-deploy-identifier-d5abb0d4e6af0ecd.yaml | 10 -
 ...kup-if-tag-specified-2284c45dc0f87693.yaml | 7 -
 ..._off_ceph_containers-e1a66fa39076c2cf.yaml | 38 -
 .../notes/stack-update-1530096686438046.yaml | 4 -
 ...mphora-image-install-5d26e3d37c7b508f.yaml | 8 -
 ...op-using-mistral-env-2ed6e17c4cdb9761.yaml | 6 -
 ...ditional-healtchecks-ab8cd9c7562654f3.yaml | 5 -
 ...ipleomaster_registry-bd795a51f4e572c9.yaml | 9 -
 .../tripleo-bootstrap-721b73d21ade7d6d.yaml | 5 -
 ...tripleo-container-rm-082aa93d2de1e8bc.yaml | 7 -
 ...ripleo-container-tag-ec42e64289cb17e2.yaml | 5 -
 ...tripleo-create-admin-0ce59d13ce2c07f6.yaml | 6 -
 ...-openshift-plan-name-89135e3a68307047.yaml | 10 -
 ...y-openshift-playbook-ac8b49a212545c0f.yaml | 6 -
 .../tripleo-docker-rm-b64297d5f9f42988.yaml | 5 -
 .../tripleo-module-load-80f7fd8c8dd6a81e.yaml | 3 -
 .../tripleo-mount-image-e038a7d9d51c4828.yaml | 9 -
 ...pleo-ssh-known-hosts-d27c54b0a6f9a028.yaml | 5 -
 ...actions-and-workflow-1d661bba3fb2f974.yaml | 5 -
 ...pdate-keystone-utils-bfd14da957d34ec5.yaml | 6 -
 ...date-params-workflow-b26fd4cc40549537.yaml | 5 -
 ...programming-language-54ded15322426458.yaml | 5 -
 ...date-ps1-in-rc-files-c710832fc1ee37f5.yaml | 7 -
 ...pdate-roles-workflow-00be679eb8e9548c.yaml | 5 -
 ...stnames-in-inventory-6d1a3572baebf509.yaml | 6 -
 ...pleo-containers-file-0590a59f56fb3907.yaml | 6 -
 ...create_admin_via_ssh-dc9cae99934e1fbe.yaml | 8 -
 .../v3-only-overcloudrc-8439cfed2145341f.yaml | 6 -
 ...idations-in-workflow-8ce6a053cacece0d.yaml | 7 -
 .../verify-profiles-a9d075f565bc3df6.yaml | 4 -
 ...n_host_atomic_update-481e0baf3b3d6342.yaml | 5 -
 ...nfig-download-export-d22f3eb958b8c97a.yaml | 5 -
 releasenotes/source/_static/.placeholder | 0
 releasenotes/source/conf.py | 262 --
 releasenotes/source/index.rst | 28 -
 releasenotes/source/ocata.rst | 6 -
 releasenotes/source/pike.rst | 6 -
 releasenotes/source/queens.rst | 6 -
 releasenotes/source/rocky.rst | 6 -
 releasenotes/source/stein.rst | 6 -
 releasenotes/source/train.rst | 6 -
 releasenotes/source/unreleased.rst | 5 -
 releasenotes/source/ussuri.rst | 6 -
 releasenotes/source/victoria.rst | 6 -
 releasenotes/source/wallaby.rst | 6 -
 releasenotes/source/zed.rst | 6 -
 requirements.txt | 19 -
 scripts/README-tripleo.sh | 3 -
 scripts/bindep-install | 45 -
 scripts/bootstrap_host_exec | 19 -
 scripts/bootstrap_host_only_eval | 19 -
 scripts/bootstrap_host_only_exec | 19 -
 scripts/containerfile-converter.py | 245 --
 scripts/pull-puppet-modules | 115 -
 scripts/tripleo-build-images | 78 -
 scripts/tripleo-config-download | 57 -
 scripts/tripleo-mount-image | 342 --
 scripts/tripleo-unmount-image | 1 -
 scripts/upload-artifacts | 128 -
 scripts/upload-puppet-modules | 88 -
 scripts/upload-swift-artifacts | 1 -
 setup.cfg | 59 -
 setup.py | 20 -
 test-requirements.txt | 11 -
 tools/check_duplicate_jinja_blocks.sh | 21 -
 tools/releasenotes_tox.sh | 27 -
 tox.ini | 101 -
 tripleo_common/__init__.py | 18 -
 tripleo_common/arch.py | 29 -
 tripleo_common/constants.py | 194 --
 tripleo_common/exception.py | 128 -
 tripleo_common/filters/__init__.py | 0
 tripleo_common/filters/capabilities_filter.py | 46 -
 tripleo_common/filters/list.py | 27 -
 tripleo_common/i18n.py | 25 -
 tripleo_common/image/__init__.py | 0
 tripleo_common/image/base.py | 95 -
 tripleo_common/image/build.py | 85 -
 tripleo_common/image/builder/__init__.py | 0
 tripleo_common/image/builder/base.py | 25 -
 tripleo_common/image/builder/buildah.py | 383 ---
 tripleo_common/image/exception.py | 39 -
 tripleo_common/image/image_builder.py | 138 -
 tripleo_common/image/image_export.py | 474 ---
 tripleo_common/image/image_uploader.py | 2612 --------
 tripleo_common/image/kolla_builder.py | 596 ----
 tripleo_common/inventories.py | 145 -
 tripleo_common/inventory.py | 846 -----
 ...y-failed-deployments-baf0c701e6d1ad4a.yaml | 5 -
 ...onvert-docker-params-84dfc6083e88bb52.yaml | 6 -
 tripleo_common/templates/deployment.j2 | 27 -
 tripleo_common/templates/deployments.yaml | 219 --
 tripleo_common/templates/heat-config.j2 | 1 -
 tripleo_common/templates/host_var_server.j2 | 22 -
 tripleo_common/tests/__init__.py | 0
 tripleo_common/tests/base.py | 23 -
 tripleo_common/tests/fake_config/__init__.py | 0
 tripleo_common/tests/fake_config/fakes.py | 97 -
 tripleo_common/tests/fake_neutron/__init__.py | 0
 tripleo_common/tests/fake_neutron/fakes.py | 178 -
 tripleo_common/tests/fake_neutron/stubs.py | 141 -
 tripleo_common/tests/fake_nova/README | 4 -
 tripleo_common/tests/fake_nova/__init__.py | 0
 .../tests/fake_nova/scheduler/__init__.py | 0
 .../tests/fake_nova/scheduler/filters.py | 18 -
 tripleo_common/tests/image/__init__.py | 0
 .../tests/image/builder/__init__.py | 0
 .../tests/image/builder/test_buildah.py | 306 --
 tripleo_common/tests/image/fakes.py | 45 -
 tripleo_common/tests/image/test_base.py | 166 -
 tripleo_common/tests/image/test_build.py | 80 -
 .../tests/image/test_image_builder.py | 108 -
 .../tests/image/test_image_export.py | 535 ---
 .../tests/image/test_image_uploader.py | 2996 -----------------
 .../tests/image/test_kolla_builder.py | 1299 -------
 .../tests/inventory_data/cell1_dynamic.json | 362 --
 .../tests/inventory_data/cell1_static.yaml | 188 --
 .../tests/inventory_data/list_overcloud.json | 826 -----
 .../tests/inventory_data/merged_dynamic.json | 1204 -------
 .../tests/inventory_data/merged_static.yaml | 741 ----
 .../inventory_data/overcloud_dynamic.json | 548 ---
 .../inventory_data/overcloud_static.yaml | 282 --
 .../inventory_data/undercloud_dynamic.json | 27 -
 .../undercloud_dynamic_merged.json | 28 -
 .../inventory_data/undercloud_static.yaml | 16 -
 .../undercloud_static_merged.yaml | 17 -
 tripleo_common/tests/test_arch.py | 39 -
 tripleo_common/tests/test_filters.py | 65 -
 tripleo_common/tests/test_inventories.py | 167 -
 tripleo_common/tests/test_inventory.py | 1430 --------
 tripleo_common/tests/test_update.py | 120 -
 tripleo_common/tests/utils/__init__.py | 0
 tripleo_common/tests/utils/data/Compute | 2 -
 tripleo_common/tests/utils/data/Controller | 5 -
 .../tests/utils/data/config_data.yaml | 138 -
 .../data/host_vars/overcloud-controller-0 | 12 -
 .../data/host_vars/overcloud-novacompute-0 | 10 -
 .../data/host_vars/overcloud-novacompute-1 | 10 -
 .../data/host_vars/overcloud-novacompute-2 | 11 -
 .../ControllerHostEntryDeployment | 17 -
 .../overcloud-controller-0/MyExtraConfigPost | 18 -
 .../data/overcloud-controller-0/MyPostConfig | 17 -
 .../overcloud-controller-0/NetworkDeployment | 17 -
 .../ComputeHostEntryDeployment | 16 -
 .../overcloud-novacompute-0/MyExtraConfigPost | 17 -
 .../overcloud-novacompute-0/NetworkDeployment | 16 -
 .../ComputeHostEntryDeployment | 16 -
 .../overcloud-novacompute-1/MyExtraConfigPost | 18 -
 .../overcloud-novacompute-1/NetworkDeployment | 16 -
 .../overcloud-novacompute-2/AnsibleDeployment | 20 -
 .../ComputeHostEntryDeployment | 16 -
 .../overcloud-novacompute-2/MyExtraConfigPost | 17 -
 .../overcloud-novacompute-2/NetworkDeployment | 16 -
 tripleo_common/tests/utils/test_ansible.py | 250 --
 tripleo_common/tests/utils/test_config.py | 998 ------
 tripleo_common/tests/utils/test_nodes.py | 1431 --------
 .../tests/utils/test_overcloudrc.py | 84 -
 tripleo_common/tests/utils/test_parameters.py | 142 -
 tripleo_common/tests/utils/test_passwords.py | 89 -
 tripleo_common/tests/utils/test_plan.py | 534 ---
 tripleo_common/tests/utils/test_process.py | 81 -
 tripleo_common/tests/utils/test_roles.py | 267 --
 .../tests/utils/test_stack_parameters.py | 369 --
 tripleo_common/update.py | 80 -
 tripleo_common/utils/__init__.py | 0
 tripleo_common/utils/ansible.py | 375 ---
 tripleo_common/utils/common.py | 27 -
 tripleo_common/utils/config.py | 601 ----
 tripleo_common/utils/heat.py | 98 -
 tripleo_common/utils/image.py | 26 -
 tripleo_common/utils/locks/__init__.py | 0
 tripleo_common/utils/locks/base.py | 27 -
 tripleo_common/utils/locks/processlock.py | 33 -
 tripleo_common/utils/locks/threadinglock.py | 23 -
 tripleo_common/utils/nodes.py | 791 -----
 tripleo_common/utils/overcloudrc.py | 130 -
 tripleo_common/utils/parameters.py | 108 -
 tripleo_common/utils/passwords.py | 229 --
 tripleo_common/utils/plan.py | 186 -
 tripleo_common/utils/process.py | 59 -
 tripleo_common/utils/roles.py | 185 -
 tripleo_common/utils/stack.py | 121 -
 tripleo_common/utils/stack_parameters.py | 107 -
 zuul.d/cross-jobs.yaml | 9 -
 zuul.d/layout.yaml | 35 -
 580 files changed, 8 insertions(+), 34712 deletions(-)
 delete mode 100644 .coveragerc
 delete mode 100644 .gitignore
 delete mode 100644 .mailmap
 delete mode 100644 .pre-commit-config.yaml
 delete mode 100644 .pylintrc
 delete mode 100644 .stestr.conf
 delete mode 100644 CONTRIBUTING.rst
 delete mode 100644 HACKING.rst
 delete mode 100644 LICENSE
 delete mode 100644 bindep.txt
 delete mode 100644 container-images/ceph.j2
 delete mode 100644 container-images/container_image_prepare_defaults.yaml
 delete mode 100644 container-images/kolla/barbican-base/sudoers
 delete mode 100644 container-images/kolla/base/httpd_setup.sh
 delete mode 100644 container-images/kolla/base/set_configs.py
 delete mode 100644 container-images/kolla/base/start.sh
 delete mode 100644 container-images/kolla/base/sudoers
 delete mode 100755 container-images/kolla/base/uid_gid_manage.sh
 delete mode 100644 container-images/kolla/cinder-backup/cinder-backup-sudoers
 delete mode 100644 container-images/kolla/cinder-backup/extend_start.sh
 delete mode 100644 container-images/kolla/cinder-volume/cinder-volume-sudoers
 delete mode 100644 container-images/kolla/cinder-volume/extend_start.sh
 delete mode 100644 container-images/kolla/glance-api/extend_start.sh
 delete mode 100644 container-images/kolla/horizon/extend_start.sh
 delete mode 100644 container-images/kolla/iscsid/extend_start.sh
 delete mode 100644 container-images/kolla/keystone/extend_start.sh
 delete mode 100644 container-images/kolla/mariadb/extend_start.sh
 delete mode 100644 container-images/kolla/mariadb/security_reset.expect
 delete mode 100644 container-images/kolla/mariadb/security_reset.expect.10.5
 delete mode 100644 container-images/kolla/neutron-base/neutron_sudoers
 delete mode 100755 container-images/kolla/ovn/ovn-nb-db-server/start_nb_db_server.sh
 delete mode 100755 container-images/kolla/ovn/ovn-sb-db-server/start_sb_db_server.sh
 delete mode 100644 container-images/kolla/rabbitmq/extend_start.sh
 delete mode 100644 container-images/kolla/swift-base/swift-rootwrap
 delete mode 100644 container-images/kolla/swift-base/swift-sudoers
 delete mode 100644 container-images/kolla/tripleo-ansible-ee/requirements.yaml
 delete mode 100644 container-images/kolla/tripleo-ansible-ee/settings
 delete mode 100644 container-images/kolla/tripleo-ansible-ee/tripleo_entrypoint.sh
 delete mode 100644 container-images/kolla/tripleoclient/create_super_user.sh
 delete mode 100644 container-images/kolla/tripleoclient/tripleoclient_sudoers
 delete mode 100644 container-images/tcib/base/base.yaml
 delete mode 100644 container-images/tcib/base/collectd/collectd.yaml
 delete mode 100644 container-images/tcib/base/cron/cron.yaml
 delete mode 100644 container-images/tcib/base/etcd/etcd.yaml
 delete mode 100644 container-images/tcib/base/frr/frr.yaml
 delete mode 100644 container-images/tcib/base/haproxy/haproxy.yaml
 delete mode 100644 container-images/tcib/base/mariadb/mariadb.yaml
 delete mode 100644 container-images/tcib/base/memcached/memcached.yaml
 delete mode 100644 container-images/tcib/base/multipathd/multipathd.yaml
 delete mode 100644 container-images/tcib/base/os/aodh-base/aodh-api/aodh-api.yaml
 delete mode 100644 container-images/tcib/base/os/aodh-base/aodh-base.yaml
 delete mode 100644 container-images/tcib/base/os/aodh-base/aodh-evaluator/aodh-evaluator.yaml
 delete mode 100644 container-images/tcib/base/os/aodh-base/aodh-listener/aodh-listener.yaml
 delete mode 100644 container-images/tcib/base/os/aodh-base/aodh-notifier/aodh-notifier.yaml
 delete mode 100644 container-images/tcib/base/os/barbican-base/barbican-api/barbican-api.yaml
 delete mode 100644 container-images/tcib/base/os/barbican-base/barbican-base.yaml
 delete mode 100644 container-images/tcib/base/os/barbican-base/barbican-keystone-listener/barbican-keystone-listener.yaml
 delete mode 100644 container-images/tcib/base/os/barbican-base/barbican-worker/barbican-worker.yaml
 delete mode 100644 container-images/tcib/base/os/ceilometer-base/ceilometer-base.yaml
 delete mode 100644 container-images/tcib/base/os/ceilometer-base/ceilometer-central/ceilometer-central.yaml
 delete mode 100644 container-images/tcib/base/os/ceilometer-base/ceilometer-compute/ceilometer-compute.yaml
 delete mode 100644 container-images/tcib/base/os/ceilometer-base/ceilometer-ipmi/ceilometer-ipmi.yaml
 delete mode 100644 container-images/tcib/base/os/ceilometer-base/ceilometer-notification/ceilometer-notification.yaml
 delete mode 100644 container-images/tcib/base/os/cinder-base/cinder-api/cinder-api.yaml
 delete mode 100644 container-images/tcib/base/os/cinder-base/cinder-backup/cinder-backup.yaml
 delete mode 100644 container-images/tcib/base/os/cinder-base/cinder-base.yaml
 delete mode 100644 container-images/tcib/base/os/cinder-base/cinder-scheduler/cinder-scheduler.yaml
 delete mode 100644 container-images/tcib/base/os/cinder-base/cinder-volume/cinder-volume.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-api/designate-api.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-backend-bind9/designate-backend-bind9.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-base.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-central/designate-central.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-mdns/designate-mdns.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-producer/designate-producer.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-sink/designate-sink.yaml
 delete mode 100644 container-images/tcib/base/os/designate-base/designate-worker/designate-worker.yaml
 delete mode 100644 container-images/tcib/base/os/glance-api/glance-api.yaml
 delete mode 100644 container-images/tcib/base/os/gnocchi-base/gnocchi-api/gnocchi-api.yaml
 delete mode 100644 container-images/tcib/base/os/gnocchi-base/gnocchi-base.yaml
 delete mode 100644 container-images/tcib/base/os/gnocchi-base/gnocchi-metricd/gnocchi-metricd.yaml
 delete mode 100644 container-images/tcib/base/os/gnocchi-base/gnocchi-statsd/gnocchi-statsd.yaml
 delete mode 100644 container-images/tcib/base/os/heat-base/heat-all/heat-all.yaml
 delete mode 100644 container-images/tcib/base/os/heat-base/heat-api-cfn/heat-api-cfn.yaml
 delete mode 100644 container-images/tcib/base/os/heat-base/heat-api/heat-api.yaml
 delete mode 100644 container-images/tcib/base/os/heat-base/heat-base.yaml
 delete mode 100644 container-images/tcib/base/os/heat-base/heat-engine/heat-engine.yaml
 delete mode 100644 container-images/tcib/base/os/horizon/horizon.yaml
 delete mode 100644 container-images/tcib/base/os/ironic-base/ironic-api/ironic-api.yaml
 delete mode 100644 container-images/tcib/base/os/ironic-base/ironic-base.yaml
 delete mode 100644 container-images/tcib/base/os/ironic-base/ironic-conductor/ironic-conductor.yaml
 delete mode 100644 container-images/tcib/base/os/ironic-base/ironic-inspector/ironic-inspector.yaml
 delete mode 100644 container-images/tcib/base/os/ironic-base/ironic-pxe/ironic-pxe.yaml
 delete mode 100644 container-images/tcib/base/os/iscsid/iscsid.yaml
 delete mode 100644 container-images/tcib/base/os/keystone/keystone.yaml
 delete mode 100644 container-images/tcib/base/os/manila-base/manila-api/manila-api.yaml
 delete mode 100644 container-images/tcib/base/os/manila-base/manila-base.yaml
 delete mode 100644 container-images/tcib/base/os/manila-base/manila-scheduler/manila-scheduler.yaml
 delete mode 100644 container-images/tcib/base/os/manila-base/manila-share/manila-share.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/ironic-neutron-agent/ironic-neutron-agent.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-agent-base.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-dhcp-agent/neutron-dhcp-agent.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-l3-agent/neutron-l3-agent.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-metadata-agent-ovn/neutron-metadata-agent-ovn.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-base.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-metadata-agent/neutron-metadata-agent.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-mlnx-agent/neutron-mlnx-agent.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-openvswitch-agent/neutron-openvswitch-agent.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-server-ovn/neutron-server-ovn.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-server/neutron-server.yaml
 delete mode 100644 container-images/tcib/base/os/neutron-base/neutron-sriov-agent/neutron-sriov-agent.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-api/nova-api.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-base.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-compute-ironic/nova-compute-ironic.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-compute/nova-compute.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-conductor/nova-conductor.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-libvirt/nova-libvirt.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-novncproxy/nova-novncproxy.yaml
 delete mode 100644 container-images/tcib/base/os/nova-base/nova-scheduler/nova-scheduler.yaml
 delete mode 100644 container-images/tcib/base/os/octavia-base/octavia-api/octavia-api.yaml
 delete mode 100644 container-images/tcib/base/os/octavia-base/octavia-base.yaml
 delete mode 100644 container-images/tcib/base/os/octavia-base/octavia-health-manager/octavia-health-manager.yaml
 delete mode 100644 container-images/tcib/base/os/octavia-base/octavia-housekeeping/octavia-housekeeping.yaml
 delete mode 100644 container-images/tcib/base/os/octavia-base/octavia-worker/octavia-worker.yaml
 delete mode 100644 container-images/tcib/base/os/os.yaml
 delete mode 100644 container-images/tcib/base/os/placement-api/placement-api.yaml
 delete mode 100644 container-images/tcib/base/os/swift-base/swift-account/swift-account.yaml
 delete mode 100644 container-images/tcib/base/os/swift-base/swift-base.yaml
 delete mode 100644 container-images/tcib/base/os/swift-base/swift-container/swift-container.yaml
 delete mode 100644 container-images/tcib/base/os/swift-base/swift-object/swift-object.yaml
 delete mode 100644 container-images/tcib/base/os/swift-base/swift-proxy-server/swift-proxy-server.yaml
 delete mode 100644 container-images/tcib/base/os/tempest/tempest.yaml
 delete mode 100644 container-images/tcib/base/os/tempest/tempest_sudoers
 delete mode 100644 container-images/tcib/base/ovn-base/ovn-base.yaml
 delete mode 100644 container-images/tcib/base/ovn-base/ovn-controller/ovn-controller.yaml
 delete mode 100644 container-images/tcib/base/ovn-base/ovn-nb-db-server/ovn-nb-db-server.yaml
 delete mode 100644 container-images/tcib/base/ovn-base/ovn-northd/ovn-northd.yaml
 delete mode 100644 container-images/tcib/base/ovn-base/ovn-sb-db-server/ovn-sb-db-server.yaml
 delete mode 100644 container-images/tcib/base/ovn-bgp-agent/ovn_bgp_agent.yaml
 delete mode 100644 container-images/tcib/base/qdrouterd/qdrouterd.yaml
 delete mode 100644 container-images/tcib/base/rabbitmq/rabbitmq.yaml
 delete mode 100644 container-images/tcib/base/redis/redis.yaml
 delete mode 100644 container-images/tcib/base/rsyslog/rsyslog.yaml
 delete mode 100644 container-images/tcib/base/tripleoclient/tripleoclient.yaml
 delete mode 100644 container-images/tcib/base/unbound/unbound.yaml
 delete mode 100644 container-images/tcib/tripleo-ansible-ee/tripleo-ansible-ee.yaml
 delete mode 100644 container-images/tripleo_containers.yaml
 delete mode 100644 container-images/tripleo_containers.yaml.j2
 delete mode 120000 contrib/tripleo_kolla_template_overrides.j2
 delete mode 100644 doc/requirements.txt
 delete mode 100644 doc/source/_exts/workbooks.py
 delete mode 100644 doc/source/conf.py
 delete mode 100644 doc/source/contributing.rst
 delete mode 100644 doc/source/image/build.rst
 delete mode 100644 doc/source/image/upload.rst
 delete mode 100644 doc/source/images.rst
 delete mode 100644 doc/source/index.rst
 delete mode 100644 doc/source/installation.rst
 delete mode 100644 doc/source/readme.rst
 delete mode 100644 doc/source/uploads.rst
 delete mode 100644 doc/source/usage.rst
 delete mode 100644 healthcheck/README.md
 delete mode 100755 healthcheck/aodh-api
 delete mode 100755 healthcheck/aodh-evaluator
 delete mode 100755 healthcheck/aodh-listener
 delete mode 100755 healthcheck/aodh-notifier
 delete mode 100755 healthcheck/barbican-api
 delete mode 100755 healthcheck/barbican-keystone-listener
 delete mode 100755 healthcheck/barbican-worker
 delete mode 100755 healthcheck/ceilometer-agent-central
 delete mode 100755 healthcheck/ceilometer-agent-compute
 delete mode 100755 healthcheck/ceilometer-agent-ipmi
 delete mode 100755 healthcheck/ceilometer-agent-notification
 delete mode 100755 healthcheck/cinder-api
 delete mode 100755 healthcheck/cinder-backup
 delete mode 100755 healthcheck/cinder-scheduler
 delete mode 100755 healthcheck/cinder-volume
 delete mode 100755 healthcheck/collectd
 delete mode 100755 healthcheck/common.sh
 delete mode 100755 healthcheck/cron
 delete mode 100755 healthcheck/etcd
 delete mode 100755 healthcheck/frr
 delete mode 100755 healthcheck/glance-api
 delete mode 100755 healthcheck/gnocchi-api
 delete mode 100755 healthcheck/gnocchi-metricd
 delete mode 100755 healthcheck/gnocchi-statsd
 delete mode 100755 healthcheck/heat-api
 delete mode 100755 healthcheck/heat-api-cfn
 delete mode 100755 healthcheck/heat-engine
 delete mode 100755 healthcheck/horizon
 delete mode 100755 healthcheck/http-healthcheck.py
 delete mode 100755 healthcheck/ironic-api
 delete mode 100755 healthcheck/ironic-conductor
 delete mode 100755 healthcheck/ironic-inspector
 delete mode 100755 healthcheck/ironic-neutron-agent
 delete mode 100755 healthcheck/ironic-pxe
 delete mode 100755 healthcheck/iscsid
 delete mode 100755 healthcheck/keystone
 delete mode 100755 healthcheck/manila-api
 delete mode 100755 healthcheck/manila-scheduler
 delete mode 100755 healthcheck/manila-share
 delete mode 100755 healthcheck/mariadb
 delete mode 100755 healthcheck/memcached
 delete mode 100755 healthcheck/multipathd
 delete mode 100755 healthcheck/neutron-api
 delete mode 100755 healthcheck/neutron-dhcp
 delete mode 100755 healthcheck/neutron-l3
 delete mode 100755 healthcheck/neutron-metadata
 delete mode 100755 healthcheck/neutron-ovs-agent
 delete mode 100755 healthcheck/neutron-sriov-agent
 delete mode 100755 healthcheck/nova-api
 delete mode 100755 healthcheck/nova-compute
 delete mode 100755 healthcheck/nova-conductor
 delete mode 100755 healthcheck/nova-ironic
 delete mode 100755 healthcheck/nova-libvirt
 delete mode 100755 healthcheck/nova-metadata
 delete mode 100755 healthcheck/nova-scheduler
 delete mode 100755 healthcheck/nova-vnc-proxy
 delete mode 100755 healthcheck/octavia-api
 delete mode 100755 healthcheck/octavia-health-manager
 delete mode 100755 healthcheck/octavia-housekeeping
 delete mode 100755 healthcheck/octavia-worker
 delete mode 100755 healthcheck/ovn-bgp-agent
 delete mode 100755 healthcheck/ovn-controller
 delete mode 100755 healthcheck/ovn-dbs
 delete mode 100755 healthcheck/ovn-metadata
 delete mode 100755 healthcheck/placement-api
 delete mode 100644 healthcheck/qdrouterd
 delete mode 100755 healthcheck/rabbitmq
 delete mode 100755 healthcheck/redis
 delete mode 100644 healthcheck/releasenotes/notes/nova-vnc-proxy-ipv6-handling-5d0625f1ab10d13f.yaml
 delete mode 100644 healthcheck/releasenotes/notes/swift-fix-healthchecks-b3a02139230f4258.yaml
 delete mode 100755 healthcheck/swift-account-server
 delete mode 100755 healthcheck/swift-container-server
 delete mode 100755 healthcheck/swift-object-server
 delete mode 100755 healthcheck/swift-proxy
 delete mode 100755 healthcheck/swift-rsync
 delete mode 100644 image-yaml/overcloud-hardened-images-uefi-centos9.yaml
 delete mode 100644 image-yaml/overcloud-hardened-images-uefi-python3.yaml
 delete mode 100644 image-yaml/overcloud-hardened-images-uefi-rhel9.yaml
 delete mode 100644 image-yaml/overcloud-images-centos9.yaml
 delete mode 100644 image-yaml/overcloud-images-python3.yaml
 delete mode 100644 image-yaml/overcloud-images-rhel9.yaml
 delete mode 100644 releasenotes/notes/5.8.0-d1ca2298ba598431.yaml
 delete mode 100644 releasenotes/notes/Add-rotate-fernet-keys-action-a1080bf5fb18413f.yaml
 delete mode 100644 releasenotes/notes/DeployArtifactURLs-Simplified-e3993493022653ab.yaml
 delete mode 100644 releasenotes/notes/accept-glance-image-id-6e8bf439d93b3fb5.yaml
 delete mode 100644 releasenotes/notes/add-check-mode-support-for-hiera-f15fed971d4397f8.yaml
 delete mode 100644 releasenotes/notes/add-generation-of-barbican-simple-crypto-kek-507fd2f21cdcf21b.yaml
 delete mode 100644 releasenotes/notes/add-overcloud-ceph-image-build-yamls-8ad8fe8b013a314e.yaml
 delete mode 100644 releasenotes/notes/add-real-time-kernel-images-cc790c6d7b6229da.yaml
 delete mode 100644 releasenotes/notes/add-rootstackname-on-update-258dbf091fea497e.yaml
 delete mode 100644 releasenotes/notes/adds-ansible-actions-4da45efa8a98cade.yaml
 delete mode 100644 releasenotes/notes/adds-create-container-workflow-77ee4557779563c0.yaml
 delete mode 100644 releasenotes/notes/adds-delete-plan-workflow-d625682fdddd3f48.yaml
 delete mode 100644 releasenotes/notes/adds-generate-fencing-parameters-e2ea121247779db3.yaml
 delete mode 100644 releasenotes/notes/adds-list-plan-workflow-c0c6f91c9460a09a.yaml
 delete mode 100644 releasenotes/notes/allow-upload-big-files-f67ff35fcd166612.yaml
 delete mode 100644 releasenotes/notes/ansible-action-log-20904253f962557f.yaml
 delete mode 100644 releasenotes/notes/append-ironic-staging-drivers-d278905bb1ec0683.yaml
 delete mode 100644 releasenotes/notes/available-roles-workflow-fe81806915124cb6.yaml
 delete mode 100644 releasenotes/notes/bindep-tox-606dbe4ddf68f7a6.yaml
 delete mode 100644 releasenotes/notes/blacklisted_ips_support-f362e008ae1af210.yaml
 delete mode 100644 releasenotes/notes/blacklisted_serverid_config-e079e64e8a04cdb4.yaml
 delete mode 100644 releasenotes/notes/break-up-messages-0c438e658ce32892.yaml
 delete mode 100644 releasenotes/notes/buildah_build-727eb0f35f819731.yaml
 delete mode 100644 releasenotes/notes/caps-fix-f6f8817a48fa5c25.yaml
 delete mode 100644 releasenotes/notes/capture-environment-status-and-logs-3462d6ebbc9ecf2e.yaml
 delete mode 100644 releasenotes/notes/change-odl-healthcheck-uri-84d6dea51b110772.yaml
 delete mode 100644 releasenotes/notes/check-boot-action-548e38d17cf1ad96.yaml
 delete mode 100644 releasenotes/notes/check-flavors-action-59b7f2dd5103ad9d.yaml
 delete mode 100644 releasenotes/notes/check-node-counts-bb80a5cdd8d10475.yaml
 delete mode 100644 releasenotes/notes/check_for_default_tag-09fe34d2ac434890.yaml
 delete mode 100644 releasenotes/notes/check_ovs_upgrade-99cecd6b7bfdcf83.yaml
 delete mode 100644 releasenotes/notes/cinder-and-glance-api-version-bca6acd809d4151c.yaml
 delete mode 100644 releasenotes/notes/config-download-consistent-work-dir-b8a37550c3970722.yaml
 delete mode 100644 releasenotes/notes/config-download-deploy-workflow-55f26302a42cf379.yaml
 delete mode 100644 releasenotes/notes/config-download-dont-use-tmpdirs-3641db9fd687f85e.yaml
 delete mode 100644 releasenotes/notes/config-download-git-repo-9a18681afbfb9136.yaml
 delete mode 100644 releasenotes/notes/config-download-git-repo-commit-msg-9a550daaae1fc55e.yaml
 delete mode 100644 releasenotes/notes/config-download-timeout-7296683716f78022.yaml
 delete mode 100644 releasenotes/notes/config-download-verbosity-ab2e89e169c208a7.yaml
 delete mode 100644 releasenotes/notes/container-image-prepare-modify_only_with_source-d9be8cc7236e7c94.yaml
 delete mode 100644 releasenotes/notes/create-overcloudrc-workflow-e5150b6b0af462f0.yaml
 delete mode 100644 releasenotes/notes/create_get_flattened_parameters_workflow-8c35b813289a5479.yaml
 delete mode 100644 releasenotes/notes/create_heat_capabilities_workbook-091f0ce2ab5fff3a.yaml
 delete mode 100644 releasenotes/notes/default-arch-selection-d5fd2fcdba725dd4.yaml
 delete mode 100644 releasenotes/notes/default-container-prefix-is-openstack-3cd42220d6cdfed0.yaml
 delete mode 100644 releasenotes/notes/delete-stack-a3c0951d9af04a0f.yaml
 delete mode 100644 releasenotes/notes/deployed-server-clear-breakpoint-ee1a984f3366598a.yaml
 delete mode 100644 releasenotes/notes/deployment-status-workflows-7f6ba3b69f805f06.yaml
 delete mode 100644 releasenotes/notes/deployments-per-server-ea747fcff19c884d.yaml
 delete mode 100644 releasenotes/notes/deprecate-list-roles-action-12744cee0e6d70e5.yaml
 delete mode 100644 releasenotes/notes/deprecate-skopeo-image-uploader-a8b8b4b46d7be706.yaml
 delete mode 100644 releasenotes/notes/deprecated-params-list-a4edf6e341520ead.yaml
 delete mode 100644 releasenotes/notes/derive-deployment-parameters-c5e97d3df9bfc114.yaml
 delete mode 100644 releasenotes/notes/derive-parameters-using-scheduler-hints-5bb65bc78c1f6f91.yaml
 delete mode 100644 releasenotes/notes/disable_nouveau-bbaf1263fe43821a.yaml
 delete mode 100644 releasenotes/notes/discover_hosts-f1733234ba32a909.yaml
 delete mode 100644 releasenotes/notes/dont-fail-tripleo-bootstrap-on-package-install-a00cd921b0af7168.yaml
 delete mode 100644 releasenotes/notes/drac-address-d835a529a7c17242.yaml
 delete mode 100644 releasenotes/notes/drop-inventory-config-support-c2132b897da2d290.yaml
 delete mode 100644 releasenotes/notes/enable-ssh-admin-honor-blacklist-f1371554ab1b38f6.yaml
 delete mode 100644 releasenotes/notes/enrich-nodes-json-ironic-port-data-0905da3f7b13d149.yaml
 delete mode 100644 releasenotes/notes/enroll-root-device-a172f98f50706a12.yaml
 delete mode 100644 releasenotes/notes/error-msg-no-baremetal-a583117ecd9836dc.yaml
 delete mode 100644 releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
 delete mode 100644 releasenotes/notes/export_os_nova_api_version-d5d1501306f8013b.yaml
 delete mode 100644 releasenotes/notes/fail-multiple-config-download-executions-bf1f0984cd8af5f0.yaml
 delete mode 100644 releasenotes/notes/fencing-hw-types-fddcdb6bf6d79414.yaml
 delete mode 100644 releasenotes/notes/fix-api-network-rendering-5a65009051a0f464.yaml
 delete mode 100644 releasenotes/notes/fix-call-to-git-clone-action-d13942fc07e8e089.yaml
 delete mode 100644 releasenotes/notes/fix-check-mode-server-deployment-098bcae9e0227c57.yaml
 delete mode 100644 releasenotes/notes/fix-generated-server_certs_key_passphrase-60cba4653109992c.yaml
 delete mode 100644 releasenotes/notes/fix-octavia-image-rpm-install-permissions-846cd6780a527084.yaml
 delete mode 100644 releasenotes/notes/fix-octavia-pub-key-d195fbf1976a8d36.yaml
 delete mode 100644 releasenotes/notes/fix-opendaylight-healthcheck-f9bc1d2e067c4680.yaml
 delete mode 100644 releasenotes/notes/fix-syntax-error-in-octavia-undercloud-role-c02b0c5b0f1ece34.yaml
 delete mode 100644 releasenotes/notes/fix-update-roles-workflow-with-custom-overcloud-names-35404ceae3ac380e.yaml
 delete mode 100644 releasenotes/notes/flatten_parameters-b37065a0f0071523.yaml
 delete mode 100644 releasenotes/notes/format-ansible-deployments-8bc0de3b4dbfa69c.yaml
 delete mode 100644 releasenotes/notes/generate-roles-with-colon-c903826db084b8a6.yaml
 delete mode 100644 releasenotes/notes/git-support-for-deployment-plans-cac4d3746689cbda.yaml
 delete mode 100644 releasenotes/notes/group-os-apply-config-warn-beece0b9fcff74b7.yaml
 delete mode 100644 releasenotes/notes/gui-logging-5413d0d86e618c59.yaml
 delete mode 100644 releasenotes/notes/handle-no-deployment-status-a70a4b950171afbe.yaml
 delete mode 100644 releasenotes/notes/healthcheck-cron-37de4a861e1a1cbf.yaml
 delete mode 100644 releasenotes/notes/honor_trash_output_when_not_using_queue-f7c5a3051f5b90cc.yaml
 delete mode 100644 releasenotes/notes/increase-size-security-hardened-images-3fc4df73a48d4a91.yaml
 delete mode 100644 releasenotes/notes/install-octavia-amphora-image-red-hat-bc8545e36d88f951.yaml
 delete mode 100644 releasenotes/notes/integrate-skydive-b3b569d996c24cc5.yaml
 delete mode 100644 releasenotes/notes/interfaces-cd94c12dd4744c50.yaml
 delete mode 100644 releasenotes/notes/introspection-batch-size-47723bceb0281baf.yaml
 delete mode 100644 releasenotes/notes/ipmi-discovery-72f93156bcaf461d.yaml
 delete mode 100644 releasenotes/notes/ironic-api-version-d2b4ec1474918f12.yaml
 delete mode 100644 releasenotes/notes/ironic-api-version-latest-328a5894677f801d.yaml
 delete mode 100644 releasenotes/notes/ironic-api-version-pike-4264d815385cba7a.yaml
 delete mode 100644 releasenotes/notes/ironic-boot-config-77addfde192cee0f.yaml
 delete mode 100644 releasenotes/notes/ironic-hardware-types-791dad3f75a67454.yaml
 delete mode 100644 releasenotes/notes/ironic-rescue-ce08f432ccdcece4.yaml
 delete mode 100644 releasenotes/notes/ironic-ucs-driver-node-uniqueness-fix-c74110a9728d1023.yaml
 delete mode 100644 releasenotes/notes/jinja2-template-render-raise-extension-87c7ed150a252ff5.yaml
 delete mode 100644 releasenotes/notes/jinja2_include-8bef46285f25ddea.yaml
 delete mode 100644 releasenotes/notes/limit_over_blacklist-3ce81ecf04b09997.yaml
 delete mode 100644 releasenotes/notes/list-roles-from-roles-data-yaml-7ca573169f888bd7.yaml
 delete mode 100644 releasenotes/notes/manage-workflow-2668b50940c10d97.yaml
 delete mode 100644 releasenotes/notes/migration_ssh_key-6e772d18d4d24485.yaml
 delete mode 100644 releasenotes/notes/minor-update-workflow-6106c1a91cb6d029.yaml
 delete mode 100644 releasenotes/notes/mistral-swift-tempurl-action-ce4946a0b76db53c.yaml
 delete mode 100644 releasenotes/notes/more-hw-types-a837145e41409382.yaml
 delete mode 100644 releasenotes/notes/move_redis_vip_to_all_nodes-bdd1c96438d6ed91.yaml
 delete mode 100644 releasenotes/notes/multi_arch_image-3c3730cbba95be19.yaml
 delete mode 100644 releasenotes/notes/no-cisco-46992167cd0ab6d0.yaml
 delete mode 100644 releasenotes/notes/no-classic-drivers-d56f8c3ff15af2c3.yaml
 delete mode 100644 releasenotes/notes/no-verify-registries-215e4df615e441ff.yaml
 delete mode 100644 releasenotes/notes/no_ss-368721c3af17b782.yaml
 delete mode 100644 releasenotes/notes/nodes-json-mac-field-no-longer-valid-6ed035c442c4fc68.yaml
 delete mode 100644 releasenotes/notes/nodes-with-profile-662f2c6cb61c4ac4.yaml
 delete mode 100644 releasenotes/notes/nova_api_healthcheck_metadata_wsgi_change-4a191009d7ef9963.yaml
 delete mode 100644 releasenotes/notes/nova_metadata_config_image-26e727263be52408.yaml
 delete mode 100644 releasenotes/notes/nova_metadata_healthcheck-44a9b0a1f436826a.yaml
 delete mode 100644 releasenotes/notes/nova_novnc_proxy_ssl_healthcheck-f9ad0dffb459ef4b.yaml
 delete mode 100644 releasenotes/notes/nova_remove_nova-consoleauth-95df6d63822ef787.yaml
 delete mode 100644 releasenotes/notes/octavia-amphora-ssh-5dee3678d7b66476.yaml
 delete mode 100644 releasenotes/notes/octavia-fix-certificates-path-and-content-e8acf1e859e75135.yaml
 delete mode 100644 releasenotes/notes/octavia-internal-tls-support-f595ed1c3a1f3353.yaml
 delete mode 100644 releasenotes/notes/octavia-passphrase-285a06885ac735df.yaml
 delete mode 100644 releasenotes/notes/octavia-set-image-owner-id-adb197d5daae54f1.yaml
 delete mode 100644 releasenotes/notes/openstack-heat-agents-31a1a2908745f3bc.yaml
 delete mode 100644 releasenotes/notes/os_net_config_packaging-36b94a08bbb3e11d.yaml
 delete mode 100644 releasenotes/notes/overcloudrc-versionless-keystone-endpoint-9cdc1a4e1341a933.yaml
 delete mode 100644 releasenotes/notes/ovirt-driver-77af6913e280a19e.yaml
 delete mode 100644 releasenotes/notes/ovn-image-prepare-neutron-server-abb60292341b5782.yaml
 delete mode 100644 releasenotes/notes/passwords-79661a3f27a33528.yaml
 delete mode 100644 releasenotes/notes/predeployment-validations-1e8eacd36571d5c9.yaml
 delete mode 100644 releasenotes/notes/prepare-includes-0c9a077369e99619.yaml
 delete mode 100644 releasenotes/notes/profile-17e2650c8da9e8b5.yaml
 delete mode 100644 releasenotes/notes/provide-name-f75b6b61d3d8d693.yaml
 delete mode 100644 releasenotes/notes/qemu_user_id-32d8f17099a6f002.yaml
 delete mode 100644 releasenotes/notes/quote-$@-a3d47106c9b7eeb6.yaml
 delete mode 100644 releasenotes/notes/redfish-550a0e0f0fd4ea41.yaml
 delete mode 100644 releasenotes/notes/reduce-memory-consumption-d7effb68ab63b8c5.yaml
 delete mode 100644 releasenotes/notes/releasenotes/notes/update-lb-mgmt-subnet-to-class-b-1cd832ef08a30c85.yaml
 delete mode 100644 releasenotes/notes/remove-abort-7214f376c9672644.yaml
 delete mode 100644 releasenotes/notes/remove-ironic-staging-drivers-override-ce9776ec030ec02a.yaml
 delete mode 100644 releasenotes/notes/remove-overcloudrc.v3-2118c053035c1439.yaml
 delete mode 100644 releasenotes/notes/remove-skopeo-image-uploader-51e7574cc386a3e9.yaml
 delete mode 100644 releasenotes/notes/remove-skydive-support-1cea22a7419a3b13.yaml
 delete mode 100644 releasenotes/notes/remove-tempest-container-support-596426994bbb5c9d.yaml
 delete mode 100644 releasenotes/notes/resource-class-6fffaaf3b39b36c5.yaml
 delete mode 100644 releasenotes/notes/rm_create_default_deployment_plan-397b259f6f641ab9.yaml
 delete mode 100644 releasenotes/notes/role-config-none-d440bd0dcbb63534.yaml
 delete mode 100644 releasenotes/notes/role-specific-validation-5ea0a31711ced6fe.yaml
 delete mode 100644 releasenotes/notes/select-roles-workflow-01860e1ba8b7b86c.yaml
 delete mode 100644 releasenotes/notes/set-ssh-server-keep-alive-options-071e1b3b570e78a7.yaml
 delete mode 100644 releasenotes/notes/skip-deploy-identifier-d5abb0d4e6af0ecd.yaml
 delete mode 100644 releasenotes/notes/skip-tag-lookup-if-tag-specified-2284c45dc0f87693.yaml
 delete mode 100644 releasenotes/notes/split_off_ceph_containers-e1a66fa39076c2cf.yaml
 delete mode 100644 releasenotes/notes/stack-update-1530096686438046.yaml
 delete mode 100644 releasenotes/notes/stop-octavia-amphora-image-install-5d26e3d37c7b508f.yaml
 delete mode 100644 releasenotes/notes/stop-using-mistral-env-2ed6e17c4cdb9761.yaml
 delete mode 100644 releasenotes/notes/swift-additional-healtchecks-ab8cd9c7562654f3.yaml
 delete mode 100644 releasenotes/notes/switch_to_tripleomaster_registry-bd795a51f4e572c9.yaml
 delete mode 100644 releasenotes/notes/tripleo-bootstrap-721b73d21ade7d6d.yaml
 delete mode 100644 releasenotes/notes/tripleo-container-rm-082aa93d2de1e8bc.yaml
 delete mode 100644 releasenotes/notes/tripleo-container-tag-ec42e64289cb17e2.yaml
 delete mode 100644 releasenotes/notes/tripleo-create-admin-0ce59d13ce2c07f6.yaml
 delete mode 100644 releasenotes/notes/tripleo-deploy-openshift-plan-name-89135e3a68307047.yaml
 delete mode 100644 releasenotes/notes/tripleo-deploy-openshift-playbook-ac8b49a212545c0f.yaml
 delete mode 100644 releasenotes/notes/tripleo-docker-rm-b64297d5f9f42988.yaml
 delete mode 100644 releasenotes/notes/tripleo-module-load-80f7fd8c8dd6a81e.yaml
 delete mode 100644 releasenotes/notes/tripleo-mount-image-e038a7d9d51c4828.yaml
 delete mode 100644 releasenotes/notes/tripleo-ssh-known-hosts-d27c54b0a6f9a028.yaml
 delete mode 100644 releasenotes/notes/undercloud-backup-actions-and-workflow-1d661bba3fb2f974.yaml
 delete mode 100644 releasenotes/notes/update-keystone-utils-bfd14da957d34ec5.yaml
 delete mode 100644 releasenotes/notes/update-params-workflow-b26fd4cc40549537.yaml
 delete mode 100644 releasenotes/notes/update-programming-language-54ded15322426458.yaml
 delete mode 100644 releasenotes/notes/update-ps1-in-rc-files-c710832fc1ee37f5.yaml
 delete mode 100644 releasenotes/notes/update-roles-workflow-00be679eb8e9548c.yaml
 delete mode 100644 releasenotes/notes/use-hostnames-in-inventory-6d1a3572baebf509.yaml
 delete mode 100644 releasenotes/notes/use-tripleo-containers-file-0590a59f56fb3907.yaml
 delete mode 100644 releasenotes/notes/use_trash_output_in_create_admin_via_ssh-dc9cae99934e1fbe.yaml
 delete mode 100644 releasenotes/notes/v3-only-overcloudrc-8439cfed2145341f.yaml
 delete mode 100644 releasenotes/notes/validations-in-workflow-8ce6a053cacece0d.yaml
 delete mode 100644 releasenotes/notes/verify-profiles-a9d075f565bc3df6.yaml
 delete mode 100644 releasenotes/notes/workaround_ssh_known_host_atomic_update-481e0baf3b3d6342.yaml
 delete mode 100644 releasenotes/notes/workflow-config-download-export-d22f3eb958b8c97a.yaml
 delete mode 100644 releasenotes/source/_static/.placeholder
 delete mode 100644 releasenotes/source/conf.py
 delete mode 100644 releasenotes/source/index.rst
 delete mode 100644 releasenotes/source/ocata.rst
 delete mode 100644 releasenotes/source/pike.rst
 delete mode 100644 releasenotes/source/queens.rst
 delete mode 100644 releasenotes/source/rocky.rst
 delete mode 100644 releasenotes/source/stein.rst
 delete mode 100644 releasenotes/source/train.rst
 delete mode 100644 releasenotes/source/unreleased.rst
 delete mode 100644 releasenotes/source/ussuri.rst
 delete mode 100644 releasenotes/source/victoria.rst
 delete mode 100644 releasenotes/source/wallaby.rst
 delete mode 100644 releasenotes/source/zed.rst
 delete mode 100644 requirements.txt
 delete mode 100644 scripts/README-tripleo.sh
 delete mode 100755 scripts/bindep-install
 delete mode 100755 scripts/bootstrap_host_exec
 delete mode 100755 scripts/bootstrap_host_only_eval
 delete mode 100755 scripts/bootstrap_host_only_exec
 delete mode 100755 scripts/containerfile-converter.py
 delete mode 100755 scripts/pull-puppet-modules
 delete mode 100755 scripts/tripleo-build-images
 delete mode 100755 scripts/tripleo-config-download
 delete mode 100755 scripts/tripleo-mount-image
 delete mode 120000 scripts/tripleo-unmount-image
 delete mode 100755 scripts/upload-artifacts
 delete mode 100755 scripts/upload-puppet-modules
 delete mode 120000 scripts/upload-swift-artifacts
 delete mode 100644 setup.cfg
 delete mode 100644 setup.py
 delete mode 100644 test-requirements.txt
 delete mode 100755 tools/check_duplicate_jinja_blocks.sh
 delete mode 100755 tools/releasenotes_tox.sh
 delete mode 100644 tox.ini
 delete mode 100644 tripleo_common/__init__.py
 delete mode 100644 tripleo_common/arch.py
 delete mode 100644 tripleo_common/constants.py
 delete mode 100644 tripleo_common/exception.py
 delete mode 100644 tripleo_common/filters/__init__.py
 delete mode 100644 tripleo_common/filters/capabilities_filter.py
 delete mode 100644 tripleo_common/filters/list.py
 delete mode 100644 tripleo_common/i18n.py
 delete mode 100644 tripleo_common/image/__init__.py
 delete mode 100644 tripleo_common/image/base.py
 delete mode 100644 tripleo_common/image/build.py
 delete mode 100644 tripleo_common/image/builder/__init__.py
 delete mode 100644 tripleo_common/image/builder/base.py
 delete mode 100644 tripleo_common/image/builder/buildah.py
 delete mode 100644 tripleo_common/image/exception.py
 delete mode 100644 tripleo_common/image/image_builder.py
 delete mode 100644 tripleo_common/image/image_export.py
 delete mode 100644 tripleo_common/image/image_uploader.py
 delete mode 100644 tripleo_common/image/kolla_builder.py
 delete mode 100644 tripleo_common/inventories.py
 delete mode 100644 tripleo_common/inventory.py
 delete mode 100644 tripleo_common/releasenotes/notes/automatically-retry-failed-deployments-baf0c701e6d1ad4a.yaml
 delete mode 100644 tripleo_common/releasenotes/notes/convert-docker-params-84dfc6083e88bb52.yaml
 delete mode 100644 tripleo_common/templates/deployment.j2
 delete mode 100644 tripleo_common/templates/deployments.yaml
 delete mode 100644 tripleo_common/templates/heat-config.j2
 delete mode 100644 tripleo_common/templates/host_var_server.j2
 delete mode 100644 tripleo_common/tests/__init__.py
 delete mode 100644 tripleo_common/tests/base.py
 delete mode 100644 tripleo_common/tests/fake_config/__init__.py
 delete mode 100644 tripleo_common/tests/fake_config/fakes.py
 delete mode 100644 tripleo_common/tests/fake_neutron/__init__.py
 delete mode 100644 tripleo_common/tests/fake_neutron/fakes.py
 delete mode 100644 tripleo_common/tests/fake_neutron/stubs.py
 delete mode 100644 tripleo_common/tests/fake_nova/README
 delete mode 100644 tripleo_common/tests/fake_nova/__init__.py
 delete mode 100644 tripleo_common/tests/fake_nova/scheduler/__init__.py
 delete mode 100644 tripleo_common/tests/fake_nova/scheduler/filters.py
 delete mode 100644 tripleo_common/tests/image/__init__.py
 delete mode 100644 tripleo_common/tests/image/builder/__init__.py
 delete mode 100644 tripleo_common/tests/image/builder/test_buildah.py
 delete mode 100644 tripleo_common/tests/image/fakes.py
 delete mode 100644 tripleo_common/tests/image/test_base.py
 delete mode 100644 tripleo_common/tests/image/test_build.py
 delete mode 100644 tripleo_common/tests/image/test_image_builder.py
 delete mode 100644 tripleo_common/tests/image/test_image_export.py
 delete mode 100644 tripleo_common/tests/image/test_image_uploader.py
 delete mode 100644 tripleo_common/tests/image/test_kolla_builder.py
 delete mode 100644 tripleo_common/tests/inventory_data/cell1_dynamic.json
 delete mode 100644 tripleo_common/tests/inventory_data/cell1_static.yaml
 delete mode 100644 tripleo_common/tests/inventory_data/list_overcloud.json
 delete mode 100644 tripleo_common/tests/inventory_data/merged_dynamic.json
 delete mode 100644 tripleo_common/tests/inventory_data/merged_static.yaml
 delete mode 100644 tripleo_common/tests/inventory_data/overcloud_dynamic.json
 delete mode 100644 tripleo_common/tests/inventory_data/overcloud_static.yaml
 delete mode 100644 tripleo_common/tests/inventory_data/undercloud_dynamic.json
 delete mode 100644 tripleo_common/tests/inventory_data/undercloud_dynamic_merged.json
 delete mode 100644 tripleo_common/tests/inventory_data/undercloud_static.yaml
 delete mode 100644 tripleo_common/tests/inventory_data/undercloud_static_merged.yaml
 delete mode 100644 tripleo_common/tests/test_arch.py
 delete mode 100644 tripleo_common/tests/test_filters.py
 delete mode 100644 tripleo_common/tests/test_inventories.py
 delete mode 100644 tripleo_common/tests/test_inventory.py
 delete mode 100644 tripleo_common/tests/test_update.py
 delete mode 100644 tripleo_common/tests/utils/__init__.py
 delete mode 100644 tripleo_common/tests/utils/data/Compute
 delete mode 100644 tripleo_common/tests/utils/data/Controller
 delete mode 100644 tripleo_common/tests/utils/data/config_data.yaml
 delete mode 100644 tripleo_common/tests/utils/data/host_vars/overcloud-controller-0
 delete mode 100644 tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-0
 delete mode 100644 tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-1
 delete mode 100644 tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-2
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-controller-0/ControllerHostEntryDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-controller-0/MyExtraConfigPost
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-controller-0/MyPostConfig
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-controller-0/NetworkDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-0/ComputeHostEntryDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-0/MyExtraConfigPost
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-0/NetworkDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-1/ComputeHostEntryDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-1/MyExtraConfigPost
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-1/NetworkDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-2/AnsibleDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-2/ComputeHostEntryDeployment
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-2/MyExtraConfigPost
 delete mode 100644 tripleo_common/tests/utils/data/overcloud-novacompute-2/NetworkDeployment
 delete mode 100644 tripleo_common/tests/utils/test_ansible.py
 delete mode 100644 tripleo_common/tests/utils/test_config.py
 delete mode 100644 tripleo_common/tests/utils/test_nodes.py
 delete mode 100644 tripleo_common/tests/utils/test_overcloudrc.py
 delete mode 100644 tripleo_common/tests/utils/test_parameters.py
 delete mode 100644 tripleo_common/tests/utils/test_passwords.py
 delete mode 100644 tripleo_common/tests/utils/test_plan.py
 delete mode 100644 tripleo_common/tests/utils/test_process.py
 delete mode 100644 tripleo_common/tests/utils/test_roles.py
 delete mode 100644 tripleo_common/tests/utils/test_stack_parameters.py
 delete mode 100644 tripleo_common/update.py
 delete mode 100644 tripleo_common/utils/__init__.py
 delete mode 100644 tripleo_common/utils/ansible.py
 delete mode 100644 tripleo_common/utils/common.py
 delete mode 100644 tripleo_common/utils/config.py
 delete mode 100644 tripleo_common/utils/heat.py
 delete mode 100644 tripleo_common/utils/image.py
 delete mode 100644 tripleo_common/utils/locks/__init__.py
 delete mode 100644 tripleo_common/utils/locks/base.py
 delete mode 100644
tripleo_common/utils/locks/processlock.py delete mode 100644 tripleo_common/utils/locks/threadinglock.py delete mode 100644 tripleo_common/utils/nodes.py delete mode 100644 tripleo_common/utils/overcloudrc.py delete mode 100644 tripleo_common/utils/parameters.py delete mode 100644 tripleo_common/utils/passwords.py delete mode 100644 tripleo_common/utils/plan.py delete mode 100644 tripleo_common/utils/process.py delete mode 100644 tripleo_common/utils/roles.py delete mode 100644 tripleo_common/utils/stack.py delete mode 100644 tripleo_common/utils/stack_parameters.py delete mode 100644 zuul.d/cross-jobs.yaml delete mode 100644 zuul.d/layout.yaml diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index cc11b498d..000000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = tripleo_common -omit = tripleo_common/tests/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index dbd960eb2..000000000 --- a/.gitignore +++ /dev/null @@ -1,58 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg* -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -cover -.tox -.venv -.stestr/* - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? - -# Files created by releasenotes build -releasenotes/build - -# Playbook retry files -*.retry diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6fe0..000000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 0fc98e471..000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 - hooks: - - id: trailing-whitespace - - id: mixed-line-ending - - id: check-byte-order-marker - - id: check-executables-have-shebangs - - id: check-merge-conflict - - id: debug-statements - - id: check-yaml - files: .*\.(yaml|yml)$ - - repo: https://github.com/pycqa/flake8.git - rev: 3.9.0 - hooks: - - id: flake8 - - repo: https://github.com/openstack-dev/bashate.git - rev: 2.0.0 - hooks: - - id: bashate - entry: bashate --error . 
--ignore=E006,E040,E042 - # Run bashate check for all bash scripts - # Ignores the following rules: - # E006: Line longer than 79 columns (as many scripts use jinja - # templating, this is very difficult) - # E040: Syntax error determined using `bash -n` (as many scripts - # use jinja templating, this will often fail and the syntax - # error will be discovered in execution anyway) - - repo: https://github.com/PyCQA/pylint - rev: pylint-2.7.2 - hooks: - - id: pylint diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 1601ed390..000000000 --- a/.pylintrc +++ /dev/null @@ -1,55 +0,0 @@ -[MESSAGES CONTROL] - -disable = - # TODO(ssbarnea): remove temporary skips adding during initial adoption: - arguments-differ, - attribute-defined-outside-init, - broad-except, - consider-iterating-dictionary, - consider-merging-isinstance, - consider-using-dict-comprehension, - consider-using-in, - consider-using-set-comprehension, - dangerous-default-value, - duplicate-code, - fixme, - global-statement, - import-error, - inconsistent-return-statements, - invalid-name, - missing-class-docstring, - missing-function-docstring, - missing-module-docstring, - no-self-use, - no-value-for-parameter, - protected-access, - raise-missing-from, - redefined-argument-from-local, - redefined-builtin, - redefined-outer-name, - super-init-not-called, - super-with-arguments, - superfluous-parens, - too-few-public-methods, - too-many-ancestors, - too-many-arguments, - too-many-branches, - too-many-instance-attributes, - too-many-lines, - too-many-locals, - too-many-nested-blocks, - too-many-public-methods, - too-many-statements, - unidiomatic-typecheck, - unnecessary-comprehension, - unnecessary-pass, - unsubscriptable-object, - unused-argument, - unused-variable, - useless-object-inheritance, - useless-super-delegation, - wrong-import-order, - wrong-import-position - -[REPORTS] -output-format = colorized diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 095558a0b..000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${TEST_PATH:-./tripleo_common/tests} -top_dir=./ diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 6607a04a6..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - https://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/tripleo diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 54188d010..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -tripleo-common Style Commandments -================================= - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a09..000000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README.rst b/README.rst index b9aac7412..4ee2c5f13 100644 --- a/README.rst +++ b/README.rst @@ -1,20 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/tripleo-common.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. Change things from this point on - -============== -tripleo-common -============== - -A common library for TripleO workflows. - -* Free software: Apache license -* Documentation: https://docs.openstack.org/tripleo-common/latest/ -* Source: http://opendev.org/openstack/tripleo-common -* Bugs: https://bugs.launchpad.net/tripleo-common -* Release notes: https://docs.openstack.org/releasenotes/tripleo-common +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. 
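For anyone scripting the retrieval step the new README describes, a minimal sketch; it assumes a local clone of the retired repository with git on PATH, and the helper name here is purely illustrative:

    import subprocess

    # List the tree as it stood just before the retirement commit,
    # without touching the working copy. HEAD^1 is the commit the
    # README above points at.
    def list_pre_retirement_files(ref="HEAD^1"):
        out = subprocess.run(
            ["git", "ls-tree", "-r", "--name-only", ref],
            check=True, capture_output=True, text=True,
        )
        return out.stdout.splitlines()

    print(len(list_pre_retirement_files()), "files before retirement")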
diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 317a8b4b9..000000000 --- a/bindep.txt +++ /dev/null @@ -1,38 +0,0 @@ -# This file facilitates OpenStack-CI package installation -# before the execution of any tests. -# -# See the following for details: -# - https://docs.openstack.org/infra/bindep/ -# - https://opendev.org/opendev/bindep/ -# -# Even if the role does not make use of this facility, it -# is better to have this file empty, otherwise OpenStack-CI -# will fall back to installing its default packages which -# will potentially be detrimental to the tests executed. - -# The gcc compiler -gcc - -# Base requirements for RPM distros -gcc-c++ [platform:rpm] -git [platform:rpm] -libffi-devel [platform:rpm] -openssl-devel [platform:rpm] -python-devel [(platform:rpm platform:base-py2)] -python2-dnf [(platform:rpm platform:base-py2)] - -# For SELinux -libselinux-python [(platform:rpm platform:base-py2)] -python3-libselinux [(platform:rpm platform:base-py3)] -libsemanage-python [(platform:rpm platform:base-py2)] -python3-libsemanage-python [(platform:rpm platform:base-py3)] - -# Required for compressing collected log files in CI -gzip - -# Required to build language docs -gettext - -# debian requirements (linters) -libffi-dev [platform:dpkg] -libssl-dev [platform:dpkg] diff --git a/container-images/ceph.j2 b/container-images/ceph.j2 deleted file mode 100644 index ab12e5ef5..000000000 --- a/container-images/ceph.j2 +++ /dev/null @@ -1,55 +0,0 @@ -- imagename: "{{ceph_namespace}}/{{ceph_image}}:{{ceph_tag}}" - image_source: ceph - params: - - ContainerCephDaemonImage - services: - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CephExternal - - OS::TripleO::Services::CephMds - - OS::TripleO::Services::CephMgr - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephOSD - - OS::TripleO::Services::CephRgw - - OS::TripleO::Services::CephRbdMirror - -- imagename: "{{ceph_prometheus_namespace}}/{{ceph_prometheus_image}}:{{ceph_prometheus_tag}}" - image_source: prom - params: - - PrometheusContainerImage - services: - - OS::TripleO::Services::CephGrafana - -- imagename: "{{ceph_alertmanager_namespace}}/{{ceph_alertmanager_image}}:{{ceph_alertmanager_tag}}" - image_source: prom - params: - - AlertManagerContainerImage - services: - - OS::TripleO::Services::CephGrafana - -- imagename: "{{ceph_node_exporter_namespace}}/{{ceph_node_exporter_image}}:{{ceph_node_exporter_tag}}" - image_source: prom - params: - - NodeExporterContainerImage - services: - - OS::TripleO::Services::CephGrafana - -- imagename: "{{ceph_grafana_namespace}}/{{ceph_grafana_image}}:{{ceph_grafana_tag}}" - image_source: grafana - params: - - GrafanaContainerImage - services: - - OS::TripleO::Services::CephGrafana - -- imagename: "{{ceph_keepalived_namespace}}/{{ceph_keepalived_image}}:{{ceph_keepalived_tag}}" - image_source: keepalived - params: - - KeepalivedContainerImage - services: - - OS::TripleO::Services::CephIngress - -- imagename: "{{ceph_haproxy_namespace}}/{{ceph_haproxy_image}}:{{ceph_haproxy_tag}}" - image_source: haproxy - params: - - HaproxyContainerImage - services: - - OS::TripleO::Services::CephIngress diff --git a/container-images/container_image_prepare_defaults.yaml b/container-images/container_image_prepare_defaults.yaml deleted file mode 100644 index 52c20d11f..000000000 --- a/container-images/container_image_prepare_defaults.yaml +++ /dev/null @@ -1,61 +0,0 @@ -parameter_defaults: - ContainerImagePrepare: - # Image label which allows the versioned tag to be looked up 
from the - # image. - - tag_from_label: rdo_version - # Uncomment to serve images from the undercloud registry. Images will be - # copied to the undercloud registry during preparation. - # To copy/serve from a different local registry, set the value to - #
<address>:<port> of the registry service. - # push_destination: true - - # Substitutions to be made when processing the template file - # /share/tripleo-common/container-images/tripleo_containers.yaml.j2 - set: - # Container image name components for OpenStack images. - namespace: quay.io/tripleomastercentos9 - name_prefix: openstack- - name_suffix: '' - tag: current-tripleo - rhel_containers: false - - # Substitute neutron images based on driver. Can be 'other' or 'ovn'. - # This is usually set automatically by detecting if ovn services are - # deployed. - neutron_driver: 'ovn' - - # Container image name components for Ceph images. - # Only used if Ceph is deployed. - - # Pass ceph_images: false to avoid pulling the Ceph related images - ceph_namespace: quay.rdoproject.org/tripleomastercentos9 - ceph_image: daemon - ceph_tag: current-ceph - - ceph_prometheus_namespace: quay.io/prometheus - ceph_prometheus_image: prometheus - ceph_prometheus_tag: v2.33.4 - - ceph_alertmanager_namespace: quay.io/prometheus - ceph_alertmanager_image: alertmanager - ceph_alertmanager_tag: v0.23.0 - - ceph_node_exporter_namespace: quay.io/prometheus - ceph_node_exporter_image: node-exporter - ceph_node_exporter_tag: v1.3.1 - - ceph_grafana_namespace: quay.io/ceph - ceph_grafana_image: ceph-grafana - ceph_grafana_tag: 6.7.4 - - ceph_haproxy_namespace: quay.io/ceph - ceph_haproxy_image: haproxy - ceph_haproxy_tag: 2.3 - - ceph_keepalived_namespace: quay.io/ceph - ceph_keepalived_image: keepalived - ceph_keepalived_tag: 2.1.5 - - pushgateway_namespace: quay.io/prometheus - pushgateway_image: pushgateway - pushgateway_tag: v1.4.2 diff --git a/container-images/kolla/barbican-base/sudoers b/container-images/kolla/barbican-base/sudoers deleted file mode 100644 index 8252327e4..000000000 --- a/container-images/kolla/barbican-base/sudoers +++ /dev/null @@ -1 +0,0 @@ -%kolla ALL=(root) NOPASSWD: /usr/bin/chown -R barbican /var/lib/barbican/, /bin/chown -R barbican /var/lib/barbican/ diff --git a/container-images/kolla/base/httpd_setup.sh b/container-images/kolla/base/httpd_setup.sh deleted file mode 100644 index f8921063d..000000000 --- a/container-images/kolla/base/httpd_setup.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# This script performs setup necessary to run the Apache httpd web server. -# It should be sourced rather than executed as environment variables are set. - -# Assume the service runs on top of Apache httpd when user is root. -if [[ "$(whoami)" == 'root' ]]; then - # NOTE(pbourke): httpd will not clean up after itself in some cases which - # results in the container not being able to restart. (bug #1489676, 1557036) - rm -rf /var/run/httpd/* /run/httpd/* /tmp/httpd* - - # CentOS 8 has an issue with mod_ssl which produces an invalid Apache - # configuration in /etc/httpd/conf.d/ssl.conf. This causes the following error - # on startup: - # SSLCertificateFile: file '/etc/pki/tls/certs/localhost.crt' does not exist or is empty - # Work around this by generating certificates manually. - if [[ ! -e /etc/pki/tls/certs/localhost.crt ]]; then - /usr/libexec/httpd-ssl-gencerts - fi -fi diff --git a/container-images/kolla/base/set_configs.py b/container-images/kolla/base/set_configs.py deleted file mode 100644 index c33ce9de5..000000000 --- a/container-images/kolla/base/set_configs.py +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/env python3 - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
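To make the substitution mechanism concrete: the set: map above feeds Jinja2 templates such as ceph.j2 earlier in this patch. A minimal sketch, with values copied from the defaults above:

    from jinja2 import Template

    # Render one `imagename` entry from ceph.j2 with the substitutions
    # the `set:` map provides.
    subs = {
        "ceph_namespace": "quay.rdoproject.org/tripleomastercentos9",
        "ceph_image": "daemon",
        "ceph_tag": "current-ceph",
    }
    entry = Template("{{ceph_namespace}}/{{ceph_image}}:{{ceph_tag}}")
    print(entry.render(**subs))
    # quay.rdoproject.org/tripleomastercentos9/daemon:current-ceph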
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import glob -import grp -import json -import logging -import os -import pwd -import shutil -import sys - - -# TODO(rhallisey): add docstring. -logging.basicConfig() -LOG = logging.getLogger(__name__) -LOG.setLevel(logging.INFO) - - -class ExitingException(Exception): - def __init__(self, message, exit_code=1): - super(ExitingException, self).__init__(message) - self.exit_code = exit_code - - -class ImmutableConfig(ExitingException): - pass - - -class InvalidConfig(ExitingException): - pass - - -class MissingRequiredSource(ExitingException): - pass - - -class UserNotFound(ExitingException): - pass - - -class ConfigFileBadState(ExitingException): - pass - - -class ConfigFile(object): - - def __init__(self, source, dest, owner=None, perm=None, optional=False, - preserve_properties=False, merge=False): - self.source = source - self.dest = dest - self.owner = owner - self.perm = perm - self.optional = optional - self.merge = merge - self.preserve_properties = preserve_properties - - def __str__(self): - return '<ConfigFile source: {} dest: {}>'.format(self.source, - self.dest) - - def _copy_file(self, source, dest): - self._delete_path(dest) - # dest ending with / means copy into the folder - LOG.info('Copying %s to %s', source, dest) - if self.merge and self.preserve_properties and os.path.islink(source): - link_target = os.readlink(source) - os.symlink(link_target, dest) - else: - shutil.copy(source, dest) - self._set_properties(source, dest) - - def _merge_directories(self, source, dest): - if os.path.isdir(source): - if os.path.lexists(dest) and not os.path.isdir(dest): - self._delete_path(dest) - if not os.path.isdir(dest): - LOG.info('Creating directory %s', dest) - os.makedirs(dest) - self._set_properties(source, dest) - - dir_content = os.listdir(source) - for to_copy in dir_content: - self._merge_directories(os.path.join(source, to_copy), - os.path.join(dest, to_copy)) - else: - self._copy_file(source, dest) - - def _delete_path(self, path): - if not os.path.lexists(path): - return - LOG.info('Deleting %s', path) - if os.path.isdir(path): - shutil.rmtree(path) - else: - os.remove(path) - - def _create_parent_dirs(self, path): - parent_path = os.path.dirname(path) - if not os.path.exists(parent_path): - os.makedirs(parent_path) - - def _set_properties(self, source, dest): - if self.preserve_properties: - self._set_properties_from_file(source, dest) - else: - self._set_properties_from_conf(dest) - - def _set_properties_from_file(self, source, dest): - shutil.copystat(source, dest) - stat = os.stat(source) - os.chown(dest, stat.st_uid, stat.st_gid) - - def _set_properties_from_conf(self, path): - config = {'permissions': - [{'owner': self.owner, 'path': path, 'perm': self.perm}]} - handle_permissions(config) - - def copy(self): - - sources = glob.glob(self.source) - - if not self.optional and not sources: - raise MissingRequiredSource('%s file is not found' % self.source) - # skip when there are no sources and the file is optional - if self.optional and not sources: - return - - for source in sources: - dest = self.dest - # dest ending with / means copy into the folder, - # otherwise copy the source to dest
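The ConfigFile class above is driven by a JSON document; here is a minimal example of the shape that validate_config (just below) accepts. The command and paths are illustrative, not taken from any real service definition:

    import json

    # "command" is required; each config_files entry needs source/dest
    # plus either owner+perm or preserve_properties. A dest ending with
    # the path separator means "copy into this directory".
    example = {
        "command": "/usr/sbin/httpd -DFOREGROUND",
        "config_files": [{
            "source": "/var/lib/kolla/config_files/src/*",
            "dest": "/",
            "merge": True,
            "preserve_properties": True,
        }],
        "permissions": [{
            "path": "/var/log/example",
            "owner": "example:kolla",
            "recurse": True,
        }],
    }
    print(json.dumps(example, indent=2))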
- if dest.endswith(os.sep): - dest = os.path.join(dest, os.path.basename(source)) - if not self.merge: - self._delete_path(dest) - self._create_parent_dirs(dest) - try: - self._merge_directories(source, dest) - except OSError: - # If a source is tried to merge with a read-only mount, it - # may throw an OSError. Because we don't print the source or - # dest anywhere, let's catch the exception and log a better - # message to help with tracking down the issue. - LOG.error('Unable to merge %s with %s', source, dest) - raise - - def _cmp_file(self, source, dest): - # check exsit - if (os.path.exists(source) and - not self.optional and - not os.path.exists(dest)): - return False - # check content - with open(source) as f1, open(dest) as f2: - if f1.read() != f2.read(): - LOG.error('The content of source file(%s) and' - ' dest file(%s) are not equal.', source, dest) - return False - # check perm - file_stat = os.stat(dest) - actual_perm = oct(file_stat.st_mode)[-4:] - if self.perm != actual_perm: - LOG.error('Dest file does not have expected perm: %s, actual: %s', - self.perm, actual_perm) - return False - # check owner - desired_user, desired_group = user_group(self.owner) - actual_user = pwd.getpwuid(file_stat.st_uid) - if actual_user.pw_name != desired_user: - LOG.error('Dest file does not have expected user: %s,' - ' actual: %s ', desired_user, actual_user.pw_name) - return False - actual_group = grp.getgrgid(file_stat.st_gid) - if actual_group.gr_name != desired_group: - LOG.error('Dest file does not have expected group: %s,' - ' actual: %s ', desired_group, actual_group.gr_name) - return False - return True - - def _cmp_dir(self, source, dest): - for root, dirs, files in os.walk(source): - for dir_ in dirs: - full_path = os.path.join(root, dir_) - dest_full_path = os.path.join(dest, os.path.relpath(source, - full_path)) - dir_stat = os.stat(dest_full_path) - actual_perm = oct(dir_stat.st_mode)[-4:] - if self.perm != actual_perm: - LOG.error('Dest dir does not have expected perm: %s,' - ' actual %s', self.perm, actual_perm) - return False - for file_ in files: - full_path = os.path.join(root, file_) - dest_full_path = os.path.join(dest, os.path.relpath(source, - full_path)) - if not self._cmp_file(full_path, dest_full_path): - return False - return True - - def check(self): - bad_state_files = [] - sources = glob.glob(self.source) - - if not sources and not self.optional: - raise MissingRequiredSource('%s file is not found' % self.source) - if self.optional and not sources: - return - - for source in sources: - dest = self.dest - # dest endswith / means copy the into folder, - # otherwise means copy the source to dest - if dest.endswith(os.sep): - dest = os.path.join(dest, os.path.basename(source)) - if os.path.isdir(source) and not self._cmp_dir(source, dest): - bad_state_files.append(source) - elif not self._cmp_file(source, dest): - bad_state_files.append(source) - if len(bad_state_files) != 0: - msg = 'Following files are in bad state: %s' % bad_state_files - raise ConfigFileBadState(msg) - - -def validate_config(config): - required_keys = {'source', 'dest'} - - if 'command' not in config: - raise InvalidConfig('Config is missing required "command" key') - - # Validate config sections - for data in config.get('config_files', list()): - # Verify required keys exist. 
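The ownership and permission probes _cmp_file performs above reduce to a few stat calls; as a standalone sketch:

    import grp
    import os
    import pwd

    # Mirror _cmp_file: last four octal digits of st_mode, plus the
    # symbolic user and group names.
    def describe(path):
        st = os.stat(path)
        return {
            "perm": oct(st.st_mode)[-4:],
            "user": pwd.getpwuid(st.st_uid).pw_name,
            "group": grp.getgrgid(st.st_gid).gr_name,
        }

    print(describe("/etc/hosts"))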
- if not set(data.keys()) >= required_keys: - message = 'Config is missing required keys: %s' % required_keys - raise InvalidConfig(message) - if ('owner' not in data or 'perm' not in data) \ - and not data.get('preserve_properties', False): - raise InvalidConfig( - 'Config needs preserve_properties or owner and perm') - - -def validate_source(data): - source = data.get('source') - - # Only check existence if no wildcard found - if '*' not in source: - if not os.path.exists(source): - if data.get('optional'): - LOG.info("%s does not exist, but is not required", source) - return False - raise MissingRequiredSource( - "The source to copy does not exist: %s" % source) - - return True - - -def load_config(): - def load_from_env(): - config_raw = os.environ.get("KOLLA_CONFIG") - if config_raw is None: - return None - - # Attempt to read config - try: - return json.loads(config_raw) - except ValueError: - raise InvalidConfig('Invalid json for Kolla config') - - def load_from_file(): - config_file = os.environ.get("KOLLA_CONFIG_FILE") - if not config_file: - config_file = '/var/lib/kolla/config_files/config.json' - LOG.info("Loading config file at %s", config_file) - - # Attempt to read config file - with open(config_file) as f: - try: - return json.load(f) - except ValueError: - raise InvalidConfig( - "Invalid json file found at %s" % config_file) - except IOError as e: - raise InvalidConfig( - "Could not read file %s: %r" % (config_file, e)) - - config = load_from_env() - if config is None: - config = load_from_file() - - LOG.info('Validating config file') - validate_config(config) - return config - - -def copy_config(config): - if 'config_files' in config: - LOG.info('Copying service configuration files') - for data in config['config_files']: - config_file = ConfigFile(**data) - config_file.copy() - else: - LOG.debug('No files to copy found in config') - - LOG.info('Writing out command to execute') - LOG.debug("Command is: %s", config['command']) - # The value from the 'command' key will be written to '/run_command' - cmd = '/run_command' - with open(cmd, 'w+') as f: - f.write(config['command']) - # Make sure the generated file is readable by all users - try: - os.chmod(cmd, 0o644) - except OSError: - LOG.exception('Failed to set permission of %s to 0o644', cmd) - - -def user_group(owner): - if ':' in owner: - user, group = owner.split(':', 1) - if not group: - group = user - else: - user, group = owner, owner - return user, group - - -def handle_permissions(config): - for permission in config.get('permissions', list()): - path = permission.get('path') - owner = permission.get('owner') - recurse = permission.get('recurse', False) - perm = permission.get('perm') - - desired_user, desired_group = user_group(owner) - uid = pwd.getpwnam(desired_user).pw_uid - gid = grp.getgrnam(desired_group).gr_gid - - def set_perms(path, uid, gid, perm): - LOG.info('Setting permission for %s', path) - if not os.path.exists(path): - LOG.warning('%s does not exist', path) - return - - try: - os.chown(path, uid, gid) - except OSError: - LOG.exception('Failed to change ownership of %s to %s:%s', - path, uid, gid) - - if perm: - # NOTE(Jeffrey4l): py3 need '0oXXX' format for octal literals, - # and py2 support such format too. 
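The note above about octal literals deserves a standalone illustration; this is exactly the normalization the following lines apply:

    # A four-character perm string without an 'o' ('0644') is rewritten
    # to Python 3 octal-literal form before parsing.
    perm = "0644"
    if len(perm) == 4 and perm[1] != "o":
        perm = perm[:1] + "o" + perm[1:]
    print(perm, int(perm, base=0))  # 0o644 420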
- if len(perm) == 4 and perm[1] != 'o': - perm = ''.join([perm[:1], 'o', perm[1:]]) - perm = int(perm, base=0) - - try: - os.chmod(path, perm) - except OSError: - LOG.exception('Failed to set permission of %s to %s', - path, perm) - - for dest in glob.glob(path): - set_perms(dest, uid, gid, perm) - if recurse and os.path.isdir(dest): - for root, dirs, files in os.walk(dest): - for dir_ in dirs: - set_perms(os.path.join(root, dir_), uid, gid, perm) - for file_ in files: - set_perms(os.path.join(root, file_), uid, gid, perm) - - -def execute_config_strategy(config): - config_strategy = os.environ.get("KOLLA_CONFIG_STRATEGY") - LOG.info("Kolla config strategy set to: %s", config_strategy) - if config_strategy == "COPY_ALWAYS": - copy_config(config) - handle_permissions(config) - elif config_strategy == "COPY_ONCE": - if os.path.exists('/configured'): - raise ImmutableConfig( - "The config strategy prevents copying new configs", - exit_code=0) - copy_config(config) - handle_permissions(config) - os.mknod('/configured') - else: - raise InvalidConfig('KOLLA_CONFIG_STRATEGY is not set properly') - - -def execute_config_check(config): - for data in config['config_files']: - config_file = ConfigFile(**data) - config_file.check() - - -def main(): - try: - parser = argparse.ArgumentParser() - parser.add_argument('--check', - action='store_true', - required=False, - help='Check whether the configs changed') - args = parser.parse_args() - config = load_config() - - if args.check: - execute_config_check(config) - else: - execute_config_strategy(config) - except ExitingException as e: - LOG.error("%s: %s", e.__class__.__name__, e) - return e.exit_code - except Exception: - LOG.exception('Unexpected error:') - return 2 - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/container-images/kolla/base/start.sh b/container-images/kolla/base/start.sh deleted file mode 100644 index 975fec289..000000000 --- a/container-images/kolla/base/start.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -o errexit -set -o xtrace - -# Processing /var/lib/kolla/config_files/config.json as root. This is necessary -# to permit certain files to be controlled by the root user which should -# not be writable by the dropped-privileged user, especially /run_command -sudo -E kolla_set_configs -CMD=$(cat /run_command) -ARGS="" - -if [[ ! "${!KOLLA_SKIP_EXTEND_START[@]}" ]]; then - # Run additional commands if present - . kolla_extend_start -fi - -echo "Running command: '${CMD}${ARGS:+ $ARGS}'" -umask "${TRIPLEO_KOLLA_UMASK:-0022}" -exec ${CMD} ${ARGS} diff --git a/container-images/kolla/base/sudoers b/container-images/kolla/base/sudoers deleted file mode 100644 index 61e2d7c0f..000000000 --- a/container-images/kolla/base/sudoers +++ /dev/null @@ -1,18 +0,0 @@ -# The idea here is a container service adds their UID to the kolla group -# via usermod -a -G kolla . Then the kolla_start may run -# kolla_set_configs via sudo as the root user which is necessary to protect -# the immutability of the container - -# anyone in the kolla group may sudo -E (set the environment) -Defaults: %kolla setenv - -# root may run any commands via sudo as the network seervice user. 
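Stepping back to load_config above: it resolves its input in a fixed order, inline JSON first. A small demonstration of that precedence:

    import json
    import os

    # KOLLA_CONFIG (inline JSON) wins; otherwise the file named by
    # KOLLA_CONFIG_FILE, defaulting to
    # /var/lib/kolla/config_files/config.json.
    os.environ["KOLLA_CONFIG"] = json.dumps({"command": "/bin/true"})
    raw = os.environ.get("KOLLA_CONFIG")
    config = json.loads(raw) if raw is not None else None
    print(config)  # {'command': '/bin/true'}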
This is -# neededfor database migrations of existing services which have not been -# converted to run as a non-root user, but instead do that via sudo -E glance -root ALL=(ALL) ALL - -# anyone in the kolla group may run /usr/local/bin/kolla_set_configs as the -# root user via sudo without password confirmation -%kolla ALL=(root) NOPASSWD: /usr/local/bin/kolla* - -#includedir /etc/sudoers.d diff --git a/container-images/kolla/base/uid_gid_manage.sh b/container-images/kolla/base/uid_gid_manage.sh deleted file mode 100755 index 81c7d47ad..000000000 --- a/container-images/kolla/base/uid_gid_manage.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This script maintains compatibility when upgrading kolla images to the -# TCIB images. To allow containers reading configuration files, we need to -# maintain the same UIDs/GIDs for now until we update file permissions during -# update/upgrade tasks. -# -# Usage: -# ./uid_gid_manage.sh qemu nova -# -# Note: order of args is maintained during the creation. -# - -set -o errexit -set -o xtrace - -[ -z $1 ] && echo "Argument missing: name of user to create" && exit 1 -_USERS_TO_CREATE=$@ - -declare -A _SUPPORTED_USERS -# This comes from kolla/common/config.py. -# Format: -# Note: if homedir isn't specified, extra groups aren't supported -_SUPPORTED_USERS['aodh']='aodh 42402 42402 /var/lib/aodh kolla' -_SUPPORTED_USERS['barbican']='barbican 42403 42403 /var/lib/barbican kolla,nfast' -_SUPPORTED_USERS['ceilometer']='ceilometer 42405 42405 /var/lib/ceilometer kolla' -_SUPPORTED_USERS['cinder']='cinder 42407 42407 /var/lib/cinder kolla' -_SUPPORTED_USERS['cloud-admin']='cloud-admin 42401 42401 /home/cloud-admin' -_SUPPORTED_USERS['collectd']='collectd 42409 42409 /var/lib/collectd kolla' -_SUPPORTED_USERS['designate']='designate 42411 42411 /var/lib/designate kolla' -_SUPPORTED_USERS['etcd']='etcd 42413 42413 /var/lib/etcd kolla' -_SUPPORTED_USERS['frrvty']='frrvty 42483 42483' -_SUPPORTED_USERS['frr']='frr 42484 42484 /var/run/frr kolla,frrvty' -_SUPPORTED_USERS['glance']='glance 42415 42415 /var/lib/glance kolla' -_SUPPORTED_USERS['gnocchi']='gnocchi 42416 42416 /var/lib/gnocchi kolla' -_SUPPORTED_USERS['haproxy']='haproxy 42454 42454 /var/lib/haproxy kolla' -_SUPPORTED_USERS['heat']='heat 42418 42418 /var/lib/heat kolla' -_SUPPORTED_USERS['horizon']='horizon 42420 42420 /var/lib/horizon kolla' -_SUPPORTED_USERS['hugetlbfs']='hugetlbfs 42477 42477' -_SUPPORTED_USERS['ironic']='ironic 42422 42422 /var/lib/ironic kolla' -_SUPPORTED_USERS['ironic-inspector']='ironic-inspector 42461 42461 /var/lib/ironic-inspector kolla' -_SUPPORTED_USERS['keystone']='keystone 42425 42425 /var/lib/keystone kolla' -_SUPPORTED_USERS['kolla']='kolla 42400 42400' -_SUPPORTED_USERS['libvirt']='libvirt 42473 42473' -_SUPPORTED_USERS['manila']='manila 42429 42429 /var/lib/manila kolla' -_SUPPORTED_USERS['memcached']='memcached 42457 42457 /run/memcache kolla' -_SUPPORTED_USERS['mysql']='mysql 42434 42434 /var/lib/mysql kolla' 
-_SUPPORTED_USERS['neutron']='neutron 42435 42435 /var/lib/neutron kolla' -_SUPPORTED_USERS['nfast']='nfast 42481 42481' -_SUPPORTED_USERS['nova']='nova 42436 42436 /var/lib/nova qemu,libvirt,tss,kolla' -_SUPPORTED_USERS['octavia']='octavia 42437 42437 /var/lib/octavia kolla' -_SUPPORTED_USERS['openvswitch']='openvswitch 42476 42476' -_SUPPORTED_USERS['ovn-bgp']='ovn-bgp 42486 42486 /var/lib/ovn-bgp kolla' -_SUPPORTED_USERS['placement']='placement 42482 42482 /var/lib/placement kolla' -_SUPPORTED_USERS['qdrouterd']='qdrouterd 42465 42465 /var/lib/qdrouterd kolla' -_SUPPORTED_USERS['qemu']='qemu 107 107' -_SUPPORTED_USERS['rabbitmq']='rabbitmq 42439 42439 /var/lib/rabbitmq kolla' -_SUPPORTED_USERS['redis']='redis 42460 42460 /run/redis kolla' -_SUPPORTED_USERS['swift']='swift 42445 42445 /var/lib/swift kolla' -_SUPPORTED_USERS['tempest']='tempest 42480 42480 /var/lib/tempest kolla' -_SUPPORTED_USERS['tss']='tss 59 59' - -for _USER_TO_CREATE in $_USERS_TO_CREATE; do - # Initialize computed args - _EXTRA_GROUPS_ARG= - _EXTRA_PERMS= - _HOME_ARGS= - - _NAME=$(echo ${_SUPPORTED_USERS[$_USER_TO_CREATE]} | awk '{ print $1 }') - _UID=$(echo ${_SUPPORTED_USERS[$_USER_TO_CREATE]} | awk '{ print $2 }') - _GID=$(echo ${_SUPPORTED_USERS[$_USER_TO_CREATE]} | awk '{ print $3 }') - _HOME_DIR=$(echo ${_SUPPORTED_USERS[$_USER_TO_CREATE]} | awk '{ print $4 }') - _EXTRA_GROUPS=$(echo ${_SUPPORTED_USERS[$_USER_TO_CREATE]} | awk '{ print $5 }') - - # User was not found, we fail - if [[ "$_NAME" != "$_USER_TO_CREATE" ]]; then - echo "User ${_USER_TO_CREATE} was not found in the supported list" - exit 1 - fi - - if [[ ! -z $_EXTRA_GROUPS ]]; then - _EXTRA_GROUPS_ARG="--groups $_EXTRA_GROUPS" - fi - - # Some users don't need a home directory - if [[ -z $_HOME_DIR ]]; then - _HOME_ARGS="-M" - else - _HOME_ARGS="-m --home $_HOME_DIR" - fi - - if id -g $_NAME 2>/dev/null; then - _GROUPADD_CMD="groupmod --gid $_GID $_NAME" - else - _GROUPADD_CMD="groupadd --gid $_GID $_NAME" - fi - - if id $_NAME 2>/dev/null; then - # -M argument doesn't exist with usermod - if [[ -z $_HOME_DIR ]]; then - _HOME_ARGS= - # usermod doesn't guaranty the home directory permissions (best effort) - else - _EXTRA_PERMS="&& mkdir -p $_HOME_DIR && chown -R $_UID:$_GID $_HOME_DIR" - fi - # --append only exists with usermod - [[ ! 
-z $_EXTRA_GROUPS_ARG ]] && _EXTRA_GROUPS_ARG="--append $_EXTRA_GROUPS_ARG" - _USERADD_CMD="usermod ${_HOME_ARGS} --gid $_GID --uid $_UID ${_EXTRA_GROUPS_ARG} $_NAME ${_EXTRA_PERMS}" - else - _USERADD_CMD="useradd -l ${_HOME_ARGS} --shell /usr/sbin/nologin --uid $_UID --gid $_GID ${_EXTRA_GROUPS_ARG} $_NAME" - fi - eval $_GROUPADD_CMD - eval $_USERADD_CMD -done diff --git a/container-images/kolla/cinder-backup/cinder-backup-sudoers b/container-images/kolla/cinder-backup/cinder-backup-sudoers deleted file mode 100644 index 0c76a2f10..000000000 --- a/container-images/kolla/cinder-backup/cinder-backup-sudoers +++ /dev/null @@ -1 +0,0 @@ -%kolla ALL=(root) NOPASSWD: /bin/chown -R cinder\:kolla /var/lib/cinder diff --git a/container-images/kolla/cinder-backup/extend_start.sh b/container-images/kolla/cinder-backup/extend_start.sh deleted file mode 100644 index 73a941ed4..000000000 --- a/container-images/kolla/cinder-backup/extend_start.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -if [[ $(stat -c %U:%G /var/lib/cinder) != "cinder:kolla" ]]; then - sudo chown -R cinder:kolla /var/lib/cinder -fi diff --git a/container-images/kolla/cinder-volume/cinder-volume-sudoers b/container-images/kolla/cinder-volume/cinder-volume-sudoers deleted file mode 100644 index 0c76a2f10..000000000 --- a/container-images/kolla/cinder-volume/cinder-volume-sudoers +++ /dev/null @@ -1 +0,0 @@ -%kolla ALL=(root) NOPASSWD: /bin/chown -R cinder\:kolla /var/lib/cinder diff --git a/container-images/kolla/cinder-volume/extend_start.sh b/container-images/kolla/cinder-volume/extend_start.sh deleted file mode 100644 index 73a941ed4..000000000 --- a/container-images/kolla/cinder-volume/extend_start.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -if [[ $(stat -c %U:%G /var/lib/cinder) != "cinder:kolla" ]]; then - sudo chown -R cinder:kolla /var/lib/cinder -fi diff --git a/container-images/kolla/glance-api/extend_start.sh b/container-images/kolla/glance-api/extend_start.sh deleted file mode 100644 index 3d8d7f512..000000000 --- a/container-images/kolla/glance-api/extend_start.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases -# of the KOLLA_BOOTSTRAP variable being set, including empty. -if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then - glance-manage db_sync - glance-manage db_load_metadefs - exit 0 -fi diff --git a/container-images/kolla/horizon/extend_start.sh b/container-images/kolla/horizon/extend_start.sh deleted file mode 100644 index e4feae2d2..000000000 --- a/container-images/kolla/horizon/extend_start.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash - -set -o errexit - -FORCE_GENERATE="${FORCE_GENERATE}" -HASH_PATH=/var/lib/kolla/.settings.md5sum.txt -MANAGE_PY="/usr/bin/python3 /usr/bin/manage.py" -PYTHON_VERSION=$(python3 --version | awk '{print $2}' | awk -F'.' '{print $1"."$2}') -SITE_PACKAGES="/usr/lib/python${PYTHON_VERSION}/site-packages" - -if [[ -f /etc/openstack-dashboard/custom_local_settings ]]; then - CUSTOM_SETTINGS_FILE="${SITE_PACKAGES}/openstack_dashboard/local/custom_local_settings.py" - if [[ ! -L ${CUSTOM_SETTINGS_FILE} ]]; then - ln -s /etc/openstack-dashboard/custom_local_settings ${CUSTOM_SETTINGS_FILE} - fi -fi - -# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases -# of the KOLLA_BOOTSTRAP variable being set, including empty. -if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then - $MANAGE_PY migrate --noinput - exit 0 -fi - -function config_dashboard { - ENABLE=$1 - SRC=$2 - DEST=$3 - if [[ ! 
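Several extend_start scripts in this patch share the bash idiom ${!VAR[@]}, which tests whether a variable is set at all, even when empty. The Python equivalent makes the distinction explicit:

    import os

    # "set, even if empty" is membership, not truthiness.
    os.environ["KOLLA_BOOTSTRAP"] = ""           # set, but empty
    print("KOLLA_BOOTSTRAP" in os.environ)       # True: bootstrap branch runs
    print(bool(os.environ["KOLLA_BOOTSTRAP"]))   # False: emptiness is irrelevant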
-f ${SRC} ]]; then - echo "WARNING: ${SRC} is required" - elif [[ "${ENABLE}" == "yes" ]] && [[ ! -f "${DEST}" ]]; then - cp -a "${SRC}" "${DEST}" - FORCE_GENERATE="yes" - elif [[ "${ENABLE}" != "yes" ]] && [[ -f "${DEST}" ]]; then - # remove pyc pyo files too - rm -f "${DEST}" "${DEST}c" "${DEST}o" - FORCE_GENERATE="yes" - fi -} - -function config_designate_dashboard { - for file in ${SITE_PACKAGES}/designatedashboard/enabled/_*[^__].py; do - config_dashboard "${ENABLE_DESIGNATE}" \ - "${SITE_PACKAGES}/designatedashboard/enabled/${file##*/}" \ - "${SITE_PACKAGES}/openstack_dashboard/local/enabled/${file##*/}" - done -} - -function config_heat_dashboard { - for file in ${SITE_PACKAGES}/heat_dashboard/enabled/_*[^__].py; do - config_dashboard "${ENABLE_HEAT}" \ - "${SITE_PACKAGES}/heat_dashboard/enabled/${file##*/}" \ - "${SITE_PACKAGES}/openstack_dashboard/local/enabled/${file##*/}" - done - - config_dashboard "${ENABLE_HEAT}" \ - "${SITE_PACKAGES}/heat_dashboard/conf/heat_policy.json" \ - "/etc/openstack-dashboard/heat_policy.json" -} - -function config_ironic_dashboard { - for file in ${SITE_PACKAGES}/ironic_ui/enabled/_*[^__].py; do - config_dashboard "${ENABLE_IRONIC}" \ - "${SITE_PACKAGES}/ironic_ui/enabled/${file##*/}" \ - "${SITE_PACKAGES}/openstack_dashboard/local/enabled/${file##*/}" - done -} - -function config_manila_ui { - for file in ${SITE_PACKAGES}/manila_ui/local/enabled/_*[^__].py; do - config_dashboard "${ENABLE_MANILA}" \ - "${SITE_PACKAGES}/manila_ui/local/enabled/${file##*/}" \ - "${SITE_PACKAGES}/openstack_dashboard/local/enabled/${file##*/}" - done -} - -function config_octavia_dashboard { - config_dashboard "${ENABLE_OCTAVIA}" \ - "${SITE_PACKAGES}/octavia_dashboard/enabled/_1482_project_load_balancer_panel.py" \ - "${SITE_PACKAGES}/openstack_dashboard/local/enabled/_1482_project_load_balancer_panel.py" -} - -# Regenerate the compressed javascript and css if any configuration files have -# changed. Use a static modification date when generating the tarball -# so that we only trigger on content changes. -function settings_bundle { - tar -cf- --mtime=1970-01-01 \ - /etc/openstack-dashboard/local_settings \ - /etc/openstack-dashboard/custom_local_settings \ - /etc/openstack-dashboard/local_settings.d 2> /dev/null -} - -function settings_changed { - changed=1 - - if [[ ! -f $HASH_PATH ]] || ! settings_bundle | md5sum -c --status $HASH_PATH || [[ $FORCE_GENERATE == yes ]]; then - changed=0 - fi - - return ${changed} -} - -config_designate_dashboard -config_heat_dashboard -config_ironic_dashboard -config_manila_ui -config_octavia_dashboard - -if settings_changed; then - ${MANAGE_PY} collectstatic --noinput --clear - ${MANAGE_PY} compress --force - settings_bundle | md5sum > $HASH_PATH -fi - -if [[ -f ${SITE_PACKAGES}/openstack_dashboard/local/.secret_key_store ]] && [[ $(stat -c %U ${SITE_PACKAGES}/openstack_dashboard/local/.secret_key_store) != "horizon" ]]; then - chown horizon ${SITE_PACKAGES}/openstack_dashboard/local/.secret_key_store -fi - -. /usr/local/bin/kolla_httpd_setup diff --git a/container-images/kolla/iscsid/extend_start.sh b/container-images/kolla/iscsid/extend_start.sh deleted file mode 100644 index 78f2317d2..000000000 --- a/container-images/kolla/iscsid/extend_start.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# check if unique iSCSI initiator name already exists -if [[ ! 
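The horizon change detection above tars the settings inputs with a fixed mtime and compares an md5 digest. A simplified sketch of the same idea, hashing file contents directly instead of a tarball:

    import hashlib

    def settings_digest(paths):
        h = hashlib.md5()
        for p in sorted(paths):
            try:
                with open(p, "rb") as f:
                    h.update(f.read())
            except FileNotFoundError:
                pass  # optional input, like the 2>/dev/null above
        return h.hexdigest()

    # Regenerate static assets only when the digest changes.
    print(settings_digest(["/etc/openstack-dashboard/local_settings"]))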
-f /etc/iscsi/initiatorname.iscsi ]]; then - echo "Generating new iSCSI initiator name" - echo InitiatorName=$(/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi -fi diff --git a/container-images/kolla/keystone/extend_start.sh b/container-images/kolla/keystone/extend_start.sh deleted file mode 100644 index 98571ebc6..000000000 --- a/container-images/kolla/keystone/extend_start.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Create log dir for Keystone logs -KEYSTONE_LOG_DIR="/var/log/keystone" -if [[ ! -d "${KEYSTONE_LOG_DIR}" ]]; then - mkdir -p ${KEYSTONE_LOG_DIR} -fi -if [[ $(stat -c %U:%G ${KEYSTONE_LOG_DIR}) != "keystone:kolla" ]]; then - chown keystone:kolla ${KEYSTONE_LOG_DIR} -fi -if [ ! -f "${KEYSTONE_LOG_DIR}/keystone.log" ]; then - touch ${KEYSTONE_LOG_DIR}/keystone.log -fi -if [[ $(stat -c %U:%G ${KEYSTONE_LOG_DIR}/keystone.log) != "keystone:keystone" ]]; then - chown keystone:keystone ${KEYSTONE_LOG_DIR}/keystone.log -fi -if [[ $(stat -c %a ${KEYSTONE_LOG_DIR}) != "755" ]]; then - chmod 755 ${KEYSTONE_LOG_DIR} -fi - -EXTRA_KEYSTONE_MANAGE_ARGS=${EXTRA_KEYSTONE_MANAGE_ARGS-} -# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases -# of the KOLLA_BOOTSTRAP variable being set, including empty. -if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then - sudo -H -u keystone keystone-manage ${EXTRA_KEYSTONE_MANAGE_ARGS} db_sync - exit 0 -fi - -. /usr/local/bin/kolla_httpd_setup - -ARGS="-DFOREGROUND" diff --git a/container-images/kolla/mariadb/extend_start.sh b/container-images/kolla/mariadb/extend_start.sh deleted file mode 100644 index 24356ee91..000000000 --- a/container-images/kolla/mariadb/extend_start.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -function bootstrap_db { - mysqld_safe --wsrep-new-cluster --skip-networking --wsrep-on=OFF --pid-file=/var/lib/mysql/mariadb.pid & - # Wait for the mariadb server to be "Ready" before starting the security reset with a max timeout - # NOTE(huikang): the location of mysql's socket file varies depending on the OS distributions. - # Querying the cluster status has to be executed after the existence of mysql.sock and mariadb.pid. - TIMEOUT=${DB_MAX_TIMEOUT:-60} - while [[ ! -S /var/lib/mysql/mysql.sock ]] && \ - [[ ! -S /var/run/mysqld/mysqld.sock ]] || \ - [[ ! 
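The keystone log-directory preparation above is a chain of idempotent checks; the same pattern in Python (it needs the root privileges the container entrypoint already has):

    import os
    import pwd

    log_dir = "/var/log/keystone"  # path from the script above
    os.makedirs(log_dir, exist_ok=True)
    st = os.stat(log_dir)
    # chown only when ownership actually differs, so re-runs are cheap.
    if pwd.getpwuid(st.st_uid).pw_name != "keystone":
        os.chown(log_dir, pwd.getpwnam("keystone").pw_uid, st.st_gid)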
-f /var/lib/mysql/mariadb.pid ]]; do - if [[ ${TIMEOUT} -gt 0 ]]; then - let TIMEOUT-=1 - sleep 1 - else - exit 1 - fi - done - - sudo -E kolla_security_reset - mysql -u root --password="${DB_ROOT_PASSWORD}" -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '${DB_ROOT_PASSWORD}' WITH GRANT OPTION;" - mysql -u root --password="${DB_ROOT_PASSWORD}" -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '${DB_ROOT_PASSWORD}' WITH GRANT OPTION;" - mysqladmin -uroot -p"${DB_ROOT_PASSWORD}" shutdown -} - -# This catches all cases of the BOOTSTRAP variable being set, including empty -if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then - mysql_install_db - bootstrap_db - exit 0 -fi - -if [[ "${!BOOTSTRAP_ARGS[@]}" ]]; then - ARGS="${BOOTSTRAP_ARGS}" -fi diff --git a/container-images/kolla/mariadb/security_reset.expect b/container-images/kolla/mariadb/security_reset.expect deleted file mode 100644 index 6d2755e4d..000000000 --- a/container-images/kolla/mariadb/security_reset.expect +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/expect -f - -if [catch {set timeout $env(DB_MAX_TIMEOUT)}] {set timeout 10} -spawn mysql_secure_installation -expect { - timeout { send_user "\nFailed to get 'Enter current password for root (enter for none):' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Enter current password for root (enter for none):' prompt\n"; exit 1 } - "Enter current password for root (enter for none):" -} -send "\r" -expect { - timeout { send_user "\nFailed to get 'Set root password?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Set root password?' prompt\n"; exit 1 } - "Set root password?" -} -send "y\r" -expect { - timeout { send_user "\nFailed to get 'New password:' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'New password:' prompt\n"; exit 1 } - "New password:" -} -send "$env(DB_ROOT_PASSWORD)\r" - -expect { - timeout { send_user "\nFailed to get 'Re-enter new password:' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Re-enter new password:' prompt\n"; exit 1 } - "Re-enter new password:" -} -send "$env(DB_ROOT_PASSWORD)\r" - -expect { - timeout { send_user "\nFailed to get 'Remove anonymous users?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Remove anonymous users?' prompt\n"; exit 1 } - "Remove anonymous users?" -} -send "y\r" - -expect { - timeout { send_user "\nFailed to get 'Disallow root login remotely?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Disallow root login remotely?' prompt\n"; exit 1 } - "Disallow root login remotely?" -} -send "n\r" - -expect { - timeout { send_user "\nFailed to get 'Remove test database and access to it?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Remove test database and access to it?' prompt\n"; exit 1 } - "Remove test database and access to it?" -} -send "y\r" - -expect { - timeout { send_user "\nFailed to get 'Reload privilege tables now?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Reload privilege tables now?' prompt\n"; exit 1 } - "Reload privilege tables now?" 
-} -send "y\r" -expect eof diff --git a/container-images/kolla/mariadb/security_reset.expect.10.5 b/container-images/kolla/mariadb/security_reset.expect.10.5 deleted file mode 100644 index ec40eaf90..000000000 --- a/container-images/kolla/mariadb/security_reset.expect.10.5 +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/expect -f - -if [catch {set timeout $env(DB_MAX_TIMEOUT)}] {set timeout 10} -spawn mysql_secure_installation -expect { - timeout { send_user "\nFailed to get 'Enter current password for root (enter for none):' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Enter current password for root (enter for none):' prompt\n"; exit 1 } - "Enter current password for root (enter for none):" -} -send "\r" - -expect { - timeout { send_user "\nFailed to get 'Switch to unix_socket authentication [Y/n] ' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Switch to unix_socket authentication' prompt\n"; exit 1 } - "Switch to unix_socket authentication \\\[Y/n\\\] " -} -send "n\r" - -expect { - timeout { send_user "\nFailed to get 'Change the root password? [Y/n]' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Change the root password?' prompt\n"; exit 1 } - "Change the root password? \\\[Y/n\\\] " -} -send "y\r" - -expect { - timeout { send_user "\nFailed to get 'New password:' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'New password:' prompt\n"; exit 1 } - "New password:" -} -send "$env(DB_ROOT_PASSWORD)\r" - -expect { - timeout { send_user "\nFailed to get 'Re-enter new password:' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Re-enter new password:' prompt\n"; exit 1 } - "Re-enter new password:" -} -send "$env(DB_ROOT_PASSWORD)\r" - -expect { - timeout { send_user "\nFailed to get 'Remove anonymous users?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Remove anonymous users?' prompt\n"; exit 1 } - "Remove anonymous users?" -} -send "y\r" - -expect { - timeout { send_user "\nFailed to get 'Disallow root login remotely?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Disallow root login remotely?' prompt\n"; exit 1 } - "Disallow root login remotely?" -} -send "n\r" - -expect { - timeout { send_user "\nFailed to get 'Remove test database and access to it?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Remove test database and access to it?' prompt\n"; exit 1 } - "Remove test database and access to it?" -} -send "y\r" - -expect { - timeout { send_user "\nFailed to get 'Reload privilege tables now?' prompt\n"; exit 1 } - eof { send_user "\nFailed to get 'Reload privilege tables now?' prompt\n"; exit 1 } - "Reload privilege tables now?" 
-} -send "y\r" -expect eof diff --git a/container-images/kolla/neutron-base/neutron_sudoers b/container-images/kolla/neutron-base/neutron_sudoers deleted file mode 100644 index b859003e9..000000000 --- a/container-images/kolla/neutron-base/neutron_sudoers +++ /dev/null @@ -1,4 +0,0 @@ -neutron ALL = (root) NOPASSWD: /usr/bin/update-alternatives --set iptables /usr/sbin/iptables-legacy -neutron ALL = (root) NOPASSWD: /usr/bin/update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy -neutron ALL = (root) NOPASSWD: /usr/bin/update-alternatives --auto iptables -neutron ALL = (root) NOPASSWD: /usr/bin/update-alternatives --auto ip6tables diff --git a/container-images/kolla/ovn/ovn-nb-db-server/start_nb_db_server.sh b/container-images/kolla/ovn/ovn-nb-db-server/start_nb_db_server.sh deleted file mode 100755 index d08dd2890..000000000 --- a/container-images/kolla/ovn/ovn-nb-db-server/start_nb_db_server.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# All the option passed to this script will be -# passed to the ovn-ctl script. Please see the options -# supported by ovn-ctl script - -# https://github.com/ovn-org/ovn/blob/master/utilities/ovn-ctl -args=$@ - -# Use ovn-ctl script to start ovn NB db server as it -# takes care of creating the db file from the schema -# file if the db file is not present. It also takes care -# of updating the db file if the schema file is updated. - -# Check for the presence of ovn-ctl script in two locations. -# If latest OVN is used (post split from openvswitch), -# then the new location for the ovn-ctl script is -# is - /usr/share/ovn/scripts/ovn-ctl. Otherwise it is -# /usr/share/openvswitch/scripts/ovn-ctl. - -if [[ -f "/usr/share/openvswitch/scripts/ovn-ctl" ]]; then - set /usr/share/openvswitch/scripts/ovn-ctl --no-monitor -elif [[ -f "/usr/share/ovn/scripts/ovn-ctl" ]]; then - set /usr/share/ovn/scripts/ovn-ctl --no-monitor -else - exit 1 -fi - -$@ $args run_nb_ovsdb diff --git a/container-images/kolla/ovn/ovn-sb-db-server/start_sb_db_server.sh b/container-images/kolla/ovn/ovn-sb-db-server/start_sb_db_server.sh deleted file mode 100755 index ef39b8d25..000000000 --- a/container-images/kolla/ovn/ovn-sb-db-server/start_sb_db_server.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# All the option passed to this script will be -# passed to the ovn-ctl script. Please see the options -# supported by ovn-ctl script - -# https://github.com/ovn-org/ovn/blob/master/utilities/ovn-ctl -args=$@ - -# Use ovn-ctl script to start ovn SB db server as it -# takes care of creating the db file from the schema -# file if the db file is not present. It also takes care -# of updating the db file if the schema file is updated. - -# Check for the presence of ovn-ctl script in two locations. -# If latest OVN is used (post split from openvswitch), -# then the new location for the ovn-ctl script is -# is - /usr/share/ovn/scripts/ovn-ctl. Otherwise it is -# /usr/share/openvswitch/scripts/ovn-ctl. - - -if [[ -f "/usr/share/openvswitch/scripts/ovn-ctl" ]]; then - set /usr/share/openvswitch/scripts/ovn-ctl --no-monitor -elif [[ -f "/usr/share/ovn/scripts/ovn-ctl" ]]; then - set /usr/share/ovn/scripts/ovn-ctl --no-monitor -else - exit 1 -fi - -$@ $args run_sb_ovsdb diff --git a/container-images/kolla/rabbitmq/extend_start.sh b/container-images/kolla/rabbitmq/extend_start.sh deleted file mode 100644 index 858d3aa16..000000000 --- a/container-images/kolla/rabbitmq/extend_start.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. 
This catches all cases -# of the KOLLA_BOOTSTRAP variable being set, including empty. -if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then - -# NOTE(sbezverk): In kubernetes environment, if this file exists from previous -# bootstrap, the system does not allow to overwrite it (it bootstrap files with -# permission denied error) but it allows to delete it and then recreate it. - if [[ -e "/var/lib/rabbitmq/.erlang.cookie" ]]; then - rm -f /var/lib/rabbitmq/.erlang.cookie - fi - echo "${RABBITMQ_CLUSTER_COOKIE}" > /var/lib/rabbitmq/.erlang.cookie - chmod 400 /var/lib/rabbitmq/.erlang.cookie - exit 0 -fi diff --git a/container-images/kolla/swift-base/swift-rootwrap b/container-images/kolla/swift-base/swift-rootwrap deleted file mode 100644 index 5fe6099a5..000000000 --- a/container-images/kolla/swift-base/swift-rootwrap +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/python3 -# PBR Generated from u'console_scripts' - -import sys - -from oslo_rootwrap.cmd import main - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/container-images/kolla/swift-base/swift-sudoers b/container-images/kolla/swift-base/swift-sudoers deleted file mode 100644 index f60e2260b..000000000 --- a/container-images/kolla/swift-base/swift-sudoers +++ /dev/null @@ -1,2 +0,0 @@ -swift ALL=(root) NOPASSWD: /bin/find /srv/node/ -maxdepth 1 -type d -execdir chown swift\:swift {} \\+ -swift ALL=(root) NOPASSWD: /usr/bin/find /srv/node/ -maxdepth 1 -type d -execdir chown swift\:swift {} \\+ diff --git a/container-images/kolla/tripleo-ansible-ee/requirements.yaml b/container-images/kolla/tripleo-ansible-ee/requirements.yaml deleted file mode 100644 index 700519e45..000000000 --- a/container-images/kolla/tripleo-ansible-ee/requirements.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -# Copyright 2021 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Custom Roles and collections not shipped as a part of tripleo-ansible -collections: [] -roles: [] diff --git a/container-images/kolla/tripleo-ansible-ee/settings b/container-images/kolla/tripleo-ansible-ee/settings deleted file mode 100644 index f96409e8f..000000000 --- a/container-images/kolla/tripleo-ansible-ee/settings +++ /dev/null @@ -1,26 +0,0 @@ -# If no output is detected from ansible in this number of seconds the execution will -# be terminated. -idle_timeout: ${RUNNER_IDLE_TIMEOUT:-600} -# The maximum amount of time to allow the job to run for, exceeding this and the -# execution will be terminated. -job_timeout: ${RUNNER_JOB_TIMEOUT:-3600} - -# Number of seconds for the internal pexpect command to wait to block on -# input before continuing. -pexpect_timeout: ${RUNNER_PEXPECT_TIMEOUT:-10} -# Use poll() function for communication with child processes instead of select(). -# select() is used when the value is set to False. select() has a known limitation of -# using only up to 1024 file descriptors. 
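The settings file above is consumed as ansible-runner YAML, yet every value is written as a shell-style ${VAR:-default} placeholder; those only become concrete when tripleo_entrypoint.sh (below) re-evaluates the file through the shell. A minimal sketch of that expansion pattern, using a hypothetical /tmp/settings path in place of the real /runner/env/settings:

    $ printf 'idle_timeout: ${RUNNER_IDLE_TIMEOUT:-600}\n' > /tmp/settings
    $ RUNNER_IDLE_TIMEOUT=120 bash -c 'eval "echo \"$(cat /tmp/settings)\""'
    idle_timeout: 120
    $ bash -c 'eval "echo \"$(cat /tmp/settings)\""'
    idle_timeout: 600

When the environment variable is unset, the :- fallback supplies the default, which is why each setting above carries one.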
-pexpect_use_poll: ${RUNNER_PEXPECT_USE_POLL:-True}
-
-# Allow output from ansible not to be streamed to the stdout or stderr files
-# inside of the artifacts directory.
-suppress_output_file: ${RUNNER_SUPPRESS_OUTPUT_FILE:-False}
-# Allow output from ansible not to be printed to the screen.
-suppress_ansible_output: ${RUNNER_SUPPRESS_ANSIBLE_OUTPUT:-False}
-
-# The directory relative to artifacts where jsonfile fact caching will be stored.
-# Defaults to fact_cache. This is ignored if fact_cache_type is different from jsonfile.
-fact_cache: ${RUNNER_FACT_CACHE:-'fact_cache'}
-# The type of fact cache to use. Defaults to jsonfile.
-fact_cache_type: ${RUNNER_FACT_CACHE_TYPE:-'jsonfile'}
diff --git a/container-images/kolla/tripleo-ansible-ee/tripleo_entrypoint.sh b/container-images/kolla/tripleo-ansible-ee/tripleo_entrypoint.sh
deleted file mode 100644
index 5a6239a19..000000000
--- a/container-images/kolla/tripleo-ansible-ee/tripleo_entrypoint.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# Adding tripleo ansible-runner specific scripts here
-# Expand the variables
-eval "echo \"$(cat /runner/env/settings)\"" > /runner/env/settings
-
-if [ -n "$RUNNER_INVENTORY" ]; then
-    echo "---" > /runner/inventory/inventory.yaml
-    echo "$RUNNER_INVENTORY" >> /runner/inventory/inventory.yaml
-fi
-
-if [ -n "$RUNNER_PLAYBOOK" ]; then
-    echo "---" > /runner/project/playbook.yaml
-    echo "$RUNNER_PLAYBOOK" >> /runner/project/playbook.yaml
-fi
-
-# Contents from ansible-runner entrypoint
diff --git a/container-images/kolla/tripleoclient/create_super_user.sh b/container-images/kolla/tripleoclient/create_super_user.sh
deleted file mode 100644
index 7bdefbc18..000000000
--- a/container-images/kolla/tripleoclient/create_super_user.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# This is a useful entrypoint/cmd if you wish to run commands in a container
-# in an existing user's $HOME directory
-# For example: podman run -ti -e USER=stack -e UID=1000 --privileged=true --volume=/home/stack/:/home/stack/ tripleoclient:latest /usr/local/bin/create_super_user.sh
-
-if [ -n "$USER" -a -n "$UID" ]; then
-    useradd "$USER" -u "$UID" -M
-cat >> /etc/sudoers <
-    dnf install -y crudini &&
-    crudini --del /etc/dnf/dnf.conf main override_install_langs &&
-    crudini --set /etc/dnf/dnf.conf main clean_requirements_on_remove True &&
-    crudini --set /etc/dnf/dnf.conf main exactarch 1 &&
-    crudini --set /etc/dnf/dnf.conf main gpgcheck 1 &&
-    crudini --set /etc/dnf/dnf.conf main install_weak_deps False &&
-    if [ '{{ tcib_distro }}' == 'centos' ];then crudini --set /etc/dnf/dnf.conf main best False; fi &&
-    crudini --set /etc/dnf/dnf.conf main installonly_limit 0 &&
-    crudini --set /etc/dnf/dnf.conf main keepcache 0 &&
-    crudini --set /etc/dnf/dnf.conf main obsoletes 1 &&
-    crudini --set /etc/dnf/dnf.conf main plugins 1 &&
-    crudini --set /etc/dnf/dnf.conf main skip_missing_names_on_install False &&
-    crudini --set /etc/dnf/dnf.conf main tsflags nodocs
-- run: >-
-    if [ '{{ tcib_distro }}' == 'rhel' ] && [ '{{ tcib_release }}' == '8' ]; then
-    {%- if "el" ~ tcib_release in tcib_packages.modules %}
-    {% for item in tcib_packages.modules["el" ~ tcib_release] %}{% set key, value = (item.items() | list).0 %}dnf module -y {{ key }} {{ value }}; {% endfor %}
-    {%- else %}
-    echo "WARNING: No modules defined for el{{ tcib_release}}";
-    {%- endif %}
-    fi
-- run: dnf install -y openstack-tripleo-common-containers
-- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/base/uid_gid_manage.sh /usr/local/bin/uid_gid_manage
-- run: chmod 755 /usr/local/bin/uid_gid_manage
-- run: bash /usr/local/bin/uid_gid_manage kolla hugetlbfs libvirt qemu
-- run: touch /usr/local/bin/kolla_extend_start && chmod 755 /usr/local/bin/kolla_extend_start
-- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/base/set_configs.py /usr/local/bin/kolla_set_configs
-- run: chmod 755 /usr/local/bin/kolla_set_configs
-- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/base/start.sh /usr/local/bin/kolla_start
-- run: chmod 755 /usr/local/bin/kolla_start
-- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/base/httpd_setup.sh /usr/local/bin/kolla_httpd_setup
-- run: chmod 755 /usr/local/bin/kolla_httpd_setup
-- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/base/sudoers /etc/sudoers
-- run: chmod 440 /etc/sudoers
-- run: >-
-    if [ '{{ tcib_release }}' == '8' ];then
-    sed -ri '/-session(\s+)optional(\s+)pam_systemd.so/d' /etc/pam.d/system-auth &&
-    dnf install -y curl; fi
-- run: sed -ri '/^(passwd:|group:)/ s/systemd//g' /etc/nsswitch.conf
-- run: dnf install -y {{ tcib_packages['common'] | join(' ') }}
-- run: >-
-    if [ '{{ tcib_release }}' == '9' ];then
-    dnf -y reinstall which &&
-    rpm -e --nodeps tzdata &&
-    dnf -y install tzdata; fi
-- run: mkdir -p /openstack
-- run: >-
-    if [ '{{ tcib_distro }}' == 'centos' ];then
-    if [ -n "$(rpm -qa redhat-release)" ];then rpm -e --nodeps redhat-release; fi ;
-    dnf -y install centos-stream-release; fi
-# TODO: Temporarily pin nettle to 3.8-3.el9, so it can be reinstalled from centos-9 repos.
-# nettle-3.8-3 is already installed in the ubi9 image, but it conflicts with newer versions of gnutls
-# installed from centos-9 repos. This workaround can be reverted once ubi9.2 is released, which
-# should contain a newer version of gnutls with fixes to run under FIPS mode.
-# See: https://bugzilla.redhat.com/show_bug.cgi?id=2154924 and
-# https://bugs.launchpad.net/tripleo/+bug/1984237
-- run: >-
-    if [ '{{ tcib_release }}' == '9' ];then
-    dnf -y install nettle-3.8-3.el9; fi
-- run: dnf update --excludepkgs redhat-release -y && dnf clean all && rm -rf /var/cache/dnf
-tcib_cmd: kolla_start
-tcib_entrypoint: dumb-init --single-child --
-tcib_envs:
-  LANG: en_US.UTF-8
-  container: oci
-tcib_labels:
-  maintainer: OpenStack TripleO team
-  tcib_managed: True
-tcib_packages:
-  common:
-  - ca-certificates
-  - dumb-init
-  - glibc-langpack-en
-  - iscsi-initiator-utils
-  - openstack-tripleo-common-containers
-  - openstack-tripleo-common-container-base
-  - procps-ng
-  - python3
-  - rsync
-  - socat
-  - sudo
-  - tar
-  - util-linux-user
-  - which
-  modules:
-    el8:
-    - disable: container-tools:rhel8
-    - disable: virt:rhel
-    - enable: container-tools:{{ tcib_rhel_modules['container-tools'] | default('3.0') }}
-    - enable: mariadb:{{ tcib_rhel_modules['mariadb'] | default('10.3') }}
-    - enable: virt:{{ tcib_rhel_modules['virt'] | default('av') }}
-    - enable: redis:{{ tcib_rhel_modules['redis'] | default('5') }}
-    - enable: nodejs:{{ tcib_rhel_modules['nodejs'] | default('14') }}
-    el9:
-    - disable: virt:rhel
-    - enable: virt:{{ tcib_rhel_modules['virt'] | default('av') }}
-    - enable: redis:{{ tcib_rhel_modules['redis'] | default('5') }}
-    - enable: mariadb:{{ tcib_rhel_modules['mariadb'] | default('10.5') }}
-tcib_stopsignal: SIGTERM
diff --git a/container-images/tcib/base/collectd/collectd.yaml b/container-images/tcib/base/collectd/collectd.yaml
deleted file mode 100644
index d1c1d5070..000000000
--- a/container-images/tcib/base/collectd/collectd.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-tcib_actions:
-- run: bash /usr/local/bin/uid_gid_manage collectd
-- run: if [ "{{ tcib_distro }}" == "rhel" ]; then dnf -y install {{ tcib_packages['rhel'] | join(' ') }}; fi
-- run: if [ '{{ tcib_release }}' == '8' ];then dnf -y install {{ tcib_packages['el8'] | join(' ') }}; fi
-- run: if [ "$(uname -m)" == "x86_64" ]; then dnf -y install {{ tcib_packages['x86_64'] | join(' ') }}; fi
-- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf
-- run: chown -R collectd:collectd /etc/collectd* /var/run/
-- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/collectd /openstack/healthcheck && chmod a+rx /openstack/healthcheck
-tcib_packages:
-  common:
-  - collectd
-  - collectd-amqp1
-  - collectd-apache
-  - collectd-bind
-  - collectd-ceph
-  - collectd-chrony
-  - collectd-connectivity
-  - collectd-curl
-  - collectd-curl_json
-  - collectd-curl_xml
-  - collectd-disk
-  - collectd-dns
-  - collectd-ipmi
-  - collectd-libpod-stats
-  - collectd-log_logstash
-  - collectd-mcelog
-  - collectd-mysql
-  - collectd-netlink
-  - collectd-openldap
-  - collectd-ovs-events
-  - collectd-ovs-stats
-  - collectd-ping
-  - collectd-pmu
-  - collectd-procevent
-  - collectd-python
-  - collectd-sensors
-  - collectd-sensubility
-  - collectd-smart
-  - collectd-snmp
-  - collectd-snmp-agent
-  - collectd-sysevent
-  - collectd-utils
-  - collectd-virt
-  - collectd-write_http
-  - collectd-write_kafka
-  - collectd-write_prometheus
-  - python3-sqlalchemy-collectd
-  - podman-remote
-  - jq
-  rhel:
-  - python3-collectd-rabbitmq-monitoring
-  x86_64:
-  - collectd-hugepages
-  - collectd-pcie-errors
-  - collectd-rdt
-  - collectd-turbostat
-  el8:
-  - collectd-generic-jmx
-  - collectd-iptables
diff --git a/container-images/tcib/base/cron/cron.yaml b/container-images/tcib/base/cron/cron.yaml
deleted file mode 100644 index 6d45d5d4f..000000000 --- a/container-images/tcib/base/cron/cron.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - cronie - - logrotate diff --git a/container-images/tcib/base/etcd/etcd.yaml b/container-images/tcib/base/etcd/etcd.yaml deleted file mode 100644 index dc323f000..000000000 --- a/container-images/tcib/base/etcd/etcd.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/etcd /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - etcd -tcib_user: etcd diff --git a/container-images/tcib/base/frr/frr.yaml b/container-images/tcib/base/frr/frr.yaml deleted file mode 100644 index 36fc8f561..000000000 --- a/container-images/tcib/base/frr/frr.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage frrvty frr -- run: dnf install -y {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/frr /openstack/healthcheck && chmod a+rx /openstack/healthcheck -- run: mkdir -p /var/lock/subsys && chown "frr:" /var/lock/subsys -tcib_packages: - common: - - frr -tcib_user: frr diff --git a/container-images/tcib/base/haproxy/haproxy.yaml b/container-images/tcib/base/haproxy/haproxy.yaml deleted file mode 100644 index eb6536be1..000000000 --- a/container-images/tcib/base/haproxy/haproxy.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tcib_actions: -- run: dnf install -y {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - haproxy - - libqb - - pacemaker - - pacemaker-remote - - pcs - - resource-agents diff --git a/container-images/tcib/base/mariadb/mariadb.yaml b/container-images/tcib/base/mariadb/mariadb.yaml deleted file mode 100644 index 661338faa..000000000 --- a/container-images/tcib/base/mariadb/mariadb.yaml +++ /dev/null @@ -1,31 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/mariadb/extend_start.sh /usr/local/bin/kolla_extend_start -- run: chmod 755 /usr/local/bin/kolla_extend_start -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/mariadb/security_reset.expect{{ tcib_release is version('8', '==') | ternary('', '.10.5') }} /usr/local/bin/kolla_security_reset -- run: chmod 755 /usr/local/bin/kolla_security_reset -- run: rm -rf /var/lib/mysql/* /etc/my.cnf.d/mariadb-server.cnf /etc/my.cnf.d/auth_gssapi.cnf -- run: mkdir -p /etc/libqb -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/mariadb /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_cmd: kolla_start -tcib_entrypoint: dumb-init -- -tcib_packages: - common: - - expect - - galera - - hostname - - libqb - - mariadb - - mariadb-backup - - mariadb-server-galera - - mariadb-server-utils - - pacemaker - - pacemaker-remote - - pcs - - python3-pynacl - - resource-agents - - rsync - - socat - - tar -tcib_user: mysql diff --git a/container-images/tcib/base/memcached/memcached.yaml 
b/container-images/tcib/base/memcached/memcached.yaml deleted file mode 100644 index 1099041ad..000000000 --- a/container-images/tcib/base/memcached/memcached.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf install -y {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/memcached /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - memcached -tcib_user: memcached diff --git a/container-images/tcib/base/multipathd/multipathd.yaml b/container-images/tcib/base/multipathd/multipathd.yaml deleted file mode 100644 index ebbee661f..000000000 --- a/container-images/tcib/base/multipathd/multipathd.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/multipathd /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - device-mapper-multipath diff --git a/container-images/tcib/base/os/aodh-base/aodh-api/aodh-api.yaml b/container-images/tcib/base/os/aodh-base/aodh-api/aodh-api.yaml deleted file mode 100644 index 32ecbf579..000000000 --- a/container-images/tcib/base/os/aodh-base/aodh-api/aodh-api.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /var/www/cgi-bin/aodh && chmod 755 /var/www/cgi-bin/aodh && cp -a /usr/bin/aodh-api /var/www/cgi-bin/aodh/ && sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/aodh-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-aodh-api - - python3-ldappool - - python3-mod_wsgi diff --git a/container-images/tcib/base/os/aodh-base/aodh-base.yaml b/container-images/tcib/base/os/aodh-base/aodh-base.yaml deleted file mode 100644 index 3d8263344..000000000 --- a/container-images/tcib/base/os/aodh-base/aodh-base.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage aodh -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-aodh-common diff --git a/container-images/tcib/base/os/aodh-base/aodh-evaluator/aodh-evaluator.yaml b/container-images/tcib/base/os/aodh-base/aodh-evaluator/aodh-evaluator.yaml deleted file mode 100644 index b3160ce7b..000000000 --- a/container-images/tcib/base/os/aodh-base/aodh-evaluator/aodh-evaluator.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/aodh-evaluator /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-aodh-evaluator -tcib_user: aodh diff --git a/container-images/tcib/base/os/aodh-base/aodh-listener/aodh-listener.yaml b/container-images/tcib/base/os/aodh-base/aodh-listener/aodh-listener.yaml deleted file mode 100644 index edb186606..000000000 --- a/container-images/tcib/base/os/aodh-base/aodh-listener/aodh-listener.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ 
tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/aodh-listener /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-aodh-listener -tcib_user: aodh diff --git a/container-images/tcib/base/os/aodh-base/aodh-notifier/aodh-notifier.yaml b/container-images/tcib/base/os/aodh-base/aodh-notifier/aodh-notifier.yaml deleted file mode 100644 index a0b5c4fc6..000000000 --- a/container-images/tcib/base/os/aodh-base/aodh-notifier/aodh-notifier.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/aodh-notifier /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-aodh-notifier -tcib_user: aodh diff --git a/container-images/tcib/base/os/barbican-base/barbican-api/barbican-api.yaml b/container-images/tcib/base/os/barbican-base/barbican-api/barbican-api.yaml deleted file mode 100644 index 0df7e34aa..000000000 --- a/container-images/tcib/base/os/barbican-base/barbican-api/barbican-api.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/barbican-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-barbican-api - - python3-mod_wsgi -tcib_user: barbican diff --git a/container-images/tcib/base/os/barbican-base/barbican-base.yaml b/container-images/tcib/base/os/barbican-base/barbican-base.yaml deleted file mode 100644 index 7317486a1..000000000 --- a/container-images/tcib/base/os/barbican-base/barbican-base.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage nfast barbican -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/barbican-base/sudoers /etc/sudoers.d/barbican_sudoers -- run: chmod 640 /etc/sudoers.d/barbican_sudoers -tcib_packages: - common: - - openstack-barbican-common diff --git a/container-images/tcib/base/os/barbican-base/barbican-keystone-listener/barbican-keystone-listener.yaml b/container-images/tcib/base/os/barbican-base/barbican-keystone-listener/barbican-keystone-listener.yaml deleted file mode 100644 index 10b455b0d..000000000 --- a/container-images/tcib/base/os/barbican-base/barbican-keystone-listener/barbican-keystone-listener.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/barbican-keystone-listener /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-barbican-keystone-listener -tcib_user: barbican diff --git a/container-images/tcib/base/os/barbican-base/barbican-worker/barbican-worker.yaml b/container-images/tcib/base/os/barbican-base/barbican-worker/barbican-worker.yaml deleted file mode 100644 index 727c88311..000000000 --- 
a/container-images/tcib/base/os/barbican-base/barbican-worker/barbican-worker.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/barbican-worker /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-barbican-worker -tcib_user: barbican diff --git a/container-images/tcib/base/os/ceilometer-base/ceilometer-base.yaml b/container-images/tcib/base/os/ceilometer-base/ceilometer-base.yaml deleted file mode 100644 index 2f8e3c3c8..000000000 --- a/container-images/tcib/base/os/ceilometer-base/ceilometer-base.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage ceilometer -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-ceilometer-common - - python3-oslo-db - - python3-tooz diff --git a/container-images/tcib/base/os/ceilometer-base/ceilometer-central/ceilometer-central.yaml b/container-images/tcib/base/os/ceilometer-base/ceilometer-central/ceilometer-central.yaml deleted file mode 100644 index ebe9f4a0f..000000000 --- a/container-images/tcib/base/os/ceilometer-base/ceilometer-central/ceilometer-central.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ceilometer-agent-central /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-ceilometer-central -tcib_user: ceilometer diff --git a/container-images/tcib/base/os/ceilometer-base/ceilometer-compute/ceilometer-compute.yaml b/container-images/tcib/base/os/ceilometer-base/ceilometer-compute/ceilometer-compute.yaml deleted file mode 100644 index a3981dd52..000000000 --- a/container-images/tcib/base/os/ceilometer-base/ceilometer-compute/ceilometer-compute.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ceilometer-agent-compute /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-ceilometer-compute diff --git a/container-images/tcib/base/os/ceilometer-base/ceilometer-ipmi/ceilometer-ipmi.yaml b/container-images/tcib/base/os/ceilometer-base/ceilometer-ipmi/ceilometer-ipmi.yaml deleted file mode 100644 index 97bd0897e..000000000 --- a/container-images/tcib/base/os/ceilometer-base/ceilometer-ipmi/ceilometer-ipmi.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ceilometer-agent-ipmi /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-ceilometer-ipmi -tcib_user: ceilometer diff --git a/container-images/tcib/base/os/ceilometer-base/ceilometer-notification/ceilometer-notification.yaml b/container-images/tcib/base/os/ceilometer-base/ceilometer-notification/ceilometer-notification.yaml deleted file mode 100644 index dbf8e12d3..000000000 --- a/container-images/tcib/base/os/ceilometer-base/ceilometer-notification/ceilometer-notification.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: 
dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ceilometer-agent-notification /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-ceilometer-notification - - python3-pyngus -tcib_user: ceilometer diff --git a/container-images/tcib/base/os/cinder-base/cinder-api/cinder-api.yaml b/container-images/tcib/base/os/cinder-base/cinder-api/cinder-api.yaml deleted file mode 100644 index 353287117..000000000 --- a/container-images/tcib/base/os/cinder-base/cinder-api/cinder-api.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /var/www/cgi-bin/cinder && cp -a /usr/bin/cinder-wsgi /var/www/cgi-bin/cinder/cinder-wsgi && sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: chown -R cinder /var/www/cgi-bin/cinder && chmod 755 /var/www/cgi-bin/cinder/cinder-wsgi -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/cinder-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - python3-keystone - - python3-mod_wsgi diff --git a/container-images/tcib/base/os/cinder-base/cinder-backup/cinder-backup.yaml b/container-images/tcib/base/os/cinder-base/cinder-backup/cinder-backup.yaml deleted file mode 100644 index 7e51d401f..000000000 --- a/container-images/tcib/base/os/cinder-base/cinder-backup/cinder-backup.yaml +++ /dev/null @@ -1,16 +0,0 @@ -tcib_envs: - MALLOC_ARENA_MAX: 1 - MALLOC_MMAP_THRESHOLD_: 131072 - MALLOC_TRIM_THRESHOLD_: 262144 -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/cinder-backup/extend_start.sh /usr/local/bin/kolla_extend_start -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/cinder-backup/cinder-backup-sudoers /etc/sudoers.d/cinder-backup-sudoers -- run: chmod 755 /usr/local/bin/kolla_extend_start && chmod 440 /etc/sudoers.d/cinder-backup-sudoers && mkdir -p /etc/libqb -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/cinder-backup /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - pacemaker - - pacemaker-remote - - pcs -tcib_user: cinder diff --git a/container-images/tcib/base/os/cinder-base/cinder-base.yaml b/container-images/tcib/base/os/cinder-base/cinder-base.yaml deleted file mode 100644 index 1bb0e49cb..000000000 --- a/container-images/tcib/base/os/cinder-base/cinder-base.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage cinder -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - ceph-common - - openstack-cinder diff --git a/container-images/tcib/base/os/cinder-base/cinder-scheduler/cinder-scheduler.yaml b/container-images/tcib/base/os/cinder-base/cinder-scheduler/cinder-scheduler.yaml deleted file mode 100644 index c61a21f83..000000000 --- a/container-images/tcib/base/os/cinder-base/cinder-scheduler/cinder-scheduler.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tcib_actions: -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/cinder-scheduler /openstack/healthcheck && chmod a+rx /openstack/healthcheck 
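Nearly every image definition in this tree ends by linking a service-specific script to /openstack/healthcheck. The runtime wiring is not part of this repository; as a hedged sketch of how such a link is typically consumed, a deployment tool could hand it to the container engine's native health checking (the image name and interval below are hypothetical):

    # Hypothetical runtime wiring; the actual TripleO invocation is not shown in this patch.
    podman run -d --name cinder_scheduler \
        --health-cmd /openstack/healthcheck \
        --health-interval 60s \
        registry.example.com/tripleo/cinder-scheduler:latest
    podman healthcheck run cinder_scheduler && echo healthy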
-tcib_user: cinder diff --git a/container-images/tcib/base/os/cinder-base/cinder-volume/cinder-volume.yaml b/container-images/tcib/base/os/cinder-base/cinder-volume/cinder-volume.yaml deleted file mode 100644 index 4fde5e221..000000000 --- a/container-images/tcib/base/os/cinder-base/cinder-volume/cinder-volume.yaml +++ /dev/null @@ -1,17 +0,0 @@ -tcib_envs: - MALLOC_ARENA_MAX: 1 - MALLOC_MMAP_THRESHOLD_: 131072 - MALLOC_TRIM_THRESHOLD_: 262144 -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/cinder-volume/extend_start.sh /usr/local/bin/kolla_extend_start -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/cinder-volume/cinder-volume-sudoers /etc/sudoers.d/cinder-volume-sudoers -- run: chmod 755 /usr/local/bin/kolla_extend_start && chmod 440 /etc/sudoers.d/cinder-volume-sudoers && mkdir -p /etc/libqb -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/cinder-volume /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - pacemaker - - pacemaker-remote - - pcs - - python3-cinderlib -tcib_user: cinder diff --git a/container-images/tcib/base/os/designate-base/designate-api/designate-api.yaml b/container-images/tcib/base/os/designate-base/designate-api/designate-api.yaml deleted file mode 100644 index 352577524..000000000 --- a/container-images/tcib/base/os/designate-base/designate-api/designate-api.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-designate-api - - httpd - - mod_ssl - - python3-mod_wsgi -tcib_user: designate diff --git a/container-images/tcib/base/os/designate-base/designate-backend-bind9/designate-backend-bind9.yaml b/container-images/tcib/base/os/designate-base/designate-backend-bind9/designate-backend-bind9.yaml deleted file mode 100644 index f24ce7272..000000000 --- a/container-images/tcib/base/os/designate-base/designate-backend-bind9/designate-backend-bind9.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /var/lib/named/ /run/named && chown -R root /var/lib/named /run/named && chmod 755 /run/named -tcib_packages: - common: - - bind diff --git a/container-images/tcib/base/os/designate-base/designate-base.yaml b/container-images/tcib/base/os/designate-base/designate-base.yaml deleted file mode 100644 index 9008568b9..000000000 --- a/container-images/tcib/base/os/designate-base/designate-base.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage designate -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-designate-common - - python3-oslo-reports - - python3-suds - - python3-tooz diff --git a/container-images/tcib/base/os/designate-base/designate-central/designate-central.yaml b/container-images/tcib/base/os/designate-base/designate-central/designate-central.yaml deleted file mode 100644 index 686a9e492..000000000 --- a/container-images/tcib/base/os/designate-base/designate-central/designate-central.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf 
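These tcib definitions pair a package list with templated actions: the {{ tcib_packages.common | join(' ') }} expression collapses the list into a single dnf transaction, and the trailing clean-up keeps the resulting image layer small. As an illustrative rendering of the designate-central action above, assuming only the single package in its common list (a sketch, not output from an actual build):

    # Rendered form of the templated run above:
    dnf -y install openstack-designate-central && dnf clean all && rm -rf /var/cache/dnf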
-tcib_packages: - common: - - openstack-designate-central -tcib_user: designate diff --git a/container-images/tcib/base/os/designate-base/designate-mdns/designate-mdns.yaml b/container-images/tcib/base/os/designate-base/designate-mdns/designate-mdns.yaml deleted file mode 100644 index d08a43044..000000000 --- a/container-images/tcib/base/os/designate-base/designate-mdns/designate-mdns.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-designate-mdns -tcib_user: designate diff --git a/container-images/tcib/base/os/designate-base/designate-producer/designate-producer.yaml b/container-images/tcib/base/os/designate-base/designate-producer/designate-producer.yaml deleted file mode 100644 index 953b78c5a..000000000 --- a/container-images/tcib/base/os/designate-base/designate-producer/designate-producer.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-designate-producer -tcib_user: designate diff --git a/container-images/tcib/base/os/designate-base/designate-sink/designate-sink.yaml b/container-images/tcib/base/os/designate-base/designate-sink/designate-sink.yaml deleted file mode 100644 index 4eac94499..000000000 --- a/container-images/tcib/base/os/designate-base/designate-sink/designate-sink.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-designate-sink -tcib_user: designate diff --git a/container-images/tcib/base/os/designate-base/designate-worker/designate-worker.yaml b/container-images/tcib/base/os/designate-base/designate-worker/designate-worker.yaml deleted file mode 100644 index 8b1471e1a..000000000 --- a/container-images/tcib/base/os/designate-base/designate-worker/designate-worker.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - bind - - openstack-designate-worker -tcib_user: designate diff --git a/container-images/tcib/base/os/glance-api/glance-api.yaml b/container-images/tcib/base/os/glance-api/glance-api.yaml deleted file mode 100644 index e91eb5423..000000000 --- a/container-images/tcib/base/os/glance-api/glance-api.yaml +++ /dev/null @@ -1,17 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/glance-api/extend_start.sh /usr/local/bin/kolla_extend_start -- run: chmod 755 /usr/local/bin/kolla_extend_start -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/glance-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-glance - - python3-oslo-vmware - - python3-rados - - python3-rbd - - qemu-img -tcib_user: glance diff --git a/container-images/tcib/base/os/gnocchi-base/gnocchi-api/gnocchi-api.yaml b/container-images/tcib/base/os/gnocchi-base/gnocchi-api/gnocchi-api.yaml deleted file mode 100644 index 
00f4874cc..000000000 --- a/container-images/tcib/base/os/gnocchi-base/gnocchi-api/gnocchi-api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/gnocchi-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - gnocchi-api diff --git a/container-images/tcib/base/os/gnocchi-base/gnocchi-base.yaml b/container-images/tcib/base/os/gnocchi-base/gnocchi-base.yaml deleted file mode 100644 index 1b3fe2a48..000000000 --- a/container-images/tcib/base/os/gnocchi-base/gnocchi-base.yaml +++ /dev/null @@ -1,14 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage gnocchi -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - gnocchi-common - - python3-rados - - python3-eventlet - - httpd - - librados2 - - mod_ssl - - python3-boto3 - - python3-ldappool - - python3-mod_wsgi diff --git a/container-images/tcib/base/os/gnocchi-base/gnocchi-metricd/gnocchi-metricd.yaml b/container-images/tcib/base/os/gnocchi-base/gnocchi-metricd/gnocchi-metricd.yaml deleted file mode 100644 index 1773f9eca..000000000 --- a/container-images/tcib/base/os/gnocchi-base/gnocchi-metricd/gnocchi-metricd.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/gnocchi-metricd /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - gnocchi-metricd -tcib_user: gnocchi diff --git a/container-images/tcib/base/os/gnocchi-base/gnocchi-statsd/gnocchi-statsd.yaml b/container-images/tcib/base/os/gnocchi-base/gnocchi-statsd/gnocchi-statsd.yaml deleted file mode 100644 index 1929f8371..000000000 --- a/container-images/tcib/base/os/gnocchi-base/gnocchi-statsd/gnocchi-statsd.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/gnocchi-statsd /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - gnocchi-statsd -tcib_user: gnocchi diff --git a/container-images/tcib/base/os/heat-base/heat-all/heat-all.yaml b/container-images/tcib/base/os/heat-base/heat-all/heat-all.yaml deleted file mode 100644 index b1a3ff460..000000000 --- a/container-images/tcib/base/os/heat-base/heat-all/heat-all.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-heat-api - - openstack-heat-engine - - openstack-heat-monolith -tcib_user: heat diff --git a/container-images/tcib/base/os/heat-base/heat-api-cfn/heat-api-cfn.yaml b/container-images/tcib/base/os/heat-base/heat-api-cfn/heat-api-cfn.yaml deleted file mode 100644 index dc28702d3..000000000 --- a/container-images/tcib/base/os/heat-base/heat-api-cfn/heat-api-cfn.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s 
/usr/share/openstack-tripleo-common/healthcheck/heat-api-cfn /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-heat-api-cfn -tcib_user: heat diff --git a/container-images/tcib/base/os/heat-base/heat-api/heat-api.yaml b/container-images/tcib/base/os/heat-base/heat-api/heat-api.yaml deleted file mode 100644 index ecd9a2c8d..000000000 --- a/container-images/tcib/base/os/heat-base/heat-api/heat-api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/heat-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-heat-api -tcib_user: heat diff --git a/container-images/tcib/base/os/heat-base/heat-base.yaml b/container-images/tcib/base/os/heat-base/heat-base.yaml deleted file mode 100644 index e3599e80f..000000000 --- a/container-images/tcib/base/os/heat-base/heat-base.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage heat -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-heat-common - - python3-mod_wsgi diff --git a/container-images/tcib/base/os/heat-base/heat-engine/heat-engine.yaml b/container-images/tcib/base/os/heat-base/heat-engine/heat-engine.yaml deleted file mode 100644 index dad664a3f..000000000 --- a/container-images/tcib/base/os/heat-base/heat-engine/heat-engine.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/heat-engine /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-heat-engine -tcib_user: heat diff --git a/container-images/tcib/base/os/horizon/horizon.yaml b/container-images/tcib/base/os/horizon/horizon.yaml deleted file mode 100644 index 12007029d..000000000 --- a/container-images/tcib/base/os/horizon/horizon.yaml +++ /dev/null @@ -1,20 +0,0 @@ -tcib_actions: -- run: mv /etc/rpm/macros.image-language-conf /tmp && dnf -y install {{ tcib_packages.with_localization | join(' ') }} && mv /tmp/macros.image-language-conf /etc/rpm && dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/horizon/extend_start.sh /usr/local/bin/kolla_extend_start -- run: chmod 755 /usr/local/bin/kolla_extend_start -- run: 'sed -i -r ''s,^(Listen 80),#\1,'' /etc/httpd/conf/httpd.conf && sed -i -r ''s,^(Listen 443),#\1,'' /etc/httpd/conf.d/ssl.conf && ln -s /usr/share/openstack-dashboard/openstack_dashboard /usr/lib/python{{ tcib_python_version | default("3.9" if tcib_release is version("9", "==") else "3.6") }}/site-packages/openstack_dashboard && ln -s /usr/share/openstack-dashboard/static /usr/lib/python{{ tcib_python_version | default("3.9" if tcib_release is version("9", "==") else "3.6") }}/site-packages/static && chown -R apache /etc/openstack-dashboard /usr/share/openstack-dashboard && chown -R apache /usr/share/openstack-dashboard/static && sed -i "s|WEBROOT = ''/dashboard/''|WEBROOT = ''/''|" 
/etc/openstack-dashboard/local_settings && cp /usr/share/openstack-dashboard/manage.py /usr/bin/manage.py && rm -f /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/?[^_]*.py* && rm -f /usr/lib/python{{ tcib_python_version | default("3.9" if tcib_release is version("9", "==") else "3.6") }}/site-packages/openstack_dashboard/local/enabled/?[^_]*.py* && for locale in /usr/lib/python{{ tcib_python_version | default("3.9" if tcib_release is version("9", "==") else "3.6") }}/site-packages/*/locale; do (cd ${locale%/*} && /usr/bin/django-admin compilemessages) done' -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/horizon /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - gettext - - httpd - - mod_ssl - - python3-mod_wsgi - - python3-PyMySQL - with_localization: - - openstack-dashboard - - openstack-heat-ui - - openstack-ironic-ui - - openstack-manila-ui - - openstack-octavia-ui - - openstack-designate-ui diff --git a/container-images/tcib/base/os/ironic-base/ironic-api/ironic-api.yaml b/container-images/tcib/base/os/ironic-base/ironic-api/ironic-api.yaml deleted file mode 100644 index 4b2f2a2ae..000000000 --- a/container-images/tcib/base/os/ironic-base/ironic-api/ironic-api.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /var/www/cgi-bin/ironic && cp -a /usr/bin/ironic-api-wsgi /var/www/cgi-bin/ironic/app -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ironic-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-ironic-api - - python3-mod_wsgi -tcib_user: ironic diff --git a/container-images/tcib/base/os/ironic-base/ironic-base.yaml b/container-images/tcib/base/os/ironic-base/ironic-base.yaml deleted file mode 100644 index 3fde28c6a..000000000 --- a/container-images/tcib/base/os/ironic-base/ironic-base.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage ironic -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-ironic-common diff --git a/container-images/tcib/base/os/ironic-base/ironic-conductor/ironic-conductor.yaml b/container-images/tcib/base/os/ironic-base/ironic-conductor/ironic-conductor.yaml deleted file mode 100644 index 9dd333522..000000000 --- a/container-images/tcib/base/os/ironic-base/ironic-conductor/ironic-conductor.yaml +++ /dev/null @@ -1,26 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ironic-conductor /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - xorriso - - dosfstools - - e2fsprogs - - gdisk - - ipmitool - - openssh-clients - - openstack-ironic-conductor - - openstack-ironic-staging-drivers - - parted - - psmisc - - python3-dracclient - - python3-ironic-inspector-client - - python3-proliantutils - - python3-pysnmp - - python3-scciclient - - python3-sushy - - python3-systemd - - qemu-img - - util-linux - - xfsprogs -tcib_user: ironic diff --git a/container-images/tcib/base/os/ironic-base/ironic-inspector/ironic-inspector.yaml 
b/container-images/tcib/base/os/ironic-base/ironic-inspector/ironic-inspector.yaml deleted file mode 100644 index 6425f1298..000000000 --- a/container-images/tcib/base/os/ironic-base/ironic-inspector/ironic-inspector.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ironic-inspector /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-ironic-inspector - - openstack-ironic-inspector-dnsmasq -tcib_user: ironic-inspector diff --git a/container-images/tcib/base/os/ironic-base/ironic-pxe/ironic-pxe.yaml b/container-images/tcib/base/os/ironic-base/ironic-pxe/ironic-pxe.yaml deleted file mode 100644 index 4898da5e5..000000000 --- a/container-images/tcib/base/os/ironic-base/ironic-pxe/ironic-pxe.yaml +++ /dev/null @@ -1,14 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: echo ". /usr/local/bin/kolla_httpd_setup"> /usr/local/bin/kolla_extend_start && chmod 755 /usr/local/bin/kolla_extend_start -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ironic-pxe /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - ipxe-bootimgs - - mod_ssl - - python3-mod_wsgi - - dnsmasq - - grub2-efi-x64 - - shim diff --git a/container-images/tcib/base/os/iscsid/iscsid.yaml b/container-images/tcib/base/os/iscsid/iscsid.yaml deleted file mode 100644 index 2bc5d60eb..000000000 --- a/container-images/tcib/base/os/iscsid/iscsid.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/iscsid/extend_start.sh /usr/local/bin/kolla_extend_start -- run: chmod 755 /usr/local/bin/kolla_extend_start -- run: rm -f /etc/iscsi/initiatorname.iscsi -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/iscsid /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - iscsi-initiator-utils - - python3-rtslib - - targetcli diff --git a/container-images/tcib/base/os/keystone/keystone.yaml b/container-images/tcib/base/os/keystone/keystone.yaml deleted file mode 100644 index 6d460ca5e..000000000 --- a/container-images/tcib/base/os/keystone/keystone.yaml +++ /dev/null @@ -1,25 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage keystone -- run: >- - if [ '{{ tcib_release }}' == '8' ];then - dnf module -y enable mod_auth_openidc; fi -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /var/www/cgi-bin/keystone && chown -R keystone /var/www/cgi-bin/keystone -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/keystone/extend_start.sh /usr/local/bin/kolla_extend_start -- run: chmod 755 /usr/local/bin/kolla_extend_start -- run: cp -a /usr/bin/keystone-wsgi-public /var/www/cgi-bin/keystone/main -- run: cp -a /usr/bin/keystone-wsgi-admin /var/www/cgi-bin/keystone/admin -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf -- run: sed -i -r 's,^(Listen 443),#\1,' 
/etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/keystone /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_auth_gssapi - - mod_auth_mellon - - mod_auth_openidc - - mod_ssl - - openstack-keystone - - python3-ldappool - - python3-mod_wsgi - - python3-requests-kerberos diff --git a/container-images/tcib/base/os/manila-base/manila-api/manila-api.yaml b/container-images/tcib/base/os/manila-base/manila-api/manila-api.yaml deleted file mode 100644 index f940db655..000000000 --- a/container-images/tcib/base/os/manila-base/manila-api/manila-api.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /var/www/cgi-bin/manila && cp -a /usr/bin/manila-wsgi /var/www/cgi-bin/manila/manila-wsgi && sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: chown -R manila /var/www/cgi-bin/manila && chmod 755 /var/www/cgi-bin/manila/manila-wsgi -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/manila-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - python3-mod_wsgi -tcib_user: manila diff --git a/container-images/tcib/base/os/manila-base/manila-base.yaml b/container-images/tcib/base/os/manila-base/manila-base.yaml deleted file mode 100644 index 8f69e14a9..000000000 --- a/container-images/tcib/base/os/manila-base/manila-base.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage manila -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-manila diff --git a/container-images/tcib/base/os/manila-base/manila-scheduler/manila-scheduler.yaml b/container-images/tcib/base/os/manila-base/manila-scheduler/manila-scheduler.yaml deleted file mode 100644 index b89dc7f01..000000000 --- a/container-images/tcib/base/os/manila-base/manila-scheduler/manila-scheduler.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tcib_actions: -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/manila-scheduler /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_user: manila diff --git a/container-images/tcib/base/os/manila-base/manila-share/manila-share.yaml b/container-images/tcib/base/os/manila-base/manila-share/manila-share.yaml deleted file mode 100644 index e3e995cbd..000000000 --- a/container-images/tcib/base/os/manila-base/manila-share/manila-share.yaml +++ /dev/null @@ -1,16 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /etc/libqb -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/manila-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - ceph-common - - dbus-tools - - libqb - - openstack-manila-share - - pacemaker - - pacemaker-remote - - pcs - - resource-agents - - sqlite -tcib_user: manila diff --git a/container-images/tcib/base/os/neutron-base/ironic-neutron-agent/ironic-neutron-agent.yaml b/container-images/tcib/base/os/neutron-base/ironic-neutron-agent/ironic-neutron-agent.yaml deleted file mode 100644 index 23ee30abf..000000000 --- a/container-images/tcib/base/os/neutron-base/ironic-neutron-agent/ironic-neutron-agent.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: dnf -y 
install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ironic-neutron-agent /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - python3-ironic-neutron-agent - - python3-networking-baremetal -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-agent-base.yaml b/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-agent-base.yaml deleted file mode 100644 index c562a7f40..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-agent-base.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - libseccomp - - podman diff --git a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-dhcp-agent/neutron-dhcp-agent.yaml b/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-dhcp-agent/neutron-dhcp-agent.yaml deleted file mode 100644 index e3113e273..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-dhcp-agent/neutron-dhcp-agent.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tcib_actions: -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/neutron-dhcp /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-l3-agent/neutron-l3-agent.yaml b/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-l3-agent/neutron-l3-agent.yaml deleted file mode 100644 index c7cef64de..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-l3-agent/neutron-l3-agent.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tcib_actions: -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/neutron-l3 /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-metadata-agent-ovn/neutron-metadata-agent-ovn.yaml b/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-metadata-agent-ovn/neutron-metadata-agent-ovn.yaml deleted file mode 100644 index 8f63b2085..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-agent-base/neutron-metadata-agent-ovn/neutron-metadata-agent-ovn.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ovn-metadata /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - python3-networking-ovn-metadata-agent -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-base.yaml b/container-images/tcib/base/os/neutron-base/neutron-base.yaml deleted file mode 100644 index 4c5bb52c4..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-base.yaml +++ /dev/null @@ -1,16 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage neutron -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/neutron-base/neutron_sudoers /etc/sudoers.d/neutron_sudoers -- run: chmod 440 /etc/sudoers.d/neutron_sudoers -# TODO(emilien) add support for tripleo-common 
being installed from source -tcib_packages: - common: - - iputils - - net-tools - - openstack-neutron - - openstack-neutron-rpc-server - - openstack-neutron-ml2 - - openvswitch - - python3-networking-baremetal - - python3-openvswitch diff --git a/container-images/tcib/base/os/neutron-base/neutron-metadata-agent/neutron-metadata-agent.yaml b/container-images/tcib/base/os/neutron-base/neutron-metadata-agent/neutron-metadata-agent.yaml deleted file mode 100644 index 5c0c4412c..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-metadata-agent/neutron-metadata-agent.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tcib_actions: -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/neutron-metadata /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-mlnx-agent/neutron-mlnx-agent.yaml b/container-images/tcib/base/os/neutron-base/neutron-mlnx-agent/neutron-mlnx-agent.yaml deleted file mode 100644 index 8aaddaaed..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-mlnx-agent/neutron-mlnx-agent.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - python3-networking-mlnx - - python3-libvirt - - python3-ethtool -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-openvswitch-agent/neutron-openvswitch-agent.yaml b/container-images/tcib/base/os/neutron-base/neutron-openvswitch-agent/neutron-openvswitch-agent.yaml deleted file mode 100644 index b22b3156c..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-openvswitch-agent/neutron-openvswitch-agent.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/neutron-ovs-agent /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-neutron-openvswitch -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-server-ovn/neutron-server-ovn.yaml b/container-images/tcib/base/os/neutron-base/neutron-server-ovn/neutron-server-ovn.yaml deleted file mode 100644 index 0d408591e..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-server-ovn/neutron-server-ovn.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/neutron-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - python3-networking-baremetal - - python3-networking-ovn -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-server/neutron-server.yaml b/container-images/tcib/base/os/neutron-base/neutron-server/neutron-server.yaml deleted file mode 100644 index 5630942b3..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-server/neutron-server.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 
's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/neutron-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - python3-networking-baremetal - - python3-networking-bgpvpn - - python3-mod_wsgi -tcib_user: neutron diff --git a/container-images/tcib/base/os/neutron-base/neutron-sriov-agent/neutron-sriov-agent.yaml b/container-images/tcib/base/os/neutron-base/neutron-sriov-agent/neutron-sriov-agent.yaml deleted file mode 100644 index 10d743e2e..000000000 --- a/container-images/tcib/base/os/neutron-base/neutron-sriov-agent/neutron-sriov-agent.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/neutron-sriov-agent /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-neutron-sriov-nic-agent -tcib_user: neutron diff --git a/container-images/tcib/base/os/nova-base/nova-api/nova-api.yaml b/container-images/tcib/base/os/nova-base/nova-api/nova-api.yaml deleted file mode 100644 index f9a3501b9..000000000 --- a/container-images/tcib/base/os/nova-base/nova-api/nova-api.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/nova-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-nova-api - - python3-mod_wsgi -tcib_user: nova diff --git a/container-images/tcib/base/os/nova-base/nova-base.yaml b/container-images/tcib/base/os/nova-base/nova-base.yaml deleted file mode 100644 index 1e4971b77..000000000 --- a/container-images/tcib/base/os/nova-base/nova-base.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage nova -- run: mkdir -p /etc/ssh && touch /etc/ssh/ssh_known_host -- run: dnf install -y {{ tcib_packages | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: -- openstack-nova-common diff --git a/container-images/tcib/base/os/nova-base/nova-compute-ironic/nova-compute-ironic.yaml b/container-images/tcib/base/os/nova-base/nova-compute-ironic/nova-compute-ironic.yaml deleted file mode 100644 index 577e621f5..000000000 --- a/container-images/tcib/base/os/nova-base/nova-compute-ironic/nova-compute-ironic.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -# this needs to happen after installing nova-compute because the distgit does usermod to add libvirt/qemu groups -- run: bash /usr/local/bin/uid_gid_manage nova -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/nova-ironic /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - xorriso - - nvme-cli - - openstack-nova-compute -tcib_user: nova diff --git a/container-images/tcib/base/os/nova-base/nova-compute/nova-compute.yaml b/container-images/tcib/base/os/nova-base/nova-compute/nova-compute.yaml deleted file mode 100644 index a83abfb81..000000000 --- a/container-images/tcib/base/os/nova-base/nova-compute/nova-compute.yaml +++ /dev/null @@ -1,38 +0,0 @@
-tcib_actions: -- run: if [ "$(uname -m)" == "x86_64" ]; then dnf -y install {{ tcib_packages['x86_64'] | join(' ') }}; fi -- run: if [ '{{ tcib_release }}' == '8' ]; then dnf -y install {{ tcib_packages['el8'] | join(' ') }}; fi -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -# this needs to happen after installing nova-compute because the distgit does usermod to add libvirt/qemu groups -- run: bash /usr/local/bin/uid_gid_manage nova -- run: rm -f /etc/machine-id -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/nova-compute /openstack/healthcheck && chmod a+rx /openstack/healthcheck -- run: if [ -f /usr/share/qemu/firmware/50-edk2-ovmf-cc.json ] && [ -f /usr/share/qemu/firmware/50-edk2-ovmf-amdsev.json ]; then jq ".mapping[\"nvram-template\"] = $(jq ".mapping[\"nvram-template\"]" /usr/share/qemu/firmware/50-edk2-ovmf-cc.json)" /usr/share/qemu/firmware/50-edk2-ovmf-amdsev.json > /tmp/50-edk2-ovmf-amdsev_.json && mv -f /tmp/50-edk2-ovmf-amdsev_.json /usr/share/qemu/firmware/50-edk2-ovmf-amdsev.json; fi -tcib_packages: - common: - - ceph-common - - device-mapper-multipath - - e2fsprogs - - jq - - xorriso - - iscsi-initiator-utils - - ndctl - - nfs-utils - - nvme-cli - - openssh-server - - openstack-nova-compute - - openstack-nova-migration - - openvswitch - - parted - - python3-libguestfs - - python3-oslo-vmware - - python3-rtslib - - swtpm - - swtpm-tools - - targetcli - - xfsprogs - x86_64: - - daxio - el8: - - sysfsutils - - trousers -tcib_user: nova diff --git a/container-images/tcib/base/os/nova-base/nova-conductor/nova-conductor.yaml b/container-images/tcib/base/os/nova-base/nova-conductor/nova-conductor.yaml deleted file mode 100644 index 5c17cd388..000000000 --- a/container-images/tcib/base/os/nova-base/nova-conductor/nova-conductor.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/nova-conductor /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-nova-conductor -tcib_user: nova diff --git a/container-images/tcib/base/os/nova-base/nova-libvirt/nova-libvirt.yaml b/container-images/tcib/base/os/nova-base/nova-libvirt/nova-libvirt.yaml deleted file mode 100644 index de7eb188c..000000000 --- a/container-images/tcib/base/os/nova-base/nova-libvirt/nova-libvirt.yaml +++ /dev/null @@ -1,29 +0,0 @@ -tcib_actions: -- run: if [ "$(uname -m)" == "x86_64" ]; then dnf -y install {{ tcib_packages['x86_64'] | join(' ') }}; fi -- run: if [ '{{ tcib_release }}' == '8' ]; then dnf -y install {{ tcib_packages['el8'] | join(' ') }}; fi -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/nova-libvirt /openstack/healthcheck && chmod a+rx /openstack/healthcheck -- run: if [ -f /usr/share/qemu/firmware/50-edk2-ovmf-cc.json ] && [ -f /usr/share/qemu/firmware/50-edk2-ovmf-amdsev.json ]; then jq ".mapping[\"nvram-template\"] = $(jq ".mapping[\"nvram-template\"]" /usr/share/qemu/firmware/50-edk2-ovmf-cc.json)" /usr/share/qemu/firmware/50-edk2-ovmf-amdsev.json > /tmp/50-edk2-ovmf-amdsev_.json && mv -f /tmp/50-edk2-ovmf-amdsev_.json /usr/share/qemu/firmware/50-edk2-ovmf-amdsev.json; fi -tcib_packages: - common: - - ceph-common - - cyrus-sasl-scram - - jq - - libguestfs - - libseccomp - - libvirt-admin - - 
libvirt-client - - libvirt-daemon - - libvirt-daemon-config-nwfilter - - libvirt-daemon-driver-nwfilter - - openssl-perl - - openstack-nova-migration - - openvswitch - - podman - - qemu-kvm - - swtpm - - swtpm-tools - x86_64: - - edk2-ovmf - el8: - - trousers diff --git a/container-images/tcib/base/os/nova-base/nova-novncproxy/nova-novncproxy.yaml b/container-images/tcib/base/os/nova-base/nova-novncproxy/nova-novncproxy.yaml deleted file mode 100644 index 465a8a3de..000000000 --- a/container-images/tcib/base/os/nova-base/nova-novncproxy/nova-novncproxy.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/nova-vnc-proxy /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - novnc - - openstack-nova-novncproxy -tcib_user: nova diff --git a/container-images/tcib/base/os/nova-base/nova-scheduler/nova-scheduler.yaml b/container-images/tcib/base/os/nova-base/nova-scheduler/nova-scheduler.yaml deleted file mode 100644 index 0775222d9..000000000 --- a/container-images/tcib/base/os/nova-base/nova-scheduler/nova-scheduler.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/nova-scheduler /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-nova-scheduler -tcib_user: nova diff --git a/container-images/tcib/base/os/octavia-base/octavia-api/octavia-api.yaml b/container-images/tcib/base/os/octavia-base/octavia-api/octavia-api.yaml deleted file mode 100644 index ceef338fa..000000000 --- a/container-images/tcib/base/os/octavia-base/octavia-api/octavia-api.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/octavia-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-octavia-api - - python3-mod_wsgi - - python3-ovn-octavia-provider -tcib_user: octavia diff --git a/container-images/tcib/base/os/octavia-base/octavia-base.yaml b/container-images/tcib/base/os/octavia-base/octavia-base.yaml deleted file mode 100644 index 4dcbbb01e..000000000 --- a/container-images/tcib/base/os/octavia-base/octavia-base.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage octavia -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openstack-octavia-common diff --git a/container-images/tcib/base/os/octavia-base/octavia-health-manager/octavia-health-manager.yaml b/container-images/tcib/base/os/octavia-base/octavia-health-manager/octavia-health-manager.yaml deleted file mode 100644 index f7e48296a..000000000 --- a/container-images/tcib/base/os/octavia-base/octavia-health-manager/octavia-health-manager.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/octavia-health-manager 
/openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-octavia-health-manager -tcib_user: octavia diff --git a/container-images/tcib/base/os/octavia-base/octavia-housekeeping/octavia-housekeeping.yaml b/container-images/tcib/base/os/octavia-base/octavia-housekeeping/octavia-housekeeping.yaml deleted file mode 100644 index fa06e1cd4..000000000 --- a/container-images/tcib/base/os/octavia-base/octavia-housekeeping/octavia-housekeeping.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/octavia-housekeeping /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-octavia-housekeeping -tcib_user: octavia diff --git a/container-images/tcib/base/os/octavia-base/octavia-worker/octavia-worker.yaml b/container-images/tcib/base/os/octavia-base/octavia-worker/octavia-worker.yaml deleted file mode 100644 index b579ac6e9..000000000 --- a/container-images/tcib/base/os/octavia-base/octavia-worker/octavia-worker.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/octavia-worker /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-octavia-worker -tcib_user: octavia diff --git a/container-images/tcib/base/os/os.yaml b/container-images/tcib/base/os/os.yaml deleted file mode 100644 index cba4cf547..000000000 --- a/container-images/tcib/base/os/os.yaml +++ /dev/null @@ -1,20 +0,0 @@ -tcib_actions: -- run: dnf install -y {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - cronie - - iscsi-initiator-utils - - python3-barbicanclient - - python3-cinderclient - - python3-designateclient - - python3-glanceclient - - python3-gnocchiclient - - python3-ironicclient - - python3-keystoneclient - - python3-manilaclient - - python3-neutronclient - - python3-novaclient - - python3-octaviaclient - - python3-openstackclient - - python3-swiftclient - - python3-pymemcache diff --git a/container-images/tcib/base/os/placement-api/placement-api.yaml b/container-images/tcib/base/os/placement-api/placement-api.yaml deleted file mode 100644 index eeb91a454..000000000 --- a/container-images/tcib/base/os/placement-api/placement-api.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: sed -i -r 's,^(Listen 80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/placement-api /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-placement-api - - openstack-placement-common - - python3-mod_wsgi diff --git a/container-images/tcib/base/os/swift-base/swift-account/swift-account.yaml b/container-images/tcib/base/os/swift-base/swift-account/swift-account.yaml deleted file mode 100644 index 265f88a93..000000000 --- a/container-images/tcib/base/os/swift-base/swift-account/swift-account.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln 
-s /usr/share/openstack-tripleo-common/healthcheck/swift-account-server /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-swift-account -tcib_user: swift diff --git a/container-images/tcib/base/os/swift-base/swift-base.yaml b/container-images/tcib/base/os/swift-base/swift-base.yaml deleted file mode 100644 index beaffcdc5..000000000 --- a/container-images/tcib/base/os/swift-base/swift-base.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage swift -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/swift-base/swift-rootwrap /usr/bin/swift-rootwrap -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/swift-base/swift-sudoers /etc/sudoers.d/swift-sudoers -- run: chmod 755 /usr/bin/swift-rootwrap && chmod 440 /etc/sudoers.d/swift-sudoers -- run: touch /etc/swift/rootwrap.conf && chmod 644 /etc/swift/rootwrap.conf && crudini --set /etc/swift/rootwrap.conf DEFAULT filters_path /etc/swift/rootwrap.d,/usr/share/swift/rootwrap && crudini --set /etc/swift/rootwrap.conf DEFAULT exec_dirs /sbin,/usr/sbin,/bin,/usr/bin && crudini --set /etc/swift/rootwrap.conf DEFAULT use_syslog False && crudini --set /etc/swift/rootwrap.conf DEFAULT syslog_log_facility syslog && crudini --set /etc/swift/rootwrap.conf DEFAULT syslog_log_level ERROR -tcib_packages: - common: - - nmap-ncat - - openstack-swift - - rsync-daemon diff --git a/container-images/tcib/base/os/swift-base/swift-container/swift-container.yaml b/container-images/tcib/base/os/swift-base/swift-container/swift-container.yaml deleted file mode 100644 index bbbf41ad5..000000000 --- a/container-images/tcib/base/os/swift-base/swift-container/swift-container.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/swift-container-server /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-swift-container -tcib_user: swift diff --git a/container-images/tcib/base/os/swift-base/swift-object/swift-object.yaml b/container-images/tcib/base/os/swift-base/swift-object/swift-object.yaml deleted file mode 100644 index 5cc9a201f..000000000 --- a/container-images/tcib/base/os/swift-base/swift-object/swift-object.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/swift-object-server /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openstack-swift-object -tcib_user: swift diff --git a/container-images/tcib/base/os/swift-base/swift-proxy-server/swift-proxy-server.yaml b/container-images/tcib/base/os/swift-base/swift-proxy-server/swift-proxy-server.yaml deleted file mode 100644 index 7b86bba81..000000000 --- a/container-images/tcib/base/os/swift-base/swift-proxy-server/swift-proxy-server.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/swift-proxy /openstack/healthcheck && chmod a+rx /openstack/healthcheck -- run: sed -i -r 's,^(Listen 
80),#\1,' /etc/httpd/conf/httpd.conf && sed -i -r 's,^(Listen 443),#\1,' /etc/httpd/conf.d/ssl.conf -tcib_packages: - common: - - httpd - - mod_ssl - - openstack-swift-proxy - - python3-ceilometermiddleware -tcib_user: swift diff --git a/container-images/tcib/base/os/tempest/tempest.yaml b/container-images/tcib/base/os/tempest/tempest.yaml deleted file mode 100644 index e3303f9f9..000000000 --- a/container-images/tcib/base/os/tempest/tempest.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/tcib/base/os/tempest/tempest_sudoers /etc/sudoers.d/tempest_sudoers -- run: chmod 440 /etc/sudoers.d/tempest_sudoers - -tcib_packages: - common: - - iputils - - openstack-tempest-all -tcib_user: tempest diff --git a/container-images/tcib/base/os/tempest/tempest_sudoers b/container-images/tcib/base/os/tempest/tempest_sudoers deleted file mode 100644 index 0b7b4a873..000000000 --- a/container-images/tcib/base/os/tempest/tempest_sudoers +++ /dev/null @@ -1 +0,0 @@ -tempest ALL=(ALL) NOPASSWD: ALL diff --git a/container-images/tcib/base/ovn-base/ovn-base.yaml b/container-images/tcib/base/ovn-base/ovn-base.yaml deleted file mode 100644 index 36971a201..000000000 --- a/container-images/tcib/base/ovn-base/ovn-base.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - openvswitch - - openvswitch-ovn-common - - python3-netifaces - - python3-openvswitch - - tcpdump diff --git a/container-images/tcib/base/ovn-base/ovn-controller/ovn-controller.yaml b/container-images/tcib/base/ovn-base/ovn-controller/ovn-controller.yaml deleted file mode 100644 index 5fb1e0182..000000000 --- a/container-images/tcib/base/ovn-base/ovn-controller/ovn-controller.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ovn-controller /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - openvswitch-ovn-host diff --git a/container-images/tcib/base/ovn-base/ovn-nb-db-server/ovn-nb-db-server.yaml b/container-images/tcib/base/ovn-base/ovn-nb-db-server/ovn-nb-db-server.yaml deleted file mode 100644 index aad8ea9a3..000000000 --- a/container-images/tcib/base/ovn-base/ovn-nb-db-server/ovn-nb-db-server.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/ovn/ovn-nb-db-server/start_nb_db_server.sh /usr/local/bin/start-nb-db-server -- run: chmod 755 /usr/local/bin/start-nb-db-server -tcib_packages: - common: - - openvswitch-ovn-central diff --git a/container-images/tcib/base/ovn-base/ovn-northd/ovn-northd.yaml b/container-images/tcib/base/ovn-base/ovn-northd/ovn-northd.yaml deleted file mode 100644 index 3f9a6211d..000000000 --- a/container-images/tcib/base/ovn-base/ovn-northd/ovn-northd.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir -p /etc/libqb -- run: ln -s 
/usr/share/openstack-tripleo-common/healthcheck/ovn-dbs /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - libqb - - openvswitch-ovn-central - - pacemaker - - pacemaker-remote - - pcs - - resource-agents diff --git a/container-images/tcib/base/ovn-base/ovn-sb-db-server/ovn-sb-db-server.yaml b/container-images/tcib/base/ovn-base/ovn-sb-db-server/ovn-sb-db-server.yaml deleted file mode 100644 index 30b516790..000000000 --- a/container-images/tcib/base/ovn-base/ovn-sb-db-server/ovn-sb-db-server.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/ovn/ovn-sb-db-server/start_sb_db_server.sh /usr/local/bin/start-sb-db-server -- run: chmod 755 /usr/local/bin/start-sb-db-server -tcib_packages: - common: - - openvswitch-ovn-central diff --git a/container-images/tcib/base/ovn-bgp-agent/ovn_bgp_agent.yaml b/container-images/tcib/base/ovn-bgp-agent/ovn_bgp_agent.yaml deleted file mode 100644 index 0fd4956aa..000000000 --- a/container-images/tcib/base/ovn-bgp-agent/ovn_bgp_agent.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage ovn-bgp -- run: dnf install -y {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/ovn-bgp-agent /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - ovn-bgp-agent -tcib_user: ovn-bgp diff --git a/container-images/tcib/base/qdrouterd/qdrouterd.yaml b/container-images/tcib/base/qdrouterd/qdrouterd.yaml deleted file mode 100644 index b355308f0..000000000 --- a/container-images/tcib/base/qdrouterd/qdrouterd.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/qdrouterd /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - cyrus-sasl-lib - - cyrus-sasl-plain - - qpid-dispatch-router - - qpid-dispatch-tools -tcib_user: qdrouterd diff --git a/container-images/tcib/base/rabbitmq/rabbitmq.yaml b/container-images/tcib/base/rabbitmq/rabbitmq.yaml deleted file mode 100644 index a2d4815b1..000000000 --- a/container-images/tcib/base/rabbitmq/rabbitmq.yaml +++ /dev/null @@ -1,16 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf && rm -f /etc/rabbitmq/rabbitmq.conf -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/rabbitmq/extend_start.sh /usr/local/bin/kolla_extend_start -- run: chmod 755 /usr/local/bin/kolla_extend_start -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/rabbitmq /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - hostname - - libqb - - pacemaker - - pacemaker-remote - - pcs - - rabbitmq-server - - resource-agents -tcib_user: rabbitmq diff --git a/container-images/tcib/base/redis/redis.yaml b/container-images/tcib/base/redis/redis.yaml deleted file mode 100644 index 22181494f..000000000 --- a/container-images/tcib/base/redis/redis.yaml +++ /dev/null @@ -1,16 +0,0 @@ -tcib_actions: -- run: bash 
/usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages.common | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: mkdir /etc/libqb -- run: ln -s /usr/share/openstack-tripleo-common/healthcheck/redis /openstack/healthcheck && chmod a+rx /openstack/healthcheck -tcib_packages: - common: - - libqb - - pacemaker - - pacemaker-remote - - pcs - - procps-ng - - redis - - resource-agents - - stunnel -tcib_user: redis diff --git a/container-images/tcib/base/rsyslog/rsyslog.yaml b/container-images/tcib/base/rsyslog/rsyslog.yaml deleted file mode 100644 index 234cb2efd..000000000 --- a/container-images/tcib/base/rsyslog/rsyslog.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - rsyslog - - rsyslog-elasticsearch - - rsyslog-gnutls - - rsyslog-mmjsonparse - - rsyslog-mmnormalize diff --git a/container-images/tcib/base/tripleoclient/tripleoclient.yaml b/container-images/tcib/base/tripleoclient/tripleoclient.yaml deleted file mode 100644 index 5aabe3b0e..000000000 --- a/container-images/tcib/base/tripleoclient/tripleoclient.yaml +++ /dev/null @@ -1,28 +0,0 @@ -tcib_actions: -- run: bash /usr/local/bin/uid_gid_manage {{ tcib_user }} -- run: dnf -y install {{ tcib_packages['el' ~ tcib_release] | join(' ') }}; -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- run: openstack complete | tee /etc/bash_completion.d/osc.bash_completion > /dev/null -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/tripleoclient/create_super_user.sh /usr/local/bin/create_super_user.sh -- run: cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/tripleoclient/tripleoclient_sudoers /etc/sudoers.d/tripleoclient_sudoers -- run: chmod 750 /usr/local/bin/create_super_user.sh -- run: chmod 440 /etc/sudoers.d/tripleoclient_sudoers -tcib_packages: - common: - - sudo - - e2fsprogs - - git - - openssh-clients - - openstack-tripleo-validations - - puppet-tripleo - - python3-openstackclient - - python3-tripleoclient - - xfsprogs - - python3-osc-placement - - bash-completion - - ipa-client - el8: - - ceph-ansible - el9: - - cephadm -tcib_user: cloud-admin diff --git a/container-images/tcib/base/unbound/unbound.yaml b/container-images/tcib/base/unbound/unbound.yaml deleted file mode 100644 index 33b3fd362..000000000 --- a/container-images/tcib/base/unbound/unbound.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tcib_actions: -- run: dnf -y install {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -tcib_packages: - common: - - unbound diff --git a/container-images/tcib/tripleo-ansible-ee/tripleo-ansible-ee.yaml b/container-images/tcib/tripleo-ansible-ee/tripleo-ansible-ee.yaml deleted file mode 100644 index 4008394d3..000000000 --- a/container-images/tcib/tripleo-ansible-ee/tripleo-ansible-ee.yaml +++ /dev/null @@ -1,45 +0,0 @@ -tcib_args: - TRIPLEO_ANSIBLE_REQ: "/usr/share/openstack-tripleo-common-containers/container-images/kolla/tripleo-ansible-ee/requirements.yaml" -tcib_from: quay.io/tripleoansible/ansible-runner:stream9 -tcib_actions: -- run: dnf install -y {{ tcib_packages['common'] | join(' ') }} && dnf clean all && rm -rf /var/cache/dnf -- user: root -- run: >- - cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/tripleo-ansible-ee/settings /runner/env/settings && - chmod 777 /runner/env/settings -# To be able 
for Ansible to deal with the arbitrary UIDs that OpenShift is enforcing -# in its default SCC, it would be necessary to add "RUN chmod g=u /etc/passwd /etc/group" -# to the tcib file so the subsequent image lets the operator set temporary permissions -# on the OpenShift UID for Ansible to work. -- run: chmod g=u /etc/passwd /etc/group -- run: chmod -R 777 /usr/share/ansible -- workdir: /usr/share/ansible/roles -- run: >- - if [ -f "$TRIPLEO_ANSIBLE_REQ" ]; then - ansible-galaxy role install -r $TRIPLEO_ANSIBLE_REQ --roles-path /usr/share/ansible/roles; fi -- workdir: /usr/share/ansible/collections -- run: >- - if [ -f "$TRIPLEO_ANSIBLE_REQ" ]; then - ansible-galaxy collection install -r $TRIPLEO_ANSIBLE_REQ --collections-path /usr/share/ansible/collections; fi -- workdir: /runner -- run: >- - if [ -d "/usr/share/ansible/roles" ]; then - rm -rf /runner/roles && ln -snf /usr/share/ansible/roles roles; fi -- run: >- - if [ -d "/usr/share/ansible/tripleo-playbooks" ]; then - rm -rf /runner/project && ln -snf /usr/share/ansible/tripleo-playbooks project; fi -# Append the ansible-runner entrypoint content to tripleo_entrypoint after removing its bash shebang -- run: >- - cp /usr/share/openstack-tripleo-common-containers/container-images/kolla/tripleo-ansible-ee/tripleo_entrypoint.sh /bin/tripleo_entrypoint && - sed -i '1d' /bin/entrypoint && - cat /bin/entrypoint >> /bin/tripleo_entrypoint && - chmod +x /bin/tripleo_entrypoint -- user: 1001 -tcib_entrypoint: tripleo_entrypoint -# Install dumb-init from a package instead of from pip; it is used in the ansible-runner entrypoint. -tcib_packages: - common: - - ansible-role-chrony - - dumb-init - - openstack-tripleo-common-containers - - tripleo-ansible diff --git a/container-images/tripleo_containers.yaml b/container-images/tripleo_containers.yaml deleted file mode 100644 index 7e20ace07..000000000 --- a/container-images/tripleo_containers.yaml +++ /dev/null @@ -1,193 +0,0 @@ -container_images: -- imagename: quay.io/tripleomastercentos9/openstack-aodh-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-aodh-evaluator:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-aodh-listener:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-aodh-notifier:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-barbican-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-barbican-keystone-listener:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-barbican-worker:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ceilometer-central:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ceilometer-compute:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ceilometer-notification:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ceilometer-ipmi:current-tripleo - image_source: tripleo -- imagename: quay.io/prometheus/pushgateway:v1.4.2 - image_source: prom -- imagename: quay.io/tripleomastercentos9/openstack-cinder-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-cinder-backup:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-cinder-scheduler:current-tripleo - image_source: tripleo -- 
imagename: quay.io/tripleomastercentos9/openstack-cinder-volume:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-collectd:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-cron:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-base:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-backend-bind9:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-central:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-mdns:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-producer:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-sink:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-designate-worker:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-etcd:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-frr:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ovn-bgp-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-glance-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-gnocchi-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-gnocchi-metricd:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-gnocchi-statsd:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-haproxy:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-heat-all:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-heat-api-cfn:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-heat-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-heat-engine:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-horizon:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ironic-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ironic-conductor:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ironic-inspector:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ironic-pxe:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ironic-neutron-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-iscsid:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-keystone:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-manila-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-manila-scheduler:current-tripleo - image_source: tripleo -- imagename: 
quay.io/tripleomastercentos9/openstack-manila-share:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-mariadb:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-memcached:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-multipathd:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-dhcp-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-l3-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-metadata-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-openvswitch-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-sriov-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-mlnx-agent:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-server:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-neutron-metadata-agent-ovn:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-nova-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-nova-compute-ironic:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-nova-compute:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-nova-conductor:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-nova-libvirt:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-nova-novncproxy:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-nova-scheduler:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-octavia-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-octavia-health-manager:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-octavia-housekeeping:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-octavia-worker:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ovn-controller:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ovn-nb-db-server:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ovn-northd:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-ovn-sb-db-server:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-placement-api:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-qdrouterd:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-rabbitmq:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-redis:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-swift-account:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-swift-container:current-tripleo - 
image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-swift-object:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-swift-proxy-server:current-tripleo - image_source: tripleo -- imagename: quay.rdoproject.org/tripleomastercentos9/daemon:current-ceph - image_source: ceph -- imagename: quay.io/prometheus/prometheus:v2.33.4 - image_source: prom -- imagename: quay.io/prometheus/alertmanager:v0.23.0 - image_source: prom -- imagename: quay.io/prometheus/node-exporter:v1.3.1 - image_source: prom -- imagename: quay.io/prometheus/pushgateway:v1.4.2 - image_source: prom -- imagename: quay.io/ceph/ceph-grafana:6.7.4 - image_source: grafana -- imagename: quay.io/ceph/keepalived:2.1.5 - image_source: keepalived -- imagename: quay.io/ceph/haproxy:2.3 - image_source: haproxy -- imagename: quay.io/tripleomastercentos9/openstack-rsyslog:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-unbound:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-tempest:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/openstack-tripleoclient:current-tripleo - image_source: tripleo -- imagename: quay.io/tripleomastercentos9/tripleo-ansible-ee:current-tripleo - image_source: tripleo diff --git a/container-images/tripleo_containers.yaml.j2 b/container-images/tripleo_containers.yaml.j2 deleted file mode 100644 index 87a2f7b5d..000000000 --- a/container-images/tripleo_containers.yaml.j2 +++ /dev/null @@ -1,743 +0,0 @@ -container_images_template: - -- imagename: "{{namespace}}/{{name_prefix}}aodh-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerAodhApiImage - - ContainerAodhConfigImage - services: - - OS::TripleO::Services::AodhApi - - OS::TripleO::Services::AodhEvaluator - - OS::TripleO::Services::AodhListener - - OS::TripleO::Services::AodhNotifier - -- imagename: "{{namespace}}/{{name_prefix}}aodh-evaluator{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerAodhEvaluatorImage - services: - - OS::TripleO::Services::AodhEvaluator - -- imagename: "{{namespace}}/{{name_prefix}}aodh-listener{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerAodhListenerImage - services: - - OS::TripleO::Services::AodhListener - -- imagename: "{{namespace}}/{{name_prefix}}aodh-notifier{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerAodhNotifierImage - services: - - OS::TripleO::Services::AodhNotifier - -- imagename: "{{namespace}}/{{name_prefix}}barbican-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerBarbicanApiImage - - ContainerBarbicanConfigImage - services: - - OS::TripleO::Services::BarbicanApi - -- imagename: "{{namespace}}/{{name_prefix}}barbican-keystone-listener{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerBarbicanKeystoneListenerImage - - ContainerBarbicanKeystoneListenerConfigImage - services: - - OS::TripleO::Services::BarbicanApi - - OS::TripleO::Services::BarbicanKeystoneListener - -- imagename: "{{namespace}}/{{name_prefix}}barbican-worker{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerBarbicanWorkerImage - - ContainerBarbicanWorkerConfigImage - services: - - OS::TripleO::Services::BarbicanApi - - OS::TripleO::Services::BarbicanWorker - -- imagename: "{{namespace}}/{{name_prefix}}ceilometer-central{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - 
ContainerCeilometerCentralImage - - ContainerCeilometerConfigImage - services: - - OS::TripleO::Services::CeilometerAgentCentral - - OS::TripleO::Services::CeilometerAgentIpmi - - OS::TripleO::Services::CeilometerAgentNotification - - OS::TripleO::Services::ComputeCeilometerAgent - -- imagename: "{{namespace}}/{{name_prefix}}ceilometer-compute{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCeilometerComputeImage - services: - - OS::TripleO::Services::ComputeCeilometerAgent - -- imagename: "{{namespace}}/{{name_prefix}}ceilometer-notification{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCeilometerNotificationImage - services: - - OS::TripleO::Services::CeilometerAgentNotification - -- imagename: "{{namespace}}/{{name_prefix}}ceilometer-ipmi{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCeilometerIpmiImage - services: - - OS::TripleO::Services::CeilometerAgentIpmi - -- imagename: "{{pushgateway_namespace}}/{{pushgateway_image}}:{{pushgateway_tag}}" - image_source: prom - params: - - ContainerCeilometerPushgatewayImage - services: - - OS::Tripleo::Services::CeilometerPushgateway - -- imagename: "{{namespace}}/{{name_prefix}}cinder-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCinderApiImage - - ContainerCinderConfigImage - services: - - OS::TripleO::Services::BlockStorageCinderVolume - - OS::TripleO::Services::CinderApi - - OS::TripleO::Services::CinderBackup - - OS::TripleO::Services::CinderScheduler - - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::CinderVolumeEdge - -- imagename: "{{namespace}}/{{name_prefix}}cinder-backup{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCinderBackupImage - services: - - OS::TripleO::Services::CinderBackup - -- imagename: "{{namespace}}/{{name_prefix}}cinder-scheduler{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCinderSchedulerImage - services: - - OS::TripleO::Services::CinderScheduler - -- imagename: "{{namespace}}/{{name_prefix}}cinder-volume{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCinderVolumeImage - services: - - OS::TripleO::Services::BlockStorageCinderVolume - - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::CinderVolumeEdge - -- imagename: "{{namespace}}/{{name_prefix}}collectd{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCollectdConfigImage - - ContainerCollectdImage - services: - - OS::TripleO::Services::Collectd - -- imagename: "{{namespace}}/{{name_prefix}}cron{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerCrondConfigImage - - ContainerCrondImage - services: - - OS::TripleO::Services::ContainersLogrotateCrond - -- imagename: "{{namespace}}/{{name_prefix}}designate-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateApiImage - - ContainerDesignateApiConfigImage - services: - - OS::TripleO::Services::DesignateApi - - OS::TripleO::Services::DesignateCentral - - OS::TripleO::Services::DesignateMDNS - - OS::TripleO::Services::DesignateSink - - OS::TripleO::Services::DesignateProducer - - OS::TripleO::Services::DesignateWorker - -- imagename: "{{namespace}}/{{name_prefix}}designate-base{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateConfigImage - services: - - OS::TripleO::Services::DesignateCentral - - OS::TripleO::Services::DesignateMDNS - - OS::TripleO::Services::DesignateSink - - OS::TripleO::Services::DesignateProducer 
- - OS::TripleO::Services::DesignateWorker - -- imagename: "{{namespace}}/{{name_prefix}}designate-backend-bind9{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateBackendBIND9Image - services: - - OS::TripleO::Services::DesignateWorker - -- imagename: "{{namespace}}/{{name_prefix}}designate-central{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateCentralImage - services: - - OS::TripleO::Services::DesignateCentral - -- imagename: "{{namespace}}/{{name_prefix}}designate-mdns{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateMDNSImage - services: - - OS::TripleO::Services::DesignateMDNS - -- imagename: "{{namespace}}/{{name_prefix}}designate-producer{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateProducerImage - services: - - OS::TripleO::Services::DesignateProducer - -- imagename: "{{namespace}}/{{name_prefix}}designate-sink{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateSinkImage - services: - - OS::TripleO::Services::DesignateSink - -- imagename: "{{namespace}}/{{name_prefix}}designate-worker{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerDesignateWorkerImage - services: - - OS::TripleO::Services::DesignateWorker - -- imagename: "{{namespace}}/{{name_prefix}}etcd{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerEtcdConfigImage - - ContainerEtcdImage - services: - - OS::TripleO::Services::Etcd - -- imagename: "{{namespace}}/{{name_prefix}}frr{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerFrrImage - services: - - OS::TripleO::Services::Frr - -- imagename: "{{namespace}}/{{name_prefix}}ovn-bgp-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOvnBgpAgentImage - services: - - OS::TripleO::Services::Frr - -- imagename: "{{namespace}}/{{name_prefix}}glance-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerGlanceApiConfigImage - - ContainerGlanceApiImage - - ContainerGlanceApiInternalConfigImage - services: - - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceApiEdge - - OS::TripleO::Services::GlanceApiInternal - -- imagename: "{{namespace}}/{{name_prefix}}gnocchi-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerGnocchiApiImage - - ContainerGnocchiConfigImage - services: - - OS::TripleO::Services::GnocchiApi - - OS::TripleO::Services::GnocchiMetricd - - OS::TripleO::Services::GnocchiStatsd - -- imagename: "{{namespace}}/{{name_prefix}}gnocchi-metricd{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerGnocchiMetricdImage - services: - - OS::TripleO::Services::GnocchiMetricd - -- imagename: "{{namespace}}/{{name_prefix}}gnocchi-statsd{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerGnocchiStatsdImage - services: - - OS::TripleO::Services::GnocchiStatsd - -- imagename: "{{namespace}}/{{name_prefix}}haproxy{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerHAProxyConfigImage - - ContainerHAProxyImage - services: - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::HAproxyEdge - -- imagename: "{{namespace}}/{{name_prefix}}heat-all{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerHeatAllImage - services: - - OS::TripleO::Services::HeatEphemeral - -- imagename: "{{namespace}}/{{name_prefix}}heat-api-cfn{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - 
ContainerHeatApiCfnConfigImage - - ContainerHeatApiCfnImage - services: - - OS::TripleO::Services::HeatApiCfn - -- imagename: "{{namespace}}/{{name_prefix}}heat-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerHeatApiConfigImage - - ContainerHeatApiImage - - ContainerHeatConfigImage - services: - - OS::TripleO::Services::HeatApi - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::HeatEphemeral - -- imagename: "{{namespace}}/{{name_prefix}}heat-engine{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerHeatEngineImage - services: - - OS::TripleO::Services::HeatEngine - - OS::TripleO::Services::HeatEphemeral - -- imagename: "{{namespace}}/{{name_prefix}}horizon{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerHorizonConfigImage - - ContainerHorizonImage - services: - - OS::TripleO::Services::Horizon - -- imagename: "{{namespace}}/{{name_prefix}}ironic-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerIronicApiImage - - ContainerIronicApiConfigImage - services: - - OS::TripleO::Services::IronicApi - -- imagename: "{{namespace}}/{{name_prefix}}ironic-conductor{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerIronicConductorImage - services: - - OS::TripleO::Services::IronicConductor - -- imagename: "{{namespace}}/{{name_prefix}}ironic-inspector{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerIronicInspectorImage - - ContainerIronicInspectorConfigImage - services: - - OS::TripleO::Services::IronicInspector - -- imagename: "{{namespace}}/{{name_prefix}}ironic-pxe{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerIronicConfigImage - - ContainerIronicPxeImage - services: - - OS::TripleO::Services::IronicConductor - - OS::TripleO::Services::IronicPxe - -- imagename: "{{namespace}}/{{name_prefix}}ironic-neutron-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerIronicNeutronAgentImage - - ContainerNeutronConfigImage - services: - - OS::TripleO::Services::IronicNeutronAgent - -- imagename: "{{namespace}}/{{name_prefix}}iscsid{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerIscsidConfigImage - - ContainerIscsidImage - services: - - OS::TripleO::Services::Iscsid - -- imagename: "{{namespace}}/{{name_prefix}}keystone{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerKeystoneConfigImage - - ContainerKeystoneImage - services: - - OS::TripleO::Services::Keystone - -- imagename: "{{namespace}}/{{name_prefix}}manila-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerManilaApiImage - - ContainerManilaConfigImage - services: - - OS::TripleO::Services::ManilaApi - - OS::TripleO::Services::ManilaScheduler - - OS::TripleO::Services::ManilaShare - -- imagename: "{{namespace}}/{{name_prefix}}manila-scheduler{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerManilaSchedulerImage - services: - - OS::TripleO::Services::ManilaScheduler - -- imagename: "{{namespace}}/{{name_prefix}}manila-share{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerManilaShareImage - services: - - OS::TripleO::Services::ManilaShare - -- imagename: "{{namespace}}/{{name_prefix}}mariadb{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerClustercheckConfigImage - - ContainerClustercheckImage - - ContainerMysqlConfigImage - - ContainerMysqlImage - - ContainerMysqlClientConfigImage - services: - - OS::TripleO::Services::MySQL 
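Each entry in this tripleo_containers.yaml.j2 template binds one Jinja-templated image reference to the Heat parameters and composable services that consume it. As a minimal illustration of how an entry renders (the values namespace=quay.io/tripleomaster, name_prefix=openstack-, an empty name_suffix, and tag=current-tripleo are hypothetical placeholders, not defaults taken from this repo), the mariadb entry just above would produce:

    - imagename: "quay.io/tripleomaster/openstack-mariadb:current-tripleo"
      image_source: tripleo
      params:
      - ContainerClustercheckConfigImage
      - ContainerClustercheckImage
      - ContainerMysqlConfigImage
      - ContainerMysqlImage
      - ContainerMysqlClientConfigImage
      services:
      - OS::TripleO::Services::MySQL

The image prepare tooling sets every Heat parameter under params to the rendered image name, and uses the services list to skip entries whose services are not enabled in the deployment.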
- -- imagename: "{{namespace}}/{{name_prefix}}memcached{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerMemcachedConfigImage - - ContainerMemcachedImage - services: - - OS::TripleO::Services::Memcached - -- imagename: "{{namespace}}/{{name_prefix}}multipathd{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerMultipathdConfigImage - - ContainerMultipathdImage - services: - - OS::TripleO::Services::Multipathd - -- imagename: "{{namespace}}/{{name_prefix}}neutron-dhcp-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNeutronDHCPImage - services: - - OS::TripleO::Services::NeutronDhcpAgent - -- imagename: "{{namespace}}/{{name_prefix}}neutron-l3-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNeutronL3AgentImage - services: - - OS::TripleO::Services::NeutronL3Agent - - OS::TripleO::Services::ComputeNeutronL3Agent - -- imagename: "{{namespace}}/{{name_prefix}}neutron-metadata-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNeutronMetadataImage - services: - - OS::TripleO::Services::NeutronMetadataAgent - - OS::TripleO::Services::ComputeNeutronMetadataAgent - -- imagename: "{{namespace}}/{{name_prefix}}neutron-openvswitch-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOpenvswitchImage - services: - - OS::TripleO::Services::ComputeNeutronOvsAgent - - OS::TripleO::Services::NeutronOvsAgent - -- imagename: "{{namespace}}/{{name_prefix}}neutron-sriov-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNeutronSriovImage - services: - - OS::TripleO::Services::NeutronSriovAgent - -- imagename: "{{namespace}}/{{name_prefix}}neutron-mlnx-agent{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNeutronMlnxImage - services: - - OS::TripleO::Services::NeutronMlnxAgent - -- imagename: "{{namespace}}/{{name_prefix}}neutron-server{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNeutronApiImage - - ContainerNeutronConfigImage - services: - - OS::TripleO::Services::ComputeNeutronOvsAgent - - OS::TripleO::Services::NeutronApi - - OS::TripleO::Services::NeutronCorePlugin - - OS::TripleO::Services::NeutronDhcpAgent - - OS::TripleO::Services::NeutronL3Agent - - OS::TripleO::Services::NeutronMetadataAgent - - OS::TripleO::Services::NeutronOvsAgent - - OS::TripleO::Services::NeutronServer - - OS::TripleO::Services::NeutronSriovAgent -{% if neutron_driver == "ovn" %} - - OS::TripleO::Services::OVNController - -- imagename: "{{namespace}}/{{name_prefix}}neutron-metadata-agent-ovn{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOvnMetadataImage - services: - - OS::TripleO::Services::OVNMetadataAgent -{% endif %} - -- imagename: "{{namespace}}/{{name_prefix}}nova-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNovaApiImage - - ContainerNovaConfigImage - - ContainerNovaMetadataImage - - ContainerNovaMetadataConfigImage - services: - - OS::TripleO::Services::NovaApi - - OS::TripleO::Services::NovaConductor - - OS::TripleO::Services::NovaIronic - - OS::TripleO::Services::NovaScheduler - - OS::TripleO::Services::NovaVncProxy - - OS::TripleO::Services::NovaMetadata - -- imagename: "{{namespace}}/{{name_prefix}}nova-compute-ironic{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNovaComputeIronicImage - services: - - OS::TripleO::Services::NovaIronic - -- imagename: "{{namespace}}/{{name_prefix}}nova-compute{{name_suffix}}:{{tag}}" - 
image_source: tripleo - params: - - ContainerNovaComputeImage - - ContainerNovaLibvirtConfigImage - services: - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::NovaMigrationTarget - -- imagename: "{{namespace}}/{{name_prefix}}nova-conductor{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNovaConductorImage - services: - - OS::TripleO::Services::NovaConductor - -- imagename: "{{namespace}}/{{name_prefix}}nova-libvirt{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNovaLibvirtImage - services: - - OS::TripleO::Services::NovaLibvirt - -- imagename: "{{namespace}}/{{name_prefix}}nova-novncproxy{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNovaVncProxyImage - services: - - OS::TripleO::Services::NovaVncProxy - -- imagename: "{{namespace}}/{{name_prefix}}nova-scheduler{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerNovaSchedulerImage - services: - - OS::TripleO::Services::NovaScheduler - -- imagename: "{{namespace}}/{{name_prefix}}octavia-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOctaviaApiImage - - ContainerOctaviaConfigImage - - ContainerOctaviaDriverAgentImage - - ContainerOctaviaDriverAgentConfigImage - services: - - OS::TripleO::Services::OctaviaApi - - OS::TripleO::Services::OctaviaHealthManager - - OS::TripleO::Services::OctaviaHousekeeping - - OS::TripleO::Services::OctaviaWorker - -- imagename: "{{namespace}}/{{name_prefix}}octavia-health-manager{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOctaviaHealthManagerImage - services: - - OS::TripleO::Services::OctaviaHealthManager - -- imagename: "{{namespace}}/{{name_prefix}}octavia-housekeeping{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOctaviaHousekeepingImage - services: - - OS::TripleO::Services::OctaviaHousekeeping - -- imagename: "{{namespace}}/{{name_prefix}}octavia-worker{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOctaviaWorkerImage - services: - - OS::TripleO::Services::OctaviaWorker - -{% if neutron_driver == "ovn" %} -- imagename: "{{namespace}}/{{name_prefix}}ovn-controller{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOvnControllerConfigImage - - ContainerOvnControllerImage - services: - - OS::TripleO::Services::OVNController - -- imagename: "{{namespace}}/{{name_prefix}}ovn-nb-db-server{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOvnNbDbImage - services: - - OS::TripleO::Services::OVNDBs - -- imagename: "{{namespace}}/{{name_prefix}}ovn-northd{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOvnNorthdImage - - ContainerOvnDbsImage - - ContainerOvnDbsConfigImage - services: - - OS::TripleO::Services::OVNDBs - -- imagename: "{{namespace}}/{{name_prefix}}ovn-sb-db-server{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerOvnSbDbImage - services: - - OS::TripleO::Services::OVNDBs -{% endif %} - -- imagename: "{{namespace}}/{{name_prefix}}placement-api{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerPlacementConfigImage - - ContainerPlacementImage - services: - - OS::TripleO::Services::PlacementApi - -- imagename: "{{namespace}}/{{name_prefix}}qdrouterd{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerQdrouterdConfigImage - - ContainerQdrouterdImage - - ContainerMetricsQdrConfigImage - - ContainerMetricsQdrImage - services: - - 
OS::TripleO::Services::Qdr - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::MetricsQdr - -- imagename: "{{namespace}}/{{name_prefix}}rabbitmq{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerRabbitmqConfigImage - - ContainerRabbitmqImage - services: - - OS::TripleO::Services::RabbitMQ - - OS::TripleO::Services::OsloMessagingRpc - - OS::TripleO::Services::OsloMessagingNotify - -- imagename: "{{namespace}}/{{name_prefix}}redis{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerRedisConfigImage - - ContainerRedisImage - services: - - OS::TripleO::Services::Redis - -- imagename: "{{namespace}}/{{name_prefix}}swift-account{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerSwiftAccountImage - services: - - OS::TripleO::Services::SwiftStorage - -- imagename: "{{namespace}}/{{name_prefix}}swift-container{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerSwiftContainerImage - services: - - OS::TripleO::Services::SwiftStorage - -- imagename: "{{namespace}}/{{name_prefix}}swift-object{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerSwiftObjectImage - services: - - OS::TripleO::Services::SwiftStorage - -- imagename: "{{namespace}}/{{name_prefix}}swift-proxy-server{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerSwiftConfigImage - - ContainerSwiftProxyImage - services: - - OS::TripleO::Services::SwiftProxy - - OS::TripleO::Services::SwiftRingBuilder - - OS::TripleO::Services::SwiftStorage - -{% if ceph_images | default(true) %} -{% include 'ceph.j2' %} -{% endif %} - -- imagename: "{{namespace}}/{{name_prefix}}rsyslog{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerRsyslogImage - - ContainerRsyslogConfigImage - - ContainerRsyslogSidecarImage - - ContainerRsyslogSidecarConfigImage - - ContainerOctaviaRsyslogImage - services: - - OS::TripleO::Services::HAproxy - - OS::TripleO::Services::OctaviaHealthManager - - OS::TripleO::Services::Rsyslog - - OS::TripleO::Services::RsyslogSidecar - -- imagename: "{{namespace}}/{{name_prefix}}unbound{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerUnboundImage - - ContainerUnboundConfigImage - services: - - OS::TripleO::Services::Unbound - -- imagename: "{{namespace}}/{{name_prefix}}tempest{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerTempestImage - services: - - OS::TripleO::Services::Tempest - -- imagename: "{{namespace}}/{{name_prefix}}tripleoclient{{name_suffix}}:{{tag}}" - image_source: tripleo - params: - - ContainerTripleoClientImage - services: - - OS::TripleO::Services::TripleoClient - -- imagename: "{{namespace}}/tripleo-ansible-ee:{{tag}}" - image_source: tripleo - params: - - ContainerAEEImage diff --git a/contrib/tripleo_kolla_template_overrides.j2 b/contrib/tripleo_kolla_template_overrides.j2 deleted file mode 120000 index bf21613c7..000000000 --- a/contrib/tripleo_kolla_template_overrides.j2 +++ /dev/null @@ -1 +0,0 @@ -../container-images/tripleo_kolla_template_overrides.j2 \ No newline at end of file diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 3a9a6af5c..000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -openstackdocstheme>=2.2.1 # Apache-2.0 -sphinx>=2.0.0,!=2.1.0 # BSD -docutils>=0.11 # OSI-Approved Open Source, Public Domain -reno>=3.1.0 # Apache-2.0 diff --git a/doc/source/_exts/workbooks.py b/doc/source/_exts/workbooks.py deleted file mode 100644 index 
cd3ab2dab..000000000 --- a/doc/source/_exts/workbooks.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -from docutils import nodes -from docutils.parsers import rst -from docutils.statemachine import ViewList -from sphinx.util import logging -from sphinx.util.nodes import nested_parse_with_titles -import yaml - -LOG = logging.getLogger(__name__) - - -WORKFLOW_PATH = os.path.abspath( - os.path.join(os.path.dirname(__file__), '../../../workbooks', )) - - -def _title(name): - return name.replace('_', ' ').capitalize() + " Workbook" - - -def _workbook_to_rst(name, workbook): - - title = _title(name) - - yield '.. _workbook-%s:' % name - yield '' - yield '=' * len(title) - yield title - yield '=' * len(title) - yield '' - yield ':Workbook name: {}'.format(workbook['name']) - yield '' - if 'description' in workbook: - yield workbook['description'] - yield '' - yield 'Workflows in the {}'.format(title) - yield '' - - for wf_name, workflow in sorted(workbook['workflows'].items()): - - yield '.. object:: ' + workbook['name'] + '.' + wf_name - yield '' - - if 'type' in workflow: - yield ' :type: {}'.format(workflow['type']) - yield '' - - if 'description' in workflow: - if len(workflow['description'].split("\n")) > 2: - for _line in workflow['description'].split("\n"): - _new_line = ' ' + _line - yield _new_line - else: - yield ' {}'.format(workflow['description']) - yield '' - - if 'input' in workflow: - yield "\n Workflow inputs:\n" - for input_param in workflow['input']: - try: - yield ' :input {}: Default: {}'.format( - *input_param.items()[0]) - except Exception: - yield ' :input {}: Required.'.format(input_param) - yield '' - - -def get_workbooks(): - - all_workbooks = {} - - for root, dirs, files in os.walk(WORKFLOW_PATH): - for file in files: - with open(os.path.join(root, file)) as f: - all_workbooks[file.split('.')[0]] = yaml.safe_load(f) - - return all_workbooks - - -def _write_workbook_pages(app): - all_workbooks = get_workbooks() - files = [] - - for name, workbook in all_workbooks.items(): - filename = 'doc/source/reference/workbooks/%s.rst' % name - LOG.info('generating workbook page for %s' % name) - with open(filename, 'w') as f: - f.write('\n'.join(_workbook_to_rst(name, workbook))) - files.append(filename) - return files - - -class WorkflowListDirective(rst.Directive): - - has_content = False - - def run(self): - all_workbooks = get_workbooks() - - # Build the view of the data to be parsed for rendering. - result = ViewList() - for workbook_name in sorted(all_workbooks.keys()): - workbook = all_workbooks[workbook_name] - for line in _workbook_to_rst(workbook_name, workbook): - result.append(line, '<' + __name__ + '>') - - # Parse what we have into a new section. 
- node = nodes.section() - node.document = self.state.document - nested_parse_with_titles(self.state, result, node) - - return node.children - - -def setup(app): - LOG.info('loading workbooks extension') - app.add_directive('workbooklist', WorkflowListDirective) - _write_workbook_pages(app) diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 6a511e8f2..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -# Add the project -sys.path.insert(0, os.path.abspath('../..')) -# Add the extensions -sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'openstackdocstheme', - 'sphinx.ext.autodoc', - 'workbooks' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = 'tripleo-commondoc' -html_theme = 'openstackdocs' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - 'tripleo-common.tex', - u'tripleo-common Documentation', - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. -# intersphinx_mapping = {'http://docs.python.org/': None} - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/tripleo-common' -openstackdocs_bug_project = 'tripleo' -openstackdocs_bug_tag = 'documentation' diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index 1728a61ca..000000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1,4 +0,0 @@ -============ -Contributing -============ -.. 
include:: ../../CONTRIBUTING.rst diff --git a/doc/source/image/build.rst b/doc/source/image/build.rst deleted file mode 100644 index f921fbe64..000000000 --- a/doc/source/image/build.rst +++ /dev/null @@ -1,20 +0,0 @@ -=============== -Building images -=============== - -Call the image build manager:: - - manager = ImageBuildManager(['path/to/config.yaml']) - manager.build() - - -.. autoclass:: tripleo_common.image.build.ImageBuildManager - :members: - -Multiple config files ---------------------- - -Multiple config files can be passed to the ImageBuildManager. Certain attributes -will be merged (currently, 'elements', 'options', and 'packages'), while other -attributes will only be set by the first encountered. The 'imagename' attribute -will be the primary key. \ No newline at end of file diff --git a/doc/source/image/upload.rst b/doc/source/image/upload.rst deleted file mode 100644 index ff65addab..000000000 --- a/doc/source/image/upload.rst +++ /dev/null @@ -1,19 +0,0 @@ -================ -Uploading images -================ - -Call the image upload manager:: - - manager = ImageUploadManager(['path/to/config.yaml']) - manager.upload() - - -.. autoclass:: tripleo_common.image.image_uploader.ImageUploadManager - :members: - -Multiple config files ---------------------- - -Multiple config files can be passed to the ImageUploadManager. -Attributes are set by the first encountered with the 'imagename' attribute -being the primary key. diff --git a/doc/source/images.rst b/doc/source/images.rst deleted file mode 100644 index 43c3f10bb..000000000 --- a/doc/source/images.rst +++ /dev/null @@ -1,29 +0,0 @@ -=========== -Disk images -=========== - -.. toctree:: - :glob: - :maxdepth: 2 - - image/* - - -YAML file format ----------------- -:: - - disk_images: - - - imagename: overcloud-compute - builder: dib - arch: amd64 - type: qcow2 - distro: centos7 - elements: - - overcloud-compute - - other-element - packages: - - vim - options: - diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 5d93c3bfc..000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. tripleo-common documentation master file, created by - sphinx-quickstart on Tue Jul 9 22:26:36 2013. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to tripleo-common's documentation! -========================================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - Introduction - installation - usage - images - contributing - uploads - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` - diff --git a/doc/source/installation.rst b/doc/source/installation.rst deleted file mode 100644 index 16e00c8a1..000000000 --- a/doc/source/installation.rst +++ /dev/null @@ -1,13 +0,0 @@ -============ -Installation -============ - -At the command line:: - - $ pip install tripleo-common - -Or, if you have virtualenvwrapper installed:: - - $ mkvirtualenv tripleo-common - $ pip install tripleo-common - diff --git a/doc/source/readme.rst b/doc/source/readme.rst deleted file mode 100644 index a6210d3d8..000000000 --- a/doc/source/readme.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../README.rst diff --git a/doc/source/uploads.rst b/doc/source/uploads.rst deleted file mode 100644 index 948b5c8c2..000000000 --- a/doc/source/uploads.rst +++ /dev/null @@ -1,22 +0,0 @@ -============= -Image uploads -============= - -.. 
toctree:: - :glob: - :maxdepth: 2 - - image/build - image/upload - - -YAML file format ----------------- -:: - - uploads: - - imagename: namespace/heat-docker-agents:latest - uploader: docker - pull_source: docker.io - push_destination: localhost:8787 - diff --git a/doc/source/usage.rst b/doc/source/usage.rst deleted file mode 100644 index 2fc868e5b..000000000 --- a/doc/source/usage.rst +++ /dev/null @@ -1,8 +0,0 @@ -===== -Usage -===== - -To use tripleo-common in a project:: - - import tripleo_common - diff --git a/healthcheck/README.md b/healthcheck/README.md deleted file mode 100644 index 409c98cc9..000000000 --- a/healthcheck/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Health check commands - -The scripts in this directory are meant to implement the -[container-healthcheck][] blueprint. They are written to be compatible -with the Docker [HEALTHCHECK][] api. - -[container-healthcheck]: https://blueprints.launchpad.net/tripleo/+spec/container-healthchecks -[healthcheck]: https://docs.docker.com/engine/reference/builder/#healthcheck - -The scripts expect to source -`/usr/share/tripleo-common/healthcheck/common.sh`. If you -want to run scripts without installing to that file, you can set the -`HEALTHCHECKS_DIR` environment variable, e.g.: - - $ export HEALTHCHECKS_DIR=$PWD - $ ./heat-api - {"versions": [{"status": "CURRENT", "id": "v1.0", "links": [{"href": "http://192.168.24.1:8004/v1/", "rel": "self"}]}]} - 300 192.168.24.1:8004 0.002 seconds - -# Notes about changing healthchecks - -Because healthchecks are provided via a package when building containers, -you cannot rename or remove a health check in combination with a change to a file in tripleo-common/container-images/. Changes need to be backwards and forwards compatible when updating healthchecks. You may also need to land -a new healthcheck first and update the container build process in a subsequent -change that lands later. diff --git a/healthcheck/aodh-api b/healthcheck/aodh-api deleted file mode 100755 index 50acf5914..000000000 --- a/healthcheck/aodh-api +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-aodh_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/aodh-evaluator b/healthcheck/aodh-evaluator deleted file mode 100755 index 74dc4e017..000000000 --- a/healthcheck/aodh-evaluator +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='aodh-evaluator' -args="${@:-6379}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened Redis ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/aodh-listener b/healthcheck/aodh-listener deleted file mode 100755 index 0fe940626..000000000 --- a/healthcheck/aodh-listener +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='aodh-listener' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/aodh-notifier b/healthcheck/aodh-notifier deleted file mode 100755 index aaf4c89bc..000000000 --- a/healthcheck/aodh-notifier +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='aodh-notifier' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/barbican-api b/healthcheck/barbican-api deleted file mode 100755 index f872c3f7f..000000000 --- a/healthcheck/barbican-api +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-barbican_wsgi_main.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/barbican-keystone-listener b/healthcheck/barbican-keystone-listener deleted file mode 100755 index e2bf2d852..000000000 --- a/healthcheck/barbican-keystone-listener +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='barbican-keystone-listener' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/barbican-worker b/healthcheck/barbican-worker deleted file mode 100755 index 72fd7542f..000000000 --- a/healthcheck/barbican-worker +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='barbican-worker' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/ceilometer-agent-central b/healthcheck/ceilometer-agent-central deleted file mode 100755 index 1fdc78d28..000000000 --- a/healthcheck/ceilometer-agent-central +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ceilometer-polling' -args="${@:-6379}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened Redis ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/ceilometer-agent-compute b/healthcheck/ceilometer-agent-compute deleted file mode 100755 index 1fdc78d28..000000000 --- a/healthcheck/ceilometer-agent-compute +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ceilometer-polling' -args="${@:-6379}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened Redis ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/ceilometer-agent-ipmi b/healthcheck/ceilometer-agent-ipmi deleted file mode 100755 index 1fdc78d28..000000000 --- a/healthcheck/ceilometer-agent-ipmi +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ceilometer-polling' -args="${@:-6379}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened Redis ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/ceilometer-agent-notification b/healthcheck/ceilometer-agent-notification deleted file mode 100755 index 6495b39ab..000000000 --- a/healthcheck/ceilometer-agent-notification +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ceilometer-agent-notification' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/cinder-api b/healthcheck/cinder-api deleted file mode 100755 index 65cae6a46..000000000 --- a/healthcheck/cinder-api +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-cinder_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/cinder-backup b/healthcheck/cinder-backup deleted file mode 100755 index 315854d71..000000000 --- a/healthcheck/cinder-backup +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='cinder-backup' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/cinder-scheduler b/healthcheck/cinder-scheduler deleted file mode 100755 index e3d381334..000000000 --- a/healthcheck/cinder-scheduler +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='cinder-scheduler' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/cinder-volume b/healthcheck/cinder-volume deleted file mode 100755 index 5798fdbb5..000000000 --- a/healthcheck/cinder-volume +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='cinder-volume' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/collectd b/healthcheck/collectd deleted file mode 100755 index 27cb76d0a..000000000 --- a/healthcheck/collectd +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -collectdctl -s /var/run/collectd-socket listval -exit $? 
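Nearly every per-service script in this healthcheck/ tree follows the same shape, built on helpers from common.sh (removed next): source the shared library, then either probe an HTTP endpoint with healthcheck_curl or assert that the service process holds a socket on a known port with healthcheck_port. A minimal sketch of that recurring pattern follows; 'some-service' and the default port 5672 are illustrative placeholders, not a real script from this tree:

    #!/bin/bash
    # Sketch of the shared healthcheck pattern; 'some-service' and the
    # default port are placeholders. Real scripts pick the port of the
    # backend they talk to (RabbitMQ 5671/5672, Redis 6379, MySQL 3306).
    . ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh

    process='some-service'
    args="${@:-5672}"

    # healthcheck_port (defined in common.sh) scans /proc/net/{tcp,udp}
    # for a socket on one of the given ports owned by a process whose
    # command line matches $process, and exits 0 on the first match.
    if healthcheck_port $process $args; then
        exit 0
    else
        echo "There is no $process process with opened ports (${args// /,}) running in the container"
        exit 1
    fi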
diff --git a/healthcheck/common.sh b/healthcheck/common.sh deleted file mode 100755 index 87172b37c..000000000 --- a/healthcheck/common.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -set -euo pipefail -: ${HEALTHCHECK_DEBUG:=0} -if [ $HEALTHCHECK_DEBUG -ne 0 ]; then - set -x - exec 3>&1 -else - exec 3>/dev/null -fi -: ${HEALTHCHECK_CURL_MAX_TIME:=10} -: ${HEALTHCHECK_CURL_USER_AGENT:=curl-healthcheck} -: ${HEALTHCHECK_CURL_PY_USER_AGENT:=pyrequests-healthcheck} -: ${HEALTHCHECK_CURL_WRITE_OUT:='\n%{http_code} %{remote_ip}:%{remote_port} %{time_total} seconds\n'} -: ${HEALTHCHECK_CURL_OUTPUT:='/dev/null'} -: ${HEALTHCHECK_USE_PROXY:=0} -if [ $HEALTHCHECK_USE_PROXY -eq 0 ]; then - for prox_var in http_proxy https_proxy no_proxy; do - unset $prox_var - unset ${prox_var^^} - done -fi - -get_user_from_process() { - process=$1 - - # This helps to capture the actual pid running the process - pid=$(pgrep -d ',' -f $process) - - # Here, we use the embedded `ps' filter capabilities, and remove the - # output header. We ensure we get the user for the selected PIDs only. - # In order to ensure we don't get multiple lines, we truncate it with `head' - ps -h -q${pid} -o user | head -n1 -} - -healthcheck_curl () { - if [ $# == 0 ]; then - echo 'healthcheck_curl: no parameter provided' - return 1 - fi - export NSS_SDB_USE_CACHE=no - if [ -n "${HEALTHCHECK_CURL_PY+x}" ] || [ -n "${no_proxy+x}" ] || [ -n "${NO_PROXY+x}" ]; then - ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/http-healthcheck.py \ --max-time "${HEALTHCHECK_CURL_MAX_TIME}" \ --user-agent "${HEALTHCHECK_CURL_PY_USER_AGENT}" \ --write-out "${HEALTHCHECK_CURL_WRITE_OUT}" \ "$@" || return 1 - else - curl -g -k -q -s -S --fail -o "${HEALTHCHECK_CURL_OUTPUT}" \ --max-time "${HEALTHCHECK_CURL_MAX_TIME}" \ --user-agent "${HEALTHCHECK_CURL_USER_AGENT}" \ --write-out "${HEALTHCHECK_CURL_WRITE_OUT}" \ "$@" || return 1 - fi -} - -healthcheck_port () { - process=$1 - - shift 1 - ports="" - # First convert port to hex value. We need to 0-pad it in order to get the - # right format (4 chars). - for p in $@; do - ports="${ports}|$(printf '%0.4x' $p)" - done - # Format the string - will be ":(hex1|hex2|...)" - ports=":(${ports:1})" - # Parse the files. We need to extract only one value (socket inode) based on the matching port. Let's check local and target for established connections. - # Line example: - # 534: DE0D10AC:1628 DE0D10AC:8B7C 01 00000000:00000000 02:000000D3 00000000 42439 0 574360 2 0000000000000000 20 4 0 10 -1 - # | | | - # $2 local connection | $10 Socket inode - # $3 Connection target - # Using the main /proc/net/{tcp,udp} allows us to take only the connections existing in the current container. If we were using /proc/PID/net/{tcp,udp}, we - # would get all the connections existing in the same network namespace as the PID. Since we're using network=host, that would show *everything*. - # the "join" method is weird, and fails if the array is empty. - # Note: join comes from gawk's /usr/share/awk/join.awk and has some weird parameters.
- sockets=$(awk -i join -v m=${ports} '{IGNORECASE=1; if ($2 ~ m || $3 ~ m) {output[counter++] = $10} } END{if (length(output)>0) {print join(output, 0, length(output)-1, "|")}}' /proc/net/{tcp,udp,tcp6,udp6}) - - # If no socket, just fail early - test -z $sockets && exit 1 - match=0 - for pid in $(pgrep -f $process); do - # Here, we check if a socket is actually associated to the process PIDs - match=$(( $match+$(find /proc/$pid/fd/ -ilname "socket*" -printf "%l\n" 2>/dev/null | grep -c -E "(${sockets})") )) - test $match -gt 0 && exit 0 # exit as soon as we get a match - done - exit 1 # no early exit, meaning failure. -} - -healthcheck_listen () { - process=$1 - - shift 1 - args=$@ - ports=${args// /,} - pids=$(pgrep -d ',' -f $process) - lsof -n -w -P -a -p${pids} -iTCP:${ports} -s TCP:LISTEN >&3 2>&1 -} - -healthcheck_socket () { - process=$1 - socket=$2 - pids=$(pgrep -d ',' -f $process) - - lsof -n -Fc -Ua -p${pids} $socket >&3 2>&1 -} - -healthcheck_file_modification () { - file_path=$1 - limit_seconds=$2 - - # if the file doesn't exist, return 1 - if [ ! -f $file_path ]; then - echo "${file_path} does not exist for file modification check" - return 1 - fi - curr_time=$(date +%s) - last_mod=$(stat -c '%Y' $file_path) - limit_epoch=$(( curr_time-limit_seconds )) - if [ ${limit_epoch} -gt ${last_mod} ]; then - return 1 - fi -} - -get_config_val () { - crudini --get "$1" "$2" "$3" 2> /dev/null || echo "$4" -} - -# apachectl -S is slightly harder to parse and doesn't say if the vhost is serving SSL -get_url_from_vhost () { - vhost_file=$1 - if test -n "${vhost_file}" && test -r "${vhost_file}" ; then - server_name=$(awk '/ServerName/ {print $2}' $vhost_file) - ssl_enabled=$(awk '/SSLEngine/ {print $2}' $vhost_file) - bind_port=$(grep -h "<VirtualHost " $vhost_file | sed 's/<VirtualHost .*:\(.*\)>/\1/') - wsgi_alias=$(awk '/WSGIScriptAlias / {print $2; exit}' $vhost_file) - proto=http - if [[ $ssl_enabled == "on" ]]; then - proto=https - fi - if [[ $wsgi_alias != "/" ]]; then - wsgi_alias="${wsgi_alias}/" - fi - echo ${proto}://${server_name}:${bind_port}${wsgi_alias} - else - exit 1 - fi -} - -check_swift_interval () { - service=$1 - if ps -ef | grep --quiet [s]wift-${service} >&3 2>&1; then - interval=$(get_config_val $conf $service interval 300) - last=`grep -o "\"replication_last\": [0-9]*" $cache | cut -f 2 -d " "` - now=`date +%s` - if [ `expr $now - $last` -gt $interval ]; then - echo "Last replication run did not finish within interval of $interval seconds." - exit 1 - fi - fi -} - -# Wrap an IPv6 address in square brackets if not already wrapped -wrap_ipv6 () { - ip=$1 - - if [[ $ip =~ ":" ]] && [[ $ip != *\] ]]; then - echo [$ip] - else - echo $ip - fi -} diff --git a/healthcheck/cron b/healthcheck/cron deleted file mode 100755 index 450bdaee2..000000000 --- a/healthcheck/cron +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -file="${1:-root}" -if [ -f /var/spool/cron/${file} ]; then - nb_lines=$(grep -cEv '^#' /var/spool/cron/${file}) - if [ $nb_lines -ge 2 ]; then - exit 0 - fi -fi -exit 1 diff --git a/healthcheck/etcd b/healthcheck/etcd deleted file mode 100755 index 739827b49..000000000 --- a/healthcheck/etcd +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -export ETCDCTL_API=3 -export $(grep ETCD_LISTEN_CLIENT_URLS /etc/etcd/etcd.conf) -ETCDCTL_ENDPOINTS=$ETCD_LISTEN_CLIENT_URLS etcdctl endpoint health | grep -q "is healthy" -exit $? diff --git a/healthcheck/frr b/healthcheck/frr deleted file mode 100755 index 495ca9209..000000000 --- a/healthcheck/frr +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -if ps -e | grep watchfrr; then - exit 0 -else - echo "No watch frr running" - exit 1 -fi diff --git a/healthcheck/glance-api b/healthcheck/glance-api deleted file mode 100755 index ddadf07c5..000000000 --- a/healthcheck/glance-api +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -bind_host=$(wrap_ipv6 $(get_config_val /etc/glance/glance-api.conf DEFAULT bind_host 127.0.0.1)) -bind_port=$(get_config_val /etc/glance/glance-api.conf DEFAULT bind_port 9292) - -# glance-api is still eventlet -healthcheck_curl http://${bind_host}:${bind_port}/ diff --git a/healthcheck/gnocchi-api b/healthcheck/gnocchi-api deleted file mode 100755 index b07b17b6f..000000000 --- a/healthcheck/gnocchi-api +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-gnocchi_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/gnocchi-metricd b/healthcheck/gnocchi-metricd deleted file mode 100755 index 5f18e4944..000000000 --- a/healthcheck/gnocchi-metricd +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='gnocchi-metricd' -args="${@:-6379}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened Redis ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/gnocchi-statsd b/healthcheck/gnocchi-statsd deleted file mode 100755 index 1aee264fa..000000000 --- a/healthcheck/gnocchi-statsd +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='gnocchi-statsd' -bind_port=$(get_config_val /etc/gnocchi/gnocchi.conf statsd port 8125) - -if ! healthcheck_port $process $bind_port; then - echo "There is no $process process listening on ports $bind_port in the container." - exit 1 -fi diff --git a/healthcheck/heat-api b/healthcheck/heat-api deleted file mode 100755 index fa9ea526d..000000000 --- a/healthcheck/heat-api +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-heat_api_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/heat-api-cfn b/healthcheck/heat-api-cfn deleted file mode 100755 index a5188189f..000000000 --- a/healthcheck/heat-api-cfn +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-heat_api_cfn_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/heat-engine b/healthcheck/heat-engine deleted file mode 100755 index 6381fc3d1..000000000 --- a/healthcheck/heat-engine +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='heat-engine' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/horizon b/healthcheck/horizon deleted file mode 100755 index 7225fac87..000000000 --- a/healthcheck/horizon +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-horizon_vhost.conf) -if healthcheck_curl ${check_url}; then - exit 0 -else - healthcheck_curl $(get_url_from_vhost /etc/httpd/conf.d/10-horizon_ssl_vhost.conf) -fi diff --git a/healthcheck/http-healthcheck.py b/healthcheck/http-healthcheck.py deleted file mode 100755 index 223d7e592..000000000 --- a/healthcheck/http-healthcheck.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python3 -import argparse -import os -import requests - -default_output = ("\n%(http_code)s %(remote_ip)s:%(remote_port)s " - "%(time_total)s seconds\n") - -parser = argparse.ArgumentParser(description='Check remote HTTP') -parser.add_argument('uri', metavar='URI', type=str, nargs=1, - help='Remote URI to check') -parser.add_argument('--max-time', type=int, default=10, - help=('Maximum time in seconds that you allow the' - ' whole operation to take.') - ) -parser.add_argument('--user-agent', type=str, default='pyrequests-healthcheck', - help=('Specify the User-Agent string to send to the' - ' HTTP server.') - ) -parser.add_argument('--write-out', type=str, default=default_output, - help=('Display information on stdout after a completed' - ' transfer.') - ) - -args = parser.parse_args() -uri = args.uri[0] -output = args.write_out.replace('%{', '%(').replace('}', ')s') \ - .replace('\\n', os.linesep) - -headers = {'User-Agent': args.user_agent} -with requests.get(uri, headers=headers, timeout=args.max_time, - allow_redirects=True, stream=True, verify=False) as req: - r_ip, r_port = req.raw._original_response.fp.raw._sock.getpeername()[0:2] - resp = {'http_code': req.status_code, - 'remote_ip': r_ip, - 'remote_port': r_port, - 'time_total': req.elapsed.total_seconds() - } - try: - print(output % resp) - except KeyError: - print(default_output % resp) - except ValueError: - print(default_output % resp) diff --git a/healthcheck/ironic-api b/healthcheck/ironic-api deleted file mode 100755 index b0df8c8dc..000000000 --- a/healthcheck/ironic-api +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-ironic_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/ironic-conductor b/healthcheck/ironic-conductor deleted file mode 100755 index 97b63e0e0..000000000 --- a/healthcheck/ironic-conductor +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ironic-conductor' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/ironic-inspector b/healthcheck/ironic-inspector deleted file mode 100755 index 80d1dac92..000000000 --- a/healthcheck/ironic-inspector +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='dnsmasq' -if pgrep $process; then - exit 0 -else - bind_host=$(wrap_ipv6 $(get_config_val /etc/ironic-inspector/inspector.conf DEFAULT listen_address 127.0.0.1)) - bind_port=$(get_config_val /etc/ironic-inspector/inspector.conf DEFAULT listen_port 5050) - healthcheck_curl http://${bind_host}:${bind_port} -fi diff --git a/healthcheck/ironic-neutron-agent b/healthcheck/ironic-neutron-agent deleted file mode 100755 index 7c4ef5876..000000000 --- a/healthcheck/ironic-neutron-agent +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ironic-neutron-agent' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/ironic-pxe b/healthcheck/ironic-pxe deleted file mode 100755 index 0455c452c..000000000 --- a/healthcheck/ironic-pxe +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -if grep "/httpd " /run_command 2>/dev/null ; then - bind_host=$(wrap_ipv6 $(get_config_val /etc/ironic/ironic.conf DEFAULT my_ip 127.0.0.1)) - bind_port=$(grep "^Listen " /etc/httpd/conf.d/10-ipxe_vhost.conf | awk '{print $2}') - protocol=http - path= -else - exit 0 -fi -healthcheck_curl ${protocol}://${bind_host}:${bind_port}/${path} diff --git a/healthcheck/iscsid b/healthcheck/iscsid deleted file mode 100755 index da2db2829..000000000 --- a/healthcheck/iscsid +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -socat - ABSTRACT-CONNECT:ISCSIADM_ABSTRACT_NAMESPACE </dev/null diff --git a/healthcheck/mariadb b/healthcheck/mariadb deleted file mode 100755 --- a/healthcheck/mariadb +++ /dev/null -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -SOCAT_CONF=/etc/sysconfig/clustercheck - -if [ -f $SOCAT_CONF ] && ps -ef 2>/dev/null | grep -w clustercheck; then - TRIPLEO_SOCAT_BIND=$(sed -nE "s/^TRIPLEO_SOCAT_BIND='?([^']*)'?$/\1/p" $SOCAT_CONF) - PORT=$(echo $TRIPLEO_SOCAT_BIND | sed -n -E 's/.*listen:([0-9]*),.*/\1/p') - ADDR=$(echo $TRIPLEO_SOCAT_BIND | sed -n -E 's/.*bind="?([^",]*)"?,?.*/\1/p') - if echo $TRIPLEO_SOCAT_BIND | grep -q '^tcp6'; then - ADDR="[${ADDR}]" - fi -else - IS_MYSQL=1 -fi - -if [ -n "$IS_MYSQL" ]; then - mysql -e 'select 1' || exit 1 -else - bind_host=${ADDR:-$(hostname)} - bind_port=${PORT:-9200} - healthcheck_curl http://${bind_host}:${bind_port}/ -fi diff --git a/healthcheck/memcached b/healthcheck/memcached deleted file mode 100755 index 16b2d470e..000000000 --- a/healthcheck/memcached +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -# if memcached has TLS enabled, look for a notls ip entry in the options -listen_addr=$(awk 'match($0, /notls:([0-9a-fA-F\.\:]+):11211[, ]/, a) {print a[1]}' /etc/sysconfig/memcached) - -if [ -z "$listen_addr" ]; then - # otherwise look for the first ip available among all the possible ones - # passed to the -l option - listen_addr=$(awk 'match($0, /-l +([0-9a-fA-F\.\:]+)[, ]/, a) {print a[1]}' /etc/sysconfig/memcached) - # get the configured memcached port or the default one - port=$(awk -F= '$1=="PORT" {gsub(/"/, "",$2); print $2}' /etc/sysconfig/memcached) - port=${port:-11211} -else - # with TLS-e, TripleO always exposes the notls IP on port 11211 - port=11211 -fi -listen_addr=$(wrap_ipv6 $listen_addr) - -echo "version" | socat - TCP:$listen_addr:$port 1>/dev/null -exit $? diff --git a/healthcheck/multipathd b/healthcheck/multipathd deleted file mode 100755 index 3d2d81f53..000000000 --- a/healthcheck/multipathd +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -multipathd show status -exit $? diff --git a/healthcheck/neutron-api b/healthcheck/neutron-api deleted file mode 100755 index 84aa154e5..000000000 --- a/healthcheck/neutron-api +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -bind_host=$(wrap_ipv6 $(get_config_val /etc/neutron/neutron.conf DEFAULT bind_host 127.0.0.1)) -bind_port=$(get_config_val /etc/neutron/neutron.conf DEFAULT bind_port 9696) - -healthcheck_curl http://${bind_host}:${bind_port}/ diff --git a/healthcheck/neutron-dhcp b/healthcheck/neutron-dhcp deleted file mode 100755 index 8825be6da..000000000 --- a/healthcheck/neutron-dhcp +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='neutron-dhcp-agent' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/neutron-l3 b/healthcheck/neutron-l3 deleted file mode 100755 index 0d39ffedf..000000000 --- a/healthcheck/neutron-l3 +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='neutron-l3-agent' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/neutron-metadata b/healthcheck/neutron-metadata deleted file mode 100755 index bc5b0ed8d..000000000 --- a/healthcheck/neutron-metadata +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='neutron-metadata-agent' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/neutron-ovs-agent b/healthcheck/neutron-ovs-agent deleted file mode 100755 index 1e9879b16..000000000 --- a/healthcheck/neutron-ovs-agent +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='neutron-openvswitch-agent' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/neutron-sriov-agent b/healthcheck/neutron-sriov-agent deleted file mode 100755 index 8de79d4b0..000000000 --- a/healthcheck/neutron-sriov-agent +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='neutron-sriov-nic-agent' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process connected via RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/nova-api b/healthcheck/nova-api deleted file mode 100755 index e66991bf4..000000000 --- a/healthcheck/nova-api +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - - -if pgrep -f nova_metadata; then - check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-nova_metadata_wsgi.conf) -else - check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-nova_api_wsgi.conf) -fi - -healthcheck_curl ${check_url} diff --git a/healthcheck/nova-compute b/healthcheck/nova-compute deleted file mode 100755 index f1ca8bfb4..000000000 --- a/healthcheck/nova-compute +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -if ps -e | grep nova-compute; then - process='nova-compute' - args="${@:-5671 5672}" - test_func='healthcheck_port' - err_msg="There is no $process process with opened RabbitMQ ports (${args// /,}) running in the container" -else - process='sshd' - args="${@:-2022}" - test_func='healthcheck_listen' - err_msg="There is no $process process listening on port(s) ${args// /,} in the container" -fi - -if $test_func $process $args; then - exit 0 -else - echo $err_msg - exit 1 -fi diff --git a/healthcheck/nova-conductor b/healthcheck/nova-conductor deleted file mode 100755 index 65533dcfb..000000000 --- a/healthcheck/nova-conductor +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='nova-conductor' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/nova-ironic b/healthcheck/nova-ironic deleted file mode 100755 index 27a9ec172..000000000 --- a/healthcheck/nova-ironic +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='nova-compute' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/nova-libvirt b/healthcheck/nova-libvirt deleted file mode 100755 index f4e4e8504..000000000 --- a/healthcheck/nova-libvirt +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process=$1 -socket='/run/libvirt/virtlogd-sock' -if [[ $process == "virtlogd" ]]; then - healthcheck_socket $process $socket - exit $? -else - virsh version --daemon -fi diff --git a/healthcheck/nova-metadata b/healthcheck/nova-metadata deleted file mode 100755 index 785423a84..000000000 --- a/healthcheck/nova-metadata +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-nova_metadata_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/nova-scheduler b/healthcheck/nova-scheduler deleted file mode 100755 index e27c30669..000000000 --- a/healthcheck/nova-scheduler +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='nova-scheduler' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/nova-vnc-proxy b/healthcheck/nova-vnc-proxy deleted file mode 100755 index 3a1ed6865..000000000 --- a/healthcheck/nova-vnc-proxy +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -bind_host=$(wrap_ipv6 $(get_config_val /etc/nova/nova.conf vnc novncproxy_host 127.0.0.1)) -bind_port=$(get_config_val /etc/nova/nova.conf vnc novncproxy_port 6080) -proto_is_ssl=$(get_config_val /etc/nova/nova.conf DEFAULT ssl_only false) - -bind_proto=http - -if [ "${proto_is_ssl,,}" = true ] ; then - bind_proto=https -fi - -healthcheck_curl ${bind_proto}://${bind_host}:${bind_port}/ diff --git a/healthcheck/octavia-api b/healthcheck/octavia-api deleted file mode 100755 index 4906e4190..000000000 --- a/healthcheck/octavia-api +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-octavia_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/octavia-health-manager b/healthcheck/octavia-health-manager deleted file mode 100755 index 1434aedde..000000000 --- a/healthcheck/octavia-health-manager +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='octavia-health-manager' -args="${@:-5555}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process listening on ports $ports." - exit 1 -fi diff --git a/healthcheck/octavia-housekeeping b/healthcheck/octavia-housekeeping deleted file mode 100755 index 84e254b36..000000000 --- a/healthcheck/octavia-housekeeping +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='octavia-housekeeping' -args="${@:-3306}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "Did not find $process process running and connected to MySQL via ports ($ports) in the container." - exit 1 -fi diff --git a/healthcheck/octavia-worker b/healthcheck/octavia-worker deleted file mode 100755 index 2f27b07d1..000000000 --- a/healthcheck/octavia-worker +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='octavia-worker' -args="${@:-5671 5672}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process with opened RabbitMQ ports ($ports) running in the container" - exit 1 -fi diff --git a/healthcheck/ovn-bgp-agent b/healthcheck/ovn-bgp-agent deleted file mode 100755 index f4c8b7c42..000000000 --- a/healthcheck/ovn-bgp-agent +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ovn-bgp-agent' - -if ps -e | grep $process; then - exit 0 -else - echo "There is no $process process running in the container" - exit 1 -fi diff --git a/healthcheck/ovn-controller b/healthcheck/ovn-controller deleted file mode 100755 index 5ddaf0830..000000000 --- a/healthcheck/ovn-controller +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='ovn-controller' -args="${@:-6642}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process connected to ovsdb ports $ports running in the container" - exit 1 -fi diff --git a/healthcheck/ovn-dbs b/healthcheck/ovn-dbs deleted file mode 100755 index 4a0b4de62..000000000 --- a/healthcheck/ovn-dbs +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -process='ovn-northd' -/usr/bin/ovs-appctl -t ${process} version 1>/dev/null diff --git a/healthcheck/ovn-metadata b/healthcheck/ovn-metadata deleted file mode 100755 index 9fe1f2039..000000000 --- a/healthcheck/ovn-metadata +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='networking-ovn-metadata-agent' -args="${@:-6642}" - -if healthcheck_port $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process connected to ovsdb ports $ports running in the container" - exit 1 -fi diff --git a/healthcheck/placement-api b/healthcheck/placement-api deleted file mode 100755 index ef0e05fe6..000000000 --- a/healthcheck/placement-api +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -. 
${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -check_url=$(get_url_from_vhost /etc/httpd/conf.d/10-placement_wsgi.conf) -healthcheck_curl ${check_url} diff --git a/healthcheck/qdrouterd b/healthcheck/qdrouterd deleted file mode 100644 index b0e7643e1..000000000 --- a/healthcheck/qdrouterd +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -LISTENER_HOST=$(grep -m1 -A7 listener /etc/qpid-dispatch/qdrouterd.conf | awk -F: '/:/{gsub(/ /, "", $1); /:/gsub(/ /, "", $2); if ($1 == "host") print $2}') -LISTENER_PORT=$(grep -m1 -A7 listener /etc/qpid-dispatch/qdrouterd.conf | awk -F: '/:/{gsub(/ /, "", $1); /:/gsub(/ /, "", $2); if ($1 == "port") print $2}') -CONNECTION_URL=amqp://$LISTENER_HOST:$LISTENER_PORT - -SSLPROFILE=$(grep -m1 -A7 listener /etc/qpid-dispatch/qdrouterd.conf | awk -F: '/:/{gsub(/ /, "", $1); /:/gsub(/ /, "", $2); if ($1 == "sslProfile") print $2}') - -if [ -z "$SSLPROFILE" ]; then - SSLARGS="" -else - CERTDB=$(grep -m1 -A5 "name: $SSLPROFILE" /etc/qpid-dispatch/qdrouterd.conf | awk -F: '/:/{gsub(/ /, "", $1); /:/gsub(/ /, "", $2); if ($1 == "certDb") print $2}') - CERT=$(grep -m1 -A5 "name: $SSLPROFILE" /etc/qpid-dispatch/qdrouterd.conf | awk -F: '/:/{gsub(/ /, "", $1); /:/gsub(/ /, "", $2); if ($1 == "certFile") print $2}') - KEY=$(grep -m1 -A5 "name: $SSLPROFILE" /etc/qpid-dispatch/qdrouterd.conf | awk -F: '/:/{gsub(/ /, "", $1); /:/gsub(/ /, "", $2); if ($1 == "keyFile") print $2}') - SSLARGS="--ssl-certificate=$CERT --ssl-key=$KEY --ssl-trustfile=$CERTDB" -fi - -qdstat -c $SSLARGS -b $CONNECTION_URL diff --git a/healthcheck/rabbitmq b/healthcheck/rabbitmq deleted file mode 100755 index de6cc9edf..000000000 --- a/healthcheck/rabbitmq +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -rabbitmq-diagnostics check_running || exit 1 diff --git a/healthcheck/redis b/healthcheck/redis deleted file mode 100755 index 160094456..000000000 --- a/healthcheck/redis +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -if parse_out=$(cat "/etc/redis/redis.conf" | egrep "^bind +.*$"); then - redis_host=$(echo -n $parse_out | awk '{print $2}') -else - redis_host=127.0.0.1 -fi - -if parse_out=$(cat "/etc/redis/redis.conf" | egrep "^port +.*$"); then - redis_port=$(echo -n $parse_out | awk '{print $2}') -else - redis_port=6379 -fi - -if parse_out=$(cat "/etc/redis/redis.conf" | egrep "^requirepass +.*$"); then - redis_pass=$(echo -n $parse_out | awk '{print $2}') - result=$(printf "*2\r\n\$4\r\nAUTH\r\n\$${#redis_pass}\r\n${redis_pass}\r\n*1\r\n\$4\r\nPING\r\n" | socat - TCP:$redis_host:$redis_port) -else - result=$(printf "*1\r\n\$4\r\nPING\r\n" | socat - TCP:$redis_host:$redis_port) -fi - -if echo $result | grep -q '+PONG'; then - echo "Redis server responded correctly on ${redis_host}:${redis_port}." -else - echo "Redis server does not respond correctly: ${result}" - exit 1 -fi diff --git a/healthcheck/releasenotes/notes/nova-vnc-proxy-ipv6-handling-5d0625f1ab10d13f.yaml b/healthcheck/releasenotes/notes/nova-vnc-proxy-ipv6-handling-5d0625f1ab10d13f.yaml deleted file mode 100644 index 714193ece..000000000 --- a/healthcheck/releasenotes/notes/nova-vnc-proxy-ipv6-handling-5d0625f1ab10d13f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - In case of IPv6 URLs brackets are required. This adds a check for IPv6 and - adds brackets if needed. 
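The IPv6 handling described in the note above comes from the ``wrap_ipv6`` helper that these scripts source from common.sh. A minimal sketch of the idea, assuming the helper simply brackets colon-containing addresses (the real common.sh implementation may differ)::

    # Sketch: IPv6 literals must be bracketed before use in a URL.
    wrap_ipv6() {
        local addr=$1
        if [[ $addr == *:* ]]; then
            echo "[${addr}]"
        else
            echo "${addr}"
        fi
    }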
diff --git a/healthcheck/releasenotes/notes/swift-fix-healthchecks-b3a02139230f4258.yaml b/healthcheck/releasenotes/notes/swift-fix-healthchecks-b3a02139230f4258.yaml deleted file mode 100644 index f91e2486a..000000000 --- a/healthcheck/releasenotes/notes/swift-fix-healthchecks-b3a02139230f4258.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixes a few Swift healthchecks by using correct procps output filters. Also - merged a few healthcheck scripts because some containers do use the same - base images. diff --git a/healthcheck/swift-account-server b/healthcheck/swift-account-server deleted file mode 100755 index 5d3092d07..000000000 --- a/healthcheck/swift-account-server +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -conf=/etc/swift/account-server.conf -cache=/var/cache/swift/account.recon - -if ps -ef | grep --quiet [s]wift-account-server; then - if ! crudini --get $conf pipeline:main pipeline | grep -q healthcheck; then - echo "healthcheck is not available" >&2 - exit 0 - fi - - # swift-account-server is still eventlet - bind_host=$(wrap_ipv6 $(get_config_val $conf DEFAULT bind_ip 127.0.0.1)) - bind_port=$(get_config_val $conf DEFAULT bind_port 6002) - healthcheck_curl http://${bind_host}:${bind_port}/healthcheck -elif ps -ef | grep --quiet [s]wift-account-replicator; then - check_swift_interval account-replicator -fi diff --git a/healthcheck/swift-container-server b/healthcheck/swift-container-server deleted file mode 100755 index fe77c2de1..000000000 --- a/healthcheck/swift-container-server +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -conf=/etc/swift/container-server.conf -cache=/var/cache/swift/container.recon - -if ps -ef | grep --quiet [s]wift-container-server; then - if ! crudini --get $conf pipeline:main pipeline | grep -q healthcheck; then - echo "healthcheck is not available" >&2 - exit 0 - fi - - # swift-container-server is still eventlet - bind_host=$(wrap_ipv6 $(get_config_val $conf DEFAULT bind_ip 127.0.0.1)) - bind_port=$(get_config_val $conf DEFAULT bind_port 6001) - healthcheck_curl http://${bind_host}:${bind_port}/healthcheck -elif ps -ef | grep --quiet [s]wift-container-replicator; then - check_swift_interval container-replicator -fi diff --git a/healthcheck/swift-object-server b/healthcheck/swift-object-server deleted file mode 100755 index 896aae672..000000000 --- a/healthcheck/swift-object-server +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -conf=/etc/swift/object-server.conf -cache=/var/cache/swift/object.recon - -if ps -ef | grep --quiet [s]wift-object-server; then - if ! crudini --get $conf pipeline:main pipeline | grep -q healthcheck; then - echo "healthcheck is not available" >&2 - exit 0 - fi - - # swift-object-server is still eventlet - bind_host=$(wrap_ipv6 $(get_config_val $conf DEFAULT bind_ip 127.0.0.1)) - bind_port=$(get_config_val $conf DEFAULT bind_port 6000) - healthcheck_curl http://${bind_host}:${bind_port}/healthcheck -elif ps -ef | grep --quiet [s]wift-object-replicator; then - check_swift_interval object-replicator -elif ps -ef | grep --quiet rsync; then - process='rsync' - args="${@:-873}" - - if healthcheck_listen $process $args; then - exit 0 - else - ports=${args// /,} - echo "There is no $process process, listening on port(s) $ports, running in the container." 
- exit 1 - fi -fi diff --git a/healthcheck/swift-proxy b/healthcheck/swift-proxy deleted file mode 100755 index b7fbfdc9b..000000000 --- a/healthcheck/swift-proxy +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -if ps -ef | grep --quiet [s]wift-proxy-server; then - conf=/etc/swift/proxy-server.conf - - if ! crudini --get $conf pipeline:main pipeline | grep -q healthcheck; then - echo "healthcheck is not available" >&2 - exit 0 - fi - - # swift-proxy is still eventlet - bind_host=$(wrap_ipv6 $(get_config_val $conf DEFAULT bind_ip 127.0.0.1)) - bind_port=$(get_config_val $conf DEFAULT bind_port 8080) - healthcheck_curl http://${bind_host}:${bind_port}/healthcheck -fi diff --git a/healthcheck/swift-rsync b/healthcheck/swift-rsync deleted file mode 100755 index ee8d76384..000000000 --- a/healthcheck/swift-rsync +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -. ${HEALTHCHECK_SCRIPTS:-/usr/share/openstack-tripleo-common/healthcheck}/common.sh - -process='rsync' -args="${@:-873}" - -if healthcheck_listen $process $args; then - exit 0 -else - ports=${args// /,} - echo "There is no $process process, listening on port(s) $ports, running in the container." - exit 1 -fi diff --git a/image-yaml/overcloud-hardened-images-uefi-centos9.yaml b/image-yaml/overcloud-hardened-images-uefi-centos9.yaml deleted file mode 100644 index 5167d0e3e..000000000 --- a/image-yaml/overcloud-hardened-images-uefi-centos9.yaml +++ /dev/null @@ -1,17 +0,0 @@ -disk_images: - - - imagename: overcloud-hardened-uefi-full - type: qcow2 - distro: centos - release: 9 - elements: - - selinux-permissive - - interface-names - - - imagename: ironic-python-agent - type: qcow2 - distro: centos - release: 9 - elements: - - selinux-permissive - - interface-names diff --git a/image-yaml/overcloud-hardened-images-uefi-python3.yaml b/image-yaml/overcloud-hardened-images-uefi-python3.yaml deleted file mode 100644 index 83aab1910..000000000 --- a/image-yaml/overcloud-hardened-images-uefi-python3.yaml +++ /dev/null @@ -1,56 +0,0 @@ -disk_images: - - - imagename: overcloud-hardened-uefi-full - type: qcow2 - elements: - - openvswitch - - overcloud-base - - overcloud-controller - - overcloud-compute - - overcloud-ceph-storage - - puppet-modules - - stable-interface-names - - bootloader - - element-manifest - - dynamic-login - - iptables - - enable-packages-install - - override-pip-and-virtualenv - - dracut-regenerate - - remove-resolvconf - - modprobe - - overcloud-partition-uefi - - overcloud-secure - - openssh - - disable-nouveau - - reset-bls-entries - options: - - "--min-tmpfs=7" - environment: - DIB_PYTHON_VERSION: '3' - DIB_MODPROBE_BLACKLIST: 'usb-storage cramfs freevxfs jffs2 hfs hfsplus squashfs udf bluetooth' - DIB_BOOTLOADER_DEFAULT_CMDLINE: 'nofb nomodeset vga=normal console=tty0 console=ttyS0,115200 audit=1 nousb' - DIB_IMAGE_SIZE: '6' - COMPRESS_IMAGE: '1' - DIB_BLOCK_DEVICE: 'efi' - - - imagename: ironic-python-agent - # This is bogus, but there's no initrd type in diskimage-builder - type: qcow2 - # So we just override the extension instead - imageext: initramfs - elements: - - ironic-python-agent-ramdisk - - ironic-agent-multipath - - dynamic-login - - element-manifest - - network-gateway - - enable-packages-install - - override-pip-and-virtualenv - - extra-hardware - options: - - "--min-tmpfs=7" - environment: - DIB_EPEL_DISABLED: '1' - DIB_PYTHON_VERSION: '3' - DIB_DHCP_TIMEOUT: '60' diff --git a/image-yaml/overcloud-hardened-images-uefi-rhel9.yaml 
b/image-yaml/overcloud-hardened-images-uefi-rhel9.yaml deleted file mode 100644 index 16d6fc29b..000000000 --- a/image-yaml/overcloud-hardened-images-uefi-rhel9.yaml +++ /dev/null @@ -1,18 +0,0 @@ -disk_images: - - - imagename: overcloud-hardened-uefi-full - type: qcow2 - distro: rhel - release: 9 - elements: - - interface-names - packages: - - rhosp-release - - - imagename: ironic-python-agent - type: qcow2 - distro: rhel - release: 9 - elements: - - selinux-permissive - - interface-names diff --git a/image-yaml/overcloud-images-centos9.yaml b/image-yaml/overcloud-images-centos9.yaml deleted file mode 100644 index 0af226db8..000000000 --- a/image-yaml/overcloud-images-centos9.yaml +++ /dev/null @@ -1,21 +0,0 @@ -disk_images: - - - imagename: overcloud-full - type: qcow2 - distro: centos - release: 9 - elements: - - selinux-permissive - - interface-names - environment: - FS_TYPE: xfs - options: - - "--mkfs-options '-s size=4096'" - - - imagename: ironic-python-agent - type: qcow2 - distro: centos - release: 9 - elements: - - selinux-permissive - - interface-names diff --git a/image-yaml/overcloud-images-python3.yaml b/image-yaml/overcloud-images-python3.yaml deleted file mode 100644 index 2c05d0958..000000000 --- a/image-yaml/overcloud-images-python3.yaml +++ /dev/null @@ -1,49 +0,0 @@ -disk_images: - - - imagename: overcloud-full - type: qcow2 - elements: - - baremetal - - openvswitch - - overcloud-base - - overcloud-controller - - overcloud-compute - - overcloud-ceph-storage - - puppet-modules - - stable-interface-names - - bootloader - - element-manifest - - dynamic-login - - iptables - - enable-packages-install - - override-pip-and-virtualenv - - dracut-regenerate - - remove-resolvconf - - openssh - - disable-nouveau - - reset-bls-entries - options: - - "--min-tmpfs=7" - environment: - DIB_PYTHON_VERSION: '3' - - - imagename: ironic-python-agent - # This is bogus, but there's no initrd type in diskimage-builder - type: qcow2 - # So we just override the extension instead - imageext: initramfs - elements: - - ironic-python-agent-ramdisk - - ironic-agent-multipath - - dynamic-login - - element-manifest - - network-gateway - - enable-packages-install - - override-pip-and-virtualenv - - extra-hardware - options: - - "--min-tmpfs=7" - environment: - DIB_EPEL_DISABLED: '1' - DIB_PYTHON_VERSION: '3' - DIB_DHCP_TIMEOUT: '60' diff --git a/image-yaml/overcloud-images-rhel9.yaml b/image-yaml/overcloud-images-rhel9.yaml deleted file mode 100644 index 550e451f0..000000000 --- a/image-yaml/overcloud-images-rhel9.yaml +++ /dev/null @@ -1,22 +0,0 @@ -disk_images: - - - imagename: overcloud-full - type: qcow2 - distro: rhel - release: 9 - elements: - - interface-names - packages: - - rhosp-release - environment: - FS_TYPE: xfs - options: - - "--mkfs-options '-s size=4096'" - - - imagename: ironic-python-agent - type: qcow2 - distro: rhel - release: 9 - elements: - - selinux-permissive - - interface-names diff --git a/releasenotes/notes/5.8.0-d1ca2298ba598431.yaml b/releasenotes/notes/5.8.0-d1ca2298ba598431.yaml deleted file mode 100644 index 6e1c8bfbe..000000000 --- a/releasenotes/notes/5.8.0-d1ca2298ba598431.yaml +++ /dev/null @@ -1,46 +0,0 @@ ---- -prelude: > - 5.8.0 is the final release for Ocata. - It's the first release where release notes are added. -features: - - Introduces a new workflow for creating baremetal RAID configuration. - - Add FreeIPA enrollment environment generator. - - Add a new Workflow which can be used to wait for Heat stacks finish with - COMPLETE or FAILED. 
 - - CephMdsKey is now a generated Heat parameter. - - Add a new Action which generates environment parameters for configuring - fencing. - - Add utility functions for deleting/emptying swift containers. - - Enhance the plan create and plan update workflows to support plan import. A new plan environment file (located in t-h-t) is now used to store the - Mistral environment, so it can easily be imported and exported. Root - template and root environment settings (previously stored in the - capabilities map file) are now being stored in this file. - - Add a new plan export action which exports contents of a deployment plan - to a tarball and uploads the tarball to Swift. -fixes: - - Fixes `bug 1644756 <https://bugs.launchpad.net/tripleo/+bug/1644756>`__ so - that flavour matching works as expected with the object-storage role. - - Fixes `bug 1649284 <https://bugs.launchpad.net/tripleo/+bug/1649284>`__ by - removing extra default Neutron vendor plugins. - - Fixes `bug 1640770 <https://bugs.launchpad.net/tripleo/+bug/1640770>`__ by - updating the scale down workflow to wait until the stack has reached - COMPLETE or FAILED. - - Fixes `bug 1651508 <https://bugs.launchpad.net/tripleo/+bug/1651508>`__ by - adding the missing MySQL and Memcache packages to the container. - - Fixes `bug 1644587 <https://bugs.launchpad.net/tripleo/+bug/1644587>`__ - with a new validation step of the plan name to prevent disallowed - characters in hostnames. - - Fixes `bug 1648781 <https://bugs.launchpad.net/tripleo/+bug/1648781>`__ by - passing the Zaqar queue to any sub-workflow executions to allow them to - add messages to the queue. - - Fixes `bug 1637474 <https://bugs.launchpad.net/tripleo/+bug/1637474>`__ by - adding support for initial state to the register_or_update workflow. - - Fixes `bug 1614939 <https://bugs.launchpad.net/tripleo/+bug/1614939>`__ by - providing feedback to the user if a plan delete fails. - - Fixes `bug 1651704 <https://bugs.launchpad.net/tripleo/+bug/1651704>`__ by - marking workflows as failed when they send an error message. -other: - - Add a script that pulls the latest puppet source - - Force qemu to log to a file when containerized - - Add passwords for Congress and Tacker diff --git a/releasenotes/notes/Add-rotate-fernet-keys-action-a1080bf5fb18413f.yaml b/releasenotes/notes/Add-rotate-fernet-keys-action-a1080bf5fb18413f.yaml deleted file mode 100644 index e78b6c8bf..000000000 --- a/releasenotes/notes/Add-rotate-fernet-keys-action-a1080bf5fb18413f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - In the parameters section of actions, the rotate_fernet_keys action was - added. It does a rotation based on the values of the generated passwords - or the parameter_defaults given by the user. Note that this merely does - the rotation, deletes the excess keys and persists the new value in the plan - environment. However, the action doesn't go to the nodes and add the - keys to the actual repository; that's part of a separate workflow. diff --git a/releasenotes/notes/DeployArtifactURLs-Simplified-e3993493022653ab.yaml b/releasenotes/notes/DeployArtifactURLs-Simplified-e3993493022653ab.yaml deleted file mode 100644 index bfb10ef8e..000000000 --- a/releasenotes/notes/DeployArtifactURLs-Simplified-e3993493022653ab.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -deprecations: - - The DeployArtifactURL(s) server no longer makes any use of Swift when - using built-in scripts to upload artifacts. The old - `upload-swift-artifacts` file is now a symlink pointing to the updated - `upload-artifacts` script. While the file has changed, the API remains the - same. The symlink will ensure that legacy automation remains functional - while moving over to the new script. The symlink will be removed in a - future release. 
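A compatibility symlink like the one described in the deprecation note above is conventionally created at install time. A sketch, with an assumed install prefix (the note does not state where the scripts live)::

    # Sketch: keep the legacy name working while pointing at the new script.
    # /usr/local/bin is an assumed location, not taken from the note.
    ln -sf /usr/local/bin/upload-artifacts /usr/local/bin/upload-swift-artifacts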
diff --git a/releasenotes/notes/accept-glance-image-id-6e8bf439d93b3fb5.yaml b/releasenotes/notes/accept-glance-image-id-6e8bf439d93b3fb5.yaml deleted file mode 100644 index dd1e18727..000000000 --- a/releasenotes/notes/accept-glance-image-id-6e8bf439d93b3fb5.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Accept the glance image ID in addition to the name. diff --git a/releasenotes/notes/add-check-mode-support-for-hiera-f15fed971d4397f8.yaml b/releasenotes/notes/add-check-mode-support-for-hiera-f15fed971d4397f8.yaml deleted file mode 100644 index fb751d9ef..000000000 --- a/releasenotes/notes/add-check-mode-support-for-hiera-f15fed971d4397f8.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - Individual server deployments that are of type group:hiera now support - check mode, and when running under check mode, also support diff mode. diff --git a/releasenotes/notes/add-generation-of-barbican-simple-crypto-kek-507fd2f21cdcf21b.yaml b/releasenotes/notes/add-generation-of-barbican-simple-crypto-kek-507fd2f21cdcf21b.yaml deleted file mode 100644 index 6690563ef..000000000 --- a/releasenotes/notes/add-generation-of-barbican-simple-crypto-kek-507fd2f21cdcf21b.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add generation of the key encryption key for the Barbican simple - crypto backend. diff --git a/releasenotes/notes/add-overcloud-ceph-image-build-yamls-8ad8fe8b013a314e.yaml b/releasenotes/notes/add-overcloud-ceph-image-build-yamls-8ad8fe8b013a314e.yaml deleted file mode 100644 index 400b5e05a..000000000 --- a/releasenotes/notes/add-overcloud-ceph-image-build-yamls-8ad8fe8b013a314e.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - Added `overcloud-images-ceph.yaml`, `overcloud-images-ceph-centos8.yaml`, - and `overcloud-images-ceph-rhel8.yaml` to allow an operator to build an - image that can be used for dedicated ceph nodes. This `overcloud-ceph` - image would not have the openstack client, ha or openvswitch related - packages. diff --git a/releasenotes/notes/add-real-time-kernel-images-cc790c6d7b6229da.yaml b/releasenotes/notes/add-real-time-kernel-images-cc790c6d7b6229da.yaml deleted file mode 100644 index 99e9e2ce1..000000000 --- a/releasenotes/notes/add-real-time-kernel-images-cc790c6d7b6229da.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a image yaml for real time kernels support . If you use it, - don't forget to also add the rhel-8-for-x86_64-rt-rpms channel. diff --git a/releasenotes/notes/add-rootstackname-on-update-258dbf091fea497e.yaml b/releasenotes/notes/add-rootstackname-on-update-258dbf091fea497e.yaml deleted file mode 100644 index 5aa528116..000000000 --- a/releasenotes/notes/add-rootstackname-on-update-258dbf091fea497e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The RootStackName parameter is now added to the plan in - plan-environment.yaml on both stack create and update. Previously it was - only added on create. diff --git a/releasenotes/notes/adds-ansible-actions-4da45efa8a98cade.yaml b/releasenotes/notes/adds-ansible-actions-4da45efa8a98cade.yaml deleted file mode 100644 index 860562b51..000000000 --- a/releasenotes/notes/adds-ansible-actions-4da45efa8a98cade.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - | - Adds actions for calling ansible and ansible playbook executables from a - workflow. -deprecations: - - | - The actions for calling ansible and ansible playbook executables from a - workflow will be removed in the Queens release as they are intended to - be migrated to the mistral-extra project. 
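For context on the deprecated ansible actions above: they are thin wrappers around the executables, and per the ``ansible-action-log`` note they capture output in ansible.log in the working directory. Roughly (an illustration, not the actions' actual code)::

    # Sketch: what a workflow-driven playbook run boils down to.
    ansible-playbook -i inventory.yaml playbook.yaml -e @extra_vars.yaml \
        2>&1 | tee ansible.log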
diff --git a/releasenotes/notes/adds-create-container-workflow-77ee4557779563c0.yaml b/releasenotes/notes/adds-create-container-workflow-77ee4557779563c0.yaml deleted file mode 100644 index 7e6683441..000000000 --- a/releasenotes/notes/adds-create-container-workflow-77ee4557779563c0.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a workflow to create a container so the underlying action does - not need to be called directly. \ No newline at end of file diff --git a/releasenotes/notes/adds-delete-plan-workflow-d625682fdddd3f48.yaml b/releasenotes/notes/adds-delete-plan-workflow-d625682fdddd3f48.yaml deleted file mode 100644 index e15e19826..000000000 --- a/releasenotes/notes/adds-delete-plan-workflow-d625682fdddd3f48.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a workflow to delete a deployment plan so the tripleo.plan.delete does - not need to be called directly. diff --git a/releasenotes/notes/adds-generate-fencing-parameters-e2ea121247779db3.yaml b/releasenotes/notes/adds-generate-fencing-parameters-e2ea121247779db3.yaml deleted file mode 100644 index 45b88c3cd..000000000 --- a/releasenotes/notes/adds-generate-fencing-parameters-e2ea121247779db3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add a workflow to generate fencing parameters so action - tripleo.parameters.generate_fencing does not need to be called directly. \ No newline at end of file diff --git a/releasenotes/notes/adds-list-plan-workflow-c0c6f91c9460a09a.yaml b/releasenotes/notes/adds-list-plan-workflow-c0c6f91c9460a09a.yaml deleted file mode 100644 index aef8a5c15..000000000 --- a/releasenotes/notes/adds-list-plan-workflow-c0c6f91c9460a09a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a workflow to list deployment plans so the tripleo.plan.list action - does not need to be called directly. diff --git a/releasenotes/notes/allow-upload-big-files-f67ff35fcd166612.yaml b/releasenotes/notes/allow-upload-big-files-f67ff35fcd166612.yaml deleted file mode 100644 index 296a92b34..000000000 --- a/releasenotes/notes/allow-upload-big-files-f67ff35fcd166612.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - | - Allow uploading files bigger than 5GB to swift. - Currently we have support for uploading files - to swift using the swift client class, this class - does not allow to upload files bigger than 5GB. - This change enables the upload of files bigger than - 5GB by using the swift service class and adjusting - the headers to allow this operations. This new helper - will be used for the Undercloud backup, to be able to - store files bigger than 5GB. diff --git a/releasenotes/notes/ansible-action-log-20904253f962557f.yaml b/releasenotes/notes/ansible-action-log-20904253f962557f.yaml deleted file mode 100644 index 7c51c3eab..000000000 --- a/releasenotes/notes/ansible-action-log-20904253f962557f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The Ansible actions will now log to a log file named ansible.log in the - working directory. diff --git a/releasenotes/notes/append-ironic-staging-drivers-d278905bb1ec0683.yaml b/releasenotes/notes/append-ironic-staging-drivers-d278905bb1ec0683.yaml deleted file mode 100644 index 8715a166c..000000000 --- a/releasenotes/notes/append-ironic-staging-drivers-d278905bb1ec0683.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - The ``ironic-staging-drivers`` are now installed in the ironic-conductor - container so that these drivers can be used without rebuilding the - container. 
The `Ironic Staging Drivers - `_ is used to - hold out-of-tree Ironic drivers which don't have the means to provide a 3rd - Party CI at this point in time, which is required by Ironic. diff --git a/releasenotes/notes/available-roles-workflow-fe81806915124cb6.yaml b/releasenotes/notes/available-roles-workflow-fe81806915124cb6.yaml deleted file mode 100644 index fcb13b3eb..000000000 --- a/releasenotes/notes/available-roles-workflow-fe81806915124cb6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adds a new workflow to list available roles for a given deployment plan. diff --git a/releasenotes/notes/bindep-tox-606dbe4ddf68f7a6.yaml b/releasenotes/notes/bindep-tox-606dbe4ddf68f7a6.yaml deleted file mode 100644 index c1fb8d409..000000000 --- a/releasenotes/notes/bindep-tox-606dbe4ddf68f7a6.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -features: - - The `bindep.txt` file located in the project root contains all of the - basic required packages needed when running local tests. - - Developers can now use bindep to list system requirements. The bindep - command will load the list of packages for the given platform using - the `bindep.txt` file. - - Bindep can now be leveraged via tox using the environment **bindep**. - This tox environment will use bindep via the `bindep-install` script - to install any missing packages on the local system which are required - for development purposes. diff --git a/releasenotes/notes/blacklisted_ips_support-f362e008ae1af210.yaml b/releasenotes/notes/blacklisted_ips_support-f362e008ae1af210.yaml deleted file mode 100644 index bd1146548..000000000 --- a/releasenotes/notes/blacklisted_ips_support-f362e008ae1af210.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -security: - - | - The `enable_ssh_admin` workflow is now always expecting a list of - servers to operate on, passed via `ssh_servers` input which is - left empty when unset. \ No newline at end of file diff --git a/releasenotes/notes/blacklisted_serverid_config-e079e64e8a04cdb4.yaml b/releasenotes/notes/blacklisted_serverid_config-e079e64e8a04cdb4.yaml deleted file mode 100644 index de420e65b..000000000 --- a/releasenotes/notes/blacklisted_serverid_config-e079e64e8a04cdb4.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - Fixes `bug 1793605 <https://bugs.launchpad.net/tripleo/+bug/1793605>`__ so - when nodes are blacklisted, they are not included in the Overcloud config. - A warning will show the server_id that was ignored if it can't - be found in the stack. diff --git a/releasenotes/notes/break-up-messages-0c438e658ce32892.yaml b/releasenotes/notes/break-up-messages-0c438e658ce32892.yaml deleted file mode 100644 index c30195eb7..000000000 --- a/releasenotes/notes/break-up-messages-0c438e658ce32892.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - Messages posted back to a zaqar queue by the ansible-playbook action could - easily exceed the max message size for the queue. Instead of posting a - single message each time, break it up based on the max message size and - post a separate message for each. diff --git a/releasenotes/notes/buildah_build-727eb0f35f819731.yaml b/releasenotes/notes/buildah_build-727eb0f35f819731.yaml deleted file mode 100644 index 125607f8c..000000000 --- a/releasenotes/notes/buildah_build-727eb0f35f819731.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - kolla_builder now supports Buildah and not just Docker. 
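Since ``kolla_builder`` can now drive either tool, the same build request maps onto either CLI. A sketch with a hypothetical image name (not taken from the note)::

    # Sketch: equivalent image builds; buildah runs without a daemon.
    docker build -t example/keystone:latest .
    buildah bud -t example/keystone:latest .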
diff --git a/releasenotes/notes/caps-fix-f6f8817a48fa5c25.yaml b/releasenotes/notes/caps-fix-f6f8817a48fa5c25.yaml deleted file mode 100644 index 9d715a049..000000000 --- a/releasenotes/notes/caps-fix-f6f8817a48fa5c25.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Node update now works correctly when capabilities are specified as a dict. diff --git a/releasenotes/notes/capture-environment-status-and-logs-3462d6ebbc9ecf2e.yaml b/releasenotes/notes/capture-environment-status-and-logs-3462d6ebbc9ecf2e.yaml deleted file mode 100644 index 4d2409b04..000000000 --- a/releasenotes/notes/capture-environment-status-and-logs-3462d6ebbc9ecf2e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Implemented new Mistral workflows to execute sosreport on overcloud nodes - and upload them to a Swift container on the undercloud. diff --git a/releasenotes/notes/change-odl-healthcheck-uri-84d6dea51b110772.yaml b/releasenotes/notes/change-odl-healthcheck-uri-84d6dea51b110772.yaml deleted file mode 100644 index bc9013dfc..000000000 --- a/releasenotes/notes/change-odl-healthcheck-uri-84d6dea51b110772.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Modifies the healthcheck for OpenDaylight to a supported URL. See - https://bugs.launchpad.net/tripleo/+bug/1751857 diff --git a/releasenotes/notes/check-boot-action-548e38d17cf1ad96.yaml b/releasenotes/notes/check-boot-action-548e38d17cf1ad96.yaml deleted file mode 100644 index 813f88c3b..000000000 --- a/releasenotes/notes/check-boot-action-548e38d17cf1ad96.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds an action and workflow used to check the status of - the boot images in Glance. diff --git a/releasenotes/notes/check-flavors-action-59b7f2dd5103ad9d.yaml b/releasenotes/notes/check-flavors-action-59b7f2dd5103ad9d.yaml deleted file mode 100644 index fa2e4b3fa..000000000 --- a/releasenotes/notes/check-flavors-action-59b7f2dd5103ad9d.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds an action and workflow used to check the status of - the defined and passed flavors in Nova. diff --git a/releasenotes/notes/check-node-counts-bb80a5cdd8d10475.yaml b/releasenotes/notes/check-node-counts-bb80a5cdd8d10475.yaml deleted file mode 100644 index 70cda7223..000000000 --- a/releasenotes/notes/check-node-counts-bb80a5cdd8d10475.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds an action and workflow used to check the node - counts and the hypervisor. diff --git a/releasenotes/notes/check_for_default_tag-09fe34d2ac434890.yaml b/releasenotes/notes/check_for_default_tag-09fe34d2ac434890.yaml deleted file mode 100644 index 6bdf019b9..000000000 --- a/releasenotes/notes/check_for_default_tag-09fe34d2ac434890.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - When the default tag doesn't exist in the container repo during container - image prepare, and a tag wasn't set in the actual input for - ContainerImagePrepare, the latest tag from the repo will be used instead of - failing with a not found error. diff --git a/releasenotes/notes/check_ovs_upgrade-99cecd6b7bfdcf83.yaml b/releasenotes/notes/check_ovs_upgrade-99cecd6b7bfdcf83.yaml deleted file mode 100644 index 0e2366b5b..000000000 --- a/releasenotes/notes/check_ovs_upgrade-99cecd6b7bfdcf83.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - Prevent upgrading a stack to a version of tripleo templates or - environment that specifies neutron mechanism drivers that are - incompatible with the existing stack. 
Upgrade can be forced - by ForceNeutronDriverUpdate parameter which need to be set in - deployment parameters. \ No newline at end of file diff --git a/releasenotes/notes/cinder-and-glance-api-version-bca6acd809d4151c.yaml b/releasenotes/notes/cinder-and-glance-api-version-bca6acd809d4151c.yaml deleted file mode 100644 index 510a6cec8..000000000 --- a/releasenotes/notes/cinder-and-glance-api-version-bca6acd809d4151c.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - Set ``OS_VOLUME_API_VERSION`` and ``OS_IMAGE_API_VERSION`` in - ``overcloudrc`` in order to establish the default API versions for - the Volume and Image services. The values match the default major API - versions for Cinder (3) and Glance (2). diff --git a/releasenotes/notes/config-download-consistent-work-dir-b8a37550c3970722.yaml b/releasenotes/notes/config-download-consistent-work-dir-b8a37550c3970722.yaml deleted file mode 100644 index 26a179cd2..000000000 --- a/releasenotes/notes/config-download-consistent-work-dir-b8a37550c3970722.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - The config_download_deploy workflow now uses a consistent working directory - for the config-download directory. Since the directory is now managed by - git, it can be reused across executions. diff --git a/releasenotes/notes/config-download-deploy-workflow-55f26302a42cf379.yaml b/releasenotes/notes/config-download-deploy-workflow-55f26302a42cf379.yaml deleted file mode 100644 index 53673c74b..000000000 --- a/releasenotes/notes/config-download-deploy-workflow-55f26302a42cf379.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Adds a new workflow, tripleo.deployment.v1.config_download_deploy, - that does an overcloud configuration using the config download - mechanism. diff --git a/releasenotes/notes/config-download-dont-use-tmpdirs-3641db9fd687f85e.yaml b/releasenotes/notes/config-download-dont-use-tmpdirs-3641db9fd687f85e.yaml deleted file mode 100644 index 276a6685e..000000000 --- a/releasenotes/notes/config-download-dont-use-tmpdirs-3641db9fd687f85e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrade: - - | - ``openstack overcloud config download`` now writes directly to the - directory specified by ``--config-dir``. The directory contents will be - overwritten, preserving any contents not originating from the stack. A - ``--no-preserve-config`` option is provided which will cause the - ``--config-dir`` to be deleted and recreated if the``--config-dir`` - location exists. Tmpdirs are no longer used. diff --git a/releasenotes/notes/config-download-git-repo-9a18681afbfb9136.yaml b/releasenotes/notes/config-download-git-repo-9a18681afbfb9136.yaml deleted file mode 100644 index 4a4aefee8..000000000 --- a/releasenotes/notes/config-download-git-repo-9a18681afbfb9136.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Initialize a git repository in the config-download directory and - automatically snapshot changes made to the repository. diff --git a/releasenotes/notes/config-download-git-repo-commit-msg-9a550daaae1fc55e.yaml b/releasenotes/notes/config-download-git-repo-commit-msg-9a550daaae1fc55e.yaml deleted file mode 100644 index 208eddd99..000000000 --- a/releasenotes/notes/config-download-git-repo-commit-msg-9a550daaae1fc55e.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - The GetOvercloudConfig action now sets a commit message that indicates the - config was downloaded by the Mistral action and what user/project were used - to execute the action. 
 - - Since the config download directory is now managed by git, the - GetOvercloudConfig action will now first download the existing config - container (default of overcloud-config), so that the git history is - preserved and new changes will reuse the same git repo. Each new change to - the config-download directory creates a new git commit. diff --git a/releasenotes/notes/config-download-timeout-7296683716f78022.yaml b/releasenotes/notes/config-download-timeout-7296683716f78022.yaml deleted file mode 100644 index 8989099eb..000000000 --- a/releasenotes/notes/config-download-timeout-7296683716f78022.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The config_download_deploy workflow now has a config_download_timeout input - that will honor the user-requested timeout. Previously, no timeout was - honored even though the user could request one via tripleoclient. diff --git a/releasenotes/notes/config-download-verbosity-ab2e89e169c208a7.yaml b/releasenotes/notes/config-download-verbosity-ab2e89e169c208a7.yaml deleted file mode 100644 index dd99eacaa..000000000 --- a/releasenotes/notes/config-download-verbosity-ab2e89e169c208a7.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - The verbosity of the config-download ansible tasks now defaults to 0 - instead of 1. This makes the tasks not verbose by default. The verbosity - specified on the command line with the deployment command is now honored - and can be used to disable verbosity or increase the verbosity level as - needed. diff --git a/releasenotes/notes/container-image-prepare-modify_only_with_source-d9be8cc7236e7c94.yaml b/releasenotes/notes/container-image-prepare-modify_only_with_source-d9be8cc7236e7c94.yaml deleted file mode 100644 index b0fe8d7f0..000000000 --- a/releasenotes/notes/container-image-prepare-modify_only_with_source-d9be8cc7236e7c94.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Added `modify_only_with_source` to the ContainerImagePrepare set that can - be used to limit modification of container images to a specific image_source as - defined in the services to container images mapping. diff --git a/releasenotes/notes/create-overcloudrc-workflow-e5150b6b0af462f0.yaml b/releasenotes/notes/create-overcloudrc-workflow-e5150b6b0af462f0.yaml deleted file mode 100644 index 9585d5461..000000000 --- a/releasenotes/notes/create-overcloudrc-workflow-e5150b6b0af462f0.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds a workflow to generate the overcloudrc files in a given deployment - so the tripleo.deployment.overcloudrc action does not need to be called - directly. diff --git a/releasenotes/notes/create_get_flattened_parameters_workflow-8c35b813289a5479.yaml b/releasenotes/notes/create_get_flattened_parameters_workflow-8c35b813289a5479.yaml deleted file mode 100644 index 2b6428a5c..000000000 --- a/releasenotes/notes/create_get_flattened_parameters_workflow-8c35b813289a5479.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Creates a workflow to get flattened deployment parameters, so the related - action does not need to be called directly. 
\ No newline at end of file diff --git a/releasenotes/notes/create_heat_capabilities_workbook-091f0ce2ab5fff3a.yaml b/releasenotes/notes/create_heat_capabilities_workbook-091f0ce2ab5fff3a.yaml deleted file mode 100644 index 54c8f3f20..000000000 --- a/releasenotes/notes/create_heat_capabilities_workbook-091f0ce2ab5fff3a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Creates a workbook to update and get heat capabilities, so the related actions - do not need to be called directly. diff --git a/releasenotes/notes/default-arch-selection-d5fd2fcdba725dd4.yaml b/releasenotes/notes/default-arch-selection-d5fd2fcdba725dd4.yaml deleted file mode 100644 index d282a3e8c..000000000 --- a/releasenotes/notes/default-arch-selection-d5fd2fcdba725dd4.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The default architecure for image builds now defaults to the cpu of the - host instead of x86_64/amd64. This allows for a single package of - tripleo-common to be used across multiple architectures to generate images. diff --git a/releasenotes/notes/default-container-prefix-is-openstack-3cd42220d6cdfed0.yaml b/releasenotes/notes/default-container-prefix-is-openstack-3cd42220d6cdfed0.yaml deleted file mode 100644 index b1f2665a6..000000000 --- a/releasenotes/notes/default-container-prefix-is-openstack-3cd42220d6cdfed0.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - | - The default container prefix is `openstack-` instead of `centos-binary-` - starting with Victoria. diff --git a/releasenotes/notes/delete-stack-a3c0951d9af04a0f.yaml b/releasenotes/notes/delete-stack-a3c0951d9af04a0f.yaml deleted file mode 100644 index 712a57d41..000000000 --- a/releasenotes/notes/delete-stack-a3c0951d9af04a0f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixes `bug 1657461 `__ by - adding a workflow to delete a Heat stack. diff --git a/releasenotes/notes/deployed-server-clear-breakpoint-ee1a984f3366598a.yaml b/releasenotes/notes/deployed-server-clear-breakpoint-ee1a984f3366598a.yaml deleted file mode 100644 index 33c6c7e20..000000000 --- a/releasenotes/notes/deployed-server-clear-breakpoint-ee1a984f3366598a.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - When performing an interactive minor update with deployed-server, the - client never prompted to clear breakpoints and just ran to completion and - exited. The stack was left IN_PROGRESS. That issue has now been fixed so - that the client will prompt to clear breakpoints. diff --git a/releasenotes/notes/deployment-status-workflows-7f6ba3b69f805f06.yaml b/releasenotes/notes/deployment-status-workflows-7f6ba3b69f805f06.yaml deleted file mode 100644 index 884e34775..000000000 --- a/releasenotes/notes/deployment-status-workflows-7f6ba3b69f805f06.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - New workflows are added for manipulating the deployment status, including - tripleo.deployment.v1.set_deployment_status_success, - tripleo.deployment.v1.set_deployment_status_failed, and - tripleo.deployment.v1.set_deployment_status_deploying. diff --git a/releasenotes/notes/deployments-per-server-ea747fcff19c884d.yaml b/releasenotes/notes/deployments-per-server-ea747fcff19c884d.yaml deleted file mode 100644 index 4bb7ebc94..000000000 --- a/releasenotes/notes/deployments-per-server-ea747fcff19c884d.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - The list of pre and post deployment names generated with config-download - are now written per server instead of per role. 
This change handles the - case where a deployment may apply to only an individual or set of servers - within a role, and not all servers in that role. host_vars are used to set - the variable of deployment names per server instead of group_vars. diff --git a/releasenotes/notes/deprecate-list-roles-action-12744cee0e6d70e5.yaml b/releasenotes/notes/deprecate-list-roles-action-12744cee0e6d70e5.yaml deleted file mode 100644 index 2ca015332..000000000 --- a/releasenotes/notes/deprecate-list-roles-action-12744cee0e6d70e5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The tripleo.roles.list action is deprecated. Please use the - tripleo.plan_management.v1.list_roles workflow instead. Calling actions - directly is no longer supported. diff --git a/releasenotes/notes/deprecate-skopeo-image-uploader-a8b8b4b46d7be706.yaml b/releasenotes/notes/deprecate-skopeo-image-uploader-a8b8b4b46d7be706.yaml deleted file mode 100644 index fd53c8f25..000000000 --- a/releasenotes/notes/deprecate-skopeo-image-uploader-a8b8b4b46d7be706.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - The SkopeoImageUploader is deprecated. It does not work with the Stein - image repository and will be removed in a future release. diff --git a/releasenotes/notes/deprecated-params-list-a4edf6e341520ead.yaml b/releasenotes/notes/deprecated-params-list-a4edf6e341520ead.yaml deleted file mode 100644 index 9df00d4ca..000000000 --- a/releasenotes/notes/deprecated-params-list-a4edf6e341520ead.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added a workflow to list all the deprecated parameters in the plan diff --git a/releasenotes/notes/derive-deployment-parameters-c5e97d3df9bfc114.yaml b/releasenotes/notes/derive-deployment-parameters-c5e97d3df9bfc114.yaml deleted file mode 100644 index 50bbcb561..000000000 --- a/releasenotes/notes/derive-deployment-parameters-c5e97d3df9bfc114.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - | - Add a Mistral workflow that uses hardware introspection data to derive - deployment parameters for features such as DPDK and HCI (hyperconverged - Nova compute and Ceph OSD nodes). The derived parameters workflow is - automatically invoked during deployment when the workflow is listed in - the plan environment file. - - For each role in the deployment, the workflow analyzes the Heat resource - tree to determine which features are relevant to that role. The main - workflow invokes secondary workflows responsible for deriving parameters - associated with each feature. diff --git a/releasenotes/notes/derive-parameters-using-scheduler-hints-5bb65bc78c1f6f91.yaml b/releasenotes/notes/derive-parameters-using-scheduler-hints-5bb65bc78c1f6f91.yaml deleted file mode 100644 index 7dd256d90..000000000 --- a/releasenotes/notes/derive-parameters-using-scheduler-hints-5bb65bc78c1f6f91.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - Fix `bug 1760659 `__ by - updating the derived parameters workflow to use scheduler hints associated - with a given role. The scheduler hints are used to identify overcloud - nodes associated with the role, and take precedence over nodes identified - by their profile/flavor. 
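The scheduler-hint matching described in the fix above keys off node capabilities. A sketch of tagging a bare metal node so the derived-parameters workflow can associate it with a role (the capability value is illustrative)::

    # Sketch: tag a node for the Compute role, index 0.
    openstack baremetal node set <node-uuid> \
        --property capabilities=node:compute-0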
diff --git a/releasenotes/notes/disable_nouveau-bbaf1263fe43821a.yaml b/releasenotes/notes/disable_nouveau-bbaf1263fe43821a.yaml deleted file mode 100644 index b93689d3a..000000000 --- a/releasenotes/notes/disable_nouveau-bbaf1263fe43821a.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Add disable-nouveau element to tripleo images. - This ensures nouveau is not loaded at boot, as nouveau can prevent PCI passthrough - or loading the NVIDIA binary drivers that are required for vGPU support. diff --git a/releasenotes/notes/discover_hosts-f1733234ba32a909.yaml b/releasenotes/notes/discover_hosts-f1733234ba32a909.yaml deleted file mode 100644 index 1d545eba2..000000000 --- a/releasenotes/notes/discover_hosts-f1733234ba32a909.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Run ``nova-manage cell_v2 discover_hosts`` when any baremetal nodes are - registered with the undercloud. diff --git a/releasenotes/notes/dont-fail-tripleo-bootstrap-on-package-install-a00cd921b0af7168.yaml b/releasenotes/notes/dont-fail-tripleo-bootstrap-on-package-install-a00cd921b0af7168.yaml deleted file mode 100644 index 9278cfb29..000000000 --- a/releasenotes/notes/dont-fail-tripleo-bootstrap-on-package-install-a00cd921b0af7168.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - The tripleo-bootstrap ansible role will no longer fail if yum fails to - install the required packages. This fixed behavior aligns with previous - requirements where enabled package repositories and a working package - manager are not required on the initially deployed images. Errors are - ignored on the package install task, and then a subsequent task will cause - a failure indicating the required packages are not present. diff --git a/releasenotes/notes/drac-address-d835a529a7c17242.yaml b/releasenotes/notes/drac-address-d835a529a7c17242.yaml deleted file mode 100644 index 1919b3931..000000000 --- a/releasenotes/notes/drac-address-d835a529a7c17242.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Stop relying on deprecated alias ``drac_host`` for the ``drac_address`` - field when enrolling Dell nodes. diff --git a/releasenotes/notes/drop-inventory-config-support-c2132b897da2d290.yaml b/releasenotes/notes/drop-inventory-config-support-c2132b897da2d290.yaml deleted file mode 100644 index 2fbcdb275..000000000 --- a/releasenotes/notes/drop-inventory-config-support-c2132b897da2d290.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -other: - - | - TripleoInventory class no longer supports the parameters being passed in as - a config object. This was added to support a transition in - tripleo-validations that was corrected in Queens. diff --git a/releasenotes/notes/enable-ssh-admin-honor-blacklist-f1371554ab1b38f6.yaml b/releasenotes/notes/enable-ssh-admin-honor-blacklist-f1371554ab1b38f6.yaml deleted file mode 100644 index 16fa373d5..000000000 --- a/releasenotes/notes/enable-ssh-admin-honor-blacklist-f1371554ab1b38f6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - tripleo.access.v1.enable_ssh_admin now honors the server blacklist if one - is set. Servers in the blacklist will not be used by the workflow. 
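The blacklist handling in the fix above amounts to filtering the server list before acting on it. A minimal sketch, assuming the variable and helper names (they are not taken from the workflow)::

    # Sketch: skip any server that appears in the blacklist.
    for server in "${servers[@]}"; do
        case " ${blacklist[*]} " in
            *" ${server} "*) continue ;;
        esac
        enable_ssh_admin_on "$server"   # hypothetical helper
    done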
diff --git a/releasenotes/notes/enrich-nodes-json-ironic-port-data-0905da3f7b13d149.yaml b/releasenotes/notes/enrich-nodes-json-ironic-port-data-0905da3f7b13d149.yaml deleted file mode 100644 index 175b173c6..000000000 --- a/releasenotes/notes/enrich-nodes-json-ironic-port-data-0905da3f7b13d149.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -features: - - | - Adds support to specify additional parameters for Bare Metal ports when - registering nodes. - - The ``mac`` key in nodes_json (instackenv.json) is replaced by the new - ``ports`` key. Each port-entry supports the following keys: ``address``, - ``physical_network`` and ``local_link_connection``. (The keys in ``ports`` - mirror a subset off the `Bare Metal service API `_ - .) - - Example specifying port mac address only:: - - "ports": [ - { - "address": "52:54:00:87:c8:2e" - } - ] - - Example specifying additional parameters:: - - "ports": [ - { - "address": "52:54:00:87:c8:2f", - "physical_network": "network", - "local_link_connection": { - "switch_info": "switch", - "port_id": "gi1/0/11", - "switch_id": "a6:18:66:33:cb:49" - } - } - ] -deprecations: - - | - The ``mac`` key in nodes_json is replaced by ``ports``. The ``ports`` key - expect a list of dictionaries specifying ``address`` (mac address), and - optional keys ``physical_network`` and ``local_link_connection``. diff --git a/releasenotes/notes/enroll-root-device-a172f98f50706a12.yaml b/releasenotes/notes/enroll-root-device-a172f98f50706a12.yaml deleted file mode 100644 index e2b3b8def..000000000 --- a/releasenotes/notes/enroll-root-device-a172f98f50706a12.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixes: - - | - Recognizes the ``root_device`` property when enrolling nodes. We recommend - it to be set for multi-disk nodes, but the enrolling procedure does not - actually accept it. - - | - Node properties are no longer converted to strings on enrolling. This is - not required by the Bare Metal service and may yield incorrect results. diff --git a/releasenotes/notes/error-msg-no-baremetal-a583117ecd9836dc.yaml b/releasenotes/notes/error-msg-no-baremetal-a583117ecd9836dc.yaml deleted file mode 100644 index 9bb0908e6..000000000 --- a/releasenotes/notes/error-msg-no-baremetal-a583117ecd9836dc.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - Add an error message if there are no bare metal nodes available in an - available or active state and with maintenance mode off. Previously, - the message was misleading about missing control or compute flavor - having no profile associated. diff --git a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml deleted file mode 100644 index 591854c2a..000000000 --- a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -security: - - | - Add EtcdInitialClusterToken to the list of passwords - to generate, so users don't have to pick a string or - rely on a default value. - Fixes `bug 1673266 `__. diff --git a/releasenotes/notes/export_os_nova_api_version-d5d1501306f8013b.yaml b/releasenotes/notes/export_os_nova_api_version-d5d1501306f8013b.yaml deleted file mode 100644 index 726a1ec87..000000000 --- a/releasenotes/notes/export_os_nova_api_version-d5d1501306f8013b.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - openstack cli doesn't negotiate a microversion. Live migration and - multiattach are 2 examples of operations which require arcane incantations - to make them work correctly, and therefore usually don't. 
- This adds ``OS_COMPUTE_API_VERSION=2.latest`` to the overcloudrc file to - fix it. diff --git a/releasenotes/notes/fail-multiple-config-download-executions-bf1f0984cd8af5f0.yaml b/releasenotes/notes/fail-multiple-config-download-executions-bf1f0984cd8af5f0.yaml deleted file mode 100644 index 3057f66be..000000000 --- a/releasenotes/notes/fail-multiple-config-download-executions-bf1f0984cd8af5f0.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -other: - - | - Add an initial task to the config_download_deploy workflow that queries for - existing executions of the same workflow on the same plan. If any are - found, that means that config-download is already running on the existing - plan, so the additional execution that is trying to start is marked as failed. diff --git a/releasenotes/notes/fencing-hw-types-fddcdb6bf6d79414.yaml b/releasenotes/notes/fencing-hw-types-fddcdb6bf6d79414.yaml deleted file mode 100644 index d25c0ec66..000000000 --- a/releasenotes/notes/fencing-hw-types-fddcdb6bf6d79414.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -fixes: - - | - Fixes handling of hardware types (new-style Ironic drivers) when generating - fencing parameters. Also completely removes support for the - no-longer-existing ``pxe_ssh`` driver. -deprecations: - - | - The ``os_auth`` argument to the ``generate_fencing_parameters`` workflow - is deprecated and should not be provided. It will be removed in a future - version. diff --git a/releasenotes/notes/fix-api-network-rendering-5a65009051a0f464.yaml b/releasenotes/notes/fix-api-network-rendering-5a65009051a0f464.yaml deleted file mode 100644 index 983e180e6..000000000 --- a/releasenotes/notes/fix-api-network-rendering-5a65009051a0f464.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - Fixes compatibility between older deployments with the Heat resource network - "InternalNetwork" and the corrected "InternalApiNetwork". Upgrades from - previous versions will still use the old naming scheme, while new - deployments will use the correct name of "InternalApiNetwork". diff --git a/releasenotes/notes/fix-call-to-git-clone-action-d13942fc07e8e089.yaml b/releasenotes/notes/fix-call-to-git-clone-action-d13942fc07e8e089.yaml deleted file mode 100644 index 67070a417..000000000 --- a/releasenotes/notes/fix-call-to-git-clone-action-d13942fc07e8e089.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fixes `bug 1691740 `__ by - adding a container argument to calls to the tripleo.git.clone action in the - create_deployment_plan and update_deployment_plan workflows. diff --git a/releasenotes/notes/fix-check-mode-server-deployment-098bcae9e0227c57.yaml b/releasenotes/notes/fix-check-mode-server-deployment-098bcae9e0227c57.yaml deleted file mode 100644 index af99db24e..000000000 --- a/releasenotes/notes/fix-check-mode-server-deployment-098bcae9e0227c57.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Previously, running ansible-playbook with --check would cause a failure - during the individual server deployments when checking the result of a - previous attempt. diff --git a/releasenotes/notes/fix-generated-server_certs_key_passphrase-60cba4653109992c.yaml b/releasenotes/notes/fix-generated-server_certs_key_passphrase-60cba4653109992c.yaml deleted file mode 100644 index f22b91305..000000000 --- a/releasenotes/notes/fix-generated-server_certs_key_passphrase-60cba4653109992c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The passphrase for the config option 'server_certs_key_passphrase' is used - as a Fernet key in Octavia and thus must be 32 bytes long.
TripleO will now - auto-generate a 32-byte passphrase for OctaviaServerCertsKeyPassphrase. \ No newline at end of file diff --git a/releasenotes/notes/fix-octavia-image-rpm-install-permissions-846cd6780a527084.yaml b/releasenotes/notes/fix-octavia-image-rpm-install-permissions-846cd6780a527084.yaml deleted file mode 100644 index 27c55387e..000000000 --- a/releasenotes/notes/fix-octavia-image-rpm-install-permissions-846cd6780a527084.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fix the Octavia amphora image RPM install on the undercloud node for Red Hat - based deployments (`bug 1772880 - `) diff --git a/releasenotes/notes/fix-octavia-pub-key-d195fbf1976a8d36.yaml b/releasenotes/notes/fix-octavia-pub-key-d195fbf1976a8d36.yaml deleted file mode 100644 index 8c891cd8c..000000000 --- a/releasenotes/notes/fix-octavia-pub-key-d195fbf1976a8d36.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Check pub key file permissions and default to pub key data for Octavia. diff --git a/releasenotes/notes/fix-opendaylight-healthcheck-f9bc1d2e067c4680.yaml b/releasenotes/notes/fix-opendaylight-healthcheck-f9bc1d2e067c4680.yaml deleted file mode 100644 index d5ed1e90f..000000000 --- a/releasenotes/notes/fix-opendaylight-healthcheck-f9bc1d2e067c4680.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixes the OpenDaylight healthcheck for TLS and regular deployments. diff --git a/releasenotes/notes/fix-syntax-error-in-octavia-undercloud-role-c02b0c5b0f1ece34.yaml b/releasenotes/notes/fix-syntax-error-in-octavia-undercloud-role-c02b0c5b0f1ece34.yaml deleted file mode 100644 index b3a3bb799..000000000 --- a/releasenotes/notes/fix-syntax-error-in-octavia-undercloud-role-c02b0c5b0f1ece34.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fix a syntax error in the octavia-undercloud role. diff --git a/releasenotes/notes/fix-update-roles-workflow-with-custom-overcloud-names-35404ceae3ac380e.yaml b/releasenotes/notes/fix-update-roles-workflow-with-custom-overcloud-names-35404ceae3ac380e.yaml deleted file mode 100644 index 71bde81a8..000000000 --- a/releasenotes/notes/fix-update-roles-workflow-with-custom-overcloud-names-35404ceae3ac380e.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - The tripleo.plan_management.v1.update_roles workflow didn't pass the plan - name (container name) or Zaqar queue name to the sub-workflow it triggered. - This caused the behaviour to be incorrect when using a name other than the - default. It now correctly passes on these parameters. diff --git a/releasenotes/notes/flatten_parameters-b37065a0f0071523.yaml b/releasenotes/notes/flatten_parameters-b37065a0f0071523.yaml deleted file mode 100644 index 2d54484c0..000000000 --- a/releasenotes/notes/flatten_parameters-b37065a0f0071523.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added an action to flatten the nested heat resource tree and parameters. diff --git a/releasenotes/notes/format-ansible-deployments-8bc0de3b4dbfa69c.yaml b/releasenotes/notes/format-ansible-deployments-8bc0de3b4dbfa69c.yaml deleted file mode 100644 index e065b1339..000000000 --- a/releasenotes/notes/format-ansible-deployments-8bc0de3b4dbfa69c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - The group:ansible deployments were not formatted as human readable in the - group_vars. It was all just one long line. This made manual review and - debugging more difficult. They are now formatted in a human-readable - format.
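To illustrate the ``OctaviaServerCertsKeyPassphrase`` note above: a user-supplied override must satisfy the 32-byte Fernet constraint, roughly as follows (the value shown is a placeholder assumption, not a recommended secret)::

    parameter_defaults:
      # must be exactly 32 bytes; TripleO now auto-generates such a value
      OctaviaServerCertsKeyPassphrase: 'abcdefghijklmnopqrstuvwxyz123456'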
diff --git a/releasenotes/notes/generate-roles-with-colon-c903826db084b8a6.yaml b/releasenotes/notes/generate-roles-with-colon-c903826db084b8a6.yaml deleted file mode 100644 index 36bfde92a..000000000 --- a/releasenotes/notes/generate-roles-with-colon-c903826db084b8a6.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Generating the roles_data.yaml file has been enhanced to generate a defined - role's properties under a different name, so that a cluster can have - multiple roles with the same set of services, without manual edits. Adds - support to provide role name input as ``Compute:ComputeA`` so that the - role ``ComputeA`` can be generated from the defined role ``Compute``, by - only changing the name. diff --git a/releasenotes/notes/git-support-for-deployment-plans-cac4d3746689cbda.yaml b/releasenotes/notes/git-support-for-deployment-plans-cac4d3746689cbda.yaml deleted file mode 100644 index 34875b3c8..000000000 --- a/releasenotes/notes/git-support-for-deployment-plans-cac4d3746689cbda.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - | - The create_deployment_plan workflow has been updated to provide support for - creating a deployment plan from a git repository of heat templates. A tag - or branch can be specified in the repo url with an '@'. - Example: https://github.com/openstack/project.git@stable/newton -deprecations: - - | - The tripleo.plan_management.v1.create_default_deployment_plan is deprecated - and will be removed in the Queens release. The updates to - tripleo.plan_management.v1.create_deployment_plan ensure that it provides - the same functionality. diff --git a/releasenotes/notes/group-os-apply-config-warn-beece0b9fcff74b7.yaml b/releasenotes/notes/group-os-apply-config-warn-beece0b9fcff74b7.yaml deleted file mode 100644 index f9054afd6..000000000 --- a/releasenotes/notes/group-os-apply-config-warn-beece0b9fcff74b7.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - group:os-apply-config deployments are deprecated for use with - config-download and they will not be applied. diff --git a/releasenotes/notes/gui-logging-5413d0d86e618c59.yaml b/releasenotes/notes/gui-logging-5413d0d86e618c59.yaml deleted file mode 100644 index cce5611c7..000000000 --- a/releasenotes/notes/gui-logging-5413d0d86e618c59.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - GUI logging: we added actions and workflows to support processing and - storage of logging data from tripleo-ui. diff --git a/releasenotes/notes/handle-no-deployment-status-a70a4b950171afbe.yaml b/releasenotes/notes/handle-no-deployment-status-a70a4b950171afbe.yaml deleted file mode 100644 index 44b3b71eb..000000000 --- a/releasenotes/notes/handle-no-deployment-status-a70a4b950171afbe.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The tripleo.deployment.v1.get_deployment_status workflow will no longer - error when requesting the deployment status for a non-existent plan. A - message is sent in the output instead of failing the workflow.
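A sketch of the ``Compute:ComputeA`` behaviour described above, assuming a roles_data.yaml entry generated from the built-in ``Compute`` role (the entry is abbreviated; only the name differs from the source role)::

    - name: ComputeA              # renamed copy of the Compute role
      description: Basic Compute Node role
      ServicesDefault:
        - OS::TripleO::Services::NovaCompute
        # ... remaining services copied unchanged from the Compute role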
diff --git a/releasenotes/notes/healthcheck-cron-37de4a861e1a1cbf.yaml b/releasenotes/notes/healthcheck-cron-37de4a861e1a1cbf.yaml deleted file mode 100644 index bd107abd6..000000000 --- a/releasenotes/notes/healthcheck-cron-37de4a861e1a1cbf.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - new health check for "cron" containers, ensuring the crontab exists and has content diff --git a/releasenotes/notes/honor_trash_output_when_not_using_queue-f7c5a3051f5b90cc.yaml b/releasenotes/notes/honor_trash_output_when_not_using_queue-f7c5a3051f5b90cc.yaml deleted file mode 100644 index 2e1007862..000000000 --- a/releasenotes/notes/honor_trash_output_when_not_using_queue-f7c5a3051f5b90cc.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - Previously, trash_output was not honored if a queue was not being used to - post messages. The behavior has changed so that trash_output will be - honored even if a queue is not being used, and all stdout/stderr will be - discarded. diff --git a/releasenotes/notes/increase-size-security-hardened-images-3fc4df73a48d4a91.yaml b/releasenotes/notes/increase-size-security-hardened-images-3fc4df73a48d4a91.yaml deleted file mode 100644 index 9c9f14a99..000000000 --- a/releasenotes/notes/increase-size-security-hardened-images-3fc4df73a48d4a91.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - Increase the size of the security hardened images to 40G. With the move - to containers more disk space is needed and the disk layout has been - modified. It needs a global size of 40G to work. diff --git a/releasenotes/notes/install-octavia-amphora-image-red-hat-bc8545e36d88f951.yaml b/releasenotes/notes/install-octavia-amphora-image-red-hat-bc8545e36d88f951.yaml deleted file mode 100644 index d7824166f..000000000 --- a/releasenotes/notes/install-octavia-amphora-image-red-hat-bc8545e36d88f951.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Install the Octavia amphora image on the undercloud on Red Hat systems. diff --git a/releasenotes/notes/integrate-skydive-b3b569d996c24cc5.yaml b/releasenotes/notes/integrate-skydive-b3b569d996c24cc5.yaml deleted file mode 100644 index b5e5cd6b6..000000000 --- a/releasenotes/notes/integrate-skydive-b3b569d996c24cc5.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add support for troubleshooting network issues using `Skydive - `_. diff --git a/releasenotes/notes/interfaces-cd94c12dd4744c50.yaml b/releasenotes/notes/interfaces-cd94c12dd4744c50.yaml deleted file mode 100644 index 65d2ff2a8..000000000 --- a/releasenotes/notes/interfaces-cd94c12dd4744c50.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - | - It is now possible to set various interface fields when enrolling nodes - via ``instackenv.json``. This only works for new-style drivers like - ``ipmi`` or ``redfish``. - ironicclient 1.15 is required for setting the ``storage_interface`` field. -upgrade: - - | - The minimum required Bare Metal (Ironic) API version was bumped to 1.33 - (late Pike). diff --git a/releasenotes/notes/introspection-batch-size-47723bceb0281baf.yaml b/releasenotes/notes/introspection-batch-size-47723bceb0281baf.yaml deleted file mode 100644 index a2d3a3dca..000000000 --- a/releasenotes/notes/introspection-batch-size-47723bceb0281baf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -features: - - | - The introspection workflow now accepts an additional parameter - ``concurrency`` which defines how many nodes should be introspected in - parallel. The default is 20, but it can be changed to any positive integer.
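A hedged sketch of passing the ``concurrency`` input described above; the workflow name follows the tripleo.baremetal.v1 naming convention used by the other baremetal notes here, but the exact invocation shape is an assumption::

    # illustrative mistral execution input
    workflow: tripleo.baremetal.v1.introspect
    input:
      node_uuids: [<uuid-1>, <uuid-2>]
      concurrency: 10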
diff --git a/releasenotes/notes/ipmi-discovery-72f93156bcaf461d.yaml b/releasenotes/notes/ipmi-discovery-72f93156bcaf461d.yaml deleted file mode 100644 index 425846eed..000000000 --- a/releasenotes/notes/ipmi-discovery-72f93156bcaf461d.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - | - Add two new workflows for discovering IPMI BMC: ``discover_nodes`` and - ``discover_and_enroll_nodes``. - - The former scans given IP addresses and ports, and tries to log into the BMC - using the given credentials. It returns node information in a format accepted - by the TripleO enrollment workflow. - - The latter calls the former, enrolls the resulting nodes and optionally - moves them to the manageable state. diff --git a/releasenotes/notes/ironic-api-version-d2b4ec1474918f12.yaml b/releasenotes/notes/ironic-api-version-d2b4ec1474918f12.yaml deleted file mode 100644 index 95dd8e0d1..000000000 --- a/releasenotes/notes/ironic-api-version-d2b4ec1474918f12.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - The default ``OS_BAREMETAL_API_VERSION`` and ``IRONIC_API_VERSION`` in - ``overcloudrc`` were bumped to 1.29, which corresponds to Ocata final and - allows using all recent features without specifying an explicit version. diff --git a/releasenotes/notes/ironic-api-version-latest-328a5894677f801d.yaml b/releasenotes/notes/ironic-api-version-latest-328a5894677f801d.yaml deleted file mode 100644 index f54201941..000000000 --- a/releasenotes/notes/ironic-api-version-latest-328a5894677f801d.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - | - The environment variables ``IRONIC_API_VERSION`` and - ``OS_BAREMETAL_API_VERSION`` are no longer set in ``overcloudrc``. - Starting with python-ironicclient 2.0.0 this will result in the latest - supported API version being used. Scripts that rely on a particular API - version behavior must set these versions explicitly. diff --git a/releasenotes/notes/ironic-api-version-pike-4264d815385cba7a.yaml b/releasenotes/notes/ironic-api-version-pike-4264d815385cba7a.yaml deleted file mode 100644 index f5bd95f1f..000000000 --- a/releasenotes/notes/ironic-api-version-pike-4264d815385cba7a.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -upgrade: - - | - The default bare metal API version used by the overcloud was bumped - to 1.34, which is the latest API version supported by the Pike ironicclient. -deprecations: - - | - In the Queens release ironicclient will start defaulting to the latest - supported bare metal API version, instead of the lowest. After that, - we will stop pinning the bare metal API version in ``overcloudrc``. - All scripts that rely on a specific version should set it explicitly via - either the ``OS_BAREMETAL_API_VERSION`` environment variable or - the ``--os-baremetal-api-version`` command line argument. diff --git a/releasenotes/notes/ironic-boot-config-77addfde192cee0f.yaml b/releasenotes/notes/ironic-boot-config-77addfde192cee0f.yaml deleted file mode 100644 index 167818e88..000000000 --- a/releasenotes/notes/ironic-boot-config-77addfde192cee0f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds an action and workflow used to check the ironic boot - configuration. diff --git a/releasenotes/notes/ironic-hardware-types-791dad3f75a67454.yaml b/releasenotes/notes/ironic-hardware-types-791dad3f75a67454.yaml deleted file mode 100644 index 795b2431d..000000000 --- a/releasenotes/notes/ironic-hardware-types-791dad3f75a67454.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Add support for enrolling nodes with the ``ipmi`` hardware type.
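An ``instackenv.json`` fragment enrolling a node with the ``ipmi`` hardware type mentioned above; the name, address and credentials are placeholder assumptions::

    {
      "nodes": [
        {
          "name": "node-0",
          "pm_type": "ipmi",
          "pm_addr": "192.168.24.10",
          "pm_user": "admin",
          "pm_password": "secret"
        }
      ]
    }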
diff --git a/releasenotes/notes/ironic-rescue-ce08f432ccdcece4.yaml b/releasenotes/notes/ironic-rescue-ce08f432ccdcece4.yaml deleted file mode 100644 index e67a6ea8a..000000000 --- a/releasenotes/notes/ironic-rescue-ce08f432ccdcece4.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - Sets ``rescue_kernel`` and ``rescue_ramdisk`` to the same values as - ``deploy_kernel`` and ``deploy_ramdisk`` on node enrollment or - configuration. - - | - Adds support for ``rescue_interface`` when enrolling nodes. diff --git a/releasenotes/notes/ironic-ucs-driver-node-uniqueness-fix-c74110a9728d1023.yaml b/releasenotes/notes/ironic-ucs-driver-node-uniqueness-fix-c74110a9728d1023.yaml deleted file mode 100644 index bcf1ac8a4..000000000 --- a/releasenotes/notes/ironic-ucs-driver-node-uniqueness-fix-c74110a9728d1023.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -deprecations: - - | - Un-deprecated `pm_service_profile` option support in the UCS ironic - driver. -fixes: - - | - Previously, ironic nodes that only differ in `pm_service_profile` - or `ucs_service_profile` would override one another, ultimately leaving - just one of them in the ironic configuration. This fix un-deprecates - `pm_service_profile` option support in the UCS ironic driver. diff --git a/releasenotes/notes/jinja2-template-render-raise-extension-87c7ed150a252ff5.yaml b/releasenotes/notes/jinja2-template-render-raise-extension-87c7ed150a252ff5.yaml deleted file mode 100644 index 3b54f89a8..000000000 --- a/releasenotes/notes/jinja2-template-render-raise-extension-87c7ed150a252ff5.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -other: - - | - The jinja2 template rendering function is extended with a ``raise`` - method. This can be used to raise errors conditionally in the Jinja2 - templated tripleo-heat-templates, for example in case some required property - is not defined in roles_data or network_data. The following example - demonstrates how to raise an error conditionally in a template:: - - {%- if condition %} - {{ raise('MESSAGE') }} - {%- endif %} diff --git a/releasenotes/notes/jinja2_include-8bef46285f25ddea.yaml b/releasenotes/notes/jinja2_include-8bef46285f25ddea.yaml deleted file mode 100644 index c52e3b350..000000000 --- a/releasenotes/notes/jinja2_include-8bef46285f25ddea.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Adds support for the Jinja2 include statement in tripleo-heat-templates. diff --git a/releasenotes/notes/limit_over_blacklist-3ce81ecf04b09997.yaml b/releasenotes/notes/limit_over_blacklist-3ce81ecf04b09997.yaml deleted file mode 100644 index 734cc406f..000000000 --- a/releasenotes/notes/limit_over_blacklist-3ce81ecf04b09997.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -fixes: - - | - Fix `bug 1887692 `__ so that - limit_hosts takes precedence over blacklisted_hostnames, and therefore - Ansible won't be run with two --limit options if both limit hosts - and blacklisted hostnames are in use. When we want to run Ansible on - specific hosts, we will ignore the blacklisted nodes and assume we know - what we are doing. In the case of the scale-down scenario, the unreachable - nodes are ignored.
diff --git a/releasenotes/notes/list-roles-from-roles-data-yaml-7ca573169f888bd7.yaml b/releasenotes/notes/list-roles-from-roles-data-yaml-7ca573169f888bd7.yaml deleted file mode 100644 index 876bbf278..000000000 --- a/releasenotes/notes/list-roles-from-roles-data-yaml-7ca573169f888bd7.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - The tripleo.role.list action now returns the list of roles based - directly on the data from roles_data.yaml in the deployment - plan. (See blueprint `get-roles-actions - `__) diff --git a/releasenotes/notes/manage-workflow-2668b50940c10d97.yaml b/releasenotes/notes/manage-workflow-2668b50940c10d97.yaml deleted file mode 100644 index a95bb4133..000000000 --- a/releasenotes/notes/manage-workflow-2668b50940c10d97.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add a workflow to move a list of baremetal nodes to - the 'manage' state. diff --git a/releasenotes/notes/migration_ssh_key-6e772d18d4d24485.yaml b/releasenotes/notes/migration_ssh_key-6e772d18d4d24485.yaml deleted file mode 100644 index 54c09ee69..000000000 --- a/releasenotes/notes/migration_ssh_key-6e772d18d4d24485.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add MigrationSshKey to generated passwords. This ssh key-pair is used by - nova cold-migration and libvirt live-migration unless TLS is enabled. diff --git a/releasenotes/notes/minor-update-workflow-6106c1a91cb6d029.yaml b/releasenotes/notes/minor-update-workflow-6106c1a91cb6d029.yaml deleted file mode 100644 index 6313594c2..000000000 --- a/releasenotes/notes/minor-update-workflow-6106c1a91cb6d029.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -features: - - A new minor update workflow has been added, which implements all the - steps in Mistral. It includes the following: setting up the Heat outputs of - the overcloud, pushing the configuration files of the deployment into Swift - (including the Ansible playbooks and tasks and the Puppet files), and - running the ansible update playbook via the Ansible action. - - - The config download code has been moved from python-tripleoclient to a - dedicated library in order to be consumed by other APIs or tools. - A mistral action has been added to handle this library. - -fixes: - - Fixes - `bug 1691740 `__ - -deprecations: - - The old minor update workflow is now deprecated; the code for the - ClearBreakpointsAction action has been removed. diff --git a/releasenotes/notes/mistral-swift-tempurl-action-ce4946a0b76db53c.yaml b/releasenotes/notes/mistral-swift-tempurl-action-ce4946a0b76db53c.yaml deleted file mode 100644 index 47ee57c85..000000000 --- a/releasenotes/notes/mistral-swift-tempurl-action-ce4946a0b76db53c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - A new Mistral action has been added to create signed temporary URLs. It - also sets the required metadata with a random key if not yet existing. - This can be used on overcloud nodes to pull and push objects, for example - to distribute Swift rings across all nodes. diff --git a/releasenotes/notes/more-hw-types-a837145e41409382.yaml b/releasenotes/notes/more-hw-types-a837145e41409382.yaml deleted file mode 100644 index 66d474a23..000000000 --- a/releasenotes/notes/more-hw-types-a837145e41409382.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -features: - - | - Adds support for enrolling nodes with all production hardware types, - matching previously supported classic drivers, namely ``ilo``, ``idrac``, - ``irmc`` and ``cisco-ucs-managed``. -upgrade: - - | - Removes support for enrolling nodes with the ``pxe_ssh`` driver (already - removed from ironic).
- | - Removes support for deprecated ``instackenv.json`` parameters: - - * pm_service_profile (use ucs_service_profile) - * pm_auth_method (use irmc_auth_method) - * pm_client_timeout (use irmc_client_timeout) - * pm_sensor_method (use irmc_sensor_method) - * pm_deploy_iso (use irmc_deploy_iso) - diff --git a/releasenotes/notes/move_redis_vip_to_all_nodes-bdd1c96438d6ed91.yaml b/releasenotes/notes/move_redis_vip_to_all_nodes-bdd1c96438d6ed91.yaml deleted file mode 100644 index 63e9a51cd..000000000 --- a/releasenotes/notes/move_redis_vip_to_all_nodes-bdd1c96438d6ed91.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - In the case of cell stacks we need to pass redis_vip as an input - to be able to use redis on the central controllers. This - moves the redis_vip setting to all_nodes and only sets it if - it is not an additional cell. diff --git a/releasenotes/notes/multi_arch_image-3c3730cbba95be19.yaml b/releasenotes/notes/multi_arch_image-3c3730cbba95be19.yaml deleted file mode 100644 index 7511239dc..000000000 --- a/releasenotes/notes/multi_arch_image-3c3730cbba95be19.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - If the `AdditionalArchitectures` parameter has entries then the container - image prepare will prepare images for all architectures instead of just - the default one. A new boolean field `multi_arch` can also be set in - `ContainerImagePrepare` entries to determine the multi-arch behaviour for - images in that entry. If any entry sets a `multi_arch` value then - `AdditionalArchitectures` is ignored. \ No newline at end of file diff --git a/releasenotes/notes/no-cisco-46992167cd0ab6d0.yaml b/releasenotes/notes/no-cisco-46992167cd0ab6d0.yaml deleted file mode 100644 index ef95b88da..000000000 --- a/releasenotes/notes/no-cisco-46992167cd0ab6d0.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Support for the ``cisco-ucs-managed`` and ``cisco-ucs-standalone`` - hardware types has been removed since these hardware types have been - removed from Ironic due to lack of maintenance. diff --git a/releasenotes/notes/no-classic-drivers-d56f8c3ff15af2c3.yaml b/releasenotes/notes/no-classic-drivers-d56f8c3ff15af2c3.yaml deleted file mode 100644 index 4a7079565..000000000 --- a/releasenotes/notes/no-classic-drivers-d56f8c3ff15af2c3.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - On enrollment, all classic drivers are replaced with their hardware type - equivalents (e.g. ``pxe_ipmitool`` is replaced with ``ipmi``). - The ``fake_pxe`` classic driver is replaced with the ``manual-management`` - hardware type (which must be enabled in the undercloud). diff --git a/releasenotes/notes/no-verify-registries-215e4df615e441ff.yaml b/releasenotes/notes/no-verify-registries-215e4df615e441ff.yaml deleted file mode 100644 index f5a8b2c40..000000000 --- a/releasenotes/notes/no-verify-registries-215e4df615e441ff.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - add support for unknown CA diff --git a/releasenotes/notes/no_ss-368721c3af17b782.yaml b/releasenotes/notes/no_ss-368721c3af17b782.yaml deleted file mode 100644 index c5b66ca5b..000000000 --- a/releasenotes/notes/no_ss-368721c3af17b782.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - | - This patch moves away from "ss" execs, using lsof instead. This allows - us to drop most of the piping and subshells, making things more robust. - - | - Introduce a new HEALTHCHECK_DEBUG variable in order to toggle verbosity; - defaults to 0 (no verbosity). Setting it to 1 will activate the -x flag, - among other things.
- | - Push some verbose output to a third descriptor, visible only if we set - the healthcheck to debug. diff --git a/releasenotes/notes/nodes-json-mac-field-no-longer-valid-6ed035c442c4fc68.yaml b/releasenotes/notes/nodes-json-mac-field-no-longer-valid-6ed035c442c4fc68.yaml deleted file mode 100644 index 7000e3052..000000000 --- a/releasenotes/notes/nodes-json-mac-field-no-longer-valid-6ed035c442c4fc68.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -other: - - | - The compatibility wrapper for the deprecated field ``mac`` in nodes JSON - *(instackenv.json)* has been removed. The ``mac`` field is no longer - supported as it was replaced by ``ports`` in the Stein release. - diff --git a/releasenotes/notes/nodes-with-profile-662f2c6cb61c4ac4.yaml b/releasenotes/notes/nodes-with-profile-662f2c6cb61c4ac4.yaml deleted file mode 100644 index c38e1b59e..000000000 --- a/releasenotes/notes/nodes-with-profile-662f2c6cb61c4ac4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added a new workflow to fetch all the matching nodes with the given - profile. To support it, a new action to fetch the node's capabilities - has been added too. diff --git a/releasenotes/notes/nova_api_healthcheck_metadata_wsgi_change-4a191009d7ef9963.yaml b/releasenotes/notes/nova_api_healthcheck_metadata_wsgi_change-4a191009d7ef9963.yaml deleted file mode 100644 index 7e8ca8a4c..000000000 --- a/releasenotes/notes/nova_api_healthcheck_metadata_wsgi_change-4a191009d7ef9963.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - While we have a dedicated nova_metadata healthcheck script, the - nova_metadata and nova_api containers share the same image, and the current - nova api healthcheck script still checks the non-wsgi implementation. This - changes the nova_api healthcheck script to check the metadata wsgi vhost - config for details instead of the details in nova.conf. diff --git a/releasenotes/notes/nova_metadata_config_image-26e727263be52408.yaml b/releasenotes/notes/nova_metadata_config_image-26e727263be52408.yaml deleted file mode 100644 index 455f0ec1d..000000000 --- a/releasenotes/notes/nova_metadata_config_image-26e727263be52408.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - We are changing the nova metadata api to be served via httpd wsgi. Therefore - we'll have a new config volume for the nova_metadata container. - - Adding DockerNovaMetadataConfigImage for this. diff --git a/releasenotes/notes/nova_metadata_healthcheck-44a9b0a1f436826a.yaml b/releasenotes/notes/nova_metadata_healthcheck-44a9b0a1f436826a.yaml deleted file mode 100644 index a1d09ecfe..000000000 --- a/releasenotes/notes/nova_metadata_healthcheck-44a9b0a1f436826a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a nova_metadata healthcheck script, used when the nova metadata api is - run via httpd wsgi, to check the service status. diff --git a/releasenotes/notes/nova_novnc_proxy_ssl_healthcheck-f9ad0dffb459ef4b.yaml b/releasenotes/notes/nova_novnc_proxy_ssl_healthcheck-f9ad0dffb459ef4b.yaml deleted file mode 100644 index 887cfd8bf..000000000 --- a/releasenotes/notes/nova_novnc_proxy_ssl_healthcheck-f9ad0dffb459ef4b.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - If nova novnc proxy is configured to ssl only (see LP 178570), - we need to make sure to also use ssl with the healthcheck script. - With this change we verify if ssl_only is configured in nova.conf - and set https as the proto to use for the novnc healthcheck.
diff --git a/releasenotes/notes/nova_remove_nova-consoleauth-95df6d63822ef787.yaml b/releasenotes/notes/nova_remove_nova-consoleauth-95df6d63822ef787.yaml deleted file mode 100644 index cb94de095..000000000 --- a/releasenotes/notes/nova_remove_nova-consoleauth-95df6d63822ef787.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -fixes: - - | - As of Rocky [1], the nova-consoleauth service has been deprecated and - cell databases are used for storing token authorizations. All new consoles - will be supported by the database backend and existing consoles will be - reset. Console proxies must be run per cell because the new console token - authorizations are stored in cell databases. - - nova-consoleauth was deprecated in tripleo with: - I68485a6c4da4476d07ec0ab5e7b5a4c528820a4f - - This change now removes the NovaConsoleauth service. - - [1] https://docs.openstack.org/releasenotes/nova/rocky.html diff --git a/releasenotes/notes/octavia-amphora-ssh-5dee3678d7b66476.yaml b/releasenotes/notes/octavia-amphora-ssh-5dee3678d7b66476.yaml deleted file mode 100644 index 9f74c18f2..000000000 --- a/releasenotes/notes/octavia-amphora-ssh-5dee3678d7b66476.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Create a keypair for SSH access to Octavia amphorae. diff --git a/releasenotes/notes/octavia-fix-certificates-path-and-content-e8acf1e859e75135.yaml b/releasenotes/notes/octavia-fix-certificates-path-and-content-e8acf1e859e75135.yaml deleted file mode 100644 index 8fc897f40..000000000 --- a/releasenotes/notes/octavia-fix-certificates-path-and-content-e8acf1e859e75135.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixed an issue where amphora load balancers would fail to create. The - problem was that Octavia certificate files were being created in the wrong - path and with invalid content. diff --git a/releasenotes/notes/octavia-internal-tls-support-f595ed1c3a1f3353.yaml b/releasenotes/notes/octavia-internal-tls-support-f595ed1c3a1f3353.yaml deleted file mode 100644 index b0226900f..000000000 --- a/releasenotes/notes/octavia-internal-tls-support-f595ed1c3a1f3353.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Add the missing httpd and mod_ssl packages to the octavia container image to - support TLS proxy for internal TLS. diff --git a/releasenotes/notes/octavia-passphrase-285a06885ac735df.yaml b/releasenotes/notes/octavia-passphrase-285a06885ac735df.yaml deleted file mode 100644 index d1d159839..000000000 --- a/releasenotes/notes/octavia-passphrase-285a06885ac735df.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - Add OctaviaCaKeyPassphrase to the list of passwords - to generate, so users don't have to pick a string or - rely on a default value for the octavia CA private key - passphrase. diff --git a/releasenotes/notes/octavia-set-image-owner-id-adb197d5daae54f1.yaml b/releasenotes/notes/octavia-set-image-owner-id-adb197d5daae54f1.yaml deleted file mode 100644 index f8c82419d..000000000 --- a/releasenotes/notes/octavia-set-image-owner-id-adb197d5daae54f1.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -security: - - | - Fixed a vulnerability where an attacker may cause new Octavia amphorae to - run based on any arbitrary image (CVE-2019-3895). -fixes: - - | - Ensure [controller_worker]/amp_image_owner_id is set. This configuration - option restricts Glance image selection to a specific owner ID. This is a - recommended security setting.
diff --git a/releasenotes/notes/openstack-heat-agents-31a1a2908745f3bc.yaml b/releasenotes/notes/openstack-heat-agents-31a1a2908745f3bc.yaml deleted file mode 100644 index 769910f01..000000000 --- a/releasenotes/notes/openstack-heat-agents-31a1a2908745f3bc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Use the openstack-heat-agents package to install all of the - python-heat-agent packages in the image, instead of having to specify each - individually. diff --git a/releasenotes/notes/os_net_config_packaging-36b94a08bbb3e11d.yaml b/releasenotes/notes/os_net_config_packaging-36b94a08bbb3e11d.yaml deleted file mode 100644 index b0d16d463..000000000 --- a/releasenotes/notes/os_net_config_packaging-36b94a08bbb3e11d.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -features: - - | - Install os-net-config as an RPM package directly via DIB rather than - rely on the os-net-config element. This change will allow us to - deprecate further use of tripleo-image-elements for this feature. -upgrade: - - | - In the Ocata release we started using a tripleo-heat-templates script - to drive os-net-config. This approach gave us better signal handling - capabilities, reduces our dependencies on os-apply-config, and makes - it easier to integrate and fine-tune network configuration with, - for example, custom mapping files. Users who have network scripts - using the older 'os-apply-config' format will need to update - to the new t-h-t script format as part of this change. All - in-tree templates were updated in t-h-t as part of - git commit 2c11e9e179178d074af91d8c5c798078ac3e0966. diff --git a/releasenotes/notes/overcloudrc-versionless-keystone-endpoint-9cdc1a4e1341a933.yaml b/releasenotes/notes/overcloudrc-versionless-keystone-endpoint-9cdc1a4e1341a933.yaml deleted file mode 100644 index f7d32947d..000000000 --- a/releasenotes/notes/overcloudrc-versionless-keystone-endpoint-9cdc1a4e1341a933.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -other: - - | - Switched to a versionless Keystone url in the overcloudrc. Previously, - /v3 was being appended to the OS_AUTH_URL url but is not required when - configuring OS_IDENTITY_API_VERSION. - diff --git a/releasenotes/notes/ovirt-driver-77af6913e280a19e.yaml b/releasenotes/notes/ovirt-driver-77af6913e280a19e.yaml deleted file mode 100644 index 859974b63..000000000 --- a/releasenotes/notes/ovirt-driver-77af6913e280a19e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Allows enrolling oVirt nodes using the ``staging-ovirt`` hardware type. diff --git a/releasenotes/notes/ovn-image-prepare-neutron-server-abb60292341b5782.yaml b/releasenotes/notes/ovn-image-prepare-neutron-server-abb60292341b5782.yaml deleted file mode 100644 index df5c7658a..000000000 --- a/releasenotes/notes/ovn-image-prepare-neutron-server-abb60292341b5782.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Preparing docker image containers with just OVN now also generates the - corresponding Neutron Server OVN docker image. diff --git a/releasenotes/notes/passwords-79661a3f27a33528.yaml b/releasenotes/notes/passwords-79661a3f27a33528.yaml deleted file mode 100644 index 1d7bd1876..000000000 --- a/releasenotes/notes/passwords-79661a3f27a33528.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - | - HeatAuthEncryptionKey, HorizonSecret, MysqlRootPassword, PcsdPassword - and RabbitCookie are now generated by tripleo-common among other - passwords managed by TripleO. If existing versions of these parameters - have been generated by the Heat stack, we first harvest those before - generating new ones.
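For the os-net-config note above, a minimal new-style (t-h-t script driven) os-net-config YAML sketch; the interface name is an assumption and real deployments carry many more entries::

    network_config:
      - type: interface
        name: nic1          # placeholder NIC name
        use_dhcp: true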
diff --git a/releasenotes/notes/predeployment-validations-1e8eacd36571d5c9.yaml b/releasenotes/notes/predeployment-validations-1e8eacd36571d5c9.yaml deleted file mode 100644 index c0cf3be97..000000000 --- a/releasenotes/notes/predeployment-validations-1e8eacd36571d5c9.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add a workflow to run all predeployment validations - and report back all errors and warnings. diff --git a/releasenotes/notes/prepare-includes-0c9a077369e99619.yaml b/releasenotes/notes/prepare-includes-0c9a077369e99619.yaml deleted file mode 100644 index f5763f4a2..000000000 --- a/releasenotes/notes/prepare-includes-0c9a077369e99619.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - ContainerImagePrepare entries can now take an `includes` option, which like - `excludes` takes a list of regex patterns. `includes` filters out - entries which do not match at least one of the include expressions. diff --git a/releasenotes/notes/profile-17e2650c8da9e8b5.yaml b/releasenotes/notes/profile-17e2650c8da9e8b5.yaml deleted file mode 100644 index a3e5128e3..000000000 --- a/releasenotes/notes/profile-17e2650c8da9e8b5.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - A node's profile can now be specified as a separate ``profile`` field in - the ``instackenv.json`` instead of inside capabilities. -deprecations: - - | - Specifying the profile in capabilities when enrolling nodes is deprecated. - Please use the new ``profile`` field instead. diff --git a/releasenotes/notes/provide-name-f75b6b61d3d8d693.yaml b/releasenotes/notes/provide-name-f75b6b61d3d8d693.yaml deleted file mode 100644 index ead4eb493..000000000 --- a/releasenotes/notes/provide-name-f75b6b61d3d8d693.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixes running the baremetal ``provide`` workflow with node names. diff --git a/releasenotes/notes/qemu_user_id-32d8f17099a6f002.yaml b/releasenotes/notes/qemu_user_id-32d8f17099a6f002.yaml deleted file mode 100644 index 5e16fc94f..000000000 --- a/releasenotes/notes/qemu_user_id-32d8f17099a6f002.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -fixes: - - | - The qemu user on the host gets created using uid/gid 107. Certificates - on the host, as well as the vhost-user sockets created by OVS, use this - uid/gid. With the move to TCIB images the default kolla ids were - reverted and the previous overwrite dropped. This made, for example, the - qemu processes fail to use the libvirt-vnc bind-mounted certificates. - This change brings back the previous overwrite of the qemu user - uid/gid. diff --git a/releasenotes/notes/quote-$@-a3d47106c9b7eeb6.yaml b/releasenotes/notes/quote-$@-a3d47106c9b7eeb6.yaml deleted file mode 100644 index 51e7c8bcd..000000000 --- a/releasenotes/notes/quote-$@-a3d47106c9b7eeb6.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The generated ansible-playbook-command.sh now has quotes around $@ so - that the value can be passed through to ansible-playbook with spaces or - other characters requiring quotes. diff --git a/releasenotes/notes/redfish-550a0e0f0fd4ea41.yaml b/releasenotes/notes/redfish-550a0e0f0fd4ea41.yaml deleted file mode 100644 index 4e0b14b18..000000000 --- a/releasenotes/notes/redfish-550a0e0f0fd4ea41.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Add support for enrolling nodes using the Redfish protocol for management. - Requires the additional field ``pm_system_id``, see `documentation - `_.
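The ``includes`` option described in the prepare-includes note above can be combined with ``excludes``; a hedged example, with namespace, tag and patterns as illustrative assumptions::

    parameter_defaults:
      ContainerImagePrepare:
        - push_destination: true
          includes:
            - nova            # keep only images matching these patterns
            - neutron
          excludes:
            - novajoin        # and drop any matching these
          set:
            namespace: quay.io/tripleomaster
            tag: current-tripleo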
diff --git a/releasenotes/notes/reduce-memory-consumption-d7effb68ab63b8c5.yaml b/releasenotes/notes/reduce-memory-consumption-d7effb68ab63b8c5.yaml deleted file mode 100644 index b47d80c17..000000000 --- a/releasenotes/notes/reduce-memory-consumption-d7effb68ab63b8c5.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fix high water mark memory usage on the Cinder Volume and Backup services - and reduce peak memory usage. diff --git a/releasenotes/notes/releasenotes/notes/update-lb-mgmt-subnet-to-class-b-1cd832ef08a30c85.yaml b/releasenotes/notes/releasenotes/notes/update-lb-mgmt-subnet-to-class-b-1cd832ef08a30c85.yaml deleted file mode 100644 index 10432b145..000000000 --- a/releasenotes/notes/releasenotes/notes/update-lb-mgmt-subnet-to-class-b-1cd832ef08a30c85.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - - Enhance lb-mgmt-subnet to be a class B subnet, so the global number - of Octavia load balancers won't be constrained to a very low number. diff --git a/releasenotes/notes/remove-abort-7214f376c9672644.yaml b/releasenotes/notes/remove-abort-7214f376c9672644.yaml deleted file mode 100644 index a198e97eb..000000000 --- a/releasenotes/notes/remove-abort-7214f376c9672644.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - The update abort command was introduced many releases ago. However, it is - not a safe operation in the context of TripleO. The TripleO Heat stack - could become irreparably damaged should a rollback be attempted. As such, - it is best to remove this functionality without a deprecation period. - The workaround for this command is to wait until the stack times out or - completes the update. diff --git a/releasenotes/notes/remove-ironic-staging-drivers-override-ce9776ec030ec02a.yaml b/releasenotes/notes/remove-ironic-staging-drivers-override-ce9776ec030ec02a.yaml deleted file mode 100644 index 0fda4b4cb..000000000 --- a/releasenotes/notes/remove-ironic-staging-drivers-override-ce9776ec030ec02a.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Remove the TripleO-specific inclusion of the openstack-ironic-staging-drivers - package into the ironic-conductor container, as this has been included in - kolla. diff --git a/releasenotes/notes/remove-overcloudrc.v3-2118c053035c1439.yaml b/releasenotes/notes/remove-overcloudrc.v3-2118c053035c1439.yaml deleted file mode 100644 index 78c532559..000000000 --- a/releasenotes/notes/remove-overcloudrc.v3-2118c053035c1439.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - overcloudrc.v3 is no longer generated from the overcloudrc workflow. This - is due to the fact that we've been shipping keystone v3 by default for some - releases now, and we have the same contents available in overcloudrc. diff --git a/releasenotes/notes/remove-skopeo-image-uploader-51e7574cc386a3e9.yaml b/releasenotes/notes/remove-skopeo-image-uploader-51e7574cc386a3e9.yaml deleted file mode 100644 index bfb19751d..000000000 --- a/releasenotes/notes/remove-skopeo-image-uploader-51e7574cc386a3e9.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - Removed the skopeo-based container image uploader. This has been deprecated - since Stein and does not work with our current registry implementation.
diff --git a/releasenotes/notes/remove-skydive-support-1cea22a7419a3b13.yaml b/releasenotes/notes/remove-skydive-support-1cea22a7419a3b13.yaml deleted file mode 100644 index b278489a4..000000000 --- a/releasenotes/notes/remove-skydive-support-1cea22a7419a3b13.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Removed support for troubleshooting network issues using `Skydive - `_. diff --git a/releasenotes/notes/remove-tempest-container-support-596426994bbb5c9d.yaml b/releasenotes/notes/remove-tempest-container-support-596426994bbb5c9d.yaml deleted file mode 100644 index 535b317af..000000000 --- a/releasenotes/notes/remove-tempest-container-support-596426994bbb5c9d.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -removal: - - | - Support for the Tempest container is removed in favor of the os_tempest - ansible role, as it is no longer tested by the CI team and not used - anywhere. Since Tempest is a standalone OpenStack validation tool, it can - be installed via its RPM package, which is fully supported. diff --git a/releasenotes/notes/resource-class-6fffaaf3b39b36c5.yaml b/releasenotes/notes/resource-class-6fffaaf3b39b36c5.yaml deleted file mode 100644 index f48d91333..000000000 --- a/releasenotes/notes/resource-class-6fffaaf3b39b36c5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Set the ``resource_class`` field of newly created nodes to ``baremetal`` - to adapt to the recent scheduling changes. See `bug 1708653 - `_ for details. diff --git a/releasenotes/notes/rm_create_default_deployment_plan-397b259f6f641ab9.yaml b/releasenotes/notes/rm_create_default_deployment_plan-397b259f6f641ab9.yaml deleted file mode 100644 index 9cae1bb73..000000000 --- a/releasenotes/notes/rm_create_default_deployment_plan-397b259f6f641ab9.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - | - The tripleo.plan_management.v1.create_default_deployment_plan workflow - has been removed, since it's been deprecated since the Pike release and - is no longer used in TripleO. Any other users of this workflow should - switch to tripleo.plan_management.v1.create_deployment_plan instead. diff --git a/releasenotes/notes/role-config-none-d440bd0dcbb63534.yaml b/releasenotes/notes/role-config-none-d440bd0dcbb63534.yaml deleted file mode 100644 index b1ef49eb0..000000000 --- a/releasenotes/notes/role-config-none-d440bd0dcbb63534.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - RoleConfig can exist as a stack output but have a value of None. That case - is now handled with a default value of {} where the value was previously - None. diff --git a/releasenotes/notes/role-specific-validation-5ea0a31711ced6fe.yaml b/releasenotes/notes/role-specific-validation-5ea0a31711ced6fe.yaml deleted file mode 100644 index 7619d0f0f..000000000 --- a/releasenotes/notes/role-specific-validation-5ea0a31711ced6fe.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added a role-specific parameter validation workflow. diff --git a/releasenotes/notes/select-roles-workflow-01860e1ba8b7b86c.yaml b/releasenotes/notes/select-roles-workflow-01860e1ba8b7b86c.yaml deleted file mode 100644 index 77eb52b30..000000000 --- a/releasenotes/notes/select-roles-workflow-01860e1ba8b7b86c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Adds a workflow that takes a list of role names as input and populates - roles_data.yaml in the deployment plan with the respective roles from - the 'roles' directory.
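A sketch of invoking the role-selection workflow described above; the workflow and input names here are assumptions for illustration only, not a verified interface::

    workflow: tripleo.plan_management.v1.select_roles   # hypothetical name
    input:
      container: overcloud
      role_names: [Controller, Compute]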
diff --git a/releasenotes/notes/set-ssh-server-keep-alive-options-071e1b3b570e78a7.yaml b/releasenotes/notes/set-ssh-server-keep-alive-options-071e1b3b570e78a7.yaml deleted file mode 100644 index 588a4f928..000000000 --- a/releasenotes/notes/set-ssh-server-keep-alive-options-071e1b3b570e78a7.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The ServerAliveInterval and ServerAliveCountMax SSH options are now set in - the mistral ansible action so that when networking configuration is - performed on the overcloud nodes SSH will not drop the connection. diff --git a/releasenotes/notes/skip-deploy-identifier-d5abb0d4e6af0ecd.yaml b/releasenotes/notes/skip-deploy-identifier-d5abb0d4e6af0ecd.yaml deleted file mode 100644 index f80315d7f..000000000 --- a/releasenotes/notes/skip-deploy-identifier-d5abb0d4e6af0ecd.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - Add a new action argument, skip_deploy_identifier, to DeployStackAction. The - argument will disable setting a unique value for the DeployIdentifier - parameter, which means the SoftwareDeployment resources in the templates - will only be triggered if there is an actual change to their configuration. - This argument can be used to avoid always applying configuration, such as - during node scale out. This option should be used with caution, and only if - there is confidence that the software configuration does not need to be - run, such as when scaling out certain roles. diff --git a/releasenotes/notes/skip-tag-lookup-if-tag-specified-2284c45dc0f87693.yaml b/releasenotes/notes/skip-tag-lookup-if-tag-specified-2284c45dc0f87693.yaml deleted file mode 100644 index 98bf8be80..000000000 --- a/releasenotes/notes/skip-tag-lookup-if-tag-specified-2284c45dc0f87693.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - When specifying ContainerImagePrepare, if a tag is explicitly provided - in a set, the tag_from_label functionality will not be run, as we use - the defined tag for the containers. Previously we would still attempt - tag lookups even if we wanted a specific tag. diff --git a/releasenotes/notes/split_off_ceph_containers-e1a66fa39076c2cf.yaml b/releasenotes/notes/split_off_ceph_containers-e1a66fa39076c2cf.yaml deleted file mode 100644 index c8d8aa67b..000000000 --- a/releasenotes/notes/split_off_ceph_containers-e1a66fa39076c2cf.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -features: - - | - Split off Ceph related container images - - This change updates the default tripleo_containers jinja - template, splitting off the Ceph related container images. - With this new approach pulling the ceph containers is optional, and can - be avoided by setting the `ceph_images` boolean to False, - e.g., passing something like the following:: - - - parameter_defaults: - ContainerImagePrepare: - - push_destination: true - set: - name_prefix: openstack- - name_suffix: '' - namespace: quay.io/tripleomaster - neutron_driver: ovn - rhel_containers: false - tag: current-tripleo - ceph_images: false - ContainerImagePrepareDebug: true - ContainerImageRegistryCredentials: {} - - avoids the ceph containers being pulled on the undercloud. - To make this possible, a new jinja template processing approach - has been introduced, and a template basedir parameter (required - by the jinja loader) has been added to the BaseImageManager. - Finally, two more `ceph_`-prefixed containers, required to deploy - the Ceph Ingress daemon, are added, and they are supposed to match - the tripleo-heat-templates `OS::TripleO::Services::CephIngress` - service.
The Ingress daemon doesn't use the Ceph daemon container, hence - `tripleo container image prepare` should be executed to pull the - new container images/tags on the undercloud, as is done for the Ceph - Dashboard and the regular Ceph image. diff --git a/releasenotes/notes/stack-update-1530096686438046.yaml b/releasenotes/notes/stack-update-1530096686438046.yaml deleted file mode 100644 index 71292fd8d..000000000 --- a/releasenotes/notes/stack-update-1530096686438046.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixes `bug 1614928 `__ by - adding workflows to support the package update command. diff --git a/releasenotes/notes/stop-octavia-amphora-image-install-5d26e3d37c7b508f.yaml b/releasenotes/notes/stop-octavia-amphora-image-install-5d26e3d37c7b508f.yaml deleted file mode 100644 index f53cefd43..000000000 --- a/releasenotes/notes/stop-octavia-amphora-image-install-5d26e3d37c7b508f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - | - The octavia-amphora-image package (RHEL) will no longer be installed by the - octavia-undercloud role, and it now installs image files in the directory - /usr/share/openstack-octavia-amphora-images/. Please ensure you have the - latest package version installed on the undercloud node before - deploying or updating the overcloud. diff --git a/releasenotes/notes/stop-using-mistral-env-2ed6e17c4cdb9761.yaml b/releasenotes/notes/stop-using-mistral-env-2ed6e17c4cdb9761.yaml deleted file mode 100644 index a2b8639d5..000000000 --- a/releasenotes/notes/stop-using-mistral-env-2ed6e17c4cdb9761.yaml +++ /dev/null @@ -1,6 +0,0 @@ -upgrade: - - | - The environment configuration for deployments is now stored in a - file called ``plan-environment.yaml`` and stored in Swift with the - templates; Mistral is no longer used to store this data. Migration - of the existing plans is handled automatically. diff --git a/releasenotes/notes/swift-additional-healtchecks-ab8cd9c7562654f3.yaml b/releasenotes/notes/swift-additional-healtchecks-ab8cd9c7562654f3.yaml deleted file mode 100644 index 129a2c9cd..000000000 --- a/releasenotes/notes/swift-additional-healtchecks-ab8cd9c7562654f3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds additional healthchecks for Swift to monitor the account, - container and object replicators as well as the rsync process. diff --git a/releasenotes/notes/switch_to_tripleomaster_registry-bd795a51f4e572c9.yaml b/releasenotes/notes/switch_to_tripleomaster_registry-bd795a51f4e572c9.yaml deleted file mode 100644 index e6a9e6d0e..000000000 --- a/releasenotes/notes/switch_to_tripleomaster_registry-bd795a51f4e572c9.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Switch to the tripleomaster registry by default for container images, where - pushes happen automatically on RDO promotion. -deprecations: - - | - The tripleoupstream container registry is not used anymore and may be - retired in the future. diff --git a/releasenotes/notes/tripleo-bootstrap-721b73d21ade7d6d.yaml b/releasenotes/notes/tripleo-bootstrap-721b73d21ade7d6d.yaml deleted file mode 100644 index 184dabd46..000000000 --- a/releasenotes/notes/tripleo-bootstrap-721b73d21ade7d6d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Introduce a new Ansible role, called tripleo-bootstrap, which will take - care of preparing an environment so we can deploy TripleO.
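Consuming the tripleo-bootstrap role introduced above would look roughly like this in a playbook; the host group and privilege escalation are assumptions::

    - hosts: undercloud       # placeholder host group
      become: true
      roles:
        - tripleo-bootstrap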
diff --git a/releasenotes/notes/tripleo-container-rm-082aa93d2de1e8bc.yaml b/releasenotes/notes/tripleo-container-rm-082aa93d2de1e8bc.yaml deleted file mode 100644 index 69c8c70b9..000000000 --- a/releasenotes/notes/tripleo-container-rm-082aa93d2de1e8bc.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - tripleo-container-rm is the new role that replaces tripleo-docker-rm, which - is in charge of tearing down containers running in Docker or Podman. - If the container_cli is Podman, the role takes care of systemd cleanup for - both the container and its healthcheck, if one exists. diff --git a/releasenotes/notes/tripleo-container-tag-ec42e64289cb17e2.yaml b/releasenotes/notes/tripleo-container-tag-ec42e64289cb17e2.yaml deleted file mode 100644 index 9e81d43f9..000000000 --- a/releasenotes/notes/tripleo-container-tag-ec42e64289cb17e2.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - A new Ansible role to tag containers managed by Pacemaker. - This role will be consumed by services managed by Pacemaker. diff --git a/releasenotes/notes/tripleo-create-admin-0ce59d13ce2c07f6.yaml b/releasenotes/notes/tripleo-create-admin-0ce59d13ce2c07f6.yaml deleted file mode 100644 index 45047283a..000000000 --- a/releasenotes/notes/tripleo-create-admin-0ce59d13ce2c07f6.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Break out tripleo-admin creation to its own role called tripleo-create-admin. - This removes some inline ansible from the mistral workflow, and allows - this role to be reused in other contexts (such as the undercloud install). diff --git a/releasenotes/notes/tripleo-deploy-openshift-plan-name-89135e3a68307047.yaml b/releasenotes/notes/tripleo-deploy-openshift-plan-name-89135e3a68307047.yaml deleted file mode 100644 index 14c119674..000000000 --- a/releasenotes/notes/tripleo-deploy-openshift-plan-name-89135e3a68307047.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - | - The `tripleo-deploy-openshift` script now understands the `--plan` option to - run the openshift-ansible playbooks for a deployment named differently than - "openshift". -deprecations: - - | - The `--config-download-dir` option to the `tripleo-deploy-openshift` script - is deprecated in favor of `--plan`. diff --git a/releasenotes/notes/tripleo-deploy-openshift-playbook-ac8b49a212545c0f.yaml b/releasenotes/notes/tripleo-deploy-openshift-playbook-ac8b49a212545c0f.yaml deleted file mode 100644 index 64cab2895..000000000 --- a/releasenotes/notes/tripleo-deploy-openshift-playbook-ac8b49a212545c0f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Introduce a `--playbook` option to the `tripleo-deploy-openshift` script in - order to be able to run openshift-ansible playbooks directly on already - deployed servers. diff --git a/releasenotes/notes/tripleo-docker-rm-b64297d5f9f42988.yaml b/releasenotes/notes/tripleo-docker-rm-b64297d5f9f42988.yaml deleted file mode 100644 index fc39e9487..000000000 --- a/releasenotes/notes/tripleo-docker-rm-b64297d5f9f42988.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - The new tripleo-docker-rm role will be useful to remove the containers that - were managed by Docker and that are now managed by Podman. diff --git a/releasenotes/notes/tripleo-module-load-80f7fd8c8dd6a81e.yaml b/releasenotes/notes/tripleo-module-load-80f7fd8c8dd6a81e.yaml deleted file mode 100644 index 3fa0c1db4..000000000 --- a/releasenotes/notes/tripleo-module-load-80f7fd8c8dd6a81e.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Loads and persists kernel modules from the host directly.
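A sketch of the kernel-module role mentioned above; the ``modules`` variable name and module choice are assumptions based on the note, not a verified interface::

    - hosts: overcloud
      become: true
      roles:
        - role: tripleo-module-load
          vars:
            modules:              # hypothetical variable name
              - name: br_netfilter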
diff --git a/releasenotes/notes/tripleo-mount-image-e038a7d9d51c4828.yaml b/releasenotes/notes/tripleo-mount-image-e038a7d9d51c4828.yaml deleted file mode 100644 index 3d7ba6cd7..000000000 --- a/releasenotes/notes/tripleo-mount-image-e038a7d9d51c4828.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - The scripts `tripleo-mount-image` and `tripleo-unmount-image` will mount - the contents of an overcloud image file using qemu-nbd, making it available - for chroot or other read/write image operations. The scripts handle - the partition image (overcloud-full.qcow2) as well as the whole-disk image - (overcloud-hardened-uefi-full.qcow2) with its multiple LVM volume mount - points. \ No newline at end of file diff --git a/releasenotes/notes/tripleo-ssh-known-hosts-d27c54b0a6f9a028.yaml b/releasenotes/notes/tripleo-ssh-known-hosts-d27c54b0a6f9a028.yaml deleted file mode 100644 index 770309ce6..000000000 --- a/releasenotes/notes/tripleo-ssh-known-hosts-d27c54b0a6f9a028.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Support for the SshKnownHostsDeployment resources has been fixed by adding - a new role that can be used to configure /etc/ssh/ssh_known_hosts - on each host. diff --git a/releasenotes/notes/undercloud-backup-actions-and-workflow-1d661bba3fb2f974.yaml b/releasenotes/notes/undercloud-backup-actions-and-workflow-1d661bba3fb2f974.yaml deleted file mode 100644 index e498a26ca..000000000 --- a/releasenotes/notes/undercloud-backup-actions-and-workflow-1d661bba3fb2f974.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Introduce an Undercloud Backup workflow as well as a set of Mistral actions - to perform an Undercloud Backup diff --git a/releasenotes/notes/update-keystone-utils-bfd14da957d34ec5.yaml b/releasenotes/notes/update-keystone-utils-bfd14da957d34ec5.yaml deleted file mode 100644 index 7a3c0b1ac..000000000 --- a/releasenotes/notes/update-keystone-utils-bfd14da957d34ec5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - The keystone utils in tripleo-common had gotten out of sync with the way - Mistral was using authentication. This patch aligns the two so that they - are closer to equivalent. diff --git a/releasenotes/notes/update-params-workflow-b26fd4cc40549537.yaml b/releasenotes/notes/update-params-workflow-b26fd4cc40549537.yaml deleted file mode 100644 index 4c0210154..000000000 --- a/releasenotes/notes/update-params-workflow-b26fd4cc40549537.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a workflow to update the parameters in a given deployment plan so the - tripleo.parameters.update action does not need to be called directly. \ No newline at end of file diff --git a/releasenotes/notes/update-programming-language-54ded15322426458.yaml b/releasenotes/notes/update-programming-language-54ded15322426458.yaml deleted file mode 100644 index 281f94c9f..000000000 --- a/releasenotes/notes/update-programming-language-54ded15322426458.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -update: - - | - As py35 has been dropped and py36 and py37 jobs are now running, the - programming language versions mentioned in setup.cfg are updated accordingly.
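For the tripleo-mount-image note above, a hedged usage sketch; the -a and -m flags match the script's own help text later in this patch, while the image path and mount point are illustrative:
# Mount an overcloud image (partition or whole-disk), inspect it, unmount.
sudo tripleo-mount-image -a overcloud-full.qcow2 -m /mnt/overcloud
sudo chroot /mnt/overcloud /bin/bash -c 'cat /etc/os-release'
sudo tripleo-unmount-image -m /mnt/overcloud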
diff --git a/releasenotes/notes/update-ps1-in-rc-files-c710832fc1ee37f5.yaml b/releasenotes/notes/update-ps1-in-rc-files-c710832fc1ee37f5.yaml deleted file mode 100644 index 618fc9086..000000000 --- a/releasenotes/notes/update-ps1-in-rc-files-c710832fc1ee37f5.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - When sourcing the overcloudrc on the undercloud, the command prompt will - show that the credentials have been loaded by being prepended with - the overcloud stack name. For example, - '(overcloud) [stack@undercloud ~]$' diff --git a/releasenotes/notes/update-roles-workflow-00be679eb8e9548c.yaml b/releasenotes/notes/update-roles-workflow-00be679eb8e9548c.yaml deleted file mode 100644 index 98de1cd26..000000000 --- a/releasenotes/notes/update-roles-workflow-00be679eb8e9548c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Adds a workflow and associated actions to update roles in a deployment - plan. diff --git a/releasenotes/notes/use-hostnames-in-inventory-6d1a3572baebf509.yaml b/releasenotes/notes/use-hostnames-in-inventory-6d1a3572baebf509.yaml deleted file mode 100644 index cbfdd8d49..000000000 --- a/releasenotes/notes/use-hostnames-in-inventory-6d1a3572baebf509.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -other: - - The inventory code is updated to use hostnames as the host alias. Since the - hostname may not always be resolvable, ansible_host is added as a hostvar - and set to the host's IP address. Using hostnames produces a much more - user-friendly result in the ansible output showing task results and the - play recap. diff --git a/releasenotes/notes/use-tripleo-containers-file-0590a59f56fb3907.yaml b/releasenotes/notes/use-tripleo-containers-file-0590a59f56fb3907.yaml deleted file mode 100644 index 0263e802f..000000000 --- a/releasenotes/notes/use-tripleo-containers-file-0590a59f56fb3907.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -other: - - | - `container-images/tripleo_containers.yaml.j2` is now used to determine what - containers are used for which services when the container image - prepare process runs. diff --git a/releasenotes/notes/use_trash_output_in_create_admin_via_ssh-dc9cae99934e1fbe.yaml b/releasenotes/notes/use_trash_output_in_create_admin_via_ssh-dc9cae99934e1fbe.yaml deleted file mode 100644 index 80a62ffb8..000000000 --- a/releasenotes/notes/use_trash_output_in_create_admin_via_ssh-dc9cae99934e1fbe.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - When deploying a large number of nodes, the create_admin_via_ssh workflow - could fail due to the large amount of ansible output generated. This patch - updates the tripleo.ansible-playbook action in the workflow with - trash_output: true so that the output is not saved in the mistral DB. A - log file is already saved in case the output is needed for debugging - purposes. diff --git a/releasenotes/notes/v3-only-overcloudrc-8439cfed2145341f.yaml b/releasenotes/notes/v3-only-overcloudrc-8439cfed2145341f.yaml deleted file mode 100644 index 7172492ac..000000000 --- a/releasenotes/notes/v3-only-overcloudrc-8439cfed2145341f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The overcloudrc and overcloudrc.v3 now have the same contents and are - keystone-v3-enabled. This was done because keystone no longer supports the - v2.0 API.
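The inventory note above means each host keeps a friendly hostname alias while Ansible connects over IP; a minimal static-inventory sketch of the equivalent layout (names and addresses are illustrative):
# The alias is the hostname; ansible_host carries the reachable IP address.
cat <<'EOF' > inventory.ini
overcloud-controller-0 ansible_host=192.0.2.10
overcloud-novacompute-0 ansible_host=192.0.2.11
EOF
ansible -i inventory.ini all -m ping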
diff --git a/releasenotes/notes/validations-in-workflow-8ce6a053cacece0d.yaml b/releasenotes/notes/validations-in-workflow-8ce6a053cacece0d.yaml deleted file mode 100644 index 6d8dfc623..000000000 --- a/releasenotes/notes/validations-in-workflow-8ce6a053cacece0d.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Adds support for calling the external TripleO - validations from the deployment and introspection - workflows. They default to off, and can be enabled - by passing 'True' to the run_validations parameter - of these workflows. diff --git a/releasenotes/notes/verify-profiles-a9d075f565bc3df6.yaml b/releasenotes/notes/verify-profiles-a9d075f565bc3df6.yaml deleted file mode 100644 index c8347fd22..000000000 --- a/releasenotes/notes/verify-profiles-a9d075f565bc3df6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Adds an action and workflow used to verify the profiles - assigned to nodes and their count. diff --git a/releasenotes/notes/workaround_ssh_known_host_atomic_update-481e0baf3b3d6342.yaml b/releasenotes/notes/workaround_ssh_known_host_atomic_update-481e0baf3b3d6342.yaml deleted file mode 100644 index 5f49ce054..000000000 --- a/releasenotes/notes/workaround_ssh_known_host_atomic_update-481e0baf3b3d6342.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Work around `bug 1810932 `__ by - scripting an in-place update of ssh_known_hosts - diff --git a/releasenotes/notes/workflow-config-download-export-d22f3eb958b8c97a.yaml b/releasenotes/notes/workflow-config-download-export-d22f3eb958b8c97a.yaml deleted file mode 100644 index 3803ed96e..000000000 --- a/releasenotes/notes/workflow-config-download-export-d22f3eb958b8c97a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - A new workflow, config_download_export, for exporting the config-download - files via a Swift tempurl is added so that the openstack overcloud config - download tripleoclient command can use the API. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 6fa2af7f5..000000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,262 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones.
-extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = u'2016, TripleO Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = '' -# The short X.Y version. -version = '' - -# The full version, including alpha/beta/rc tags. - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. 
These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'tripleo-commonReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'tripleo-commonReleaseNotes.tex', - u'tripleo-common Release Notes Documentation', - u'2016, TripleO Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'tripleo-commonreleasenotes', - u'tripleo-common Release Notes Documentation', - [u'2016, TripleO Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'tripleo-commonReleaseNotes', - u'tripleo-common Release Notes Documentation', - u'2016, TripleO Developers', - 'tripleo-commonReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. 
-# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/tripleo-common' -openstackdocs_bug_project = 'tripleo' -openstackdocs_bug_tag = 'documentation' diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index a80b041cb..000000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,28 +0,0 @@ -======================================== -Welcome to tripleo-common Release Notes! -======================================== - -Contents -======== - -.. toctree:: - :maxdepth: 2 - - unreleased - zed - wallaby - victoria - ussuri - train - stein - rocky - queens - pike - ocata - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42e..000000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst deleted file mode 100644 index e43bfc0ce..000000000 --- a/releasenotes/source/pike.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Pike Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/pike diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst deleted file mode 100644 index 36ac6160c..000000000 --- a/releasenotes/source/queens.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Queens Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/queens diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst deleted file mode 100644 index 40dd517b7..000000000 --- a/releasenotes/source/rocky.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Rocky Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/rocky diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst deleted file mode 100644 index efaceb667..000000000 --- a/releasenotes/source/stein.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Stein Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 583900393..000000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 2334dd5cf..000000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - - .. 
release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e0c..000000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst deleted file mode 100644 index 4efc7b6f3..000000000 --- a/releasenotes/source/victoria.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -Victoria Series Release Notes -============================= - -.. release-notes:: - :branch: stable/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst deleted file mode 100644 index d77b56599..000000000 --- a/releasenotes/source/wallaby.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -Wallaby Series Release Notes -============================ - -.. release-notes:: - :branch: stable/wallaby diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst deleted file mode 100644 index 9608c05e4..000000000 --- a/releasenotes/source/zed.rst +++ /dev/null @@ -1,6 +0,0 @@ -======================== -Zed Series Release Notes -======================== - -.. release-notes:: - :branch: stable/zed diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index e2dc35989..000000000 --- a/requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -GitPython>=1.0.1 # BSD License (3 clause) -python-heatclient>=1.10.0 # Apache-2.0 -oslo.config>=5.2.0 # Apache-2.0 -oslo.log>=3.36.0 # Apache-2.0 -oslo.utils>=3.33.0 # Apache-2.0 -python-ironicclient!=2.5.2,!=2.7.1,!=3.0.0,>=2.3.0 # Apache-2.0 -oslo.concurrency>=3.26.0 # Apache-2.0 -Jinja2>=2.10 # BSD License (3 clause) -passlib>=1.7.0 # BSD -cryptography>=3.4.7 # BSD/Apache-2.0 -tenacity>=6.1.0 # Apache-2.0 -metalsmith>=0.13.0 # Apache-2.0 -requests>=2.18.0,!=2.20.0 # Apache-2.0 -PyYAML>=3.12 # MIT diff --git a/scripts/README-tripleo.sh b/scripts/README-tripleo.sh deleted file mode 100644 index 5b086b09f..000000000 --- a/scripts/README-tripleo.sh +++ /dev/null @@ -1,3 +0,0 @@ -The tripleo.sh script is now maintained in the tripleo-ci repo: - -https://github.com/openstack-infra/tripleo-ci diff --git a/scripts/bindep-install b/scripts/bindep-install deleted file mode 100755 index 6451da028..000000000 --- a/scripts/bindep-install +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -## Shell Opts ---------------------------------------------------------------- - -set -o pipefail -set -xeu - - -## Vars ---------------------------------------------------------------------- - -export BINDEP_FILE="${BINDEP_FILE:-$(dirname $(readlink -f ${BASH_SOURCE[0]}))/../bindep.txt}" - - -## Main ---------------------------------------------------------------------- - -# Source distribution information -source /etc/os-release || source /usr/lib/os-release -which dnf &>/dev/null && RHT_PKG_MGR='dnf' || RHT_PKG_MGR='yum' - -# NOTE(cloudnull): Get a list of packages to install with bindep. If packages -# need to be installed, bindep exits with an exit code of 1. -BINDEP_PKGS=$(bindep -b -f "${BINDEP_FILE}" test || true) - -if [[ ${#BINDEP_PKGS} -gt 0 ]]; then - case "${ID,,}" in - amzn|rhel|centos|fedora) - sudo "${RHT_PKG_MGR}" install -y ${BINDEP_PKGS} - ;; - esac -fi diff --git a/scripts/bootstrap_host_exec b/scripts/bootstrap_host_exec deleted file mode 100755 index 99cf76f79..000000000 --- a/scripts/bootstrap_host_exec +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -e -SERVICE_NAME=$1 -if [ -z "$SERVICE_NAME" ]; then - echo "Please supply a valid service name." - exit 1 -fi -shift -if [ -z "$*" ]; then - echo "Please supply a valid 'command' to run as an argument." - exit 1 -fi -HOSTNAME=$(/bin/hostname -s) -SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${SERVICE_NAME}_short_bootstrap_node_name") -if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then - eval $* -else - echo "Skipping execution since this is not the bootstrap node for this service." -fi diff --git a/scripts/bootstrap_host_only_eval b/scripts/bootstrap_host_only_eval deleted file mode 100755 index 99cf76f79..000000000 --- a/scripts/bootstrap_host_only_eval +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -e -SERVICE_NAME=$1 -if [ -z "$SERVICE_NAME" ]; then - echo "Please supply a valid service name." - exit 1 -fi -shift -if [ -z "$*" ]; then - echo "Please supply a valid 'command' to run as an argument." - exit 1 -fi -HOSTNAME=$(/bin/hostname -s) -SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${SERVICE_NAME}_short_bootstrap_node_name") -if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then - eval $* -else - echo "Skipping execution since this is not the bootstrap node for this service." -fi diff --git a/scripts/bootstrap_host_only_exec b/scripts/bootstrap_host_only_exec deleted file mode 100755 index 1c665c999..000000000 --- a/scripts/bootstrap_host_only_exec +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -e -SERVICE_NAME=$1 -if [ -z "$SERVICE_NAME" ]; then - echo "Please supply a valid service name." - exit 1 -fi -shift -if [ -z "$*" ]; then - echo "Please supply a valid 'command' to run as an argument." - exit 1 -fi -HOSTNAME=$(/bin/hostname -s) -SERVICE_NODEID=$(/bin/hiera -c /etc/puppet/hiera.yaml "${SERVICE_NAME}_short_bootstrap_node_name") -if [[ "${HOSTNAME,,}" == "${SERVICE_NODEID,,}" ]]; then - exec "$@" -else - echo "Skipping execution since this is not the bootstrap node for this service." -fi diff --git a/scripts/containerfile-converter.py b/scripts/containerfile-converter.py deleted file mode 100755 index c03447dd7..000000000 --- a/scripts/containerfile-converter.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/env python -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import sys - -import yaml - - -TCIB_MAP = { - "tcib_path": None, - "tcib_args": {}, - "tcib_from": None, - "tcib_labels": {}, - "tcib_envs": {}, - "tcib_onbuilds": [], - "tcib_volumes": [], - "tcib_workdir": None, - "tcib_adds": [], - "tcib_copies": [], - "tcib_exposes": [], - "tcib_user": None, - "tcib_shell": None, - "tcib_runs": [], - "tcib_healthcheck": None, - "tcib_stopsignal": None, - "tcib_entrypoint": None, - "tcib_cmd": None, - "tcib_actions": [], - "tcib_gather_files": [], -} - -DOCKER_VERB_MAP = { - "FROM": "tcib_from", - "RUN": "tcib_runs", - "CMD": "tcib_cmd", - "LABEL": "tcib_labels", - "EXPOSE": "tcib_exposes", - "ENV": "tcib_envs", - "ADD": "tcib_adds", - "COPY": "tcib_copies", - "ENTRYPOINT": "tcib_entrypoint", - "VOLUME": "tcib_volumes", - "USER": "tcib_user", - "WORKDIR": "tcib_workdir", - "ARG": "tcib_args", - "ONBUILD": "tcib_onbuilds", - "STOPSIGNAL": "tcib_stopsignal", - "HEALTHCHECK": "tcib_healthcheck", - "SHELL": "tcib_shell", -} - - -def line_reader(lines, return_lines=None): - """Read all lines of a container file. - - This will concatenate them all into a machine-readable array. - - :param lines: list of lines to read. - :type lines: List - :param return_lines: List of lines that will be returned. - :type return_lines: List - :returns: List - """ - if not return_lines: - return_lines = list() - try: - line = next(lines) - line = line.strip() - if line: - if line.endswith("\\"): - while True: - new_line = next(lines) - if not new_line.startswith("#"): - new_line = new_line.strip() - line = line.rstrip("\\") - line += " {line}".format(line=new_line.rstrip("\\")) - if not new_line.endswith("\\"): - break - return_lines.append(line) - else: - if not line.startswith("#"): - return_lines.append(line) - except StopIteration: - return return_lines - else: - return line_reader(lines, return_lines=return_lines) - - -def package_parse(packages_line, lines): - """Parse a command line which runs a dnf install. - - :param packages_line: Line to parse - :type packages_line: String - :param lines: List of lines - :type lines: List - :returns: List - """ - a = re.search(r".*dnf -y install (.*?) (&&|' ')", packages_line) - TCIB_MAP["tcib_packages"] = {"common": sorted(a.group(1).split())} - index = lines.index(packages_line) - lines.pop(index) - lines.insert( - 0, - packages_line.replace( - a.group(1), r"{{ tcib_packages.common | join(' ') }}" - ), - ) - return lines - - -def module_parse(module_line, lines): - """Parse a command line which runs a dnf module.
- - :param module_line: Line to parse - :type module_line: String - :param lines: List of lines - :type lines: List - :returns: List - """ - modules_list = TCIB_MAP["tcib_packages"]["modules"] = list() - pattern = re.compile( - r"dnf -y module (disable|enable|info|install|list|provides|" - r"remove|repoquery|reset|update)(.*?)(&&|' ')" - ) - for match in re.findall(pattern, module_line): - key, value, _ = match - modules = [i for i in value.split() if i] - for module in modules: - modules_list.append({key: module}) - module_jinja = ( - r"RUN if [ '{{ tcib_distro }}' == 'rhel' ]; then " - r"{% for item in tcib_packages.modules %}" - r"{% set key, value = (item.items() | list).0 %}" - r"dnf module -y {{ key }} {{ value }}; " - r"{% endfor %}fi" - ) - index = lines.index(module_line) - lines.pop(index) - lines.insert( - index, - module_line.replace( - " ".join( - [ - i[0] - for i in re.findall( - r"(dnf -y module.*?(&&|' '))", module_line - ) - ] - ), - "", - ), - ) - lines.insert(index, module_jinja) - return lines - - -def line_parser(lines): - """Line parser which will translate strings into machine data. - - :param lines: List of lines - :type lines: List - """ - for line in lines: - verb, content = line.split(" ", 1) - if verb in ["ADD", "COPY", "RUN"]: - TCIB_MAP["tcib_actions"].append({verb.lower(): content.strip()}) - elif verb in ["FROM", "LABEL"]: - continue - else: - map_item = TCIB_MAP[DOCKER_VERB_MAP[verb]] - if isinstance(map_item, list): - map_item.append(content) - elif isinstance(map_item, dict): - try: - key, value = content.split("=", 1) - except ValueError: - key, value = content.split(" ", 1) - map_item[key] = value.strip('"') - else: - TCIB_MAP[DOCKER_VERB_MAP[verb]] = content - - -def main(containerfile): - """Run the main application. - - :param containerfile: File to parse, this requires the full path. 
- :type containerfile: String - """ - with open(containerfile) as f: - lines = [ - " ".join( - i.split() - ) for i in line_reader(lines=iter(f.readlines())) - ] - - r = re.compile(".*dnf.*install(.*)($| )") - packages_lines = list(filter(r.match, lines)) - if len(packages_lines) == 1: - lines = package_parse(packages_line=packages_lines[0], lines=lines) - elif len(packages_lines) > 1: - print( - "Warning: packages not parsed because there is more than one " - "install command, file '{}' will need to be manually converted " - "to using the packages structure.".format(containerfile) - ) - - r = re.compile(".*dnf.*module(.*)($| )") - module_lines = list(filter(r.match, lines)) - if len(module_lines) == 1: - lines = module_parse(module_line=module_lines[0], lines=lines) - elif len(module_lines) > 1: - print( - "Warning: modules not parsed because there is more than one " - "module command, file '{}' will need to be manually converted to " - "using the module structure.".format(containerfile) - ) - - line_parser(lines=lines) - render_vars = dict() - for key, value in TCIB_MAP.items(): - if value: - render_vars[key] = value - - dir_path = os.path.dirname(containerfile) - var_file = "{var}.yaml".format( - var=os.path.basename(dir_path).replace("-container", "") - ) - with open(os.path.join(dir_path, var_file), "w") as f: - f.write(yaml.dump(render_vars, default_flow_style=False, width=4096)) - - -if __name__ == "__main__": - main(containerfile=sys.argv[1]) diff --git a/scripts/pull-puppet-modules b/scripts/pull-puppet-modules deleted file mode 100755 index c20c9558d..000000000 --- a/scripts/pull-puppet-modules +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# Script to download source puppet modules. -# This script can be used in conjunction with upload-puppet-modules -# -set -eu -set -o pipefail - -SCRIPT_NAME=$(basename $0) -SCRIPT_HOME=$(cd $(dirname $0); pwd) - - -function show_options { - echo "Usage: $SCRIPT_NAME " - echo - echo "Options:" - echo " -h, --help -- print this help." - echo " -d, --directory -- Puppet modules directory. Required." - echo " -m, --modules 'ceph oslo' -- Puppet modules. Optional. Must be quoted and space delimited" - echo " -n, --no-pull -- If the module directory exists do not clone or repull the source" - echo " -x, --no-extras -- Do not download midonet, pacemaker, tripleo" - echo - echo Script to download source puppet modules. - echo This script can be used in conjunction with upload-puppet-modules - echo - echo This script uses a list of puppet modules defined in - echo https://raw.githubusercontent.com/openstack/puppet-openstack-integration/master/openstack_modules.txt - echo Extra puppet modules are downloaded by default - exit -} - -TEMP=`getopt -o m:hnxd: -l help,modules:,no-pull,no-extras,directory: -n $SCRIPT_NAME -- "$@"` -if [ $? != 0 ]; then - echo "Terminating..." # >&2 - exit 1 -fi - -# Note the quotes around `$TEMP': they are essential!
-eval set -- "$TEMP" - -MODULES= -MODULE_DEFAULT_LIST= -MODULES_DIRECTORY= -NO_PULL= - -# Default module selection -# openstack_modules.txt is the definitive list of puppet modules -: ${MODULE_DEFAULT_LIST_URL:="https://raw.githubusercontent.com/openstack/puppet-openstack-integration/master/openstack_modules.txt"} - -# Additional puppet modules may be required -: ${MODULES_EXTRAS:=" -midonet -pacemaker -tripleo -"} - -while true ; do - case "$1" in - -h|--help) show_options >&2;; - -d|--directory) MODULES_DIRECTORY=$2 ; shift 2;; - -m|--modules) MODULES=$2 ; shift 2;; - -n|--no-pull) NO_PULL=1 ; shift ;; - -x|--no-extras) MODULES_EXTRAS=""; shift ;; - --) shift ; break;; - *) echo "Error: unsupported option $1." ; exit 1;; - esac -done - -if [ -z $MODULES_DIRECTORY ]; then - show_options; - exit -fi - -if [[ -z $MODULES ]]; then - MODULE_DEFAULT_LIST=`curl -s --connect-timeout 5 $MODULE_DEFAULT_LIST_URL` - if [ $? != 0 ]; then - echo "Error getting the list of puppet modules from $MODULE_DEFAULT_LIST_URL" - fi -fi - -: ${MODULES:="$MODULE_DEFAULT_LIST $MODULES_EXTRAS"} - -modules_directory=${MODULES_DIRECTORY%/} - -mkdir -p $modules_directory -pushd $MODULES_DIRECTORY - -for module in $MODULES; do - echo "checking $module" - if [[ -d $module && -z "$NO_PULL" ]]; then - pushd $module > /dev/null - git pull - popd > /dev/null - elif ! [ -d $module ]; then - git clone https://opendev.org/openstack/puppet-$module $module - else - echo "$module already exists and will not be updated" - fi -done - -popd diff --git a/scripts/tripleo-build-images b/scripts/tripleo-build-images deleted file mode 100755 index 77703721b..000000000 --- a/scripts/tripleo-build-images +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os -import sys - -from oslo_config import cfg -from oslo_config import types -from oslo_log import log - - -from tripleo_common.image.build import ImageBuildManager - -LOG = log.getLogger(__name__) -env = os.environ.copy() - -CONF = cfg.CONF -log.register_options(CONF) -image_opt_group = cfg.OptGroup(name='image', - title='Image build options') -_opts = [ - cfg.MultiOpt('config-file', - item_type=types.String(), - default=['disk_images.yaml'], - help=("""Path to configuration file. Can be specified """ - """multiple times""")), - cfg.StrOpt('output-directory', - default=env.get('TRIPLEO_ROOT', '.'), - help=("""output directory for images. """ - """Defaults to $TRIPLEO_ROOT, or current directory""")), - cfg.BoolOpt('skip', - default=False, - help="""Skip build if cached image exists."""), - cfg.BoolOpt('json-output', - default=False, - help="""Skip build and only output the configuration in a """ - """structured JSON format."""), - cfg.MultiOpt('name', - item_type=types.String(), - help="""Name of image to build. May be specified multiple """ - """times.
If unspecified, will build all images in """ - """given YAML files."""), -] -CONF.register_group(image_opt_group) -CONF.register_cli_opts(_opts, group=image_opt_group) -CONF.set_default('use_stderr', True) -log.setup(CONF, 'build-overcloud-images') - - -def main(argv=sys.argv): - CONF(argv[1:]) - LOG.info('Using config files at: %s' % CONF.image.config_file) - - manager = ImageBuildManager(CONF.image.config_file, - output_directory=CONF.image.output_directory, - skip=CONF.image.skip, - images=CONF.image.name) - if CONF.image.json_output: - manager.json_output() - else: - manager.build() - - -if __name__ == '__main__': - sys.exit(main(sys.argv)) diff --git a/scripts/tripleo-config-download b/scripts/tripleo-config-download deleted file mode 100755 index 528ac709e..000000000 --- a/scripts/tripleo-config-download +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import argparse -import logging -import os -import sys - -import os_client_config -from tripleo_common.utils import config - - -def get_orchestration_client(): - return os_client_config.make_client('orchestration') - - -def get_args(): - parser = argparse.ArgumentParser( - description=("tripleo-config-download"), - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--stack-name', '-s', - default='overcloud', - help="Heat stack name") - parser.add_argument('--output-dir', '-o', - default='tripleo-config-download', - help="Output directory for downloaded config") - - args = parser.parse_args(sys.argv[1:]) - return args - - -if __name__ == '__main__': - args = get_args() - - logging.basicConfig() - log = logging.getLogger() - log.setLevel(logging.INFO) - - if not os.path.exists(args.output_dir): - os.mkdir(args.output_dir) - - client = get_orchestration_client() - stack_config = config.Config(client) - stack_config.download_config(args.stack_name, args.output_dir) diff --git a/scripts/tripleo-mount-image b/scripts/tripleo-mount-image deleted file mode 100755 index d70e5ecbd..000000000 --- a/scripts/tripleo-mount-image +++ /dev/null @@ -1,342 +0,0 @@ -#!/bin/bash -# Copyright 2021 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# Script to mount the contents of an overcloud image file using qemu-nbd, -# making it available for chroot or other read/write image operations. -# - -set -eu -set -o pipefail -SCRIPT_NAME=$(basename $0) -NBD_DEVICE="" -IMAGE_FILE="" -MOUNT_DIR="" -CHROOT="" -if [ !
-a "/dev/nbd0" ]; then - modprobe nbd -fi - -# GPT GUIDs of interest. -# See https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs -# also https://systemd.io/BOOT_LOADER_SPECIFICATION/ -GUID_EFI="c12a7328-f81f-11d2-ba4b-00a0c93ec93b" -GUID_LINUX_BOOT="bc13c2ff-59e6-4262-a352-b275fd6f7172" - -# supported LVM devices and mount points for whole-disk overcloud images -MOUNTS="/dev/mapper/vg-lv_var:/var \ - /dev/mapper/vg-lv_log:/var/log \ - /dev/mapper/vg-lv_audit:/var/log/audit \ - /dev/mapper/vg-lv_home:/home \ - /dev/mapper/vg-lv_tmp:/tmp \ - /dev/mapper/vg-lv_srv:/srv" -REVERSE_MOUNTS="" -for m in $MOUNTS; do - REVERSE_MOUNTS="$m $REVERSE_MOUNTS" -done - -mount_show_options() { - echo "Usage: $SCRIPT_NAME" - echo - echo "Options:" - echo " -h, --help -- print this help." - echo " -a -- Image file to mount." - echo " -m -- Directory to mount image to." - echo " -n -- NBD device to use (example, /dev/nbd0)." - echo " Defaults to first available" - echo " --chroot -- Start a working chroot which is active until exit." - echo " Directory will be unmounted on chroot exit" - echo - echo "Mount an overcloud image to a directory" - echo - exit $1 -} - -unmount_show_options() { - echo "Usage: $SCRIPT_NAME" - echo - echo "Options:" - echo " -h, --help -- print this help." - echo " -m -- Directory to unmount." - echo " -n -- NBD device to disconnect (example, /dev/nbd0)." - echo " Defaults to detected device from mounted directory" - echo - echo "Unmount a mounted overcloud image" - echo - exit $1 -} - -mount_volume () { - if [ -b "$1" ]; then - if [ ! -d $2 ]; then - mkdir $2 - fi - mount $1 $2 - fi -} - -unmount_volume () { - if mountpoint "$1"; then - umount $1 - fi -} - -remove_device () { - if [ -b "$1" ]; then - dmsetup remove $1 - fi -} - -start_chroot () { - EACTION="echo Exited chroot, unmounting $MOUNT_DIR ; unmount_image" - trap "$EACTION" EXIT - - mount -o bind /dev $MOUNT_DIR/dev/ - EACTION="umount $MOUNT_DIR/dev/; $EACTION" - trap "$EACTION" EXIT - - if [ -a $MOUNT_DIR/etc/resolv.conf ]; then - mv $MOUNT_DIR/etc/resolv.conf $MOUNT_DIR/etc/resolv.conf.tmi - EACTION="mv $MOUNT_DIR/etc/resolv.conf.tmi $MOUNT_DIR/etc/resolv.conf; $EACTION" - trap "$EACTION" EXIT - fi - - cp /etc/resolv.conf $MOUNT_DIR/etc/resolv.conf - EACTION="rm $MOUNT_DIR/etc/resolv.conf; $EACTION" - trap "$EACTION" EXIT - - echo "Starting chroot in $MOUNT_DIR, exit will also unmount" - PS1="[\u@chroot \W]\$ " chroot $MOUNT_DIR /bin/bash -l -} - -mount_image() { - set -x - - if qemu-img info --output json $IMAGE_FILE |grep '"format": "raw"' ; then - image_format='--format raw' - elif qemu-img info --output json $IMAGE_FILE |grep '"format": "qcow2"' ; then - image_format='--format qcow2' - else - image_format='' - fi - qemu-nbd $image_format --connect $NBD_DEVICE $IMAGE_FILE - - # search for the vg volume group, this is automatic in some environments - vgscan - # refresh for when this script is called with different values of $NBD_DEVICE - vgchange --refresh - - # activate new logical volumes, this is automatic in some environments - vgchange -ay - - root_device="" - boot_device="" - efi_device="" - - # wait for any sub-devices to appear - timeout 5 sh -c "while ! 
ls ${NBD_DEVICE}p* ; do sleep 1; done" || true - - set +e - devices=$(ls ${NBD_DEVICE}p*) - set -e - device_count=$(echo $devices | wc -w) - if [ $device_count == "0" ]; then - # if there are no partition devices, assume one root device - root_device=${NBD_DEVICE} - elif [ $device_count == "1" ]; then - # if there is one partition device, assume it is the root device - root_device=${devices} - devices="" - fi - - for device in ${devices}; do - lsblk --nodeps -P --output-all $device - fstype=$(blkid -o value -s TYPE -p $device) - label=$(blkid -o value -s LABEL -p $device) - part_type_name=$(lsblk --all --nodeps --noheadings --output PARTTYPENAME $device || echo "") - part_type=$(blkid -o value -s PART_ENTRY_TYPE -p $device) - - if [ -z "${fstype}" ]; then - # Ignore block device with no filesystem type - continue - fi - - # look for EFI partition to mount at /boot/efi - if [ -z "$efi_device" ]; then - if [[ ${part_type} == ${GUID_EFI} || ${part_type_name} == "EFI System" ]]; then - efi_device=$device - continue - fi - fi - - # look for partition to mount as /boot, only the RHEL guest image is known - # to have this - if [ -z "$boot_device" ]; then - if [[ ${part_type} == ${GUID_LINUX_BOOT} || ${label} == "boot" ]]; then - boot_device=$device - continue - fi - fi - - if [ -z "$root_device" ]; then - root_device=$device - continue - fi - done - - if [ -z "$root_device" ]; then - echo "ERROR: No root device found to mount" - exit 1 - else - if [ -b "/dev/mapper/vg-lv_root" ]; then - # a whole-disk overcloud with lvm volumes - # for example, overcloud-hardened-uefi-full.qcow2 - mount /dev/mapper/vg-lv_root $MOUNT_DIR - for m in $MOUNTS; do - device=${m%:*} - path=${m#*:} - mount_volume $device $MOUNT_DIR$path - done - else - # a simple root partition - mount $root_device $MOUNT_DIR - fi - fi - if [ ! -z "$boot_device" ]; then - # mount to /boot - mount $boot_device $MOUNT_DIR/boot - fi - if [ ! -z "$efi_device" ]; then - # mount to /boot/efi - mount $efi_device $MOUNT_DIR/boot/efi - fi -} - -unmount_image() { - - set -x - - if mountpoint "$MOUNT_DIR"; then - for m in $REVERSE_MOUNTS; do - path=${m#*:} - unmount_volume $MOUNT_DIR$path - done - unmount_volume $MOUNT_DIR/boot/efi - unmount_volume $MOUNT_DIR/boot - unmount_volume $MOUNT_DIR - fi - - # `--activate n` makes LVs inactive, they must be set - # inactive so that the nbd device can be disconnected. - # Ref bug: https://bugs.launchpad.net/tripleo/+bug/1950137 - vgchange --activate n vg || true - qemu-nbd --disconnect $NBD_DEVICE - vgchange --refresh vg || true - - for m in $REVERSE_MOUNTS; do - device=${m%:*} - remove_device $device - done - remove_device vg-lv_root -} - - -if [ $SCRIPT_NAME == "tripleo-unmount-image" ]; then - TEMP=`getopt -o hm:n: -l help -n $SCRIPT_NAME -- "$@"` - if [ $? != 0 ]; then - echo "Terminating..." >&2 - exit 1 - fi - eval set -- "$TEMP" - - while true ; do - case "$1" in - -h|--help) unmount_show_options 0 >&2;; - -m) MOUNT_DIR=$2 ; shift 2;; - -n) NBD_DEVICE=$2 ; shift 2;; - --) shift ; break;; - *) echo "Error: unsupported option $1." ; exit 1;; - esac - done - if [ -z "${MOUNT_DIR}" ]; then - unmount_show_options 1 - fi - MOUNT_DIR=$(realpath ${MOUNT_DIR}) - if [ -z "${NBD_DEVICE}" ]; then - for i in {0..15} ; do - device="/dev/nbd${i}" - mountpoints=$(lsblk --noheadings --output MOUNTPOINT $device) - if [[ $mountpoints =~ "$MOUNT_DIR" ]]; then - NBD_DEVICE="$device" - break - fi - done - fi - if [ -z "${NBD_DEVICE}" ]; then - echo "NBD device could not be detected from existing mounts." 
- echo "Specify a device with -n if an unmount is really required" - exit 1 - fi - unmount_image -else - TEMP=`getopt -o ha:m:n: -l help,chroot -n $SCRIPT_NAME -- "$@"` - if [ $? != 0 ]; then - echo "Terminating..." >&2 - exit 1 - fi - eval set -- "$TEMP" - - - while true ; do - case "$1" in - -h|--help) mount_show_options 0 >&2;; - -a) IMAGE_FILE=$2 ; shift 2;; - -m) MOUNT_DIR=$2 ; shift 2;; - -n) NBD_DEVICE=$2 ; shift 2;; - --chroot) CHROOT=yes ; shift;; - --) shift ; break;; - *) echo "Error: unsupported option $1." ; exit 1;; - esac - done - if [ -z "${MOUNT_DIR}" ]; then - MOUNT_DIR=$(mktemp -d) - fi - if [ -z "${IMAGE_FILE}" ]; then - mount_show_options 1 - fi - MOUNT_DIR=$(realpath ${MOUNT_DIR}) - if mountpoint "${MOUNT_DIR}"; then - echo "${MOUNT_DIR} is already a mountpoint, unmount it or specify a different path" - mount_show_options 1 - fi - if [ -z "${NBD_DEVICE}" ]; then - for i in {0..15} ; do - device="/dev/nbd${i}" - part_type=$(blkid -o value -s PTTYPE -p $device || echo "") - if [ -z "${part_type}" ]; then - NBD_DEVICE="$device" - break - fi - done - if [ -z "${NBD_DEVICE}" ]; then - echo "No NBD device is available" - exit 1 - fi - fi - mount_image - if [ -n "${CHROOT}" ]; then - start_chroot - fi -fi diff --git a/scripts/tripleo-unmount-image b/scripts/tripleo-unmount-image deleted file mode 120000 index 4f32c26d4..000000000 --- a/scripts/tripleo-unmount-image +++ /dev/null @@ -1 +0,0 @@ -tripleo-mount-image \ No newline at end of file diff --git a/scripts/upload-artifacts b/scripts/upload-artifacts deleted file mode 100755 index e59a75b6e..000000000 --- a/scripts/upload-artifacts +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/bash -#set -x -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# Script to upload files to a Artifact container for deployment via -# TripleO Heat Templates. -# -set -eu -set -o pipefail -SCRIPT_NAME=$(basename $0) -SCRIPT_HOME=$(cd $(dirname $0); pwd) - -function show_options { - echo "Usage: $SCRIPT_NAME" - echo - echo "Options:" - echo " -h, --help -- print this help." - echo " -f -- File(s) to upload." - echo " -c -- Artifact container to use." - echo " Default: overcloud-artifacts" - echo " --environment -- Generate this heat " - echo " Default: $HOME/.tripleo/environments/deployment-artifacts.yaml" - echo - echo "Upload a set of files to Artifact and generate a Heat environment" - echo "file containing the required DeployArtifactURLs parameter(s) " - echo "so that it is used with TripleO Heat Templates." 
- echo - exit $1 -} - -function check_file { - local FILE=$1 - supported_files=(" RPM " "gzip compressed data") - array_length=${#supported_files[@]} - - local test_type=`file $FILE` - - local i=0 - local matched=0 - - while [ $i -ne $array_length -a $matched -eq 0 ]; do - if [[ "$test_type" =~ ${supported_files[$i]} ]]; then - matched=1 - fi - i=$((i+1)) - done - - if [ $matched -eq 0 ]; then - echo "Not a supported file type: $FILE" - exit 1 - fi - -} - - -TEMP=`getopt -o he:f:c: -l help,environment:,file:,container: -n $SCRIPT_NAME -- "$@"` -if [ $? != 0 ]; then - echo "Terminating..." >&2 - exit 1 -fi - -# Note the quotes around `$TEMP': they are essential! -eval set -- "$TEMP" - -ENVIRONMENT_FILE="$HOME/.tripleo/environments/deployment-artifacts.yaml" -FILES= -CONTAINER_NAME=overcloud-artifacts - -while true ; do - case "$1" in - -h|--help) show_options 0 >&2;; - -e|--environment) ENVIRONMENT_FILE=$2 ; shift 2;; - -f|--file) FILES+=" $2" ; shift 2;; - -c|--container) CONTAINER_NAME=$2 ; shift 2;; - --) shift ; break;; - *) echo "Error: unsupported option $1." ; exit 1;; - esac -done - -if [ -z "${FILES:-}" ]; then - echo "Error: No files were specified." - exit 1 -fi - -if [ -z "${CONTAINER_NAME:-}" ]; then - echo "Error: No Artifact --container was specified." - exit 1 -fi - -# Create artifact archive -sudo mkdir -p "/var/lib/tripleo/artifacts/${CONTAINER_NAME}" - -for FILE in ${FILES[@]}; do - check_file "$FILE" - sudo mv -v "$FILE" "/var/lib/tripleo/artifacts/${CONTAINER_NAME}/" -done - -if [ -n "${ENVIRONMENT_FILE:-}" ]; then - echo "Creating heat environment file: ${ENVIRONMENT_FILE}" - mkdir -p $(dirname "${ENVIRONMENT_FILE}") - /bin/python3 <" - echo - echo "Options:" - echo " -h, --help -- print this help." - echo " -d -- Puppet modules directory. Required." - echo " -c -- Artifact container to use." - echo " Default: overcloud-artifacts" - echo " --environment -- Generate this heat " - echo " Default: $HOME/.tripleo/environments/puppet-modules-url.yaml" - echo - echo "Upload a directory tarball of puppet modules to an artifact container" - echo "and generate a heat environment file containing the required" - echo "DeployArtifactURLs parameter so that it is used for TripleO deployment." - echo - exit $1 -} - -TEMP=`getopt -o he:d:c: -l help,environment:,directory:,container: -n $SCRIPT_NAME -- "$@"` -if [ $? != 0 ]; then - echo "Terminating..." >&2 - exit 1 -fi - -# Note the quotes around `$TEMP': they are essential! -eval set -- "$TEMP" - -ENVIRONMENT_FILE="$HOME/.tripleo/environments/puppet-modules-url.yaml" -MODULES_DIRECTORY= -CONTAINER_NAME=overcloud-artifacts - -while true ; do - case "$1" in - -h|--help) show_options 0 >&2;; - -e|--environment) ENVIRONMENT_FILE=$2 ; shift 2;; - -d|--directory) MODULES_DIRECTORY=$2 ; shift 2;; - -c|--container) CONTAINER_NAME=$2 ; shift 2;; - --) shift ; break;; - *) echo "Error: unsupported option $1." ; exit 1;; - esac -done - -: ${MODULES_DIRECTORY:?--directory is required} - -modules_directory=${MODULES_DIRECTORY%/} -modules_directory=${modules_directory#/} - -: ${CONTAINER_NAME:?No artifact --container was specified} - -echo "Creating tarball..." -PUPPET_MODULES_TMP=$(mktemp -d -t 'puppet-modules-XXXXXXX') -tar --transform "s|${modules_directory}|etc/puppet/modules|" \ - -czf "$PUPPET_MODULES_TMP/puppet-modules.tar.gz" $MODULES_DIRECTORY -echo "Tarball created."
- -upload-artifacts -f "$PUPPET_MODULES_TMP/puppet-modules.tar.gz" \ - -c "$CONTAINER_NAME" \ - --environment "$ENVIRONMENT_FILE" diff --git a/scripts/upload-swift-artifacts b/scripts/upload-swift-artifacts deleted file mode 120000 index 357f4a3aa..000000000 --- a/scripts/upload-swift-artifacts +++ /dev/null @@ -1 +0,0 @@ -upload-artifacts \ No newline at end of file diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 2f9b5f4ef..000000000 --- a/setup.cfg +++ /dev/null @@ -1,59 +0,0 @@ -[metadata] -name = tripleo-common -summary = A common library for TripleO workflows. -description_file = - README.rst -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/tripleo-common/latest/ -python_requires = >=3.8 -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: Implementation :: CPython - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - -[files] -packages = - tripleo_common - -scripts = - scripts/bootstrap_host_exec - scripts/bootstrap_host_only_eval - scripts/bootstrap_host_only_exec - scripts/pull-puppet-modules - scripts/tripleo-build-images - scripts/tripleo-config-download - scripts/tripleo-mount-image - scripts/tripleo-unmount-image - scripts/upload-puppet-modules - scripts/upload-swift-artifacts - scripts/upload-artifacts - -data_files = - share/tripleo-common/container-images = container-images/* - share/tripleo-common/image-yaml = image-yaml/* - share/tripleo-common/healthcheck = healthcheck/* - -[tool:pytest] -norecursedirs = .eggs .git .tox dist -log_cli = true - -[flake8] -show-source = True -# E123, E125 skipped as they are invalid PEP-8. -# W504/W503 are mutually exclusive and we should avoid them until the -# community picks a winner. -ignore = E123,E125,W503,W504 -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,releasenotes -builtins = _ - -[pycodestyle] -exclude = .eggs,.tox diff --git a/setup.py b/setup.py deleted file mode 100644 index cd35c3c35..000000000 --- a/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 9ebc578ef..000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later.
- -coverage!=4.4,>=4.0 # Apache-2.0 -python-subunit>=1.0.0 # Apache-2.0/BSD -oslotest>=3.2.0 # Apache-2.0 -stestr>=2.0.0 # Apache-2.0 -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=2.2.0 # MIT -requests-mock>=1.2.0 # Apache-2.0 diff --git a/tools/check_duplicate_jinja_blocks.sh b/tools/check_duplicate_jinja_blocks.sh deleted file mode 100755 index be820b4a7..000000000 --- a/tools/check_duplicate_jinja_blocks.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -myname=${0##*/} -path=container-images/tripleo_kolla_template_overrides.j2 - -grep '{% block' $path | -sort | -uniq -c | -awk -v myname=$myname ' - $1 == 2 { - printf "%s: found duplicate block for %s\n", myname, $4 - retval=1 - } - - END {exit retval} -' - -if [ $? -ne 0 ]; then - echo "$myname: duplicate jinja block declarations found in $path" >&2 - exit 1 -fi diff --git a/tools/releasenotes_tox.sh b/tools/releasenotes_tox.sh deleted file mode 100755 index 3a84b5241..000000000 --- a/tools/releasenotes_tox.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -rm -rf releasenotes/build - -sphinx-build -a -E -W --keep-going \ - -d releasenotes/build/doctrees \ - -b html \ - releasenotes/source releasenotes/build/html -BUILD_RESULT=$? - -UNCOMMITTED_NOTES=$(git status --porcelain | \ - awk '$1 == "M" && $2 ~ /releasenotes\/notes/ {print $2}') - -if [ "${UNCOMMITTED_NOTES}" ]; then - cat <=20.0.20 - pre-commit>=2.4.0 -commands = - python -m pre_commit run -a {posargs:} - bash -c tools/check_duplicate_jinja_blocks.sh - -[testenv:releasenotes] -deps = {[testenv:docs]deps} -commands = bash -c tools/releasenotes_tox.sh - -[testenv:requirements] -deps = - -egit+https://opendev.org/openstack/requirements#egg=openstack-requirements -allowlist_externals = sh -commands = - sh -c '{envdir}/src/openstack-requirements/playbooks/files/project-requirements-change.py --req {envdir}/src/openstack-requirements --local {toxinidir} master' diff --git a/tripleo_common/__init__.py b/tripleo_common/__init__.py deleted file mode 100644 index e3e3157c4..000000000 --- a/tripleo_common/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - - -__version__ = pbr.version.VersionInfo('tripleo_common') diff --git a/tripleo_common/arch.py b/tripleo_common/arch.py deleted file mode 100644 index 486da16ea..000000000 --- a/tripleo_common/arch.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Simple routines to map host architectures as expected by various components. -""" - -import os - - -def kernel_arch(): - """Return the kernel arch.""" - return os.uname()[4] - - -def dib_arch(): - """Return the kernel arch or the more appripriate DiB arch.""" - return {'x86_64': 'amd64'}.get(kernel_arch(), kernel_arch()) diff --git a/tripleo_common/constants.py b/tripleo_common/constants.py deleted file mode 100644 index d9c962395..000000000 --- a/tripleo_common/constants.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -#: The resource name used for package updates -UPDATE_RESOURCE_NAME = 'UpdateDeployment' - -#: The default timeout to pass to Heat stacks -STACK_TIMEOUT_DEFAULT = 240 - -#: The default name to use for a plan container -DEFAULT_CONTAINER_NAME = 'overcloud' - -#: The default name to use for the config files of the container -CONFIG_CONTAINER_NAME = 'overcloud-config' - -#: The path to the base directory of tripleo-validations -DEFAULT_VALIDATIONS_BASEDIR = "/usr/share/ansible" - -# Path to Tripleo heat templates directory -THT_BASE_DIR = "/usr/share/openstack-tripleo-heat-templates" - -# Path to system wide THT roles -ROLES_PATH_DEFAULT = os.path.join(THT_BASE_DIR, "roles") - -# 60 minutes maximum to build the child layers at the same time. 
-BUILD_TIMEOUT = 3600 - -#: List of names of parameters that contain passwords -PASSWORD_PARAMETER_NAMES = ( - 'AdminPassword', - 'AodhPassword', - 'BarbicanPassword', - 'BarbicanSimpleCryptoKek', - 'CeilometerMeteringSecret', - 'CeilometerPassword', - 'CephClientKey', - 'CephClusterFSID', - 'CephManilaClientKey', - 'CephRgwKey', - 'CephGrafanaAdminPassword', - 'CephDashboardAdminPassword', - 'CinderPassword', - 'DesignatePassword', - 'DesignateRndcKey', - 'EtcdInitialClusterToken', - 'GlancePassword', - 'GnocchiPassword', - 'HAProxyStatsPassword', - 'HeatAuthEncryptionKey', - 'HeatPassword', - 'HeatStackDomainAdminPassword', - 'HorizonSecret', - 'IronicPassword', - 'LibvirtTLSPassword', - 'KeystoneCredential0', - 'KeystoneCredential1', - 'KeystoneFernetKeys', - 'KeystonePassword', - 'ManilaPassword', - 'MysqlClustercheckPassword', - 'MysqlMariabackupPassword', - 'MysqlRootPassword', - 'NeutronMetadataProxySharedSecret', - 'NeutronPassword', - 'NovaPassword', - 'MigrationSshKey', - 'OctaviaServerCertsKeyPassphrase', - 'OctaviaCaKeyPassphrase', - 'OctaviaHeartbeatKey', - 'OctaviaPassword', - 'PacemakerRemoteAuthkey', - 'PcsdPassword', - 'PlacementPassword', - 'RpcPassword', - 'NotifyPassword', - 'RabbitCookie', - 'RabbitPassword', - 'RedisPassword', - 'SnmpdReadonlyUserPassword', - 'SwiftHashSuffix', - 'SwiftPassword', -) - -DB_PASSWORD_PARAMETER_NAMES = ( - 'AodhPassword', - 'BarbicanPassword', - 'CeilometerPassword', - 'CephGrafanaAdminPassword', - 'CephDashboardAdminPassword', - 'CinderPassword', - 'DesignatePassword', - 'GlancePassword', - 'GnocchiPassword', - 'HeatPassword', - 'HeatStackDomainAdminPassword', - 'IronicPassword', - 'LibvirtTLSPassword', - 'KeystonePassword', - 'ManilaPassword', - 'MysqlClustercheckPassword', - 'MysqlMariabackupPassword', - 'MysqlRootPassword', - 'NeutronPassword', - 'NovaPassword', - 'OctaviaPassword', - 'PlacementPassword', -) - -# List of passwords that should not be rotated by default using the -# GeneratePasswordAction because they require some special handling -DO_NOT_ROTATE_LIST = ( - 'BarbicanSimpleCryptoKek', - 'KeystoneCredential0', - 'KeystoneCredential1', - 'KeystoneFernetKeys', - 'CephClientKey', - 'CephClusterFSID', - 'CephManilaClientKey', - 'CephRgwKey', - 'HeatAuthEncryptionKey', - 'MysqlClustercheckPassword', - 'MysqlMariabackupPassword', - 'PacemakerRemoteAuthkey', - 'PcsdPassword', -) - -# The default version of the Identity API to set in overcloudrc. -DEFAULT_IDENTITY_API_VERSION = '3' - -# The default version of the Compute API to set in overcloudrc. -DEFAULT_COMPUTE_API_VERSION = '2.latest' - -# The default version of the Image API to set in overcloudrc. -DEFAULT_IMAGE_API_VERSION = '2' - -# The default version of the Volume API to set in overcloudrc. -DEFAULT_VOLUME_API_VERSION = '3' - -# Default nested depth when recursing Heat stacks -NESTED_DEPTH = 7 - -# Resource name for deployment resources when using config download -TRIPLEO_DEPLOYMENT_RESOURCE = 'TripleODeployment' - -HOST_NETWORK = 'ctlplane' - -DEFAULT_VLAN_ID = "1" - -# The key is different in RoleConfig than in RoleData, so we need both so they -# are correctly found. 
-EXTERNAL_TASKS = ['external_deploy_tasks', 'external_deploy_steps_tasks'] - -ANSIBLE_ERRORS_FILE = 'ansible-errors.json' - -EXCLUSIVE_NEUTRON_DRIVERS = ['ovn', 'openvswitch'] - -DEFAULT_STEPS_MAX = 6 - -_PER_STEP_TASK_STRICTNESS = [False for i in range(DEFAULT_STEPS_MAX)] - -PER_STEP_TASKS = { - 'upgrade_tasks': _PER_STEP_TASK_STRICTNESS, - 'deploy_steps_tasks': _PER_STEP_TASK_STRICTNESS, - 'update_tasks': _PER_STEP_TASK_STRICTNESS, - 'post_update_tasks': [False, False, False, False] -} - -INVENTORY_NETWORK_CONFIG_FILE = 'inventory-network-config.yaml' - -# Hard coded name in: -# tripleo_ansible/ansible_plugins/modules/tripleo_ovn_mac_addresses.py -OVN_MAC_ADDR_NET_NAME = 'ovn_mac_addr_net' - -# Default directory for the overcloud deployment, where all inputs, -# outputs and generated files are stored -DEFAULT_WORKING_DIR_FORMAT = os.path.join(os.environ.get('HOME', '~/'), - 'overcloud-deploy', '{}') - -# Format for password filename -PASSWORDS_ENV_FORMAT = '{}-passwords.yaml' diff --git a/tripleo_common/exception.py b/tripleo_common/exception.py deleted file mode 100644 index f5d392b74..000000000 --- a/tripleo_common/exception.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging -import sys - -from tripleo_common.i18n import _ - -_FATAL_EXCEPTION_FORMAT_ERRORS = False - -LOG = logging.getLogger(__name__) - - -class TripleoCommonException(Exception): - """Base Tripleo-Common Exception. - - To correctly use this class, inherit from it and define a 'msg_fmt' - property. That msg_fmt will get printf'd with the keyword arguments - provided to the constructor. 
- """ - message = _("An unknown exception occurred.") - - def __init__(self, **kwargs): - self.kwargs = kwargs - self.msg_fmt = self.message - - try: - self.message = self.msg_fmt % kwargs - except KeyError: - exc_info = sys.exc_info() - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception('Exception in string format operation') - for name, value in kwargs.items(): - LOG.error("%(name)s: %(value)s", - {'name': name, 'value': value}) # noqa - - if _FATAL_EXCEPTION_FORMAT_ERRORS: - raise Exception(exc_info[0], exc_info[1], exc_info[2]) - - def __str__(self): - return self.message - - def __deepcopy__(self, memo): - return self.__class__(**self.kwargs) - - -class StackInUseError(TripleoCommonException): - msg_fmt = _("Cannot delete a plan that has an associated stack.") - - -class InvalidNode(ValueError): - """Node data is invalid.""" - - def __init__(self, message, node=None): - message = 'Invalid node data: %s' % message - self.node = node - super(InvalidNode, self).__init__(message) - - -class Timeout(Exception): - """An operation timed out""" - - def __init__(self, message): - message = 'An operation timed out: %s' % message - super(Timeout, self).__init__(message) - - -class StateTransitionFailed(Exception): - """Ironic node state transition failed""" - - def __init__(self, node, target_state): - self.node = node - self.target_state = target_state - message = ( - "Error transitioning Ironic node %(uuid)s to provision state " - "%(state)s: %(error)s. Now in state %(actual)s." % { - 'uuid': node.uuid, - 'state': target_state, - 'error': node.last_error, - 'actual': node.provision_state - } - ) - super(StateTransitionFailed, self).__init__(message) - - -class RootDeviceDetectionError(Exception): - """Failed to detect the root device""" - - -class NotFound(Exception): - """Resource not found""" - - -class RoleMetadataError(Exception): - """Role metadata is invalid""" - - -class UnauthorizedException(Exception): - """Authorization failed""" - - -class GroupOsApplyConfigException(Exception): - """group:os-apply-config not supported with config-download""" - - def __init__(self, deployment_name): - self.deployment_name = deployment_name - message = ( - "Deployment %s with group:os-apply-config not supported with " - "config-download." % self.deployment_name) - super(GroupOsApplyConfigException, self).__init__(message) - - -class MissingMandatoryNeutronResourceTag(Exception): - """Missing mandatory neutron resource tag""" diff --git a/tripleo_common/filters/__init__.py b/tripleo_common/filters/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/filters/capabilities_filter.py b/tripleo_common/filters/capabilities_filter.py deleted file mode 100644 index edbd745dc..000000000 --- a/tripleo_common/filters/capabilities_filter.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova.scheduler import filters -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - - -class TripleOCapabilitiesFilter(filters.BaseHostFilter): - """Filter hosts based on capabilities in boot request - - The standard Nova ComputeCapabilitiesFilter does not respect capabilities - requested in the scheduler_hints field, so we need a custom one in order - to be able to do predictable placement of nodes. - """ - - # list of hosts doesn't change within a request - run_filter_once_per_request = True - - def host_passes(self, host_state, spec_obj): - host_node = host_state.stats.get('node') - instance_node = spec_obj.scheduler_hints.get('capabilities:node') - # The instance didn't request a specific node - if not instance_node: - LOG.debug('No specific node requested') - return True - if host_node == instance_node[0]: - LOG.debug('Node tagged %s matches requested node %s', host_node, - instance_node[0]) - return True - LOG.debug('Node tagged %s does not match requested node %s', - host_node, instance_node[0]) - return False diff --git a/tripleo_common/filters/list.py b/tripleo_common/filters/list.py deleted file mode 100644 index d125c92ca..000000000 --- a/tripleo_common/filters/list.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import nova - -from tripleo_common.filters import capabilities_filter - - -def tripleo_filters(): - """Return a list of filter classes for TripleO - - This is a wrapper around the Nova all_filters function so we can add our - filters to the resulting list. - """ - nova_filters = nova.scheduler.filters.all_filters() - return (nova_filters + [capabilities_filter.TripleOCapabilitiesFilter]) diff --git a/tripleo_common/i18n.py b/tripleo_common/i18n.py deleted file mode 100644 index 86f3df3ac..000000000 --- a/tripleo_common/i18n.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
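The TripleOCapabilitiesFilter removed above passes a host only when the host's 'node' stat matches the capabilities:node scheduler hint, which Nova delivers as a list. Stripped of the Nova plumbing, the decision reduces to roughly this sketch (host_passes here takes plain dicts instead of Nova objects):

def host_passes(host_stats, scheduler_hints):
    # No specific node requested: every host passes.
    requested = scheduler_hints.get('capabilities:node')
    if not requested:
        return True
    # Nova delivers hint values as lists, hence the [0].
    return host_stats.get('node') == requested[0]


assert host_passes({'node': 'node-1'}, {})
assert host_passes({'node': 'node-1'}, {'capabilities:node': ['node-1']})
assert not host_passes({'node': 'node-2'}, {'capabilities:node': ['node-1']})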
- -# https://docs.openstack.org/oslo.i18n/latest/user/usage.html - -import oslo_i18n as i18n - - -_translators = i18n.TranslatorFactory(domain='tripleo') - -# The primary translation function using the well-known name "_" -_ = _translators.primary diff --git a/tripleo_common/image/__init__.py b/tripleo_common/image/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/image/base.py b/tripleo_common/image/base.py deleted file mode 100644 index 29f867ee7..000000000 --- a/tripleo_common/image/base.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import json -import os -import yaml - -from oslo_log import log - -from tripleo_common.image.exception import ImageSpecificationException - - -class BaseImageManager(object): - logger = log.getLogger(__name__ + '.BaseImageManager') - APPEND_ATTRIBUTES = ['elements', 'options', 'packages'] - CONFIG_SECTIONS = ( - DISK_IMAGES, UPLOADS, CONTAINER_IMAGES, - CONTAINER_IMAGES_TEMPLATE - ) = ( - 'disk_images', 'uploads', 'container_images', - 'container_images_template' - ) - - def __init__(self, config_files, template_dir=None, images=None): - self.config_files = config_files - self.images = images - self.template_dir = template_dir - - def _extend_or_set_attribute(self, existing_image, image, attribute_name): - attribute = image.get(attribute_name) - if attribute: - try: - existing_image[attribute_name].update(attribute) - except AttributeError: - existing_image[attribute_name].extend(attribute) - except KeyError: - existing_image[attribute_name] = attribute - - def load_config_files(self, section): - config_data = collections.OrderedDict() - for config_file in self.config_files: - if os.path.isfile(config_file): - with open(config_file) as cf: - data = yaml.safe_load(cf.read()).get(section) - if not data: - return None - self.logger.debug('%s JSON: %s', section, str(data)) - for item in data: - image_name = item.get('imagename') - if image_name is None: - msg = 'imagename is required' - self.logger.error(msg) - raise ImageSpecificationException(msg) - - if self.images is not None and \ - image_name not in self.images: - self.logger.debug('Image %s ignored', image_name) - continue - - existing_image = config_data.get(image_name) - if not existing_image: - config_data[image_name] = item - continue - - for attr in self.APPEND_ATTRIBUTES: - self._extend_or_set_attribute(existing_image, item, - attr) - - # If a new key is introduced, add it. 
- for key, value in item.items(): - if key not in existing_image: - existing_image[key] = item[key] - - config_data[image_name] = existing_image - else: - self.logger.error('No config file exists at: %s', config_file) - raise IOError('No config file exists at: %s' % config_file) - return [x for x in config_data.values()] - - def json_output(self): - self.logger.info('Using config files: %s', self.config_files) - disk_images = self.load_config_files(self.DISK_IMAGES) - print(json.dumps(disk_images)) diff --git a/tripleo_common/image/build.py b/tripleo_common/image/build.py deleted file mode 100644 index c0ebcbb17..000000000 --- a/tripleo_common/image/build.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os -import re - -from oslo_log import log -from oslo_utils import strutils - -import tripleo_common.arch -from tripleo_common.image.base import BaseImageManager -from tripleo_common.image.exception import ImageSpecificationException -from tripleo_common.image.image_builder import ImageBuilder - - -class ImageBuildManager(BaseImageManager): - """Manage the building of image files - - Manage the building of images from a config file specified in YAML - syntax. Multiple config files can be specified. 
They will be merged. - """ - logger = log.getLogger(__name__ + '.ImageBuildManager') - - APPEND_ATTRIBUTES = BaseImageManager.APPEND_ATTRIBUTES + ['environment'] - - def __init__(self, config_files, images=None, output_directory='.', - skip=False): - super(ImageBuildManager, self).__init__(config_files, images=images) - self.output_directory = re.sub('[/]$', '', output_directory) - self.skip = skip - - def build(self): - """Start the build process""" - - self.logger.info('Using config files: %s', self.config_files) - - disk_images = self.load_config_files(self.DISK_IMAGES) - - for image in disk_images: - arch = image.get('arch', tripleo_common.arch.dib_arch()) - image_type = image.get('type', 'qcow2') - image_name = image.get('imagename') - builder = image.get('builder', 'dib') - skip_base = strutils.bool_from_string( - image.get('skip_base', False)) - docker_target = image.get('docker_target') - node_dist = image.get('distro') - if node_dist is None: - raise ImageSpecificationException('distro is required') - self.logger.info('imagename: %s', image_name) - image_extension = image.get('imageext', image_type) - image_path = os.path.join(self.output_directory, image_name) - if self.skip: - self.logger.info('looking for image at path: %s', image_path) - if os.path.exists('%s.%s' % (image_path, image_extension)): - self.logger.info('Image file exists for image name: %s', - image_name) - self.logger.info('Skipping image build') - continue - elements = image.get('elements', []) - options = image.get('options', []) - packages = image.get('packages', []) - environment = image.get('environment', {}) - - extra_options = { - 'skip_base': skip_base, - 'docker_target': docker_target, - 'environment': environment - } - - builder = ImageBuilder.get_builder(builder) - builder.build_image(image_path, image_type, node_dist, arch, - elements, options, packages, extra_options) diff --git a/tripleo_common/image/builder/__init__.py b/tripleo_common/image/builder/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/image/builder/base.py b/tripleo_common/image/builder/base.py deleted file mode 100644 index 9db19bd75..000000000 --- a/tripleo_common/image/builder/base.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -class BaseBuilder(object): - """Base Tripleo-Common Image Builder. - - For now it does nothing, but this interface will allow us - to support multiple builders and not just buildah or docker. - """ - - def __init__(self): - pass diff --git a/tripleo_common/image/builder/buildah.py b/tripleo_common/image/builder/buildah.py deleted file mode 100644 index fdf3d11a7..000000000 --- a/tripleo_common/image/builder/buildah.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -from concurrent import futures -import os -import pathlib -import tenacity - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging - -from tripleo_common import constants -from tripleo_common.image.builder import base -from tripleo_common.utils import process - -CONF = cfg.CONF -LOG = logging.getLogger(__name__ + ".BuildahBuilder") - - -class BuildahBuilder(base.BaseBuilder): - """Builder to build container images with Buildah.""" - - log = LOG - - def __init__(self, work_dir, deps, base='fedora', img_type='binary', - tag='latest', namespace='master', - registry_address='127.0.0.1:8787', push_containers=True, - volumes=[], excludes=[], build_timeout=None, debug=False): - """Set up the parameters to build with Buildah. - - :params work_dir: Directory where the Dockerfiles or Containerfiles - are generated by Kolla. - :params deps: Dictionary defining the container images - dependencies. - :params base: Base image on which the containers are built. - Default to fedora. - :params img_type: Method used to build the image. All TripleO images - are built with the binary method. Can be set to a false value to - remove it from the image name. - :params tag: Tag used to identify the images that we build. - Default to latest. - :params namespace: Namespace used to build the containers. - Default to master. - :params registry_address: IP + port of the registry where we push - the images. Default is 127.0.0.1:8787. - :params push_containers: Flag to bypass registry push if False. - Default is True. - :params volumes: Bind mount volumes used during buildah bud. - Default to []. - :params excludes: List of images to skip. Default to []. - :params build_timeout: Timeout. Default to constants.BUILD_TIMEOUT. - :params debug: Enable debug flag. Default to False. - """ - - logging.register_options(CONF) - if debug: - CONF.debug = True - logging.setup(CONF, '') - - super(BuildahBuilder, self).__init__() - if build_timeout is None: - self.build_timeout = constants.BUILD_TIMEOUT - else: - self.build_timeout = build_timeout - self.work_dir = work_dir - self.deps = deps - self.base = base - self.img_type = img_type - self.tag = tag - self.namespace = namespace - self.registry_address = registry_address - self.push_containers = push_containers - self.volumes = volumes - self.excludes = excludes - self.debug = debug - # Each container image has a Dockerfile or a Containerfile. - # Buildah needs to know the base directory later. - self.cont_map = {os.path.basename(root): root for root, dirs, - fnames in os.walk(self.work_dir) - if 'Dockerfile' in fnames or - 'Containerfile' in fnames} - # Building images with root so overlayfs is used, and not fuse-overlay - # from userspace, which would be slower. - self.buildah_cmd = ['sudo', 'buildah'] - if self.debug: - self.buildah_cmd.append('--log-level=debug') - - def _find_container_dir(self, container_name): - """Return the path of the Dockerfile/Containerfile directory. - - :params container_name: Name of the container.
- """ - - if container_name not in self.cont_map: - self.log.error('Container not found in Kolla ' - 'deps: %s' % container_name) - return self.cont_map.get(container_name, '') - - def _get_destination(self, container_name): - """Return the destination of a container image to push. - - :params container_name: Name of the container. - """ - - destination = "{}/{}/{}".format( - self.registry_address, - self.namespace, - self.base, - ) - if self.img_type: - destination += '-' + self.img_type - destination += '-' + container_name + ':' + self.tag - return destination - - def _generate_container(self, container_name): - """Generate a container image by building and pushing the image. - - :params container_name: Name of the container. - """ - - if container_name in self.excludes: - return - - # NOTE(mwhahaha): Use a try catch block so we can better log issues - # as this is called in a multiprocess fashion so the exception - # loses some information when it reaches _multi_build - try: - self.build(container_name, - self._find_container_dir(container_name)) - if self.push_containers: - self.push(self._get_destination(container_name)) - except Exception as e: - self.log.exception(e) - raise - - @tenacity.retry( - # Retry up to 5 times: 0, 1, 5, 21, 85 - # http://exponentialbackoffcalculator.com/ - reraise=True, - wait=tenacity.wait_random_exponential(multiplier=4, max=60), - stop=tenacity.stop_after_attempt(5), - before_sleep=tenacity.after_log(LOG, logging.WARNING) - ) - def build(self, container_name, container_build_path): - """Build an image from a given directory. - - :params container_name: Name of the container. - :params container_build_path: Directory where the Dockerfile or - Containerfile and other files are located to build the image. - """ - - # 'buildah bud' is the command we want because Kolla uses Dockefile to - # build images. - # TODO(emilien): Stop ignoring TLS. The deployer should either secure - # the registry or add it to insecure_registries. - logfile = container_build_path + '/' + container_name + '-build.log' - - # TODO(ramishra) Hack to make the logfile readable by current user, - # as we're running buildah as root. This would be removed once we - # move to rootless buildah. - pathlib.Path(logfile).touch() - - bud_args = ['bud', '--net=host'] - for v in self.volumes: - bud_args.extend(['--volume', v]) - if self.debug: - # TODO(bogdando): add --log-rusage for newer buildah - bud_args.extend(['--loglevel=3']) - # TODO(aschultz): drop --format docker when oci format is properly - # supported by the undercloud registry - bud_args.extend(['--format', 'docker', '--tls-verify=False', - '--logfile', logfile, '-t', - self._get_destination(container_name), - container_build_path]) - args = self.buildah_cmd + bud_args - self.log.info("Building %s image with: %s" % - (container_name, ' '.join(args))) - process.execute( - *args, - check_exit_code=True, - run_as_root=False, - use_standard_locale=True - ) - - @tenacity.retry( # Retry up to 10 times with jittered exponential backoff - reraise=True, - wait=tenacity.wait_random_exponential(multiplier=1, max=15), - stop=tenacity.stop_after_attempt(10), - before_sleep=tenacity.after_log(LOG, logging.WARNING) - ) - def push(self, destination): - """Push an image to a container registry. - - :params destination: URL to used to push the container. It contains - the registry address, namespace, base, img_type (optional), - container name and tag. - """ - # TODO(emilien): Stop ignoring TLS. 
The deployer should either secure - # the registry or add it to insecure_registries. - # TODO(emilien) We need to figure out how we can push to something - # other than a Docker registry. - args = self.buildah_cmd + ['push', '--tls-verify=False', destination, - 'docker://' + destination] - self.log.info("Pushing %s image with: %s" % - (destination, ' '.join(args))) - if self.debug: - # buildah push logs to stderr, since there is no --log* opt - # so we'll use the current logging context for that - process.execute(*args, log_stdout=True, run_as_root=False, - use_standard_locale=True, logger=self.log, - loglevel=logging.DEBUG) - else: - process.execute(*args, run_as_root=False, - use_standard_locale=True) - - def build_all(self, deps=None): - """Build all containers. - - This function will thread the build process allowing it to complete - in the shortest possible time. - - :params deps: Dictionary defining the container images - dependencies. - """ - - if deps is None: - deps = self.deps - - container_deps = self._generate_deps(deps=deps, containers=list()) - self.log.debug("All container deps: {}".format(container_deps)) - for containers in container_deps: - self.log.info("Processing containers: {}".format(containers)) - if isinstance(deps, (list,)): - self._multi_build(containers=containers) - else: - self._multi_build(containers=[containers]) - - def _generate_deps(self, deps, containers, prio_list=None): - """Browse container dependencies and return an array. - - When the dependencies are generated they're captured in an array, - which contains additional arrays. This data structure is later - used in a futures queue. - - :params deps: Dictionary defining the container images - dependencies. - :params containers: List used to keep track of dependent containers. - :params prio_list: List used to keep track of nested dependencies. - :returns: list - """ - - self.log.debug("Process deps: {}".format(deps)) - if isinstance(deps, str): - if prio_list: - prio_list.append(deps) - else: - containers.append([deps]) - - elif isinstance(deps, (dict,)): - parents = list(deps.keys()) - if prio_list: - prio_list.extend(parents) - else: - containers.append(parents) - for value in deps.values(): - self.log.debug("Recursing with: {}".format(value)) - self._generate_deps( - deps=value, - containers=containers - ) - - elif isinstance(deps, (list,)): - dep_list = list() - dep_rehash_list = list() - for item in deps: - if isinstance(item, str): - dep_list.append(item) - else: - dep_rehash_list.append(item) - - if dep_list: - containers.append(dep_list) - - for item in dep_rehash_list: - self.log.debug("Recursing with: {}".format(item)) - self._generate_deps( - deps=item, - containers=containers, - prio_list=dep_list - ) - - self.log.debug("Constructed containers: {}".format(containers)) - return containers - - def _multi_build(self, containers): - """Build multiple containers. - - Multi-thread the build process for all containers defined within - the containers list. - - :params containers: List defining the container images. - """ - - # Workers will use the processor core count with a max of 8. If - # the containers array has a length less than the expected processor - # count, the workers will be adjusted to meet the expectations of the - # work being processed.
- workers = min( - min( - 8, - max( - 2, - processutils.get_worker_count() - ) - ), - len(containers) - ) - with futures.ThreadPoolExecutor(max_workers=workers) as executor: - future_to_build = { - executor.submit( - self._generate_container, container_name - ): container_name for container_name in containers - } - done, not_done = futures.wait( - future_to_build, - timeout=self.build_timeout, - return_when=futures.FIRST_EXCEPTION - ) - - # NOTE(cloudnull): Once the job has been completed all completed - # jobs are checked for exceptions. If any jobs - # failed a SystemError will be raised using the - # exception information. If any job was loaded - # but not executed a SystemError will be raised. - exceptions = list() - for job in done: - if job._exception: - exceptions.append( - "\nException information: {exception}".format( - exception=job._exception - ) - ) - - if exceptions: - raise RuntimeError( - '\nThe following errors were detected during ' - 'container build(s):\n{exceptions}'.format( - exceptions='\n'.join(exceptions) - ) - ) - - if not_done: - error_msg = ( - 'The following jobs were incomplete: {}'.format( - [future_to_build[job] for job in not_done] - ) - ) - - jobs_with_exceptions = [{ - 'container': future_to_build[job], - 'exception': job._exception} - for job in not_done if job._exception] - if jobs_with_exceptions: - for job_with_exception in jobs_with_exceptions: - error_msg = error_msg + os.linesep + ( - "%(container)s raised the following " - "exception: %(exception)s" % - job_with_exception) - - raise SystemError(error_msg) diff --git a/tripleo_common/image/exception.py b/tripleo_common/image/exception.py deleted file mode 100644 index fde7ce8de..000000000 --- a/tripleo_common/image/exception.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -class ImageBuilderException(Exception): - pass - - -class ImageRateLimitedException(Exception): - """Rate Limited request""" - - -class ImageSpecificationException(Exception): - pass - - -class ImageUploaderException(Exception): - pass - - -class ImageUploaderThreadException(Exception): - """Conflict during thread processing""" - pass - - -class ImageNotFoundException(Exception): - pass diff --git a/tripleo_common/image/image_builder.py b/tripleo_common/image/image_builder.py deleted file mode 100644 index ce652632a..000000000 --- a/tripleo_common/image/image_builder.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -import abc -import logging -import os -import shlex -import subprocess -import sys - -from tripleo_common.image.exception import ImageBuilderException - -if sys.version_info[0] < 3: - import codecs - _open = open - open = codecs.open - - -class ImageBuilder(object, metaclass=abc.ABCMeta): - """Base representation of an image building method""" - - @staticmethod - def get_builder(builder): - if builder == 'dib': - return DibImageBuilder() - raise ImageBuilderException('Unknown image builder type') - - @abc.abstractmethod - def build_image(self, image_path, image_type, node_dist, arch, elements, - options, packages, extra_options={}): - """Build a disk image""" - pass - - -class DibImageBuilder(ImageBuilder): - """Build images using diskimage-builder""" - - logger = logging.getLogger(__name__ + '.DibImageBuilder') - handler = logging.StreamHandler(sys.stdout) - - # NOTE(bnemec): This may not play nicely with callers other than the - # openstackclient. However, since at this time there are no such other - # callers we can deal with that if/when it happens. - def _configure_logging(self): - """Ensure our info level log output gets seen - - The default openstackclient logging level is warning, which means - our info messages for the image build are not visible to the user. - By adding our own local handler we can ensure that the messages get - logged in a visible way. - - To avoid duplicate log messages, we need to not propagate them to - parent loggers. Otherwise we end up with both our handler and the - parent handler logging warning and above messages. - """ - if not self.logger.handlers: - self.logger.addHandler(self.handler) - self.logger.propagate = False - - def build_image(self, image_path, image_type, node_dist, arch, elements, - options, packages, extra_options={}): - self._configure_logging() - env = os.environ.copy() - - elements_path = env.get('ELEMENTS_PATH') - if elements_path is None: - env['ELEMENTS_PATH'] = os.pathsep.join([ - "/usr/share/tripleo-puppet-elements", - '/usr/share/tripleo-image-elements', - '/usr/share/ironic-python-agent-builder/dib', - ]) - os.environ.update(env) - - cmd = ['disk-image-create', '-a', arch, '-o', image_path, - '-t', image_type] - - if packages: - cmd.append('-p') - cmd.append(','.join(packages)) - - if options: - for option in options: - cmd.extend(shlex.split(option)) - - skip_base = extra_options.get('skip_base', False) - if skip_base: - cmd.append('-n') - - docker_target = extra_options.get('docker_target') - if docker_target: - cmd.append('--docker-target') - cmd.append(docker_target) - - environment = extra_options.get('environment') - if environment: - os.environ.update(environment) - - if node_dist: - cmd.append(node_dist) - - cmd.extend(elements) - - log_file = '%s.log' % image_path - - self.logger.info('Running %s', cmd) - self.logger.info('Logging output to %s', log_file) - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - with open(log_file, 'w', encoding='utf-8') as f: - while True: - line = process.stdout.readline() - try: - line = line.decode('utf-8') - except AttributeError: - # In Python 3 there is no decode method, but we don't need - # to decode because strings are always unicode. 
- pass - if line: - self.logger.info(line.rstrip()) - f.write(line) - if line == '' and process.poll() is not None: - break - if process.returncode != 0: - raise subprocess.CalledProcessError(process.returncode, cmd) diff --git a/tripleo_common/image/image_export.py b/tripleo_common/image/image_export.py deleted file mode 100644 index 133f4c528..000000000 --- a/tripleo_common/image/image_export.py +++ /dev/null @@ -1,474 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import collections -import errno -from functools import wraps -import hashlib -import json -import os -import requests -import shutil - -from oslo_log import log as logging -from tripleo_common.utils import image as image_utils - -LOG = logging.getLogger(__name__) - - -IMAGE_EXPORT_DIR = '/var/lib/image-serve' - -MEDIA_TYPES = ( - MEDIA_MANIFEST_V1, - MEDIA_MANIFEST_V1_SIGNED, - MEDIA_MANIFEST_V2, - MEDIA_MANIFEST_V2_LIST, -) = ( - 'application/vnd.docker.distribution.manifest.v1+json', - 'application/vnd.docker.distribution.manifest.v1+prettyjws', - 'application/vnd.docker.distribution.manifest.v2+json', - 'application/vnd.docker.distribution.manifest.list.v2+json', -) - -TYPE_KEYS = ( - TYPE_KEY_URI, - TYPE_KEY_TYPE -) = ( - 'URI', - 'Content-Type' -) - -TYPE_MAP_EXTENSION = '.type-map' - - -def skip_if_exists(f): - @wraps(f) - def wrapper(*args, **kwargs): - try: - return f(*args, **kwargs) - except OSError as e: - # Handle race for the already existing entity - if e.errno == errno.EEXIST: - pass - else: - raise e - return wrapper - - -@skip_if_exists -def make_dir(path): - if os.path.exists(path): - return - os.makedirs(path, 0o775) - - -def image_tag_from_url(image_url): - parts = image_url.path.split(':') - if len(parts) == 1: - tag = None - image = parts[0] - else: - tag = parts[-1] - image = ':'.join(parts[:-1]) - - # strip leading slash - if image.startswith('/'): - image = image[1:] - - return image, tag - - -@skip_if_exists -def export_stream(target_url, layer, layer_stream, verify_digest=True): - image, _ = image_tag_from_url(target_url) - digest = layer['digest'] - blob_dir_path = os.path.join(IMAGE_EXPORT_DIR, 'v2', image, 'blobs') - make_dir(blob_dir_path) - blob_path = os.path.join(blob_dir_path, '%s.gz' % digest) - - LOG.debug('[%s] Export layer to %s' % (image, blob_path)) - - length = 0 - calc_digest = hashlib.sha256() - - def remove_layer(image, blob_path): - if os.path.isfile(blob_path): - os.remove(blob_path) - LOG.error('[%s] Broken layer found and removed %s' % - (image, blob_path)) - - try: - fd = os.open(blob_path, os.O_WRONLY | os.O_CREAT) - os.fchmod(fd, 0o0644) - with open(fd, 'wb') as f: - count = 0 - for chunk in layer_stream: - count += 1 - if not chunk: - break - LOG.debug('[%s] Writing chunk %i for %s' % - (image, count, digest)) - f.write(chunk) - calc_digest.update(chunk) - length += len(chunk) - LOG.debug('[%s] Written %i bytes for %s' % - (image, length, digest)) - except MemoryError as e: - memory_error = '[{}] Memory Error: {}'.format(image, 
str(e)) - LOG.error(memory_error) - remove_layer(image, blob_path) - raise MemoryError(memory_error) - except requests.exceptions.HTTPError as e: - # catch http errors separately as those can be retried in - # the image uploader - http_error = '[{}] HTTP error: {}'.format(image, str(e)) - LOG.error(http_error) - remove_layer(image, blob_path) - raise - except Exception as e: - write_error = '[{}] Write Failure: {}'.format(image, str(e)) - LOG.error(write_error) - remove_layer(image, blob_path) - raise IOError(write_error) - else: - LOG.info('[%s] Layer written successfully %s' % (image, blob_path)) - - layer_digest = 'sha256:%s' % calc_digest.hexdigest() - LOG.debug('[%s] Provided digest: %s, Calculated digest: %s' % - (image, digest, layer_digest)) - - if verify_digest: - if digest != layer_digest: - hash_request_id = hashlib.sha1(str(target_url.geturl()).encode()) - error_msg = ( - '[%s] Image ID: %s, Expected digest "%s" does not match' - ' calculated digest "%s", Blob path "%s". Blob' - ' path will be cleaned up.' % ( - image, - hash_request_id.hexdigest(), - digest, - layer_digest, - blob_path - ) - ) - LOG.error(error_msg) - if os.path.isfile(blob_path): - os.remove(blob_path) - raise requests.exceptions.HTTPError(error_msg) - else: - # if the original layer is uncompressed - # the digest may change on export - expected_blob_path = os.path.join( - blob_dir_path, '%s.gz' % layer_digest - ) - if blob_path != expected_blob_path: - os.rename(blob_path, expected_blob_path) - blob_path = expected_blob_path - - layer['digest'] = layer_digest - layer['size'] = length - LOG.debug('[%s] Done exporting image layer %s' % (image, digest)) - return (layer_digest, blob_path) - - -@skip_if_exists -def layer_cross_link(layer, image, blob_path, target_image_url): - target_image, _ = image_tag_from_url(target_image_url) - target_dir_path = os.path.join( - IMAGE_EXPORT_DIR, 'v2', target_image, 'blobs') - make_dir(target_dir_path) - target_blob_path = os.path.join(target_dir_path, '%s.gz' % layer) - if not os.path.exists(target_blob_path): - LOG.debug('[%s] Linking layers: %s -> %s' % - (image, blob_path, target_blob_path)) - # make a hard link so the layers can have independent lifecycles - os.link(blob_path, target_blob_path) - - -def cross_repo_mount(target_image_url, image_layers, source_layers, - uploaded_layers=None): - linked_layers = {} - target_image, _ = image_tag_from_url(target_image_url) - for layer in source_layers: - known_path, ref_image = image_utils.uploaded_layers_details( - uploaded_layers, layer, scope='local') - - if layer not in image_layers and not ref_image: - continue - - image_url = image_layers.get(layer, None) - if image_url: - image, _ = image_tag_from_url(image_url) - else: - image = ref_image - if not image: - continue - - if known_path and ref_image: - blob_path = known_path - image = ref_image - if ref_image != image: - LOG.debug('[%s] Layer ref.
by image %s already exists ' - 'at %s' % (image, ref_image, known_path)) - else: - LOG.debug('[%s] Layer already exists at %s' - % (image, known_path)) - else: - dir_path = os.path.join(IMAGE_EXPORT_DIR, 'v2', image, 'blobs') - blob_path = os.path.join(dir_path, '%s.gz' % layer) - if not os.path.exists(blob_path): - LOG.debug('[%s] Layer not found: %s' % (image, blob_path)) - continue - - layer_cross_link(layer, image, blob_path, target_image_url) - linked_layers.update({layer: {'known_path': blob_path, - 'ref_image': image}}) - return linked_layers - - -def export_manifest_config(target_url, - manifest_str, - manifest_type, - config_str, - multi_arch=False): - image, tag = image_tag_from_url(target_url) - manifest = json.loads(manifest_str) - if config_str is not None: - blob_dir_path = os.path.join( - IMAGE_EXPORT_DIR, 'v2', image, 'blobs') - make_dir(blob_dir_path) - config_digest = manifest['config']['digest'] - config_path = os.path.join(blob_dir_path, config_digest) - - with open(config_path, 'w+b') as f: - f.write(config_str.encode('utf-8')) - - calc_digest = hashlib.sha256() - calc_digest.update(manifest_str.encode('utf-8')) - manifest_digest = 'sha256:%s' % calc_digest.hexdigest() - - manifests_path = os.path.join( - IMAGE_EXPORT_DIR, 'v2', image, 'manifests') - manifest_dir_path = os.path.join(manifests_path, manifest_digest) - manifest_path = os.path.join(manifest_dir_path, 'index.json') - htaccess_path = os.path.join(manifest_dir_path, '.htaccess') - - make_dir(manifest_dir_path) - - headers = collections.OrderedDict() - headers['Content-Type'] = manifest_type - headers['Docker-Content-Digest'] = manifest_digest - headers['ETag'] = manifest_digest - with open(htaccess_path, 'w+') as f: - for header in headers.items(): - f.write('Header set %s "%s"\n' % header) - - with open(manifest_path, 'w+b') as f: - manifest_data = manifest_str.encode('utf-8') - f.write(manifest_data) - - manifest_dict = {} - if multi_arch: - if manifest_type == MEDIA_MANIFEST_V2_LIST: - manifest_dict[manifest_type] = manifest_digest - # choose one of the entries to be the default v2 manifest - # to return: - # - If architecture amd64 exists, choose that - # - Otherwise choose the first entry - entries = manifest.get('manifests') - if entries: - entry = None - for i in entries: - if i.get('platform', {}).get('architecture') == 'amd64': - entry = i - break - if not entry: - entry = entries[0] - manifest_dict[entry['mediaType']] = entry['digest'] - - else: - manifest_dict[manifest_type] = manifest_digest - - if manifest_dict: - write_type_map_file(image, tag, manifest_dict) - build_tags_list(image) - build_catalog() - - -def write_type_map_file(image, tag, manifest_dict): - manifests_path = os.path.join( - IMAGE_EXPORT_DIR, 'v2', image, 'manifests') - type_map_path = os.path.join(manifests_path, '%s%s' % - (tag, TYPE_MAP_EXTENSION)) - with open(type_map_path, 'w+') as f: - f.write('URI: %s\n\n' % tag) - for manifest_type, digest in manifest_dict.items(): - f.write('Content-Type: %s\n' % manifest_type) - f.write('URI: %s/index.json\n\n' % digest) - - -def parse_type_map_file(type_map_path): - uri = None - content_type = None - type_map = {} - with open(type_map_path, 'r') as f: - for x in f: - line = x[:-1] - if not line: - if uri and content_type: - type_map[content_type] = uri - uri = None - content_type = None - else: - key, value = line.split(': ') - if key == TYPE_KEY_URI: - uri = value - elif key == TYPE_KEY_TYPE: - content_type = value - return type_map - - -def migrate_to_type_map_file(image, 
manifest_symlink_path): - tag = os.path.split(manifest_symlink_path)[-1] - manifest_dir = os.readlink(manifest_symlink_path) - manifest_digest = os.path.split(manifest_dir)[-1] - write_type_map_file(image, tag, {MEDIA_MANIFEST_V2: manifest_digest}) - os.remove(manifest_symlink_path) - - -def build_tags_list(image): - manifests_path = os.path.join( - IMAGE_EXPORT_DIR, 'v2', image, 'manifests') - tags_dir_path = os.path.join(IMAGE_EXPORT_DIR, 'v2', image, 'tags') - tags_list_path = os.path.join(tags_dir_path, 'list') - LOG.debug('[%s] Rebuilding %s' % (image, tags_dir_path)) - make_dir(tags_dir_path) - tags = [] - for f in os.listdir(manifests_path): - f_path = os.path.join(manifests_path, f) - if os.path.islink(f_path): - tags.append(f) - migrate_to_type_map_file(image, f_path) - if f.endswith(TYPE_MAP_EXTENSION): - tags.append(f[:-len(TYPE_MAP_EXTENSION)]) - - tags_data = { - "name": image, - "tags": tags - } - with open(tags_list_path, 'w+b') as f: - f.write(json.dumps(tags_data, ensure_ascii=False).encode('utf-8')) - - -def build_catalog(): - catalog_path = os.path.join(IMAGE_EXPORT_DIR, 'v2', '_catalog') - catalog_entries = [] - images_path = os.path.join(IMAGE_EXPORT_DIR, 'v2') - metadata_set = set(['blobs', 'manifests', 'tags']) - LOG.debug(f'Rebuilding {catalog_path} based on images_path {images_path}') - for folder, contents_set, files in os.walk(images_path): - if metadata_set.issubset(contents_set): - image = folder.replace(f'{images_path}/', '') - LOG.debug(f'Adding image {image} to catalog') - catalog_entries.append(image) - - catalog = {'repositories': catalog_entries} - with open(catalog_path, 'w+b') as f: - f.write(json.dumps(catalog, ensure_ascii=False).encode('utf-8')) - - -def delete_image(image_url): - image, tag = image_tag_from_url(image_url) - manifests_path = os.path.join( - IMAGE_EXPORT_DIR, 'v2', image, 'manifests') - - manifest_symlink_path = os.path.join(manifests_path, tag) - if os.path.exists(manifest_symlink_path): - LOG.debug('[%s] Deleting legacy tag symlink %s' % - (image, manifest_symlink_path)) - os.remove(manifest_symlink_path) - - type_map_path = os.path.join(manifests_path, '%s%s' % - (tag, TYPE_MAP_EXTENSION)) - if os.path.exists(type_map_path): - LOG.debug('[%s] Deleting typemap file %s' % (image, type_map_path)) - os.remove(type_map_path) - - build_tags_list(image) - - # build list of manifest_dir_path without symlinks - linked_manifest_dirs = set() - manifest_dirs = set() - for f in os.listdir(manifests_path): - f_path = os.path.join(manifests_path, f) - if f_path.endswith(TYPE_MAP_EXTENSION): - for uri in parse_type_map_file(f_path).values(): - linked_manifest_dir = os.path.dirname( - os.path.join(manifests_path, uri)) - linked_manifest_dirs.add(linked_manifest_dir) - elif os.path.isdir(f_path): - manifest_dirs.add(f_path) - - delete_manifest_dirs = manifest_dirs.difference(linked_manifest_dirs) - - # delete list of manifest_dir_path without symlinks - for manifest_dir in delete_manifest_dirs: - LOG.debug('[%s] Deleting manifest %s' % (image, manifest_dir)) - shutil.rmtree(manifest_dir) - - # load all remaining manifests and build the set of in-use blobs, - # delete any layer blob not in-use - reffed_blobs = set() - blobs_path = os.path.join(IMAGE_EXPORT_DIR, 'v2', image, 'blobs') - - def add_reffed_blob(digest): - blob_path = os.path.join(blobs_path, digest) - gz_blob_path = os.path.join(blobs_path, '%s.gz' % digest) - if os.path.isfile(gz_blob_path): - reffed_blobs.add(gz_blob_path) - elif
os.path.isfile(blob_path): - reffed_blobs.add(blob_path) - - for manifest_dir in linked_manifest_dirs: - manifest_path = os.path.join(manifest_dir, 'index.json') - with open(manifest_path) as f: - manifest = json.load(f) - v1manifest = manifest.get('schemaVersion', 2) == 1 - - if v1manifest: - for layer in manifest.get('fsLayers', []): - add_reffed_blob(layer.get('blobSum')) - else: - for layer in manifest.get('layers', []): - add_reffed_blob(layer.get('digest')) - add_reffed_blob(manifest.get('config', {}).get('digest')) - - all_blobs = set([os.path.join(blobs_path, b) - for b in os.listdir(blobs_path)]) - delete_blobs = all_blobs.difference(reffed_blobs) - for blob in delete_blobs: - LOG.debug('[%s] Deleting layer blob %s' % (image, blob)) - os.remove(blob) - - # if no files left in manifests_path, delete the whole image - remaining = os.listdir(manifests_path) - if not remaining or remaining == ['.htaccess']: - image_path = os.path.join(IMAGE_EXPORT_DIR, 'v2', image) - LOG.debug('[%s] Deleting image directory %s' % (image, image_path)) - shutil.rmtree(image_path) - - # rebuild the catalog for the current image list - build_catalog() diff --git a/tripleo_common/image/image_uploader.py b/tripleo_common/image/image_uploader.py deleted file mode 100644 index 230aca988..000000000 --- a/tripleo_common/image/image_uploader.py +++ /dev/null @@ -1,2612 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import base64 -from concurrent import futures -import hashlib -import json -import os -import random -import re -import requests -from requests import auth as requests_auth -from requests.adapters import HTTPAdapter -import shutil -from urllib.parse import urlparse -import socket -import subprocess -import tempfile -import tenacity -import yaml - -from datetime import datetime -from dateutil.parser import parse as dt_parse -from dateutil.tz import tzlocal -from oslo_concurrency import processutils -from oslo_log import log as logging -from tripleo_common.utils import ansible -from tripleo_common.image.base import BaseImageManager -from tripleo_common.image.exception import ImageNotFoundException -from tripleo_common.image.exception import ImageRateLimitedException -from tripleo_common.image.exception import ImageUploaderException -from tripleo_common.image.exception import ImageUploaderThreadException -from tripleo_common.image import image_export -from tripleo_common.utils import image as image_utils -from tripleo_common.utils.locks import threadinglock - - -LOG = logging.getLogger(__name__) - - -SECURE_REGISTRIES = ( - 'quay.rdoproject.org', - 'registry.redhat.io', - 'registry.access.redhat.com', - 'docker.io', - 'registry-1.docker.io', -) - -NO_VERIFY_REGISTRIES = () - -CLEANUP = ( - CLEANUP_FULL, CLEANUP_PARTIAL, CLEANUP_NONE -) = ( - 'full', 'partial', 'none' -) - -CALL_TYPES = ( - CALL_PING, - CALL_MANIFEST, - CALL_BLOB, - CALL_UPLOAD, - CALL_TAGS, - CALL_CATALOG -) = ( - '/', - '%(image)s/manifests/%(tag)s', - '%(image)s/blobs/%(digest)s', - '%(image)s/blobs/uploads/', - '%(image)s/tags/list', - '/_catalog', -) - -MEDIA_TYPES = ( - MEDIA_MANIFEST_V1, - MEDIA_MANIFEST_V1_SIGNED, - MEDIA_MANIFEST_V2, - MEDIA_MANIFEST_V2_LIST, - MEDIA_OCI_MANIFEST_V1, - MEDIA_OCI_CONFIG_V1, - MEDIA_OCI_INDEX_V1, - MEDIA_OCI_LAYER, - MEDIA_OCI_LAYER_COMPRESSED, - MEDIA_CONFIG, - MEDIA_BLOB, - MEDIA_BLOB_COMPRESSED -) = ( - 'application/vnd.docker.distribution.manifest.v1+json', - 'application/vnd.docker.distribution.manifest.v1+prettyjws', - 'application/vnd.docker.distribution.manifest.v2+json', - 'application/vnd.docker.distribution.manifest.list.v2+json', - 'application/vnd.oci.image.manifest.v1+json', - 'application/vnd.oci.image.config.v1+json', - 'application/vnd.oci.image.index.v1+json', - 'application/vnd.oci.image.layer.v1.tar', - 'application/vnd.oci.image.layer.v1.tar+gzip', - 'application/vnd.docker.container.image.v1+json', - 'application/vnd.docker.image.rootfs.diff.tar', - 'application/vnd.docker.image.rootfs.diff.tar.gzip' -) - -DEFAULT_UPLOADER = 'python' - - -def get_undercloud_registry(): - ctlplane_hostname = '.'.join([socket.gethostname().split('.')[0], - 'ctlplane']) - cmd = ['getent', 'hosts', ctlplane_hostname] - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, - universal_newlines=True) - out, err = process.communicate() - - if process.returncode != 0: - LOG.warning('No entry for %s in /etc/hosts. Falling back to use the ' - 'default (localhost) undercloud registry.' - % ctlplane_hostname) - address = 'localhost' - else: - address = out.split()[1] - - return '%s:%s' % (address, '8787') - - -class MakeSession(object): - """Class method to uniformly create sessions. - - Sessions created by this class will retry on errors with an exponential - backoff before raising an exception. Because our primary interaction is - with the container registries the adapter will also retry on 401 and - 404. 
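# NOTE(illustrative): a minimal sketch, not part of the original module,
# showing how the CALL_* path templates above expand into Docker Registry
# v2 API paths. The image name and tag used here are hypothetical.
call_manifest = '%(image)s/manifests/%(tag)s'   # same shape as CALL_MANIFEST
parts = {'image': '/tripleomaster/centos-binary-nova-api',
         'tag': 'current-tripleo'}
assert (call_manifest % parts ==
        '/tripleomaster/centos-binary-nova-api/manifests/current-tripleo')
# _build_url() later prefixes scheme, registry host and '/v2', yielding e.g.
# https://registry-1.docker.io/v2/tripleomaster/centos-binary-nova-api/manifests/current-tripleo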
This is being done because registries commonly return 401 when an - image is not found, which is commonly a cache miss. See the adapter - definitions for more on retry details. - """ - def __init__(self, verify=True): - self.session = requests.Session() - self.session.verify = verify - adapter = HTTPAdapter( - max_retries=8, - pool_connections=24, - pool_maxsize=24, - pool_block=False - ) - self.session.mount('http://', adapter) - self.session.mount('https://', adapter) - - def create(self): - return self.__enter__() - - def __enter__(self): - return self.session - - def __exit__(self, *args, **kwargs): - self.session.close() - - -class RegistrySessionHelper(object): - """ Class with various registry session helpers - - This class contains a bunch of static methods to be used when making - session requests against a container registry. The methods are primarily - used to handle authentication/reauthentication for the requests against - registries that require auth. - """ - @staticmethod - def check_status(session, request, allow_reauth=True): - """ Check request status and trigger reauth - - This function can be used to check if we need to perform authentication - for a container registry request because we've gotten a 401. - """ - hash_request_id = hashlib.sha1(str(request.url).encode()) - request_id = hash_request_id.hexdigest() - text = getattr(request, 'text', 'unknown') - reason = getattr(request, 'reason', 'unknown') - status_code = getattr(request, 'status_code', None) - headers = getattr(request, 'headers', {}) - session_headers = getattr(session, 'headers', {}) - - if status_code >= 300: - LOG.info( - 'Non-2xx: id {}, status {}, reason {}, text {}'.format( - request_id, - status_code, - reason, - text - ) - ) - - if status_code == 401: - LOG.warning( - 'Failure: id {}, status {}, reason {} text {}'.format( - request_id, - status_code, - reason, - text - ) - ) - LOG.debug( - 'Request headers after 401: id {}, headers {}'.format( - request_id, - headers - ) - ) - LOG.debug( - 'Session headers after 401: id {}, headers {}'.format( - request_id, - session_headers - ) - ) - - www_auth = headers.get( - 'www-authenticate', - headers.get( - 'Www-Authenticate' - ) - ) - if www_auth: - error = None - # Handle docker.io shenanigans. docker.io will return 401 - # for 403 and 404 but provide an error string. Other registries - # like registry.redhat.io and quay.io do not do this. So if - # we find an error string, check to see if we should reauth. - do_reauth = allow_reauth - if 'error=' in www_auth: - error = re.search('error="(.*?)"', www_auth).group(1) - LOG.warning( - 'Error detected in auth headers: error {}'.format( - error - ) - ) - do_reauth = (error == 'invalid_token' and allow_reauth) - if do_reauth: - if hasattr(session, 'reauthenticate'): - reauth = int(session.headers.get('_TripleOReAuth', 0)) - reauth += 1 - session.headers['_TripleOReAuth'] = str(reauth) - LOG.warning( - 'Re-authenticating: id {}, count {}'.format( - request_id, - reauth - ) - ) - session.reauthenticate(**session.auth_args) - - if status_code == 429: - raise ImageRateLimitedException('Rate Limited while requesting ' - '{}'.format(request.url)) - - request.raise_for_status() - - @staticmethod - def check_redirect_trusted(request_response, request_session, - stream=True, timeout=30): - """Check if we've been redirected to a trusted source - - Because we may be using auth, we may not want to leak authentication - keys to an untrusted source. 
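# NOTE(illustrative): a minimal sketch, not part of the original module, of
# the re-authentication decision check_status() makes above. Only an
# 'invalid_token' error in the www-authenticate header triggers a reauth;
# docker.io embeds other error strings (it answers 401 even for 403/404
# conditions) and those must not loop through authentication again.
# should_reauthenticate() is a hypothetical helper name.
import re

def should_reauthenticate(www_auth_header, allow_reauth=True):
    # mirrors the branch logic shown above
    if 'error=' not in www_auth_header:
        return allow_reauth
    error = re.search('error="(.*?)"', www_auth_header).group(1)
    return allow_reauth and error == 'invalid_token'

assert should_reauthenticate('Bearer realm="r",error="invalid_token"')
assert not should_reauthenticate('Bearer realm="r",error="insufficient_scope"')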
If we get a redirect, we need to check - that the redirect url is one of our sources that we trust. Otherwise - we drop the Authorization header from the redirect request. We'll - add the header back into the request session after performing the - request so that future usage of the session remains authenticated. - - :param: request_response: Response object of the request to check - :param: request_session: Session to use when redirecting - :param: stream: Should we stream the response of the redirect - :param: timeout: Timeout for the redirect request - """ - # we're not a redirect, just return the original response - if not (request_response.status_code >= 300 - and request_response.status_code < 400): - return request_response - # parse the destination location - redir_url = urlparse(request_response.headers['Location']) - # close the response since we're going to replace it - request_response.close() - auth_header = request_session.headers.pop('Authorization', None) - # ok we got a redirect, let's check where we are going - if len([h for h in SECURE_REGISTRIES if h in redir_url.netloc]) > 0: - # we're going to a trusted location, add the header back and - # return response - request_session.headers.update({'Authorization': auth_header}) - request_response = request_session.get(redir_url.geturl(), - stream=stream, - timeout=timeout) - else: - # we didn't trust the place we're going, request without auth but - # add the auth back to the request session afterwards - request_response = request_session.get(redir_url.geturl(), - stream=stream, - timeout=timeout) - request_session.headers.update({'Authorization': auth_header}) - - request_response.encoding = 'utf-8' - # recheck status here to make sure we didn't get a 401 from - # our redirect host path. - RegistrySessionHelper.check_status(session=request_session, - request=request_response) - return request_response - - @staticmethod - def get_cached_bearer_token(lock=None, scope=None): - if not lock: - return None - with lock.get_lock(): - data = lock.sessions().get(scope) - if data: - # expires_in is an integer value from the issued_at time. - # We will use this to determine if the token should be expired - # https://www.rfc-editor.org/rfc/rfc6749#section-4.2.2 - if data.get('expires_in'): - now = datetime.now(tzlocal()) - expires_in = data.get('expires_in') - token_time = dt_parse(data.get('issued_at')) - if (now - token_time).seconds < expires_in: - return data['token'] - - # TODO(bshephar) Remove once Satellite returns expected expiry - # expires_at uses UTC date and time format. Although this - # field doesn't appear in the RFC for OAuth, it appears some - # APIs return a response with expires_at instead of expires_in - # https://www.rfc-editor.org/rfc/rfc6749 - # https://bugzilla.redhat.com/show_bug.cgi?id=2134075 - # https://bugzilla.redhat.com/show_bug.cgi?id=2138743 - elif data.get('expires_at'): - now = datetime.utcnow() - expires = datetime.strptime(data.get('expires_at'), - "%Y-%m-%dT%H:%M:%S.%fZ") - if now < expires: - return data['token'] - - # If we don't have expires_in or expires_at, we can just - # return the token since it probably doesn't expire.
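# NOTE(illustrative): a minimal sketch, not part of the original module, of
# the two expiry checks performed above. expires_in is a lifetime in
# seconds relative to issued_at (RFC 6749), while expires_at is an absolute
# UTC timestamp that some registries (e.g. Satellite) return instead. All
# timestamps below are made up for the example.
from datetime import datetime

issued_at = datetime(2024, 1, 1, 12, 0, 0)
now = datetime(2024, 1, 1, 12, 4, 0)
assert (now - issued_at).seconds < 300        # expires_in path: still valid

expires_at = datetime.strptime('2024-01-01T12:05:00.000000Z',
                               '%Y-%m-%dT%H:%M:%S.%fZ')
assert now < expires_at                       # expires_at path: still valid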
- else: - return data['token'] - - return None - - @staticmethod - def get_bearer_token(session, lock=None, username=None, password=None, - realm=None, service=None, scope=None): - cached_token = RegistrySessionHelper.get_cached_bearer_token(lock, - scope) - if cached_token: - return cached_token - - auth = None - token_param = {} - if service: - token_param['service'] = service - if scope: - token_param['scope'] = scope - if username: - auth = requests.auth.HTTPBasicAuth(username, password) - - auth_req = session.get(realm, params=token_param, auth=auth, - timeout=30) - auth_req.raise_for_status() - resp = auth_req.json() - if lock and 'token' in resp: - with lock.get_lock(): - lock.sessions().update({scope: resp}) - elif lock and 'token' not in resp: - raise Exception('Invalid auth response, no token provide') - hash_request_id = hashlib.sha1(str(auth_req.url).encode()) - LOG.debug( - 'Session authenticated: id {}'.format( - hash_request_id.hexdigest() - ) - ) - return resp['token'] - - @staticmethod - def parse_www_authenticate(header): - auth_type = None - auth_type_match = re.search('^([A-Za-z]*) ', header) - if auth_type_match: - auth_type = auth_type_match.group(1) - if not auth_type: - return (None, None, None) - realm = None - service = None - if 'realm=' in header: - realm = re.search('realm="(.*?)"', header).group(1) - if 'service=' in header: - service = re.search('service="(.*?)"', header).group(1) - return (auth_type, realm, service) - - @staticmethod - @tenacity.retry( # Retry up to 5 times with longer time for rate limit - reraise=True, - retry=tenacity.retry_if_exception_type( - ImageRateLimitedException - ), - wait=tenacity.wait_random_exponential(multiplier=1.5, max=60), - stop=tenacity.stop_after_attempt(5) - ) - def _action(action, request_session, *args, **kwargs): - """ Perform a session action and retry if auth fails - - This function dynamically performs a specific type of call - using the provided session (get, patch, post, etc). It will - attempt a single re-authentication if the initial request - fails with a 401. - """ - _action = getattr(request_session, action) - try: - req = _action(*args, **kwargs) - RegistrySessionHelper.check_status(session=request_session, - request=req) - except requests.exceptions.HTTPError as e: - if e.response.status_code == 401: - req = _action(*args, **kwargs) - RegistrySessionHelper.check_status(session=request_session, - request=req) - else: - raise - return req - - @staticmethod - def get(request_session, *args, **kwargs): - """ Perform a get and retry if auth fails - - This function is designed to be used when we perform a get to - an authenticated source. This function will attempt a single - re-authentication request if the first one fails. - """ - return RegistrySessionHelper._action('get', - request_session, - *args, - **kwargs) - - @staticmethod - def patch(request_session, *args, **kwargs): - """ Perform a patch and retry if auth fails - - This function is designed to be used when we perform a path to - an authenticated source. This function will attempt a single - re-authentication request if the first one fails. - """ - return RegistrySessionHelper._action('patch', - request_session, - *args, - **kwargs) - - @staticmethod - def post(request_session, *args, **kwargs): - """ Perform a post and retry if auth fails - - This function is designed to be used when we perform a post to - an authenticated source. This function will attempt a single - re-authentication request if the first one fails. 
- """ - return RegistrySessionHelper._action('post', - request_session, - *args, - **kwargs) - - @staticmethod - def put(request_session, *args, **kwargs): - """ Perform a put and retry if auth fails - - This function is designed to be used when we perform a put to - an authenticated source. This function will attempt a single - re-authentication request if the first one fails. - """ - return RegistrySessionHelper._action('put', - request_session, - *args, - **kwargs) - - -class ImageUploadManager(BaseImageManager): - """Manage the uploading of image files - - Manage the uploading of images from a config file specified in YAML - syntax. Multiple config files can be specified. They will be merged. - """ - - def __init__(self, config_files=None, - cleanup=CLEANUP_FULL, - mirrors=None, registry_credentials=None, - multi_arch=False, lock=None): - if config_files is None: - config_files = [] - super(ImageUploadManager, self).__init__(config_files) - self.uploaders = { - 'python': PythonImageUploader() - } - self.uploaders['python'].init_global_state(lock) - self.cleanup = cleanup - if mirrors: - for uploader in self.uploaders.values(): - if hasattr(uploader, 'mirrors'): - uploader.mirrors.update(mirrors) - if registry_credentials: - self.validate_registry_credentials(registry_credentials) - for uploader in self.uploaders.values(): - uploader.registry_credentials = registry_credentials - self.multi_arch = multi_arch - - @staticmethod - def validate_registry_credentials(creds_data): - if not isinstance(creds_data, dict): - raise TypeError('Credentials data must be a dict') - for registry, cred_entry in creds_data.items(): - if not isinstance(cred_entry, dict) or len(cred_entry) != 1: - raise TypeError('Credentials entry must be ' - 'a dict with a single item') - if not isinstance(registry, str): - raise TypeError('Key must be a registry host string: %s' % - registry) - username, password = next(iter(cred_entry.items())) - if not (isinstance(username, str) and - isinstance(password, str)): - raise TypeError('Username and password must be strings: %s' % - username) - - def discover_image_tag(self, image, tag_from_label=None, - username=None, password=None): - uploader = self.uploader(DEFAULT_UPLOADER) - return uploader.discover_image_tag( - image, tag_from_label=tag_from_label, - username=username, password=password) - - def uploader(self, uploader): - if uploader not in self.uploaders: - raise ImageUploaderException('Unknown image uploader type') - return self.uploaders[uploader] - - def get_uploader(self, uploader): - return self.uploader(uploader) - - @staticmethod - def get_push_destination(item): - push_destination = item.get('push_destination') - if not push_destination: - return get_undercloud_registry() - - # If set to True, use discovered undercloud registry - if isinstance(push_destination, bool): - return get_undercloud_registry() - - return push_destination - - def upload(self): - """Start the upload process""" - - LOG.info('Using config files: %s' % self.config_files) - - uploads = self.load_config_files(self.UPLOADS) or [] - container_images = self.load_config_files(self.CONTAINER_IMAGES) or [] - upload_images = uploads + container_images - - tasks = [] - for item in upload_images: - image_name = item.get('imagename') - uploader = item.get('uploader', DEFAULT_UPLOADER) - pull_source = item.get('pull_source') - push_destination = self.get_push_destination(item) - - # This updates the parsed upload_images dict with real values - item['push_destination'] = push_destination - append_tag = 
item.get('modify_append_tag') - modify_role = item.get('modify_role') - modify_vars = item.get('modify_vars') - multi_arch = item.get('multi_arch', self.multi_arch) - - uploader = self.uploader(uploader) - tasks.append(UploadTask( - image_name, pull_source, push_destination, - append_tag, modify_role, modify_vars, - self.cleanup, multi_arch)) - - # NOTE(mwhahaha): We want to randomize the upload process because of - # the shared nature of container layers. Because we multiprocess the - # handling of containers, if performed in an alphabetical order (the - # default) we end up duplicating fetching of container layers. Things - # Like cinder-volume and cinder-backup share almost all of the same - # layers so when they are fetched at the same time, we will duplicate - # the processing. By randomizing the list we will reduce the amount - # of duplicating that occurs. In my testing I went from ~30mins to - # ~20mins to run. In the future this could be improved if we added - # some locking to the container fetching based on layer hashes but - # will require a significant rewrite. - random.shuffle(tasks) - for task in tasks: - uploader.add_upload_task(task) - - for uploader in self.uploaders.values(): - uploader.run_tasks() - - return upload_images # simply to make test validation easier - - -class BaseImageUploader(object): - lock = None - mirrors = {} - insecure_registries = set() - no_verify_registries = set(NO_VERIFY_REGISTRIES) - secure_registries = set(SECURE_REGISTRIES) - export_registries = set() - push_registries = set() - - def __init__(self): - self.upload_tasks = [] - # A mapping of layer hashs to the image which first copied that - # layer to the target - self.image_layers = {} - self.registry_credentials = {} - - @classmethod - def init_registries_cache(cls): - cls.insecure_registries.clear() - cls.no_verify_registries.clear() - cls.no_verify_registries.update(NO_VERIFY_REGISTRIES) - cls.secure_registries.clear() - cls.secure_registries.update(SECURE_REGISTRIES) - cls.mirrors.clear() - cls.export_registries.clear() - cls.push_registries.clear() - - def cleanup(self): - pass - - def run_tasks(self): - pass - - def credentials_for_registry(self, registry): - creds = self.registry_credentials.get(registry) - if not creds: - return None, None - username, password = next(iter(creds.items())) - return username, password - - @classmethod - def run_modify_playbook(cls, modify_role, modify_vars, - source_image, target_image, append_tag, - container_build_tool='buildah'): - run_vars = {} - if modify_vars: - run_vars.update(modify_vars) - run_vars['source_image'] = source_image - run_vars['target_image'] = target_image - run_vars['modified_append_tag'] = append_tag - run_vars['container_build_tool'] = container_build_tool - LOG.info('Playbook variables: \n%s' % yaml.safe_dump( - run_vars, default_flow_style=False)) - playbook = [{ - 'hosts': 'localhost', - 'gather_facts': 'no', - 'tasks': [{ - 'name': 'Import role %s' % modify_role, - 'import_role': { - 'name': modify_role - }, - 'vars': run_vars - }] - }] - LOG.info('Playbook: \n%s' % yaml.safe_dump( - playbook, default_flow_style=False)) - work_dir = tempfile.mkdtemp(prefix='tripleo-modify-image-playbook-') - log_name = 'tripleo-container-image-prepare-ansible.log' - try: - for handler in LOG.logger.root.handlers: - if hasattr(handler, 'baseFilename'): - if os.path.isfile(handler.baseFilename): - log_f = os.path.join( - os.path.dirname(handler.baseFilename), - log_name - ) - break - else: - raise OSError('Log output is not a file.') - except 
(AttributeError, OSError): - log_f = os.path.join('/var/log', log_name) - try: - LOG.info('Ansible action starting') - ansible.run_ansible_playbook( - playbook=playbook, - work_dir=work_dir, - verbosity=1, - extra_env_variables=dict(os.environ), - override_ansible_cfg=( - "[defaults]\n" - "stdout_callback=tripleo_dense\n" - "log_path=%s\n" % log_f - ) - ) - except processutils.ProcessExecutionError as e: - LOG.error( - '%s\n' - 'Error running playbook in directory: %s\n' - 'Playbook log information can be reviewed here: %s' % ( - e.stdout, - work_dir, - log_f - ) - ) - raise ImageUploaderException( - 'Modifying image %s failed' % target_image - ) - else: - LOG.info('Ansible action completed') - finally: - shutil.rmtree(work_dir) - - @classmethod - def _images_match(cls, image1, image2, session1=None): - try: - image1_digest = cls._image_digest(image1, session=session1) - except Exception: - return False - try: - image2_digest = cls._image_digest(image2) - except Exception: - return False - - # missing digest, no way to know if they match - if not image1_digest or not image2_digest: - return False - return image1_digest == image2_digest - - @classmethod - def _image_digest(cls, image, session=None): - image_url = cls._image_to_url(image) - i = cls._inspect(image_url, session) - return i.get('Digest') - - @classmethod - def _image_labels(cls, image_url, session=None): - i = cls._inspect(image_url, session) - return i.get('Labels', {}) or {} - - @classmethod - def _image_exists(cls, image, session=None): - try: - cls._image_digest( - image, session=session) - except ImageNotFoundException: - return False - else: - return True - - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def authenticate(self, image_url, username=None, password=None, - session=None): - netloc = image_url.netloc - image, tag = self._image_tag_from_url(image_url) - scope = 'repository:%s:pull' % image[1:] - - self.is_insecure_registry(registry_host=netloc) - url = self._build_url(image_url, path='/') - verify = (netloc not in self.no_verify_registries) - if not session: - session = MakeSession(verify=verify).create() - else: - session.headers.pop('Authorization', None) - session.verify = verify - - cached_token = None - if getattr(self, 'lock', None): - cached_token = RegistrySessionHelper.\ - get_cached_bearer_token(self.lock, scope) - - if cached_token: - session.headers['Authorization'] = 'Bearer %s' % cached_token - - r = session.get(url, timeout=30) - LOG.debug('%s status code %s' % (url, r.status_code)) - if r.status_code == 200: - return session - if r.status_code != 401: - r.raise_for_status() - if 'www-authenticate' not in r.headers: - raise ImageUploaderException( - 'Unknown authentication method for headers: %s' % r.headers) - - auth = None - www_auth = r.headers['www-authenticate'] - token_param = {} - - (auth_type, realm, service) = \ - RegistrySessionHelper.parse_www_authenticate(www_auth) - - if auth_type and auth_type.lower() == 'bearer': - LOG.debug('Using bearer token auth') - if getattr(self, 'lock', None): - lock = self.lock - else: - lock = None - token = RegistrySessionHelper.get_bearer_token(session, lock=lock, - username=username, - password=password, - realm=realm, - service=service, - scope=scope) - elif auth_type and auth_type.lower() == 'basic': - LOG.debug('Using 
basic auth') - if not username or not password: - raise Exception('Authentication credentials required for ' - 'basic auth: %s' % url) - auth = requests_auth.HTTPBasicAuth(username, password) - rauth = session.get(url, params=token_param, auth=auth, timeout=30) - rauth.raise_for_status() - token = ( - base64.b64encode( - bytes(username + ':' + password, 'utf-8')).decode('ascii') - ) - hash_request_id = hashlib.sha1(str(rauth.url).encode()) - LOG.debug( - 'Session authenticated: id {}'.format( - hash_request_id.hexdigest() - ) - ) - else: - raise ImageUploaderException( - 'Unknown www-authenticate value: %s' % www_auth) - auth_header = '%s %s' % (auth_type, token) - session.headers['Authorization'] = auth_header - - setattr(session, 'reauthenticate', self.authenticate) - setattr( - session, - 'auth_args', - dict( - image_url=image_url, - username=username, - password=password, - session=session - ) - ) - return session - - @staticmethod - def _get_response_text(response, encoding='utf-8', force_encoding=False): - """Return request response text - - We need to set the encoding for the response otherwise it - will attempt to detect the encoding which is very time consuming. - See https://github.com/psf/requests/issues/4235 for additional - context. - - :param: response: requests Response object - :param: encoding: encoding to set if not currently set - :param: force_encoding: set response encoding always - """ - - if force_encoding or not response.encoding: - response.encoding = encoding - return response.text - - @classmethod - def _build_url(cls, url, path): - netloc = url.netloc - if netloc in cls.mirrors: - mirror = cls.mirrors[netloc] - return '%sv2%s' % (mirror, path) - if (cls.is_insecure_registry(registry_host=netloc) and - netloc not in cls.no_verify_registries): - scheme = 'http' - else: - scheme = 'https' - if netloc == 'docker.io': - netloc = 'registry-1.docker.io' - return '%s://%s/v2%s' % (scheme, netloc, path) - - @classmethod - def _image_tag_from_url(cls, image_url): - if '@' in image_url.path: - parts = image_url.path.split('@') - else: - parts = image_url.path.split(':') - tag = parts[-1] - image = ':'.join(parts[:-1]) - return image, tag - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _inspect(cls, image_url, session=None, default_tag=False): - image, tag = cls._image_tag_from_url(image_url) - parts = { - 'image': image, - 'tag': tag - } - - tags_url = cls._build_url( - image_url, CALL_TAGS % parts - ) - tags_r = RegistrySessionHelper.get(session, tags_url, timeout=30) - tags = tags_r.json()['tags'] - if default_tag and tag not in tags: - if tags: - parts['tag'] = tags[-1] - else: - raise ImageNotFoundException('Not found image: %s' % - image_url.geturl()) - - manifest_url = cls._build_url( - image_url, CALL_MANIFEST % parts - ) - # prefer docker manifest over oci - manifest_headers = {'Accept': ", ".join([ - MEDIA_MANIFEST_V2 + ";q=1", MEDIA_OCI_MANIFEST_V1 + ";q=0.5"])} - - try: - manifest_r = RegistrySessionHelper.get( - session, - manifest_url, - headers=manifest_headers, - timeout=30 - ) - except requests.exceptions.HTTPError as e: - if e.response.status_code in (403, 404): - raise ImageNotFoundException('Not found image: %s' % - image_url.geturl()) - raise - - manifest_str = cls._get_response_text(manifest_r) - -
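# NOTE(illustrative): a minimal sketch, not part of the original module, of
# what _image_tag_from_url() above returns for the two reference styles it
# understands (tag vs. digest pinning). Image names are hypothetical.
from urllib.parse import urlparse

def image_tag_from_url(image_url):
    # same splitting rule as the classmethod above
    parts = (image_url.path.split('@') if '@' in image_url.path
             else image_url.path.split(':'))
    return ':'.join(parts[:-1]), parts[-1]

assert (image_tag_from_url(urlparse('docker://docker.io/ns/nova-api:latest'))
        == ('/ns/nova-api', 'latest'))
assert (image_tag_from_url(
        urlparse('docker://docker.io/ns/nova-api@sha256:0123abcd'))
        == ('/ns/nova-api', 'sha256:0123abcd'))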
if 'Docker-Content-Digest' in manifest_r.headers: - digest = manifest_r.headers['Docker-Content-Digest'] - else: - # The registry didn't supply the manifest digest, so calculate it - calc_digest = hashlib.sha256() - calc_digest.update(manifest_str.encode('utf-8')) - digest = 'sha256:%s' % calc_digest.hexdigest() - - manifest = json.loads(manifest_str) - - if manifest.get('schemaVersion', 2) == 1: - config = json.loads(manifest['history'][0]['v1Compatibility']) - layers = list(reversed([x['blobSum'] - for x in manifest['fsLayers']])) - else: - layers = [x['digest'] for x in manifest['layers']] - - parts['digest'] = manifest['config']['digest'] - config_headers = { - 'Accept': manifest['config']['mediaType'] - } - config_url = cls._build_url( - image_url, CALL_BLOB % parts) - config_r = RegistrySessionHelper.get( - session, - config_url, - headers=config_headers, - timeout=30, - allow_redirects=False - ) - # check if the blob is a redirect - config_r = RegistrySessionHelper.check_redirect_trusted( - config_r, session, stream=False) - config = config_r.json() - - image, tag = cls._image_tag_from_url(image_url) - name = '%s%s' % (image_url.netloc, image) - created = config['created'] - docker_version = config.get('docker_version', '') - labels = config['config'].get('Labels', {}) - # NOTE: labels can be null - if labels is None: - labels = {} - architecture = config['architecture'] - image_os = config['os'] - - return { - 'Name': name, - 'Tag': tag, - 'Digest': digest, - 'RepoTags': tags, - 'Created': created, - 'DockerVersion': docker_version, - 'Labels': labels, - 'Architecture': architecture, - 'Os': image_os, - 'Layers': layers, - } - - def list(self, registry, session=None): - self.is_insecure_registry(registry_host=registry) - url = self._image_to_url(registry) - catalog_url = self._build_url( - url, CALL_CATALOG - ) - catalog_resp = session.get(catalog_url, timeout=30) - if catalog_resp.status_code in [200]: - catalog = catalog_resp.json() - elif catalog_resp.status_code in [404]: - # just return since the catalog returned a 404 - LOG.debug('catalog_url return 404') - return [] - else: - raise ImageUploaderException( - 'Image registry made invalid response: %s' % - catalog_resp.status_code - ) - - tags_get_args = [] - for repo in catalog.get('repositories', []): - image = '%s/%s' % (registry, repo) - tags_get_args.append((self, image, session)) - - images = [] - workers = min(max(2, processutils.get_worker_count() // 2), 8) - with futures.ThreadPoolExecutor(max_workers=workers) as p: - for image, tags in p.map(tags_for_image, tags_get_args): - if not tags: - continue - for tag in tags: - images.append('%s:%s' % (image, tag)) - return images - - def inspect(self, image, session=None): - image_url = self._image_to_url(image) - return self._inspect(image_url, session) - - def delete(self, image, session=None): - image_url = self._image_to_url(image) - return self._delete(image_url, session) - - @classmethod - def _delete(cls, image, session=None): - raise NotImplementedError() - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _tags_for_image(cls, image, session): - url = cls._image_to_url(image) - parts = { - 'image': url.path, - } - tags_url = cls._build_url( - url, CALL_TAGS % parts - ) - r = session.get(tags_url, timeout=30) - if 
r.status_code in (403, 404): - return image, [] - tags = r.json() - return image, tags.get('tags', []) - - @classmethod - def _image_to_url(cls, image): - if '://' not in image: - image = 'docker://' + image - url = urlparse(image) - return url - - @classmethod - def _discover_tag_from_inspect(cls, i, image, tag_from_label=None, - fallback_tag=None): - labels = i.get('Labels', {}) - - if hasattr(labels, 'keys'): - label_keys = ', '.join(labels.keys()) - else: - label_keys = "" - - if not tag_from_label: - raise ImageUploaderException( - 'No label specified. Available labels: %s' % label_keys - ) - - if "{" in tag_from_label: - try: - tag_label = tag_from_label.format(**labels) - except ValueError as e: - raise ImageUploaderException(e) - except (KeyError, TypeError) as e: - if fallback_tag: - tag_label = fallback_tag - else: - raise ImageUploaderException( - 'Image %s %s. Available labels: %s' % - (image, e, label_keys) - ) - else: - tag_label = None - if isinstance(labels, dict): - tag_label = labels.get(tag_from_label) - if tag_label is None: - if fallback_tag: - tag_label = fallback_tag - else: - raise ImageUploaderException( - 'Image %s has no label %s. Available labels: %s' % - (image, tag_from_label, label_keys) - ) - - # confirm the tag exists by checking for an entry in RepoTags - repo_tags = i.get('RepoTags', []) - if tag_label not in repo_tags: - raise ImageUploaderException( - 'Image %s has no tag %s.\nAvailable tags: %s' % - (image, tag_label, ', '.join(repo_tags)) - ) - return tag_label - - def discover_image_tags(self, images, tag_from_label=None, - default_tag=False): - image_urls = [self._image_to_url(i) for i in images] - - # prime self.insecure_registries by testing every image - for url in image_urls: - self.is_insecure_registry(registry_host=url) - - discover_args = [] - for image in images: - discover_args.append((self, image, tag_from_label, - default_tag)) - - versioned_images = {} - with futures.ThreadPoolExecutor(max_workers=16) as p: - for image, versioned_image in p.map(discover_tag_from_inspect, - discover_args): - versioned_images[image] = versioned_image - return versioned_images - - def discover_image_tag(self, image, tag_from_label=None, - fallback_tag=None, username=None, password=None): - image_url = self._image_to_url(image) - self.is_insecure_registry(registry_host=image_url.netloc) - try: - session = self.authenticate( - image_url, username=username, password=password) - except requests.exceptions.HTTPError as e: - if e.response.status_code == 401: - raise ImageUploaderException( - 'Unable to authenticate. This may indicate ' - 'missing registry credentials or the provided ' - 'container or namespace does not exist. %s' % e) - raise - - i = self._inspect(image_url, session) - return self._discover_tag_from_inspect(i, image, tag_from_label, - fallback_tag) - - def filter_images_with_labels(self, images, labels, - username=None, password=None): - images_with_labels = [] - for image in images: - url = self._image_to_url(image) - self.is_insecure_registry(registry_host=url.netloc) - try: - session = self.authenticate( - url, username=username, password=password) - except requests.exceptions.HTTPError as e: - if e.response.status_code == 401: - raise ImageUploaderException( - 'Unable to authenticate. This may indicate ' - 'missing registry credentials or the provided ' - 'container or namespace does not exist. 
%s' % e) - raise - image_labels = self._image_labels( - url, session=session) - # The logic is the following: if one of the labels in - # modify_only_with_labels parameter is present in the image, it - # will match and add the images that need to be modified. - for label in labels: - if label in image_labels: - # we found a matching label, add the image - # and leave the loop. - images_with_labels.append(image) - break - - return images_with_labels - - def add_upload_task(self, task): - if task.modify_role and task.multi_arch: - raise ImageUploaderException( - 'Cannot run a modify role on multi-arch image %s' % - task.image_name - ) - # prime insecure_registries - if task.pull_source: - self.is_insecure_registry( - registry_host=self._image_to_url(task.pull_source).netloc - ) - else: - self.is_insecure_registry( - registry_host=self._image_to_url(task.image_name).netloc - ) - self.is_insecure_registry( - registry_host=self._image_to_url(task.push_destination).netloc - ) - self.upload_tasks.append((self, task)) - - @classmethod - def is_insecure_registry(cls, registry_host): - if registry_host in cls.secure_registries: - return False - if (registry_host in cls.insecure_registries or - registry_host in cls.no_verify_registries): - return True - with requests.Session() as s: - try: - s.get('https://%s' % registry_host, timeout=30) - except requests.exceptions.SSLError: - # Might be just a TLS certificate validation issue - # Just retry without the verification - try: - s.get('https://%s' % registry_host, timeout=30, - verify=False) - cls.no_verify_registries.add(registry_host) - # Technically these types of registries are insecure when - # the container engine tries to do a pull. The python - # uploader ignores the certificate problem, but they are - # still insecure so we return True here while we'll still - # use https when we access the registry.
LP#1833751 - return True - except requests.exceptions.SSLError: - # So nope, it's really not a certificate verification issue - cls.insecure_registries.add(registry_host) - return True - except Exception: - # for any other error assume it is a secure registry, because: - # - it is secure registry - # - the host is not accessible - pass - cls.secure_registries.add(registry_host) - return False - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _cross_repo_mount(cls, target_image_url, image_layers, - source_layers, session): - netloc = target_image_url.netloc - name = target_image_url.path.split(':')[0][1:] - export = netloc in cls.export_registries - if export: - # pylint: disable=no-member - linked_layers = image_export.cross_repo_mount( - target_image_url, image_layers, source_layers, - uploaded_layers=cls._global_view_proxy()) - # track linked layers globally for future references - for layer, info in linked_layers.items(): - # pylint: disable=no-member - cls._track_uploaded_layers( - layer, known_path=info['known_path'], - image_ref=info['ref_image'], scope='local') - return - - if netloc in cls.insecure_registries: - scheme = 'http' - else: - scheme = 'https' - url = '%s://%s/v2/%s/blobs/uploads/' % (scheme, netloc, name) - - for layer in source_layers: - # pylint: disable=no-member - known_path, existing_name = image_utils.uploaded_layers_details( - cls._global_view_proxy(), layer, scope='remote') - if layer not in image_layers and not existing_name: - continue - if not existing_name: - existing_name = image_layers[layer].path.split(':')[0][1:] - if existing_name != name: - LOG.debug('[%s] Layer %s ref. 
by image %s already exists ' - 'at %s' % (name, layer, existing_name, known_path)) - LOG.info('[%s] Cross repository blob mount from %s' % - (layer, existing_name)) - data = { - 'mount': layer, - 'from': existing_name - } - r = RegistrySessionHelper.post(session, url, data=data, timeout=30) - LOG.debug('%s %s' % (r.status_code, r.reason)) - - -class PythonImageUploader(BaseImageUploader): - """Upload images using a direct implementation of the registry API""" - - uploaded_layers = {} # provides global view for multi-threading workers - lock = None # provides global locking info plus global view, if MP is used - - @classmethod - def init_global_state(cls, lock): - if not cls.lock: - cls.lock = lock - - @classmethod - @tenacity.retry( # Retry until we no longer have collisions - retry=tenacity.retry_if_exception_type(ImageUploaderThreadException), - wait=tenacity.wait_random_exponential(multiplier=1, max=10) - ) - def _layer_fetch_lock(cls, layer): - if not cls.lock: - LOG.warning('No lock information provided for layer %s' % layer) - return - if layer in cls.lock.objects(): - LOG.debug('[%s] Layer is being fetched by another thread' % layer) - raise ImageUploaderThreadException('layer being fetched') - known_path, image = image_utils.uploaded_layers_details( - cls._global_view_proxy(), layer, scope='local') - if not known_path or not image: - known_path, image = image_utils.uploaded_layers_details( - cls._global_view_proxy(), layer, scope='remote') - if image and known_path: - # already processed layers needs no further locking - return - with cls.lock.get_lock(): - if layer in cls.lock.objects(): - LOG.debug('Collision for lock %s' % layer) - raise ImageUploaderThreadException('layer conflict') - cls.lock.objects().append(layer) - LOG.debug('Got lock on layer %s' % layer) - - @classmethod - def _layer_fetch_unlock(cls, layer): - if not cls.lock: - LOG.warning('No lock information provided for layer %s' % layer) - return - with cls.lock.get_lock(): - while layer in cls.lock.objects(): - cls.lock.objects().remove(layer) - LOG.debug('Released lock on layer %s' % layer) - - @classmethod - def _global_view_proxy(cls, value=None, forget=False): - """Represent the global view for mixed multi-workers concurrent access - - Depending on worker's context target the corresponding shared data - structures (global view) for the requested value add/remove - operation. Also keep that global view always consolidated for all of - the supported MP/MT worker types. Threads will share common data via - its common class namespace in the threads-safe standard dictionary. - Processes will use multiprocess synchronization primitives stored in - the global lock context. 
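# NOTE(illustrative): a minimal sketch, not part of the original module, of
# the cross-repository blob mount POSTed above: a successful mount makes
# the target registry link a blob that already exists in another repository
# on the same host, so the layer never needs to be re-uploaded. The helper
# name and all values are hypothetical; like the code above, it passes
# mount/from as form data (the distribution spec expresses the same pair
# as query parameters).
def cross_repo_mount(session, registry, target_repo, layer_digest, from_repo):
    url = 'https://%s/v2/%s/blobs/uploads/' % (registry, target_repo)
    # a 201 Created response means the registry linked the existing blob
    return session.post(url, data={'mount': layer_digest, 'from': from_repo},
                        timeout=30)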
- - :param: value: Shared data to track in the global view - :param: forget: Defines either to add or remove the shared data - """ - if not cls.lock: - LOG.warning('No lock information provided for value %s' % value) - return - with cls.lock.get_lock(): - if value and forget: - cls.uploaded_layers.pop(value, None) - if hasattr(cls.lock, '_global_view'): - cls.lock._global_view.pop(value, None) - elif value: - cls.uploaded_layers.update(value) - if hasattr(cls.lock, '_global_view'): - cls.lock._global_view.update(value) - - if not value: - # return global view consolidated among MP/MT workers state - if hasattr(cls.lock, '_global_view'): - consolidated_view = cls.uploaded_layers.copy() - consolidated_view.update(cls.lock._global_view) - return consolidated_view - return cls.uploaded_layers - - @classmethod - def _track_uploaded_layers(cls, layer, known_path=None, image_ref=None, - forget=False, scope='remote'): - """Track an image layer info in the global view - - Adds or removes layer info to/from the global view shared among - all workers of all supported types (MP/MT). An image layer hash and - scope pair provide a unique one-way entry tracked in the global view. - The layer info being forgotten will be untracked by any existing scope. - - :param: layer: A container image layer hash to track in the global view - :param: known_path: Known URL or local path for the tracked layer - :param: image_ref: Name of the image cross-referencing tracked layer - :param: forget: Defines either to add or remove the tracked layer info - :param: scope: Specifies remote or local type of the tracked image - """ - if forget: - LOG.debug('Untracking processed layer %s for any scope' % layer) - cls._global_view_proxy(value=layer, forget=True) - else: - LOG.debug('Tracking processed layer %s for %s scope' - % (layer, scope)) - cls._global_view_proxy( - value={layer: {scope: {'ref': image_ref, 'path': known_path}}}) - - def upload_image(self, task): - """Upload image from a task - - This function takes an UploadTask and pushes it to the appropriate - target destinations. It should be noted that if the source container - is prefix with 'containers-storage:' instead of 'docker://' or no - prefix, this process will assume that the source container is already - local to the system. The local container upload does not currently - support any of the modification actions. In order to run the - modification actions on a container prior to upload, the source must - be a remote image. Additionally, cleanup has no affect when - uploading a local image as well. - - :param: task: UploadTask with container information - """ - t = task - LOG.info('[%s] Starting upload image process' % t.image_name) - - source_local = t.source_image.startswith('containers-storage:') - target_image_local_url = urlparse('containers-storage:%s' % - t.target_image) - target_username, target_password = self.credentials_for_registry( - t.target_image_url.netloc) - try: - target_session = self.authenticate( - t.target_image_url, - username=target_username, - password=target_password - ) - except requests.exceptions.HTTPError as e: - if e.response.status_code == 401: - raise ImageUploaderException( - 'Unable to authenticate. This may indicate ' - 'missing registry credentials or the provided ' - 'container or namespace does not exist. 
%s' % e) - raise - - try: - self._detect_target_export(t.target_image_url, target_session) - except Exception: - LOG.error('[%s] Failed uploading the target ' - 'image' % t.target_image) - # Close the session before raising it for more of retrying perhaps - target_session.close() - raise - - if source_local: - if t.modify_role: - target_session.close() - raise NotImplementedError('Modify role not implemented for ' - 'local containers') - if t.cleanup: - LOG.warning('[%s] Cleanup has no effect with a local source ' - 'container.' % t.image_name) - - try: - source_local_url = urlparse(t.source_image) - # Copy from local storage to target registry - self._copy_local_to_registry( - source_local_url, - t.target_image_url, - session=target_session - ) - except Exception: - LOG.warning('[%s] Failed copying the target image ' - 'to the target registry' % t.target_image) - pass - target_session.close() - return [] - - if t.modify_role: - image_exists = False - try: - image_exists = self._image_exists(t.target_image, - target_session) - except Exception: - LOG.warning('[%s] Failed to check if the target ' - 'image exists' % t.target_image) - pass - if image_exists: - LOG.warning('[%s] Skipping upload for modified image %s' % - (t.image_name, t.target_image)) - target_session.close() - return [] - copy_target_url = t.target_image_source_tag_url - else: - copy_target_url = t.target_image_url - # Keep the target session open yet - - source_username, source_password = self.credentials_for_registry( - t.source_image_url.netloc) - try: - source_session = self.authenticate( - t.source_image_url, - username=source_username, - password=source_password - ) - except requests.exceptions.HTTPError as e: - if e.response.status_code == 401: - raise ImageUploaderException( - 'Unable to authenticate. This may indicate ' - 'missing registry credentials or the provided ' - 'container or namespace does not exist. 
%s' % e) - raise - - source_layers = [] - manifests_str = [] - try: - self._collect_manifests_layers( - t.source_image_url, source_session, - manifests_str, source_layers, - t.multi_arch - ) - - self._cross_repo_mount( - copy_target_url, self.image_layers, source_layers, - session=target_session) - to_cleanup = [] - - # Copy unmodified images from source to target - self._copy_registry_to_registry( - t.source_image_url, - copy_target_url, - source_manifests=manifests_str, - source_session=source_session, - target_session=target_session, - source_layers=source_layers, - multi_arch=t.multi_arch - ) - except Exception: - LOG.error('[%s] Failed uploading the target ' - 'image' % t.target_image) - # Close the sessions before raising it for more of - # retrying perhaps - source_session.close() - target_session.close() - raise - - if not t.modify_role: - LOG.info('[%s] Completed upload for image' % t.image_name) - else: - LOG.info('[%s] Copy ummodified image from target to local' % - t.image_name) - try: - self._copy_registry_to_local(t.target_image_source_tag_url) - - if t.cleanup in (CLEANUP_FULL, CLEANUP_PARTIAL): - to_cleanup.append(t.target_image_source_tag) - - self.run_modify_playbook( - t.modify_role, - t.modify_vars, - t.target_image_source_tag, - t.target_image_source_tag, - t.append_tag, - container_build_tool='buildah') - if t.cleanup == CLEANUP_FULL: - to_cleanup.append(t.target_image) - - # cross-repo mount the unmodified image to the modified image - self._cross_repo_mount( - t.target_image_url, self.image_layers, source_layers, - session=target_session) - - # Copy from local storage to target registry - self._copy_local_to_registry( - target_image_local_url, - t.target_image_url, - session=target_session - ) - LOG.info('[%s] Completed modify and upload for image' % - t.image_name) - except Exception: - LOG.error('[%s] Failed processing the target ' - 'image' % t.target_image) - # Close the sessions before raising it for more of - # retrying perhaps - source_session.close() - target_session.close() - raise - - try: - for layer in source_layers: - self.image_layers.setdefault(layer, t.target_image_url) - except Exception: - LOG.warning('[%s] Failed setting default layer %s for the ' - 'target image' % (t.target_image, layer)) - pass - target_session.close() - source_session.close() - return to_cleanup - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _detect_target_export(cls, image_url, session): - if image_url.netloc in cls.export_registries: - return True - if image_url.netloc in cls.push_registries: - return False - - # detect if the registry is push-capable by requesting an upload URL. 
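# NOTE(illustrative): a minimal sketch, not part of the original module, of
# how the cleanup modes defined earlier ('full', 'partial', 'none') drive
# the to_cleanup list built in upload_image() above when a modify role runs.
def images_to_cleanup(cleanup, source_tagged_image, target_image):
    to_cleanup = []
    if cleanup in ('full', 'partial'):
        # the locally pulled, source-tagged copy is always removable
        to_cleanup.append(source_tagged_image)
    if cleanup == 'full':
        # 'full' also removes the modified target image from local storage
        to_cleanup.append(target_image)
    return to_cleanup

assert images_to_cleanup('partial', 'img:src-tag', 'img:dst') == ['img:src-tag']
assert images_to_cleanup('none', 'img:src-tag', 'img:dst') == []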
- image, _ = cls._image_tag_from_url(image_url) - upload_req_url = cls._build_url( - image_url, - path=CALL_UPLOAD % {'image': image}) - try: - RegistrySessionHelper.post( - session, - upload_req_url, - timeout=30 - ) - except requests.exceptions.HTTPError as e: - if e.response.status_code in (501, 403, 404, 405): - cls.export_registries.add(image_url.netloc) - return True - raise - cls.push_registries.add(image_url.netloc) - return False - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _fetch_manifest(cls, url, session, multi_arch): - image, tag = cls._image_tag_from_url(url) - parts = { - 'image': image, - 'tag': tag - } - url = cls._build_url( - url, CALL_MANIFEST % parts - ) - if multi_arch: - manifest_headers = {'Accept': MEDIA_MANIFEST_V2_LIST} - else: - # prefer docker manifest over oci - manifest_headers = {'Accept': ", ".join([ - MEDIA_MANIFEST_V2 + ";q=1", MEDIA_OCI_MANIFEST_V1 + ";q=0.5"])} - try: - r = RegistrySessionHelper.get( - session, - url, - headers=manifest_headers, - timeout=30 - ) - except requests.exceptions.HTTPError as e: - if e.response.status_code in (403, 404): - raise ImageNotFoundException('Not found image: %s' % url) - raise - return cls._get_response_text(r) - - def _collect_manifests_layers(self, image_url, session, - manifests_str, layers, - multi_arch): - manifest_str = self._fetch_manifest( - image_url, - session=session, - multi_arch=multi_arch - ) - manifests_str.append(manifest_str) - manifest = json.loads(manifest_str) - media_type = manifest.get('mediaType', - manifest.get('config', {}).get('mediaType')) - if manifest.get('schemaVersion', 2) == 1: - layers.extend(reversed([x['blobSum'] - for x in manifest['fsLayers']])) - elif not media_type or media_type in [MEDIA_MANIFEST_V2, - MEDIA_OCI_MANIFEST_V1, - MEDIA_OCI_CONFIG_V1]: - layers.extend(x['digest'] for x in manifest['layers']) - elif media_type == MEDIA_MANIFEST_V2_LIST: - image, _, tag = image_url.geturl().rpartition(':') - for man in manifest.get('manifests', []): - # replace image tag with the manifest hash in the list - man_url = urlparse('%s@%s' % (image, man['digest'])) - self._collect_manifests_layers( - man_url, session, manifests_str, layers, - multi_arch=False - ) - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _upload_url(cls, image_url, session, previous_request=None): - if previous_request and 'Location' in previous_request.headers: - return previous_request.headers['Location'] - - image, tag = cls._image_tag_from_url(image_url) - upload_req_url = cls._build_url( - image_url, - path=CALL_UPLOAD % {'image': image}) - r = RegistrySessionHelper.post( - session, - upload_req_url, - timeout=30 - ) - return r.headers['Location'] - - @classmethod - @tenacity.retry( # Retry up to 5 times with longer time - reraise=True, - retry=tenacity.retry_if_exception_type( - (requests.exceptions.RequestException, - ImageRateLimitedException) - ), - wait=tenacity.wait_random_exponential(multiplier=1.5, max=60), - stop=tenacity.stop_after_attempt(5) - ) - def _layer_stream_registry(cls, digest, 
source_url, calc_digest, - session): - image, tag = cls._image_tag_from_url(source_url) - parts = { - 'image': image, - 'tag': tag, - 'digest': digest - } - source_blob_url = cls._build_url( - source_url, CALL_BLOB % parts) - # NOTE(aschultz): We specify None and let requests figure it out - chunk_size = None - LOG.info("[%s] Fetching layer %s from %s" % - (image, digest, source_blob_url)) - with session.get(source_blob_url, - stream=True, - timeout=30, - allow_redirects=False) as blob_req: - blob_req.encoding = 'utf-8' - # raise for status here to ensure we didn't got a 401 - RegistrySessionHelper.check_status(session=session, - request=blob_req) - # Requests to docker.io redirect to CDN for the actual content - # so we need to check if our initial blob request is a redirect - # and follow as necessary. - blob_req = RegistrySessionHelper.check_redirect_trusted(blob_req, - session) - for data in blob_req.iter_content(chunk_size): - LOG.debug("[%s] Read %i bytes for %s" % - (image, len(data), digest)) - if not data: - break - calc_digest.update(data) - yield data - LOG.info("[%s] Done fetching layer %s from registry" % (image, digest)) - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - IOError - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _copy_layer_registry_to_registry(cls, source_url, target_url, - layer, - source_session=None, - target_session=None): - layer_entry = {'digest': layer} - try: - cls._layer_fetch_lock(layer) - if cls._target_layer_exists_registry( - target_url, layer_entry, [layer_entry], target_session): - cls._layer_fetch_unlock(layer) - return - known_path, ref_image = image_utils.uploaded_layers_details( - cls._global_view_proxy(), layer, scope='local') - if known_path and ref_image: - # cross-link target from local source, skip fetching it again - image_export.layer_cross_link( - layer, ref_image, known_path, target_url) - cls._layer_fetch_unlock(layer) - return - except ImageUploaderThreadException: - # skip trying to unlock, because that's what threw the exception - raise - except Exception: - cls._layer_fetch_unlock(layer) - raise - - digest = layer_entry['digest'] - LOG.debug('[%s] Uploading layer' % digest) - - calc_digest = hashlib.sha256() - known_path = None - layer_val = None - try: - layer_stream = cls._layer_stream_registry( - digest, source_url, calc_digest, source_session) - layer_val, known_path = cls._copy_stream_to_registry( - target_url, layer_entry, calc_digest, layer_stream, - target_session) - except (IOError, requests.exceptions.HTTPError): - cls._track_uploaded_layers(layer, forget=True, scope='remote') - LOG.error('[%s] Failed processing layer for the target ' - 'image %s' % (layer, target_url.geturl())) - raise - else: - if layer_val and known_path: - image_ref = target_url.path.split(':')[0][1:] - uploaded = urlparse(known_path).scheme - cls._track_uploaded_layers( - layer_val, known_path=known_path, image_ref=image_ref, - scope=('remote' if uploaded else 'local')) - return layer_val - finally: - cls._layer_fetch_unlock(layer) - - @classmethod - def _assert_scheme(cls, url, scheme): - if url.scheme != scheme: - raise ImageUploaderException( - 'Expected %s scheme: %s' % (scheme, url.geturl())) - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - 
requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _copy_registry_to_registry(cls, source_url, target_url, - source_manifests, - source_session=None, - target_session=None, - source_layers=None, - multi_arch=False): - cls._assert_scheme(source_url, 'docker') - cls._assert_scheme(target_url, 'docker') - - image, tag = cls._image_tag_from_url(source_url) - parts = { - 'image': image, - 'tag': tag - } - - # Upload all layers - copy_jobs = [] - jobs_count = 0 - jobs_finished = 0 - with futures.ThreadPoolExecutor(max_workers=4) as p: - if source_layers: - for layer in source_layers: - copy_jobs.append(p.submit( - cls._copy_layer_registry_to_registry, - source_url, target_url, - layer=layer, - source_session=source_session, - target_session=target_session - )) - - jobs_count = len(copy_jobs) - LOG.debug('[%s] Waiting for %i jobs to finish' % - (image, jobs_count)) - for job in futures.as_completed(copy_jobs): - e = job.exception() - if e: - raise e - layer = job.result() - if layer: - LOG.debug('[%s] Upload complete for layer %s' % - (image, layer)) - jobs_finished += 1 - LOG.debug('[%s] Waiting for next job: %i of %i complete' % - (image, jobs_finished, jobs_count)) - - LOG.debug('[%s] Completed %i jobs' % (image, jobs_count)) - - for source_manifest in source_manifests: - manifest = json.loads(source_manifest) - config_str = None - # NOTE(mwhahaha): mediaType will not be set when it's - # schemaVersion 1 - media_type = manifest.get('mediaType', - manifest.get('config', - {}).get('mediaType')) - if media_type in [MEDIA_MANIFEST_V2, - MEDIA_OCI_MANIFEST_V1, - MEDIA_OCI_CONFIG_V1]: - config_digest = manifest['config']['digest'] - LOG.debug('[%s] Uploading config with digest: %s' % - (image, config_digest)) - - parts['digest'] = config_digest - source_config_url = cls._build_url( - source_url, - CALL_BLOB % parts - ) - - r = RegistrySessionHelper.get( - source_session, - source_config_url, - timeout=30, - allow_redirects=False - ) - # check if the blob was a redirect - r = RegistrySessionHelper.check_redirect_trusted( - r, source_session, stream=False) - - config_str = cls._get_response_text(r) - manifest['config']['size'] = len(config_str) - manifest['config']['mediaType'] = MEDIA_CONFIG - - cls._copy_manifest_config_to_registry( - target_url=target_url, - manifest_str=source_manifest, - config_str=config_str, - target_session=target_session, - multi_arch=multi_arch - ) - LOG.debug('[%s] Finished copying image' % image) - - @classmethod - def _copy_manifest_config_to_registry(cls, target_url, - manifest_str, - config_str, - target_session=None, - multi_arch=False): - - manifest = json.loads(manifest_str) - if manifest.get('schemaVersion', 2) == 1: - if 'signatures' in manifest: - manifest_type = MEDIA_MANIFEST_V1_SIGNED - else: - manifest_type = MEDIA_MANIFEST_V1 - else: - # NOTE(mwhahaha): always force docker media format if not set or - # is explicitly OCI because buildah uses OCI by default but we - # convert the metadata to Docker format in the uploader. 
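# NOTE(illustrative): a minimal sketch, not part of the original module:
# the OCI -> Docker media type rewrites that the conversion below applies,
# collected into one mapping built from the MEDIA_* constants above.
OCI_TO_DOCKER = {
    'application/vnd.oci.image.manifest.v1+json':       # MEDIA_OCI_MANIFEST_V1
        'application/vnd.docker.distribution.manifest.v2+json',
    'application/vnd.oci.image.index.v1+json':          # MEDIA_OCI_INDEX_V1
        'application/vnd.docker.distribution.manifest.list.v2+json',
    'application/vnd.oci.image.config.v1+json':         # MEDIA_OCI_CONFIG_V1
        'application/vnd.docker.container.image.v1+json',
    'application/vnd.oci.image.layer.v1.tar':           # MEDIA_OCI_LAYER
        'application/vnd.docker.image.rootfs.diff.tar',
    'application/vnd.oci.image.layer.v1.tar+gzip':      # MEDIA_OCI_LAYER_COMPRESSED
        'application/vnd.docker.image.rootfs.diff.tar.gzip',
}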
- # See LP#1860585 - manifest_type = manifest.get('mediaType', - manifest.get('config', - {}).get('mediaType')) - if manifest_type in [MEDIA_OCI_MANIFEST_V1, - MEDIA_OCI_CONFIG_V1]: - manifest_type = MEDIA_MANIFEST_V2 - # convert config mediaType to docker.container.image - manifest['config']['mediaType'] = MEDIA_CONFIG - layers = manifest.get('layers') - # convert layer type to docker layer type - if layers: - new_layers = [] - for layer in layers: - layer_type = layer.get('mediaType') - if layer_type == MEDIA_OCI_LAYER_COMPRESSED: - layer['mediaType'] = MEDIA_BLOB_COMPRESSED - elif layer_type == MEDIA_OCI_LAYER: - layer['mediaType'] = MEDIA_BLOB - new_layers.append(layer) - manifest['layers'] = new_layers - elif manifest_type == MEDIA_CONFIG: - manifest_type = MEDIA_MANIFEST_V2 - elif manifest_type == MEDIA_OCI_INDEX_V1: - manifest_type = MEDIA_MANIFEST_V2_LIST - manifest['mediaType'] = manifest_type - manifest_str = json.dumps(manifest, indent=3) - - export = target_url.netloc in cls.export_registries - if export: - image_export.export_manifest_config( - target_url, - manifest_str, - manifest_type, - config_str, - multi_arch=multi_arch - ) - return - - if config_str is not None: - config_digest = manifest['config']['digest'] - # Upload the config json as a blob - upload_url = cls._upload_url( - target_url, - session=target_session) - r = RegistrySessionHelper.put( - target_session, - upload_url, - timeout=30, - params={ - 'digest': config_digest - }, - data=config_str.encode('utf-8'), - headers={ - 'Content-Length': str(len(config_str)), - 'Content-Type': 'application/octet-stream' - } - ) - - # Upload the manifest - image, tag = cls._image_tag_from_url(target_url) - parts = { - 'image': image, - 'tag': tag - } - manifest_url = cls._build_url( - target_url, CALL_MANIFEST % parts) - - LOG.debug('[%s] Uploading manifest of type %s to: %s' % - (image, manifest_type, manifest_url)) - - try: - r = RegistrySessionHelper.put( - target_session, - manifest_url, - timeout=30, - data=manifest_str.encode('utf-8'), - headers={ - 'Content-Type': manifest_type - } - ) - except requests.exceptions.HTTPError as e: - if e.response.status_code == 400: - LOG.error(cls._get_response_text(r)) - raise ImageUploaderException('Pushing manifest failed') - raise - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _copy_registry_to_local(cls, source_url): - cls._assert_scheme(source_url, 'docker') - pull_source = source_url.netloc + source_url.path - cmd = ['buildah', '--debug', 'pull'] - - if source_url.netloc in [cls.insecure_registries, - cls.no_verify_registries]: - cmd.append('--tls-verify=false') - - cmd.append(pull_source) - LOG.info('Pulling %s' % pull_source) - LOG.info('Running %s' % ' '.join(cmd)) - try: - process = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - close_fds=True - ) - out, err = process.communicate() - if process.returncode != 0: - error_msg = ( - 'Pulling image failed: cmd "{}", stdout "{}",' - ' stderr "{}"'.format( - ' '.join(cmd), - out, - err - ) - ) - LOG.error(error_msg) - raise ImageUploaderException(error_msg) - except KeyboardInterrupt: - raise Exception('Action interrupted with ctrl+c') - return out - - @classmethod - def _target_layer_exists_registry(cls, target_url, layer, check_layers, - session): - image, tag = cls._image_tag_from_url(target_url) - 
norm_image = (image[1:] if image.startswith('/') else image) - parts = { - 'image': image, - 'tag': tag - } - layer_found = None - # Check in global view or do a HEAD call for the supplied - # digests to see if the layer is already in the registry - for x in check_layers: - if not x: - continue - known_path, ref_image = image_utils.uploaded_layers_details( - cls._global_view_proxy(), x['digest'], scope='remote') - if ref_image == norm_image: - LOG.debug('[%s] Layer %s already exists at %s' % - (image, x['digest'], known_path)) - layer_found = x - break - parts['digest'] = x['digest'] - blob_url = cls._build_url(target_url, CALL_BLOB % parts) - if session.head(blob_url, timeout=30).status_code == 200: - LOG.debug('[%s] Layer already exists: %s' % - (image, x['digest'])) - layer_found = x - break - if layer_found: - layer['digest'] = layer_found['digest'] - if 'size' in layer_found: - layer['size'] = layer_found['size'] - if 'mediaType' in layer_found: - layer['mediaType'] = layer_found['mediaType'] - return True - return False - - @classmethod - def _layer_stream_local(cls, layer_id, calc_digest): - LOG.debug('[%s] Exporting layer' % layer_id) - - tar_split_path = cls._containers_file_path( - 'overlay-layers', - '%s.tar-split.gz' % layer_id - ) - overlay_path = cls._containers_file_path( - 'overlay', layer_id, 'diff' - ) - cmd = [ - 'tar-split', 'asm', - '--input', tar_split_path, - '--path', overlay_path, - '--compress' - ] - LOG.debug(' '.join(cmd)) - try: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE) - - chunk_size = 2 ** 20 - - while True: - data = p.stdout.read(chunk_size) - if not data: - break - calc_digest.update(data) - yield data - p.wait() - if p.returncode != 0: - raise ImageUploaderException('Extracting layer failed') - except KeyboardInterrupt: - raise Exception('Action interrupted with ctrl+c') - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _copy_layer_local_to_registry(cls, target_url, - session, layer, layer_entry): - - # Check in global view or do a HEAD call for the compressed-diff-digest - # and diff-digest to see if the layer is already in the registry - check_layers = [] - compressed_digest = layer_entry.get('compressed-diff-digest') - if compressed_digest: - check_layers.append({ - 'digest': compressed_digest, - 'size': layer_entry.get('compressed-size'), - 'mediaType': MEDIA_BLOB_COMPRESSED, - }) - - digest = layer_entry.get('diff-digest') - if digest: - check_layers.append({ - 'digest': digest, - 'size': layer_entry.get('diff-size'), - 'mediaType': MEDIA_BLOB, - }) - if cls._target_layer_exists_registry(target_url, layer, check_layers, - session): - return - - layer_id = layer_entry['id'] - LOG.debug('[%s] Uploading layer' % layer_id) - - calc_digest = hashlib.sha256() - known_path = None - layer_val = None - try: - layer_stream = cls._layer_stream_local(layer_id, calc_digest) - layer_val, known_path = cls._copy_stream_to_registry( - target_url, layer, calc_digest, layer_stream, session, - verify_digest=False) - except (IOError, requests.exceptions.HTTPError): - cls._track_uploaded_layers( - layer['digest'], forget=True, scope='remote') - LOG.error('[%s] Failed processing layer for the target ' - 'image %s' % (layer['digest'], target_url.geturl())) - raise - else: - if layer_val and known_path: - image_ref 
= target_url.path.split(':')[0][1:] - uploaded = urlparse(known_path).scheme - cls._track_uploaded_layers( - layer_val, known_path=known_path, image_ref=image_ref, - scope=('remote' if uploaded else 'local')) - return layer_val - - @classmethod - def _copy_stream_to_registry(cls, target_url, layer, calc_digest, - layer_stream, session, verify_digest=True): - layer['mediaType'] = MEDIA_BLOB_COMPRESSED - length = 0 - upload_resp = None - - export = target_url.netloc in cls.export_registries - if export: - return image_export.export_stream( - target_url, layer, layer_stream, verify_digest=verify_digest) - - for chunk in layer_stream: - if not chunk: - break - - chunk_length = len(chunk) - upload_url = cls._upload_url( - target_url, session, upload_resp) - upload_resp = RegistrySessionHelper.patch( - session, - upload_url, - timeout=30, - data=chunk, - headers={ - 'Content-Length': str(chunk_length), - 'Content-Range': '%d-%d' % ( - length, length + chunk_length - 1), - 'Content-Type': 'application/octet-stream' - } - ) - length += chunk_length - - layer_digest = 'sha256:%s' % calc_digest.hexdigest() - LOG.debug('[%s] Calculated layer digest' % layer_digest) - upload_url = cls._upload_url( - target_url, session, upload_resp) - upload_resp = RegistrySessionHelper.put( - session, - upload_url, - timeout=30, - params={ - 'digest': layer_digest - }, - ) - layer['digest'] = layer_digest - layer['size'] = length - return (layer_digest, cls._build_url(target_url, target_url.path)) - - @classmethod - @tenacity.retry( # Retry up to 5 times with jittered exponential backoff - reraise=True, - retry=tenacity.retry_if_exception_type( - requests.exceptions.RequestException - ), - wait=tenacity.wait_random_exponential(multiplier=1, max=10), - stop=tenacity.stop_after_attempt(5) - ) - def _copy_local_to_registry(cls, source_url, target_url, session): - cls._assert_scheme(source_url, 'containers-storage') - cls._assert_scheme(target_url, 'docker') - - name = '%s%s' % (source_url.netloc, source_url.path) - image, manifest, config_str = cls._image_manifest_config(name) - layers_by_digest = cls._get_all_local_layers_by_digest() - - # Upload all layers - copy_jobs = [] - jobs_count = 0 - jobs_finished = 0 - with futures.ThreadPoolExecutor(max_workers=4) as p: - for layer in manifest['layers']: - layer_entry = layers_by_digest[layer['digest']] - copy_jobs.append(p.submit( - cls._copy_layer_local_to_registry, - target_url, session, layer, layer_entry - )) - jobs_count = len(copy_jobs) - LOG.debug('[%s] Waiting for %i jobs to finish' % - (name, jobs_count)) - for job in futures.as_completed(copy_jobs): - e = job.exception() - if e: - raise e - layer = job.result() - if layer: - LOG.debug('[%s] Upload complete for layer: %s' % - (name, layer)) - jobs_finished += 1 - LOG.debug('[%s] Waiting for next job: %i of %i complete' % - (name, jobs_finished, jobs_count)) - - LOG.debug('[%s] Completed %i jobs' % (name, jobs_count)) - - manifest_str = json.dumps(manifest, indent=3) - cls._copy_manifest_config_to_registry( - target_url=target_url, - manifest_str=manifest_str, - config_str=config_str, - target_session=session - ) - LOG.debug('[%s] Finished copying' % name) - - @classmethod - def _containers_file_path(cls, *path): - full_path = os.path.join('/var/lib/containers/storage/', *path) - if not os.path.exists(full_path): - raise ImageUploaderException('Missing file %s' % full_path) - return full_path - - @classmethod - def _containers_file(cls, *path): - full_path = cls._containers_file_path(*path) - - try: - with 
open(full_path, 'r') as f: - return f.read() - except Exception as e: - raise ImageUploaderException(e) - - @classmethod - def _containers_json(cls, *path): - return json.loads(cls._containers_file(*path)) - - @classmethod - def _get_all_local_layers_by_digest(cls): - all_layers = cls._containers_json('overlay-layers', 'layers.json') - layers_by_digest = {} - for x in all_layers: - if 'diff-digest' in x: - layers_by_digest[x['diff-digest']] = x - if 'compressed-diff-digest' in x: - layers_by_digest[x['compressed-diff-digest']] = x - return layers_by_digest - - @classmethod - def _get_local_layers_manifest(cls, manifest, config_str): - """Return a valid local manifest - - The manifest that is kept in the container storage is the - original manifest but the layers may be different once processed - by libpod & company. We want a valid manifest for the local - file system so we need to use the root fs layers from the container - config rather than just assuming the original manifest is still - valid. - """ - layers = cls._get_all_local_layers_by_digest() - config = json.loads(config_str) - rootfs = config.get('rootfs', {}) - layer_ids = rootfs.get('diff_ids', None) - if not layer_ids: - # TODO(aschultz): add container name/path - LOG.warning('Container missing rootfs layers') - return manifest - # clear out the manifest layers - manifest['layers'] = [] - for layer in layer_ids: - layer_digest = {'mediaType': MEDIA_BLOB} - if layer not in layers: - raise ImageNotFoundException('Unable to find layer %s in the ' - 'local layers' % layer) - layer_digest['digest'] = layer - # podman currently doesn't do compressed layers so just use - # the diff-size - layer_digest['size'] = layers[layer]['diff-size'] - manifest['layers'].append(layer_digest) - return manifest - - @classmethod - def _image_manifest_config(cls, name): - image = None - images = cls._containers_json('overlay-images', 'images.json') - for i in images: - for n in i.get('names', []): - if name == n: - image = i - break - if image: - break - if not image: - raise ImageNotFoundException('Not found image: %s' % name) - image_id = image['id'] - manifest = cls._containers_json('overlay-images', image_id, 'manifest') - config_digest = manifest['config']['digest'] - - config_id = '=' + base64.b64encode( - config_digest.encode()).decode('utf-8') - config_str = cls._containers_file('overlay-images', image_id, - config_id) - manifest = cls._get_local_layers_manifest(manifest, config_str) - manifest['config']['size'] = len(config_str) - manifest['config']['mediaType'] = MEDIA_CONFIG - return image, manifest, config_str - - @classmethod - def _inspect(cls, image_url, session=None, default_tag=False): - if image_url.scheme == 'docker': - return super(PythonImageUploader, cls)._inspect( - image_url, session=session, default_tag=default_tag) - if image_url.scheme != 'containers-storage': - raise ImageUploaderException('Inspect not implemented for %s' % - image_url.geturl()) - - name = '%s%s' % (image_url.netloc, image_url.path) - image, manifest, config_str = cls._image_manifest_config(name) - config = json.loads(config_str) - - layers = [x['digest'] for x in manifest['layers']] - i, _ = cls._image_tag_from_url(image_url) - digest = image['digest'] - created = image['created'] - labels = config['config'].get('Labels', {}) - # NOTE: labels can be null - if labels is None: - labels = {} - architecture = config['architecture'] - image_os = config['os'] - return { - 'Name': i, - 'Digest': digest, - 'RepoTags': [], - 'Created': created, - 'DockerVersion': 
'', - 'Labels': labels, - 'Architecture': architecture, - 'Os': image_os, - 'Layers': layers, - } - - @classmethod - def _delete_from_registry(cls, image_url, session=None): - if not cls._detect_target_export(image_url, session): - raise NotImplementedError( - 'Deleting not supported via the registry API') - return image_export.delete_image(image_url) - - @classmethod - def _delete(cls, image_url, session=None): - image = image_url.geturl() - LOG.info('[%s] Deleting image' % image) - if image_url.scheme == 'docker': - return cls._delete_from_registry(image_url, session) - if image_url.scheme != 'containers-storage': - raise ImageUploaderException('Delete not implemented for %s' % - image_url.geturl()) - cmd = ['buildah', 'rmi', image_url.path] - LOG.info('Running %s' % ' '.join(cmd)) - env = os.environ.copy() - try: - process = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, - universal_newlines=True) - - out, err = process.communicate() - LOG.info(out) - if process.returncode != 0: - LOG.warning('Error deleting image:\n%s\n%s' % - (' '.join(cmd), err)) - except KeyboardInterrupt: - raise Exception('Action interrupted with ctrl+c') - return out - - def cleanup(self, local_images): - if not local_images: - return [] - - for image in sorted(local_images): - if not image: - continue - LOG.info('[%s] Removing local copy of image' % image) - image_url = urlparse('containers-storage:%s' % image) - self._delete(image_url) - - def _get_executor(self): - """Get executor type based on lock object - - We check whether the lock object is unset or is a threading lock. - We cannot check directly for a ProcessLock, because importing - ProcessLock while running under Mistral breaks Mistral. - """ - if not self.lock or isinstance(self.lock, threadinglock.ThreadingLock): - # workers will scale from 2 to 8 based on the cpu count // 2 - workers = min(max(2, processutils.get_worker_count() // 2), 8) - return futures.ThreadPoolExecutor(max_workers=workers) - # there really isn't an improvement with > 4 workers due to the - # container layer overlaps. The more workers, the more - # RAM required, which can lead to OOMs.
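A minimal sketch of the executor decision described here, assuming multiprocessing.cpu_count() as a stand-in for processutils.get_worker_count() and a boolean flag in place of the isinstance check on the lock:

from concurrent import futures
import multiprocessing

def pick_executor(lock=None, lock_is_threading=True):
    if not lock or lock_is_threading:
        # Threads scale from 2 up to 8 based on half the CPU count.
        workers = min(max(2, multiprocessing.cpu_count() // 2), 8)
        return futures.ThreadPoolExecutor(max_workers=workers)
    # Process pools stay capped at 4 workers to bound RAM usage.
    return futures.ProcessPoolExecutor(max_workers=4)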
It's best to limit to 4 - return futures.ProcessPoolExecutor(max_workers=4) - - def run_tasks(self): - if not self.upload_tasks: - return - local_images = [] - - with self._get_executor() as p: - for result in p.map(upload_task, self.upload_tasks): - local_images.extend(result) - LOG.info('result %s' % local_images) - - # Do cleanup after all the uploads so common layers don't get deleted - # repeatedly - self.cleanup(local_images) - - -class UploadTask(object): - - def __init__(self, image_name, pull_source, push_destination, - append_tag, modify_role, modify_vars, cleanup, - multi_arch): - self.image_name = image_name - self.pull_source = pull_source - self.push_destination = push_destination - self.append_tag = append_tag or '' - self.modify_role = modify_role - self.modify_vars = modify_vars - self.cleanup = cleanup - self.multi_arch = multi_arch - - if ':' in image_name: - image = image_name.rpartition(':')[0] - self.source_tag = image_name.rpartition(':')[2] - else: - image = image_name - self.source_tag = 'latest' - if pull_source: - # prevent a double // in the url which causes auth problems - # with docker.io - if pull_source.endswith('/'): - pull_source = pull_source[:-1] - self.repo = pull_source + '/' + image - else: - self.repo = image - - if push_destination.endswith('/'): - push_destination = push_destination[:-1] - self.target_image_no_tag = (push_destination + '/' + - self.repo.partition('/')[2]) - self.target_tag = self.source_tag + self.append_tag - self.source_image = self.repo + ':' + self.source_tag - self.target_image_source_tag = (self.target_image_no_tag + ':' + - self.source_tag) - self.target_image = self.target_image_no_tag + ':' + self.target_tag - - image_to_url = BaseImageUploader._image_to_url - self.source_image_url = image_to_url(self.source_image) - self.target_image_url = image_to_url(self.target_image) - self.target_image_source_tag_url = image_to_url( - self.target_image_source_tag - ) - - -def upload_task(args): - uploader, task = args - return uploader.upload_image(task) - - -def discover_tag_from_inspect(args): - self, image, tag_from_label, default_tag = args - image_url = self._image_to_url(image) - username, password = self.credentials_for_registry(image_url.netloc) - try: - session = self.authenticate( - image_url, username=username, password=password) - except requests.exceptions.HTTPError as e: - if e.response.status_code == 401: - raise ImageUploaderException( - 'Unable to authenticate. This may indicate ' - 'missing registry credentials or the provided ' - 'container or namespace does not exist. %s' % e) - raise - i = self._inspect(image_url, session=session, default_tag=default_tag) - session.close() - if ':' in image_url.path: - # break out the tag from the url to be the fallback tag - path = image.rpartition(':') - fallback_tag = path[2] - image = path[0] - else: - fallback_tag = None - return image, self._discover_tag_from_inspect( - i, image, tag_from_label, fallback_tag) - - -def tags_for_image(args): - self, image, session = args - return self._tags_for_image(image, session) diff --git a/tripleo_common/image/kolla_builder.py b/tripleo_common/image/kolla_builder.py deleted file mode 100644 index ea0486472..000000000 --- a/tripleo_common/image/kolla_builder.py +++ /dev/null @@ -1,596 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -import jinja2 -import os -import re -import subprocess -import sys -import tempfile -import time -import yaml - -from osc_lib.i18n import _ -from oslo_log import log as logging -from tripleo_common.image import base -from tripleo_common.image import image_uploader -from tripleo_common.utils.locks import threadinglock - -CONTAINER_IMAGE_PREPARE_PARAM_STR = None - -CONTAINER_IMAGE_PREPARE_PARAM = None - -CONTAINER_IMAGES_DEFAULTS = None - - -def init_prepare_defaults(defaults_file): - global CONTAINER_IMAGE_PREPARE_PARAM_STR - with open(defaults_file) as f: - CONTAINER_IMAGE_PREPARE_PARAM_STR = f.read() - - global CONTAINER_IMAGE_PREPARE_PARAM - p = yaml.safe_load(CONTAINER_IMAGE_PREPARE_PARAM_STR) - CONTAINER_IMAGE_PREPARE_PARAM = p[ - 'parameter_defaults']['ContainerImagePrepare'] - - global CONTAINER_IMAGES_DEFAULTS - CONTAINER_IMAGES_DEFAULTS = CONTAINER_IMAGE_PREPARE_PARAM[0]['set'] - - -DEFAULT_TEMPLATE_DIR = os.path.join(sys.prefix, 'share', 'tripleo-common', - 'container-images') - -DEFAULT_TEMPLATE_FILE = os.path.join(DEFAULT_TEMPLATE_DIR, - 'tripleo_containers.yaml.j2') - -DEFAULT_PREPARE_FILE = os.path.join(DEFAULT_TEMPLATE_DIR, - 'container_image_prepare_defaults.yaml') - -if os.path.isfile(DEFAULT_PREPARE_FILE): - init_prepare_defaults(DEFAULT_PREPARE_FILE) - -LOG = logging.getLogger(__name__ + '.KollaImageBuilder') - - -def _filter_services(service_list, resource_registry): - if resource_registry: - for service in service_list.copy(): - env_path = resource_registry.get(service) - if env_path == 'OS::Heat::None': - service_list.remove(service) - - -def get_enabled_services(environment, roles_data): - """Build a map of role name and default enabled services - - :param environment: Heat environment for deployment - :param roles_data: Roles file data used to filter services - :returns: set of resource types representing enabled services - """ - - enabled_services = {} - parameter_defaults = environment.get('parameter_defaults', {}) - resource_registry = environment.get('resource_registry', {}) - - for role in roles_data: - count = parameter_defaults.get('%sCount' % role['name'], - role.get('CountDefault', 0)) - try: - count = int(count) - except ValueError: - raise ValueError('Unable to convert %sCount to an int: %s' % - (role['name'], count)) - - param = '%sServices' % role['name'] - if count > 0: - if param in parameter_defaults: - enabled_services[param] = parameter_defaults[param] - else: - default_services = role.get('ServicesDefault', []) - _filter_services(default_services, resource_registry) - enabled_services[param] = default_services - else: - enabled_services[param] = [] - return enabled_services - - -def build_service_filter(environment, roles_data): - """Build list of containerized services - - :param environment: Heat environment for deployment - :param roles_data: Roles file data used to filter services - :returns: set of resource types representing containerized services - """ - if not roles_data: - return None - - filtered_services = set() - enabled_services = get_enabled_services(environment, roles_data) - resource_registry = 
environment.get('resource_registry') - - for role in roles_data: - role_services = enabled_services.get( - '%sServices' % role['name'], set()) - # This filtering is probably not required, but filter if the - # {{role.name}}Services has services mapped to OS::Heat::None - _filter_services(role_services, resource_registry) - filtered_services.update(role_services) - return filtered_services - - -def set_neutron_driver(pd, mapping_args): - """Set the neutron_driver images variable based on parameters - - :param pd: Parameter defaults from the environment - :param mapping_args: Dict to set neutron_driver value on - """ - if mapping_args.get('neutron_driver'): - return - if not pd or 'NeutronMechanismDrivers' not in pd: - # we should set default neutron driver - mapping_args['neutron_driver'] = 'ovn' - else: - nmd = pd['NeutronMechanismDrivers'] - if 'ovn' in nmd: - mapping_args['neutron_driver'] = 'ovn' - else: - mapping_args['neutron_driver'] = 'other' - - -def container_images_prepare_multi(environment, roles_data, dry_run=False, - cleanup=image_uploader.CLEANUP_FULL, - lock=None): - """Perform multiple container image prepares and merge result - - Given the full heat environment and roles data, perform multiple image - prepare operations. The data to drive the multiple prepares is taken from - the ContainerImagePrepare parameter in the provided environment. If - push_destination is specified, uploads will be performed during the - preparation. - - :param environment: Heat environment for deployment - :param roles_data: Roles file data used to filter services - :param lock: a locking object to use when handling uploads - :returns: dict containing merged container image parameters from all - prepare operations - """ - - if not lock: - lock = threadinglock.ThreadingLock() - - pd = environment.get('parameter_defaults', {}) - cip = pd.get('ContainerImagePrepare') - # if user does not provide a ContainerImagePrepare, use the defaults. - if not cip: - LOG.info(_("No ContainerImagePrepare parameter defined. 
Using " - "the defaults.")) - cip = CONTAINER_IMAGE_PREPARE_PARAM - - mirrors = {} - mirror = pd.get('DockerRegistryMirror') - if mirror: - mirrors['docker.io'] = mirror - - creds = pd.get('ContainerImageRegistryCredentials') - multi_arch = len(pd.get('AdditionalArchitectures', [])) - - env_params = {} - service_filter = build_service_filter( - environment, roles_data) - - for cip_entry in cip: - mapping_args = cip_entry.get('set', {}) - set_neutron_driver(pd, mapping_args) - push_destination = cip_entry.get('push_destination') - # use the configured registry IP as the discovered registry - # if it is available - if push_destination and isinstance(push_destination, bool): - local_registry_ip = pd.get('LocalContainerRegistry') - if local_registry_ip: - push_destination = '%s:8787' % local_registry_ip - pull_source = cip_entry.get('pull_source') - modify_role = cip_entry.get('modify_role') - modify_vars = cip_entry.get('modify_vars') - modify_only_with_labels = cip_entry.get('modify_only_with_labels') - modify_only_with_source = cip_entry.get('modify_only_with_source') - modify_append_tag = cip_entry.get('modify_append_tag', - time.strftime( - '-modified-%Y%m%d%H%M%S')) - - # do not use tag_from_label if a tag is specified in the set - tag_from_label = None - if not mapping_args.get('tag'): - tag_from_label = cip_entry.get('tag_from_label') - - if multi_arch and 'multi_arch' in cip_entry: - # individual entry sets multi_arch, - # so set global multi_arch to False - multi_arch = False - - prepare_data = container_images_prepare( - excludes=cip_entry.get('excludes'), - includes=cip_entry.get('includes'), - service_filter=service_filter, - pull_source=pull_source, - push_destination=push_destination, - mapping_args=mapping_args, - output_env_file='image_params', - output_images_file='upload_data', - tag_from_label=tag_from_label, - append_tag=modify_append_tag, - modify_role=modify_role, - modify_vars=modify_vars, - modify_only_with_labels=modify_only_with_labels, - modify_only_with_source=modify_only_with_source, - mirrors=mirrors, - registry_credentials=creds, - multi_arch=multi_arch, - lock=lock - ) - env_params.update(prepare_data['image_params']) - - if not dry_run and (push_destination or pull_source or modify_role): - with tempfile.NamedTemporaryFile(mode='w') as f: - yaml.safe_dump({ - 'container_images': prepare_data['upload_data'] - }, f) - uploader = image_uploader.ImageUploadManager( - [f.name], - cleanup=cleanup, - mirrors=mirrors, - registry_credentials=creds, - multi_arch=multi_arch, - lock=lock - ) - uploader.upload() - return env_params - - -def container_images_prepare_defaults(): - """Return default dict for prepare substitutions - - This can be used as the mapping_args argument to the - container_images_prepare function to get the same result as not specifying - any mapping_args. 
- """ - return KollaImageBuilder.container_images_template_inputs() - - -def container_images_prepare(template_file=DEFAULT_TEMPLATE_FILE, - template_dir=DEFAULT_TEMPLATE_DIR, - excludes=None, includes=None, service_filter=None, - pull_source=None, push_destination=None, - mapping_args=None, output_env_file=None, - output_images_file=None, tag_from_label=None, - append_tag=None, modify_role=None, - modify_vars=None, modify_only_with_labels=None, - modify_only_with_source=None, - mirrors=None, registry_credentials=None, - multi_arch=False, lock=None): - """Perform container image preparation - - :param template_file: path to Jinja2 file containing all image entries - :param template_dir: path to Jinja2 files included in the main template - :param excludes: list of image name substrings to use for exclude filter - :param includes: list of image name substrings, at least one must match. - All excludes are ignored if includes is specified. - :param service_filter: set of heat resource types for containerized - services to filter by. Disable by passing None. - :param pull_source: DEPRECATED namespace for pulling during image uploads - :param push_destination: namespace for pushing during image uploads. When - specified the image parameters will use this - namespace too. - :param mapping_args: dict containing substitutions for template file. See - CONTAINER_IMAGES_DEFAULTS for expected keys. - :param output_env_file: key to use for heat environment parameter data - :param output_images_file: key to use for image upload data - :param tag_from_label: string when set will trigger tag discovery on every - image - :param append_tag: string to append to the tag for the destination - image - :param modify_role: string of ansible role name to run during upload before - the push to destination - :param modify_vars: dict of variables to pass to modify_role - :param modify_only_with_labels: only modify the container images with the - given labels - :param modify_only_with_source: only modify the container images from a - image_source in the tripleo-common service - to container mapping (e.g. kolla/tripleo) - :param mirrors: dict of registry netloc values to mirror urls - :param registry_credentials: dict of registry netloc values to - authentication credentials for that registry. - The value is a single-entry dict where the - username is the key and the password is the - value. 
- :param multi_arch: boolean whether to prepare every architecture of - each image - - :param lock: a locking object to use when handling uploads - :returns: dict with entries for the supplied output_env_file or - output_images_file - """ - - if mapping_args is None: - mapping_args = {} - - if not lock: - lock = threadinglock.ThreadingLock() - - def ffunc(entry): - imagename = entry.get('imagename', '') - if service_filter is not None: - # check the entry is for a service being deployed - image_services = set(entry.get('services', [])) - if not image_services.intersection(service_filter): - return None - if includes: - for p in includes: - if re.search(p, imagename): - return entry - return None - if excludes: - for p in excludes: - if re.search(p, imagename): - return None - return entry - - builder = KollaImageBuilder([template_file], template_dir) - result = builder.container_images_from_template( - filter=ffunc, **mapping_args) - - manager = image_uploader.ImageUploadManager( - mirrors=mirrors, - registry_credentials=registry_credentials, - multi_arch=multi_arch, - lock=lock - ) - uploader = manager.uploader('python') - images = [i.get('imagename', '') for i in result] - # set a flag to record whether the default tag is used or not. The - # logic here is that if the tag key is not already in mapping then it - # will be added during the template render, so default_tag is set to - # True. - default_tag = 'tag' not in mapping_args - - if tag_from_label: - image_version_tags = uploader.discover_image_tags( - images, tag_from_label, default_tag) - for entry in result: - imagename = entry.get('imagename', '') - image_no_tag = imagename.rpartition(':')[0] - if image_no_tag in image_version_tags: - entry['imagename'] = '%s:%s' % ( - image_no_tag, image_version_tags[image_no_tag]) - - images_with_labels = [] - if modify_only_with_labels: - images_with_labels = uploader.filter_images_with_labels( - images, modify_only_with_labels) - - images_with_source = [] - if modify_only_with_source: - images_with_source = [i.get('imagename') for i in result - if i.get('image_source', '') - in modify_only_with_source] - - params = {} - modify_append_tag = append_tag - for entry in result: - imagename = entry.get('imagename', '') - append_tag = '' - if modify_role and ( - (not modify_only_with_labels - and not modify_only_with_source) or - (imagename in images_with_labels or - imagename in images_with_source)): - entry['modify_role'] = modify_role - if modify_append_tag: - entry['modify_append_tag'] = modify_append_tag - append_tag = modify_append_tag - if modify_vars: - entry['modify_vars'] = modify_vars - if pull_source: - entry['pull_source'] = pull_source - if push_destination: - # substitute discovered registry if push_destination is set to true - if isinstance(push_destination, bool): - push_destination = image_uploader.get_undercloud_registry() - - entry['push_destination'] = push_destination - # replace the host portion of the imagename with the - # push_destination, since that is where they will be uploaded to - image = imagename.partition('/')[2] - imagename = '/'.join((push_destination, image)) - if 'params' in entry: - for p in entry.pop('params'): - params[p] = imagename + append_tag - if 'services' in entry: - del entry['services'] - - params.update( - detect_insecure_registries(params, lock=lock)) - - return_data = {} - if output_env_file: - return_data[output_env_file] = params - if output_images_file: - return_data[output_images_file] = result - return return_data - - -def
detect_insecure_registries(params, lock=None): - """Detect insecure registries in image parameters - - :param params: dict of container image parameters - :returns: dict containing DockerInsecureRegistryAddress parameter to be - merged into other parameters - """ - insecure = set() - uploader = image_uploader.ImageUploadManager(lock=lock).uploader('python') - for image in params.values(): - host = image.split('/')[0] - if uploader.is_insecure_registry(host): - insecure.add(host) - if not insecure: - return {} - return {'DockerInsecureRegistryAddress': sorted(insecure)} - - -class KollaImageBuilder(base.BaseImageManager): - """Build images using kolla-build""" - - @staticmethod - def imagename_to_regex(imagename): - if not imagename: - return - # remove any namespace from the start - imagename = imagename.split('/')[-1] - - # remove any tag from the end - imagename = imagename.split(':')[0] - - # remove supported base names from the start - imagename = re.sub(r'^(centos|rhel)-', '', imagename) - - # remove install_type from the start - imagename = re.sub(r'^(binary|source|rdo|rhos)-', '', imagename) - - # what remains should be acceptable as a regex to build one image - return imagename - - @staticmethod - def container_images_template_inputs(**kwargs): - '''Build the template mapping from defaults and keyword arguments. - - Defaults in CONTAINER_IMAGES_DEFAULTS are combined with keyword - arguments to return a dict that can be used to render the container - images template. Any set values for name_prefix and name_suffix are - hyphenated appropriately. - ''' - mapping = dict(kwargs) - - if CONTAINER_IMAGES_DEFAULTS is None: - return - for k, v in CONTAINER_IMAGES_DEFAULTS.items(): - mapping.setdefault(k, v) - np = mapping['name_prefix'] - if np and not np.endswith('-'): - mapping['name_prefix'] = np + '-' - ns = mapping['name_suffix'] - if ns and not ns.startswith('-'): - mapping['name_suffix'] = '-' + ns - return mapping - - def container_images_from_template(self, filter=None, **kwargs): - '''Build container_images data from container_images_template. - - Any supplied keyword arguments are used for the substitution mapping to - transform the data in the config file container_images_template - section. - - The resulting data resembles a config file which contains a valid - populated container_images section. - - If a function is passed to the filter argument, this will be used to - modify the entry after substitution. If the filter function returns - None then the entry will not be added to the resulting list. - - Defaults are applied so that the template renders even when no - arguments are provided.
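A self-contained sketch of the render-then-filter flow this method implements, using an inline template in place of tripleo_containers.yaml.j2:

import jinja2
import yaml

TEMPLATE = """
container_images:
- imagename: {{ namespace }}/{{ name_prefix }}nova-api:{{ tag }}
  image_source: kolla
"""

def images_from_template(mapping, filter=None):
    rendered = jinja2.Template(TEMPLATE).render(mapping)
    result = []
    for entry in yaml.safe_load(rendered)['container_images']:
        entry = dict(entry)
        if filter:
            entry = filter(entry)  # filter may modify or drop the entry
        if entry is not None:
            result.append(entry)
    return result

# e.g. images_from_template({'namespace': 'quay.io/example',
#                            'name_prefix': 'openstack-', 'tag': 'latest'})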
- ''' - mapping = self.container_images_template_inputs(**kwargs) - result = [] - - if len(self.config_files) != 1: - raise ValueError('A single config file must be specified') - config_file = self.config_files[0] - template_dir = self.template_dir - - with open(config_file) as cf: - if template_dir is not None: - template = jinja2.Environment(loader=jinja2.FileSystemLoader( - template_dir)).from_string(cf.read()) - else: - template = jinja2.Template(cf.read()) - - rendered = template.render(mapping) - rendered_dict = yaml.safe_load(rendered) - for i in rendered_dict[self.CONTAINER_IMAGES_TEMPLATE]: - entry = dict(i) - if filter: - entry = filter(entry) - if entry is not None: - result.append(entry) - return result - - def build_images(self, kolla_config_files=None, excludes=[], - template_only=False, kolla_tmp_dir=None): - - cmd = ['kolla-build'] - if kolla_config_files: - for f in kolla_config_files: - cmd.append('--config-file') - cmd.append(f) - - if len(self.config_files) == 0: - self.config_files = [DEFAULT_TEMPLATE_FILE] - self.template_dir = DEFAULT_TEMPLATE_DIR - container_images = self.container_images_from_template() - else: - container_images = self.load_config_files(self.CONTAINER_IMAGES) \ - or [] - container_images.sort(key=lambda i: i.get('imagename')) - for i in container_images: - # Do not attempt to build containers that are not from kolla or - # are in our exclude list - if not i.get('image_source', '') == 'kolla': - continue - image = self.imagename_to_regex(i.get('imagename')) - # Make sure the image was properly parsed and not purposely skipped - if image and image not in excludes: - # NOTE(mgoddard): Use a full string match. - cmd.append("^%s$" % image) - - if template_only: - # build the dep list cmd line - cmd_deps = list(cmd) - cmd_deps.append('--list-dependencies') - # build the template only cmd line - cmd.append('--template-only') - cmd.append('--work-dir') - cmd.append(kolla_tmp_dir) - - LOG.info(_('Running %s'), ' '.join(cmd)) - env = os.environ.copy() - process = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, - universal_newlines=True) - out, err = process.communicate() - if process.returncode != 0: - LOG.error(_('Building containers image process failed with %d rc'), - process.returncode) - raise subprocess.CalledProcessError(process.returncode, cmd, err) - - if template_only: - self.logger.info('Running %s', ' '.join(cmd_deps)) - env = os.environ.copy() - process = subprocess.Popen(cmd_deps, env=env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - out, err = process.communicate() - if process.returncode != 0: - raise subprocess.CalledProcessError(process.returncode, - cmd_deps, err) - return out diff --git a/tripleo_common/inventories.py b/tripleo_common/inventories.py deleted file mode 100644 index 7d469ed18..000000000 --- a/tripleo_common/inventories.py +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
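The inventory code that begins here serializes OrderedDict data with a yaml.SafeDumper subclass, the same TemplateDumper pattern defined just below. A standalone usage sketch; passing data.items() to represent_dict is what preserves insertion order:

from collections import OrderedDict
import yaml

class Dumper(yaml.SafeDumper):
    def represent_ordered_dict(self, data):
        # items() views bypass PyYAML's key sorting
        return self.represent_dict(data.items())

Dumper.add_representer(OrderedDict, Dumper.represent_ordered_dict)

inventory = OrderedDict([
    ('Undercloud', {'hosts': {'undercloud': {}}}),
    ('Controller', {'hosts': {'controller-0': {}}}),
])
print(yaml.dump(inventory, Dumper=Dumper, default_flow_style=False))
# keys stay in insertion order instead of being sorted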
- -from collections import OrderedDict -import os -import tempfile -import yaml - - -class TemplateDumper(yaml.SafeDumper): - def represent_ordered_dict(self, data): - return self.represent_dict(data.items()) - - -TemplateDumper.add_representer(OrderedDict, - TemplateDumper.represent_ordered_dict) - - -class TripleoInventories(object): - def __init__(self, stack_to_inv_obj_map): - """ - Input: a mapping of stack->TripleoInventory objects, e.g. - stack_to_inv_obj_map['central'] = TripleoInventory('central') - stack_to_inv_obj_map['edge0'] = TripleoInventory('edge0') - """ - self.stack_to_inv_obj_map = stack_to_inv_obj_map - - def _merge(self, dynamic=True): - """Merge TripleoInventory objects""" - inventory = OrderedDict() - if dynamic: - inventory['_meta'] = {'hostvars': {}} - for stack, inv_obj in self.stack_to_inv_obj_map.items(): - # convert each inventory object into an ordered dict - inv = inv_obj.list(dynamic) - # only want one undercloud, shouldn't matter which - if 'Undercloud' not in inventory.keys(): - inventory['Undercloud'] = inv['Undercloud'] - if dynamic: - inventory['Undercloud']['hosts'] = ['undercloud'] - else: - inventory['Undercloud']['hosts'] = {'undercloud': {}} - # add 'plans' to create a list to append to - inventory['Undercloud']['vars']['plans'] = [] - - # save the plan for this stack in the plans list - plan = inv['Undercloud']['vars'].get('plan') - if plan is not None: - inventory['Undercloud']['vars']['plans'].append(plan) - - for key in inv.keys(): - if key != 'Undercloud': - new_key = stack + '_' + key - - if key not in ('_meta', stack): - # Merge into a top level group - if dynamic: - inventory.setdefault(key, {'children': []}) - inventory[key]['children'].append(new_key) - inventory[key]['children'].sort() - else: - inventory.setdefault(key, {'children': {}}) - inventory[key]['children'][new_key] = {} - if 'children' in inv[key].keys(): - roles = [] - for child in inv[key]['children']: - roles.append(stack + '_' + child) - roles.sort() - if dynamic: - inventory[new_key] = { - 'children': roles - } - else: - inventory[new_key] = { - 'children': {x: {} for x in roles} - } - if 'vars' in inv[key]: - inventory[new_key]['vars'] = inv[key]['vars'] - if key == 'allovercloud': - # useful to have just stack name refer to children - if dynamic: - inventory[stack] = {'children': [new_key]} - else: - inventory[stack] = {'children': {new_key: {}}} - else: - if key != '_meta': - inventory[new_key] = inv[key] - elif dynamic: - inventory['_meta']['hostvars'].update( - inv['_meta'].get('hostvars', {}) - ) - - # 'plan' doesn't make sense when using multiple plans - if len(self.stack_to_inv_obj_map) > 1: - del inventory['Undercloud']['vars']['plan'] - # sort plans list for consistency - inventory['Undercloud']['vars']['plans'].sort() - return inventory - - def list(self, dynamic=True): - return self._merge(dynamic) - - def write_static_inventory(self, inventory_file_path, extra_vars=None): - """Convert OrderedDict inventory to static yaml format in a file.""" - allowed_extensions = ('.yaml', '.yml', '.json') - if not os.path.splitext(inventory_file_path)[1] in allowed_extensions: - raise ValueError("Path %s does not end with one of %s extensions" - % (inventory_file_path, - ",".join(allowed_extensions))) - - inventory = self._merge(dynamic=False) - - if extra_vars: - for var, value in extra_vars.items(): - if var in inventory: - inventory[var]['vars'].update(value) - - # Atomic update as concurrent tripleoclient commands can call this - inventory_file_dir = 
os.path.dirname(inventory_file_path) - with tempfile.NamedTemporaryFile( - 'w', - dir=inventory_file_dir, - delete=False) as inventory_file: - yaml.dump(inventory, inventory_file, TemplateDumper) - os.rename(inventory_file.name, inventory_file_path) - - def host(self): - # Dynamic inventory scripts must return empty json if they don't - # provide detailed info for hosts: - # http://docs.ansible.com/ansible/developing_inventory.html - return {} diff --git a/tripleo_common/inventory.py b/tripleo_common/inventory.py deleted file mode 100644 index 6902e1621..000000000 --- a/tripleo_common/inventory.py +++ /dev/null @@ -1,846 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from collections import OrderedDict -import copy -import logging -import os -import sys -import tempfile -import yaml - -from heatclient.exc import HTTPNotFound -import keystoneauth1 -import openstack - -from tripleo_common import exception -from tripleo_common import constants -from tripleo_common import inventories - -HOST_NETWORK = 'ctlplane' -DEFAULT_DOMAIN = 'localdomain.' - -UNDERCLOUD_CONNECTION_SSH = 'ssh' - -UNDERCLOUD_CONNECTION_LOCAL = 'local' - -logging.basicConfig() -LOG = logging.getLogger(__name__) -LOG.setLevel(logging.INFO) - - -class TemplateDumper(yaml.SafeDumper): - def represent_ordered_dict(self, data): - return self.represent_dict(data.items()) - - -TemplateDumper.add_representer(OrderedDict, - TemplateDumper.represent_ordered_dict) - - -class StackOutputs(object): - """Item getter for stack outputs. - - It takes a long time to resolve stack outputs. This class ensures that - we only have to do it once and then reuse the results from that call in - subsequent lookups. It also lazy loads the outputs so we don't spend time - on unnecessary Heat calls. - """ - - def __init__(self, stack): - self.outputs = {} - self.stack = stack - - def _load_outputs(self): - """Load outputs from the stack if necessary - - Retrieves the stack outputs if that has not already happened. If it - has then this is a noop. - """ - if not self.outputs: - self.outputs = {i['output_key']: i['output_value'] - for i in self.stack.outputs - } - - def __getitem__(self, key): - self._load_outputs() - return self.outputs[key] - - def __iter__(self): - self._load_outputs() - return iter(self.outputs.keys()) - - def get(self, key, default=None): - try: - self.__getitem__(key) - except KeyError: - pass - return self.outputs.get(key, default) - - -class NeutronData(object): - """Neutron inventory data. - - A data object with neutron data enriched for inventory generation. - """ - def __init__(self, networks, subnets, ports, host_network=None): - self.networks = networks - self.subnets = subnets - self.ports = ports - self.host_network = host_network or HOST_NETWORK - self.networks_by_id = self._networks_by_id() - self.subnets_by_id = self._subnets_by_id() - self.ports_by_role_and_host = self._ports_by_role_and_host() - - def _tags_to_dict(self, tags): - tag_dict = dict() - for tag in tags: - if not tag.startswith('tripleo_'): - continue - try: - key, value = tag.rsplit('=') - except ValueError: - continue - - # Make booleans type bool - value = True if value in {'True', 'true', True} else value - value = False if value in {'False', 'false', False} else value - - # Convert network index value to integer - if key == 'tripleo_net_idx': - value = int(value) - - tag_dict.update({key: value}) - - return tag_dict - - def _ports_by_role_and_host(self): - mandatory_tags = {'tripleo_role'} - - ports_by_role_and_host = {} - for port in self.ports: - # Ignore ports in networks we ignore, i.e. the OVN Bridge MAC net - if port.network_id not in self.networks_by_id: - continue - - tags = self._tags_to_dict(port.tags) - - # In case of missing required tags, raise an error. - # neutron is useless as an inventory source in this case. - if not mandatory_tags.issubset(tags): - raise exception.MissingMandatoryNeutronResourceTag() - - hostname = port.dns_name - # Strip the final fqdn dot of the hostname - # See: https://bugs.launchpad.net/tripleo/+bug/1928869 - if hostname.endswith('.'): - hostname = hostname[:-1] - - network_id = port.network_id - network = self.networks_by_id[network_id] - fixed_ips = port.fixed_ips[0] - subnet_id = fixed_ips.get('subnet_id') - subnet = self.subnets_by_id[subnet_id] - - # "TripleO" cidr is the number of bits in the network mask - cidr = subnet['cidr'].split('/')[1] - dns_domain = network['dns_domain'] - # Strip the final fqdn dot of the dns name - # See: https://bugs.launchpad.net/tripleo/+bug/1928869 - if dns_domain.endswith('.'): - dns_domain = dns_domain[:-1] - - dns_nameservers = subnet['dns_nameservers'] - mtu = network['mtu'] - net_name = network['name'] - ip_address = fixed_ips.get('ip_address') - gateway_ip = subnet['gateway_ip'] - # Need deepcopy here so that adding a default entry does not end - # up in the subnet object and leak to other nodes with a different - # default route network. - host_routes = copy.deepcopy(subnet['host_routes']) - # If this is the default route network, add a default route using - # gateway_ip to the host_routes unless it's already present - if tags.get('tripleo_default_route'): - host_routes.append({'default': True, 'nexthop': gateway_ip}) - - vlan_id = subnet['tags'].get('tripleo_vlan_id', - constants.DEFAULT_VLAN_ID) - role_name = tags['tripleo_role'] - - role = ports_by_role_and_host.setdefault(role_name, {}) - host = role.setdefault(hostname, []) - host.append( - {'name': port.name, - 'hostname': hostname, - 'dns_domain': dns_domain, - 'network_id': network_id, - 'network_name': net_name, - 'fixed_ips': port.fixed_ips, - 'subnet_id': subnet_id, - 'ip_address': ip_address, - 'mtu': mtu, - 'cidr': cidr, - 'gateway_ip': gateway_ip, - 'dns_nameservers': dns_nameservers, - 'host_routes': host_routes, - 'vlan_id': vlan_id, - 'tags': tags} - ) - - return ports_by_role_and_host - - def _networks_by_id(self): - mandatory_tags = {'tripleo_network_name'} - networks_by_id = {} - for net in self.networks: - # Don't include the OVN Bridge MAC address net - if net.name == constants.OVN_MAC_ADDR_NET_NAME: - continue - - tags = self._tags_to_dict(net.tags) - # In case of missing required tags, raise an error. - # neutron is useless as an inventory source in this case. - if (net.name != self.host_network and - not mandatory_tags.issubset(tags)): - raise exception.MissingMandatoryNeutronResourceTag() - - if net.name != self.host_network: - name_upper = tags['tripleo_network_name'] - else: - name_upper = self.host_network - networks_by_id.update( - {net.id: {'name': net.name, - 'name_upper': name_upper, - 'subnet_ids': net.subnet_ids, - 'mtu': net.mtu, - 'dns_domain': net.dns_domain, - 'tags': tags} - } - ) - - return networks_by_id - - def _subnets_by_id(self): - subnets_by_id = {} - for subnet in self.subnets: - subnets_by_id.update( - {subnet.id: {'name': subnet.name, - 'network_id': subnet.network_id, - 'ip_version': subnet.ip_version, - 'gateway_ip': subnet.gateway_ip, - 'cidr': subnet.cidr, - 'host_routes': subnet.host_routes, - 'dns_nameservers': subnet.dns_nameservers, - 'tags': self._tags_to_dict(subnet.tags)} - } - ) - - return subnets_by_id - - -class TripleoInventory(object): - def __init__(self, cloud_name=None, session=None, - hclient=None, plan_name=None, - auth_url=None, project_name=None, cacert=None, - username=None, ansible_ssh_user=None, - host_network=None, ansible_python_interpreter=None, - undercloud_connection=UNDERCLOUD_CONNECTION_LOCAL, - undercloud_key_file=None, serial=1, work_dir=None): - - self.connection = None - if cloud_name: - self.connection = openstack.connect(cloud=cloud_name) - elif session: - self.connection = openstack.connection.Connection(session=session) - - self.hclient = hclient - self.host_network = host_network or HOST_NETWORK - self.ansible_ssh_user = ansible_ssh_user - self.undercloud_key_file = undercloud_key_file - self.plan_name = self.plan_group_name = plan_name - self.ansible_python_interpreter = ansible_python_interpreter - self.hostvars = {} - self.undercloud_connection = undercloud_connection - self.serial = serial - self.work_dir = work_dir - - @staticmethod - def get_roles_by_service(enabled_services): - # Flatten the lists of services for each role into a set - services = set( - [item for role_services in enabled_services.values() - for item in role_services]) - - roles_by_services = {} - for service in services: - roles_by_services[service] = [] - for role, val in enabled_services.items(): - if
service in val: - roles_by_services[service].append(role) - roles_by_services[service] = sorted(roles_by_services[service]) - return roles_by_services - - def get_overcloud_environment(self): - try: - environment = self.hclient.stacks.environment(self.plan_name) - return environment - except (HTTPNotFound, - keystoneauth1.exceptions.catalog.EndpointNotFound): - return {} - - UNDERCLOUD_SERVICES = ['tripleo_ironic_conductor'] - - def get_undercloud_service_list(self): - """Return list of undercloud services - currently static - - Replace this when we have a better way - e.g. heat deploys undercloud - """ - return self.UNDERCLOUD_SERVICES - - def _hosts(self, alist, dynamic=True): - """Static yaml inventories require a different hosts format?!""" - if not dynamic: - return {x: {} for x in alist} - return alist - - def _get_stack(self): - if self.plan_name is None: - return None - try: - stack = self.hclient.stacks.get(self.plan_name) - except (HTTPNotFound, - keystoneauth1.exceptions.catalog.EndpointNotFound): - stack = None - - return stack - - def _inventory_from_heat_outputs(self, ret, children, dynamic): - if not self.stack: - return - - vip_map = self.stack_outputs.get('VipMap', {}) - role_net_ip_map = self.stack_outputs.get('RoleNetIpMap', {}) - role_node_id_map = self.stack_outputs.get('ServerIdData', {}) - networks = set() - role_net_hostname_map = self.stack_outputs.get( - 'RoleNetHostnameMap', {}) - for role_name, hostnames in role_net_hostname_map.items(): - if not hostnames: - continue - - net_ip_map = role_net_ip_map[role_name] - try: - ips = net_ip_map[self.host_network] - except KeyError: - LOG.warning( - "Network key %s not found, check role data for %s", - self.host_network, - role_name - ) - continue - - if not ips: - raise Exception("No IPs found for %s role on %s network" % - (role_name, self.host_network)) - - net_hostname_map = role_net_hostname_map[role_name] - bootstrap_server_id = role_node_id_map.get('bootstrap_server_id') - node_id_map = role_node_id_map.get('server_ids') - if node_id_map: - srv_id_map = node_id_map.get(role_name) - - role_networks = sorted([str(net) for net in net_ip_map]) - networks.update(role_networks) - - # Undercloud role in the stack should overwrite, not append. - # See bug: https://bugs.launchpad.net/tripleo/+bug/1913551 - if role_name == 'Undercloud': - role = ret[role_name] = {} - else: - role = ret.setdefault(role_name, {}) - - hosts = role.setdefault('hosts', {}) - role_vars = role.setdefault('vars', {}) - - role_vars.setdefault('ansible_ssh_user', self.ansible_ssh_user) - role_vars.setdefault('bootstrap_server_id', bootstrap_server_id) - role_vars.setdefault('tripleo_role_name', role_name) - role_vars.setdefault('tripleo_role_networks', role_networks) - role_vars.setdefault('serial', self.serial) - - if self.ansible_python_interpreter: - role_vars.setdefault('ansible_python_interpreter', - self.ansible_python_interpreter) - - names = hostnames.get(self.host_network) or [] - shortnames = [n.split(".%s."
% self.host_network)[0].lower() - for n in names] - - for idx, name in enumerate(shortnames): - host = hosts.setdefault(name, {}) - host.setdefault('ansible_host', ips[idx]) - - if srv_id_map: - host.setdefault('deploy_server_id', srv_id_map[idx]) - - # Add variable for IP on each network - for net in net_ip_map: - host.setdefault('{}_ip'.format(net), net_ip_map[net][idx]) - - # Add variables for hostname on each network - for net in net_hostname_map: - host.setdefault( - '{}_hostname'.format(net), net_hostname_map[net][idx]) - - children.add(role_name) - - self.hostvars.update(hosts) - - if dynamic: - hosts_format = [h for h in hosts.keys()] - hosts_format.sort() - ret[role_name]['hosts'] = hosts_format - - if children: - plan_group = ret.setdefault(self.plan_name, {}) - overcloud_vars = plan_group.setdefault('vars', {}) - - for vip_name, vip in vip_map.items(): - if vip and (vip_name in networks or vip_name == 'redis'): - overcloud_vars.setdefault('{}_vip'.format(vip_name), vip) - - overcloud_vars.setdefault( - 'container_cli', self.get_overcloud_environment().get( - 'parameter_defaults', {}).get('ContainerCli')) - - plan_group.setdefault( - 'children', self._hosts(sorted(children), dynamic) - ) - - ret.setdefault( - 'allovercloud', { - 'children': self._hosts([self.plan_name], dynamic) - } - ) - - # Associate services with roles - roles_by_service = self.get_roles_by_service( - self.stack_outputs.get('EnabledServices', {})) - - # tripleo-groups map to ceph-ansible groups as follows - ceph_group_map = { - 'ceph_mon': 'mons', - 'ceph_osd': 'osds', - 'ceph_mgr': 'mgrs', - 'ceph_rgw': 'rgws', - 'ceph_mds': 'mdss', - 'ceph_nfs': 'nfss', - 'ceph_client': 'clients', - 'ceph_rbdmirror': 'rbdmirrors', - 'ceph_grafana': 'grafana-server' - } - # add a ceph-ansible compatible group to the inventory - # which has the same roles. E.g. if the inventory has - # a group 'ceph_mon' which has children and vars, then - # the inventory will now also have a group 'mons' with - # the same children and vars.
- for service, roles in roles_by_service.copy().items(): - if service in ceph_group_map.keys(): - roles_by_service[ceph_group_map[service]] = roles - - for service, roles in roles_by_service.items(): - service_children = [role for role in roles - if ret.get(role) is not None] - if service_children: - svc_host = ret.setdefault(service.lower(), {}) - svc_host_vars = svc_host.setdefault('vars', {}) - svc_host.setdefault('children', self._hosts(service_children, - dynamic)) - svc_host_vars.setdefault('ansible_ssh_user', - self.ansible_ssh_user) - if self.ansible_python_interpreter: - svc_host_vars.setdefault('ansible_python_interpreter', - self.ansible_python_interpreter) - - excluded_hosts = self.stack_outputs.get('BlacklistedHostnames', {}) - excluded_overcloud = ret.setdefault('excluded_overcloud', {}) - exclude = excluded_overcloud.setdefault('hosts', {}) - for hostname in excluded_hosts: - if hostname: - exclude[hostname] = {} - - def _get_neutron_data(self): - if not self.connection: - LOG.info("Session not set, neutron data will not be used to build " - "the inventory.") - return - - try: - tags_filter = ['tripleo_stack_name={}'.format(self.plan_name)] - ports = list(self.connection.network.ports(tags=tags_filter)) - if not ports: - return None - - # Filter tripleo_service_vip and tripleo_vip_net ports - ports = [p for p in ports - if not any("tripleo_service_vip" in tag for tag in p.tags) - and not any("tripleo_vip_net" in tag for tag in p.tags)] - - # NOTE(hjensas): ctlplane ports created by THT Heat Server - # resources, or nova less without --network-ports/--network-config - # enabled, does not have the 'tripleo_stack_name' tag. We - # shouldn't use neutron as a source if no ctlplane ports are - # tagged with the 'tripleo_stack_name'. - # See bug: https://bugs.launchpad.net/tripleo/+bug/1928926 - found_ctlplane_port = False - ctlplane_net = self.connection.network.find_network( - self.host_network) - if ctlplane_net: - for p in ports: - if p.network_id == ctlplane_net.id: - found_ctlplane_port = True - break - else: - LOG.warning("Host SSH network %s not found in neutron, not " - "using neutron data for inventory", - self.host_network) - if not found_ctlplane_port: - return None - - networks = [self.connection.network.find_network(p.network_id) - for p in ports] - subnets = [] - for net in networks: - subnets.extend(self.connection.network.subnets( - network_id=net.id)) - - data = NeutronData(networks, subnets, ports) - except exception.MissingMandatoryNeutronResourceTag: - # In case of missing required tags, neutron is useless as an - # inventory source, log warning and return None to disable the - # neutron source. - LOG.warning("Neutron resource without mandatory tags present. " - "Disabling use of neutron as a source for inventory " - "generation.") - return None - except (openstack.connection.exceptions.EndpointNotFound, - openstack.exceptions.ResourceNotFound, - keystoneauth1.exceptions.catalog.EndpointNotFound): - LOG.warning("Neutron service not installed. 
Disabling use of " - "neutron as a source for inventory generation.") - return None - - return data - - def _add_host_from_neutron_data(self, host, ports, role_networks, - role_vars): - for port in ports: - net_name = port['network_name'] - - # Add network name to tripleo_role_networks variable - if net_name not in role_networks: - role_networks.append(net_name) - - # Append to role_vars if not already present - net_config_keys = {'cidr', 'dns_nameservers', 'gateway_ip', - 'host_routes', 'vlan_id'} - for key in net_config_keys: - var = '{}_{}'.format(net_name, key) - if var not in role_vars: - role_vars.setdefault(var, port[key]) - - # Add variable for hostname on network - host.setdefault('{}_hostname'.format(net_name), '.'.join( - [port['hostname'], port['dns_domain']])) - - # Add variable for IP address on networks - host.setdefault('{}_ip'.format(net_name), port['ip_address']) - - if net_name == self.host_network: - # Add variable for ansible_host - host.setdefault('ansible_host', port['ip_address']) - - # Add variable for canonical hostname - dns_domain = port.get('dns_domain') - if dns_domain: - canonical_dns_domain = dns_domain.partition('.')[-1] - else: - canonical_dns_domain = DEFAULT_DOMAIN - host.setdefault('canonical_hostname', '.'.join( - [port['hostname'], canonical_dns_domain])) - - def _inventory_from_neutron_data(self, ret, children, dynamic): - if not self.neutron_data: - return - ports_by_role_and_host = self.neutron_data.ports_by_role_and_host - networks_by_id = self.neutron_data.networks_by_id - - netname_by_idx = { - net['tags'].get('tripleo_net_idx'): - net['tags'].get('tripleo_network_name') - for _, net in networks_by_id.items() - if net['name'] != self.host_network} - networks_all = [netname_by_idx[idx] for idx in sorted(netname_by_idx)] - networks_lower = {net['name_upper']: net['name'] - for _, net in networks_by_id.items()} - networks_upper = {net['name']: net['name_upper'] - for _, net in networks_by_id.items()} - - for role_name, ports_by_host in ports_by_role_and_host.items(): - role = ret.setdefault(role_name, {}) - hosts = role.setdefault('hosts', {}) - role_vars = role.setdefault('vars', {}) - role_vars.setdefault('tripleo_role_name', role_name) - role_vars.setdefault('ansible_ssh_user', self.ansible_ssh_user) - role_vars.setdefault('serial', self.serial) - role_networks = role_vars.setdefault('tripleo_role_networks', []) - for hostname, ports in ports_by_host.items(): - host = hosts.setdefault(hostname, {}) - self._add_host_from_neutron_data(host, ports, role_networks, - role_vars) - - # The nic config templates use ctlplane_subnet_cidr, not - # ctlplane_cidr. Handle the special case. 
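    # Illustration (hypothetical values): with host_network 'ctlplane' and
    # role_vars already carrying 'ctlplane_cidr': '192.0.2.0/24', the
    # setdefault below additionally publishes
    #     'ctlplane_subnet_cidr': '192.0.2.0/24'
    # so nic config templates that look up <net>_subnet_cidr keep resolving.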
- role_vars.setdefault(self.host_network + '_subnet_cidr', - role_vars[self.host_network + '_cidr']) - role_vars.setdefault('tripleo_role_networks', - sorted(role_networks)) - role_vars.setdefault( - 'role_networks', - [networks_upper[net] for net in role_networks - if net != self.host_network]) - role_vars.setdefault('networks_all', networks_all) - role_vars.setdefault('networks_lower', networks_lower) - - for _, net in networks_by_id.items(): - role_vars.setdefault(net['name'] + '_mtu', net['mtu']) - - children.add(role_name) - self.hostvars.update(hosts) - - if dynamic: - hosts_format = [h for h in hosts.keys()] - hosts_format.sort() - ret[role_name]['hosts'] = hosts_format - - if children: - ret.setdefault( - self.plan_name, { - 'children': self._hosts(sorted(children), dynamic) - } - ) - ret.setdefault( - 'allovercloud', { - 'children': self._hosts([self.plan_name], dynamic) - } - ) - - def _extend_inventory(self, ret, dynamic, data=None): - if not data: - return - - for role_name, role_values in data.items(): - inventory_role = ret.get(role_name) - if not inventory_role: - continue - inventory_hosts = inventory_role.get('hosts', {}) - inventory_vars = inventory_role.get('vars', {}) - - config_file_hosts = role_values.get('hosts', {}) - config_file_vars = role_values.get('vars', {}) - - for k, v in config_file_vars.items(): - inventory_vars.setdefault(k, v) - - for config_file_host, host_values in config_file_hosts.items(): - inventory_host = inventory_hosts.get(config_file_host, {}) - if not inventory_host: - continue - - for k, v in host_values.items(): - inventory_host.setdefault(k, v) - - self.hostvars.update(inventory_hosts) - if dynamic: - hosts_format = [h for h in inventory_hosts.keys()] - hosts_format.sort() - ret[role_name]['hosts'] = hosts_format - - def _get_data_from_config_file(self): - if not self.plan_name: - return - if not self.work_dir: - return - - data_file_path = os.path.join(self.work_dir, - constants.INVENTORY_NETWORK_CONFIG_FILE) - if not os.path.isfile(data_file_path): - return - with open(data_file_path, 'r') as f: - data = yaml.safe_load(f.read()) - - return data - - def _undercloud_inventory(self, ret, dynamic): - undercloud = ret.setdefault('Undercloud', {}) - undercloud.setdefault('hosts', self._hosts(['undercloud'], dynamic)) - _vars = undercloud.setdefault('vars', {}) - _vars.setdefault('ansible_host', 'localhost') - _vars.setdefault('ansible_connection', self.undercloud_connection) - # see https://github.com/ansible/ansible/issues/41808 - _vars.setdefault('ansible_remote_tmp', '/tmp/ansible-${USER}') - # We don't want to let UC fail, especially when it comes to - # external tasks. - # See https://bugs.launchpad.net/tripleo/+bug/1960518 for context - _vars.setdefault('any_error_fatal', True) - _vars.setdefault('max_fail_percentage', 0) - - if self.ansible_python_interpreter: - _vars.setdefault('ansible_python_interpreter', - self.ansible_python_interpreter) - else: - _vars.setdefault('ansible_python_interpreter', sys.executable) - - if self.undercloud_connection == UNDERCLOUD_CONNECTION_SSH: - _vars.setdefault('ansible_ssh_user', self.ansible_ssh_user) - if self.undercloud_key_file: - _vars.setdefault('ansible_ssh_private_key_file', - self.undercloud_key_file) - - _vars.setdefault('undercloud_service_list', - self.get_undercloud_service_list()) - - # Remaining variables need the stack to be resolved ... 
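    # Taken together, the defaults above produce an undercloud entry roughly
    # like the following (a sketch in the static format; exact values depend
    # on configuration, and the interpreter path is an assumption):
    #
    #   Undercloud:
    #     hosts: {'undercloud': {}}
    #     vars:
    #       ansible_host: localhost
    #       ansible_connection: local        # or 'ssh', per undercloud_connection
    #       ansible_remote_tmp: /tmp/ansible-${USER}
    #       any_error_fatal: true
    #       max_fail_percentage: 0
    #       ansible_python_interpreter: /usr/bin/python3
    #       undercloud_service_list: [tripleo_ironic_conductor]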
- if not self.stack: - return - - _vars.setdefault('plan', self.plan_name) - - admin_password = self.get_overcloud_environment().get( - 'parameter_defaults', {}).get('AdminPassword') - if admin_password: - _vars.setdefault('overcloud_admin_password', admin_password) - - keystone_url = self.stack_outputs.get('KeystoneURL') - if keystone_url: - _vars.setdefault('overcloud_keystone_url', keystone_url) - - endpoint_map = self.stack_outputs.get('EndpointMap') - if endpoint_map: - horizon_endpoint = endpoint_map.get('HorizonPublic', {}).get('uri') - if horizon_endpoint: - _vars.setdefault('overcloud_horizon_url', horizon_endpoint) - - def list(self, dynamic=True): - ret = OrderedDict() - if dynamic: - # Prevent Ansible from repeatedly calling us to get empty host - # details - ret.setdefault('_meta', {'hostvars': self.hostvars}) - - children = set() - - self.stack = self._get_stack() - self.stack_outputs = StackOutputs(self.stack) - - self.neutron_data = self._get_neutron_data() - - if self.stack is None and self.neutron_data is None: - LOG.warning("Stack not found: %s. No data found in neither " - "neutron or heat. Only the undercloud will be added " - "to the inventory.", self.plan_name) - - self._undercloud_inventory(ret, dynamic) - self._inventory_from_neutron_data(ret, children, dynamic) - self._inventory_from_heat_outputs(ret, children, dynamic) - self._extend_inventory(ret, dynamic, - data=self._get_data_from_config_file()) - - return ret - - def host(self): - # NOTE(mandre) - # Dynamic inventory scripts must return empty json if they don't - # provide detailed info for hosts: - # http://docs.ansible.com/ansible/developing_inventory.html - return {} - - def write_static_inventory(self, inventory_file_path, extra_vars=None): - """Convert inventory list to static yaml format in a file.""" - allowed_extensions = ('.yaml', '.yml', '.json') - if not os.path.splitext(inventory_file_path)[1] in allowed_extensions: - raise ValueError("Path %s does not end with one of %s extensions" - % (inventory_file_path, - ",".join(allowed_extensions))) - - # For some reason the json/yaml format needed for static and - # dynamic inventories is different for the hosts/children?! 
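    # The format difference the comment above refers to, side by side
    # (hypothetical hosts):
    #
    #   dynamic (JSON for Ansible):  'Compute': {'hosts': ['cp-0', 'cp-1']}
    #   static YAML inventory:       'Compute': {'hosts': {'cp-0': {}, 'cp-1': {}}}
    #
    # In addition, only the dynamic output carries a top-level
    # '_meta': {'hostvars': {...}} entry, so Ansible does not re-invoke
    # the inventory script once per host.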
- inventory = self.list(dynamic=False) - - if extra_vars: - for var, value in extra_vars.items(): - if var in inventory: - inventory[var]['vars'].update(value) - - # Atomic update as concurrent tripleoclient commands can call this - inventory_file_dir = os.path.dirname(inventory_file_path) - with tempfile.NamedTemporaryFile( - 'w', - dir=inventory_file_dir, - delete=False) as inventory_file: - yaml.dump(inventory, inventory_file, TemplateDumper) - os.rename(inventory_file.name, inventory_file_path) - - -def generate_tripleo_ansible_inventory(heat=None, - auth_url=None, - username=None, - project_name=None, - cacert=None, - plan='overcloud', - work_dir=None, - ansible_python_interpreter=None, - ansible_ssh_user='tripleo-admin', - undercloud_key_file=None, - ssh_network='ctlplane', - session=None, - cloud_name='undercloud'): - if not work_dir: - work_dir = os.path.join(os.path.expanduser('~'), - 'overcloud-deploy-{}'.format(plan)) - if not os.path.isdir(work_dir): - work_dir = tempfile.mkdtemp(prefix='tripleo-ansible') - - inventory_path = os.path.join( - work_dir, 'tripleo-ansible-inventory.yaml') - inv = inventories.TripleoInventories({ - plan: TripleoInventory( - cloud_name=cloud_name, - hclient=heat, - session=session, - ansible_ssh_user=ansible_ssh_user, - undercloud_key_file=undercloud_key_file, - ansible_python_interpreter=ansible_python_interpreter, - plan_name=plan, - host_network=ssh_network, - work_dir=work_dir - ) - }) - - inv.write_static_inventory(inventory_path) - return inventory_path diff --git a/tripleo_common/releasenotes/notes/automatically-retry-failed-deployments-baf0c701e6d1ad4a.yaml b/tripleo_common/releasenotes/notes/automatically-retry-failed-deployments-baf0c701e6d1ad4a.yaml deleted file mode 100644 index f873b5c37..000000000 --- a/tripleo_common/releasenotes/notes/automatically-retry-failed-deployments-baf0c701e6d1ad4a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - When using config-download, previously failed server deployments will be - automatically retried on subsequent runs with ansible-playbook. Previously, - -e force=true would have to be passed to trigger this behavior. diff --git a/tripleo_common/releasenotes/notes/convert-docker-params-84dfc6083e88bb52.yaml b/tripleo_common/releasenotes/notes/convert-docker-params-84dfc6083e88bb52.yaml deleted file mode 100644 index 5fb3a728e..000000000 --- a/tripleo_common/releasenotes/notes/convert-docker-params-84dfc6083e88bb52.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Convert Docker* params to Container* parameters transparently in our - templates workflows. This will allow us a means to migrate t-h-t - from Docker* parameters to the new generic Container varients. 
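write_static_inventory above leans on the classic write-then-rename idiom: the temporary file is created in the destination directory so the final os.rename() stays on one filesystem and is atomic on POSIX, meaning concurrent tripleoclient commands never observe a half-written inventory. The pattern in isolation (a sketch using yaml.safe_dump rather than the module's TemplateDumper):

    import os
    import tempfile
    import yaml

    def atomic_yaml_dump(data, path):
        # Create the temp file next to the destination so rename is atomic.
        dest_dir = os.path.dirname(os.path.abspath(path))
        with tempfile.NamedTemporaryFile(
                'w', dir=dest_dir, delete=False) as tmp:
            yaml.safe_dump(data, tmp)
        os.rename(tmp.name, path)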
diff --git a/tripleo_common/templates/deployment.j2 b/tripleo_common/templates/deployment.j2 deleted file mode 100644 index c2bd75075..000000000 --- a/tripleo_common/templates/deployment.j2 +++ /dev/null @@ -1,27 +0,0 @@ -{{ deployment.get('deployment_name') }}: -{% if deployment.get('scalar') %} - config: | -{% else %} - config: -{% endif %} -{{ deployment.get('config') | string | indent(4, true) }} - creation_time: "{{ deployment.get('creation_time') }}" - deployment_name: {{ deployment.get('deployment_name') }} - group: {{ deployment.get('group') }} - id: {{ deployment.get('id') }} - inputs: -{% for input in deployment.get('inputs') %} - - name: {{ input.get('name') }} - description: {{ input.get('description') }} - type: {{ input.get('type') }} - value: |- -{{ input.get('value') | string | indent(8, true) }} -{% endfor %} - name: {{ deployment.get('name') }} - options: {{ deployment.get('options') }} - outputs: -{% for output in deployment.get('outputs') %} - - name: {{ output.get('name') }} - description: {{ output.get('description') }} - type: {{ output.get('type') }} -{% endfor %} diff --git a/tripleo_common/templates/deployments.yaml b/tripleo_common/templates/deployments.yaml deleted file mode 100644 index 90d23dda5..000000000 --- a/tripleo_common/templates/deployments.yaml +++ /dev/null @@ -1,219 +0,0 @@ -- name: Quote the JSON queries to escape invalid characters - set_fact: - item_id: '"{{ item }}".id' - item_group: '"{{ item }}".group' - item_only: '"{{ item }}"' - -- name: Lookup deployment UUID - set_fact: - deployment_uuid: "{{ lookup('file', tripleo_role_name ~ '/' ~ ansible_facts['hostname'] | lower ~ '/' ~ item) | from_yaml | json_query(item_id) }}" - -- name: Lookup deployment group - set_fact: - deployment_group: "{{ lookup('file', tripleo_role_name ~ '/' ~ ansible_facts['hostname'] | lower ~ '/' ~ item) | from_yaml | json_query(item_group) }}" - -- name: Hiera check and diff mode - block: - - - name: Create hiera check-mode directory - become: true - file: - path: /etc/puppet/check-mode/hieradata - state: directory - check_mode: no - - - name: Create deployed check-mode directory - become: true - file: - path: /var/lib/heat-config/check-mode - state: directory - check_mode: no - - - name: Create tripleo-config-download check-mode directory - become: true - file: - path: /var/lib/heat-config/tripleo-config-download/check-mode - state: directory - check_mode: no - - - name: "Template deployment file for {{ item }}" - become: true - template: - src: "{{ tripleo_role_name ~ '/' ~ ansible_facts['hostname'] | lower ~ '/' ~ item }}" - dest: "/var/lib/heat-config/check-mode/tripleo-config-download/{{ item ~ '-' ~ deployment_uuid }}.rendered" - # variable_start/end are overridden since the Heat template must be - # valid yaml, so the jinja expression must be wrapped in quotes in the - # Heat templates. Now, the extra quotes must be removed so that they do - # not end up in the final rendered value. Also, $$ is used as the delimiter - # for the jinja expression since { and } are reserved in yaml. 
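    # Illustration (hypothetical rendered input): the Heat template has to
    # stay valid YAML, so the jinja expression arrives quoted and with $$
    # delimiters, e.g.
    #   config: "$$ deployment_config $$"
    # With variable_start_string/variable_end_string overridden below, the
    # surrounding quotes and $$ markers are consumed during templating,
    # leaving just the rendered value:
    #   config: <value of deployment_config>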
- variable_start_string: "\"$$" - variable_end_string: "$$\"" - - - name: "Slurp remote deployment file for {{ item }}" - slurp: - src: "/var/lib/heat-config/check-mode/tripleo-config-download/{{ item ~ '-' ~ deployment_uuid }}.rendered" - register: deployment_content - - - name: "Remote copy deployment file for {{ item }}" - become: true - copy: - content: "[ {{ deployment_content.content | b64decode | from_yaml | json_query(item_only) }} ]" - dest: "/var/lib/heat-config/check-mode/tripleo-config-download/{{ item ~ '-' ~ deployment_uuid }}" - when: - - deployment_content is defined - - - name: Run hiera deployment for check mode - shell: | - rm -f /var/lib/heat-config/check-mode/{{ deployment_uuid }}.json - /usr/libexec/os-refresh-config/configure.d/55-heat-config - exit $(jq .deploy_status_code /var/lib/heat-config/check-mode/{{ deployment_uuid }}.notify.json) - args: - warn: false - become: true - environment: - HEAT_SHELL_CONFIG: /var/lib/heat-config/tripleo-config-download/check-mode/{{ item ~ '-' ~ deployment_uuid }} - HEAT_PUPPET_HIERA_DATADIR: /etc/puppet/check-mode/hieradata - HEAT_HIERA_CONFIG: /etc/puppet/check-mode/hiera.yaml - HEAT_CONFIG_DEPLOYED: /var/lib/heat-config/check-mode - check_mode: no - ignore_errors: yes - - - name: List hieradata files for check mode - find: - path: /etc/puppet/check-mode/hieradata - register: check_hieradata_files - check_mode: no - - - name: diff hieradata changes for check mode - command: - diff -uN {{ hieradata_item.path | regex_replace('check-mode', '') }} {{ hieradata_item.path }} - with_items: "{{ check_hieradata_files.files }}" - check_mode: no - register: diff_results - changed_when: diff_results.rc == 1 - loop_control: - loop_var: hieradata_item - label: "{{ hieradata_item.path }}" - failed_when: false - when: ansible_diff_mode|bool - - - name: diff hieradata changes for check mode - debug: - var: item.stdout_lines - with_items: "{{ diff_results.results }}" - changed_when: item.rc == 1 - loop_control: - label: "{{ item._ansible_item_label }}" - when: ansible_diff_mode|bool - - - name: hiera.yaml changes for check mode - command: - diff -uN /etc/puppet/hiera.yaml /etc/puppet/check-mode/hiera.yaml - check_mode: no - register: diff_results - changed_when: diff_results.rc == 1 - failed_when: false - - - name: diff hiera.yaml changes for check mode - debug: - var: diff_results.stdout_lines - changed_when: diff_results.rc == 1 - when: ansible_diff_mode|bool - - ignore_errors: yes - when: - - deployment_group == 'hiera' - - ansible_check_mode|bool - -- name: "Template deployment file for {{ item }}" - become: true - template: - src: "{{ tripleo_role_name ~ '/' ~ ansible_facts['hostname'] | lower ~ '/' ~ item }}" - dest: "/var/lib/heat-config/tripleo-config-download/{{ item ~ '-' ~ deployment_uuid }}.rendered" - # variable_start/end are overridden since the Heat template must be - # valid yaml, so the jinja expression must be wrapped in quotes in the - # Heat templates. Now, the extra quotes must be removed so that they do - # not end up in the final rendered value. Also, $$ is used as the delimiter - # for the jinja expression since { and } are reserved in yaml. 
- variable_start_string: "\"$$" - variable_end_string: "$$\"" - -- name: "Slurp remote deployment file for {{ item }}" - slurp: - src: "/var/lib/heat-config/tripleo-config-download/{{ item ~ '-' ~ deployment_uuid }}.rendered" - register: deployment_content - when: - - not ansible_check_mode|bool - -- name: "Remote copy deployment file for {{ item }}" - become: true - copy: - content: "[ {{ deployment_content.content | b64decode | from_yaml | json_query(item_only) }} ]" - dest: "/var/lib/heat-config/tripleo-config-download/{{ item ~ '-' ~ deployment_uuid }}" - when: - - not ansible_check_mode|bool - -- name: "Check if deployed file exists for {{ item }}" - become: true - stat: - path: /var/lib/heat-config/deployed/{{ deployment_uuid }}.json - register: deployed_file_stat - -- name: "Check previous deployment rc for {{ item }}" - become: true - shell: | - exit $(jq .deploy_status_code /var/lib/heat-config/deployed/{{ deployment_uuid }}.notify.json) - register: previous_deployment_result - ignore_errors: true - failed_when: false - when: - - deployed_file_stat is defined - - deployed_file_stat.stat.exists - -- name: "Remove deployed file for {{ item }} when previous deployment failed" - file: - path: /var/lib/heat-config/deployed/{{ deployment_uuid }}.json - state: absent - become: true - when: - - not ansible_check_mode|bool - - deployed_file_stat.stat.exists - - previous_deployment_result.rc != 0 - -- name: "Force remove deployed file for {{ item }}" - file: - path: /var/lib/heat-config/deployed/{{ deployment_uuid }}.json - state: absent - become: true - when: (force | bool) - -- name: "Run async deployment {{ item }}" - shell: | - /usr/libexec/os-refresh-config/configure.d/55-heat-config - exit $(jq .deploy_status_code /var/lib/heat-config/deployed/{{ deployment_uuid }}.notify.json) - become: true - environment: - HEAT_SHELL_CONFIG: /var/lib/heat-config/tripleo-config-download/{{ item ~ '-' ~ deployment_uuid }} - register: deployment_async_result - ignore_errors: yes - async: "{{ async_timeout | default(1800) }}" - poll: "{{ async_poll | default(3) }}" - -- name: "Output for async deployment {{ item }}" - debug: - msg: - - stderr: "{{ deployment_async_result.stderr_lines | default(['Timed out']) }}" - - status_code: "{{ deployment_async_result.rc | default(-1) }}" - tags: - - output - failed_when: deployment_async_result.rc | default(-1) != 0 - when: not ansible_check_mode|bool - -- name: "Check-mode for Run deployment {{ item }} (changed status indicates deployment would run)" - stat: - path: /var/lib/heat-config/deployed/{{ deployment_uuid }}.json - become: true - register: deploy_exists - changed_when: not deploy_exists.stat.exists - when: ansible_check_mode|bool - ignore_errors: yes diff --git a/tripleo_common/templates/heat-config.j2 b/tripleo_common/templates/heat-config.j2 deleted file mode 100644 index d48da4187..000000000 --- a/tripleo_common/templates/heat-config.j2 +++ /dev/null @@ -1 +0,0 @@ -[{{ deployment }}] diff --git a/tripleo_common/templates/host_var_server.j2 b/tripleo_common/templates/host_var_server.j2 deleted file mode 100644 index a85b96d0c..000000000 --- a/tripleo_common/templates/host_var_server.j2 +++ /dev/null @@ -1,22 +0,0 @@ -pre_network_{{ role }}: {% if not pre_network %} [] {% endif %} - -{% for deployment in pre_network %} - - {{ deployment }} -{% endfor %} - -pre_deployments_{{ role }}: {% if not pre_deployments %} [] {% endif %} - -{% for deployment in pre_deployments %} - - {{ deployment }} -{% endfor %} - -post_deployments_{{ role }}: {% if not 
post_deployments %} [] {% endif %} - -{% for deployment in post_deployments %} - - {{ deployment }} -{% endfor %} - -{% if ansible_host_vars %} -# ansible_host_vars managed by TripleO Heat Templates in puppet/role.role.j2.yaml: -{{ ansible_host_vars }} -{% endif %} diff --git a/tripleo_common/tests/__init__.py b/tripleo_common/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/base.py b/tripleo_common/tests/base.py deleted file mode 100644 index 1c30cdb56..000000000 --- a/tripleo_common/tests/base.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslotest import base - - -class TestCase(base.BaseTestCase): - - """Test case base class for all unit tests.""" diff --git a/tripleo_common/tests/fake_config/__init__.py b/tripleo_common/tests/fake_config/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/fake_config/fakes.py b/tripleo_common/tests/fake_config/fakes.py deleted file mode 100644 index f610d2352..000000000 --- a/tripleo_common/tests/fake_config/fakes.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
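The FAKE_STACK fixture that follows mirrors what the inventory code consumes: Heat hands back stack outputs as a list of output_key/output_value pairs, while callers such as self.stack_outputs.get('RoleNetIpMap', {}) earlier in this patch want mapping semantics. A lookup helper along these lines bridges the two (a sketch, not the deleted StackOutputs class):

    def output_map(stack):
        # stack.to_dict()['outputs'] looks like
        # [{'output_key': 'RoleData', 'output_value': {...}}, ...]
        return {out['output_key']: out['output_value']
                for out in stack.to_dict().get('outputs', [])}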
-# - -from unittest import mock - - -FAKE_STACK = { - 'parameters': { - 'ControllerCount': 1, - 'ComputeCount': 1, - 'ObjectStorageCount': 0, - 'BlockStorageCount': 0, - 'CephStorageCount': 0, - }, - 'stack_name': 'overcloud', - 'stack_status': "CREATE_COMPLETE", - 'outputs': [ - {'output_key': 'RoleConfig', - 'output_value': { - 'foo_config': 'foo', - 'external_deploy_steps_tasks': [{'name': 'Fake external task', - 'debug': 'name=hello', - 'when': 'step|int == 1'}]}}, - {'output_key': 'RoleData', - 'output_value': { - 'FakeCompute': { - 'config_settings': {'nova::compute::libvirt::services::' - 'libvirt_virt_type': 'qemu'}, - 'global_config_settings': {}, - 'logging_groups': ['root', 'neutron', 'nova'], - 'logging_sources': [{'path': '/var/log/nova/nova-compute.log', - 'type': 'tail'}], - 'monitoring_subscriptions': ['overcloud-nova-compute'], - 'service_config_settings': {'horizon': {'neutron::' - 'plugins': ['ovs']} - }, - 'service_metadata_settings': None, - 'service_names': ['nova_compute', 'fake_service'], - 'step_config': ['include ::tripleo::profile::base::sshd', - 'include ::timezone'], - 'upgrade_batch_tasks': [], - 'upgrade_tasks': [{'name': 'Stop fake service', - 'service': 'name=fake state=stopped', - 'when': ['nova_api_enabled.rc == 0', - False, - 'httpd_enabled.rc != 0', - 'step|int == 1']}, - {'name': 'Stop nova-compute service', - 'service': 'name=openstack-nova-compute ' - 'state=stopped', - 'when': ['nova_compute_enabled.rc == 0', - 'step|int == 2', 'existing', - 'list']}] - }, - 'FakeController': { - 'config_settings': {'tripleo::haproxy::user': 'admin'}, - 'global_config_settings': {}, - 'logging_groups': ['root', 'keystone', 'neutron'], - 'logging_sources': [{'path': '/var/log/keystone/keystone.log', - 'type': 'tail'}], - 'monitoring_subscriptions': ['overcloud-keystone'], - 'service_config_settings': {'horizon': {'neutron::' - 'plugins': ['ovs']} - }, - 'service_metadata_settings': None, - 'service_names': ['pacemaker', 'fake_service'], - 'step_config': ['include ::tripleo::profile::base::sshd', - 'include ::timezone'], - 'upgrade_batch_tasks': [], - 'upgrade_tasks': [{'name': 'Stop fake service', - 'service': 'name=fake state=stopped', - 'when': 'step|int == 1'}]}}}]} - - -def create_to_dict_mock(**kwargs): - mock_with_to_dict = mock.Mock() - mock_with_to_dict.configure_mock(**kwargs) - mock_with_to_dict.to_dict.return_value = kwargs - return mock_with_to_dict - - -def create_tht_stack(**kwargs): - stack = FAKE_STACK.copy() - stack.update(kwargs) - return create_to_dict_mock(**stack) diff --git a/tripleo_common/tests/fake_neutron/__init__.py b/tripleo_common/tests/fake_neutron/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/fake_neutron/fakes.py b/tripleo_common/tests/fake_neutron/fakes.py deleted file mode 100644 index 8c6ca5548..000000000 --- a/tripleo_common/tests/fake_neutron/fakes.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from tripleo_common.tests.fake_neutron import stubs - - -ctlplane_network = stubs.FakeNeutronNetwork( - name='ctlplane', - id='ctlplane_network_id', - mtu=1500, - dns_domain='ctlplane.example.com.', - subnet_ids=['ctlplane_subnet_id'], - tags=[], -) -internal_api_network = stubs.FakeNeutronNetwork( - name='internal_api', - id='internal_api_network_id', - mtu=1500, - dns_domain='internalapi.example.com.', - subnet_ids=['internal_api_subnet_id'], - tags=['tripleo_net_idx=0', - 'tripleo_vip=true', - 'tripleo_network_name=InternalApi'], -) - -ctlplane_subnet = stubs.FakeNeutronSubnet( - name='ctlplane-subnet', - id='ctlplane_subnet_id', - network_id='ctlplane_network_id', - cidr='192.0.2.0/24', - gateway_ip='192.0.2.1', - dns_nameservers=['192.0.2.253', '192.0.2.254'], - host_routes=[], - ip_version=4, - tags=[], -) -internal_api_subnet = stubs.FakeNeutronSubnet( - name='internal_api_subnet', - id='internal_api_subnet_id', - network_id='internal_api_network_id', - cidr='198.51.100.128/25', - gateway_ip='198.51.100.129', - dns_nameservers=[], - host_routes=[], - ip_version=4, - tags=['tripleo_vlan_id=20'], -) - - -fake_networks = [ctlplane_network, internal_api_network] -fake_subnets = [ctlplane_subnet, internal_api_subnet] - -controller0_ports = [ - stubs.FakeNeutronPort(name='c-0-ctlplane', - id='controller_0_ctlplane_id', - network_id=ctlplane_network.id, - fixed_ips=[dict(ip_address='192.0.2.10', - subnet_id=ctlplane_subnet.id)], - dns_name='c-0', - tags=['tripleo_network_name=ctlplane', - 'tripleo_role=Controller', - 'tripleo_stack=overcloud', - 'tripleo_default_route=True'], - ), - stubs.FakeNeutronPort(name='c-0-internal_api', - id='controller_0_internal_api_id', - network_id=internal_api_network.id, - fixed_ips=[dict(ip_address='198.51.100.140', - subnet_id=internal_api_subnet.id)], - dns_name='c-0', - tags=['tripleo_network_name=InternalApi', - 'tripleo_role=Controller', - 'tripleo_stack=overcloud', - 'tripleo_default_route=False'], - ), -] - -controller1_ports = [ - stubs.FakeNeutronPort(name='c-1-ctlplane', - id='controller_1_ctlplane_id', - network_id=ctlplane_network.id, - fixed_ips=[dict(ip_address='192.0.2.11', - subnet_id=ctlplane_subnet.id)], - dns_name='c-1', - tags=['tripleo_network_name=ctlplane', - 'tripleo_role=Controller', - 'tripleo_stack=overcloud', - 'tripleo_default_route=True'], - ), - stubs.FakeNeutronPort(name='c-1-internal_api', - id='controller_1_internal_api_id', - network_id=internal_api_network.id, - fixed_ips=[dict(ip_address='198.51.100.141', - subnet_id=internal_api_subnet.id)], - dns_name='c-1', - tags=['tripleo_network_name=InternalApi', - 'tripleo_role=Controller', - 'tripleo_stack=overcloud', - 'tripleo_default_route=False'], - ), -] - -controller2_ports = [ - stubs.FakeNeutronPort(name='c-2-ctlplane', - id='controller_2_ctlplane_id', - network_id=ctlplane_network.id, - fixed_ips=[dict(ip_address='192.0.2.12', - subnet_id=ctlplane_subnet.id)], - dns_name='c-2', - tags=['tripleo_network_name=ctlplane', - 'tripleo_role=Controller', - 'tripleo_stack=overcloud', - 'tripleo_default_route=True'], - ), - stubs.FakeNeutronPort(name='c-2-internal_api', - id='controller_2_internal_api_id', - network_id=internal_api_network.id, - fixed_ips=[dict(ip_address='198.51.100.142', - subnet_id=internal_api_subnet.id)], - dns_name='c-2', - tags=['tripleo_network_name=InternalApi', - 'tripleo_role=Controller', - 'tripleo_stack=overcloud', - 
'tripleo_default_route=False'], - ), -] - -compute_0_ports = [ - stubs.FakeNeutronPort(name='cp-0-ctlplane', - id='compute_0_ctlplane_id', - network_id=ctlplane_network.id, - fixed_ips=[dict(ip_address='192.0.2.20', - subnet_id=ctlplane_subnet.id)], - dns_name='cp-0', - tags=['tripleo_network_name=ctlplane', - 'tripleo_role=Compute', - 'tripleo_stack=overcloud', - 'tripleo_default_route=True'], - ), - stubs.FakeNeutronPort(name='cp-0-internal_api', - id='compute_0_internal_api_id', - network_id=internal_api_network.id, - fixed_ips=[dict(ip_address='198.51.100.150', - subnet_id=internal_api_subnet.id)], - dns_name='cp-0', - tags=['tripleo_network_name=InternalApi', - 'tripleo_role=Compute', - 'tripleo_stack=overcloud', - 'tripleo_default_route=False'], - ), - -] - -custom_0_ports = [ - stubs.FakeNeutronPort(name='cs-0-ctlplane', - id='custom_0_ctlplane_id', - network_id=ctlplane_network.id, - fixed_ips=[dict(ip_address='192.0.2.200', - subnet_id=ctlplane_subnet.id)], - dns_name='cs-0', - tags=['tripleo_network_name=ctlplane', - 'tripleo_role=CustomRole', - 'tripleo_stack=overcloud', - 'tripleo_default_route=True'], - ), -] diff --git a/tripleo_common/tests/fake_neutron/stubs.py b/tripleo_common/tests/fake_neutron/stubs.py deleted file mode 100644 index 562c02a26..000000000 --- a/tripleo_common/tests/fake_neutron/stubs.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2020 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
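The stub classes that follow all use one pattern: subclass dict, pre-seed every known field with None via dict.fromkeys(), and mirror item access as attribute access so tests can write port.network_id and port['network_id'] interchangeably. The pattern in miniature:

    class AttrDict(dict):
        """A dict whose existing keys double as attributes."""

        def __getattr__(self, key):
            try:
                return self[key]
            except KeyError:
                raise AttributeError(key)

        def __setattr__(self, key, value):
            if key in self:
                self[key] = value
            else:
                raise AttributeError(key)

    port = AttrDict(dict.fromkeys(['id', 'name']))
    port.id = 'port-1'            # allowed: the key was pre-seeded
    assert port.id == port['id'] == 'port-1'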
- -class FakeNeutronNetwork(dict): - def __init__(self, **attrs): - NETWORK_ATTRS = ['id', - 'name', - 'status', - 'tenant_id', - 'is_admin_state_up', - 'mtu', - 'segments', - 'is_shared', - 'subnets', - 'provider:network_type', - 'provider:physical_network', - 'provider:segmentation_id', - 'router:external', - 'availability_zones', - 'availability_zone_hints', - 'is_default', - 'tags'] - - raw = dict.fromkeys(NETWORK_ATTRS) - raw.update(attrs) - raw.update({ - 'provider_physical_network': attrs.get( - 'provider:physical_network', None), - 'provider_network_type': attrs.get( - 'provider:network_type', None), - 'provider_segmentation_id': attrs.get( - 'provider:segmentation_id', None) - }) - super(FakeNeutronNetwork, self).__init__(raw) - - def __getattr__(self, key): - try: - return self[key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - if key in self: - self[key] = value - else: - raise AttributeError(key) - - -class FakeNeutronPort(dict): - def __init__(self, **attrs): - PORT_ATTRS = ['admin_state_up', - 'allowed_address_pairs', - 'binding:host_id', - 'binding:profile', - 'binding:vif_details', - 'binding:vif_type', - 'binding:vnic_type', - 'data_plane_status', - 'description', - 'device_id', - 'device_owner', - 'dns_assignment', - 'dns_domain', - 'dns_name', - 'extra_dhcp_opts', - 'fixed_ips', - 'id', - 'mac_address', - 'name', 'network_id', - 'port_security_enabled', - 'security_group_ids', - 'status', - 'tenant_id', - 'qos_network_policy_id', - 'qos_policy_id', - 'tags', - 'uplink_status_propagation'] - - raw = dict.fromkeys(PORT_ATTRS) - raw.update(attrs) - super(FakeNeutronPort, self).__init__(raw) - - def __getattr__(self, key): - try: - return self[key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - if key in self: - self[key] = value - else: - raise AttributeError(key) - - -class FakeNeutronSubnet(dict): - def __init__(self, **attrs): - SUBNET_ATTRS = ['id', - 'name', - 'network_id', - 'cidr', - 'tenant_id', - 'is_dhcp_enabled', - 'dns_nameservers', - 'allocation_pools', - 'host_routes', - 'ip_version', - 'gateway_ip', - 'ipv6_address_mode', - 'ipv6_ra_mode', - 'subnetpool_id', - 'segment_id', - 'tags'] - - raw = dict.fromkeys(SUBNET_ATTRS) - raw.update(attrs) - super(FakeNeutronSubnet, self).__init__(raw) - - def __getattr__(self, key): - try: - return self[key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - if key in self: - self[key] = value - else: - raise AttributeError(key) diff --git a/tripleo_common/tests/fake_nova/README b/tripleo_common/tests/fake_nova/README deleted file mode 100644 index 894926aa6..000000000 --- a/tripleo_common/tests/fake_nova/README +++ /dev/null @@ -1,4 +0,0 @@ -We don't want to pull in all of Nova and, more importantly, all of its -numerous dependencies just for the sake of having one class to inherit -from in our custom filter. Instead, this module will be injected into -sys.modules as 'nova' when we run unit tests that rely on it. 
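The README above describes a common trick for heavyweight optional dependencies: build a minimal stand-in package and register it in sys.modules under the real name before the code under test imports it. A generic sketch of the injection (the module path is the one the README and the filter stub below rely on):

    import sys
    import types

    fake_nova = types.ModuleType('nova')
    fake_scheduler = types.ModuleType('nova.scheduler')
    fake_filters = types.ModuleType('nova.scheduler.filters')

    class BaseHostFilter(object):      # just enough API to inherit from
        pass

    fake_filters.BaseHostFilter = BaseHostFilter
    fake_scheduler.filters = fake_filters
    fake_nova.scheduler = fake_scheduler

    # Register before the import under test runs; from then on
    # 'from nova.scheduler import filters' resolves to the fakes.
    sys.modules['nova'] = fake_nova
    sys.modules['nova.scheduler'] = fake_scheduler
    sys.modules['nova.scheduler.filters'] = fake_filters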
diff --git a/tripleo_common/tests/fake_nova/__init__.py b/tripleo_common/tests/fake_nova/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/fake_nova/scheduler/__init__.py b/tripleo_common/tests/fake_nova/scheduler/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/fake_nova/scheduler/filters.py b/tripleo_common/tests/fake_nova/scheduler/filters.py deleted file mode 100644 index c80d4b846..000000000 --- a/tripleo_common/tests/fake_nova/scheduler/filters.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class BaseHostFilter(object): - pass diff --git a/tripleo_common/tests/image/__init__.py b/tripleo_common/tests/image/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/image/builder/__init__.py b/tripleo_common/tests/image/builder/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/image/builder/test_buildah.py b/tripleo_common/tests/image/builder/test_buildah.py deleted file mode 100644 index 9467e1123..000000000 --- a/tripleo_common/tests/image/builder/test_buildah.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Unit tests for image.builder.buildah""" - -import copy -from concurrent import futures -from concurrent.futures import ThreadPoolExecutor as tpe -import pathlib -from unittest import mock - -from tripleo_common.image.builder.buildah import BuildahBuilder as bb -from tripleo_common.tests import base -from tripleo_common.utils import process - - -BUILDAH_CMD_BASE = ['sudo', 'buildah'] -DEPS = {"base"} -WORK_DIR = '/tmp/kolla' -VOLS = ['/etc/pki:/etc/pki', '/etc/dir2:/dir2'] -BUILD_ALL_LIST_CONTAINERS = ['container1', 'container2', 'container3'] -BUILD_ALL_DICT_CONTAINERS = { - 'container1': {}, - 'container2': {}, - 'container3': {} -} -BUILD_ALL_STR_CONTAINER = 'container1' - -PREPROCESSED_CONTAINER_DEPS = [ - { - "image0": [ - "image1", - { - "image2": [ - { - "image3": [ - "image4", - "image5" - ] - }, - "image8", - { - "image6": [ - "image7" - ] - }, - "image9" - ] - }, - { - "image10": [ - "image11", - "image12" - ] - }, - "image13", - "image14" - ] - } -] - - -class ThreadPoolExecutorReturn(object): - _exception = None - - -class ThreadPoolExecutorReturnFailed(object): - _exception = True - exception_info = "This is a test failure" - - -class ThreadPoolExecutorReturnSuccess(object): - _exception = False - - -# Iterable version of the return values for predictable submit() returns -R_FAILED_LIST = [ThreadPoolExecutorReturnSuccess(), - ThreadPoolExecutorReturnSuccess(), - ThreadPoolExecutorReturnFailed()] -R_OK_LIST = [ThreadPoolExecutorReturnSuccess(), - ThreadPoolExecutorReturnSuccess(), - ThreadPoolExecutorReturnSuccess()] -R_BROKEN_LISTS = [[ThreadPoolExecutorReturnSuccess()], - [ThreadPoolExecutorReturn(), - ThreadPoolExecutorReturn()]] - -# Return values as done and not_done sets for the ThreadPoolExecutor -R_FAILED = (set(R_FAILED_LIST), set()) -R_OK = (set(R_OK_LIST), set()) -R_BROKEN = (set(R_BROKEN_LISTS[0]), set(R_BROKEN_LISTS[1])) - - -class TestBuildahBuilder(base.TestCase): - - @mock.patch.object(process, 'execute', autospec=True) - @mock.patch.object(pathlib.Path, 'touch', autospec=True) - def test_build(self, mock_touch, mock_process): - args = copy.copy(BUILDAH_CMD_BASE) - dest = '127.0.0.1:8787/master/fedora-binary-fedora-base:latest' - container_build_path = WORK_DIR + '/' + 'fedora-base' - logfile = '/tmp/kolla/fedora-base/fedora-base-build.log' - buildah_cmd_build = ['--log-level=debug', 'bud', '--net=host', - '--loglevel=3', '--format', 'docker', - '--tls-verify=False', '--logfile', - logfile, '-t', dest, container_build_path] - args.extend(buildah_cmd_build) - bb(WORK_DIR, DEPS, debug=True).build('fedora-base', - container_build_path) - mock_process.assert_called_once_with( - *args, - check_exit_code=True, - run_as_root=False, - use_standard_locale=True - ) - - @mock.patch.object(process, 'execute', autospec=True) - @mock.patch.object(pathlib.Path, 'touch', autospec=True) - def test_build_without_img_type(self, mock_touch, mock_process): - args = copy.copy(BUILDAH_CMD_BASE) - dest = '127.0.0.1:8787/master/fedora-fedora-base:latest' - container_build_path = WORK_DIR + '/' + 'fedora-base' - logfile = '/tmp/kolla/fedora-base/fedora-base-build.log' - buildah_cmd_build = ['bud', '--net=host', '--format', - 'docker', '--tls-verify=False', - '--logfile', logfile, '-t', dest, - container_build_path] - args.extend(buildah_cmd_build) - bb(WORK_DIR, DEPS, img_type=False).build('fedora-base', - container_build_path) - mock_process.assert_called_once_with( - *args, - check_exit_code=True, - run_as_root=False, - use_standard_locale=True - ) - - @mock.patch.object(process, 
'execute', autospec=True) - @mock.patch.object(pathlib.Path, 'touch', autospec=True) - def test_build_with_volumes(self, mock_touch, mock_process): - args = copy.copy(BUILDAH_CMD_BASE) - dest = '127.0.0.1:8787/master/fedora-binary-fedora-base:latest' - container_build_path = WORK_DIR + '/' + 'fedora-base' - logfile = '/tmp/kolla/fedora-base/fedora-base-build.log' - buildah_cmd_build = ['bud', '--net=host', - '--volume', '/etc/pki:/etc/pki', - '--volume', '/etc/dir2:/dir2', - '--format', 'docker', - '--tls-verify=False', - '--logfile', logfile, '-t', dest, - container_build_path] - args.extend(buildah_cmd_build) - bb(WORK_DIR, DEPS, volumes=VOLS).build('fedora-base', - container_build_path) - mock_process.assert_called_once_with( - *args, - check_exit_code=True, - run_as_root=False, - use_standard_locale=True - ) - - @mock.patch.object(process, 'execute', autospec=True) - def test_build_with_excludes(self, mock_process): - bb(WORK_DIR, DEPS, excludes=['fedora-base'])._generate_container( - 'fedora-base') - assert not mock_process.called - - @mock.patch.object(process, 'execute', autospec=True) - def test_push(self, mock_process): - args = copy.copy(BUILDAH_CMD_BASE) - dest = '127.0.0.1:8787/master/fedora-binary-fedora-base:latest' - buildah_cmd_push = ['push', '--tls-verify=False', dest, - 'docker://' + dest] - args.extend(buildah_cmd_push) - bb(WORK_DIR, DEPS).push(dest) - mock_process.assert_called_once_with( - *args, run_as_root=False, use_standard_locale=True - ) - - @mock.patch.object(bb, 'build', autospec=True) - @mock.patch.object(bb, 'push', autospec=True) - def test_generate_container_with_push(self, mock_push, mock_build): - container_name = "fedora-base" - destination = "127.0.0.1:8787/master/fedora-binary-{}:latest" - builder = bb(WORK_DIR, DEPS, push_containers=True) - builder._generate_container(container_name) - mock_build.assert_called_once_with(builder, container_name, "") - mock_push.assert_called_once_with(builder, - destination.format(container_name)) - - @mock.patch.object(bb, 'build', autospec=True) - @mock.patch.object(bb, 'push', autospec=True) - def test_generate_container_without_push(self, mock_push, mock_build): - container_name = "fedora-base" - builder = bb(WORK_DIR, DEPS, push_containers=False) - builder._generate_container(container_name) - mock_build.assert_called_once_with(builder, container_name, "") - assert not mock_push.called - - @mock.patch.object(tpe, 'submit', autospec=True) - @mock.patch.object(futures, 'wait', autospec=True, return_value=R_BROKEN) - @mock.patch.object(process, 'execute', autospec=True) - def test_build_all_list_broken(self, mock_build, mock_wait, mock_submit): - mock_submit.side_effect = R_BROKEN_LISTS[0] + R_BROKEN_LISTS[1] - _b = bb(WORK_DIR, DEPS) - self.assertRaises( - SystemError, - _b.build_all, - deps=BUILD_ALL_LIST_CONTAINERS - ) - - @mock.patch.object(tpe, 'submit', autospec=True) - @mock.patch.object(futures, 'wait', autospec=True, return_value=R_FAILED) - @mock.patch.object(process, 'execute', autospec=True) - def test_build_all_list_failed(self, mock_build, mock_wait, mock_submit): - mock_submit.side_effect = R_FAILED_LIST - _b = bb(WORK_DIR, DEPS) - self.assertRaises( - RuntimeError, - _b.build_all, - deps=BUILD_ALL_LIST_CONTAINERS - ) - - @mock.patch.object(tpe, 'submit', autospec=True) - @mock.patch.object(futures, 'wait', autospec=True, return_value=R_OK) - @mock.patch.object(process, 'execute', autospec=True) - def test_build_all_list_ok(self, mock_build, mock_wait, mock_submit): - bb(WORK_DIR, 
DEPS).build_all(deps=BUILD_ALL_LIST_CONTAINERS) - - @mock.patch.object(tpe, 'submit', autospec=True) - @mock.patch.object(futures, 'wait', autospec=True, return_value=R_OK) - @mock.patch.object(process, 'execute', autospec=True) - def test_build_all_ok_no_deps(self, mock_build, mock_wait, mock_submit): - bb(WORK_DIR, DEPS).build_all() - - @mock.patch.object(tpe, 'submit', autospec=True) - @mock.patch.object(futures, 'wait', autospec=True, return_value=R_OK) - @mock.patch.object(process, 'execute', autospec=True) - @mock.patch.object(pathlib.Path, 'touch', autospec=True) - def test_build_all_dict_ok(self, mock_touch, - mock_build, mock_wait, mock_submit): - bb(WORK_DIR, DEPS).build_all(deps=BUILD_ALL_DICT_CONTAINERS) - - @mock.patch.object(tpe, 'submit', autospec=True) - @mock.patch.object(futures, 'wait', autospec=True, return_value=R_OK) - @mock.patch.object(process, 'execute', autospec=True) - @mock.patch.object(pathlib.Path, 'touch', autospec=True) - def test_build_all_str_ok(self, mock_touch, - mock_build, mock_wait, mock_submit): - bb(WORK_DIR, DEPS).build_all(deps=BUILD_ALL_STR_CONTAINER) - - def test_dep_processing(self): - containers = list() - self.assertEqual( - bb(WORK_DIR, DEPS)._generate_deps( - deps=PREPROCESSED_CONTAINER_DEPS, - containers=containers - ), - [ - [ - 'image0' - ], - [ - 'image1', - 'image13', - 'image14', - 'image2', - 'image10' - ], - [ - 'image8', - 'image9', - 'image3', - 'image6' - ], - [ - 'image4', - 'image5' - ], - [ - 'image7' - ], - [ - 'image11', - 'image12' - ] - ] - ) - - @mock.patch( - 'tripleo_common.image.builder.buildah.BuildahBuilder._multi_build', - autospec=True - ) - def test_build_all_multi_build(self, mock_multi_build): - bb(WORK_DIR, DEPS).build_all(deps=BUILD_ALL_LIST_CONTAINERS) - self.assertTrue(mock_multi_build.called) diff --git a/tripleo_common/tests/image/fakes.py b/tripleo_common/tests/image/fakes.py deleted file mode 100644 index 4403ba4ed..000000000 --- a/tripleo_common/tests/image/fakes.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
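test_dep_processing above feeds _generate_deps a nested tree (PREPROCESSED_CONTAINER_DEPS, a list of image names and {parent: children} mappings) and expects a list of lists back, where every sublist names images whose parents all appear in earlier sublists, so each sublist can be built as one concurrent wave. A plain breadth-first grouping conveys the idea (an illustration only; the deleted _generate_deps recurses per branch and orders its waves differently):

    def build_waves(tree):
        """Group a nested dependency tree into breadth-first build waves."""
        waves = []
        level = tree
        while level:
            names, next_level = [], []
            for node in level:
                if isinstance(node, dict):
                    for parent, children in node.items():
                        names.append(parent)
                        next_level.extend(children)
                else:
                    names.append(node)
            waves.append(names)
            level = next_level
        return waves

    # build_waves(PREPROCESSED_CONTAINER_DEPS)[0] == ['image0']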
- - -def create_disk_images(): - disk_images = { - 'disk_images': [{ - 'arch': 'amd64', - 'distro': 'some_awesome_os', - 'imagename': 'overcloud', - 'type': 'qcow2', - 'elements': ['image_element'] - }] - } - - return disk_images - - -def create_parsed_upload_images(): - uploads = [ - {'imagename': 'docker.io/tripleomastercentos9/' - 'heat-docker-agents-centos:latest', - 'push_destination': 'localhost:8787'}, - {'imagename': 'docker.io/tripleomastercentos9/' - 'centos-binary-nova-compute:liberty', - 'push_destination': 'localhost:8787'}, - {'imagename': 'docker.io/tripleomastercentos9/' - 'centos-binary-nova-libvirt:liberty', - 'push_destination': '192.0.2.0:8787'}, - {'imagename': 'docker.io/tripleomastercentos9/' - 'image-with-missing-tag', - 'push_destination': 'localhost:8787'}, - ] - return uploads diff --git a/tripleo_common/tests/image/test_base.py b/tripleo_common/tests/image/test_base.py deleted file mode 100644 index 2bfcc435d..000000000 --- a/tripleo_common/tests/image/test_base.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from tripleo_common.image.base import BaseImageManager -from tripleo_common.image.exception import ImageSpecificationException -from tripleo_common.tests import base as testbase -from tripleo_common.tests.image import fakes - - -class TestBaseImageManager(testbase.TestCase): - def setUp(self): - super(TestBaseImageManager, self).setUp() - - @mock.patch('yaml.safe_load', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - def test_load_config_files(self, mock_os_path_isfile, mock_yaml_load): - mock_yaml_load.return_value = fakes.create_disk_images() - - mock_os_path_isfile.return_value = True - - mock_open_context = mock.mock_open() - mock_open_context().read.return_value = "YAML" - - with mock.patch('builtins.open', mock_open_context): - base_manager = BaseImageManager(['yamlfile']) - disk_images = base_manager.load_config_files('disk_images') - - mock_yaml_load.assert_called_once_with("YAML") - self.assertEqual([{ - 'arch': 'amd64', - 'distro': 'some_awesome_os', - 'imagename': 'overcloud', - 'type': 'qcow2', - 'elements': ['image_element'] - }], disk_images) - - def test_load_config_files_not_found(self): - base_manager = BaseImageManager(['file/does/not/exist']) - self.assertRaises(IOError, base_manager.load_config_files, - 'disk_images') - - @mock.patch('tripleo_common.image.base.BaseImageManager.APPEND_ATTRIBUTES', - ['elements', 'options', 'packages', 'environment']) - @mock.patch('yaml.safe_load', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - def test_load_config_files_multiple_files(self, mock_os_path_isfile, - mock_yaml_load): - mock_yaml_load.side_effect = [{ - 'disk_images': [{ - 'arch': 'amd64', - 'imagename': 'overcloud', - 'distro': 'some_awesome_distro', - 'type': 'qcow2', - 'elements': ['image_element'], - 'environment': {'test_env': '1'}, - }]}, - { - 'disk_images': [{ - 'imagename': 'overcloud', - 'elements': 
['another_image_element'], - 'packages': ['a_package'], - 'otherkey': 'some_other_key', - 'environment': {'test_env2': '0'}, - }]}] - - mock_os_path_isfile.return_value = True - - mock_open_context = mock.mock_open() - mock_open_context().read.return_value = "YAML" - - with mock.patch('builtins.open', mock_open_context): - base_manager = BaseImageManager(['yamlfile1', 'yamlfile2']) - disk_images = base_manager.load_config_files('disk_images') - - self.assertEqual(2, mock_yaml_load.call_count) - self.assertEqual([{ - 'arch': 'amd64', - 'distro': 'some_awesome_distro', - 'imagename': 'overcloud', - 'type': 'qcow2', - 'elements': ['image_element', 'another_image_element'], - 'packages': ['a_package'], - 'otherkey': 'some_other_key', - 'environment': {'test_env': '1', 'test_env2': '0'}, - }], disk_images) - - @mock.patch('yaml.safe_load', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - def test_load_config_files_missing_image_name(self, mock_os_path_isfile, - mock_yaml_load): - mock_yaml_load.return_value = { - 'disk_images': [{ - 'arch': 'amd64', - 'imagename': 'overcloud', - 'type': 'qcow2', - 'elements': ['image_element'] - }, { - 'arch': 'amd64', - 'type': 'qcow2', - }] - } - - mock_os_path_isfile.return_value = True - - mock_open_context = mock.mock_open() - mock_open_context().read.return_value = "YAML" - - with mock.patch('builtins.open', mock_open_context): - base_manager = BaseImageManager(['yamlfile']) - self.assertRaises(ImageSpecificationException, - base_manager.load_config_files, 'disk_images') - - @mock.patch('yaml.safe_load', autospec=True) - @mock.patch('os.path.isfile', autospec=True) - def test_load_config_files_single_image(self, mock_os_path_isfile, - mock_yaml_load): - mock_yaml_load.side_effect = [{ - 'disk_images': [ - { - 'arch': 'amd64', - 'imagename': 'overcloud', - 'distro': 'some_awesome_distro', - 'type': 'qcow2', - 'elements': ['image_element'] - }, - { - 'arch': 'amd64', - 'imagename': 'not-overcloud', - 'distro': 'some_other_distro', - 'type': 'qcow2', - 'elements': ['other_element'] - } - ]}] - - mock_os_path_isfile.return_value = True - - mock_open_context = mock.mock_open() - mock_open_context().read.return_value = "YAML" - - with mock.patch('builtins.open', mock_open_context): - base_manager = BaseImageManager(['yamlfile1'], - images=['not-overcloud']) - disk_images = base_manager.load_config_files('disk_images') - - self.assertEqual(1, mock_yaml_load.call_count) - self.assertEqual([{ - 'arch': 'amd64', - 'distro': 'some_other_distro', - 'imagename': 'not-overcloud', - 'type': 'qcow2', - 'elements': ['other_element'], - }], disk_images) diff --git a/tripleo_common/tests/image/test_build.py b/tripleo_common/tests/image/test_build.py deleted file mode 100644 index aca12cb01..000000000 --- a/tripleo_common/tests/image/test_build.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
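The multiple-files test above pins down the merge contract of load_config_files: entries are matched by imagename, later files override scalar keys, and keys listed in APPEND_ATTRIBUTES (elements, options, packages, environment) are concatenated or, for dicts, merged rather than replaced. A sketch of that contract as the test implies it (not the deleted implementation):

    APPEND_ATTRIBUTES = ['elements', 'options', 'packages', 'environment']

    def merge_image(base, override):
        merged = dict(base)
        for key, value in override.items():
            if key in APPEND_ATTRIBUTES and key in merged:
                if isinstance(value, dict):
                    merged[key] = {**merged[key], **value}
                else:
                    merged[key] = list(merged[key]) + list(value)
            else:
                merged[key] = value
        return merged

    first = {'imagename': 'overcloud', 'elements': ['image_element'],
             'environment': {'test_env': '1'}}
    second = {'imagename': 'overcloud', 'elements': ['another_image_element'],
              'environment': {'test_env2': '0'}}
    # merge_image(first, second)['elements']
    #   == ['image_element', 'another_image_element']
    # merge_image(first, second)['environment']
    #   == {'test_env': '1', 'test_env2': '0'}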
-# - -from unittest import mock - -from tripleo_common.image.build import ImageBuildManager -from tripleo_common.image.exception import ImageSpecificationException -from tripleo_common.image.image_builder import ImageBuilder -from tripleo_common.tests import base -from tripleo_common.tests.image import fakes - - -class TestImageBuildManager(base.TestCase): - def setUp(self): - super(TestImageBuildManager, self).setUp() - - @mock.patch.object(ImageBuilder, 'get_builder') - @mock.patch('tripleo_common.image.base.BaseImageManager.load_config_files', - autospec=True) - def test_build(self, mock_load_config_files, mock_get_builder): - mock_load_config_files.return_value = fakes.create_disk_images().get( - 'disk_images') - - mock_builder = mock.Mock() - mock_get_builder.return_value = mock_builder - - build_manager = ImageBuildManager(['config/file']) - build_manager.build() - - self.assertEqual(1, mock_load_config_files.call_count) - - mock_builder.build_image.assert_called_with( - './overcloud', 'qcow2', 'some_awesome_os', 'amd64', - ['image_element'], [], [], - {'skip_base': False, 'docker_target': None, 'environment': {}}) - - @mock.patch.object(ImageBuilder, 'get_builder') - @mock.patch('tripleo_common.image.base.BaseImageManager.load_config_files', - autospec=True) - def test_build_no_distro(self, mock_load_config_files, mock_get_builder): - mock_load_config_files.return_value = [{ - 'imagename': 'overcloud', - }] - - mock_builder = mock.Mock() - mock_get_builder.return_value = mock_builder - - build_manager = ImageBuildManager(['config/file']) - self.assertRaises(ImageSpecificationException, build_manager.build) - - @mock.patch('os.path.exists', autospec=True) - @mock.patch.object(ImageBuilder, 'get_builder') - @mock.patch('tripleo_common.image.base.BaseImageManager.load_config_files', - autospec=True) - def test_build_with_skip(self, mock_load_config_files, mock_get_builder, - mock_os_path_exists): - mock_load_config_files.return_value = fakes.create_disk_images().get( - 'disk_images') - - mock_builder = mock.Mock() - mock_get_builder.return_value = mock_builder - - mock_os_path_exists.return_value = True - - build_manager = ImageBuildManager(['config/file'], skip=True) - build_manager.build() - - self.assertEqual(1, mock_os_path_exists.call_count) diff --git a/tripleo_common/tests/image/test_image_builder.py b/tripleo_common/tests/image/test_image_builder.py deleted file mode 100644 index b00be6216..000000000 --- a/tripleo_common/tests/image/test_image_builder.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
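test_build_with_skip above fixes the contract that skip=True consults os.path.exists before invoking the builder. A minimal sketch of that behaviour, with a hypothetical helper name and file suffix (the test only asserts the existence check and the build_image argument order shown in test_build):

import os

def build_images(disk_images, builder, skip=False):
    for image in disk_images:
        destination = './%s' % image['imagename']
        # assumed '.qcow2' suffix for illustration; the removed code
        # derived the real artifact name from the image type
        if skip and os.path.exists('%s.qcow2' % destination):
            continue  # already built, leave the existing image alone
        builder.build_image(
            destination, image['type'], image['distro'], image['arch'],
            image.get('elements', []), [], [],
            {'skip_base': False, 'docker_target': None, 'environment': {}})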
-# - -import subprocess -from unittest import mock - -from tripleo_common.image.exception import ImageBuilderException -from tripleo_common.image.image_builder import DibImageBuilder -from tripleo_common.image.image_builder import ImageBuilder -from tripleo_common.tests import base - - -class TestImageBuilder(base.TestCase): - - def test_get_builder_dib(self): - builder = ImageBuilder.get_builder('dib') - assert isinstance(builder, DibImageBuilder) - - def test_get_builder_unknown(self): - self.assertRaises(ImageBuilderException, ImageBuilder.get_builder, - 'unknown') - - -class TestDibImageBuilder(base.TestCase): - - def setUp(self): - super(TestDibImageBuilder, self).setUp() - self.builder = DibImageBuilder() - - @mock.patch('tripleo_common.image.image_builder.open', - create=True) - @mock.patch('subprocess.Popen') - def test_build_image(self, mock_popen, mock_open): - mock_process = mock.Mock() - mock_process.stdout.readline.side_effect = ['foo\n', 'bar\n', ''] - mock_process.poll.side_effect = [0, 0, 1] - mock_process.returncode = 0 - mock_popen.return_value = mock_process - mock_open.return_value = mock.MagicMock() - mock_file = mock.Mock() - mock_open.return_value.__enter__.return_value = mock_file - self.builder.logger = mock.Mock() - self.builder.build_image('image/path', 'imgtype', 'node_dist', 'arch', - ['element1', 'element2'], ['options'], - ['package1', 'package2'], - {'skip_base': True, - 'docker_target': 'docker-target'}) - mock_popen.assert_called_once_with( - ['disk-image-create', '-a', 'arch', '-o', 'image/path', - '-t', 'imgtype', - '-p', 'package1,package2', 'options', '-n', - '--docker-target', 'docker-target', 'node_dist', - 'element1', 'element2'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - mock_open.assert_called_once_with( - 'image/path.log', 'w', encoding='utf-8') - self.assertEqual([mock.call(u'foo\n'), - mock.call(u'bar\n')], - mock_file.write.mock_calls) - self.builder.logger.info.assert_has_calls([mock.call(u'foo'), - mock.call(u'bar')]) - - @mock.patch('tripleo_common.image.image_builder.open', - create=True) - @mock.patch('subprocess.Popen') - def test_build_image_fails(self, mock_popen, mock_open): - mock_process = mock.Mock() - mock_process.stdout.readline.side_effect = ['error\n', ''] - mock_process.poll.side_effect = [0, 1] - mock_process.returncode = 1 - mock_popen.return_value = mock_process - mock_open.return_value = mock.MagicMock() - mock_file = mock.Mock() - mock_open.return_value.__enter__.return_value = mock_file - self.builder.logger = mock.Mock() - self.assertRaises(subprocess.CalledProcessError, - self.builder.build_image, - 'image/path', 'imgtype', 'node_dist', 'arch', - ['element1', 'element2'], ['options'], - ['package1', 'package2'], - {'skip_base': True, - 'docker_target': 'docker-target'}) - mock_popen.assert_called_once_with( - ['disk-image-create', '-a', 'arch', '-o', 'image/path', - '-t', 'imgtype', - '-p', 'package1,package2', 'options', '-n', - '--docker-target', 'docker-target', 'node_dist', - 'element1', 'element2'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - mock_open.assert_called_once_with( - 'image/path.log', 'w', encoding='utf-8') - self.assertEqual([mock.call(u'error\n')], - mock_file.write.mock_calls) - self.builder.logger.info.assert_has_calls([mock.call(u'error')]) diff --git a/tripleo_common/tests/image/test_image_export.py b/tripleo_common/tests/image/test_image_export.py deleted file mode 100644 index c82647f3c..000000000 --- a/tripleo_common/tests/image/test_image_export.py +++ /dev/null @@ 
-1,535 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import hashlib -import io -import json -import os -import requests -import shutil -from urllib.parse import urlparse -import tempfile -from unittest import mock -import zlib - -from tripleo_common.image import image_export -from tripleo_common.image import image_uploader -from tripleo_common.tests import base - - -class TestImageExport(base.TestCase): - def setUp(self): - super(TestImageExport, self).setUp() - export_dir = image_export.IMAGE_EXPORT_DIR - with tempfile.NamedTemporaryFile() as f: - temp_export_dir = f.name - image_export.make_dir(temp_export_dir) - - def restore_export_dir(): - shutil.rmtree(temp_export_dir) - image_export.IMAGE_EXPORT_DIR = export_dir - - image_export.IMAGE_EXPORT_DIR = temp_export_dir - self.addCleanup(restore_export_dir) - - def test_make_dir(self): - - path = os.path.join(image_export.IMAGE_EXPORT_DIR, 'foo/bar') - - self.assertFalse(os.path.exists(path)) - - self.addCleanup(os.rmdir, path) - image_export.make_dir(path) - - self.assertTrue(os.path.isdir(path)) - - # Call again to assert no error is raised - image_export.make_dir(path) - - def test_image_tag_from_url(self): - url = urlparse('docker://docker.io/t/nova-api:latest') - self.assertEqual( - ('t/nova-api', 'latest'), - image_export.image_tag_from_url(url) - ) - url = urlparse('containers-storage:localhost:8787/t/nova-api:latest') - self.assertEqual( - ('localhost:8787/t/nova-api', 'latest'), - image_export.image_tag_from_url(url) - ) - - url = urlparse('docker://docker.io/t/nova-api') - self.assertEqual( - ('t/nova-api', None), - image_export.image_tag_from_url(url) - ) - - def test_export_stream(self): - blob_data = b'The Blob' - blob_compressed = zlib.compress(blob_data) - calc_digest = hashlib.sha256() - calc_digest.update(blob_compressed) - compressed_digest = 'sha256:' + calc_digest.hexdigest() - - target_url = urlparse('docker://localhost:8787/t/nova-api:latest') - layer = { - 'digest': 'sha256:somethingelse' - } - calc_digest = hashlib.sha256() - layer_stream = io.BytesIO(blob_compressed) - mask = os.umask(0o077) - layer_digest, _ = image_export.export_stream( - target_url, layer, layer_stream, verify_digest=False - ) - self.assertEqual(compressed_digest, layer_digest) - self.assertEqual(compressed_digest, layer['digest']) - self.assertEqual(len(blob_compressed), layer['size']) - - blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR, - 'v2/t/nova-api/blobs') - blob_path = os.path.join(blob_dir, '%s.gz' % compressed_digest) - - self.assertTrue(os.path.isdir(blob_dir)) - self.assertTrue(os.path.isfile(blob_path)) - with open(blob_path, 'rb') as f: - self.assertEqual(blob_compressed, f.read()) - - os.umask(mask) - blob_mode = oct(os.stat(blob_path).st_mode) - self.assertEqual('644', blob_mode[-3:]) - - @mock.patch('tripleo_common.image.image_export.open', - side_effect=MemoryError()) - def test_export_stream_memory_error(self, mock_open): - blob_data = b'The Blob' - blob_compressed = 
zlib.compress(blob_data) - calc_digest = hashlib.sha256() - calc_digest.update(blob_compressed) - - target_url = urlparse('docker://localhost:8787/t/nova-api:latest') - layer = { - 'digest': 'sha256:somethingelse' - } - calc_digest = hashlib.sha256() - layer_stream = io.BytesIO(blob_compressed) - self.assertRaises(MemoryError, image_export.export_stream, - target_url, layer, layer_stream, verify_digest=False) - - def test_export_stream_verify_failed(self): - blob_data = b'The Blob' - blob_compressed = zlib.compress(blob_data) - calc_digest = hashlib.sha256() - calc_digest.update(blob_compressed) - - target_url = urlparse('docker://localhost:8787/t/nova-api:latest') - layer = { - 'digest': 'sha256:somethingelse' - } - calc_digest = hashlib.sha256() - layer_stream = io.BytesIO(blob_compressed) - self.assertRaises(requests.exceptions.HTTPError, - image_export.export_stream, - target_url, layer, layer_stream, - verify_digest=True) - blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR, - 'v2/t/nova-api/blobs') - blob_path = os.path.join(blob_dir, 'sha256:somethingelse.gz') - - self.assertTrue(os.path.isdir(blob_dir)) - self.assertFalse(os.path.isfile(blob_path)) - - def test_cross_repo_mount(self): - target_url = urlparse('docker://localhost:8787/t/nova-api:latest') - other_url = urlparse('docker://localhost:8787/t/nova-compute:latest') - image_layers = { - 'sha256:1234': other_url - } - source_layers = [ - 'sha256:1234', 'sha256:6789' - ] - source_blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR, - 'v2/t/nova-compute/blobs') - source_blob_path = os.path.join(source_blob_dir, 'sha256:1234.gz') - target_blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR, - 'v2/t/nova-api/blobs') - target_blob_path = os.path.join(target_blob_dir, 'sha256:1234.gz') - - # call with missing source, no change - image_export.cross_repo_mount(target_url, image_layers, source_layers, - uploaded_layers={}) - self.assertFalse(os.path.exists(source_blob_path)) - self.assertFalse(os.path.exists(target_blob_path)) - - image_export.make_dir(source_blob_dir) - with open(source_blob_path, 'w') as f: - f.write('blob') - self.assertTrue(os.path.exists(source_blob_path)) - - # call with existing source - image_export.cross_repo_mount(target_url, image_layers, source_layers, - uploaded_layers={}) - self.assertTrue(os.path.exists(target_blob_path)) - with open(target_blob_path, 'r') as f: - self.assertEqual('blob', f.read()) - - def test_export_manifest_config(self): - target_url = urlparse('docker://localhost:8787/t/nova-api:latest') - config_str = '{"config": {}}' - config_digest = 'sha256:1234' - manifest = { - 'config': { - 'digest': config_digest, - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [ - {'digest': 'sha256:aeb786'}, - {'digest': 'sha256:4dc536'}, - ], - 'mediaType': 'application/vnd.docker.' 
- 'distribution.manifest.v2+json', - } - catalog = {'repositories': ['t/nova-api']} - - manifest_str = json.dumps(manifest) - calc_digest = hashlib.sha256() - calc_digest.update(manifest_str.encode('utf-8')) - manifest_digest = 'sha256:%s' % calc_digest.hexdigest() - - image_export.export_manifest_config( - target_url, manifest_str, - image_uploader.MEDIA_MANIFEST_V2, config_str - ) - - catalog_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - 'v2/_catalog' - ) - config_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - 'v2/t/nova-api/blobs/sha256:1234' - ) - manifest_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - 'v2/t/nova-api/manifests', - manifest_digest, - 'index.json' - ) - manifest_htaccess_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - 'v2/t/nova-api/manifests', - manifest_digest, - '.htaccess' - ) - expected_htaccess = '''Header set Content-Type "%s" -Header set Docker-Content-Digest "%s" -Header set ETag "%s" -''' % ( - 'application/vnd.docker.distribution.manifest.v2+json', - manifest_digest, - manifest_digest - ) - - with open(catalog_path, 'r') as f: - self.assertEqual(catalog, json.load(f)) - with open(config_path, 'r') as f: - self.assertEqual(config_str, f.read()) - with open(manifest_path, 'r') as f: - self.assertEqual(manifest_str, f.read()) - with open(manifest_htaccess_path, 'r') as f: - self.assertEqual(expected_htaccess, f.read()) - - def test_write_parse_type_map_file(self): - manifest_dir_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - 'v2/foo/bar/manifests' - ) - map_file_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - manifest_dir_path, 'latest.type-map' - ) - - image_export.make_dir(manifest_dir_path) - image_export.write_type_map_file( - 'foo/bar', - 'latest', - {image_export.MEDIA_MANIFEST_V2: 'sha256:1234abcd'} - ) - - expected_map_file = '''URI: latest - -Content-Type: application/vnd.docker.distribution.manifest.v2+json -URI: sha256:1234abcd/index.json - -''' - # assert the file contains the expected content - with open(map_file_path, 'r') as f: - self.assertEqual(expected_map_file, f.read()) - - # assert parse_type_map_file correctly reads that file - self.assertEqual( - { - 'application/vnd.docker.distribution.manifest.v2+json': - 'sha256:1234abcd/index.json' - }, - image_export.parse_type_map_file(map_file_path) - ) - - # assert a multi-entry file is correctly parsed - multi_map_file = '''URI: latest - -Content-Type: application/vnd.docker.distribution.manifest.v2+json -URI: sha256:1234abcd/index.json - -Content-Type: application/vnd.docker.distribution.manifest.list.v2+json -URI: sha256:eeeeeeee/index.json - -''' - with open(map_file_path, 'w+') as f: - f.write(multi_map_file) - self.assertEqual( - { - 'application/vnd.docker.distribution.manifest.v2+json': - 'sha256:1234abcd/index.json', - 'application/vnd.docker.distribution.manifest.list.v2+json': - 'sha256:eeeeeeee/index.json' - }, - image_export.parse_type_map_file(map_file_path) - ) - - def test_migrate_to_type_map_file(self): - manifest_dir_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - 'v2/foo/bar/manifests' - ) - map_file_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - manifest_dir_path, 'latest.type-map' - ) - symlink_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - manifest_dir_path, 'latest' - ) - manifest_path = os.path.join( - image_export.IMAGE_EXPORT_DIR, - manifest_dir_path, 'sha256:1234abcd' - ) - image_export.make_dir(manifest_dir_path) - # create legacy symlink - os.symlink(manifest_path, symlink_path) - - # run the 
migration - image_export.migrate_to_type_map_file('foo/bar', symlink_path) - - expected_map_file = '''URI: latest - -Content-Type: application/vnd.docker.distribution.manifest.v2+json -URI: sha256:1234abcd/index.json - -''' - # assert the migrated file contains the expected content - with open(map_file_path, 'r') as f: - self.assertEqual(expected_map_file, f.read()) - - def _write_test_image(self, url, manifest): - image, tag = image_uploader.BaseImageUploader._image_tag_from_url( - url) - blob_dir = os.path.join( - image_export.IMAGE_EXPORT_DIR, 'v2', image[1:], 'blobs') - image_export.make_dir(blob_dir) - - if manifest.get('schemaVersion', 2) == 1: - config_str = None - manifest_type = image_uploader.MEDIA_MANIFEST_V1 - layers = list(reversed([x['blobSum'] - for x in manifest['fsLayers']])) - else: - config_str = '{"config": {}}' - manifest_type = image_uploader.MEDIA_MANIFEST_V2 - layers = [x['digest'] for x in manifest['layers']] - manifest_str = json.dumps(manifest) - calc_digest = hashlib.sha256() - calc_digest.update(manifest_str.encode('utf-8')) - manifest_digest = 'sha256:%s' % calc_digest.hexdigest() - - image_export.export_manifest_config( - url, manifest_str, manifest_type, config_str - ) - for digest in layers: - blob_path = os.path.join(blob_dir, '%s.gz' % digest) - - with open(blob_path, 'w+') as f: - f.write('The Blob') - return manifest_digest - - def assertFiles(self, dirs, files, deleted): - for d in dirs: - self.assertTrue(os.path.isdir(d), 'is dir: %s' % d) - for f in files: - self.assertTrue(os.path.isfile(f), 'is file: %s' % f) - for d in deleted: - self.assertFalse(os.path.exists(d), 'deleted still exists: %s' % d) - - def test_build_catalog(self): - prefix = 'docker://localhost:8787/t' - url1 = urlparse('%s/nova-api:latest' % prefix) - url2 = urlparse('%s/namespace/nova-compute:abc' % prefix) - url3 = urlparse('%s/yet/another/namespace/nova-compute:abc' % prefix) - expected_list = set([ - 't/namespace/nova-compute', - 't/yet/another/namespace/nova-compute', - 't/nova-api' - ]) - manifest = { - 'schemaVersion': 1, - 'fsLayers': [ - {'blobSum': 'sha256:aeb786'}, - {'blobSum': 'sha256:4dc536'}, - ], - 'mediaType': 'application/vnd.docker.' - 'distribution.manifest.v2+json', - } - self._write_test_image( - url=url1, - manifest=manifest - ) - self._write_test_image( - url=url2, - manifest=manifest - ) - self._write_test_image( - url=url3, - manifest=manifest - ) - - image_export.build_catalog() - catalog = os.path.join(image_export.IMAGE_EXPORT_DIR, 'v2', '_catalog') - with open(catalog, 'r') as f: - data = json.load(f) - self.assertTrue(set(data['repositories']) == expected_list) - - def test_delete_image(self): - url1 = urlparse('docker://localhost:8787/t/nova-api:latest') - url2 = urlparse('docker://localhost:8787/t/nova-api:abc') - manifest_1 = { - 'schemaVersion': 1, - 'fsLayers': [ - {'blobSum': 'sha256:aeb786'}, - {'blobSum': 'sha256:4dc536'}, - ], - 'mediaType': 'application/vnd.docker.' - 'distribution.manifest.v2+json', - } - manifest_2 = { - 'config': { - 'digest': 'sha256:5678', - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [ - {'digest': 'sha256:aeb786'}, # shared with manifest_1 - {'digest': 'sha256:eeeeee'}, # different to manifest_1 - ], - 'mediaType': 'application/vnd.docker.' 
- 'distribution.manifest.v2+json', - } - - m1_digest = self._write_test_image( - url=url1, - manifest=manifest_1 - ) - m2_digest = self._write_test_image( - url=url2, - manifest=manifest_2 - ) - - v2_dir = os.path.join(image_export.IMAGE_EXPORT_DIR, 'v2') - image_dir = os.path.join(v2_dir, 't/nova-api') - blob_dir = os.path.join(image_dir, 'blobs') - m_dir = os.path.join(image_dir, 'manifests') - - # assert every directory and file for the 2 images - self.assertFiles( - dirs=[ - v2_dir, - image_dir, - blob_dir, - m_dir, - os.path.join(m_dir, m1_digest), - os.path.join(m_dir, m2_digest), - ], - files=[ - os.path.join(m_dir, m1_digest, 'index.json'), - os.path.join(m_dir, m2_digest, 'index.json'), - os.path.join(blob_dir, 'sha256:aeb786.gz'), - os.path.join(blob_dir, 'sha256:4dc536.gz'), - os.path.join(blob_dir, 'sha256:5678'), - os.path.join(blob_dir, 'sha256:eeeeee.gz'), - os.path.join(m_dir, 'latest.type-map'), - os.path.join(m_dir, 'abc.type-map'), - ], - deleted=[] - ) - - image_export.delete_image(url2) - - # assert files deleted for nova-api:abc - self.assertFiles( - dirs=[ - v2_dir, - image_dir, - blob_dir, - m_dir, - os.path.join(m_dir, m1_digest), - ], - files=[ - os.path.join(m_dir, m1_digest, 'index.json'), - os.path.join(blob_dir, 'sha256:aeb786.gz'), - os.path.join(blob_dir, 'sha256:4dc536.gz'), - os.path.join(m_dir, 'latest.type-map'), - ], - deleted=[ - os.path.join(m_dir, 'abc'), - os.path.join(m_dir, m2_digest), - os.path.join(m_dir, m2_digest, 'index.json'), - os.path.join(blob_dir, 'sha256:5678'), - os.path.join(blob_dir, 'sha256:eeeeee.gz'), - ] - ) - - image_export.delete_image(url1) - - # assert all nova-api files deleted after deleting the last image - self.assertFiles( - dirs=[ - v2_dir, - ], - files=[], - deleted=[ - image_dir, - blob_dir, - m_dir, - os.path.join(m_dir, 'abc'), - os.path.join(m_dir, 'latest'), - os.path.join(m_dir, m1_digest), - os.path.join(m_dir, m1_digest, 'index.json'), - os.path.join(m_dir, m2_digest), - os.path.join(m_dir, m2_digest, 'index.json'), - os.path.join(blob_dir, 'sha256:5678'), - os.path.join(blob_dir, 'sha256:eeeeee.gz'), - os.path.join(blob_dir, 'sha256:aeb786.gz'), - os.path.join(blob_dir, 'sha256:4dc536.gz'), - ] - ) diff --git a/tripleo_common/tests/image/test_image_uploader.py b/tripleo_common/tests/image/test_image_uploader.py deleted file mode 100644 index 933e9ca2c..000000000 --- a/tripleo_common/tests/image/test_image_uploader.py +++ /dev/null @@ -1,2996 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
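The delete_image assertions above encode a simple refcounting rule for blobs shared between manifests: a blob may be removed only once no manifest that remains exported still references it, and the image directory itself disappears with its last manifest. An illustrative reduction of that rule, not the removed code:

def blobs_safe_to_delete(deleted_layers, remaining_manifests):
    # A blob survives as long as any remaining manifest references it.
    still_referenced = set()
    for layers in remaining_manifests:
        still_referenced.update(layers)
    return set(deleted_layers) - still_referenced

After deleting nova-api:abc in the test above, manifest_1 still references sha256:aeb786, so only sha256:eeeeee and the sha256:5678 config blob qualify:

blobs_safe_to_delete(
    ['sha256:aeb786', 'sha256:eeeeee', 'sha256:5678'],
    [['sha256:aeb786', 'sha256:4dc536']])
# -> {'sha256:eeeeee', 'sha256:5678'}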
-# - -import datetime -import hashlib -import io -import json -import operator -import requests -from requests_mock.contrib import fixture as rm_fixture -from urllib.parse import urlparse -from unittest import mock -import zlib - -from dateutil.tz import tzlocal -from tripleo_common.image.exception import ImageNotFoundException -from tripleo_common.image.exception import ImageRateLimitedException -from tripleo_common.image.exception import ImageUploaderException -from tripleo_common.image import image_uploader -from tripleo_common.tests import base -from tripleo_common.tests.image import fakes - - -filedata = str( - """container_images: - - imagename: docker.io/tripleomastercentos9/heat-docker-agents-centos:latest - push_destination: localhost:8787 - - imagename: docker.io/tripleomastercentos9/centos-binary-nova-compute:liberty - push_destination: localhost:8787 - - imagename: docker.io/tripleomastercentos9/centos-binary-nova-libvirt:liberty - - imagename: docker.io/tripleomastercentos9/image-with-missing-tag - push_destination: localhost:8787 -""") # noqa - - -class TestRegistrySessionHelper(base.TestCase): - def setUp(self): - super(TestRegistrySessionHelper, self).setUp() - - def test_check_status(self): - session = mock.Mock() - raise_for_status_mock = mock.Mock() - request = mock.Mock() - request.raise_for_status = raise_for_status_mock - request.status_code = 200 - - image_uploader.RegistrySessionHelper.check_status(session, request) - raise_for_status_mock.assert_called_once() - - def test_check_status_reauth(self): - session = mock.Mock() - session_reauth_mock = mock.Mock() - session.headers = {} - session.auth_args = {} - session.reauthenticate = session_reauth_mock - raise_for_status_mock = mock.Mock() - request = mock.Mock() - request.headers = {'www-authenticate': 'foo'} - request.raise_for_status = raise_for_status_mock - request.status_code = 401 - - image_uploader.RegistrySessionHelper.check_status(session, request) - session_reauth_mock.assert_called_once_with() - raise_for_status_mock.assert_called_once() - - def test_check_status_ratelimit(self): - session = mock.Mock() - session_reauth_mock = mock.Mock() - session.headers = {} - session.auth_args = {} - session.reauthenticate = session_reauth_mock - raise_for_status_mock = mock.Mock() - request = mock.Mock() - request.headers = {'www-authenticate': 'foo'} - request.raise_for_status = raise_for_status_mock - request.status_code = 429 - - self.assertRaises(ImageRateLimitedException, - image_uploader.RegistrySessionHelper.check_status, - session, - request) - - def test_check_redirect_trusted_no_redirect(self): - get_mock = mock.Mock() - session = mock.Mock() - session.headers = {'Authorization': 'foo'} - session.auth_args = {} - session.get = get_mock - resp = mock.Mock() - resp.status_code = 200 - - r = image_uploader.RegistrySessionHelper.check_redirect_trusted( - resp, session) - - self.assertEqual(resp, r) - - def test_check_redirect_trusted_is_trusted(self): - get_result = mock.Mock() - get_result.status_code = 200 - get_mock = mock.Mock() - get_mock.return_value = get_result - session = mock.Mock() - session.headers = {'Authorization': 'foo'} - session.auth_args = {} - session.get = get_mock - resp = mock.Mock() - resp.headers = {'Location': 'https://registry.redhat.io/v2'} - resp.status_code = 307 - - r = image_uploader.RegistrySessionHelper.check_redirect_trusted( - resp, session) - - self.assertNotEqual(resp, r) - self.assertEqual(get_result, r) - get_mock.assert_called_once_with('https://registry.redhat.io/v2', - 
stream=True, - timeout=30) - self.assertEqual(session.headers['Authorization'], 'foo') - - def test_check_redirect_trusted_not_trusted(self): - get_result = mock.Mock() - get_result.status_code = 200 - get_mock = mock.Mock() - get_mock.return_value = get_result - session = mock.Mock() - session.headers = {'Authorization': 'foo'} - session.auth_args = {} - session.get = get_mock - resp = mock.Mock() - resp.headers = {'Location': 'http://172.16.12.12:8787/'} - resp.status_code = 307 - - r = image_uploader.RegistrySessionHelper.check_redirect_trusted( - resp, session, False, 12) - - self.assertNotEqual(resp, r) - self.assertEqual(get_result, r) - get_mock.assert_called_once_with('http://172.16.12.12:8787/', - stream=False, - timeout=12) - self.assertEqual(session.headers['Authorization'], 'foo') - - @mock.patch('tripleo_common.image.image_uploader.RegistrySessionHelper' - '.check_status') - def test_action(self, mock_status): - request_session = mock.Mock() - mock_get = mock.Mock() - mock_get.return_value = {} - request_session.get = mock_get - - image_uploader.RegistrySessionHelper._action('get', request_session) - mock_get.assert_called_once_with() - mock_status.assert_called_once_with(session=request_session, - request={}) - - @mock.patch('tripleo_common.image.image_uploader.RegistrySessionHelper' - '.check_status') - def test_action_reauth(self, mock_status): - exc_response = mock.Mock() - exc_response.status_code = 401 - auth_exc = requests.exceptions.HTTPError(response=exc_response) - mock_status.side_effect = [auth_exc, True] - request_session = mock.Mock() - mock_get = mock.Mock() - mock_get.return_value = {} - request_session.get = mock_get - - image_uploader.RegistrySessionHelper._action('get', request_session) - - get_call = mock.call() - get_calls = [get_call, get_call] - mock_get.assert_has_calls(get_calls) - status_call = mock.call(session=request_session, request={}) - status_calls = [status_call, status_call] - mock_status.assert_has_calls(status_calls) - - @mock.patch('tripleo_common.image.image_uploader.RegistrySessionHelper' - '.check_status') - def test_action_reauth_fail(self, mock_status): - exc_response = mock.Mock() - exc_response.status_code = 404 - auth_exc = requests.exceptions.HTTPError(response=exc_response) - mock_status.side_effect = auth_exc - request_session = mock.Mock() - mock_get = mock.Mock() - mock_get.return_value = {} - request_session.get = mock_get - - self.assertRaises(requests.exceptions.HTTPError, - image_uploader.RegistrySessionHelper._action, - 'get', - request_session) - - mock_get.assert_called_once_with() - mock_status.assert_called_once_with(session=request_session, - request={}) - - @mock.patch('tripleo_common.image.image_uploader.RegistrySessionHelper' - '._action') - def test_get(self, mock_action): - request_session = mock.Mock() - image_uploader.RegistrySessionHelper.get(request_session) - - mock_action.assert_called_once_with('get', - request_session) - - @mock.patch('tripleo_common.image.image_uploader.RegistrySessionHelper' - '._action') - def test_patch(self, mock_action): - request_session = mock.Mock() - image_uploader.RegistrySessionHelper.patch(request_session) - - mock_action.assert_called_once_with('patch', - request_session) - - @mock.patch('tripleo_common.image.image_uploader.RegistrySessionHelper' - '._action') - def test_post(self, mock_action): - request_session = mock.Mock() - image_uploader.RegistrySessionHelper.post(request_session) - - mock_action.assert_called_once_with('post', - request_session) - - 
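The _action tests above (test_action, test_action_reauth, test_action_reauth_fail) pin down a retry-once-on-401 pattern. An illustrative reconstruction under those assertions, with check_status reduced to a stub (per the earlier tests, the removed helper also refreshed bearer tokens on 401 and translated 429 into ImageRateLimitedException):

import requests

def check_status(session, request):
    # stub: the removed helper did more, see TestRegistrySessionHelper
    request.raise_for_status()

def _action(method, request_session, *args, **kwargs):
    request = getattr(request_session, method)(*args, **kwargs)
    try:
        check_status(session=request_session, request=request)
    except requests.exceptions.HTTPError as exc:
        if exc.response is not None and exc.response.status_code == 401:
            # stale auth: retry the request exactly once
            request = getattr(request_session, method)(*args, **kwargs)
            check_status(session=request_session, request=request)
        else:
            raise
    return request

This matches the asserted call counts: one verb call and one status check on success or non-401 failure, two of each when the first check raises a 401.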
@mock.patch('tripleo_common.image.image_uploader.RegistrySessionHelper' - '._action') - def test_put(self, mock_action): - request_session = mock.Mock() - image_uploader.RegistrySessionHelper.put(request_session) - - mock_action.assert_called_once_with('put', - request_session) - - @mock.patch('tripleo_common.utils.locks.threadinglock.ThreadingLock', - name='mock_lock') - def test_get_cached_bearer_token_expired(self, mock_lock): - issued = datetime.datetime.now(tzlocal()) - datetime.timedelta( - minutes=1 - ) - - resp = {"fakeimage": { - 'token': 'blah', - 'expires_in': 1, - 'issued_at': f'{issued}'} - } - - scope = "fakeimage" - mock_lock.sessions.return_value = resp - - ret = image_uploader.RegistrySessionHelper.get_cached_bearer_token( - mock_lock, scope) - - self.assertIsNone(ret) - - @mock.patch('tripleo_common.utils.locks.threadinglock.ThreadingLock', - name='mock_lock') - def test_get_cached_bearer_token_expires_at_current(self, mock_lock): - expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=60) - expiry_time = "{}Z".format(expiry.isoformat()) - - resp = {"fakeimage": { - 'token': 'blah', - 'expires_at': expiry_time, - 'issued_at': '2022-10-17T23:45:09.306Z'} - } - - scope = "fakeimage" - mock_lock.sessions.return_value = resp - - ret = image_uploader.RegistrySessionHelper.get_cached_bearer_token( - mock_lock, scope) - - self.assertEqual(ret, "blah") - - @mock.patch('tripleo_common.utils.locks.threadinglock.ThreadingLock', - name='mock_lock') - def test_get_cached_bearer_token_no_expires_in(self, mock_lock): - - resp = {"fakeimage": { - 'token': 'blah', - 'issued_at': '2022-10-17T23:45:09.306Z'} - } - - scope = "fakeimage" - mock_lock.sessions.return_value = resp - - ret = image_uploader.RegistrySessionHelper.get_cached_bearer_token( - mock_lock, scope) - - self.assertEqual(ret, "blah") - - @mock.patch('tripleo_common.utils.locks.threadinglock.ThreadingLock', - name='mock_lock') - def test_get_cached_bearer_token_no_issued_at(self, mock_lock): - - resp = {"fakeimage": { - 'token': 'blah', - 'expires_at': '2022-10-17T23:45:09.306Z'} - } - - scope = "fakeimage" - mock_lock.sessions.return_value = resp - - ret = image_uploader.RegistrySessionHelper.get_cached_bearer_token( - mock_lock, scope) - - self.assertIsNone(ret) - - @mock.patch('tripleo_common.utils.locks.threadinglock.ThreadingLock', - name='mock_lock') - def test_get_cached_bearer_token_expires_in_current(self, mock_lock): - issued = datetime.datetime.now(tzlocal()) - - resp = {"fakeimage": { - 'token': 'blah', - 'expires_in': 10000, - 'issued_at': f'{issued}'} - } - - scope = "fakeimage" - mock_lock.sessions.return_value = resp - - ret = image_uploader.RegistrySessionHelper.get_cached_bearer_token( - mock_lock, scope) - - self.assertEqual(ret, "blah") - - -class TestImageUploadManager(base.TestCase): - def setUp(self): - super(TestImageUploadManager, self).setUp() - files = [] - files.append('testfile') - self.filelist = files - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_registry_to_registry') - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'BaseImageUploader._inspect') - @mock.patch('tripleo_common.image.base.open', - mock.mock_open(read_data=filedata), create=True) - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader.is_insecure_registry', - return_value=True) - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader._images_match', - return_value=False) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('fcntl.ioctl', side_effect=Exception) - @mock.patch('tripleo_common.image.image_uploader.' - 'get_undercloud_registry', return_value='192.0.2.0:8787') - def test_file_parsing(self, mock_gur, mockioctl, mockpath, - mock_images_match, mock_is_insecure, mock_inspect, - mock_auth, mock_copy, mock_manifest, - check_status): - - mock_manifest.return_value = '{"layers": []}' - mock_inspect.return_value = {} - manager = image_uploader.ImageUploadManager(self.filelist) - parsed_data = manager.upload() - mockpath(self.filelist[0]) - - expected_data = fakes.create_parsed_upload_images() - sorted_expected_data = sorted(expected_data, - key=operator.itemgetter('imagename')) - sorted_parsed_data = sorted(parsed_data, - key=operator.itemgetter('imagename')) - self.assertEqual(sorted_expected_data, sorted_parsed_data) - - @mock.patch('subprocess.Popen', autospec=True) - @mock.patch('socket.gethostname', return_value='uc.somedomain') - def test_get_undercloud_registry_ipv4(self, mock_gethostname, - mock_popen): - mock_process = mock.Mock() - mock_process.communicate.return_value = ( - '192.0.2.1 uc.ctlplane.localdomain uc.ctlplane', '') - mock_process.returncode = 0 - mock_popen.return_value = mock_process - self.assertEqual('uc.ctlplane.localdomain:8787', - image_uploader.get_undercloud_registry()) - - @mock.patch('subprocess.Popen', autospec=True) - @mock.patch('socket.gethostname', return_value='uc.somedomain') - def test_get_undercloud_registry_ipv6(self, mock_gethostname, - mock_popen): - mock_process = mock.Mock() - mock_process.communicate.return_value = ( - 'fd12::1 uc.ctlplane.localdomain uc.ctlplane', '') - mock_process.returncode = 0 - mock_popen.return_value = mock_process - self.assertEqual('uc.ctlplane.localdomain:8787', - image_uploader.get_undercloud_registry()) - - @mock.patch('subprocess.Popen', autospec=True) - @mock.patch('socket.gethostname', return_value='localhost.localdomain') - def test_get_undercloud_registry_no_etc_hosts(self, mock_gethostname, - mock_popen): - mock_process = mock.Mock() - mock_process.communicate.return_value = ('', '') - mock_process.returncode = 2 - mock_popen.return_value = mock_process - self.assertEqual('localhost:8787', - image_uploader.get_undercloud_registry()) - - @mock.patch('subprocess.Popen', autospec=True) - @mock.patch('socket.gethostname', return_value='undercloud.somedomain') - def test_get_push_destination(self, mock_gethostname, mock_popen): - mock_process = mock.Mock() - mock_process.communicate.return_value = ( - 'fd12::1 uc.ctlplane.localdomain uc.ctlplane', '') - mock_process.returncode = 0 - mock_popen.return_value = mock_process - manager = image_uploader.ImageUploadManager(self.filelist) - self.assertEqual( - 'uc.ctlplane.localdomain:8787', - manager.get_push_destination({}) - ) - self.assertEqual( - '192.0.2.1:8787', - manager.get_push_destination({'push_destination': - '192.0.2.1:8787'}) - ) - self.assertEqual( - 'uc.ctlplane.localdomain:8787', - manager.get_push_destination({'push_destination': False}) - ) - self.assertEqual( - 'uc.ctlplane.localdomain:8787', - manager.get_push_destination({'push_destination': True}) 
- ) - self.assertEqual( - 'uc.ctlplane.localdomain:8787', - manager.get_push_destination({'push_destination': None}) - ) - - def test_get_uploader_python(self): - manager = image_uploader.ImageUploadManager(self.filelist) - uploader = manager.get_uploader('python') - assert isinstance(uploader, image_uploader.PythonImageUploader) - - def test_get_builder_unknown(self): - manager = image_uploader.ImageUploadManager(self.filelist) - self.assertRaises(ImageUploaderException, - manager.get_uploader, - 'unknown') - - def test_validate_registry_credentials(self): - # valid credentials - image_uploader.ImageUploadManager( - self.filelist, - registry_credentials=None) - image_uploader.ImageUploadManager( - self.filelist, - registry_credentials={}) - manager = image_uploader.ImageUploadManager( - self.filelist, - registry_credentials={ - 'docker.io': {'my_username': 'my_password'}, - u'quay.io': {u'quay_username': u'quay_password'}, - }) - self.assertEqual( - ('my_username', 'my_password'), - manager.uploader('python').credentials_for_registry('docker.io') - ) - self.assertEqual( - ('quay_username', 'quay_password'), - manager.uploader('python').credentials_for_registry('quay.io') - ) - - # invalid credentials - self.assertRaises( - TypeError, - image_uploader.ImageUploadManager, - self.filelist, - registry_credentials='foo' - ) - self.assertRaises( - TypeError, - image_uploader.ImageUploadManager, - self.filelist, - registry_credentials={ - 1234: {'my_username': 'my_password'}, - } - ) - self.assertRaises( - TypeError, - image_uploader.ImageUploadManager, - self.filelist, - registry_credentials={ - 'docker.io': {True: 'my_password'}, - } - ) - self.assertRaises( - TypeError, - image_uploader.ImageUploadManager, - self.filelist, - registry_credentials={ - 'docker.io': {'my_username': True}, - } - ) - self.assertRaises( - TypeError, - image_uploader.ImageUploadManager, - self.filelist, - registry_credentials={ - 'docker.io': {'my_username': 'my_password', 'foo': 'bar'}, - } - ) - - -class TestUploadTask(base.TestCase): - def test_basics(self): - obj = image_uploader.UploadTask( - image_name='foo:bar', - pull_source='docker.io/namespace', - push_destination='127.0.0.1:8787', - append_tag='baz', - modify_role=None, - modify_vars=None, - cleanup=False, - multi_arch=False) - self.assertEqual(obj.repo, 'docker.io/namespace/foo') - self.assertEqual(obj.source_tag, 'bar') - self.assertEqual(obj.target_tag, 'barbaz') - self.assertEqual(obj.target_image_no_tag, - '127.0.0.1:8787/namespace/foo') - self.assertEqual(obj.target_image, - '127.0.0.1:8787/namespace/foo:barbaz') - - def test_repo_pull_source_trailing_slash(self): - obj = image_uploader.UploadTask( - image_name='foo:bar', - pull_source='docker.io/namespace/', - push_destination='127.0.0.1:8787', - append_tag=None, - modify_role=None, - modify_vars=None, - cleanup=False, - multi_arch=False) - self.assertEqual(obj.repo, 'docker.io/namespace/foo') - - def test_repo_push_destination_trailing_slash(self): - obj = image_uploader.UploadTask( - image_name='foo:bar', - pull_source='docker.io/namespace', - push_destination='127.0.0.1:8787/', - append_tag=None, - modify_role=None, - modify_vars=None, - cleanup=False, - multi_arch=False) - self.assertEqual(obj.target_image_no_tag, - '127.0.0.1:8787/namespace/foo') - - -class TestBaseImageUploader(base.TestCase): - - def setUp(self): - super(TestBaseImageUploader, self).setUp() - self.uploader = image_uploader.BaseImageUploader() - self.uploader.init_registries_cache() - # pylint: disable=no-member - 
self.uploader._inspect.retry.sleep = mock.Mock() - self.requests = self.useFixture(rm_fixture.Fixture()) - - @mock.patch.object(requests.Session, 'get', return_value=True) - def test_is_insecure_registry_known(self, mock_session): - self.assertFalse( - self.uploader.is_insecure_registry('docker.io')) - - @mock.patch.object(requests.Session, 'get', return_value=True) - def test_is_insecure_registry_secure(self, mock_session): - self.assertFalse( - self.uploader.is_insecure_registry('192.0.2.0:8787')) - self.assertFalse( - self.uploader.is_insecure_registry('192.0.2.0:8787')) - calls = [mock.call('https://192.0.2.0:8787', timeout=30)] - mock_session.assert_has_calls(calls) - self.assertEqual(mock_session.call_count, 1) - - @mock.patch.object(requests.Session, 'get', - side_effect=[requests.exceptions.SSLError('err'), True]) - def test_is_insecure_registry_bad_cert(self, mock_session): - self.assertTrue( - self.uploader.is_insecure_registry('bcert:8787')) - self.assertTrue( - self.uploader.is_insecure_registry('bcert:8787')) - calls = [mock.call('https://bcert:8787', timeout=30), - mock.call('https://bcert:8787', timeout=30, verify=False)] - mock_session.assert_has_calls(calls) - self.assertEqual(mock_session.call_count, 2) - - @mock.patch.object(requests.Session, 'get', - side_effect=requests.exceptions.ReadTimeout('ouch')) - def test_is_insecure_registry_timeout(self, mock_session): - self.assertFalse( - self.uploader.is_insecure_registry('192.0.2.0:8787')) - self.assertFalse( - self.uploader.is_insecure_registry('192.0.2.0:8787')) - calls = [mock.call('https://192.0.2.0:8787', timeout=30)] - mock_session.assert_has_calls(calls) - self.assertEqual(mock_session.call_count, 1) - - @mock.patch.object(requests.Session, 'get', - side_effect=requests.exceptions.SSLError('ouch')) - def test_is_insecure_registry_insecure(self, mock_session): - self.assertTrue( - self.uploader.is_insecure_registry('192.0.2.0:8787')) - self.assertTrue( - self.uploader.is_insecure_registry('192.0.2.0:8787')) - calls = [mock.call('https://192.0.2.0:8787', timeout=30), - mock.call('https://192.0.2.0:8787', timeout=30, - verify=False)] - mock_session.assert_has_calls(calls) - self.assertEqual(mock_session.call_count, 2) - - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'BaseImageUploader._inspect') - def test_discover_image_tag(self, mock_inspect, mock_auth): - mock_inspect.return_value = { - 'Labels': { - 'rdo_version': 'a', - 'build_version': '4.0.0' - }, - 'RepoTags': ['a'] - } - - self.assertEqual( - 'a', - self.uploader.discover_image_tag('docker.io/t/foo:b', - 'rdo_version') - ) - - # no tag_from_label specified - self.assertRaises( - ImageUploaderException, - self.uploader.discover_image_tag, - 'docker.io/t/foo:b') - - # missing RepoTags entry - self.assertRaises( - ImageUploaderException, - self.uploader.discover_image_tag, - 'docker.io/t/foo:b', - 'build_version') - - # missing Labels entry - self.assertRaises( - ImageUploaderException, - self.uploader.discover_image_tag, - 'docker.io/t/foo:b', - 'version') - - # inspect call failed - mock_inspect.side_effect = ImageNotFoundException() - self.assertRaises( - ImageNotFoundException, - self.uploader.discover_image_tag, - 'docker.io/t/foo:b', - 'rdo_version') - - # handle auth issues - mock_401 = mock.Mock() - mock_401.status_code = 401 - mock_401_except = requests.exceptions.HTTPError(response=mock_401) - mock_404 = mock.Mock() - mock_404.status_code = 404 - mock_404_except = requests.exceptions.HTTPError(response=mock_404) - mock_auth.side_effect = [mock_401_except, mock_404_except] - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', 'rdo_version', False) - ) - self.assertRaises( - requests.exceptions.HTTPError, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', 'rdo_version', False) - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader._inspect') - def test_discover_tag_from_inspect(self, mock_inspect, mock_auth): - mock_inspect.return_value = { - 'Labels': { - 'rdo_version': 'a', - 'build_version': '4.0.0', - 'release': '1.0.0', - 'version': '20180125' - }, - 'RepoTags': ['a', '1.0.0-20180125'] - } - - # simple label -> tag - self.assertEqual( - ('docker.io/t/foo', 'a'), - image_uploader.discover_tag_from_inspect( - (self.uploader, 'docker.io/t/foo', 'rdo_version', False)) - ) - - # templated labels -> tag - self.assertEqual( - ('docker.io/t/foo', '1.0.0-20180125'), - image_uploader.discover_tag_from_inspect( - (self.uploader, 'docker.io/t/foo', '{release}-{version}', - False)) - ) - - # simple label -> tag with fallback - self.assertEqual( - ('docker.io/t/foo', 'a'), - image_uploader.discover_tag_from_inspect( - (self.uploader, 'docker.io/t/foo:a', 'bar', False)) - ) - - # templated labels -> tag with fallback - self.assertEqual( - ('docker.io/t/foo', 'a'), - image_uploader.discover_tag_from_inspect( - (self.uploader, 'docker.io/t/foo:a', '{releases}-{versions}', - False)) - ) - - # Invalid template - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', '{release}-{version', False) - ) - - # Missing label in template - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', '{releases}-{version}', False) - ) - - # no tag_from_label specified - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', None, False) - ) - - # missing RepoTags entry - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 
'docker.io/t/foo', 'build_version', False) - ) - - # missing Labels entry - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', 'version', False) - ) - - # inspect call failed - mock_inspect.side_effect = ImageUploaderException() - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', 'rdo_version', False) - ) - - # handle auth issues - mock_401 = mock.Mock() - mock_401.status_code = 401 - mock_401_except = requests.exceptions.HTTPError(response=mock_401) - mock_404 = mock.Mock() - mock_404.status_code = 404 - mock_404_except = requests.exceptions.HTTPError(response=mock_404) - mock_auth.side_effect = [mock_401_except, mock_404_except] - self.assertRaises( - ImageUploaderException, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', 'rdo_version', False) - ) - self.assertRaises( - requests.exceptions.HTTPError, - image_uploader.discover_tag_from_inspect, - (self.uploader, 'docker.io/t/foo', 'rdo_version', False) - ) - - @mock.patch('concurrent.futures.ThreadPoolExecutor') - def test_discover_image_tags(self, mock_pool): - mock_map = mock.Mock() - mock_map.return_value = ( - ('docker.io/t/foo', 'a'), - ('docker.io/t/bar', 'b'), - ('docker.io/t/baz', 'c') - ) - mock_pool.return_value.__enter__.return_value.map = mock_map - images = [ - 'docker.io/t/foo', - 'docker.io/t/bar', - 'docker.io/t/baz' - ] - self.assertEqual( - { - 'docker.io/t/foo': 'a', - 'docker.io/t/bar': 'b', - 'docker.io/t/baz': 'c' - }, - self.uploader.discover_image_tags(images, 'rdo_release') - ) - mock_map.assert_called_once_with( - image_uploader.discover_tag_from_inspect, - [ - (self.uploader, 'docker.io/t/foo', 'rdo_release', False), - (self.uploader, 'docker.io/t/bar', 'rdo_release', False), - (self.uploader, 'docker.io/t/baz', 'rdo_release', False) - ]) - - @mock.patch('tripleo_common.image.image_uploader.' 
- 'BaseImageUploader._inspect') - def test_images_match(self, mock_inspect): - mock_inspect.side_effect = [{'Digest': 'a'}, {'Digest': 'b'}] - self.assertFalse(self.uploader._images_match('foo', 'bar', set())) - - mock_inspect.side_effect = [{'Digest': 'a'}, {'Digest': 'a'}] - self.assertTrue(self.uploader._images_match('foo', 'bar', set())) - - mock_inspect.side_effect = [{}, {'Digest': 'b'}] - self.assertFalse(self.uploader._images_match('foo', 'bar', set())) - - mock_inspect.side_effect = [{'Digest': 'a'}, {}] - self.assertFalse(self.uploader._images_match('foo', 'bar', set())) - - mock_inspect.side_effect = [None, None] - self.assertFalse(self.uploader._images_match('foo', 'bar', set())) - - mock_inspect.side_effect = ImageUploaderException() - self.assertFalse(self.uploader._images_match('foo', 'bar', set())) - - def test_authenticate(self): - req = self.requests - auth = self.uploader.authenticate - url1 = urlparse('docker://docker.io/t/nova-api:latest') - - # no auth required - req.get('https://registry-1.docker.io/v2/', status_code=200) - self.assertNotIn('Authorization', auth(url1).headers) - - # missing 'www-authenticate' header - req.get('https://registry-1.docker.io/v2/', status_code=401) - self.assertRaises(ImageUploaderException, auth, url1) - - # unknown 'www-authenticate' header - req.get('https://registry-1.docker.io/v2/', status_code=401, - headers={'www-authenticate': 'Foo'}) - self.assertRaises(ImageUploaderException, auth, url1) - - # successful auth requests - headers = { - 'www-authenticate': 'Bearer ' - 'realm="https://auth.docker.io/token",' - 'service="registry.docker.io"' - } - req.get('https://registry-1.docker.io/v2/', status_code=401, - headers=headers) - req.get('https://auth.docker.io/token', json={"token": "asdf1234"}) - self.assertEqual( - 'Bearer asdf1234', - auth(url1).headers['Authorization'] - ) - - def test_authenticate_basic_auth(self): - req = self.requests - auth = self.uploader.authenticate - url1 = urlparse('docker://myrepo.com/t/nova-api:latest') - - # successful auth requests - headers = { - 'www-authenticate': 'Basic realm="Some Realm"' - } - - def req_match(request): - resp = requests.Response() - resp.headers = headers - resp.status_code = 401 - # if we got sent an user/password, return 200 - if 'Authorization' in request.headers: - resp.status_code = 200 - return resp - - req.add_matcher(req_match) - self.assertEqual( - 'Basic Zm9vOmJhcg==', - auth(url1, username='foo', password='bar').headers['Authorization'] - ) - - def test_authenticate_with_no_service(self): - req = self.requests - auth = self.uploader.authenticate - url1 = urlparse('docker://docker.io/t/nova-api:latest') - - headers = { - 'www-authenticate': 'Bearer ' - 'realm="https://auth.docker.io/token",' - } - req.get('https://registry-1.docker.io/v2/', status_code=401, - headers=headers) - req.get('https://auth.docker.io/token', json={"token": "asdf1234"}) - self.assertEqual( - 'Bearer asdf1234', - auth(url1).headers['Authorization'] - ) - - def test_build_url(self): - url1 = urlparse('docker://docker.io/t/nova-api:latest') - url2 = urlparse('docker://registry-1.docker.io/t/nova-api:latest') - url3 = urlparse('docker://192.0.2.1:8787/t/nova-api:latest') - build = image_uploader.BaseImageUploader._build_url - insecure_reg = image_uploader.BaseImageUploader.insecure_registries - secure_reg = image_uploader.BaseImageUploader.secure_registries - no_verify_reg = image_uploader.BaseImageUploader.no_verify_registries - mirrors = image_uploader.BaseImageUploader.mirrors - # fix urls - 
self.assertEqual( - 'https://registry-1.docker.io/v2/', - build(url1, '/') - ) - - # no change urls - insecure_reg.add('registry-1.docker.io') - secure_reg.add('192.0.2.1:8787') - self.assertEqual( - 'https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - build(url2, '/t/nova-api/manifests/latest') - ) - self.assertEqual( - 'https://192.0.2.1:8787/v2/t/nova-api/tags/list', - build(url3, '/t/nova-api/tags/list') - ) - # "no verify" registries are insecure but still use https - secure_reg.remove('192.0.2.1:8787') - no_verify_reg.add('192.0.2.1:8787') - self.assertEqual( - 'https://192.0.2.1:8787/v2/t/nova-api/tags/list', - build(url3, '/t/nova-api/tags/list') - ) - - # test mirrors - mirrors['docker.io'] = 'http://192.0.2.2:8081/registry-1.docker/' - self.assertEqual( - 'http://192.0.2.2:8081/registry-1.docker/v2/' - 't/nova-api/blobs/asdf1234', - build(url1, '/t/nova-api/blobs/asdf1234') - ) - - def test_inspect_default_tag(self): - req = self.requests - session = requests.Session() - session.headers['Authorization'] = 'Bearer asdf1234' - inspect = image_uploader.BaseImageUploader._inspect - - url1 = urlparse('docker://docker.io/t/nova-api:latest') - - manifest_resp = { - 'schemaVersion': 2, - 'config': { - 'mediaType': 'text/html', - 'digest': 'abcdef' - }, - 'layers': [ - {'digest': 'aaa'}, - {'digest': 'bbb'}, - {'digest': 'ccc'}, - ] - } - manifest_str = json.dumps(manifest_resp, indent=3) - manifest_headers = {'Docker-Content-Digest': 'eeeeee'} - tags_resp = {'tags': ['one', 'two']} - config_resp = { - 'created': '2018-10-02T11:13:45.567533229Z', - 'docker_version': '1.13.1', - 'config': { - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - } - }, - 'architecture': 'amd64', - 'os': 'linux', - } - - req.get('https://registry-1.docker.io/v2/t/nova-api/tags/list', - json=tags_resp) - req.get('https://registry-1.docker.io/v2/t/nova-api/blobs/abcdef', - json=config_resp) - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/two', - text=manifest_str, headers=manifest_headers) - - # test default_tag=True - self.assertEqual( - { - 'Architecture': 'amd64', - 'Created': '2018-10-02T11:13:45.567533229Z', - 'Digest': 'eeeeee', - 'DockerVersion': '1.13.1', - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - }, - 'Layers': ['aaa', 'bbb', 'ccc'], - 'Name': 'docker.io/t/nova-api', - 'Os': 'linux', - 'RepoTags': ['one', 'two'], - 'Tag': 'latest' - }, - inspect(url1, session=session, default_tag=True) - ) - - # test default_tag=False - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - status_code=404) - self.assertRaises(ImageNotFoundException, inspect, url1, - session=session, - default_tag=False) - - # test default_tag=True, but no tags returned - tags_resp = {'tags': []} - req.get('https://registry-1.docker.io/v2/t/nova-api/tags/list', - json=tags_resp) - self.assertRaises(ImageNotFoundException, inspect, url1, - session=session, - default_tag=True) - - def test_inspect(self): - req = self.requests - session = requests.Session() - session.headers['Authorization'] = 'Bearer asdf1234' - inspect = image_uploader.BaseImageUploader._inspect - - url1 = urlparse('docker://docker.io/t/nova-api:latest') - - manifest_resp = { - 'schemaVersion': 2, - 'config': { - 'mediaType': 'text/html', - 'digest': 'abcdef' - }, - 'layers': [ - {'digest': 'aaa'}, - {'digest': 'bbb'}, - {'digest': 'ccc'}, - ] - } - manifest_str = json.dumps(manifest_resp, indent=3) - manifest_headers = 
{'Docker-Content-Digest': 'eeeeee'} - tags_resp = {'tags': ['one', 'two', 'latest']} - config_resp = { - 'created': '2018-10-02T11:13:45.567533229Z', - 'docker_version': '1.13.1', - 'config': { - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - } - }, - 'architecture': 'amd64', - 'os': 'linux', - } - - req.get('https://registry-1.docker.io/v2/t/nova-api/tags/list', - json=tags_resp) - req.get('https://registry-1.docker.io/v2/t/nova-api/blobs/abcdef', - json=config_resp) - - # test 404 response - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - status_code=404) - self.assertRaises(ImageNotFoundException, inspect, url1, - session=session) - - # test full response - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - text=manifest_str, headers=manifest_headers) - - self.assertEqual( - { - 'Architecture': 'amd64', - 'Created': '2018-10-02T11:13:45.567533229Z', - 'Digest': 'eeeeee', - 'DockerVersion': '1.13.1', - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - }, - 'Layers': ['aaa', 'bbb', 'ccc'], - 'Name': 'docker.io/t/nova-api', - 'Os': 'linux', - 'RepoTags': ['one', 'two', 'latest'], - 'Tag': 'latest' - }, - inspect(url1, session=session) - ) - - def test_inspect_v1_manifest(self): - req = self.requests - session = requests.Session() - session.headers['Authorization'] = 'Bearer asdf1234' - inspect = image_uploader.BaseImageUploader._inspect - - url1 = urlparse('docker://docker.io/t/nova-api:latest') - - config = { - 'created': '2018-10-02T11:13:45.567533229Z', - 'docker_version': '1.13.1', - 'config': { - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - } - }, - 'architecture': 'amd64', - 'os': 'linux', - } - manifest_resp = { - 'schemaVersion': 1, - 'history': [ - {'v1Compatibility': json.dumps(config)} - ], - 'config': { - 'mediaType': 'text/html', - 'digest': 'abcdef' - }, - 'fsLayers': [ - {'blobSum': 'ccc'}, - {'blobSum': 'bbb'}, - {'blobSum': 'aaa'}, - ] - } - manifest_str = json.dumps(manifest_resp, indent=3) - manifest_headers = {'Docker-Content-Digest': 'eeeeee'} - tags_resp = {'tags': ['one', 'two', 'latest']} - - req.get('https://registry-1.docker.io/v2/t/nova-api/tags/list', - json=tags_resp) - - # test 404 response - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - status_code=404) - self.assertRaises(ImageNotFoundException, inspect, url1, - session=session) - - # test full response - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - text=manifest_str, headers=manifest_headers) - - self.assertDictEqual( - { - 'Architecture': 'amd64', - 'Created': '2018-10-02T11:13:45.567533229Z', - 'Digest': 'eeeeee', - 'DockerVersion': '1.13.1', - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - }, - 'Layers': ['aaa', 'bbb', 'ccc'], - 'Name': 'docker.io/t/nova-api', - 'Os': 'linux', - 'RepoTags': ['one', 'two', 'latest'], - 'Tag': 'latest' - }, - inspect(url1, session=session) - ) - - def test_inspect_no_digest_header(self): - req = self.requests - session = requests.Session() - session.headers['Authorization'] = 'Bearer asdf1234' - inspect = image_uploader.BaseImageUploader._inspect - - url1 = urlparse('docker://docker.io/t/nova-api:latest') - - manifest_resp = { - 'schemaVersion': 2, - 'config': { - 'mediaType': 'text/html', - 'digest': 'abcdef' - }, - 'layers': [ - {'digest': 'aaa'}, - {'digest': 'bbb'}, - 
{'digest': 'ccc'}, - ] - } - manifest_str = json.dumps(manifest_resp, indent=3) - manifest_headers = {} - tags_resp = {'tags': ['one', 'two', 'latest']} - config_resp = { - 'created': '2018-10-02T11:13:45.567533229Z', - 'docker_version': '1.13.1', - 'config': { - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - } - }, - 'architecture': 'amd64', - 'os': 'linux', - } - - req.get('https://registry-1.docker.io/v2/t/nova-api/tags/list', - json=tags_resp) - req.get('https://registry-1.docker.io/v2/t/nova-api/blobs/abcdef', - json=config_resp) - - # test 404 response - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - status_code=404) - self.assertRaises(ImageNotFoundException, inspect, url1, - session=session) - - # test full response - req.get('https://registry-1.docker.io/v2/t/nova-api/manifests/latest', - text=manifest_str, headers=manifest_headers) - - calc_digest = hashlib.sha256() - calc_digest.update(manifest_str.encode('utf-8')) - digest = 'sha256:%s' % calc_digest.hexdigest() - - self.assertEqual( - { - 'Architecture': 'amd64', - 'Created': '2018-10-02T11:13:45.567533229Z', - 'Digest': digest, - 'DockerVersion': '1.13.1', - 'Labels': { - 'build-date': '20181002', - 'build_id': '1538477701', - 'kolla_version': '7.0.0' - }, - 'Layers': ['aaa', 'bbb', 'ccc'], - 'Name': 'docker.io/t/nova-api', - 'Os': 'linux', - 'RepoTags': ['one', 'two', 'latest'], - 'Tag': 'latest' - }, - inspect(url1, session=session) - ) - - @mock.patch('concurrent.futures.ThreadPoolExecutor') - def test_list(self, mock_pool): - mock_map = mock.Mock() - mock_map.return_value = ( - ('localhost:8787/t/foo', ['a']), - ('localhost:8787/t/bar', ['b']), - ('localhost:8787/t/baz', ['c', 'd']), - ('localhost:8787/t/bink', []) - ) - mock_pool.return_value.__enter__.return_value.map = mock_map - session = mock.Mock() - response = mock.Mock() - response.status_code = 200 - response.json.return_value = { - 'repositories': ['t/foo', 't/bar', 't/baz', 't/bink'] - } - session.get.return_value = response - self.assertEqual( - [ - 'localhost:8787/t/foo:a', - 'localhost:8787/t/bar:b', - 'localhost:8787/t/baz:c', - 'localhost:8787/t/baz:d' - ], - self.uploader.list('localhost:8787', session=session) - ) - mock_map.assert_called_once_with( - image_uploader.tags_for_image, - [ - (self.uploader, 'localhost:8787/t/foo', session), - (self.uploader, 'localhost:8787/t/bar', session), - (self.uploader, 'localhost:8787/t/baz', session), - (self.uploader, 'localhost:8787/t/bink', session) - ]) - - def test_list_404(self): - # setup bits - session = mock.Mock() - response = mock.Mock() - response.status_code = 404 - session.get.return_value = response - # execute function - return_val = self.uploader.list('localhost:8787', session=session) - # check status of things - self.assertEqual( - [], - return_val - ) - - @mock.patch('concurrent.futures.ThreadPoolExecutor') - def test_list_500(self, mock_pool): - session = mock.Mock() - response = mock.Mock() - response.status_code = 500 - session.get.return_value = response - mock_pool.return_value.map.return_value = () - self.assertRaises(ImageUploaderException, - self.uploader.list, - 'localhost:8787', - session=session) - - def test_tags_for_image(self): - session = mock.Mock() - r = mock.Mock() - r.status_code = 200 - r.json.return_value = {'tags': ['a', 'b', 'c']} - session.get.return_value = r - self.uploader.insecure_registries.add('localhost:8787') - url = 'docker://localhost:8787/t/foo' - image, tags = 
self.uploader._tags_for_image(url, session=session) - self.assertEqual(url, image) - self.assertEqual(['a', 'b', 'c'], tags) - - # test missing tags file - r.status_code = 404 - image, tags = self.uploader._tags_for_image(url, session=session) - self.assertEqual([], tags) - - def test_image_tag_from_url(self): - u = self.uploader - self.assertEqual( - ('/t/foo', 'bar'), - u._image_tag_from_url(urlparse( - 'docker://docker.io/t/foo:bar')) - ) - self.assertEqual( - ('/foo', 'bar'), - u._image_tag_from_url(urlparse( - 'docker://192.168.2.1:5000/foo:bar')) - - ) - self.assertEqual( - ('/foo', 'bar'), - u._image_tag_from_url(urlparse( - 'containers-storage:/foo:bar')) - ) - - -class TestPythonImageUploader(base.TestCase): - - # pylint: disable=no-member - def setUp(self): - super(TestPythonImageUploader, self).setUp() - self.uploader = image_uploader.PythonImageUploader() - self.uploader.init_registries_cache() - u = self.uploader - u._fetch_manifest.retry.sleep = mock.Mock() - u._upload_url.retry.sleep = mock.Mock() - u._copy_layer_local_to_registry.retry.sleep = mock.Mock() - u._copy_layer_registry_to_registry.retry.sleep = mock.Mock() - u._copy_registry_to_registry.retry.sleep = mock.Mock() - u._copy_local_to_registry.retry.sleep = mock.Mock() - self.requests = self.useFixture(rm_fixture.Fixture()) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._cross_repo_mount') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_registry_to_registry') - def test_upload_image( - self, _copy_registry_to_registry, _cross_repo_mount, - _fetch_manifest, authenticate, check_status): - - target_session = mock.Mock() - source_session = mock.Mock() - authenticate.side_effect = [ - target_session, - source_session - ] - manifest = json.dumps({ - 'schemaVersion': 2, - 'mediaType': image_uploader.MEDIA_MANIFEST_V2, - 'config': { - 'digest': 'sha256:1234', - }, - 'layers': [ - {'digest': 'sha256:aaa'}, - {'digest': 'sha256:bbb'}, - {'digest': 'sha256:ccc'} - ], - }) - _fetch_manifest.return_value = manifest - - image = 'docker.io/tripleomastercentos9/heat-docker-agents-centos' - tag = 'latest' - push_destination = 'localhost:8787' - task = image_uploader.UploadTask( - image_name=image + ':' + tag, - pull_source=None, - push_destination=push_destination, - append_tag=None, - modify_role=None, - modify_vars=None, - cleanup='full', - multi_arch=False - ) - - self.assertEqual( - [], - self.uploader.upload_image(task) - ) - source_url = urlparse('docker://docker.io/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - target_url = urlparse('docker://localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - - authenticate.assert_has_calls([ - mock.call( - target_url, - username=None, - password=None - ), - mock.call( - source_url, - username=None, - password=None - ), - ]) - - _fetch_manifest.assert_called_once_with( - source_url, session=source_session, multi_arch=False) - - _cross_repo_mount.assert_called_once_with( - target_url, - { - 'sha256:aaa': target_url, - 'sha256:bbb': target_url, - 'sha256:ccc': target_url, - }, - ['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - session=target_session) - - _copy_registry_to_registry.assert_called_once_with( - source_url, 
- target_url, - source_manifests=[manifest], - source_session=source_session, - target_session=target_session, - source_layers=['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - multi_arch=False - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._cross_repo_mount') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_registry_to_registry') - def test_authenticate_upload_image( - self, _copy_registry_to_registry, _cross_repo_mount, - _fetch_manifest, authenticate, check_status): - - self.uploader.registry_credentials = { - 'docker.io': {'my_username': 'my_password'}, - 'localhost:8787': {'local_username': 'local_password'}, - } - target_session = mock.Mock() - source_session = mock.Mock() - authenticate.side_effect = [ - target_session, - source_session - ] - manifest = json.dumps({ - 'config': { - 'digest': 'sha256:1234', - }, - 'layers': [ - {'digest': 'sha256:aaa'}, - {'digest': 'sha256:bbb'}, - {'digest': 'sha256:ccc'} - ], - }) - _fetch_manifest.return_value = manifest - - image = 'docker.io/tripleomastercentos9/heat-docker-agents-centos' - tag = 'latest' - push_destination = 'localhost:8787' - task = image_uploader.UploadTask( - image_name=image + ':' + tag, - pull_source=None, - push_destination=push_destination, - append_tag=None, - modify_role=None, - modify_vars=None, - cleanup='full', - multi_arch=False - ) - - self.assertEqual( - [], - self.uploader.upload_image(task) - ) - source_url = urlparse('docker://docker.io/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - target_url = urlparse('docker://localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - - authenticate.assert_has_calls([ - mock.call( - target_url, - username='local_username', - password='local_password' - ), - mock.call( - source_url, - username='my_username', - password='my_password' - ), - ]) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._cross_repo_mount') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._copy_registry_to_registry') - def test_insecure_registry( - self, _copy_registry_to_registry, _cross_repo_mount, - _fetch_manifest, authenticate, check_status): - target_session = mock.Mock() - source_session = mock.Mock() - authenticate.side_effect = [ - target_session, - source_session - ] - manifest = json.dumps({ - 'config': { - 'digest': 'sha256:1234', - }, - 'layers': [ - {'digest': 'sha256:aaa'}, - {'digest': 'sha256:bbb'}, - {'digest': 'sha256:ccc'} - ], - }) - _fetch_manifest.return_value = manifest - - image = '192.0.2.0:8787/tripleomastercentos9/heat-docker-agents-centos' - tag = 'latest' - push_destination = 'localhost:8787' - task = image_uploader.UploadTask( - image_name=image + ':' + tag, - pull_source=None, - push_destination=push_destination, - append_tag=None, - modify_role=None, - modify_vars=None, - cleanup='full', - multi_arch=False - ) - - self.assertEqual( - [], - self.uploader.upload_image(task) - ) - source_url = urlparse('docker://192.0.2.0:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - target_url = urlparse('docker://localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - - authenticate.assert_has_calls([ - mock.call( - target_url, - username=None, - password=None - ), - mock.call( - source_url, - username=None, - password=None - ), - ]) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._cross_repo_mount') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._copy_registry_to_registry') - def test_upload_image_v1_manifest( - self, _copy_registry_to_registry, _cross_repo_mount, - _fetch_manifest, authenticate, check_status): - - target_session = mock.Mock() - source_session = mock.Mock() - authenticate.side_effect = [ - target_session, - source_session - ] - manifest = json.dumps({ - 'schemaVersion': 1, - 'fsLayers': [ - {'blobSum': 'sha256:ccc'}, - {'blobSum': 'sha256:bbb'}, - {'blobSum': 'sha256:aaa'} - ], - }) - _fetch_manifest.return_value = manifest - - image = 'docker.io/tripleomastercentos9/heat-docker-agents-centos' - tag = 'latest' - push_destination = 'localhost:8787' - task = image_uploader.UploadTask( - image_name=image + ':' + tag, - pull_source=None, - push_destination=push_destination, - append_tag=None, - modify_role=None, - modify_vars=None, - cleanup='full', - multi_arch=False - ) - - self.assertEqual( - [], - self.uploader.upload_image(task) - ) - source_url = urlparse('docker://docker.io/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - target_url = urlparse('docker://localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - - authenticate.assert_has_calls([ - mock.call( - target_url, - username=None, - password=None - ), - mock.call( - source_url, - username=None, - password=None - ), - ]) - - _fetch_manifest.assert_called_once_with( - source_url, session=source_session, multi_arch=False) - - _cross_repo_mount.assert_called_once_with( - target_url, - { - 'sha256:aaa': target_url, - 'sha256:bbb': target_url, - 'sha256:ccc': target_url, - }, - ['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - session=target_session) - - _copy_registry_to_registry.assert_called_once_with( - source_url, - target_url, - source_manifests=[manifest], - source_session=source_session, - target_session=target_session, - source_layers=['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - multi_arch=False - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.authenticate') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._image_exists') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._cross_repo_mount') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_registry_to_registry') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_registry_to_local') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.run_modify_playbook') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._copy_local_to_registry') - def test_upload_image_modify( - self, _copy_local_to_registry, run_modify_playbook, - _copy_registry_to_local, _copy_registry_to_registry, - _cross_repo_mount, _fetch_manifest, _image_exists, authenticate, - check_status): - - _image_exists.return_value = False - target_session = mock.Mock() - source_session = mock.Mock() - authenticate.side_effect = [ - target_session, - source_session - ] - manifest = json.dumps({ - 'schemaVersion': 2, - 'mediaType': image_uploader.MEDIA_MANIFEST_V2, - 'config': { - 'digest': 'sha256:1234', - }, - 'layers': [ - {'digest': 'sha256:aaa'}, - {'digest': 'sha256:bbb'}, - {'digest': 'sha256:ccc'} - ], - }) - _fetch_manifest.return_value = manifest - - image = 'docker.io/tripleomastercentos9/heat-docker-agents-centos' - tag = 'latest' - append_tag = 'modify-123' - push_destination = 'localhost:8787' - task = image_uploader.UploadTask( - image_name=image + ':' + tag, - pull_source=None, - push_destination=push_destination, - append_tag=append_tag, - modify_role='add-foo-plugin', - modify_vars={'foo_version': '1.0.1'}, - cleanup='full', - multi_arch=False - ) - - source_url = urlparse( - 'docker://docker.io/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - unmodified_target_url = urlparse( - 'docker://localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - local_modified_url = urlparse( - 'containers-storage:localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latestmodify-123') - target_url = urlparse( - 'docker://localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latestmodify-123') - - self.assertEqual([ - 'localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest', - 'localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latestmodify-123'], - self.uploader.upload_image(task) - ) - authenticate.assert_has_calls([ - mock.call( - target_url, - username=None, - password=None - ), - mock.call( - source_url, - username=None, - password=None - ), - ]) - - _fetch_manifest.assert_called_once_with( - source_url, session=source_session, multi_arch=False) - - _cross_repo_mount.assert_has_calls([ - mock.call( - unmodified_target_url, - { - 'sha256:aaa': target_url, - 'sha256:bbb': target_url, - 'sha256:ccc': target_url, - }, - ['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - session=target_session - ), - mock.call( - target_url, - { - 'sha256:aaa': target_url, - 'sha256:bbb': target_url, - 'sha256:ccc': target_url, - }, - ['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - session=target_session - ) - ]) - - _copy_registry_to_registry.assert_called_once_with( - source_url, - unmodified_target_url, - source_manifests=[manifest], - source_session=source_session, - target_session=target_session, - source_layers=['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - multi_arch=False - ) - _copy_registry_to_local.assert_called_once_with(unmodified_target_url) - run_modify_playbook.assert_called_once_with( - 'add-foo-plugin', - {'foo_version': '1.0.1'}, - 'localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest', - 'localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest', - 'modify-123', - container_build_tool='buildah' - ) - _copy_local_to_registry.assert_called_once_with( - local_modified_url, - target_url, - session=target_session - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._detect_target_export') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader.credentials_for_registry') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_local_to_registry') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.authenticate') - def test_upload_image_local(self, authenticate, mock_copy, mock_creds, - mock_detect): - - mock_creds.return_value = (None, None) - target_session = mock.Mock() - authenticate.side_effect = [ - target_session - ] - - image = 'docker.io/tripleomastercentos9/heat-docker-agents-centos' - tag = 'latest' - push_destination = 'localhost:8787' - source_image = 'containers-storage:%s:%s' % (image, tag) - task = image_uploader.UploadTask( - image_name=source_image, - pull_source=None, - push_destination=push_destination, - append_tag=None, - modify_role=None, - modify_vars=None, - cleanup='full', - multi_arch=False - ) - - self.assertEqual( - [], - self.uploader.upload_image(task) - ) - source_url = urlparse(source_image) - target_url = urlparse('docker://localhost:8787/tripleomastercentos9/' - 'heat-docker-agents-centos:latest') - authenticate.assert_has_calls([ - mock.call( - target_url, - username=None, - password=None - ) - ]) - mock_detect.assert_called_once_with(target_url, target_session) - mock_copy.assert_called_once_with(source_url, target_url, - session=target_session) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - def test_fetch_manifest(self, check_status): - url = urlparse('docker://docker.io/t/nova-api:tripleo-current') - manifest = '{"layers": []}' - session = mock.Mock() - session.get.return_value.text = manifest - self.assertEqual( - manifest, - self.uploader._fetch_manifest(url, session, multi_arch=False) - ) - - session.get.assert_called_once_with( - 'https://registry-1.docker.io/v2/t/' - 'nova-api/manifests/tripleo-current', - timeout=30, - headers={ - 'Accept': 'application/' - 'vnd.docker.distribution.manifest.v2+json;q=1, ' - 'application/' - 'vnd.oci.image.manifest.v1+json;q=0.5' - } - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - def test_upload_url(self, check_status): - # test with previous request - previous_request = mock.Mock() - previous_request.headers = { - 'Location': 'http://192.168.2.1/v2/upload?foo=bar' - } - url = urlparse('docker://192.168.2.1/t/nova-api:latest') - session = mock.Mock() - self.assertEqual( - 'http://192.168.2.1/v2/upload?foo=bar', - self.uploader._upload_url( - url, - session=session, - previous_request=previous_request - ) - ) - session.post.assert_not_called() - - # test with requesting an upload url - session.post.return_value.headers = { - 'Location': 'http://192.168.2.1/v2/upload?foo=baz' - } - self.assertEqual( - 'http://192.168.2.1/v2/upload?foo=baz', - self.uploader._upload_url( - url, - session=session, - previous_request=None - ) - ) - session.post.assert_called_once_with( - 'https://192.168.2.1/v2/t/nova-api/blobs/uploads/', - timeout=30 - ) - - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._upload_url') - @mock.patch('tripleo_common.utils.image.uploaded_layers_details') - def test_copy_layer_registry_to_registry(self, global_check, _upload_url): - _upload_url.return_value = 'https://192.168.2.1:5000/v2/upload' - source_url = urlparse('docker://docker.io/t/nova-api:latest') - target_url = urlparse('docker://192.168.2.1:5000/t/nova-api:latest') - source_session = requests.Session() - target_session = requests.Session() - - blob_data = b'The Blob' - calc_digest = hashlib.sha256() - calc_digest.update(blob_data) - blob_digest = 'sha256:' + calc_digest.hexdigest() - layer_entry = { - 'digest': blob_digest, - 'size': 8, - 'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip' - } - layer = layer_entry['digest'] - - # layer already exists at destination - global_check.return_value = (None, None) - self.requests.head( - 'https://192.168.2.1:5000/v2/t/nova-api/blobs/%s' % blob_digest, - status_code=200 - ) - self.assertIsNone( - self.uploader._copy_layer_registry_to_registry( - source_url, - target_url, - layer, - source_session=source_session, - target_session=target_session - ) - ) - - # layer needs transferring - self.requests.head( - 'https://192.168.2.1:5000/v2/t/nova-api/blobs/%s' % blob_digest, - status_code=404 - ) - self.requests.put( - 'https://192.168.2.1:5000/v2/upload', - ) - self.requests.patch( - 'https://192.168.2.1:5000/v2/upload', - ) - self.requests.get( - 'https://registry-1.docker.io/v2/t/nova-api/blobs/%s' % - blob_digest, - content=blob_data - ) - - self.assertEqual( - blob_digest, - self.uploader._copy_layer_registry_to_registry( - source_url, - target_url, - layer, - source_session=source_session, - target_session=target_session - ) - ) - self.assertEqual( - { - 'digest': blob_digest, - 'mediaType': 'application/' - 'vnd.docker.image.rootfs.diff.tar.gzip', - 'size': 8 - }, - layer_entry - ) - - def test_assert_scheme(self): - self.uploader._assert_scheme( - urlparse('docker://docker.io/foo/bar:latest'), - 'docker' - ) - self.uploader._assert_scheme( - urlparse('containers-storage:foo/bar:latest'), - 'containers-storage' - ) - self.assertRaises( - ImageUploaderException, - self.uploader._assert_scheme, - urlparse('containers-storage:foo/bar:latest'), - 'docker' - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_redirect_trusted') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_manifest_config_to_registry') - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.get') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._upload_url') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader.' 
- '_copy_layer_registry_to_registry') - def test_copy_registry_to_registry(self, _copy_layer, _upload_url, - mock_get, mock_copy_manifest, - mock_trusted): - source_url = urlparse('docker://docker.io/t/nova-api:latest') - target_url = urlparse('docker://192.168.2.1:5000/t/nova-api:latest') - _upload_url.return_value = 'https://192.168.2.1:5000/v2/upload' - - source_session = mock.Mock() - target_session = mock.Mock() - - mock_resp = mock.Mock() - mock_resp.text = '{}' - mock_get.return_value = mock_resp - mock_trusted.return_value = mock_resp - - manifest = json.dumps({ - 'mediaType': image_uploader.MEDIA_MANIFEST_V2, - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': image_uploader.MEDIA_CONFIG - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ] - }) - _copy_layer.side_effect = [ - 'sha256:aaaa', - 'sha256:bbbb' - ] - - self.uploader._copy_registry_to_registry( - source_url, target_url, [manifest], - source_session=source_session, - target_session=target_session - ) - - mock_get.assert_called_once_with( - source_session, - 'https://registry-1.docker.io/v2/t/nova-api/blobs/sha256:1234', - timeout=30, - allow_redirects=False - ) - target_manifest = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ], - 'mediaType': 'application/vnd.docker.' - 'distribution.manifest.v2+json', - } - - mock_trusted.assert_called_once_with(mock_resp, - source_session, - stream=False) - mock_copy_manifest.assert_has_calls([ - mock.call( - target_url=target_url, - manifest_str=mock.ANY, - config_str='{}', - target_session=target_session, - multi_arch=False - ) - ]) - put_manifest = json.loads( - mock_copy_manifest.call_args[1]['manifest_str'] - ) - self.assertEqual(target_manifest, put_manifest) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.check_status') - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader._build_url') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'BaseImageUploader._image_tag_from_url') - def test_copy_manifest_config_to_registry(self, image_tag_mock, - build_url_mock, status_mock): - - target_url = urlparse('docker://192.168.2.1:5000/t/nova-api:latest') - - image_tag_mock.return_value = ('t/nova-api', 'latest') - - build_url = 'https://192.168.2.1:5000/v2/t/nova-api' - build_url_mock.return_value = build_url - target_session = mock.Mock() - target_put = mock.Mock() - target_put.return_value.text = '{}' - target_session.put = target_put - - config_str = None - - manifest_str = json.dumps({ - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': image_uploader.MEDIA_CONFIG - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ], - 'mediaType': image_uploader.MEDIA_MANIFEST_V2 - }) - expected_manifest = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': image_uploader.MEDIA_CONFIG - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ], - 'mediaType': image_uploader.MEDIA_MANIFEST_V2 - } - - expected_headers = { - 'Content-Type': image_uploader.MEDIA_MANIFEST_V2 - } - self.uploader._copy_manifest_config_to_registry( - target_url, manifest_str, config_str, - target_session=target_session - ) - - calls = [mock.call(build_url, - data=mock.ANY, - headers=expected_headers, - timeout=30)] - target_put.assert_has_calls(calls) - # dict ordering in the serialized manifest is not deterministic under - # py27, so parse the field and compare the decoded values instead - call_manifest = json.loads( - target_put.call_args[1]['data'].decode('utf-8') - ) - self.assertEqual(expected_manifest, call_manifest) - - @mock.patch('tripleo_common.image.image_export.export_manifest_config') - def test_copy_manifest_config_to_registry_export(self, export_mock): - - target_url = urlparse('docker://192.168.2.1:5000/t/nova-api:latest') - self.uploader.export_registries.add('192.168.2.1:5000') - target_session = mock.Mock() - config_str = None - - manifest_str = json.dumps({ - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': image_uploader.MEDIA_CONFIG - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ], - }) - expected_manifest = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': image_uploader.MEDIA_CONFIG - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ], - 'mediaType': image_uploader.MEDIA_MANIFEST_V2 - } - - self.uploader._copy_manifest_config_to_registry( - target_url, manifest_str, config_str, - target_session=target_session - ) - - calls = [mock.call(target_url, - mock.ANY, - image_uploader.MEDIA_MANIFEST_V2, - config_str, - multi_arch=False)] - export_mock.assert_has_calls(calls) - # dict ordering in the serialized manifest is not deterministic under - # py27, so parse the field and compare the decoded values instead - call_manifest = json.loads( - export_mock.call_args[0][1] - ) - self.assertEqual(expected_manifest, call_manifest) - - @mock.patch('tripleo_common.image.image_uploader.' - 'RegistrySessionHelper.put') - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader._build_url') - @mock.patch('tripleo_common.image.image_uploader.' - 'BaseImageUploader._image_tag_from_url') - def test_copy_manifest_config_to_registry_oci(self, image_tag_mock, - build_url_mock, put_mock): - - target_url = urlparse('docker://192.168.2.1:5000/t/nova-api:latest') - - image_tag_mock.return_value = ('t/nova-api', 'latest') - - build_url = 'https://192.168.2.1:5000/v2/t/nova-api' - build_url_mock.return_value = build_url - target_session = mock.Mock() - put_mock.return_value.text = '{}' - - config_str = None - - manifest_str = json.dumps({ - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': image_uploader.MEDIA_OCI_CONFIG_V1 - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ], - }) - expected_manifest = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': image_uploader.MEDIA_CONFIG - }, - 'layers': [ - {'digest': 'sha256:aaaa'}, - {'digest': 'sha256:bbbb'}, - ], - 'mediaType': image_uploader.MEDIA_MANIFEST_V2 - } - - expected_headers = { - 'Content-Type': image_uploader.MEDIA_MANIFEST_V2 - } - self.uploader._copy_manifest_config_to_registry( - target_url, manifest_str, config_str, - target_session=target_session - ) - - calls = [mock.call(target_session, - build_url, - data=mock.ANY, - headers=expected_headers, - timeout=30)] - put_mock.assert_has_calls(calls) - # dict ordering in the serialized manifest is not deterministic under - # py27, so parse the field and compare the decoded values instead - call_manifest = json.loads( - put_mock.call_args[1]['data'].decode('utf-8') - ) - self.assertEqual(expected_manifest, call_manifest) - - @mock.patch('os.environ') - @mock.patch('subprocess.Popen') - def test_copy_registry_to_local(self, mock_popen, mock_environ): - mock_success = mock.Mock() - mock_success.communicate.return_value = ( - b'pull complete', - b'' - ) - mock_success.returncode = 0 - - mock_failure = mock.Mock() - mock_failure.communicate.return_value = ('', 'ouch') - mock_failure.returncode = 1 - mock_popen.side_effect = [ - mock_failure, - mock_failure, - mock_failure, - mock_failure, - mock_success - ] - mock_environ.copy.return_value = {} - - source = urlparse('docker://docker.io/t/nova-api') - - self.uploader._copy_registry_to_local(source) - - self.assertEqual(mock_failure.communicate.call_count, 4) - self.assertEqual(mock_success.communicate.call_count, 1) - - @mock.patch('os.path.exists') - @mock.patch('subprocess.Popen') - @mock.patch('tripleo_common.image.image_uploader.'
- 'PythonImageUploader._upload_url') - @mock.patch('tripleo_common.utils.image.uploaded_layers_details') - def test_copy_layer_local_to_registry(self, global_check, _upload_url, - mock_popen, mock_exists): - mock_exists.return_value = True - _upload_url.return_value = 'https://192.168.2.1:5000/v2/upload' - target_url = urlparse('docker://192.168.2.1:5000/t/nova-api:latest') - layer = {'digest': 'sha256:aaaa'} - target_session = requests.Session() - - blob_data = b'The Blob' - calc_digest = hashlib.sha256() - calc_digest.update(blob_data) - blob_digest = 'sha256:' + calc_digest.hexdigest() - - blob_compressed = zlib.compress(blob_data) - calc_digest = hashlib.sha256() - calc_digest.update(blob_compressed) - compressed_digest = 'sha256:' + calc_digest.hexdigest() - layer_entry = { - 'compressed-diff-digest': compressed_digest, - 'compressed-size': len(compressed_digest), - 'diff-digest': blob_digest, - 'diff-size': len(blob_data), - 'id': 'aaaa' - } - - # layer already exists at destination - global_check.return_value = (None, None) - self.requests.head( - 'https://192.168.2.1:5000/v2/t/' - 'nova-api/blobs/%s' % compressed_digest, - status_code=404 - ) - self.requests.head( - 'https://192.168.2.1:5000/v2/t/nova-api/blobs/%s' % blob_digest, - status_code=200 - ) - self.assertIsNone( - self.uploader._copy_layer_local_to_registry( - target_url, - session=target_session, - layer=layer, - layer_entry=layer_entry - ) - ) - - # layer needs uploading - mock_success = mock.Mock() - mock_success.stdout = io.BytesIO(blob_compressed) - mock_success.returncode = 0 - mock_popen.return_value = mock_success - - target_session = requests.Session() - self.requests.head( - 'https://192.168.2.1:5000/v2/t/' - 'nova-api/blobs/%s' % compressed_digest, - status_code=404 - ) - self.requests.head( - 'https://192.168.2.1:5000/v2/t/nova-api/blobs/%s' % blob_digest, - status_code=404 - ) - self.requests.patch( - 'https://192.168.2.1:5000/v2/upload', - status_code=200 - ) - self.requests.put( - 'https://192.168.2.1:5000/v2/upload?digest=%s' % compressed_digest, - status_code=200 - ) - self.assertEqual( - compressed_digest, - self.uploader._copy_layer_local_to_registry( - target_url, - session=target_session, - layer=layer, - layer_entry=layer_entry - ) - ) - # test tar-split assemble call - mock_popen.assert_called_once_with([ - 'tar-split', 'asm', - '--input', - '/var/lib/containers/storage/overlay-layers/aaaa.tar-split.gz', - '--path', - '/var/lib/containers/storage/overlay/aaaa/diff', - '--compress' - ], stdout=-1) - - # test side-effect of layer being fully populated - self.assertEqual({ - 'digest': compressed_digest, - 'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip', - 'size': len(blob_compressed)}, - layer - ) - - @mock.patch('tripleo_common.utils.image.uploaded_layers_details') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._image_manifest_config') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._copy_layer_local_to_registry') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._containers_json') - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._upload_url') - def test_copy_local_to_registry(self, _upload_url, _containers_json, - _copy_layer_local_to_registry, - _image_manifest_config, _global_check): - source_url = urlparse('containers-storage:/t/nova-api:latest') - target_url = urlparse('docker://192.168.2.1:5000/t/nova-api:latest') - target_session = requests.Session() - _upload_url.return_value = 'https://192.168.2.1:5000/v2/upload' - _global_check.return_value = (None, None) - layers = [{ - "compressed-diff-digest": "sha256:aeb786", - "compressed-size": 74703002, - "compression": 2, - "created": "2018-11-07T02:45:16.760488331Z", - "diff-digest": "sha256:f972d1", - "diff-size": 208811520, - "id": "f972d1" - }, { - "compressed-diff-digest": "sha256:4dc536", - "compressed-size": 23400, - "compression": 2, - "created": "2018-11-07T02:45:21.59385649Z", - "diff-digest": "sha256:26deb2", - "diff-size": 18775552, - "id": "97397b", - "parent": "f972d1" - }] - _containers_json.return_value = layers - - config_str = '{"config": {}}' - config_digest = 'sha256:1234' - - manifest = { - 'config': { - 'digest': config_digest, - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [ - {'digest': 'sha256:aeb786'}, - {'digest': 'sha256:4dc536'}, - ], - 'mediaType': 'application/vnd.docker.' - 'distribution.manifest.v2+json', - } - _image_manifest_config.return_value = ( - 't/nova-api:latest', - manifest, - config_str - ) - put_config = self.requests.put( - 'https://192.168.2.1:5000/v2/upload?digest=%s' % config_digest, - status_code=200 - ) - put_manifest = self.requests.put( - 'https://192.168.2.1:5000/v2/t/nova-api/manifests/latest', - status_code=200 - ) - - self.uploader._copy_local_to_registry( - source_url=source_url, - target_url=target_url, - session=target_session - ) - - _containers_json.assert_called_once_with( - 'overlay-layers', 'layers.json') - _image_manifest_config.assert_called_once_with('/t/nova-api:latest') - _copy_layer_local_to_registry.assert_any_call( - target_url, - target_session, - {'digest': 'sha256:aeb786'}, - layers[0] - ) - _copy_layer_local_to_registry.assert_any_call( - target_url, - target_session, - {'digest': 'sha256:4dc536'}, - layers[1] - ) - self.assertTrue(put_config.called) - self.assertTrue(put_manifest.called) - - @mock.patch('os.path.exists') - def test_containers_file_path(self, mock_exists): - mock_exists.side_effect = [False, True] - - self.assertRaises( - ImageUploaderException, - self.uploader._containers_file_path, - 'overlay-layers', - 'layers.json' - ) - self.assertEqual( - '/var/lib/containers/storage/overlay-layers/layers.json', - self.uploader._containers_file_path( - 'overlay-layers', 'layers.json') - ) - - @mock.patch('os.path.exists') - def test_containers_file(self, mock_exists): - mock_exists.return_value = True - - data = '{"config": {}}' - mock_open = mock.mock_open(read_data=data) - open_func = 'tripleo_common.image.image_uploader.open' - - with mock.patch(open_func, mock_open): - self.assertEqual( - '{"config": {}}', - self.uploader._containers_file( - 'overlay-layers', 'layers.json') - ) - - @mock.patch('os.path.exists') - def test_containers_json(self, mock_exists): - mock_exists.return_value = True - - data = '{"config": {}}' - mock_open = mock.mock_open(read_data=data) - open_func = 'tripleo_common.image.image_uploader.open' - - with mock.patch(open_func, mock_open): - self.assertEqual( - {'config': {}}, - self.uploader._containers_json( - 'overlay-layers', 'layers.json') - ) - - 
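# The _containers_file/_containers_json tests above stub all file access
# with mock.mock_open instead of reading /var/lib/containers. A minimal,
# self-contained sketch of that pattern, assuming only the standard
# library; load_layers_json is a hypothetical stand-in for the uploader
# helpers, not the real tripleo_common API:

import json
from unittest import mock


def load_layers_json(path):
    # hypothetical helper: open a file and parse its contents as JSON
    with open(path) as f:
        return json.load(f)


mocked_open = mock.mock_open(read_data='{"config": {}}')
# patch builtins.open for brevity; the tests above patch the module-level
# name 'tripleo_common.image.image_uploader.open' instead
with mock.patch('builtins.open', mocked_open):
    assert load_layers_json('layers.json') == {'config': {}}
# the mock records the call that went through the patched open
mocked_open.assert_called_once_with('layers.json')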
@mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._get_local_layers_manifest') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._containers_json') - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._containers_file') - def test_image_manifest_config(self, _containers_file, _containers_json, - _get_local_layers_manifest): - _containers_file.return_value = '{"config": {}}' - images_not_found = [{ - 'id': 'aaaa', - 'names': ['192.168.2.1:5000/t/heat-api:latest'] - }, { - 'id': 'bbbb', - 'names': ['192.168.2.1:5000/t/heat-engine:latest'] - }] - images = [{ - 'id': 'cccc', - 'names': ['192.168.2.1:5000/t/nova-api:latest'] - }] - man = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [], - } - _containers_json.side_effect = [images_not_found, images, man] - _get_local_layers_manifest.return_value = man - - self.assertRaises( - ImageNotFoundException, - self.uploader._image_manifest_config, - '192.168.2.1:5000/t/nova-api:latest' - ) - - image, manifest, config_str = self.uploader._image_manifest_config( - '192.168.2.1:5000/t/nova-api:latest' - ) - self.assertEqual(images[0], image) - self.assertEqual(man, manifest) - self.assertEqual('{"config": {}}', config_str) - _containers_json.assert_has_calls([ - mock.call('overlay-images', 'images.json'), - mock.call('overlay-images', 'images.json'), - mock.call('overlay-images', 'cccc', 'manifest') - ]) - _containers_file.assert_called_once_with( - 'overlay-images', 'cccc', '=c2hhMjU2OjEyMzQ=' - ) - _get_local_layers_manifest.assert_called_once_with( - man, config_str) - - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._get_all_local_layers_by_digest') - def test_get_local_layers_manifest(self, mock_layers_by_digest): - mock_layers_by_digest.return_value = { - 'sha256:1': {'diff-size': 8}, - 'sha256:2': {'diff-size': 9} - } - man = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [ - {"digest": "sha256:12345"} - ] - } - config_str = json.dumps({'rootfs': - {'diff_ids': ['sha256:1', 'sha256:2']}}) - - manifest = self.uploader._get_local_layers_manifest(man, config_str) - - manifest_expected = { - 'config': man['config'], - 'layers': [ - {'digest': 'sha256:1', - 'mediaType': 'application/vnd.docker.image.rootfs.diff.tar', - 'size': 8}, - {'digest': 'sha256:2', - 'mediaType': 'application/vnd.docker.image.rootfs.diff.tar', - 'size': 9} - ] - } - - self.assertEqual(manifest_expected, manifest) - - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._get_all_local_layers_by_digest') - def test_get_local_layers_manifest_missing_rootfs(self, - mock_layers_by_digest): - mock_layers_by_digest.return_value = { - 'sha256:1': {'diff-size': 8} - } - man = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [ - {"digest": "sha256:12345"} - ] - } - manifest = self.uploader._get_local_layers_manifest(man, '{}') - - self.assertEqual(man, manifest) - - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._get_all_local_layers_by_digest') - def test_get_local_layers_manifest_missing_layer(self, - mock_layers_by_digest): - mock_layers_by_digest.return_value = { - 'sha256:1': {'diff-size': 8} - } - man = { - 'config': { - 'digest': 'sha256:1234', - 'size': 2, - 'mediaType': 'application/vnd.docker.container.image.v1+json' - }, - 'layers': [ - {"digest": "sha256:12345"} - ] - } - config_str = json.dumps({'rootfs': - {'diff_ids': ['sha256:3']}}) - self.assertRaises(ImageNotFoundException, - self.uploader._get_local_layers_manifest, - man, - config_str) - - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._image_manifest_config') - def test_inspect(self, _image_manifest_config): - url = urlparse('containers-storage:/t/nova-api:latest') - config = { - 'config': { - 'Labels': ['one', 'two'] - }, - 'architecture': 'x86_64', - 'os': 'linux' - } - _image_manifest_config.return_value = ( - { - 'id': 'cccc', - 'digest': 'sha256:ccccc', - 'names': ['192.168.2.1:5000/t/nova-api:latest'], - 'created': '2018-10-02T11:13:45.567533229Z' - }, { - 'config': { - 'digest': 'sha256:1234', - }, - 'layers': [ - {'digest': 'sha256:aaa'}, - {'digest': 'sha256:bbb'}, - {'digest': 'sha256:ccc'} - ], - }, - json.dumps(config) - ) - - self.assertEqual( - { - 'Name': '/t/nova-api', - 'Architecture': 'x86_64', - 'Created': '2018-10-02T11:13:45.567533229Z', - 'Digest': 'sha256:ccccc', - 'DockerVersion': '', - 'Labels': ['one', 'two'], - 'Layers': ['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - 'Os': 'linux', - 'RepoTags': [] - }, - self.uploader._inspect(url) - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._image_manifest_config') - def test_inspect_no_labels(self, _image_manifest_config): - url = urlparse('containers-storage:/t/nova-api:latest') - config = { - 'config': {}, - 'architecture': 'x86_64', - 'os': 'linux' - } - _image_manifest_config.return_value = ( - { - 'id': 'cccc', - 'digest': 'sha256:ccccc', - 'names': ['192.168.2.1:5000/t/nova-api:latest'], - 'created': '2018-10-02T11:13:45.567533229Z' - }, { - 'config': { - 'digest': 'sha256:1234', - }, - 'layers': [ - {'digest': 'sha256:aaa'}, - {'digest': 'sha256:bbb'}, - {'digest': 'sha256:ccc'} - ], - }, - json.dumps(config) - ) - - self.assertEqual( - { - 'Name': '/t/nova-api', - 'Architecture': 'x86_64', - 'Created': '2018-10-02T11:13:45.567533229Z', - 'Digest': 'sha256:ccccc', - 'DockerVersion': '', - 'Labels': {}, - 'Layers': ['sha256:aaa', 'sha256:bbb', 'sha256:ccc'], - 'Os': 'linux', - 'RepoTags': [] - }, - self.uploader._inspect(url) - ) - - @mock.patch('os.environ') - @mock.patch('subprocess.Popen') - def test_delete(self, mock_popen, mock_environ): - url = urlparse('containers-storage:/t/nova-api:latest') - mock_process = mock.Mock() - mock_process.communicate.return_value = ('image deleted', '') - mock_process.returncode = 0 - mock_popen.return_value = mock_process - mock_environ.copy.return_value = {} - - self.assertEqual( - 'image deleted', - self.uploader._delete(url) - ) - mock_popen.assert_called_once_with([ - 'buildah', - 'rmi', - '/t/nova-api:latest'], - env={}, stdout=-1, - universal_newlines=True - ) - - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._delete') - def test_cleanup(self, _delete): - self.uploader.cleanup(['foo', 'bar', 'baz']) - _delete.assert_has_calls([ - mock.call(urlparse('containers-storage:bar')), - mock.call(urlparse('containers-storage:baz')), - mock.call(urlparse('containers-storage:foo')) - ]) - - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - def test_collect_manifests_layers(self, _fetch_manifest): - manifest = { - 'schemaVersion': 2, - 'mediaType': image_uploader.MEDIA_MANIFEST_V2, - 'config': { - 'mediaType': image_uploader.MEDIA_CONFIG, - 'digest': 'sha256:1111' - }, - 'layers': [ - {'digest': 'sha256:2222'}, - {'digest': 'sha256:3333'}, - {'digest': 'sha256:4444'} - ] - } - manifest_str = json.dumps(manifest, indent=2) - _fetch_manifest.return_value = manifest_str - url = urlparse('docker://docker.io/t/nova-api:latest') - session = requests.Session() - layers = [] - manifests_str = [] - - self.uploader._collect_manifests_layers( - url, session, manifests_str, layers, False) - self.assertEqual([manifest_str], manifests_str) - self.assertEqual( - [ - 'sha256:2222', - 'sha256:3333', - 'sha256:4444', - ], - layers - ) - - @mock.patch('tripleo_common.image.image_uploader.' - 'PythonImageUploader._fetch_manifest') - def test_collect_manifests_layers_v1(self, _fetch_manifest): - manifest = { - 'schemaVersion': 1, - 'mediaType': image_uploader.MEDIA_MANIFEST_V1, - 'fsLayers': [ - {'blobSum': 'sha256:4444'}, - {'blobSum': 'sha256:3333'}, - {'blobSum': 'sha256:2222'}, - ] - } - manifest_str = json.dumps(manifest, indent=2) - _fetch_manifest.return_value = manifest_str - url = urlparse('docker://docker.io/t/nova-api:latest') - session = requests.Session() - layers = [] - manifests_str = [] - - self.uploader._collect_manifests_layers( - url, session, manifests_str, layers, False) - self.assertEqual([manifest_str], manifests_str) - self.assertEqual( - [ - 'sha256:2222', - 'sha256:3333', - 'sha256:4444', - ], - layers - ) - - @mock.patch('tripleo_common.image.image_uploader.' 
- 'PythonImageUploader._fetch_manifest') - def test_collect_manifests_layers_multi_arch(self, _fetch_manifest): - manifest_x86 = { - 'schemaVersion': 2, - 'mediaType': image_uploader.MEDIA_MANIFEST_V2, - 'config': { - 'mediaType': image_uploader.MEDIA_CONFIG, - 'digest': 'sha256:1111' - }, - 'layers': [ - {'digest': 'sha256:2222'}, - {'digest': 'sha256:3333'}, - {'digest': 'sha256:4444'} - ] - } - manifest_ppc = { - 'schemaVersion': 2, - 'mediaType': image_uploader.MEDIA_MANIFEST_V2, - 'config': { - 'mediaType': image_uploader.MEDIA_CONFIG, - 'digest': 'sha256:5555' - }, - 'layers': [ - {'digest': 'sha256:6666'}, - {'digest': 'sha256:7777'}, - {'digest': 'sha256:8888'} - ] - } - manifest = { - 'schemaVersion': 2, - 'mediaType': image_uploader.MEDIA_MANIFEST_V2_LIST, - "manifests": [ - { - "mediaType": image_uploader.MEDIA_MANIFEST_V2, - "digest": "sha256:bbbb", - "platform": { - "architecture": "amd64", - "os": "linux", - "features": ["sse4"] - } - }, - { - "mediaType": image_uploader.MEDIA_MANIFEST_V2, - "digest": "sha256:aaaa", - "platform": { - "architecture": "ppc64le", - "os": "linux", - } - } - ] - } - manifest_str = json.dumps(manifest, indent=2) - _fetch_manifest.side_effect = [ - manifest_str, - json.dumps(manifest_x86), - json.dumps(manifest_ppc) - ] - url = urlparse('docker://docker.io/t/nova-api:latest') - session = requests.Session() - layers = [] - manifests_str = [] - - self.uploader._collect_manifests_layers( - url, session, manifests_str, layers, multi_arch=True) - self.assertEqual( - [ - manifest_str, - json.dumps(manifest_x86), - json.dumps(manifest_ppc) - ], - manifests_str - ) - self.assertEqual( - [ - 'sha256:2222', - 'sha256:3333', - 'sha256:4444', - 'sha256:6666', - 'sha256:7777', - 'sha256:8888', - ], - layers - ) diff --git a/tripleo_common/tests/image/test_kolla_builder.py b/tripleo_common/tests/image/test_kolla_builder.py deleted file mode 100644 index fe0a773b6..000000000 --- a/tripleo_common/tests/image/test_kolla_builder.py +++ /dev/null @@ -1,1299 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import os -import sys -import tempfile -from unittest import mock - -import yaml - -from tripleo_common.image import image_uploader -from tripleo_common.image import kolla_builder as kb -from tripleo_common.tests import base - - -TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), - '..', '..', '..', 'container-images', - 'tripleo_containers.yaml.j2') - - -DEFAULTS_PATH = os.path.join(os.path.dirname(__file__), - '..', '..', '..', 'container-images', - 'container_image_prepare_defaults.yaml') - -TEMPLATE_DIR_PATH = os.path.join(os.path.dirname(__file__), - '..', '..', '..', 'container-images') - -kb.init_prepare_defaults(DEFAULTS_PATH) -KB_DEFAULT_TAG = kb.CONTAINER_IMAGES_DEFAULTS['tag'] -KB_DEFAULT_PREFIX = kb.CONTAINER_IMAGES_DEFAULTS['name_prefix'] -KB_DEFAULT_NAMESPACE = kb.CONTAINER_IMAGES_DEFAULTS['namespace'] -CONTAINER_DEFAULTS_ENVIRONMENT = ('environments/' - 'containers-default-parameters.yaml') - -filedata = str("""container_images: -- imagename: docker.io/tripleomastercentos9/heat-docker-agents-centos:latest - image_source: kolla - push_destination: localhost:8787 -- imagename: docker.io/tripleomastercentos9/centos-binary-nova-compute:liberty - image_source: kolla - uploader: docker - push_destination: localhost:8787 -- imagename: docker.io/tripleomastercentos9/centos-binary-nova-libvirt:liberty - image_source: kolla - uploader: docker -- imagename: docker.io/tripleomastercentos9/image-with-missing-tag - image_source: kolla - push_destination: localhost:8787 -- imagename: docker.io/tripleomastercentos9/skip-build - image_source: foo - push_destination: localhost:8787 -""") - -template_filedata = str(""" -container_images_template: -- imagename: "{{namespace}}/heat-docker-agents-centos:latest" - image_source: kolla - push_destination: "{{push_destination}}" -- imagename: "{{namespace}}/{{name_prefix}}nova-compute{{name_suffix}}:{{tag}}" - image_source: kolla - uploader: "docker" - push_destination: "{{push_destination}}" -- imagename: "{{namespace}}/{{name_prefix}}nova-libvirt{{name_suffix}}:{{tag}}" - image_source: kolla - uploader: "docker" -- imagename: "{{namespace}}/image-with-missing-tag" - image_source: kolla - push_destination: "{{push_destination}}" -- imagename: "{{namespace}}/skip-build" - image_source: foo - push_destination: "{{push_destination}}" -""") - - -class TestKollaImageBuilder(base.TestCase): - - def setUp(self): - super(TestKollaImageBuilder, self).setUp() - files = [] - files.append('testfile') - self.filelist = files - - def test_imagename_to_regex(self): - itr = kb.KollaImageBuilder.imagename_to_regex - self.assertIsNone(itr('')) - self.assertIsNone(itr(None)) - self.assertEqual('foo', itr('foo')) - self.assertEqual('foo', itr('foo:current-tripleo')) - self.assertEqual('foo', itr('tripleo/foo:current-tripleo')) - self.assertEqual('foo', itr('tripleo/foo')) - self.assertEqual('foo', - itr('tripleo/centos-binary-foo:current-tripleo')) - self.assertEqual('foo', itr('centos-binary-foo:current-tripleo')) - self.assertEqual('foo', itr('centos-binary-foo')) - - @mock.patch('tripleo_common.image.base.open', - mock.mock_open(read_data=filedata), create=True) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('subprocess.Popen') - def test_build_images(self, mock_popen, mock_path): - process = mock.Mock() - process.returncode = 0 - process.communicate.return_value = 'done', '' - mock_popen.return_value = process - - builder = kb.KollaImageBuilder(self.filelist) - self.assertEqual('done', builder.build_images(['kolla-config.conf'])) - env 
= os.environ.copy() - mock_popen.assert_called_once_with([ - 'kolla-build', - '--config-file', - 'kolla-config.conf', - '^nova-compute$', - '^nova-libvirt$', - '^heat-docker-agents-centos$', - '^image-with-missing-tag$', - ], env=env, stdout=-1, universal_newlines=True) - - @mock.patch('tripleo_common.image.base.open', - mock.mock_open(read_data=filedata), create=True) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('subprocess.Popen') - def test_build_images_template_only(self, mock_popen, mock_path): - process = mock.Mock() - process.returncode = 0 - process.communicate.return_value = 'done', '' - mock_popen.return_value = process - - builder = kb.KollaImageBuilder(self.filelist) - self.assertEqual('done', - builder.build_images( - ['kolla-config.conf'], [], True, '/tmp/kolla')) - env = os.environ.copy() - call1 = mock.call([ - 'kolla-build', - '--config-file', - 'kolla-config.conf', - '^nova-compute$', - '^nova-libvirt$', - '^heat-docker-agents-centos$', - '^image-with-missing-tag$', - '--template-only', - '--work-dir', '/tmp/kolla', - ], env=env, stdout=-1, universal_newlines=True) - call2 = mock.call([ - 'kolla-build', - '--config-file', - 'kolla-config.conf', - '^nova-compute$', - '^nova-libvirt$', - '^heat-docker-agents-centos$', - '^image-with-missing-tag$', - '--list-dependencies', - ], env=env, stdout=-1, stderr=-1, universal_newlines=True) - calls = [call1, call2] - mock_popen.assert_has_calls(calls, any_order=True) - - @mock.patch('tripleo_common.image.kolla_builder.KollaImageBuilder.' - 'container_images_from_template') - @mock.patch('subprocess.Popen') - def test_build_images_no_conf(self, mock_popen, mock_images_from_template): - process = mock.Mock() - process.returncode = 0 - process.communicate.return_value = 'done', '' - mock_popen.return_value = process - mock_images_from_template.return_value = [] - - builder = kb.KollaImageBuilder([]) - self.assertEqual('done', builder.build_images([])) - env = os.environ.copy() - mock_images_from_template.assert_called_once() - mock_popen.assert_called_once_with([ - 'kolla-build', - ], env=env, stdout=-1, universal_newlines=True) - - @mock.patch('tripleo_common.image.base.open', - mock.mock_open(read_data=filedata), create=True) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('subprocess.Popen') - def test_build_images_exclude(self, mock_popen, mock_path): - process = mock.Mock() - process.returncode = 0 - process.communicate.return_value = 'done', '' - mock_popen.return_value = process - - builder = kb.KollaImageBuilder(self.filelist) - self.assertEqual('done', builder.build_images(['kolla-config.conf'], - ['nova-compute'])) - env = os.environ.copy() - mock_popen.assert_called_once_with([ - 'kolla-build', - '--config-file', - 'kolla-config.conf', - '^nova-libvirt$', - '^heat-docker-agents-centos$', - '^image-with-missing-tag$', - ], env=env, stdout=-1, universal_newlines=True) - - -class TestKollaImageBuilderTemplate(base.TestCase): - - def setUp(self): - super(TestKollaImageBuilderTemplate, self).setUp() - with tempfile.NamedTemporaryFile(delete=False) as imagefile: - self.addCleanup(os.remove, imagefile.name) - self.filelist = [imagefile.name] - with open(imagefile.name, 'w') as f: - f.write(template_filedata) - - def test_container_images_from_template(self): - """Test that we can generate same as testdata""" - builder = kb.KollaImageBuilder(self.filelist) - result = builder.container_images_from_template( - push_destination='localhost:8787', - name_prefix='centos-binary-', - 
namespace='docker.io/tripleomastercentos9', - tag='liberty' - ) - # rendering the container_images_template section with these inputs - # must produce exactly the container_images section of the test data - container_images = yaml.safe_load(filedata)['container_images'] - self.assertEqual(container_images, result) - - def test_container_images_template_inputs(self): - builder = kb.KollaImageBuilder(self.filelist) - self.assertEqual( - kb.CONTAINER_IMAGES_DEFAULTS, - builder.container_images_template_inputs() - ) - - expected = { - 'name_suffix': '', - 'rhel_containers': False, - 'neutron_driver': 'ovn', - } - for key in ( - 'namespace', - 'name_prefix', - 'tag', - 'ceph_namespace', - 'ceph_image', - 'ceph_tag', - 'ceph_grafana_namespace', - 'ceph_grafana_image', - 'ceph_grafana_tag', - 'ceph_prometheus_namespace', - 'ceph_prometheus_image', - 'ceph_prometheus_tag', - 'ceph_alertmanager_namespace', - 'ceph_alertmanager_image', - 'ceph_alertmanager_tag', - 'ceph_node_exporter_namespace', - 'ceph_node_exporter_image', - 'ceph_node_exporter_tag', - 'ceph_haproxy_namespace', - 'ceph_haproxy_image', - 'ceph_haproxy_tag', - 'ceph_keepalived_namespace', - 'ceph_keepalived_image', - 'ceph_keepalived_tag', - 'pushgateway_namespace', - 'pushgateway_image', - 'pushgateway_tag', - ): - if key in kb.CONTAINER_IMAGES_DEFAULTS: - expected[key] = kb.CONTAINER_IMAGES_DEFAULTS[key] - - self.assertEqual( - expected, - builder.container_images_template_inputs() - ) - - expected = { - 'namespace': '192.0.2.0:5000/tripleomastercentos9', - 'ceph_namespace': 'quay.ceph.io/ceph-ci', - 'ceph_image': 'ceph-daemon', - 'ceph_tag': 'latest', - 'name_prefix': 'prefix-', - 'name_suffix': '-suffix', - 'tag': 'master', - 'rhel_containers': False, - 'neutron_driver': 'ovn', - } - for key in ( - 'ceph_grafana_namespace', - 'ceph_grafana_image', - 'ceph_grafana_tag', - 'ceph_prometheus_namespace', - 'ceph_prometheus_image', - 'ceph_prometheus_tag', - 'ceph_alertmanager_namespace', - 'ceph_alertmanager_image', - 'ceph_alertmanager_tag', - 'ceph_node_exporter_namespace', - 'ceph_node_exporter_image', - 'ceph_node_exporter_tag', - 'ceph_haproxy_namespace', - 'ceph_haproxy_image', - 'ceph_haproxy_tag', - 'ceph_keepalived_namespace', - 'ceph_keepalived_image', - 'ceph_keepalived_tag', - 'pushgateway_namespace', - 'pushgateway_image', - 'pushgateway_tag', - ): - if key in kb.CONTAINER_IMAGES_DEFAULTS: - expected[key] = kb.CONTAINER_IMAGES_DEFAULTS[key] - - self.assertEqual( - expected, - builder.container_images_template_inputs( - namespace='192.0.2.0:5000/tripleomastercentos9', - ceph_namespace='quay.ceph.io/ceph-ci', - ceph_image='ceph-daemon', - ceph_tag='latest', - name_prefix='prefix', - name_suffix='suffix', - tag='master', - rhel_containers=False, - neutron_driver='ovn', - ) - ) - - def test_container_images_from_template_filter(self): - builder = kb.KollaImageBuilder(self.filelist) - - def filter(entry): - - # skip the heat-docker-agents image - if 'heat-docker-agents' in entry.get('imagename'): - return - - # set source and destination on all entries - entry['push_destination'] = 'localhost:8787' - return entry - - result = builder.container_images_from_template( - filter=filter, - tag='liberty' - ) - container_images = [{ - 'image_source': 'kolla', - 'imagename': KB_DEFAULT_NAMESPACE + '/' + KB_DEFAULT_PREFIX + - 'nova-compute:liberty', - 'push_destination': 'localhost:8787', - 'uploader': 'docker' - }, { - 'image_source': 'kolla', - 'imagename': KB_DEFAULT_NAMESPACE + '/' + KB_DEFAULT_PREFIX + - 'nova-libvirt:liberty', - 'push_destination': 'localhost:8787', - 'uploader': 'docker' - }, { - 'image_source': 'kolla', - 'imagename': KB_DEFAULT_NAMESPACE + '/image-with-missing-tag', - 'push_destination': 'localhost:8787' - }, { - 'image_source': 'foo', - 'imagename': KB_DEFAULT_NAMESPACE + '/skip-build', - 'push_destination': 'localhost:8787' - }] - self.assertEqual(container_images, result) - - def _test_container_images_yaml_in_sync_helper(self, neutron_driver=None, - rhel_containers=False, - remove_images=None): - '''Confirm tripleo_containers.yaml.j2 renders to tripleo_containers.yaml - - TODO(sbaker) remove when tripleo_containers.yaml is deleted - ''' - if remove_images is None: - remove_images = [] - mod_dir = os.path.dirname(sys.modules[__name__].__file__) - project_dir = os.path.abspath(os.path.join(mod_dir, '../../../')) - files_dir = os.path.join(project_dir, 'container-images') - - oc_tmpl_file = os.path.join(files_dir, 'tripleo_containers.yaml.j2') - tmpl_builder = kb.KollaImageBuilder([oc_tmpl_file], files_dir) - - def ffunc(entry): - if 'params' in entry: - del entry['params'] - if 'services' in entry: - del entry['services'] - return entry - - result = tmpl_builder.container_images_from_template( - filter=ffunc, neutron_driver=neutron_driver, - rhel_containers=rhel_containers) - - oc_yaml_file = os.path.join(files_dir, 'tripleo_containers.yaml') - yaml_builder = kb.KollaImageBuilder([oc_yaml_file], files_dir) - container_images = yaml_builder.load_config_files( - yaml_builder.CONTAINER_IMAGES) - - # remove the image references listed in the remove_images param from - # the tripleo_containers.yaml data - for image in remove_images: - container_images.remove(image) - - self.assertSequenceEqual(container_images, result) - - def test_container_images_yaml_in_sync(self): - remove_images = [ - {'image_source': 'tripleo', - 'imagename': KB_DEFAULT_NAMESPACE + '/' + KB_DEFAULT_PREFIX + - 'ovn-northd:' + KB_DEFAULT_TAG}, - {'image_source': 'tripleo', - 'imagename': KB_DEFAULT_NAMESPACE + '/' + KB_DEFAULT_PREFIX + - 'ovn-controller:' + KB_DEFAULT_TAG}, - {'image_source': 'tripleo', - 'imagename': KB_DEFAULT_NAMESPACE + '/' + KB_DEFAULT_PREFIX + - 'ovn-nb-db-server:' + KB_DEFAULT_TAG}, - {'image_source': 'tripleo', - 'imagename': KB_DEFAULT_NAMESPACE + '/' + KB_DEFAULT_PREFIX + - 'ovn-sb-db-server:' + KB_DEFAULT_TAG}, - {'image_source': 'tripleo', - 'imagename': KB_DEFAULT_NAMESPACE + '/' + KB_DEFAULT_PREFIX + - 'neutron-metadata-agent-ovn:' + KB_DEFAULT_TAG}] - self._test_container_images_yaml_in_sync_helper( - remove_images=remove_images) - - def test_container_images_yaml_in_sync_for_ovn(self): - # no image references need removing for the ovn driver - remove_images = [] - self._test_container_images_yaml_in_sync_helper( - neutron_driver='ovn', remove_images=remove_images) - - -class TestPrepare(base.TestCase): - - def setUp(self): - super(TestPrepare, self).setUp() - image_uploader.BaseImageUploader.init_registries_cache() - with tempfile.NamedTemporaryFile(delete=False) as imagefile: - self.addCleanup(os.remove, imagefile.name) - self.filelist = [imagefile.name] - with open(imagefile.name, 'w') as f: - f.write(template_filedata) - - @mock.patch.object(image_uploader.ImageUploadManager, 'uploader') - def test_detect_insecure_registry(self, mock_uploader): - mock_f = mock.MagicMock() - mock_f.is_insecure_registry.side_effect = [False, True] - mock_uploader.return_value = mock_f - self.assertEqual( - {}, - kb.detect_insecure_registries( - {'foo': 'docker.io/tripleo'})) - self.assertEqual( - {'DockerInsecureRegistryAddress': ['tripleo']}, - kb.detect_insecure_registries( -
{'foo': 'tripleo'})) - - @mock.patch.object(image_uploader.ImageUploadManager, 'uploader') - def test_detect_insecure_registry_multi(self, mock_uploader): - mock_f = mock.MagicMock() - mock_f.is_insecure_registry.return_value = True - mock_uploader.return_value = mock_f - self.assertEqual( - {'DockerInsecureRegistryAddress': [ - '192.0.2.0:8787', - '192.0.2.1:8787']}, - kb.detect_insecure_registries({ - 'foo': '192.0.2.0:8787/tripleo/foo', - 'bar': '192.0.2.0:8787/tripleo/bar', - 'baz': '192.0.2.1:8787/tripleo/baz', - })) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'detect_insecure_registries', return_value={}) - def test_prepare_noargs(self, mock_insecure): - self.assertEqual( - {}, - kb.container_images_prepare(template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'detect_insecure_registries', return_value={}) - def test_prepare_simple(self, mock_insecure): - self.assertEqual({ - 'container_images.yaml': [ - {'image_source': 'tripleo', - 'imagename': '192.0.2.0:8787/t/p-nova-compute:l'} - ], - 'environments/containers-default-parameters.yaml': { - 'ContainerNovaComputeImage': - '192.0.2.0:8787/t/p-nova-compute:l', - 'ContainerNovaLibvirtConfigImage': - '192.0.2.0:8787/t/p-nova-compute:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - service_filter=['OS::TripleO::Services::NovaLibvirt'], - excludes=['libvirt'], - mapping_args={ - 'namespace': '192.0.2.0:8787/t', - 'name_prefix': 'p', - 'name_suffix': '', - 'tag': 'l', - } - ) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'detect_insecure_registries', return_value={}) - def test_prepare_includes(self, mock_insecure): - self.assertEqual({ - 'container_images.yaml': [ - {'image_source': 'tripleo', - 'imagename': '192.0.2.0:8787/t/p-nova-libvirt:l'} - ], - 'environments/containers-default-parameters.yaml': { - 'ContainerNovaLibvirtImage': - '192.0.2.0:8787/t/p-nova-libvirt:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - includes=['libvirt'], - mapping_args={ - 'namespace': '192.0.2.0:8787/t', - 'name_prefix': 'p', - 'name_suffix': '', - 'tag': 'l', - } - ) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'detect_insecure_registries', return_value={}) - def test_prepare_includes_excludes(self, mock_insecure): - # assert same result as includes only. includes trumps excludes - self.assertEqual({ - 'container_images.yaml': [ - {'image_source': 'tripleo', - 'imagename': '192.0.2.0:8787/t/p-nova-libvirt:l'} - ], - 'environments/containers-default-parameters.yaml': { - 'ContainerNovaLibvirtImage': - '192.0.2.0:8787/t/p-nova-libvirt:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - includes=['libvirt'], - excludes=['libvirt'], - mapping_args={ - 'namespace': '192.0.2.0:8787/t', - 'name_prefix': 'p', - 'name_suffix': '', - 'tag': 'l', - } - ) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' 
- 'detect_insecure_registries', return_value={}) - def test_prepare_push_dest(self, mock_insecure): - self.assertEqual({ - 'container_images.yaml': [{ - 'image_source': 'tripleo', - 'imagename': 'docker.io/t/p-nova-api:l', - 'push_destination': '192.0.2.0:8787', - }], - 'environments/containers-default-parameters.yaml': { - 'ContainerNovaApiImage': - '192.0.2.0:8787/t/p-nova-api:l', - 'ContainerNovaConfigImage': - '192.0.2.0:8787/t/p-nova-api:l', - 'ContainerNovaMetadataConfigImage': - u'192.0.2.0:8787/t/p-nova-api:l', - 'ContainerNovaMetadataImage': - '192.0.2.0:8787/t/p-nova-api:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - service_filter=['OS::TripleO::Services::NovaApi'], - push_destination='192.0.2.0:8787', - mapping_args={ - 'namespace': 'docker.io/t', - 'name_prefix': 'p', - 'name_suffix': '', - 'tag': 'l', - } - ) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'detect_insecure_registries', return_value={}) - @mock.patch('tripleo_common.image.image_uploader.get_undercloud_registry') - def test_prepare_push_dest_discover(self, mock_gur, mock_insecure): - mock_gur.return_value = '192.0.2.0:8787' - self.assertEqual({ - 'container_images.yaml': [{ - 'image_source': 'tripleo', - 'imagename': 'docker.io/t/p-nova-api:l', - 'push_destination': '192.0.2.0:8787', - }], - 'environments/containers-default-parameters.yaml': { - 'ContainerNovaApiImage': - '192.0.2.0:8787/t/p-nova-api:l', - 'ContainerNovaConfigImage': - '192.0.2.0:8787/t/p-nova-api:l', - 'ContainerNovaMetadataConfigImage': - u'192.0.2.0:8787/t/p-nova-api:l', - 'ContainerNovaMetadataImage': - '192.0.2.0:8787/t/p-nova-api:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - service_filter=['OS::TripleO::Services::NovaApi'], - push_destination=True, - mapping_args={ - 'namespace': 'docker.io/t', - 'name_prefix': 'p', - 'name_suffix': '', - 'tag': 'l', - } - ) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'detect_insecure_registries', return_value={}) - def test_prepare_ceph(self, mock_insecure): - self.assertEqual({ - 'container_images.yaml': [{ - 'image_source': 'ceph', - 'imagename': '192.0.2.0:8787/t/ceph:l', - }], - 'environments/containers-default-parameters.yaml': { - 'ContainerCephDaemonImage': '192.0.2.0:8787/t/ceph:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - service_filter=['OS::TripleO::Services::CephMon'], - mapping_args={ - 'ceph_namespace': '192.0.2.0:8787/t', - 'ceph_image': 'ceph', - 'ceph_tag': 'l', - } - ) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' 
- 'detect_insecure_registries', return_value={}) - def test_prepare_neutron_driver_default(self, mock_insecure): - self.assertEqual({ - 'container_images.yaml': [ - {'image_source': 'tripleo', - 'imagename': 't/p-neutron-server:l'} - ], - 'environments/containers-default-parameters.yaml': { - 'ContainerNeutronApiImage': 't/p-neutron-server:l', - 'ContainerNeutronConfigImage': 't/p-neutron-server:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - service_filter=[ - 'OS::TripleO::Services::NeutronServer' - ], - mapping_args={ - 'namespace': 't', - 'name_prefix': 'p', - 'name_suffix': '', - 'tag': 'l', - 'neutron_driver': None - } - ) - ) - - @mock.patch('tripleo_common.image.kolla_builder.' - 'detect_insecure_registries', return_value={}) - def test_prepare_neutron_driver_ovn(self, mock_insecure): - self.assertEqual({ - 'container_images.yaml': [ - {'image_source': 'tripleo', - 'imagename': 't/p-neutron-server:l'}, - {'image_source': 'tripleo', - 'imagename': 't/p-ovn-controller:l'} - ], - 'environments/containers-default-parameters.yaml': { - 'ContainerNeutronApiImage': 't/p-neutron-server:l', - 'ContainerNeutronConfigImage': 't/p-neutron-server:l', - 'ContainerOvnControllerConfigImage': 't/p-ovn-controller:l', - 'ContainerOvnControllerImage': 't/p-ovn-controller:l' - }}, - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - service_filter=[ - 'OS::TripleO::Services::NeutronServer', - 'OS::TripleO::Services::OVNController' - ], - mapping_args={ - 'namespace': 't', - 'name_prefix': 'p', - 'name_suffix': '', - 'tag': 'l', - 'neutron_driver': 'ovn' - } - ) - ) - - @mock.patch.object(image_uploader, 'ImageUploadManager') - @mock.patch('tripleo_common.image.kolla_builder.' 
- 'detect_insecure_registries', return_value={}) - def test_prepare_default_tag(self, mock_insecure, mock_manager): - mock_manager_instance = mock.Mock() - mock_manager.return_value = mock_manager_instance - mock_uploader = mock.Mock() - mock_uploader.discover_image_tags.return_value = [] - mock_manager_instance.uploader.return_value = mock_uploader - - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - mapping_args={}, - tag_from_label="n-v", - ) - self.assertTrue( - mock_uploader.discover_image_tags.call_args_list[0][0][2]) - - kb.container_images_prepare( - template_file=TEMPLATE_PATH, - template_dir=TEMPLATE_DIR_PATH, - output_env_file=CONTAINER_DEFAULTS_ENVIRONMENT, - output_images_file='container_images.yaml', - mapping_args={"tag": "master"}, - tag_from_label="n-v", - ) - self.assertFalse( - mock_uploader.discover_image_tags.call_args_list[1][0][2]) - - def test_get_enabled_services_empty(self): - self.assertEqual( - {}, - kb.get_enabled_services({}, []) - ) - - def test_get_enabled_services_default_count(self): - self.assertEqual( - {'ControllerServices': [ - 'OS::TripleO::Services::NeutronApi', - 'OS::TripleO::Services::NovaApi'], - 'ComputeServices': [ - 'OS::TripleO::Services::NovaCompute'], - 'BlockStorageServices': []}, - kb.get_enabled_services({ - 'parameter_defaults': {} - }, [ - { - 'name': 'Controller', - 'CountDefault': 1, - 'ServicesDefault': [ - 'OS::TripleO::Services::NeutronApi', - 'OS::TripleO::Services::NovaApi' - ] - }, { - 'name': 'Compute', - 'CountDefault': 1, - 'ServicesDefault': [ - 'OS::TripleO::Services::NovaCompute' - ] - }, { - 'name': 'BlockStorage', - 'ServicesDefault': [ - 'OS::TripleO::Services::Timesync' - ] - } - ]) - ) - - def test_get_enabled_services(self): - self.assertEqual( - {'ControllerServices': [ - 'OS::TripleO::Services::NeutronApi', - 'OS::TripleO::Services::NovaApi'], - 'ComputeServices': [ - 'OS::TripleO::Services::NovaCompute'], - 'BlockStorageServices': []}, - kb.get_enabled_services({ - 'parameter_defaults': { - 'ControllerCount': 1, - 'ComputeCount': 1, - 'BlockStorageCount': 0, - } - }, [ - { - 'name': 'Controller', - 'CountDefault': 0, - 'ServicesDefault': [ - 'OS::TripleO::Services::NeutronApi', - 'OS::TripleO::Services::NovaApi' - ] - }, { - 'name': 'Compute', - 'ServicesDefault': [ - 'OS::TripleO::Services::NovaCompute' - ] - }, { - 'name': 'BlockStorage', - 'ServicesDefault': [ - 'OS::TripleO::Services::Timesync' - ] - } - ]) - ) - - def test_build_service_filter(self): - self.assertEqual( - set([ - 'OS::TripleO::Services::HeatApi', - 'OS::TripleO::Services::NovaApi', - 'OS::TripleO::Services::NovaCompute', - 'OS::TripleO::Services::NeutronApi', - 'OS::TripleO::Services::Kubernetes::Worker', - ]), - kb.build_service_filter({ - 'resource_registry': { - 'OS::TripleO::Services::NovaApi': - '/tht/docker/services/foo.yaml', - 'OS::TripleO::Services::NovaCompute': - '/tht/docker/services/foo.yaml', - 'OS::TripleO::Services::Kubernetes::Worker': - 'deployment' + - 'kubernetes/kubernetes-worker-baremetal-ansible.yaml', - 'OS::TripleO::Services::Noop': - 'OS::Heat::None' - } - }, [ - { - 'name': 'Controller', - 'CountDefault': 1, - 'ServicesDefault': [ - 'OS::TripleO::Services::HeatApi', - 'OS::TripleO::Services::NeutronApi', - 'OS::TripleO::Services::NovaApi', - 'OS::TripleO::Services::Noop' - ] - }, { - 'name': 'Compute', - 'CountDefault': 1, - 'ServicesDefault': [ - 
'OS::TripleO::Services::NovaCompute', - 'OS::TripleO::Services::Kubernetes::Worker' - ] - }, { - 'name': 'BlockStorage', - 'ServicesDefault': [ - 'OS::TripleO::Services::Timesync' - ] - } - ]) - ) - - @mock.patch('tripleo_common.image.kolla_builder.container_images_prepare') - @mock.patch('tripleo_common.image.image_uploader.ImageUploadManager', - autospec=True) - def test_container_images_prepare_multi(self, mock_im, mock_cip): - mock_lock = mock.MagicMock() - mapping_args = { - 'namespace': 't', - 'name_prefix': '', - 'name_suffix': '', - } - env = { - 'parameter_defaults': { - 'LocalContainerRegistry': '192.0.2.1', - 'DockerRegistryMirror': 'http://192.0.2.2/reg/', - 'ContainerImageRegistryCredentials': { - 'docker.io': {'my_username': 'my_password'} - }, - 'ContainerImagePrepare': [{ - 'set': mapping_args, - 'tag_from_label': 'foo', - 'includes': ['nova', 'neutron'], - }, { - 'set': mapping_args, - 'tag_from_label': 'bar', - 'excludes': ['nova', 'neutron'], - 'push_destination': True, - 'modify_role': 'add-foo-plugin', - 'modify_only_with_labels': ['kolla_version'], - 'modify_vars': {'foo_version': '1.0.1'} - }, { - 'set': mapping_args, - 'tag_from_label': 'bar', - 'includes': ['nova', 'neutron'], - 'push_destination': True, - 'modify_role': 'add-foo-plugin', - 'modify_only_with_source': ['kolla', 'tripleo'], - 'modify_vars': {'foo_version': '1.0.1'} - - }] - } - } - roles_data = [] - mock_cip.side_effect = [ - { - 'image_params': { - 'FooImage': 't/foo:latest', - 'BarImage': 't/bar:latest', - 'BazImage': 't/baz:latest', - 'BinkImage': 't/bink:latest' - }, - 'upload_data': [] - }, { - 'image_params': { - 'BarImage': 't/bar:1.0', - 'BazImage': 't/baz:1.0' - }, - 'upload_data': [{ - 'imagename': 't/bar:1.0', - 'push_destination': '192.0.2.1:8787' - }, { - 'imagename': 't/baz:1.0', - 'push_destination': '192.0.2.1:8787' - }] - }, - { - 'image_params': { - 'BarImage': 't/bar:1.0', - 'BazImage': 't/baz:1.0' - }, - 'upload_data': [{ - 'imagename': 't/bar:1.0', - 'push_destination': '192.0.2.1:8787' - }, { - 'imagename': 't/baz:1.0', - 'push_destination': '192.0.2.1:8787' - }] - }, - ] - - image_params = kb.container_images_prepare_multi(env, roles_data, - lock=mock_lock) - - mock_cip.assert_has_calls([ - mock.call( - excludes=None, - includes=['nova', 'neutron'], - mapping_args=mapping_args, - output_env_file='image_params', - output_images_file='upload_data', - pull_source=None, - push_destination=None, - service_filter=None, - tag_from_label='foo', - append_tag=mock.ANY, - modify_role=None, - modify_only_with_labels=None, - modify_only_with_source=None, - modify_vars=None, - mirrors={ - 'docker.io': 'http://192.0.2.2/reg/' - }, - registry_credentials={ - 'docker.io': {'my_username': 'my_password'} - }, - multi_arch=False, - lock=mock_lock - ), - mock.call( - excludes=['nova', 'neutron'], - includes=None, - mapping_args=mapping_args, - output_env_file='image_params', - output_images_file='upload_data', - pull_source=None, - push_destination='192.0.2.1:8787', - service_filter=None, - tag_from_label='bar', - append_tag=mock.ANY, - modify_role='add-foo-plugin', - modify_only_with_labels=['kolla_version'], - modify_only_with_source=None, - modify_vars={'foo_version': '1.0.1'}, - mirrors={ - 'docker.io': 'http://192.0.2.2/reg/' - }, - registry_credentials={ - 'docker.io': {'my_username': 'my_password'} - }, - multi_arch=False, - lock=mock_lock - ), - mock.call( - excludes=None, - includes=['nova', 'neutron'], - mapping_args=mapping_args, - output_env_file='image_params', - 
output_images_file='upload_data', - pull_source=None, - push_destination='192.0.2.1:8787', - service_filter=None, - tag_from_label='bar', - append_tag=mock.ANY, - modify_role='add-foo-plugin', - modify_only_with_labels=None, - modify_only_with_source=['kolla', 'tripleo'], - modify_vars={'foo_version': '1.0.1'}, - mirrors={ - 'docker.io': 'http://192.0.2.2/reg/' - }, - registry_credentials={ - 'docker.io': {'my_username': 'my_password'} - }, - multi_arch=False, - lock=mock_lock - ) - ]) - - self.assertEqual(mock_im.call_count, 2) - - self.assertEqual( - { - 'BarImage': 't/bar:1.0', - 'BazImage': 't/baz:1.0', - 'BinkImage': 't/bink:latest', - 'FooImage': 't/foo:latest' - }, - image_params - ) - - @mock.patch('tripleo_common.image.kolla_builder.container_images_prepare') - def test_container_images_prepare_multi_dry_run(self, mock_cip): - mock_lock = mock.MagicMock() - mapping_args = { - 'namespace': 't', - 'name_prefix': '', - 'name_suffix': '', - } - env = { - 'parameter_defaults': { - 'ContainerImagePrepare': [{ - 'set': mapping_args, - 'tag_from_label': 'foo', - }, { - 'set': mapping_args, - 'tag_from_label': 'bar', - 'excludes': ['nova', 'neutron'], - 'push_destination': '192.0.2.1:8787', - 'modify_role': 'add-foo-plugin', - 'modify_only_with_labels': ['kolla_version'], - 'modify_vars': {'foo_version': '1.0.1'}, - 'modify_append_tag': 'modify-123' - }] - } - } - roles_data = [] - mock_cip.side_effect = [ - { - 'image_params': { - 'FooImage': 't/foo:latest', - 'BarImage': 't/bar:latest', - 'BazImage': 't/baz:latest', - 'BinkImage': 't/bink:latest' - }, - 'upload_data': [] - }, { - 'image_params': { - 'BarImage': 't/bar:1.0', - 'BazImage': 't/baz:1.0' - }, - 'upload_data': [{ - 'imagename': 't/bar:1.0', - 'push_destination': '192.0.2.1:8787' - }, { - 'imagename': 't/baz:1.0', - 'push_destination': '192.0.2.1:8787' - }] - }, - ] - - image_params = kb.container_images_prepare_multi(env, roles_data, True, - lock=mock_lock) - - mock_cip.assert_has_calls([ - mock.call( - excludes=None, - includes=None, - mapping_args=mapping_args, - output_env_file='image_params', - output_images_file='upload_data', - pull_source=None, - push_destination=None, - service_filter=None, - tag_from_label='foo', - append_tag=mock.ANY, - modify_role=None, - modify_only_with_labels=None, - modify_only_with_source=None, - modify_vars=None, - mirrors={}, - registry_credentials=None, - multi_arch=False, - lock=mock_lock - ), - mock.call( - excludes=['nova', 'neutron'], - includes=None, - mapping_args=mapping_args, - output_env_file='image_params', - output_images_file='upload_data', - pull_source=None, - push_destination='192.0.2.1:8787', - service_filter=None, - tag_from_label='bar', - append_tag=mock.ANY, - modify_role='add-foo-plugin', - modify_only_with_labels=['kolla_version'], - modify_only_with_source=None, - modify_vars={'foo_version': '1.0.1'}, - mirrors={}, - registry_credentials=None, - multi_arch=False, - lock=mock_lock - ) - ]) - self.assertEqual( - { - 'BarImage': 't/bar:1.0', - 'BazImage': 't/baz:1.0', - 'BinkImage': 't/bink:latest', - 'FooImage': 't/foo:latest' - }, - image_params - ) - - @mock.patch('tripleo_common.image.kolla_builder.container_images_prepare') - def test_container_images_prepare_multi_tag_from_label(self, mock_cip): - mock_lock = mock.MagicMock() - mapping_args = { - 'namespace': 't', - 'name_prefix': '', - 'name_suffix': '', - 'tag': 'l', - } - mapping_args_no_tag = { - 'namespace': 't', - 'name_prefix': '', - 'name_suffix': '', - } - env = { - 'parameter_defaults': { - 
'ContainerImagePrepare': [{ - 'set': mapping_args_no_tag, - 'tag_from_label': 'foo', - }, { - 'set': mapping_args, - 'tag_from_label': 'bar', - 'excludes': ['nova', 'neutron'], - 'push_destination': '192.0.2.1:8787', - 'modify_role': 'add-foo-plugin', - 'modify_only_with_labels': ['kolla_version'], - 'modify_vars': {'foo_version': '1.0.1'}, - 'modify_append_tag': 'modify-123' - }] - } - } - roles_data = [] - mock_cip.side_effect = [ - { - 'image_params': { - 'FooImage': 't/foo:latest', - 'BarImage': 't/bar:latest', - 'BazImage': 't/baz:latest', - 'BinkImage': 't/bink:latest' - }, - 'upload_data': [] - }, { - 'image_params': { - 'BarImage': 't/bar:1.0', - 'BazImage': 't/baz:1.0' - }, - 'upload_data': [{ - 'imagename': 't/bar:1.0', - 'push_destination': '192.0.2.1:8787' - }, { - 'imagename': 't/baz:1.0', - 'push_destination': '192.0.2.1:8787' - }] - }, - ] - - image_params = kb.container_images_prepare_multi(env, roles_data, True, - lock=mock_lock) - - mock_cip.assert_has_calls([ - mock.call( - excludes=None, - includes=None, - mapping_args=mapping_args_no_tag, - output_env_file='image_params', - output_images_file='upload_data', - pull_source=None, - push_destination=None, - service_filter=None, - tag_from_label='foo', - append_tag=mock.ANY, - modify_role=None, - modify_only_with_labels=None, - modify_only_with_source=None, - modify_vars=None, - mirrors={}, - registry_credentials=None, - multi_arch=False, - lock=mock_lock - ), - mock.call( - excludes=['nova', 'neutron'], - includes=None, - mapping_args=mapping_args, - output_env_file='image_params', - output_images_file='upload_data', - pull_source=None, - push_destination='192.0.2.1:8787', - service_filter=None, - tag_from_label=None, - append_tag=mock.ANY, - modify_role='add-foo-plugin', - modify_only_with_labels=['kolla_version'], - modify_only_with_source=None, - modify_vars={'foo_version': '1.0.1'}, - mirrors={}, - registry_credentials=None, - multi_arch=False, - lock=mock_lock - ) - ]) - - self.assertEqual( - { - 'BarImage': 't/bar:1.0', - 'BazImage': 't/baz:1.0', - 'BinkImage': 't/bink:latest', - 'FooImage': 't/foo:latest' - }, - image_params - ) - - def test_set_neutron_driver(self): - mapping_args = {} - kb.set_neutron_driver(None, mapping_args) - self.assertEqual('ovn', mapping_args['neutron_driver']) - - mapping_args = {} - kb.set_neutron_driver({}, mapping_args) - self.assertEqual('ovn', mapping_args['neutron_driver']) - - mapping_args = {} - kb.set_neutron_driver( - {'NeutronMechanismDrivers': ['sriovnicswitch', 'openvswitch']}, - mapping_args - ) - self.assertEqual('other', mapping_args['neutron_driver']) - - mapping_args = {} - kb.set_neutron_driver( - {'NeutronMechanismDrivers': ['ovn']}, - mapping_args - ) - self.assertEqual('ovn', mapping_args['neutron_driver']) diff --git a/tripleo_common/tests/inventory_data/cell1_dynamic.json b/tripleo_common/tests/inventory_data/cell1_dynamic.json deleted file mode 100644 index 0b8f111c8..000000000 --- a/tripleo_common/tests/inventory_data/cell1_dynamic.json +++ /dev/null @@ -1,362 +0,0 @@ -{ - "CellController": { - "hosts": [ - "cell1-cellcontrol-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "2a668e02-96b3-48a5-9cf2-7bde46830e23", - "serial": "1", - "tripleo_role_name": "CellController", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "Compute": { - "hosts": [ - "cell1-compute-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - 
"bootstrap_server_id": "2a668e02-96b3-48a5-9cf2-7bde46830e23", - "serial": "1", - "tripleo_role_name": "Compute", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "Undercloud": { - "hosts": [ - "undercloud" - ], - "vars": { - "ansible_connection": "local", - "ansible_host": "localhost", - "ansible_python_interpreter": "/usr/bin/python2", - "ansible_remote_tmp": "/tmp/ansible-${USER}", - "auth_url": "https://192.168.24.2:13000", - "cacert": "/etc/pki/ca-trust/source/anchors/cm-local-ca.pem", - "os_auth_token": "gAAAAABedRLI4L-0UK1i3r0lkHVPFeGE2FB40mk7tjWw_US3nwQvzZlwKPM_uCq1wYeBqkCLwiXW61BrZswCi9M3mI-6HeniTi9qV7nXUyLHrWw1Kh5woAEGPyjmdiMiTh_P0WFKobYqXdP0oFwcN_LWr-mnYE1YWluvX4qKeUEeIOXHVj8OtMg", - "overcloud_admin_password": "9RDyelRD5PT5Jk6q4efjYG6Es", - "overcloud_horizon_url": "https://10.0.0.5:443/dashboard", - "overcloud_keystone_url": "https://10.0.0.5:13000", - "plan": "cell1", - "project_name": "admin", - "undercloud_service_list": [ - "tripleo_ironic_conductor" - ], - "undercloud_swift_url": "https://192.168.24.2:13808/v1/AUTH_b0d47705b94c486889fd2b26ce343763", - "username": "admin" - } - }, - "_meta": { - "hostvars": { - "cell1-cellcontrol-0": { - "ansible_host": "192.168.24.29", - "canonical_hostname": "cell1-cellcontrol-0.localdomain", - "ctlplane_hostname": "cell1-cellcontrol-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.29", - "deploy_server_id": "2a668e02-96b3-48a5-9cf2-7bde46830e23", - "external_hostname": "cell1-cellcontrol-0.external.localdomain", - "external_ip": "10.0.0.38", - "internal_api_hostname": "cell1-cellcontrol-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.119", - "management_ip": "192.168.24.29", - "storage_hostname": "cell1-cellcontrol-0.storage.localdomain", - "storage_ip": "172.16.1.167", - "storage_mgmt_hostname": "cell1-cellcontrol-0.storagemgmt.localdomain", - "storage_mgmt_ip": "172.16.3.183", - "tenant_hostname": "cell1-cellcontrol-0.tenant.localdomain", - "tenant_ip": "172.16.0.125" - }, - "cell1-compute-0": { - "ansible_host": "192.168.24.10", - "canonical_hostname": "cell1-compute-0.localdomain", - "ctlplane_hostname": "cell1-compute-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.10", - "deploy_server_id": "5b31842e-1118-4961-95cf-47d7a326e839", - "external_ip": "192.168.24.10", - "internal_api_hostname": "cell1-compute-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.18", - "management_ip": "192.168.24.10", - "storage_hostname": "cell1-compute-0.storage.localdomain", - "storage_ip": "172.16.1.218", - "storage_mgmt_ip": "192.168.24.10", - "tenant_hostname": "cell1-compute-0.tenant.localdomain", - "tenant_ip": "172.16.0.27" - } - } - }, - "cell1": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "container_cli": "podman", - "ctlplane_vip": "192.168.24.21", - "external_vip": "10.0.0.6", - "internal_api_vip": "172.16.2.11", - "redis_vip": "192.168.24.21", - "storage_mgmt_vip": "172.16.3.26", - "storage_vip": "172.16.1.131" - } - }, - "boot_params_service": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ca_certs": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "allovercloud": { - "children": [ - "cell1" - ] - }, - "certmonger_user": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "chrony": { - "children": [ - "CellController", 
- "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "container_image_prepare": { - "children": [ - "CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "haproxy": { - "children": [ - "CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "iscsid": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "kernel": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "logrotate_crond": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "mysql": { - "children": [ - "CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "mysql_client": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_compute": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_conductor": { - "children": [ - "CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_libvirt": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_libvirt_guests": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_metadata": { - "children": [ - "CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_migration_target": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_vnc_proxy": { - "children": [ - "CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "oslo_messaging_rpc": { - "children": [ - "CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_controller": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_metadata": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "podman": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "snmp": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "sshd": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "timezone": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tripleo_firewall": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tripleo_packages": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tuned": { - "children": [ - "CellController", - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - } -} diff --git a/tripleo_common/tests/inventory_data/cell1_static.yaml b/tripleo_common/tests/inventory_data/cell1_static.yaml deleted file mode 100644 index b464df593..000000000 --- a/tripleo_common/tests/inventory_data/cell1_static.yaml +++ /dev/null @@ -1,188 +0,0 @@ -Undercloud: - hosts: - undercloud: {} - vars: - ansible_connection: local - ansible_host: localhost - ansible_python_interpreter: /usr/bin/python2 - ansible_remote_tmp: /tmp/ansible-${USER} - auth_url: https://192.168.24.2:13000 - cacert: /etc/pki/ca-trust/source/anchors/cm-local-ca.pem - os_auth_token: 
gAAAAABedRLI4L-0UK1i3r0lkHVPFeGE2FB40mk7tjWw_US3nwQvzZlwKPM_uCq1wYeBqkCLwiXW61BrZswCi9M3mI-6HeniTi9qV7nXUyLHrWw1Kh5woAEGPyjmdiMiTh_P0WFKobYqXdP0oFwcN_LWr-mnYE1YWluvX4qKeUEeIOXHVj8OtMg - overcloud_admin_password: 9RDyelRD5PT5Jk6q4efjYG6Es - overcloud_horizon_url: https://10.0.0.5:443/dashboard - overcloud_keystone_url: https://10.0.0.5:13000 - plan: cell1 - project_name: admin - undercloud_service_list: [tripleo_ironic_conductor] - undercloud_swift_url: https://192.168.24.2:13808/v1/AUTH_b0d47705b94c486889fd2b26ce343763 - username: admin -CellController: - hosts: - cell1-cellcontrol-0: {ansible_host: 192.168.24.29, canonical_hostname: cell1-cellcontrol-0.localdomain, - ctlplane_hostname: cell1-cellcontrol-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.29, - deploy_server_id: 2a668e02-96b3-48a5-9cf2-7bde46830e23, external_hostname: cell1-cellcontrol-0.external.localdomain, - external_ip: 10.0.0.38, internal_api_hostname: cell1-cellcontrol-0.internalapi.localdomain, - internal_api_ip: 172.16.2.119, management_ip: 192.168.24.29, storage_hostname: cell1-cellcontrol-0.storage.localdomain, - storage_ip: 172.16.1.167, storage_mgmt_hostname: cell1-cellcontrol-0.storagemgmt.localdomain, - storage_mgmt_ip: 172.16.3.183, tenant_hostname: cell1-cellcontrol-0.tenant.localdomain, - tenant_ip: 172.16.0.125} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 2a668e02-96b3-48a5-9cf2-7bde46830e23 - serial: '1' - tripleo_role_name: CellController - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - storage_mgmt, tenant] -Compute: - hosts: - cell1-compute-0: {ansible_host: 192.168.24.10, canonical_hostname: cell1-compute-0.localdomain, - ctlplane_hostname: cell1-compute-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.10, - deploy_server_id: 5b31842e-1118-4961-95cf-47d7a326e839, external_ip: 192.168.24.10, - internal_api_hostname: cell1-compute-0.internalapi.localdomain, internal_api_ip: 172.16.2.18, - management_ip: 192.168.24.10, storage_hostname: cell1-compute-0.storage.localdomain, - storage_ip: 172.16.1.218, storage_mgmt_ip: 192.168.24.10, tenant_hostname: cell1-compute-0.tenant.localdomain, - tenant_ip: 172.16.0.27} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 2a668e02-96b3-48a5-9cf2-7bde46830e23 - serial: '1' - tripleo_role_name: Compute - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - storage_mgmt, tenant] -cell1: - children: - CellController: {} - Compute: {} - vars: {container_cli: podman, ctlplane_vip: 192.168.24.21, external_vip: 10.0.0.6, - internal_api_vip: 172.16.2.11, redis_vip: 192.168.24.21, storage_mgmt_vip: 172.16.3.26, - storage_vip: 172.16.1.131} -allovercloud: - children: - cell1: {} -kernel: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -mysql: - children: - CellController: {} - vars: {ansible_ssh_user: heat-admin} -nova_libvirt: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -container_image_prepare: - children: - CellController: {} - vars: {ansible_ssh_user: heat-admin} -timezone: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -iscsid: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -nova_libvirt_guests: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -tripleo_firewall: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -snmp: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} 
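The cell1_static.yaml fixture being removed here is a static Ansible inventory: every top-level key is a group, holding either concrete hosts (with per-host vars) or children pointing at other groups, plus a vars mapping shared by the whole group. A minimal sketch of walking such a file, assuming only the structure shown above (the file name is illustrative):

import yaml

with open('cell1_static.yaml') as f:
    inventory = yaml.safe_load(f)

for group, data in sorted(inventory.items()):
    # Groups either name hosts directly or delegate to child groups.
    hosts = sorted(data.get('hosts', {}))
    children = sorted(data.get('children', {}))
    ssh_user = data.get('vars', {}).get('ansible_ssh_user')
    print(group, hosts or children, ssh_user)
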
-certmonger_user: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -boot_params_service: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -oslo_messaging_rpc: - children: - CellController: {} - vars: {ansible_ssh_user: heat-admin} -nova_vnc_proxy: - children: - CellController: {} - vars: {ansible_ssh_user: heat-admin} -nova_metadata: - children: - CellController: {} - vars: {ansible_ssh_user: heat-admin} -mysql_client: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -nova_migration_target: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -podman: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -tripleo_packages: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -nova_conductor: - children: - CellController: {} - vars: {ansible_ssh_user: heat-admin} -nova_compute: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -logrotate_crond: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -haproxy: - children: - CellController: {} - vars: {ansible_ssh_user: heat-admin} -sshd: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -ovn_metadata: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -tuned: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -chrony: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -ca_certs: - children: - CellController: {} - Compute: {} - vars: {ansible_ssh_user: heat-admin} -ovn_controller: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} diff --git a/tripleo_common/tests/inventory_data/list_overcloud.json b/tripleo_common/tests/inventory_data/list_overcloud.json deleted file mode 100644 index 5bd07536d..000000000 --- a/tripleo_common/tests/inventory_data/list_overcloud.json +++ /dev/null @@ -1,826 +0,0 @@ -{ - "_meta": { - "hostvars": { - "overcloud-controller-0": { - "ansible_host": "192.168.24.12", - "canonical_hostname": "overcloud-controller-0.localdomain", - "ctlplane_hostname": "overcloud-controller-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.12", - "deploy_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "external_hostname": "overcloud-controller-0.external.localdomain", - "external_ip": "10.0.0.10", - "internal_api_hostname": "overcloud-controller-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.199", - "management_ip": "192.168.24.12", - "storage_hostname": "overcloud-controller-0.storage.localdomain", - "storage_ip": "172.16.1.13", - "storage_mgmt_hostname": "overcloud-controller-0.storagemgmt.localdomain", - "storage_mgmt_ip": "172.16.3.89", - "tenant_hostname": "overcloud-controller-0.tenant.localdomain", - "tenant_ip": "172.16.0.167" - }, - "overcloud-novacompute-0": { - "ansible_host": "192.168.24.13", - "canonical_hostname": "overcloud-novacompute-0.localdomain", - "ctlplane_hostname": "overcloud-novacompute-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.13", - "deploy_server_id": "c5c20c87-60f8-4dc9-a0e8-1f185c4b8a8c", - "external_ip": "192.168.24.13", - "internal_api_hostname": "overcloud-novacompute-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.241", - "management_ip": "192.168.24.13", - "storage_hostname": "overcloud-novacompute-0.storage.localdomain", - "storage_ip": "172.16.1.235", - "storage_mgmt_ip": "192.168.24.13", - "tenant_hostname": 
"overcloud-novacompute-0.tenant.localdomain", - "tenant_ip": "172.16.0.242" - } - } - }, - "Undercloud": { - "hosts": [ - "undercloud" - ], - "vars": { - "ansible_connection": "local", - "ansible_host": "localhost", - "ansible_python_interpreter": "/usr/bin/python2", - "ansible_remote_tmp": "/tmp/ansible-${USER}", - "auth_url": "https://192.168.24.2:13000", - "cacert": "/etc/pki/ca-trust/source/anchors/cm-local-ca.pem", - "os_auth_token": "gAAAAABedRLI4L-0UK1i3r0lkHVPFeGE2FB40mk7tjWw_US3nwQvzZlwKPM_uCq1wYeBqkCLwiXW61BrZswCi9M3mI-6HeniTi9qV7nXUyLHrWw1Kh5woAEGPyjmdiMiTh_P0WFKobYqXdP0oFwcN_LWr-mnYE1YWluvX4qKeUEeIOXHVj8OtMg", - "overcloud_admin_password": "9RDyelRD5PT5Jk6q4efjYG6Es", - "overcloud_horizon_url": "https://10.0.0.5:443/dashboard", - "overcloud_keystone_url": "https://10.0.0.5:13000", - "plan": "overcloud", - "project_name": "admin", - "undercloud_service_list": [ - "tripleo_ironic_conductor" - ], - "undercloud_swift_url": "https://192.168.24.2:13808/v1/AUTH_b0d47705b94c486889fd2b26ce343763", - "username": "admin", - "plans": [ - "overcloud" - ] - } - }, - "Compute": { - "children": [ - "overcloud_Compute" - ] - }, - "overcloud_Compute": { - "hosts": [ - "overcloud-novacompute-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "serial": "1", - "tripleo_role_name": "Compute", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "Controller": { - "children": [ - "overcloud_Controller" - ] - }, - "overcloud_Controller": { - "hosts": [ - "overcloud-controller-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "serial": "1", - "tripleo_role_name": "Controller", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "allovercloud": { - "children": [ - "overcloud_allovercloud" - ] - }, - "overcloud_overcloud": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "container_cli": "podman", - "ctlplane_vip": "192.168.24.7", - "external_vip": "10.0.0.5", - "internal_api_vip": "172.16.2.164", - "redis_vip": "172.16.2.196", - "storage_mgmt_vip": "172.16.3.44", - "storage_vip": "172.16.1.147" - } - }, - "overcloud": { - "children": [ - "overcloud_allovercloud" - ] - }, - "overcloud_allovercloud": { - "children": [ - "overcloud_overcloud" - ] - }, - "boot_params_service": { - "children": [ - "overcloud_boot_params_service" - ] - }, - "overcloud_boot_params_service": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ca_certs": { - "children": [ - "overcloud_ca_certs" - ] - }, - "overcloud_ca_certs": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "certmonger_user": { - "children": [ - "overcloud_certmonger_user" - ] - }, - "overcloud_certmonger_user": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "chrony": { - "children": [ - "overcloud_chrony" - ] - }, - "overcloud_chrony": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cinder_api": { - "children": [ - "overcloud_cinder_api" - ] - }, - "overcloud_cinder_api": { - 
"children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cinder_scheduler": { - "children": [ - "overcloud_cinder_scheduler" - ] - }, - "overcloud_cinder_scheduler": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cinder_volume": { - "children": [ - "overcloud_cinder_volume" - ] - }, - "overcloud_cinder_volume": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "container_image_prepare": { - "children": [ - "overcloud_container_image_prepare" - ] - }, - "overcloud_container_image_prepare": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "glance_api": { - "children": [ - "overcloud_glance_api" - ] - }, - "overcloud_glance_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "haproxy": { - "children": [ - "overcloud_haproxy" - ] - }, - "overcloud_haproxy": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_api": { - "children": [ - "overcloud_heat_api" - ] - }, - "overcloud_heat_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_api_cfn": { - "children": [ - "overcloud_heat_api_cfn" - ] - }, - "overcloud_heat_api_cfn": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_api_cloudwatch_disabled": { - "children": [ - "overcloud_heat_api_cloudwatch_disabled" - ] - }, - "overcloud_heat_api_cloudwatch_disabled": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_engine": { - "children": [ - "overcloud_heat_engine" - ] - }, - "overcloud_heat_engine": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "horizon": { - "children": [ - "overcloud_horizon" - ] - }, - "overcloud_horizon": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "iscsid": { - "children": [ - "overcloud_iscsid" - ] - }, - "overcloud_iscsid": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "kernel": { - "children": [ - "overcloud_kernel" - ] - }, - "overcloud_kernel": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "keystone": { - "children": [ - "overcloud_keystone" - ] - }, - "overcloud_keystone": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "keystone_admin_api": { - "children": [ - "overcloud_keystone_admin_api" - ] - }, - "overcloud_keystone_admin_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "keystone_public_api": { - "children": [ - "overcloud_keystone_public_api" - ] - }, - "overcloud_keystone_public_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "logrotate_crond": { - "children": [ - "overcloud_logrotate_crond" - ] - }, - "overcloud_logrotate_crond": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "memcached": { - "children": [ - "overcloud_memcached" - ] - }, - 
"overcloud_memcached": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "mysql": { - "children": [ - "overcloud_mysql" - ] - }, - "overcloud_mysql": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "mysql_client": { - "children": [ - "overcloud_mysql_client" - ] - }, - "overcloud_mysql_client": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "neutron_api": { - "children": [ - "overcloud_neutron_api" - ] - }, - "overcloud_neutron_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "neutron_plugin_ml2_ovn": { - "children": [ - "overcloud_neutron_plugin_ml2_ovn" - ] - }, - "overcloud_neutron_plugin_ml2_ovn": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_api": { - "children": [ - "overcloud_nova_api" - ] - }, - "overcloud_nova_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_compute": { - "children": [ - "overcloud_nova_compute" - ] - }, - "overcloud_nova_compute": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_conductor": { - "children": [ - "overcloud_nova_conductor" - ] - }, - "overcloud_nova_conductor": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_libvirt": { - "children": [ - "overcloud_nova_libvirt" - ] - }, - "overcloud_nova_libvirt": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_libvirt_guests": { - "children": [ - "overcloud_nova_libvirt_guests" - ] - }, - "overcloud_nova_libvirt_guests": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_metadata": { - "children": [ - "overcloud_nova_metadata" - ] - }, - "overcloud_nova_metadata": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_migration_target": { - "children": [ - "overcloud_nova_migration_target" - ] - }, - "overcloud_nova_migration_target": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_scheduler": { - "children": [ - "overcloud_nova_scheduler" - ] - }, - "overcloud_nova_scheduler": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_vnc_proxy": { - "children": [ - "overcloud_nova_vnc_proxy" - ] - }, - "overcloud_nova_vnc_proxy": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "oslo_messaging_notify": { - "children": [ - "overcloud_oslo_messaging_notify" - ] - }, - "overcloud_oslo_messaging_notify": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "oslo_messaging_rpc": { - "children": [ - "overcloud_oslo_messaging_rpc" - ] - }, - "overcloud_oslo_messaging_rpc": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_controller": { - "children": [ - "overcloud_ovn_controller" - ] - }, - "overcloud_ovn_controller": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_dbs": { 
- "children": [ - "overcloud_ovn_dbs" - ] - }, - "overcloud_ovn_dbs": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_metadata": { - "children": [ - "overcloud_ovn_metadata" - ] - }, - "overcloud_ovn_metadata": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "placement": { - "children": [ - "overcloud_placement" - ] - }, - "overcloud_placement": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "podman": { - "children": [ - "overcloud_podman" - ] - }, - "overcloud_podman": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "snmp": { - "children": [ - "overcloud_snmp" - ] - }, - "overcloud_snmp": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "sshd": { - "children": [ - "overcloud_sshd" - ] - }, - "overcloud_sshd": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "swift_proxy": { - "children": [ - "overcloud_swift_proxy" - ] - }, - "overcloud_swift_proxy": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "swift_ringbuilder": { - "children": [ - "overcloud_swift_ringbuilder" - ] - }, - "overcloud_swift_ringbuilder": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "swift_storage": { - "children": [ - "overcloud_swift_storage" - ] - }, - "overcloud_swift_storage": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "timezone": { - "children": [ - "overcloud_timezone" - ] - }, - "overcloud_timezone": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tripleo_firewall": { - "children": [ - "overcloud_tripleo_firewall" - ] - }, - "overcloud_tripleo_firewall": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tripleo_packages": { - "children": [ - "overcloud_tripleo_packages" - ] - }, - "overcloud_tripleo_packages": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tuned": { - "children": [ - "overcloud_tuned" - ] - }, - "overcloud_tuned": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - } -} diff --git a/tripleo_common/tests/inventory_data/merged_dynamic.json b/tripleo_common/tests/inventory_data/merged_dynamic.json deleted file mode 100644 index 2cd7fb62b..000000000 --- a/tripleo_common/tests/inventory_data/merged_dynamic.json +++ /dev/null @@ -1,1204 +0,0 @@ -{ - "CellController": { - "children": [ - "cell1_CellController" - ] - }, - "Compute": { - "children": [ - "cell1_Compute", - "overcloud_Compute" - ] - }, - "Controller": { - "children": [ - "overcloud_Controller" - ] - }, - "Undercloud": { - "hosts": [ - "undercloud" - ], - "vars": { - "ansible_connection": "local", - "ansible_host": "localhost", - "ansible_python_interpreter": "/usr/bin/python2", - "ansible_remote_tmp": "/tmp/ansible-${USER}", - "auth_url": "https://192.168.24.2:13000", - "cacert": "/etc/pki/ca-trust/source/anchors/cm-local-ca.pem", - 
"os_auth_token": "gAAAAABedRLI4L-0UK1i3r0lkHVPFeGE2FB40mk7tjWw_US3nwQvzZlwKPM_uCq1wYeBqkCLwiXW61BrZswCi9M3mI-6HeniTi9qV7nXUyLHrWw1Kh5woAEGPyjmdiMiTh_P0WFKobYqXdP0oFwcN_LWr-mnYE1YWluvX4qKeUEeIOXHVj8OtMg", - "overcloud_admin_password": "9RDyelRD5PT5Jk6q4efjYG6Es", - "overcloud_horizon_url": "https://10.0.0.5:443/dashboard", - "overcloud_keystone_url": "https://10.0.0.5:13000", - "plans": [ - "cell1", - "overcloud" - ], - "project_name": "admin", - "undercloud_service_list": [ - "tripleo_ironic_conductor" - ], - "undercloud_swift_url": "https://192.168.24.2:13808/v1/AUTH_b0d47705b94c486889fd2b26ce343763", - "username": "admin" - } - }, - "_meta": { - "hostvars": { - "cell1-cellcontrol-0": { - "ansible_host": "192.168.24.29", - "canonical_hostname": "cell1-cellcontrol-0.localdomain", - "ctlplane_hostname": "cell1-cellcontrol-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.29", - "deploy_server_id": "2a668e02-96b3-48a5-9cf2-7bde46830e23", - "external_hostname": "cell1-cellcontrol-0.external.localdomain", - "external_ip": "10.0.0.38", - "internal_api_hostname": "cell1-cellcontrol-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.119", - "management_ip": "192.168.24.29", - "storage_hostname": "cell1-cellcontrol-0.storage.localdomain", - "storage_ip": "172.16.1.167", - "storage_mgmt_hostname": "cell1-cellcontrol-0.storagemgmt.localdomain", - "storage_mgmt_ip": "172.16.3.183", - "tenant_hostname": "cell1-cellcontrol-0.tenant.localdomain", - "tenant_ip": "172.16.0.125" - }, - "cell1-compute-0": { - "ansible_host": "192.168.24.10", - "canonical_hostname": "cell1-compute-0.localdomain", - "ctlplane_hostname": "cell1-compute-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.10", - "deploy_server_id": "5b31842e-1118-4961-95cf-47d7a326e839", - "external_ip": "192.168.24.10", - "internal_api_hostname": "cell1-compute-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.18", - "management_ip": "192.168.24.10", - "storage_hostname": "cell1-compute-0.storage.localdomain", - "storage_ip": "172.16.1.218", - "storage_mgmt_ip": "192.168.24.10", - "tenant_hostname": "cell1-compute-0.tenant.localdomain", - "tenant_ip": "172.16.0.27" - }, - "overcloud-controller-0": { - "ansible_host": "192.168.24.12", - "canonical_hostname": "overcloud-controller-0.localdomain", - "ctlplane_hostname": "overcloud-controller-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.12", - "deploy_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "external_hostname": "overcloud-controller-0.external.localdomain", - "external_ip": "10.0.0.10", - "internal_api_hostname": "overcloud-controller-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.199", - "management_ip": "192.168.24.12", - "storage_hostname": "overcloud-controller-0.storage.localdomain", - "storage_ip": "172.16.1.13", - "storage_mgmt_hostname": "overcloud-controller-0.storagemgmt.localdomain", - "storage_mgmt_ip": "172.16.3.89", - "tenant_hostname": "overcloud-controller-0.tenant.localdomain", - "tenant_ip": "172.16.0.167" - }, - "overcloud-novacompute-0": { - "ansible_host": "192.168.24.13", - "canonical_hostname": "overcloud-novacompute-0.localdomain", - "ctlplane_hostname": "overcloud-novacompute-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.13", - "deploy_server_id": "c5c20c87-60f8-4dc9-a0e8-1f185c4b8a8c", - "external_ip": "192.168.24.13", - "internal_api_hostname": "overcloud-novacompute-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.241", - "management_ip": "192.168.24.13", - "storage_hostname": 
"overcloud-novacompute-0.storage.localdomain", - "storage_ip": "172.16.1.235", - "storage_mgmt_ip": "192.168.24.13", - "tenant_hostname": "overcloud-novacompute-0.tenant.localdomain", - "tenant_ip": "172.16.0.242" - } - } - }, - "allovercloud": { - "children": [ - "cell1_allovercloud", - "overcloud_allovercloud" - ] - }, - "boot_params_service": { - "children": [ - "cell1_boot_params_service", - "overcloud_boot_params_service" - ] - }, - "ca_certs": { - "children": [ - "cell1_ca_certs", - "overcloud_ca_certs" - ] - }, - "cell1": { - "children": [ - "cell1_allovercloud" - ] - }, - "cell1_allovercloud": { - "children": [ - "cell1_cell1" - ] - }, - "cell1_CellController": { - "hosts": [ - "cell1-cellcontrol-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "2a668e02-96b3-48a5-9cf2-7bde46830e23", - "serial": "1", - "tripleo_role_name": "CellController", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "cell1_Compute": { - "hosts": [ - "cell1-compute-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "2a668e02-96b3-48a5-9cf2-7bde46830e23", - "serial": "1", - "tripleo_role_name": "Compute", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "cell1_cell1": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "container_cli": "podman", - "ctlplane_vip": "192.168.24.21", - "external_vip": "10.0.0.6", - "internal_api_vip": "172.16.2.11", - "redis_vip": "192.168.24.21", - "storage_mgmt_vip": "172.16.3.26", - "storage_vip": "172.16.1.131" - } - - }, - "cell1_boot_params_service": { - "children": [ - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_ca_certs": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_allovercloud": { - "children": [ - "cell1_cell1" - ] - }, - "cell1_certmonger_user": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_chrony": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_container_image_prepare": { - "children": [ - "cell1_CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_haproxy": { - "children": [ - "cell1_CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_iscsid": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_kernel": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_logrotate_crond": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_mysql": { - "children": [ - "cell1_CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_mysql_client": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_nova_compute": { - "children": [ - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_nova_conductor": { - "children": [ - "cell1_CellController" - ], - "vars": { - 
"ansible_ssh_user": "heat-admin" - } - }, - "cell1_nova_libvirt": { - "children": [ - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_nova_libvirt_guests": { - "children": [ - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_nova_metadata": { - "children": [ - "cell1_CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_nova_migration_target": { - "children": [ - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_nova_vnc_proxy": { - "children": [ - "cell1_CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_oslo_messaging_rpc": { - "children": [ - "cell1_CellController" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_ovn_controller": { - "children": [ - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_ovn_metadata": { - "children": [ - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_podman": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_snmp": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_sshd": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_timezone": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_tripleo_firewall": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_tripleo_packages": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cell1_tuned": { - "children": [ - "cell1_CellController", - "cell1_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "certmonger_user": { - "children": [ - "cell1_certmonger_user", - "overcloud_certmonger_user" - ] - }, - "chrony": { - "children": [ - "cell1_chrony", - "overcloud_chrony" - ] - }, - "cinder_api": { - "children": [ - "overcloud_cinder_api" - ] - }, - "cinder_scheduler": { - "children": [ - "overcloud_cinder_scheduler" - ] - }, - "cinder_volume": { - "children": [ - "overcloud_cinder_volume" - ] - }, - "container_image_prepare": { - "children": [ - "cell1_container_image_prepare", - "overcloud_container_image_prepare" - ] - }, - "glance_api": { - "children": [ - "overcloud_glance_api" - ] - }, - "haproxy": { - "children": [ - "cell1_haproxy", - "overcloud_haproxy" - ] - }, - "heat_api": { - "children": [ - "overcloud_heat_api" - ] - }, - "heat_api_cfn": { - "children": [ - "overcloud_heat_api_cfn" - ] - }, - "heat_api_cloudwatch_disabled": { - "children": [ - "overcloud_heat_api_cloudwatch_disabled" - ] - }, - "heat_engine": { - "children": [ - "overcloud_heat_engine" - ] - }, - "horizon": { - "children": [ - "overcloud_horizon" - ] - }, - "iscsid": { - "children": [ - "cell1_iscsid", - "overcloud_iscsid" - ] - }, - "kernel": { - "children": [ - "cell1_kernel", - "overcloud_kernel" - ] - }, - "keystone": { - "children": [ - "overcloud_keystone" - ] - }, - "keystone_admin_api": { - "children": [ - "overcloud_keystone_admin_api" - ] - }, - "keystone_public_api": { - "children": [ - "overcloud_keystone_public_api" - ] - }, - "logrotate_crond": { - "children": [ - 
"cell1_logrotate_crond", - "overcloud_logrotate_crond" - ] - }, - "memcached": { - "children": [ - "overcloud_memcached" - ] - }, - "mysql": { - "children": [ - "cell1_mysql", - "overcloud_mysql" - ] - }, - "mysql_client": { - "children": [ - "cell1_mysql_client", - "overcloud_mysql_client" - ] - }, - "neutron_api": { - "children": [ - "overcloud_neutron_api" - ] - }, - "neutron_plugin_ml2_ovn": { - "children": [ - "overcloud_neutron_plugin_ml2_ovn" - ] - }, - "nova_api": { - "children": [ - "overcloud_nova_api" - ] - }, - "nova_compute": { - "children": [ - "cell1_nova_compute", - "overcloud_nova_compute" - ] - }, - "nova_conductor": { - "children": [ - "cell1_nova_conductor", - "overcloud_nova_conductor" - ] - }, - "nova_libvirt": { - "children": [ - "cell1_nova_libvirt", - "overcloud_nova_libvirt" - ] - }, - "nova_libvirt_guests": { - "children": [ - "cell1_nova_libvirt_guests", - "overcloud_nova_libvirt_guests" - ] - }, - "nova_metadata": { - "children": [ - "cell1_nova_metadata", - "overcloud_nova_metadata" - ] - }, - "nova_migration_target": { - "children": [ - "cell1_nova_migration_target", - "overcloud_nova_migration_target" - ] - }, - "nova_scheduler": { - "children": [ - "overcloud_nova_scheduler" - ] - }, - "nova_vnc_proxy": { - "children": [ - "cell1_nova_vnc_proxy", - "overcloud_nova_vnc_proxy" - ] - }, - "oslo_messaging_notify": { - "children": [ - "overcloud_oslo_messaging_notify" - ] - }, - "oslo_messaging_rpc": { - "children": [ - "cell1_oslo_messaging_rpc", - "overcloud_oslo_messaging_rpc" - ] - }, - "overcloud": { - "children": [ - "overcloud_allovercloud" - ] - }, - "overcloud_Compute": { - "hosts": [ - "overcloud-novacompute-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "serial": "1", - "tripleo_role_name": "Compute", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "overcloud_Controller": { - "hosts": [ - "overcloud-controller-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "serial": "1", - "tripleo_role_name": "Controller", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "overcloud_overcloud": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "container_cli": "podman", - "ctlplane_vip": "192.168.24.7", - "external_vip": "10.0.0.5", - "internal_api_vip": "172.16.2.164", - "redis_vip": "172.16.2.196", - "storage_mgmt_vip": "172.16.3.44", - "storage_vip": "172.16.1.147" - } - }, - "overcloud_allovercloud": { - "children": [ - "overcloud_overcloud" - ] - }, - "overcloud_boot_params_service": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_ca_certs": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_certmonger_user": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_chrony": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_cinder_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, 
- "overcloud_cinder_scheduler": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_cinder_volume": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_container_image_prepare": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_glance_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_haproxy": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_heat_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_heat_api_cfn": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_heat_api_cloudwatch_disabled": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_heat_engine": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_horizon": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_iscsid": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_kernel": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_keystone": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_keystone_admin_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_keystone_public_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_logrotate_crond": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_memcached": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_mysql": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_mysql_client": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_neutron_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_neutron_plugin_ml2_ovn": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_api": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_compute": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_conductor": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_libvirt": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_libvirt_guests": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - 
"overcloud_nova_metadata": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_migration_target": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_scheduler": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_nova_vnc_proxy": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_oslo_messaging_notify": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_oslo_messaging_rpc": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_ovn_controller": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_ovn_dbs": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_ovn_metadata": { - "children": [ - "overcloud_Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_placement": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_podman": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_snmp": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_sshd": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_swift_proxy": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_swift_ringbuilder": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_swift_storage": { - "children": [ - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_timezone": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_tripleo_firewall": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_tripleo_packages": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "overcloud_tuned": { - "children": [ - "overcloud_Compute", - "overcloud_Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_controller": { - "children": [ - "cell1_ovn_controller", - "overcloud_ovn_controller" - ] - }, - "ovn_dbs": { - "children": [ - "overcloud_ovn_dbs" - ] - }, - "ovn_metadata": { - "children": [ - "cell1_ovn_metadata", - "overcloud_ovn_metadata" - ] - }, - "placement": { - "children": [ - "overcloud_placement" - ] - }, - "podman": { - "children": [ - "cell1_podman", - "overcloud_podman" - ] - }, - "snmp": { - "children": [ - "cell1_snmp", - "overcloud_snmp" - ] - }, - "sshd": { - "children": [ - "cell1_sshd", - "overcloud_sshd" - ] - }, - "swift_proxy": { - "children": [ - "overcloud_swift_proxy" - ] - }, - "swift_ringbuilder": { - "children": [ - "overcloud_swift_ringbuilder" - ] - }, - "swift_storage": { - "children": 
[ - "overcloud_swift_storage" - ] - }, - "timezone": { - "children": [ - "cell1_timezone", - "overcloud_timezone" - ] - }, - "tripleo_firewall": { - "children": [ - "cell1_tripleo_firewall", - "overcloud_tripleo_firewall" - ] - }, - "tripleo_packages": { - "children": [ - "cell1_tripleo_packages", - "overcloud_tripleo_packages" - ] - }, - "tuned": { - "children": [ - "cell1_tuned", - "overcloud_tuned" - ] - } -} diff --git a/tripleo_common/tests/inventory_data/merged_static.yaml b/tripleo_common/tests/inventory_data/merged_static.yaml deleted file mode 100644 index beb5d1e38..000000000 --- a/tripleo_common/tests/inventory_data/merged_static.yaml +++ /dev/null @@ -1,741 +0,0 @@ -Undercloud: - hosts: - undercloud: {} - vars: - ansible_connection: local - ansible_host: localhost - ansible_python_interpreter: /usr/bin/python2 - ansible_remote_tmp: /tmp/ansible-${USER} - auth_url: https://192.168.24.2:13000 - cacert: /etc/pki/ca-trust/source/anchors/cm-local-ca.pem - os_auth_token: gAAAAABedRLI4L-0UK1i3r0lkHVPFeGE2FB40mk7tjWw_US3nwQvzZlwKPM_uCq1wYeBqkCLwiXW61BrZswCi9M3mI-6HeniTi9qV7nXUyLHrWw1Kh5woAEGPyjmdiMiTh_P0WFKobYqXdP0oFwcN_LWr-mnYE1YWluvX4qKeUEeIOXHVj8OtMg - overcloud_admin_password: 9RDyelRD5PT5Jk6q4efjYG6Es - overcloud_horizon_url: https://10.0.0.5:443/dashboard - overcloud_keystone_url: https://10.0.0.5:13000 - plans: [cell1, overcloud] - project_name: admin - undercloud_service_list: [tripleo_ironic_conductor] - undercloud_swift_url: https://192.168.24.2:13808/v1/AUTH_b0d47705b94c486889fd2b26ce343763 - username: admin -CellController: - children: - cell1_CellController: {} -cell1_CellController: - hosts: - cell1-cellcontrol-0: {ansible_host: 192.168.24.29, canonical_hostname: cell1-cellcontrol-0.localdomain, - ctlplane_hostname: cell1-cellcontrol-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.29, - deploy_server_id: 2a668e02-96b3-48a5-9cf2-7bde46830e23, external_hostname: cell1-cellcontrol-0.external.localdomain, - external_ip: 10.0.0.38, internal_api_hostname: cell1-cellcontrol-0.internalapi.localdomain, - internal_api_ip: 172.16.2.119, management_ip: 192.168.24.29, storage_hostname: cell1-cellcontrol-0.storage.localdomain, - storage_ip: 172.16.1.167, storage_mgmt_hostname: cell1-cellcontrol-0.storagemgmt.localdomain, - storage_mgmt_ip: 172.16.3.183, tenant_hostname: cell1-cellcontrol-0.tenant.localdomain, - tenant_ip: 172.16.0.125} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 2a668e02-96b3-48a5-9cf2-7bde46830e23 - serial: '1' - tripleo_role_name: CellController - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - storage_mgmt, tenant] -Compute: - children: - cell1_Compute: {} - overcloud_Compute: {} -cell1_Compute: - hosts: - cell1-compute-0: {ansible_host: 192.168.24.10, canonical_hostname: cell1-compute-0.localdomain, - ctlplane_hostname: cell1-compute-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.10, - deploy_server_id: 5b31842e-1118-4961-95cf-47d7a326e839, external_ip: 192.168.24.10, - internal_api_hostname: cell1-compute-0.internalapi.localdomain, internal_api_ip: 172.16.2.18, - management_ip: 192.168.24.10, storage_hostname: cell1-compute-0.storage.localdomain, - storage_ip: 172.16.1.218, storage_mgmt_ip: 192.168.24.10, tenant_hostname: cell1-compute-0.tenant.localdomain, - tenant_ip: 172.16.0.27} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 2a668e02-96b3-48a5-9cf2-7bde46830e23 - serial: '1' - tripleo_role_name: Compute - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - 
storage_mgmt, tenant] -allovercloud: - children: - cell1_allovercloud: {} - overcloud_allovercloud: {} -cell1_cell1: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - container_cli: podman - ctlplane_vip: 192.168.24.21 - external_vip: 10.0.0.6 - internal_api_vip: 172.16.2.11 - redis_vip: 192.168.24.21 - storage_mgmt_vip: 172.16.3.26 - storage_vip: 172.16.1.131 -cell1: - children: - cell1_allovercloud: {} -cell1_allovercloud: - children: - cell1_cell1: {} -kernel: - children: - cell1_kernel: {} - overcloud_kernel: {} -cell1_kernel: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -mysql: - children: - cell1_mysql: {} - overcloud_mysql: {} -cell1_mysql: - children: - cell1_CellController: {} - vars: - ansible_ssh_user: heat-admin -nova_libvirt: - children: - cell1_nova_libvirt: {} - overcloud_nova_libvirt: {} -cell1_nova_libvirt: - children: - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -container_image_prepare: - children: - cell1_container_image_prepare: {} - overcloud_container_image_prepare: {} -cell1_container_image_prepare: - children: - cell1_CellController: {} - vars: - ansible_ssh_user: heat-admin -timezone: - children: - cell1_timezone: {} - overcloud_timezone: {} -cell1_timezone: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -iscsid: - children: - cell1_iscsid: {} - overcloud_iscsid: {} -cell1_iscsid: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -nova_libvirt_guests: - children: - cell1_nova_libvirt_guests: {} - overcloud_nova_libvirt_guests: {} -cell1_nova_libvirt_guests: - children: - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -tripleo_firewall: - children: - cell1_tripleo_firewall: {} - overcloud_tripleo_firewall: {} -cell1_tripleo_firewall: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -snmp: - children: - cell1_snmp: {} - overcloud_snmp: {} -cell1_snmp: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -certmonger_user: - children: - cell1_certmonger_user: {} - overcloud_certmonger_user: {} -cell1_certmonger_user: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -boot_params_service: - children: - cell1_boot_params_service: {} - overcloud_boot_params_service: {} -cell1_boot_params_service: - children: - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -oslo_messaging_rpc: - children: - cell1_oslo_messaging_rpc: {} - overcloud_oslo_messaging_rpc: {} -cell1_oslo_messaging_rpc: - children: - cell1_CellController: {} - vars: - ansible_ssh_user: heat-admin -nova_vnc_proxy: - children: - cell1_nova_vnc_proxy: {} - overcloud_nova_vnc_proxy: {} -cell1_nova_vnc_proxy: - children: - cell1_CellController: {} - vars: - ansible_ssh_user: heat-admin -nova_metadata: - children: - cell1_nova_metadata: {} - overcloud_nova_metadata: {} -cell1_nova_metadata: - children: - cell1_CellController: {} - vars: - ansible_ssh_user: heat-admin -mysql_client: - children: - cell1_mysql_client: {} - overcloud_mysql_client: {} -cell1_mysql_client: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -nova_migration_target: - children: - cell1_nova_migration_target: {} - overcloud_nova_migration_target: {} -cell1_nova_migration_target: - children: - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -podman: 
- children: - cell1_podman: {} - overcloud_podman: {} -cell1_podman: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -tripleo_packages: - children: - cell1_tripleo_packages: {} - overcloud_tripleo_packages: {} -cell1_tripleo_packages: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -nova_conductor: - children: - cell1_nova_conductor: {} - overcloud_nova_conductor: {} -cell1_nova_conductor: - children: - cell1_CellController: {} - vars: - ansible_ssh_user: heat-admin -nova_compute: - children: - cell1_nova_compute: {} - overcloud_nova_compute: {} -cell1_nova_compute: - children: - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -logrotate_crond: - children: - cell1_logrotate_crond: {} - overcloud_logrotate_crond: {} -cell1_logrotate_crond: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -haproxy: - children: - cell1_haproxy: {} - overcloud_haproxy: {} -cell1_haproxy: - children: - cell1_CellController: {} - vars: - ansible_ssh_user: heat-admin -sshd: - children: - cell1_sshd: {} - overcloud_sshd: {} -cell1_sshd: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -ovn_metadata: - children: - cell1_ovn_metadata: {} - overcloud_ovn_metadata: {} -cell1_ovn_metadata: - children: - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -tuned: - children: - cell1_tuned: {} - overcloud_tuned: {} -cell1_tuned: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -chrony: - children: - cell1_chrony: {} - overcloud_chrony: {} -cell1_chrony: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -ca_certs: - children: - cell1_ca_certs: {} - overcloud_ca_certs: {} -cell1_ca_certs: - children: - cell1_CellController: {} - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -ovn_controller: - children: - cell1_ovn_controller: {} - overcloud_ovn_controller: {} -cell1_ovn_controller: - children: - cell1_Compute: {} - vars: - ansible_ssh_user: heat-admin -Controller: - children: - overcloud_Controller: {} -overcloud_Controller: - hosts: - overcloud-controller-0: {ansible_host: 192.168.24.12, canonical_hostname: overcloud-controller-0.localdomain, - ctlplane_hostname: overcloud-controller-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.12, - deploy_server_id: 6c1befea-4173-49bd-9507-631cc33f44a2, external_hostname: overcloud-controller-0.external.localdomain, - external_ip: 10.0.0.10, internal_api_hostname: overcloud-controller-0.internalapi.localdomain, - internal_api_ip: 172.16.2.199, management_ip: 192.168.24.12, storage_hostname: overcloud-controller-0.storage.localdomain, - storage_ip: 172.16.1.13, storage_mgmt_hostname: overcloud-controller-0.storagemgmt.localdomain, - storage_mgmt_ip: 172.16.3.89, tenant_hostname: overcloud-controller-0.tenant.localdomain, - tenant_ip: 172.16.0.167} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 6c1befea-4173-49bd-9507-631cc33f44a2 - serial: '1' - tripleo_role_name: Controller - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - storage_mgmt, tenant] -overcloud_Compute: - hosts: - overcloud-novacompute-0: {ansible_host: 192.168.24.13, canonical_hostname: overcloud-novacompute-0.localdomain, - ctlplane_hostname: overcloud-novacompute-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.13, - deploy_server_id: 
c5c20c87-60f8-4dc9-a0e8-1f185c4b8a8c, external_ip: 192.168.24.13, - internal_api_hostname: overcloud-novacompute-0.internalapi.localdomain, internal_api_ip: 172.16.2.241, - management_ip: 192.168.24.13, storage_hostname: overcloud-novacompute-0.storage.localdomain, - storage_ip: 172.16.1.235, storage_mgmt_ip: 192.168.24.13, tenant_hostname: overcloud-novacompute-0.tenant.localdomain, - tenant_ip: 172.16.0.242} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 6c1befea-4173-49bd-9507-631cc33f44a2 - serial: '1' - tripleo_role_name: Compute - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - storage_mgmt, tenant] -overcloud_overcloud: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - container_cli: podman - ctlplane_vip: 192.168.24.7 - external_vip: 10.0.0.5 - internal_api_vip: 172.16.2.164 - redis_vip: 172.16.2.196 - storage_mgmt_vip: 172.16.3.44 - storage_vip: 172.16.1.147 -overcloud: - children: - overcloud_allovercloud: {} -overcloud_allovercloud: - children: - overcloud_overcloud: {} -overcloud_kernel: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_nova_conductor: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_snmp: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_mysql: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -cinder_api: - children: - overcloud_cinder_api: {} -overcloud_cinder_api: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -swift_proxy: - children: - overcloud_swift_proxy: {} -overcloud_swift_proxy: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -swift_ringbuilder: - children: - overcloud_swift_ringbuilder: {} -overcloud_swift_ringbuilder: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -heat_api: - children: - overcloud_heat_api: {} -overcloud_heat_api: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_container_image_prepare: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_timezone: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_iscsid: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_boot_params_service: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -heat_api_cloudwatch_disabled: - children: - overcloud_heat_api_cloudwatch_disabled: {} -overcloud_heat_api_cloudwatch_disabled: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_tripleo_firewall: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -swift_storage: - children: - overcloud_swift_storage: {} -overcloud_swift_storage: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -keystone_admin_api: - children: - overcloud_keystone_admin_api: {} -overcloud_keystone_admin_api: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_certmonger_user: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_mysql_client: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: 
heat-admin -glance_api: - children: - overcloud_glance_api: {} -overcloud_glance_api: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -keystone: - children: - overcloud_keystone: {} -overcloud_keystone: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -cinder_volume: - children: - overcloud_cinder_volume: {} -overcloud_cinder_volume: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -ovn_dbs: - children: - overcloud_ovn_dbs: {} -overcloud_ovn_dbs: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -memcached: - children: - overcloud_memcached: {} -overcloud_memcached: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_oslo_messaging_rpc: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_nova_vnc_proxy: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -oslo_messaging_notify: - children: - overcloud_oslo_messaging_notify: {} -overcloud_oslo_messaging_notify: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -keystone_public_api: - children: - overcloud_keystone_public_api: {} -overcloud_keystone_public_api: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -nova_api: - children: - overcloud_nova_api: {} -overcloud_nova_api: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_nova_metadata: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -heat_engine: - children: - overcloud_heat_engine: {} -overcloud_heat_engine: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_nova_migration_target: - children: - overcloud_Compute: {} - vars: - ansible_ssh_user: heat-admin -overcloud_podman: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_tripleo_packages: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -nova_scheduler: - children: - overcloud_nova_scheduler: {} -overcloud_nova_scheduler: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_nova_compute: - children: - overcloud_Compute: {} - vars: - ansible_ssh_user: heat-admin -overcloud_nova_libvirt_guests: - children: - overcloud_Compute: {} - vars: - ansible_ssh_user: heat-admin -neutron_plugin_ml2_ovn: - children: - overcloud_neutron_plugin_ml2_ovn: {} -overcloud_neutron_plugin_ml2_ovn: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_logrotate_crond: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_haproxy: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_sshd: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -placement: - children: - overcloud_placement: {} -overcloud_placement: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -cinder_scheduler: - children: - overcloud_cinder_scheduler: {} -overcloud_cinder_scheduler: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_nova_libvirt: - children: - overcloud_Compute: {} - vars: - ansible_ssh_user: heat-admin -overcloud_ovn_metadata: - children: - overcloud_Compute: {} - vars: - ansible_ssh_user: heat-admin 
-overcloud_tuned: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_chrony: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -horizon: - children: - overcloud_horizon: {} -overcloud_horizon: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -neutron_api: - children: - overcloud_neutron_api: {} -overcloud_neutron_api: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_ca_certs: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -heat_api_cfn: - children: - overcloud_heat_api_cfn: {} -overcloud_heat_api_cfn: - children: - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin -overcloud_ovn_controller: - children: - overcloud_Compute: {} - overcloud_Controller: {} - vars: - ansible_ssh_user: heat-admin diff --git a/tripleo_common/tests/inventory_data/overcloud_dynamic.json b/tripleo_common/tests/inventory_data/overcloud_dynamic.json deleted file mode 100644 index c2305b007..000000000 --- a/tripleo_common/tests/inventory_data/overcloud_dynamic.json +++ /dev/null @@ -1,548 +0,0 @@ -{ - "Compute": { - "hosts": [ - "overcloud-novacompute-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "serial": "1", - "tripleo_role_name": "Compute", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "Controller": { - "hosts": [ - "overcloud-controller-0" - ], - "vars": { - "ansible_ssh_user": "heat-admin", - "bootstrap_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "serial": "1", - "tripleo_role_name": "Controller", - "tripleo_role_networks": [ - "ctlplane", - "external", - "internal_api", - "management", - "storage", - "storage_mgmt", - "tenant" - ] - } - }, - "Undercloud": { - "hosts": [ - "undercloud" - ], - "vars": { - "ansible_connection": "local", - "ansible_host": "localhost", - "ansible_python_interpreter": "/usr/bin/python2", - "ansible_remote_tmp": "/tmp/ansible-${USER}", - "auth_url": "https://192.168.24.2:13000", - "cacert": "/etc/pki/ca-trust/source/anchors/cm-local-ca.pem", - "os_auth_token": "gAAAAABedRLI4L-0UK1i3r0lkHVPFeGE2FB40mk7tjWw_US3nwQvzZlwKPM_uCq1wYeBqkCLwiXW61BrZswCi9M3mI-6HeniTi9qV7nXUyLHrWw1Kh5woAEGPyjmdiMiTh_P0WFKobYqXdP0oFwcN_LWr-mnYE1YWluvX4qKeUEeIOXHVj8OtMg", - "overcloud_admin_password": "9RDyelRD5PT5Jk6q4efjYG6Es", - "overcloud_horizon_url": "https://10.0.0.5:443/dashboard", - "overcloud_keystone_url": "https://10.0.0.5:13000", - "plan": "overcloud", - "project_name": "admin", - "undercloud_service_list": [ - "tripleo_ironic_conductor" - ], - "undercloud_swift_url": "https://192.168.24.2:13808/v1/AUTH_b0d47705b94c486889fd2b26ce343763", - "username": "admin" - } - }, - "_meta": { - "hostvars": { - "overcloud-controller-0": { - "ansible_host": "192.168.24.12", - "canonical_hostname": "overcloud-controller-0.localdomain", - "ctlplane_hostname": "overcloud-controller-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.12", - "deploy_server_id": "6c1befea-4173-49bd-9507-631cc33f44a2", - "external_hostname": "overcloud-controller-0.external.localdomain", - "external_ip": "10.0.0.10", - "internal_api_hostname": "overcloud-controller-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.199", - "management_ip": "192.168.24.12", - "storage_hostname": 
"overcloud-controller-0.storage.localdomain", - "storage_ip": "172.16.1.13", - "storage_mgmt_hostname": "overcloud-controller-0.storagemgmt.localdomain", - "storage_mgmt_ip": "172.16.3.89", - "tenant_hostname": "overcloud-controller-0.tenant.localdomain", - "tenant_ip": "172.16.0.167" - }, - "overcloud-novacompute-0": { - "ansible_host": "192.168.24.13", - "canonical_hostname": "overcloud-novacompute-0.localdomain", - "ctlplane_hostname": "overcloud-novacompute-0.ctlplane.localdomain", - "ctlplane_ip": "192.168.24.13", - "deploy_server_id": "c5c20c87-60f8-4dc9-a0e8-1f185c4b8a8c", - "external_ip": "192.168.24.13", - "internal_api_hostname": "overcloud-novacompute-0.internalapi.localdomain", - "internal_api_ip": "172.16.2.241", - "management_ip": "192.168.24.13", - "storage_hostname": "overcloud-novacompute-0.storage.localdomain", - "storage_ip": "172.16.1.235", - "storage_mgmt_ip": "192.168.24.13", - "tenant_hostname": "overcloud-novacompute-0.tenant.localdomain", - "tenant_ip": "172.16.0.242" - } - } - }, - "overcloud": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "container_cli": "podman", - "ctlplane_vip": "192.168.24.7", - "external_vip": "10.0.0.5", - "internal_api_vip": "172.16.2.164", - "redis_vip": "172.16.2.196", - "storage_mgmt_vip": "172.16.3.44", - "storage_vip": "172.16.1.147" - } - }, - "allovercloud": { - "children": [ - "overcloud" - ] - }, - "boot_params_service": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ca_certs": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "certmonger_user": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "chrony": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cinder_api": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cinder_scheduler": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "cinder_volume": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "container_image_prepare": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "glance_api": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "haproxy": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_api": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_api_cfn": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_api_cloudwatch_disabled": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "heat_engine": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "horizon": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "iscsid": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "kernel": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "keystone": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "keystone_admin_api": { - "children": [ - "Controller" - ], - 
"vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "keystone_public_api": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "logrotate_crond": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "memcached": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "mysql": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "mysql_client": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "neutron_api": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "neutron_plugin_ml2_ovn": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_api": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_compute": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_conductor": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_libvirt": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_libvirt_guests": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_metadata": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_migration_target": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_scheduler": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "nova_vnc_proxy": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "oslo_messaging_notify": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "oslo_messaging_rpc": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_controller": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_dbs": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "ovn_metadata": { - "children": [ - "Compute" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "placement": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "podman": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "snmp": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "sshd": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "swift_proxy": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "swift_ringbuilder": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "swift_storage": { - "children": [ - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "timezone": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tripleo_firewall": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tripleo_packages": { - 
"children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - }, - "tuned": { - "children": [ - "Compute", - "Controller" - ], - "vars": { - "ansible_ssh_user": "heat-admin" - } - } -} diff --git a/tripleo_common/tests/inventory_data/overcloud_static.yaml b/tripleo_common/tests/inventory_data/overcloud_static.yaml deleted file mode 100644 index 7e319036f..000000000 --- a/tripleo_common/tests/inventory_data/overcloud_static.yaml +++ /dev/null @@ -1,282 +0,0 @@ -Undercloud: - hosts: - undercloud: {} - vars: - ansible_connection: local - ansible_host: localhost - ansible_python_interpreter: /usr/bin/python2 - ansible_remote_tmp: /tmp/ansible-${USER} - auth_url: https://192.168.24.2:13000 - cacert: /etc/pki/ca-trust/source/anchors/cm-local-ca.pem - os_auth_token: gAAAAABedRLI4L-0UK1i3r0lkHVPFeGE2FB40mk7tjWw_US3nwQvzZlwKPM_uCq1wYeBqkCLwiXW61BrZswCi9M3mI-6HeniTi9qV7nXUyLHrWw1Kh5woAEGPyjmdiMiTh_P0WFKobYqXdP0oFwcN_LWr-mnYE1YWluvX4qKeUEeIOXHVj8OtMg - overcloud_admin_password: 9RDyelRD5PT5Jk6q4efjYG6Es - overcloud_horizon_url: https://10.0.0.5:443/dashboard - overcloud_keystone_url: https://10.0.0.5:13000 - plan: overcloud - project_name: admin - undercloud_service_list: [tripleo_ironic_conductor] - undercloud_swift_url: https://192.168.24.2:13808/v1/AUTH_b0d47705b94c486889fd2b26ce343763 - username: admin -Controller: - hosts: - overcloud-controller-0: {ansible_host: 192.168.24.12, canonical_hostname: overcloud-controller-0.localdomain, - ctlplane_hostname: overcloud-controller-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.12, - deploy_server_id: 6c1befea-4173-49bd-9507-631cc33f44a2, external_hostname: overcloud-controller-0.external.localdomain, - external_ip: 10.0.0.10, internal_api_hostname: overcloud-controller-0.internalapi.localdomain, - internal_api_ip: 172.16.2.199, management_ip: 192.168.24.12, storage_hostname: overcloud-controller-0.storage.localdomain, - storage_ip: 172.16.1.13, storage_mgmt_hostname: overcloud-controller-0.storagemgmt.localdomain, - storage_mgmt_ip: 172.16.3.89, tenant_hostname: overcloud-controller-0.tenant.localdomain, - tenant_ip: 172.16.0.167} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 6c1befea-4173-49bd-9507-631cc33f44a2 - serial: '1' - tripleo_role_name: Controller - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - storage_mgmt, tenant] -Compute: - hosts: - overcloud-novacompute-0: {ansible_host: 192.168.24.13, canonical_hostname: overcloud-novacompute-0.localdomain, - ctlplane_hostname: overcloud-novacompute-0.ctlplane.localdomain, ctlplane_ip: 192.168.24.13, - deploy_server_id: c5c20c87-60f8-4dc9-a0e8-1f185c4b8a8c, external_ip: 192.168.24.13, - internal_api_hostname: overcloud-novacompute-0.internalapi.localdomain, internal_api_ip: 172.16.2.241, - management_ip: 192.168.24.13, storage_hostname: overcloud-novacompute-0.storage.localdomain, - storage_ip: 172.16.1.235, storage_mgmt_ip: 192.168.24.13, tenant_hostname: overcloud-novacompute-0.tenant.localdomain, - tenant_ip: 172.16.0.242} - vars: - ansible_ssh_user: heat-admin - bootstrap_server_id: 6c1befea-4173-49bd-9507-631cc33f44a2 - serial: '1' - tripleo_role_name: Compute - tripleo_role_networks: [ctlplane, external, internal_api, management, storage, - storage_mgmt, tenant] -overcloud: - children: - Compute: {} - Controller: {} - vars: {container_cli: podman, ctlplane_vip: 192.168.24.7, external_vip: 10.0.0.5, - internal_api_vip: 172.16.2.164, redis_vip: 172.16.2.196, storage_mgmt_vip: 172.16.3.44, - storage_vip: 
172.16.1.147} -allovercloud: - children: - overcloud: {} -kernel: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_conductor: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -snmp: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -mysql: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -cinder_api: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -swift_proxy: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -swift_ringbuilder: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -heat_api: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -container_image_prepare: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -timezone: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -iscsid: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -boot_params_service: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -heat_api_cloudwatch_disabled: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -tripleo_firewall: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -swift_storage: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -keystone_admin_api: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -certmonger_user: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -mysql_client: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -glance_api: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -keystone: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -cinder_volume: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -ovn_dbs: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -memcached: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -oslo_messaging_rpc: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_vnc_proxy: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -oslo_messaging_notify: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -keystone_public_api: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_api: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_metadata: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -heat_engine: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_migration_target: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -podman: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -tripleo_packages: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_scheduler: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_compute: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -nova_libvirt_guests: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -neutron_plugin_ml2_ovn: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -logrotate_crond: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -haproxy: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -sshd: - children: - Compute: {} - Controller: {} - vars: 
{ansible_ssh_user: heat-admin} -placement: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -cinder_scheduler: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -nova_libvirt: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -ovn_metadata: - children: - Compute: {} - vars: {ansible_ssh_user: heat-admin} -tuned: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -chrony: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -horizon: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -neutron_api: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -ca_certs: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} -heat_api_cfn: - children: - Controller: {} - vars: {ansible_ssh_user: heat-admin} -ovn_controller: - children: - Compute: {} - Controller: {} - vars: {ansible_ssh_user: heat-admin} diff --git a/tripleo_common/tests/inventory_data/undercloud_dynamic.json b/tripleo_common/tests/inventory_data/undercloud_dynamic.json deleted file mode 100644 index 23cb5260b..000000000 --- a/tripleo_common/tests/inventory_data/undercloud_dynamic.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "_meta": { - "hostvars": {} - }, - "Undercloud": { - "hosts": [ - "undercloud" - ], - "vars": { - "ansible_host": "localhost", - "ansible_python_interpreter": "/usr/bin/python3", - "ansible_connection": "local", - "ansible_remote_tmp": "/tmp/ansible-${USER}", - "auth_url": "https://192.168.24.2:13000", - "cacert": null, - "os_auth_token": "gAAAAABfyhfVBvn9GwTNPKCgjTLtBAMW4_8JVMUj4n9GTrt8Ns2_YrJiAK9E3E9V0ABixQ2TRICTgZtHGgl4qfIcJMvbbLjH84oL4QxuyQ4LfcNIF7WBcnvoo3qvkCYfTNbzEJChFYiId8W0lyXiTGE80Dhr13PXrHuYDeejs4jShuFGSP_8BeQ", - "plan": null, - "project_name": "admin", - "username": "admin", - "undercloud_swift_url": "https://192.168.24.2:13808/v1/AUTH_ad15d77254c94b03b1534a261059cd76", - "undercloud_service_list": [ - "tripleo_ironic_conductor" - ] - } - } -} - diff --git a/tripleo_common/tests/inventory_data/undercloud_dynamic_merged.json b/tripleo_common/tests/inventory_data/undercloud_dynamic_merged.json deleted file mode 100644 index c417fff5a..000000000 --- a/tripleo_common/tests/inventory_data/undercloud_dynamic_merged.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "_meta": { - "hostvars": {} - }, - "Undercloud": { - "hosts": [ - "undercloud" - ], - "vars": { - "ansible_host": "localhost", - "ansible_python_interpreter": "/usr/bin/python3", - "ansible_connection": "local", - "ansible_remote_tmp": "/tmp/ansible-${USER}", - "auth_url": "https://192.168.24.2:13000", - "cacert": null, - "os_auth_token": "gAAAAABfyhfVBvn9GwTNPKCgjTLtBAMW4_8JVMUj4n9GTrt8Ns2_YrJiAK9E3E9V0ABixQ2TRICTgZtHGgl4qfIcJMvbbLjH84oL4QxuyQ4LfcNIF7WBcnvoo3qvkCYfTNbzEJChFYiId8W0lyXiTGE80Dhr13PXrHuYDeejs4jShuFGSP_8BeQ", - "plan": null, - "plans": [], - "project_name": "admin", - "username": "admin", - "undercloud_swift_url": "https://192.168.24.2:13808/v1/AUTH_ad15d77254c94b03b1534a261059cd76", - "undercloud_service_list": [ - "tripleo_ironic_conductor" - ] - } - } -} - diff --git a/tripleo_common/tests/inventory_data/undercloud_static.yaml b/tripleo_common/tests/inventory_data/undercloud_static.yaml deleted file mode 100644 index bf8a9c42d..000000000 --- a/tripleo_common/tests/inventory_data/undercloud_static.yaml +++ /dev/null @@ -1,16 +0,0 @@ -Undercloud: - hosts: - undercloud: {} - vars: - ansible_connection: local - ansible_host: localhost - ansible_python_interpreter: /usr/bin/python3 - 
ansible_remote_tmp: /tmp/ansible-${USER} - auth_url: https://192.168.24.2:13000 - cacert: null - os_auth_token: gAAAAABfyheSpP9F_O2zpn_zMgPANBpl10eMR4QWL3tfBDJYhDEP_iEoPBjP0H0kCfkoDBldnlmkf9LRIWqSRJ1Z6FJOQdMp--DjJE4lpvWk6_swo2NpSt3IGW9Kudc7xNm-WA9R7xBujF4DtZ_WjWw7H_Ue0VfF6eKfo2HLP6Y9VS2IY3m9HZQ - plan: null - project_name: admin - undercloud_service_list: [tripleo_ironic_conductor] - undercloud_swift_url: https://192.168.24.2:13808/v1/AUTH_ad15d77254c94b03b1534a261059cd76 - username: admin diff --git a/tripleo_common/tests/inventory_data/undercloud_static_merged.yaml b/tripleo_common/tests/inventory_data/undercloud_static_merged.yaml deleted file mode 100644 index e6cb2bc00..000000000 --- a/tripleo_common/tests/inventory_data/undercloud_static_merged.yaml +++ /dev/null @@ -1,17 +0,0 @@ -Undercloud: - hosts: - undercloud: {} - vars: - ansible_connection: local - ansible_host: localhost - ansible_python_interpreter: /usr/bin/python3 - ansible_remote_tmp: /tmp/ansible-${USER} - auth_url: https://192.168.24.2:13000 - cacert: null - os_auth_token: gAAAAABfyheSpP9F_O2zpn_zMgPANBpl10eMR4QWL3tfBDJYhDEP_iEoPBjP0H0kCfkoDBldnlmkf9LRIWqSRJ1Z6FJOQdMp--DjJE4lpvWk6_swo2NpSt3IGW9Kudc7xNm-WA9R7xBujF4DtZ_WjWw7H_Ue0VfF6eKfo2HLP6Y9VS2IY3m9HZQ - plan: null - plans: [] - project_name: admin - undercloud_service_list: [tripleo_ironic_conductor] - undercloud_swift_url: https://192.168.24.2:13808/v1/AUTH_ad15d77254c94b03b1534a261059cd76 - username: admin diff --git a/tripleo_common/tests/test_arch.py b/tripleo_common/tests/test_arch.py deleted file mode 100644 index 733c3cbf9..000000000 --- a/tripleo_common/tests/test_arch.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2017, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test cases for tripleo_common.arch module. -""" - -from unittest import mock -from unittest import TestCase - -from tripleo_common import arch - - -class ArchTestCase(TestCase): - def test_kernel_arch(self): - for (expected, _arch) in [('x86_64', 'x86_64'), - ('ppc64le', 'ppc64le')]: - with mock.patch('os.uname', return_value=('', '', '', '', _arch)): - self.assertEqual(expected, arch.kernel_arch()) - - def test_dib_arch(self): - for (expected, _arch) in [('amd64', 'x86_64'), - ('ppc64le', 'ppc64le')]: - with mock.patch('os.uname', return_value=('', '', '', '', _arch)): - self.assertEqual(expected, arch.dib_arch()) diff --git a/tripleo_common/tests/test_filters.py b/tripleo_common/tests/test_filters.py deleted file mode 100644 index c81de0d3c..000000000 --- a/tripleo_common/tests/test_filters.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -from unittest import mock - -from tripleo_common.tests import base -from tripleo_common.tests import fake_nova - -# See the README file in the fake_nova module directory for details on why -# this is being done. -if 'nova' not in sys.modules: - sys.modules['nova'] = fake_nova -else: - raise RuntimeError('nova module already found in sys.modules. The ' - 'fake_nova injection should be removed.') -from tripleo_common.filters import capabilities_filter # noqa - - -class TestCapabilitiesFilter(base.TestCase): - def test_no_requested_node(self): - instance = capabilities_filter.TripleOCapabilitiesFilter() - host_state = mock.Mock() - host_state.stats.get.return_value = '' - spec_obj = mock.Mock() - spec_obj.scheduler_hints.get.return_value = [] - self.assertTrue(instance.host_passes(host_state, spec_obj)) - - def test_requested_node_matches(self): - def mock_host_get(key): - if key == 'node': - return 'compute-0' - self.fail('Unexpected key requested by filter') - - def mock_spec_get(key): - if key == 'capabilities:node': - return ['compute-0'] - self.fail('Unexpected key requested by filter') - - instance = capabilities_filter.TripleOCapabilitiesFilter() - host_state = mock.Mock() - host_state.stats.get.side_effect = mock_host_get - spec_obj = mock.Mock() - spec_obj.scheduler_hints.get.side_effect = mock_spec_get - self.assertTrue(instance.host_passes(host_state, spec_obj)) - - def test_requested_node_no_match(self): - instance = capabilities_filter.TripleOCapabilitiesFilter() - host_state = mock.Mock() - host_state.stats.get.return_value = 'controller-0' - spec_obj = mock.Mock() - spec_obj.scheduler_hints.get.return_value = ['compute-0'] - self.assertFalse(instance.host_passes(host_state, spec_obj)) diff --git a/tripleo_common/tests/test_inventories.py b/tripleo_common/tests/test_inventories.py deleted file mode 100644 index 3520be34c..000000000 --- a/tripleo_common/tests/test_inventories.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import fixtures -import os -from unittest.mock import MagicMock - -import yaml - -from tripleo_common.tests import base -from tripleo_common.inventories import TripleoInventories - - -class _TestInventoriesBase(base.TestCase): - def setUp(self): - super(_TestInventoriesBase, self).setUp() - self.read_inventory_data() - - def read_inventory_data(self): - inventory_data = collections.OrderedDict() - inventory_dir = os.path.join( - os.path.dirname(__file__), 'inventory_data' - ) - for datafile in ( - 'cell1_dynamic.json', - 'cell1_static.yaml', - 'overcloud_dynamic.json', - 'overcloud_static.yaml', - 'merged_dynamic.json', - 'merged_static.yaml', - 'list_overcloud.json', - 'undercloud_dynamic.json', - 'undercloud_static.yaml', - 'undercloud_dynamic_merged.json', - 'undercloud_static_merged.yaml', - ): - name = os.path.basename(datafile).split('.')[0] - path = os.path.join(inventory_dir, datafile) - with open(path, 'r') as data: - inventory_data[name] = yaml.safe_load(data) - self.inventory_data = inventory_data - - -class TestInventoriesStatic(_TestInventoriesBase): - def setUp(self): - super(TestInventoriesStatic, self).setUp() - mock_inv_overcloud = MagicMock() - mock_inv_cell1 = MagicMock() - mock_inv_overcloud.list.return_value = self.inventory_data[ - 'overcloud_static' - ] - mock_inv_cell1.list.return_value = self.inventory_data[ - 'cell1_static' - ] - stack_to_inv_obj_map = { - 'overcloud': mock_inv_overcloud, - 'cell1': mock_inv_cell1 - } - self.inventories = TripleoInventories(stack_to_inv_obj_map) - - def test_merge(self): - actual = dict(self.inventories._merge(dynamic=False)) - expected = self.inventory_data['merged_static'] - self.assertEqual(expected, actual) - - def test_inventory_write_static(self): - tmp_dir = self.useFixture(fixtures.TempDir()).path - inv_path = os.path.join(tmp_dir, "inventory.yaml") - self.inventories.write_static_inventory(inv_path) - expected = self.inventory_data['merged_static'] - with open(inv_path, 'r') as f: - loaded_inv = collections.OrderedDict(yaml.safe_load(f)) - self.assertEqual(expected, loaded_inv) - - -class TestInventoriesDynamic(_TestInventoriesBase): - def setUp(self): - super(TestInventoriesDynamic, self).setUp() - mock_inv_overcloud = MagicMock() - mock_inv_cell1 = MagicMock() - mock_inv_overcloud.list.return_value = self.inventory_data[ - 'overcloud_dynamic' - ] - mock_inv_cell1.list.return_value = self.inventory_data[ - 'cell1_dynamic' - ] - stack_to_inv_obj_map = { - 'overcloud': mock_inv_overcloud, - 'cell1': mock_inv_cell1 - } - self.inventories = TripleoInventories(stack_to_inv_obj_map) - - def test_merge(self): - actual = dict(self.inventories._merge()) - expected = dict(self.inventory_data['merged_dynamic']) - self.assertEqual(expected, actual) - - def test_list(self): - actual = self.inventories.list() - expected = self.inventory_data['merged_dynamic'] - self.assertEqual(expected, actual) - - -class TestInventorySingleDynamic(_TestInventoriesBase): - def setUp(self): - super(TestInventorySingleDynamic, self).setUp() - mock_inv_overcloud = MagicMock() - mock_inv_overcloud.list.return_value = self.inventory_data[ - 'overcloud_dynamic' - ] - stack_to_inv_obj_map = { - 'overcloud': mock_inv_overcloud - } - self.inventories = TripleoInventories(stack_to_inv_obj_map) - - def test_list(self): - actual = self.inventories.list() - expected = self.inventory_data['list_overcloud'] - self.assertEqual(expected, actual) - - -class TestInventoryUndercloudStatic(_TestInventoriesBase): - def setUp(self): - 
super(TestInventoryUndercloudStatic, self).setUp() - mock_inv_undercloud = MagicMock() - mock_inv_undercloud.list.return_value = self.inventory_data[ - 'undercloud_static' - ] - stack_to_inv_obj_map = { - 'foobar': mock_inv_undercloud - } - self.inventories = TripleoInventories(stack_to_inv_obj_map) - - def test_list(self): - actual = self.inventories.list(dynamic=False) - expected = self.inventory_data['undercloud_static_merged'] - self.assertEqual(expected, actual) - - -class TestInventoryUndercloudDynamic(_TestInventoriesBase): - def setUp(self): - super(TestInventoryUndercloudDynamic, self).setUp() - mock_inv_undercloud = MagicMock() - mock_inv_undercloud.list.return_value = self.inventory_data[ - 'undercloud_dynamic' - ] - stack_to_inv_obj_map = { - 'foobar': mock_inv_undercloud - } - self.inventories = TripleoInventories(stack_to_inv_obj_map) - - def test_list(self): - actual = self.inventories.list() - expected = self.inventory_data['undercloud_dynamic_merged'] - self.assertEqual(expected, actual) diff --git a/tripleo_common/tests/test_inventory.py b/tripleo_common/tests/test_inventory.py deleted file mode 100644 index 0412b1f4b..000000000 --- a/tripleo_common/tests/test_inventory.py +++ /dev/null @@ -1,1430 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from collections import OrderedDict -import fixtures -import os -import sys -from unittest import mock - -import yaml - -from heatclient.exc import HTTPNotFound - -from tripleo_common.inventory import NeutronData -from tripleo_common.inventory import StackOutputs -from tripleo_common.inventory import TripleoInventory -from tripleo_common.tests import base -from tripleo_common.tests.fake_neutron import fakes as neutron_fakes - - -MOCK_ENABLED_SERVICES = { - "ObjectStorage": [ - "kernel", - "swift_storage", - "tripleo_packages" - ], - "Controller": [ - "kernel", - "keystone", - "tripleo_packages" - ], - "Compute": [ - "nova_compute", - "kernel", - "tripleo_packages", - "ceph_client" - ], - "CephStorage": [ - "kernel", - "tripleo_packages" - ], - "BlockStorage": [ - "cinder_volume", - "kernel", - "tripleo_packages" - ] - } - - -class TestInventory(base.TestCase): - def setUp(self): - super(TestInventory, self).setUp() - self.outputs_data = {'outputs': [ - { - 'output_key': 'EnabledServices', - 'output_value': { - 'Controller': ['sa', 'sb'], - 'Compute': ['sd', 'se', 'ceph_client'], - 'CustomRole': ['sg', 'sh']} - }, - { - 'output_key': 'KeystoneURL', - 'output_value': 'xyz://keystone' - }, - { - 'output_key': 'ServerIdData', - 'output_value': { - 'server_ids': { - 'Controller': ['a', 'b', 'c'], - 'Compute': ['d'], - 'CustomRole': ['e'] - }, - 'bootstrap_server_id': 'a' - } - }, - { - 'output_key': 'RoleNetHostnameMap', - 'output_value': { - 'Controller': { - 'ctlplane': [ - 'c-0.ctlplane.localdomain', - 'c-1.ctlplane.localdomain', - 'c-2.ctlplane.localdomain'], - 'internal_api': [ - 'c-0.internal_api.localdomain', - 'c-1.internal_api.localdomain', - 'c-2.internal_api.localdomain'] - }, - 'Compute': { - 'ctlplane': ['cp-0.ctlplane.localdomain'] - }, - 'CustomRole': { - 'ctlplane': ['cs-0.ctlplane.localdomain'] - } - } - }, - { - 'output_key': 'RoleNetIpMap', - 'output_value': { - 'Controller': { - 'ctlplane': [ - 'x.x.x.1', - 'x.x.x.2', - 'x.x.x.3' - ], - 'internal_api': [ - 'x.x.x.4', - 'x.x.x.5', - 'x.x.x.6' - ] - }, - 'Compute': { - 'ctlplane': ['y.y.y.1'] - }, - 'CustomRole': { - 'ctlplane': ['z.z.z.1'] - } - } - }, - { - 'output_key': 'VipMap', - 'output_value': { - 'ctlplane': 'x.x.x.4', - 'redis': 'x.x.x.6' - } - }, - { - 'output_key': 'RoleData', - 'output_value': { - 'Controller': {'config_settings': 'foo1'}, - 'Compute': {'config_settings': 'foo2'}, - 'CustomRole': {'config_settings': 'foo3'} - } - } - ] - } - self.plan_name = 'overcloud' - self.hclient = mock.MagicMock() - self.hclient.stacks.environment.return_value = { - 'parameter_defaults': { - 'AdminPassword': 'theadminpw', - 'ContainerCli': 'podman' - } - } - self.mock_stack = mock.MagicMock() - self.mock_stack.outputs = self.outputs_data['outputs'] - self.hclient.stacks.get.return_value = self.mock_stack - self.outputs = StackOutputs(self.mock_stack) - self.connection = mock.MagicMock() - patcher = mock.patch('openstack.connect', - return_value=self.connection) - patcher.start() - self.inventory = TripleoInventory( - cloud_name='undercloud', - hclient=self.hclient, - plan_name=self.plan_name, - ansible_ssh_user='heat-admin') - self.inventory.stack_outputs = self.outputs - self.addCleanup(patcher.stop) - - def test_get_roles_by_service(self): - services = TripleoInventory.get_roles_by_service(MOCK_ENABLED_SERVICES) - expected = { - 'kernel': [ - 'BlockStorage', - 'CephStorage', - 'Compute', - 'Controller', - 'ObjectStorage' - ], - 'swift_storage': ['ObjectStorage'], - 'tripleo_packages': [ - 'BlockStorage', - 'CephStorage', 
- 'Compute', - 'Controller', - 'ObjectStorage' - ], - 'keystone': ['Controller'], - 'nova_compute': ['Compute'], - 'cinder_volume': ['BlockStorage'], - 'ceph_client': ['Compute'], - } - self.assertDictEqual(services, expected) - - def test_stack_not_found(self): - self.hclient.stacks.get.side_effect = HTTPNotFound('not found') - self.assertEqual(None, self.inventory._get_stack()) - - def test_outputs_valid_key_calls_api(self): - expected = 'xyz://keystone' - self.hclient.stacks.output_show.return_value = dict(output=dict( - output_value=expected)) - self.assertEqual(expected, self.outputs['KeystoneURL']) - # This should also support the get method - self.assertEqual(expected, self.outputs.get('KeystoneURL')) - self.assertTrue(self.hclient.called_once_with( - 'overcloud', 'KeystoneURL')) - - def test_with_excluded_nodes(self): - excluded_output = { - 'output_key': 'BlacklistedHostnames', - 'output_value': ['cp-0', '', ''] - } - self.outputs_data['outputs'].append(excluded_output) - inventory_group = {'hosts': {'cp-0': {}}} - self.assertEqual(inventory_group, - self.inventory.list()['excluded_overcloud']) - - def test_no_ips(self): - for output in self.outputs_data['outputs']: - if output['output_key'] == 'RoleNetIpMap': - output['output_value'] = dict(Controller=dict(ctlplane=[])) - self.assertRaises(Exception, self.inventory.list) - - def test_outputs_invalid_key_raises_keyerror(self): - self.assertRaises(KeyError, lambda: self.outputs['Invalid']) - - def test_outputs_get_method_returns_default(self): - default = 'default value' - self.assertEqual(default, self.outputs.get('Invalid', default)) - - def test_outputs_iterating_returns_list_of_output_keys(self): - self.assertEqual({ - 'EnabledServices', - 'KeystoneURL', - 'ServerIdData', - 'RoleNetHostnameMap', - 'RoleNetIpMap', - 'VipMap', - 'RoleData' - }, set([o for o in self.outputs])) - - def test_inventory_list(self): - self.inventory.undercloud_connection = 'local' - self._inventory_list(self.inventory) - - def _inventory_list(self, inventory): - ansible_ssh_user = 'heat-admin' - expected = { - 'Compute': { - 'hosts': ['cp-0'], - 'vars': { - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'Compute', - 'tripleo_role_networks': ['ctlplane'] - } - }, - 'Controller': { - 'hosts': ['c-0', 'c-1', 'c-2'], - 'vars': { - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'Controller', - 'tripleo_role_networks': [ - 'ctlplane', - 'internal_api' - ] - } - }, - 'CustomRole': { - 'hosts': ['cs-0'], - 'vars': { - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'CustomRole', - 'tripleo_role_networks': ['ctlplane'] - } - }, - 'allovercloud': { - 'children': ['overcloud'] - }, - 'excluded_overcloud': { - 'hosts': {} - }, - 'overcloud': { - 'children': ['Compute', 'Controller', 'CustomRole'], - 'vars': { - 'container_cli': 'podman', - 'ctlplane_vip': 'x.x.x.4', - 'redis_vip': 'x.x.x.6' - } - }, - 'Undercloud': { - 'hosts': ['undercloud'], - 'vars': { - 'ansible_connection': 'local', - 'ansible_host': 'localhost', - 'ansible_python_interpreter': sys.executable, - 'ansible_remote_tmp': '/tmp/ansible-${USER}', - 'any_error_fatal': True, - 'max_fail_percentage': 0, - 'overcloud_keystone_url': 'xyz://keystone', - 'overcloud_admin_password': 'theadminpw', - 'plan': 'overcloud', - 'undercloud_service_list': [ - 'tripleo_ironic_conductor'] - } - } - } - inv_list = inventory.list() - for k in
expected: - self.assertEqual(expected[k], inv_list[k]) - - def test_inventory_list_undercloud_installer(self): - outputs_data = { - 'outputs': [ - {'output_key': 'EnabledServices', - 'output_value': {'Undercloud': ['sa', 'sb']}}, - {'output_key': 'KeystoneURL', - 'output_value': 'xyz://keystone'}, - {'output_key': 'ServerIdData', - 'output_value': {'server_ids': {'Undercloud': ['a']}, - 'bootstrap_server_id': 'a'}}, - {'output_key': 'RoleNetHostnameMap', - 'output_value': {'Undercloud': { - 'ctlplane': ['uc0.ctlplane.localdomain'], - 'external': ['uc0.external.localdomain'], - 'canonical': ['uc0.lab.example.com']}}}, - {'output_key': 'RoleNetIpMap', - 'output_value': {'Undercloud': {'ctlplane': ['x.x.x.1'], - 'external': ['x.x.x.1']}}}, - {'output_key': 'VipMap', - 'output_value': {'ctlplane': 'x.x.x.4', 'redis': 'x.x.x.6'}}, - {'output_key': 'RoleData', - 'output_value': {'Undercloud': {'config_settings': 'foo1'}}} - ] - } - - self.hclient.stacks.environment.return_value = { - 'parameter_defaults': { - 'AdminPassword': 'theadminpw', 'ContainerCli': 'podman'}} - mock_stack = mock.MagicMock() - mock_stack.outputs = outputs_data['outputs'] - self.hclient.stacks.get.return_value = mock_stack - - outputs = StackOutputs(mock_stack) - inventory = TripleoInventory( - hclient=self.hclient, - cloud_name='undercloud', - plan_name='overcloud', - ansible_ssh_user='heat-admin') - inventory.stack_outputs = outputs - expected = { - 'Undercloud': { - 'hosts': { - 'uc0': { - 'ansible_host': 'x.x.x.1', - 'canonical_hostname': 'uc0.lab.example.com', - 'ctlplane_hostname': 'uc0.ctlplane.localdomain', - 'ctlplane_ip': 'x.x.x.1', - 'deploy_server_id': 'a', - 'external_hostname': 'uc0.external.localdomain', - 'external_ip': 'x.x.x.1'}}, - 'vars': { - 'ansible_ssh_user': 'heat-admin', - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'Undercloud', - 'tripleo_role_networks': ['ctlplane', 'external']}}, - 'allovercloud': { - 'children': {'overcloud': {}} - }, - 'overcloud': { - 'children': {'Undercloud': {}}, - 'vars': {'container_cli': 'podman', - 'ctlplane_vip': 'x.x.x.4', - 'redis_vip': 'x.x.x.6'}}, - 'sb': {'children': {'Undercloud': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'sa': {'children': {'Undercloud': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}} - } - inv_list = inventory.list(dynamic=False) - for k in expected: - self.assertEqual(expected[k], inv_list[k]) - - def test_inventory_list_undercloud_only(self): - self.inventory.plan_name = None - self.inventory.undercloud_connection = 'local' - expected = { - 'Undercloud': { - 'hosts': ['undercloud'], - 'vars': { - 'ansible_connection': 'local', - 'ansible_host': 'localhost', - 'ansible_python_interpreter': sys.executable, - 'ansible_remote_tmp': '/tmp/ansible-${USER}', - 'any_error_fatal': True, - 'max_fail_percentage': 0, - 'undercloud_service_list': [ - 'tripleo_ironic_conductor'] - } - }, - '_meta': {'hostvars': {}}, - } - self.assertEqual(expected, self.inventory.list()) - - def test_ansible_ssh_user(self): - self._try_alternative_args( - ansible_ssh_user='my-custom-admin', undercloud_connection='ssh') - - def _try_alternative_args(self, ansible_ssh_user, undercloud_connection): - key_file = '/var/lib/mistral/.ssh/%s-key' % ansible_ssh_user - self.inventory = TripleoInventory( - hclient=self.hclient, - cloud_name='undercloud', - plan_name=self.plan_name, - ansible_ssh_user=ansible_ssh_user, - undercloud_connection=undercloud_connection, - undercloud_key_file=key_file, - ansible_python_interpreter='foo' - ) - - 
self.inventory.stack_outputs = self.outputs - - expected = { - 'Compute': { - 'hosts': ['cp-0'], - 'vars': { - 'ansible_python_interpreter': 'foo', - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'Compute', - 'tripleo_role_networks': ['ctlplane'] - } - }, - 'Controller': { - 'hosts': ['c-0', 'c-1', 'c-2'], - 'vars': { - 'ansible_python_interpreter': 'foo', - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'Controller', - 'tripleo_role_networks': [ - 'ctlplane', - 'internal_api' - ] - } - }, - 'CustomRole': { - 'hosts': ['cs-0'], - 'vars': { - 'ansible_python_interpreter': 'foo', - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'CustomRole', - 'tripleo_role_networks': ['ctlplane'] - } - }, - 'allovercloud': { - 'children': ['overcloud'] - }, - 'overcloud': { - 'children': ['Compute', 'Controller', 'CustomRole'], - 'vars': { - 'container_cli': 'podman', - 'ctlplane_vip': 'x.x.x.4', - 'redis_vip': 'x.x.x.6' - } - }, - 'sa': { - 'children': ['Controller'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'sb': { - 'children': ['Controller'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'sd': { - 'children': ['Compute'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'se': { - 'children': ['Compute'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'ceph_client': { - 'children': ['Compute'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'clients': { - 'children': ['Compute'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'sg': { - 'children': ['CustomRole'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'sh': { - 'children': ['CustomRole'], - 'vars': { - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_python_interpreter': 'foo' - } - }, - 'Undercloud': { - 'hosts': ['undercloud'], - 'vars': { - 'ansible_connection': 'ssh', - 'ansible_ssh_private_key_file': key_file, - 'ansible_ssh_user': 'my-custom-admin', - 'ansible_host': 'localhost', - 'ansible_python_interpreter': 'foo', - 'ansible_remote_tmp': '/tmp/ansible-${USER}', - 'any_error_fatal': True, - 'max_fail_percentage': 0, - 'overcloud_keystone_url': 'xyz://keystone', - 'overcloud_admin_password': 'theadminpw', - 'plan': 'overcloud', - 'undercloud_service_list': [ - 'tripleo_ironic_conductor'] - } - } - } - - inv_list = self.inventory.list() - for k in expected: - self.assertEqual(expected[k], inv_list[k]) - - def test_inventory_write_static(self): - self.inventory.undercloud_connection = 'local' - self._inventory_write_static() - - def test_inventory_write_static_extra_vars(self): - self.inventory.undercloud_connection = 'local' - extra_vars = {'Undercloud': {'anextravar': 123}} - self._inventory_write_static(extra_vars=extra_vars) - - def _inventory_write_static(self, extra_vars=None): - tmp_dir = self.useFixture(fixtures.TempDir()).path - inv_path = os.path.join(tmp_dir, "inventory.yaml") - self.inventory.write_static_inventory(inv_path, extra_vars) - ansible_ssh_user = 'heat-admin' - expected = { - 'Undercloud': { - 'hosts': {'undercloud': {}}, - 'vars': { - 'ansible_connection': 
'local', - 'ansible_host': 'localhost', - 'ansible_python_interpreter': sys.executable, - 'ansible_remote_tmp': '/tmp/ansible-${USER}', - 'any_error_fatal': True, - 'max_fail_percentage': 0, - 'overcloud_admin_password': 'theadminpw', - 'overcloud_keystone_url': 'xyz://keystone', - 'plan': 'overcloud', - 'undercloud_service_list': [ - 'tripleo_ironic_conductor'] - } - }, - 'Controller': { - 'hosts': { - 'c-0': { - 'ansible_host': 'x.x.x.1', - 'ctlplane_ip': 'x.x.x.1', - 'deploy_server_id': 'a', - 'ctlplane_hostname': 'c-0.ctlplane.localdomain', - 'internal_api_hostname': - 'c-0.internal_api.localdomain', - 'internal_api_ip': 'x.x.x.4' - }, - 'c-1': { - 'ansible_host': 'x.x.x.2', - 'ctlplane_ip': 'x.x.x.2', - 'deploy_server_id': 'b', - 'ctlplane_hostname': 'c-1.ctlplane.localdomain', - 'internal_api_hostname': - 'c-1.internal_api.localdomain', - 'internal_api_ip': 'x.x.x.5' - }, - 'c-2': { - 'ansible_host': 'x.x.x.3', - 'ctlplane_ip': 'x.x.x.3', - 'deploy_server_id': 'c', - 'ctlplane_hostname': 'c-2.ctlplane.localdomain', - 'internal_api_hostname': - 'c-2.internal_api.localdomain', - 'internal_api_ip': 'x.x.x.6' - } - }, - 'vars': { - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'Controller', - 'tripleo_role_networks': [ - 'ctlplane', - 'internal_api' - ] - } - }, - 'Compute': { - 'hosts': { - 'cp-0': { - 'ansible_host': 'y.y.y.1', - 'ctlplane_ip': 'y.y.y.1', - 'deploy_server_id': 'd', - 'ctlplane_hostname': 'cp-0.ctlplane.localdomain' - } - }, - 'vars': { - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'Compute', - 'tripleo_role_networks': ['ctlplane'] - } - }, - 'CustomRole': { - 'hosts': { - 'cs-0': { - 'ansible_host': 'z.z.z.1', - 'ctlplane_ip': 'z.z.z.1', - 'deploy_server_id': 'e', - 'ctlplane_hostname': 'cs-0.ctlplane.localdomain' - } - }, - 'vars': { - 'ansible_ssh_user': ansible_ssh_user, - 'bootstrap_server_id': 'a', - 'serial': 1, - 'tripleo_role_name': 'CustomRole', - 'tripleo_role_networks': ['ctlplane'] - } - }, - 'allovercloud': { - 'children': {'overcloud': {}} - }, - 'excluded_overcloud': { - 'hosts': {} - }, - 'overcloud': { - 'children': { - 'Compute': {}, - 'Controller': {}, - 'CustomRole': {} - }, - 'vars': { - 'container_cli': 'podman', - 'ctlplane_vip': 'x.x.x.4', - 'redis_vip': 'x.x.x.6' - } - }, - 'sa': { - 'children': {'Controller': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - 'sb': { - 'children': {'Controller': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - 'sd': { - 'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - 'se': { - 'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - 'ceph_client': { - 'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - 'clients': { - 'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - 'sg': { - 'children': {'CustomRole': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - 'sh': { - 'children': {'CustomRole': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'} - }, - } - if extra_vars: - expected['Undercloud']['vars']['anextravar'] = 123 - - with open(inv_path, 'r') as f: - loaded_inv = yaml.safe_load(f) - self.assertEqual(expected, loaded_inv) - - def test__add_host_from_neutron_data(self): - neutron_data = NeutronData(networks=neutron_fakes.fake_networks, - subnets=neutron_fakes.fake_subnets, - ports=neutron_fakes.compute_0_ports) - ret = OrderedDict() - role = 
ret.setdefault('Compute', {}) - role_vars = role.setdefault('vars', {}) - role_networks = role_vars.setdefault('tripleo_role_networks', []) - hosts = role.setdefault('hosts', {}) - ports = neutron_data.ports_by_role_and_host['Compute']['cp-0'] - self.inventory._add_host_from_neutron_data(hosts, ports, role_networks, - role_vars) - self.assertEqual(OrderedDict([ - ('Compute', - {'hosts': { - 'ansible_host': '192.0.2.20', - 'canonical_hostname': 'cp-0.example.com', - 'ctlplane_hostname': 'cp-0.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.20', - 'internal_api_hostname': 'cp-0.internalapi.example.com', - 'internal_api_ip': '198.51.100.150'}, - 'vars': { - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', - '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': '192.0.2.1'}], - 'ctlplane_vlan_id': '1', - 'internal_api_cidr': '25', - 'internal_api_dns_nameservers': [], - 'internal_api_gateway_ip': '198.51.100.129', - 'internal_api_host_routes': [], - 'internal_api_vlan_id': '20', - 'tripleo_role_networks': ['ctlplane', 'internal_api'] - }}) - ]), ret) - - def test__inventory_from_neutron_data(self): - ret = OrderedDict() - children = set() - fake_ports = (neutron_fakes.controller0_ports + - neutron_fakes.controller1_ports + - neutron_fakes.compute_0_ports) - self.inventory.neutron_data = NeutronData( - networks=neutron_fakes.fake_networks, - subnets=neutron_fakes.fake_subnets, - ports=fake_ports) - - self.inventory._inventory_from_neutron_data(ret, children, False) - self.assertEqual({'Compute', 'Controller'}, children) - self.assertEqual(OrderedDict([ - ('Controller', - {'hosts': { - 'c-0': { - 'ansible_host': '192.0.2.10', - 'canonical_hostname': 'c-0.example.com', - 'ctlplane_hostname': 'c-0.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.10', - 'internal_api_hostname': 'c-0.internalapi.example.com', - 'internal_api_ip': '198.51.100.140'}, - 'c-1': { - 'ansible_host': '192.0.2.11', - 'canonical_hostname': 'c-1.example.com', - 'ctlplane_hostname': 'c-1.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.11', - 'internal_api_hostname': 'c-1.internalapi.example.com', - 'internal_api_ip': '198.51.100.141'}}, - 'vars': {'ansible_ssh_user': 'heat-admin', - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', - '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': '192.0.2.1'}], - 'ctlplane_mtu': 1500, - 'ctlplane_subnet_cidr': '24', - 'ctlplane_vlan_id': '1', - 'internal_api_cidr': '25', - 'internal_api_dns_nameservers': [], - 'internal_api_gateway_ip': '198.51.100.129', - 'internal_api_host_routes': [], - 'internal_api_mtu': 1500, - 'internal_api_vlan_id': '20', - 'networks_all': ['InternalApi'], - 'networks_lower': {'InternalApi': 'internal_api', - 'ctlplane': 'ctlplane'}, - 'role_networks': ['InternalApi'], - 'serial': 1, - 'tripleo_role_name': 'Controller', - 'tripleo_role_networks': ['ctlplane', 'internal_api'] - }}), - ('Compute', - {'hosts': { - 'cp-0': { - 'ansible_host': '192.0.2.20', - 'canonical_hostname': 'cp-0.example.com', - 'ctlplane_hostname': 'cp-0.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.20', - 'internal_api_hostname': 'cp-0.internalapi.example.com', - 'internal_api_ip': '198.51.100.150'}}, - 'vars': {'ansible_ssh_user': 'heat-admin', - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', - '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': 
'192.0.2.1'}], - 'ctlplane_mtu': 1500, - 'ctlplane_subnet_cidr': '24', - 'ctlplane_vlan_id': '1', - 'internal_api_cidr': '25', - 'internal_api_dns_nameservers': [], - 'internal_api_gateway_ip': '198.51.100.129', - 'internal_api_host_routes': [], - 'internal_api_mtu': 1500, - 'internal_api_vlan_id': '20', - 'networks_all': ['InternalApi'], - 'networks_lower': {'InternalApi': 'internal_api', - 'ctlplane': 'ctlplane'}, - 'role_networks': ['InternalApi'], - 'serial': 1, - 'tripleo_role_name': 'Compute', - 'tripleo_role_networks': ['ctlplane', 'internal_api'] - }}), - ('overcloud', {'children': {'Compute': {}, 'Controller': {}}}), - ('allovercloud', {'children': {'overcloud': {}}}) - ]), ret) - - def test__inventory_from_neutron_data_dynamic(self): - ret = OrderedDict() - children = set() - fake_ports = (neutron_fakes.controller0_ports + - neutron_fakes.controller1_ports + - neutron_fakes.compute_0_ports) - self.inventory.neutron_data = NeutronData( - networks=neutron_fakes.fake_networks, - subnets=neutron_fakes.fake_subnets, - ports=fake_ports) - - self.inventory._inventory_from_neutron_data(ret, children, True) - self.assertEqual({'Compute', 'Controller'}, children) - self.assertEqual(OrderedDict([ - ('Controller', { - 'hosts': ['c-0', 'c-1'], - 'vars': {'ansible_ssh_user': 'heat-admin', - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', - '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': '192.0.2.1'}], - 'ctlplane_mtu': 1500, - 'ctlplane_vlan_id': '1', - 'internal_api_cidr': '25', - 'internal_api_dns_nameservers': [], - 'internal_api_gateway_ip': '198.51.100.129', - 'internal_api_host_routes': [], - 'internal_api_mtu': 1500, - 'ctlplane_subnet_cidr': '24', - 'internal_api_vlan_id': '20', - 'networks_all': ['InternalApi'], - 'networks_lower': {'InternalApi': 'internal_api', - 'ctlplane': 'ctlplane'}, - 'role_networks': ['InternalApi'], - 'serial': 1, - 'tripleo_role_name': 'Controller', - 'tripleo_role_networks': ['ctlplane', 'internal_api'] - }}), - ('Compute', { - 'hosts': ['cp-0'], - 'vars': {'ansible_ssh_user': 'heat-admin', - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', - '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': '192.0.2.1'}], - 'ctlplane_mtu': 1500, - 'ctlplane_vlan_id': '1', - 'internal_api_cidr': '25', - 'internal_api_dns_nameservers': [], - 'internal_api_gateway_ip': '198.51.100.129', - 'internal_api_host_routes': [], - 'internal_api_mtu': 1500, - 'ctlplane_subnet_cidr': '24', - 'internal_api_vlan_id': '20', - 'networks_all': ['InternalApi'], - 'networks_lower': {'InternalApi': 'internal_api', - 'ctlplane': 'ctlplane'}, - 'role_networks': ['InternalApi'], - 'serial': 1, - 'tripleo_role_name': 'Compute', - 'tripleo_role_networks': ['ctlplane', 'internal_api'] - }}), - ('overcloud', {'children': ['Compute', 'Controller']}), - ('allovercloud', {'children': ['overcloud']}) - ]), ret) - - @mock.patch.object(TripleoInventory, '_get_neutron_data', autospec=True) - def test_inventory_list_with_neutron_and_heat(self, mock_get_neutron_data): - fake_ports = (neutron_fakes.controller0_ports + - neutron_fakes.controller1_ports + - neutron_fakes.controller2_ports + - neutron_fakes.compute_0_ports + - neutron_fakes.custom_0_ports) - mock_get_neutron_data.return_value = NeutronData( - networks=neutron_fakes.fake_networks, - subnets=neutron_fakes.fake_subnets, - ports=fake_ports) - inv_list = self.inventory.list(dynamic=False) 
- c_0 = inv_list['Controller']['hosts']['c-0'] - c_1 = inv_list['Controller']['hosts']['c-1'] - c_2 = inv_list['Controller']['hosts']['c-2'] - cp_0 = inv_list['Compute']['hosts']['cp-0'] - cs_0 = inv_list['CustomRole']['hosts']['cs-0'] - - # The setdefault pattern should always put the value discovered first - # in the inventory; the neutron source runs prior to the heat stack source. - # Assert that IP addresses from the neutron fake are used in the - # inventory, not the heat stack IPs. - - # Controller - self.assertNotEqual( - c_0['ctlplane_ip'], - self.outputs['RoleNetIpMap']['Controller']['ctlplane'][0]) - self.assertNotEqual( - c_0['ansible_host'], - self.outputs['RoleNetIpMap']['Controller']['ctlplane'][0]) - self.assertNotEqual( - c_1['ctlplane_ip'], - self.outputs['RoleNetIpMap']['Controller']['ctlplane'][1]) - self.assertNotEqual( - c_1['ansible_host'], - self.outputs['RoleNetIpMap']['Controller']['ctlplane'][1]) - self.assertNotEqual( - c_2['ctlplane_ip'], - self.outputs['RoleNetIpMap']['Controller']['ctlplane'][2]) - self.assertNotEqual( - c_2['ansible_host'], - self.outputs['RoleNetIpMap']['Controller']['ctlplane'][2]) - # Compute - self.assertNotEqual( - cp_0['ctlplane_ip'], - self.outputs['RoleNetIpMap']['Compute']['ctlplane'][0]) - self.assertNotEqual( - cp_0['ansible_host'], - self.outputs['RoleNetIpMap']['Compute']['ctlplane'][0]) - # CustomRole - self.assertNotEqual( - cs_0['ctlplane_ip'], - self.outputs['RoleNetIpMap']['CustomRole']['ctlplane'][0]) - self.assertNotEqual( - cs_0['ansible_host'], - self.outputs['RoleNetIpMap']['CustomRole']['ctlplane'][0]) - - # IPs and hostnames are from neutron, while deploy_server_id and - # bootstrap_server_id, serial, etc. are from heat. - expected = { - 'Undercloud': { - 'hosts': {'undercloud': {}}, - 'vars': {'ansible_connection': 'local', - 'ansible_host': 'localhost', - 'ansible_python_interpreter': sys.executable, - 'ansible_remote_tmp': '/tmp/ansible-${USER}', - 'any_error_fatal': True, - 'max_fail_percentage': 0, - 'overcloud_admin_password': 'theadminpw', - 'overcloud_keystone_url': 'xyz://keystone', - 'plan': 'overcloud', - 'undercloud_service_list': [ - 'tripleo_ironic_conductor']} - }, - 'Controller': { - 'hosts': { - 'c-0': { - 'ansible_host': '192.0.2.10', - 'canonical_hostname': 'c-0.example.com', - 'ctlplane_hostname': 'c-0.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.10', - 'deploy_server_id': 'a', - 'internal_api_hostname': 'c-0.internalapi.example.com', - 'internal_api_ip': '198.51.100.140'}, - 'c-1': { - 'ansible_host': '192.0.2.11', - 'canonical_hostname': 'c-1.example.com', - 'ctlplane_hostname': 'c-1.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.11', - 'deploy_server_id': 'b', - 'internal_api_hostname': 'c-1.internalapi.example.com', - 'internal_api_ip': '198.51.100.141'}, - 'c-2': { - 'ansible_host': '192.0.2.12', - 'canonical_hostname': 'c-2.example.com', - 'ctlplane_hostname': 'c-2.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.12', - 'deploy_server_id': 'c', - 'internal_api_hostname': 'c-2.internalapi.example.com', - 'internal_api_ip': '198.51.100.142'}}, - 'vars': { - 'ansible_ssh_user': 'heat-admin', - 'bootstrap_server_id': 'a', - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': '192.0.2.1'}], - 'ctlplane_mtu': 1500, - 'ctlplane_subnet_cidr': '24', - 'ctlplane_vlan_id': '1', - 'internal_api_cidr': '25', - 'internal_api_dns_nameservers': [], - 'internal_api_gateway_ip':
'198.51.100.129', - 'internal_api_host_routes': [], - 'internal_api_mtu': 1500, - 'internal_api_vlan_id': '20', - 'networks_all': ['InternalApi'], - 'networks_lower': {'InternalApi': 'internal_api', - 'ctlplane': 'ctlplane'}, - 'role_networks': ['InternalApi'], - 'serial': 1, - 'tripleo_role_name': 'Controller', - 'tripleo_role_networks': ['ctlplane', 'internal_api']} - }, - 'Compute': { - 'hosts': { - 'cp-0': { - 'ansible_host': '192.0.2.20', - 'canonical_hostname': 'cp-0.example.com', - 'ctlplane_hostname': 'cp-0.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.20', - 'deploy_server_id': 'd', - 'internal_api_hostname': - 'cp-0.internalapi.example.com', - 'internal_api_ip': '198.51.100.150'}}, - 'vars': {'ansible_ssh_user': 'heat-admin', - 'bootstrap_server_id': 'a', - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', - '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': '192.0.2.1'}], - 'ctlplane_mtu': 1500, - 'ctlplane_subnet_cidr': '24', - 'ctlplane_vlan_id': '1', - 'internal_api_cidr': '25', - 'internal_api_dns_nameservers': [], - 'internal_api_gateway_ip': '198.51.100.129', - 'internal_api_host_routes': [], - 'internal_api_mtu': 1500, - 'internal_api_vlan_id': '20', - 'networks_all': ['InternalApi'], - 'networks_lower': {'InternalApi': 'internal_api', - 'ctlplane': 'ctlplane'}, - 'role_networks': ['InternalApi'], - 'serial': 1, - 'tripleo_role_name': 'Compute', - 'tripleo_role_networks': ['ctlplane', 'internal_api']} - }, - 'CustomRole': { - 'hosts': { - 'cs-0': { - 'ansible_host': '192.0.2.200', - 'canonical_hostname': 'cs-0.example.com', - 'ctlplane_hostname': 'cs-0.ctlplane.example.com', - 'ctlplane_ip': '192.0.2.200', - 'deploy_server_id': 'e'}}, - 'vars': {'ansible_ssh_user': 'heat-admin', - 'bootstrap_server_id': 'a', - 'ctlplane_cidr': '24', - 'ctlplane_dns_nameservers': ['192.0.2.253', - '192.0.2.254'], - 'ctlplane_gateway_ip': '192.0.2.1', - 'ctlplane_host_routes': [{'default': True, - 'nexthop': '192.0.2.1'}], - 'ctlplane_mtu': 1500, - 'ctlplane_subnet_cidr': '24', - 'ctlplane_vlan_id': '1', - 'internal_api_mtu': 1500, - 'networks_all': ['InternalApi'], - 'networks_lower': {'InternalApi': 'internal_api', - 'ctlplane': 'ctlplane'}, - 'role_networks': [], - 'serial': 1, - 'tripleo_role_name': 'CustomRole', - 'tripleo_role_networks': ['ctlplane']} - }, - 'overcloud': { - 'children': {'Compute': {}, - 'Controller': {}, - 'CustomRole': {}}, - 'vars': {'container_cli': 'podman', - 'ctlplane_vip': 'x.x.x.4', - 'redis_vip': 'x.x.x.6'} - }, - 'allovercloud': {'children': {'overcloud': {}}}, - 'sa': {'children': {'Controller': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'se': {'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'sd': {'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'sb': {'children': {'Controller': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'sg': {'children': {'CustomRole': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'ceph_client': {'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'sh': {'children': {'CustomRole': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - 'clients': {'children': {'Compute': {}}, - 'vars': {'ansible_ssh_user': 'heat-admin'}}, - } - for k in expected: - self.assertEqual(expected[k], inv_list[k]) - - def test__extend_inventory(self): - dynamic = False - existing_inventory = OrderedDict() - existing_inventory.update({ - 'RoleA': { - 'hosts': { - 
'host0': { - 'existing': 'existing_value' - } - }, - 'vars': { - 'existing': 'existing_value' - }, - } - }) - extend_data = { - 'RoleA': { - 'hosts': { - 'host0': { - 'new': 'new_value', - 'existing': 'not_overwritten', - } - }, - 'vars': { - 'new': 'new_var_is_added', - 'existing': 'not_overwritten', - }, - } - } - expected_inventory = OrderedDict([( - 'RoleA', { - 'hosts': { - 'host0': { - 'existing': 'existing_value', - 'new': 'new_value' - } - }, - 'vars': { - 'existing': 'existing_value', - 'new': 'new_var_is_added' - } - } - )]) - - self.inventory._extend_inventory(existing_inventory, dynamic, - data=extend_data) - self.assertEqual(expected_inventory, existing_inventory) - - def test__extend_inventory_dynamic(self): - dynamic = True - existing_inventory = OrderedDict() - existing_inventory.update({ - 'RoleA': { - 'hosts': { - 'host0': { - 'existing': 'existing_value' - } - }, - 'vars': { - 'existing': 'existing_value' - }, - } - }) - extend_data = { - 'RoleA': { - 'hosts': { - 'host0': { - 'new': 'new_value', - 'existing': 'not_overwritten', - } - }, - 'vars': { - 'new': 'new_var_is_added', - 'existing': 'not_overwritten', - }, - } - } - expected_inventory = OrderedDict([( - 'RoleA', { - 'hosts': ['host0'], - 'vars': - {'existing': 'existing_value', - 'new': 'new_var_is_added'}})]) - - self.inventory._extend_inventory(existing_inventory, dynamic, - data=extend_data) - self.assertEqual(expected_inventory, existing_inventory) - self.assertEqual( - {'host0': {'existing': 'existing_value', - 'new': 'new_value'}}, self.inventory.hostvars) - - -class TestNeutronData(base.TestCase): - def setUp(self): - super(TestNeutronData, self).setUp() - fake_ports = (neutron_fakes.controller0_ports + - neutron_fakes.controller1_ports + - neutron_fakes.compute_0_ports) - self.neutron_data = NeutronData(networks=neutron_fakes.fake_networks, - subnets=neutron_fakes.fake_subnets, - ports=fake_ports) - - def test__tags_to_dict(self): - tags = ['tripleo_foo=foo', 'tripleo_bar=bar', 'other_tag'] - self.assertEqual({'tripleo_foo': 'foo', 'tripleo_bar': 'bar'}, - NeutronData._tags_to_dict(self, tags)) - - def test__networks_by_id(self): - self.assertEqual({ - 'ctlplane_network_id': { - 'dns_domain': 'ctlplane.example.com.', - 'mtu': 1500, - 'name': 'ctlplane', - 'name_upper': 'ctlplane', - 'subnet_ids': ['ctlplane_subnet_id'], - 'tags': {}}, - 'internal_api_network_id': { - 'dns_domain': 'internalapi.example.com.', - 'mtu': 1500, - 'name': 'internal_api', - 'name_upper': 'InternalApi', - 'subnet_ids': ['internal_api_subnet_id'], - 'tags': {'tripleo_net_idx': 0, - 'tripleo_network_name': 'InternalApi', - 'tripleo_vip': True} - }, - }, self.neutron_data.networks_by_id) - - def test__subnets_by_id(self): - self.assertEqual({ - 'ctlplane_subnet_id': { - 'cidr': '192.0.2.0/24', - 'dns_nameservers': ['192.0.2.253', '192.0.2.254'], - 'gateway_ip': '192.0.2.1', - 'host_routes': [], - 'ip_version': 4, - 'name': 'ctlplane-subnet', - 'network_id': 'ctlplane_network_id', - 'tags': {} - }, - 'internal_api_subnet_id': { - 'cidr': '198.51.100.128/25', - 'dns_nameservers': [], - 'gateway_ip': '198.51.100.129', - 'host_routes': [], - 'ip_version': 4, - 'name': 'internal_api_subnet', - 'network_id': 'internal_api_network_id', - 'tags': {'tripleo_vlan_id': '20'} - }, - }, self.neutron_data.subnets_by_id) - - def test__ports_by_role_and_host(self): - self.assertTrue( - 'Controller' in self.neutron_data.ports_by_role_and_host) - self.assertTrue( - 'Compute' in self.neutron_data.ports_by_role_and_host) - ctr_role = 
self.neutron_data.ports_by_role_and_host['Controller'] - cmp_role = self.neutron_data.ports_by_role_and_host['Compute'] - self.assertTrue('c-0' in ctr_role) - self.assertTrue('c-1' in ctr_role) - ctr_0 = ctr_role['c-0'] - ctr_1 = ctr_role['c-1'] - self.assertTrue('cp-0' in cmp_role) - cmp_0 = cmp_role['cp-0'] - self.assertEqual( - [{'cidr': '24', - 'dns_domain': 'ctlplane.example.com', - 'dns_nameservers': ['192.0.2.253', '192.0.2.254'], - 'fixed_ips': [{'ip_address': '192.0.2.10', - 'subnet_id': 'ctlplane_subnet_id'}], - 'gateway_ip': '192.0.2.1', - 'host_routes': [{'default': True, 'nexthop': '192.0.2.1'}], - 'hostname': 'c-0', - 'ip_address': '192.0.2.10', - 'mtu': 1500, - 'name': 'c-0-ctlplane', - 'network_id': 'ctlplane_network_id', - 'network_name': 'ctlplane', - 'subnet_id': 'ctlplane_subnet_id', - 'tags': {'tripleo_default_route': True, - 'tripleo_network_name': 'ctlplane', - 'tripleo_role': 'Controller', - 'tripleo_stack': 'overcloud'}, - 'vlan_id': '1'}, - {'cidr': '25', - 'dns_domain': 'internalapi.example.com', - 'dns_nameservers': [], - 'fixed_ips': [{'ip_address': '198.51.100.140', - 'subnet_id': 'internal_api_subnet_id'}], - 'gateway_ip': '198.51.100.129', - 'host_routes': [], - 'hostname': 'c-0', - 'ip_address': '198.51.100.140', - 'mtu': 1500, - 'name': 'c-0-internal_api', - 'network_id': 'internal_api_network_id', - 'network_name': 'internal_api', - 'subnet_id': 'internal_api_subnet_id', - 'tags': {'tripleo_default_route': False, - 'tripleo_network_name': 'InternalApi', - 'tripleo_role': 'Controller', - 'tripleo_stack': 'overcloud'}, - 'vlan_id': '20'}], - ctr_0 - ) - self.assertEqual( - [{'cidr': '24', - 'dns_domain': 'ctlplane.example.com', - 'dns_nameservers': ['192.0.2.253', '192.0.2.254'], - 'fixed_ips': [{'ip_address': '192.0.2.11', - 'subnet_id': 'ctlplane_subnet_id'}], - 'gateway_ip': '192.0.2.1', - 'host_routes': [{'default': True, 'nexthop': '192.0.2.1'}], - 'hostname': 'c-1', - 'ip_address': '192.0.2.11', - 'mtu': 1500, - 'name': 'c-1-ctlplane', - 'network_id': 'ctlplane_network_id', - 'network_name': 'ctlplane', - 'subnet_id': 'ctlplane_subnet_id', - 'tags': {'tripleo_default_route': True, - 'tripleo_network_name': 'ctlplane', - 'tripleo_role': 'Controller', - 'tripleo_stack': 'overcloud'}, - 'vlan_id': '1'}, - {'cidr': '25', - 'dns_domain': 'internalapi.example.com', - 'dns_nameservers': [], - 'fixed_ips': [{'ip_address': '198.51.100.141', - 'subnet_id': 'internal_api_subnet_id'}], - 'gateway_ip': '198.51.100.129', - 'host_routes': [], - 'hostname': 'c-1', - 'ip_address': '198.51.100.141', - 'mtu': 1500, - 'name': 'c-1-internal_api', - 'network_id': 'internal_api_network_id', - 'network_name': 'internal_api', - 'subnet_id': 'internal_api_subnet_id', - 'tags': {'tripleo_default_route': False, - 'tripleo_network_name': 'InternalApi', - 'tripleo_role': 'Controller', - 'tripleo_stack': 'overcloud'}, - 'vlan_id': '20'}], - ctr_1 - ) - self.assertEqual( - [{'cidr': '24', - 'dns_domain': 'ctlplane.example.com', - 'dns_nameservers': ['192.0.2.253', '192.0.2.254'], - 'fixed_ips': [{'ip_address': '192.0.2.20', - 'subnet_id': 'ctlplane_subnet_id'}], - 'gateway_ip': '192.0.2.1', - 'host_routes': [{'default': True, 'nexthop': '192.0.2.1'}], - 'hostname': 'cp-0', - 'ip_address': '192.0.2.20', - 'mtu': 1500, - 'name': 'cp-0-ctlplane', - 'network_id': 'ctlplane_network_id', - 'network_name': 'ctlplane', - 'subnet_id': 'ctlplane_subnet_id', - 'tags': {'tripleo_default_route': True, - 'tripleo_network_name': 'ctlplane', - 'tripleo_role': 'Compute', - 'tripleo_stack': 
'overcloud'}, - 'vlan_id': '1'}, - {'cidr': '25', - 'dns_domain': 'internalapi.example.com', - 'dns_nameservers': [], - 'fixed_ips': [{'ip_address': '198.51.100.150', - 'subnet_id': 'internal_api_subnet_id'}], - 'gateway_ip': '198.51.100.129', - 'host_routes': [], - 'hostname': 'cp-0', - 'ip_address': '198.51.100.150', - 'mtu': 1500, - 'name': 'cp-0-internal_api', - 'network_id': 'internal_api_network_id', - 'network_name': 'internal_api', - 'subnet_id': 'internal_api_subnet_id', - 'tags': {'tripleo_default_route': False, - 'tripleo_network_name': 'InternalApi', - 'tripleo_role': 'Compute', - 'tripleo_stack': 'overcloud'}, - 'vlan_id': '20'}], - cmp_0 - ) - self.assertEqual({'Controller': ctr_role, 'Compute': cmp_role}, - self.neutron_data.ports_by_role_and_host) diff --git a/tripleo_common/tests/test_update.py b/tripleo_common/tests/test_update.py deleted file mode 100644 index 12e8e19bf..000000000 --- a/tripleo_common/tests/test_update.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from unittest import mock - -from tripleo_common.tests import base -from tripleo_common import update - - -class TestUpdate(base.TestCase): - - def setUp(self): - super(TestUpdate, self).setUp() - - def test_successful_search_stack(self): - test_stack = [{'one': {'one_1': 'nope'}}, - {'two': [{'two_1': {'two_1_2': 'nope'}}, - {'two_2': [{'two_2_1': 'nope'}, - {'two_2_2': 'nope'}]}]}, - {'three': [{'three_1': {'three_1_2': 'nope'}}, - {'three_2': [{'three_2_1': 'nope'}, - {'three_2_2': { - 'target': ['val1', 'val2', - 'val3']}}]}]}] - result = update.search_stack(test_stack, 'target') - self.assertEqual(['val1', 'val2', 'val3'], result) - - def test_failed_search_stack(self): - test_stack = [{'one': {'one_1': 'nope'}}, - {'two': [{'two_1': {'two_1_2': 'nope'}}, - {'two_2': [{'two_2_1': 'nope'}, - {'two_2_2': 'nope'}]}]}, - {'three': [{'three_1': {'three_1_2': 'nope'}}, - {'three_2': [{'three_2_1': 'nope'}, - {'three_2_2': { - 'target': ['val1', 'val2', - 'val3']}}]}]}] - result = update.search_stack(test_stack, 'missing-target') - self.assertIsNone(result) - - def test_exclusive_neutron_drivers_not_found(self): - self.assertIsNone( - update.get_exclusive_neutron_driver(None)) - self.assertIsNone( - update.get_exclusive_neutron_driver('sriovnicswitch')) - self.assertIsNone( - update.get_exclusive_neutron_driver(['sriovnicswitch'])) - - def test_exclusive_neutron_drivers_found(self): - for ex in ['ovn', ['ovn'], ['sriovnicswitch', 'ovn']]: - self.assertEqual('ovn', - update.get_exclusive_neutron_driver(ex)) - for ex in ['openvswitch', ['openvswitch'], - ['sriovnicswitch', 'openvswitch']]: - self.assertEqual('openvswitch', - update.get_exclusive_neutron_driver(ex)) - - @mock.patch('tripleo_common.update.search_stack', - autospec=True) - def test_update_check_mechanism_drivers_force_update(self, - mock_search_stack): - env = {'parameter_defaults': {'ForceNeutronDriverUpdate': True}} - stack = mock.Mock() -
update.check_neutron_mechanism_drivers(env, stack, None, None) - self.assertFalse(mock_search_stack.called) - - @mock.patch('tripleo_common.update.get_exclusive_neutron_driver', - return_value='ovn') - @mock.patch('tripleo_common.update.search_stack', - autospec=True) - def test_update_check_mechanism_drivers_match_stack_env(self, - mock_search_stack, - mock_ex_driver): - env = {'parameter_defaults': { - 'ForceNeutronDriverUpdate': False, - 'NeutronMechanismDrivers': 'ovn' - }} - stack = mock.Mock() - self.assertIsNone(update.check_neutron_mechanism_drivers( - env, stack, None, None)) - - @mock.patch('tripleo_common.update.search_stack', - return_value='openvswitch') - def test_update_check_mechanism_drivers_mismatch_stack_env( - self, mock_search_stack): - env = {'parameter_defaults': { - 'ForceNeutronDriverUpdate': False - }} - stack = mock.Mock() - plan_client = mock.Mock() - plan_client.get_object.return_value = ( - 0, 'parameters:\n NeutronMechanismDrivers: {default: ovn}\n') - self.assertIsNotNone(update.check_neutron_mechanism_drivers( - env, stack, plan_client, None)) - - @mock.patch('tripleo_common.update.search_stack', - return_value='ovn') - def test_update_check_mechanism_drivers_match_stack_template( - self, mock_search_stack): - env = {'parameter_defaults': { - 'ForceNeutronDriverUpdate': False - }} - stack = mock.Mock() - plan_client = mock.Mock() - plan_client.get_object.return_value = ( - 0, 'parameters:\n NeutronMechanismDrivers: {default: ovn}\n') - self.assertIsNone(update.check_neutron_mechanism_drivers( - env, stack, plan_client, None)) diff --git a/tripleo_common/tests/utils/__init__.py b/tripleo_common/tests/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/tests/utils/data/Compute b/tripleo_common/tests/utils/data/Compute deleted file mode 100644 index 6f1436f77..000000000 --- a/tripleo_common/tests/utils/data/Compute +++ /dev/null @@ -1,2 +0,0 @@ -max_fail_percentage: 15 -any_errors_fatal: True diff --git a/tripleo_common/tests/utils/data/Controller b/tripleo_common/tests/utils/data/Controller deleted file mode 100644 index a3e005aa8..000000000 --- a/tripleo_common/tests/utils/data/Controller +++ /dev/null @@ -1,5 +0,0 @@ -max_fail_percentage: 15 -any_errors_fatal: True -chrony_host: 192.168.2.1 -chrony_acl: none -chrony_foo: bar diff --git a/tripleo_common/tests/utils/data/config_data.yaml b/tripleo_common/tests/utils/data/config_data.yaml deleted file mode 100644 index 8218a5e05..000000000 --- a/tripleo_common/tests/utils/data/config_data.yaml +++ /dev/null @@ -1,138 +0,0 @@ -deployments: - - server: 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - deployment: a08ac0c0-7168-4fc5-b3fa-98d83b388676 - config: 50fcb6ee-4ff2-4318-b734-d3a7b45a8d6d - name: ControllerHostEntryDeployment - - server: 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - deployment: 01161ba4-a7a5-4a3e-b82f-f80b7537e866 - config: 613b3009-7708-4b06-8c2a-56f8cd379aff - name: NetworkDeployment - - server: a7db3010-a51f-4ae0-a791-2364d629d20d - deployment: eaf3757f-315a-416c-8975-684a0d41f6bf - config: 26db1678-fe18-4acb-86db-19885fccae4c - name: ComputeHostEntryDeployment - - server: 8b07cd31-3083-4b88-a433-955f72039e2c - deployment: 5b464758-b4ff-4bc7-bb37-fec04db932f1 - config: 26db1678-fe18-4acb-86db-19885fccae4c - name: ComputeHostEntryDeployment - - server: 169b46f8-1965-4d90-a7de-f36fb4a830fe - deployment: 0ace55f9-fd01-44d5-af4b-6da663633f2a - config: 26db1678-fe18-4acb-86db-19885fccae4c - name: ComputeHostEntryDeployment - - server: 
a7db3010-a51f-4ae0-a791-2364d629d20d - deployment: 6054566d-8ab0-483c-a3a7-70913032d9ed - config: d21aefd1-9d94-44c0-b73a-16c8d4bc605c - name: NetworkDeployment - - server: 8b07cd31-3083-4b88-a433-955f72039e2c - deployment: c3a59b6a-a9e6-4ac1-9ef0-c72dfe5242bd - config: d21aefd1-9d94-44c0-b73a-16c8d4bc605c - name: NetworkDeployment - - server: 169b46f8-1965-4d90-a7de-f36fb4a830fe - deployment: 6f37e874-4380-4252-8665-31b4c5654627 - config: d21aefd1-9d94-44c0-b73a-16c8d4bc605c - name: NetworkDeployment - - - server: 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - deployment: 8967f92f-3232-46ee-9e4f-b7d3b5433163 - config: ac63372f-bddd-45a0-aca9-6f0950d581e2 - name: MyPostConfig - - server: 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - deployment: 7a07b2dc-cebf-474c-a8f1-d7c93b299555 - config: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - name: MyExtraConfigPost - - server: a7db3010-a51f-4ae0-a791-2364d629d20d - deployment: 2acaf5d0-c170-449b-8015-37c2598baaec - config: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - name: MyExtraConfigPost - - server: 8b07cd31-3083-4b88-a433-955f72039e2c - deployment: 7e0f2650-b1b9-410e-96a6-4745ca735125 - config: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - name: MyExtraConfigPost - - server: 169b46f8-1965-4d90-a7de-f36fb4a830fe - deployment: 5096a321-64f9-4f07-a74c-53f2b029e62e - config: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - name: MyExtraConfigPost - - # Only applies to overcloud-novacompute-2 - - server: 169b46f8-1965-4d90-a7de-f36fb4a830fe - deployment: 05c94b5d-59ad-484e-b120-e271c2e100ed - config: 391263ba-3d06-43b6-a47e-481808aaff20 - name: AnsibleDeployment - -configs: - 50fcb6ee-4ff2-4318-b734-d3a7b45a8d6d: - id: 50fcb6ee-4ff2-4318-b734-d3a7b45a8d6d - config: | - #!/bin/bash - echo "A script" - outputs: [] - group: script - 613b3009-7708-4b06-8c2a-56f8cd379aff: - id: 613b3009-7708-4b06-8c2a-56f8cd379aff - config: | - #!/bin/bash - echo "Another script" - outputs: [] - group: script - 26db1678-fe18-4acb-86db-19885fccae4c: - id: 26db1678-fe18-4acb-86db-19885fccae4c - config: - hiera_key1: hiera_value1 - hiera_key2: hiera_value2 - outputs: [] - group: hiera - d21aefd1-9d94-44c0-b73a-16c8d4bc605c: - id: d21aefd1-9d94-44c0-b73a-16c8d4bc605c - config: - hiera_key3: hiera_value3 - hiera_key4: - hiera_key4_subkey0: hiera_key4_subvalue0 - hiera_key4_subkey1: hiera_key4_subvalue1 - outputs: [] - group: hiera - ac63372f-bddd-45a0-aca9-6f0950d581e2: - id: ac63372f-bddd-45a0-aca9-6f0950d581e2 - config: | - #!/bin/bash - echo "A PostConfig script" - outputs: [] - group: script - 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e: - id: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - config: | - #!/bin/bash - echo "An ExtraConfigPost script" - outputs: [] - group: script - 391263ba-3d06-43b6-a47e-481808aaff20: - id: 391263ba-3d06-43b6-a47e-481808aaff20 - group: ansible - outputs: [] - config: | - tasks: - - name: An Ansible task - copy: - content: "{{ some_hostvar | to_json }}" - dest: /some/path - -servers: - - physical_resource_id: 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - name: overcloud-controller-0 - OS::stack_id: 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - - physical_resource_id: a7db3010-a51f-4ae0-a791-2364d629d20d - name: overcloud-novacompute-0 - OS::stack_id: a7db3010-a51f-4ae0-a791-2364d629d20d - - physical_resource_id: 8b07cd31-3083-4b88-a433-955f72039e2c - name: overcloud-novacompute-1 - OS::stack_id: 8b07cd31-3083-4b88-a433-955f72039e2c - - physical_resource_id: 169b46f8-1965-4d90-a7de-f36fb4a830fe - name: overcloud-novacompute-2 - OS::stack_id: 169b46f8-1965-4d90-a7de-f36fb4a830fe - -server_id_data: - Controller: - 
- 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - Compute: - - a7db3010-a51f-4ae0-a791-2364d629d20d - - 8b07cd31-3083-4b88-a433-955f72039e2c - - 169b46f8-1965-4d90-a7de-f36fb4a830fe diff --git a/tripleo_common/tests/utils/data/host_vars/overcloud-controller-0 b/tripleo_common/tests/utils/data/host_vars/overcloud-controller-0 deleted file mode 100644 index 14de724db..000000000 --- a/tripleo_common/tests/utils/data/host_vars/overcloud-controller-0 +++ /dev/null @@ -1,12 +0,0 @@ -pre_network_Controller: [] - -pre_deployments_Controller: - - ControllerHostEntryDeployment - - NetworkDeployment - - MyPostConfig - - MyExtraConfigPost - -post_deployments_Controller: [] - -uuid: 0 -my_var: 'foo' diff --git a/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-0 b/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-0 deleted file mode 100644 index c7460b8d0..000000000 --- a/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-0 +++ /dev/null @@ -1,10 +0,0 @@ -pre_network_Compute: [] - -pre_deployments_Compute: - - ComputeHostEntryDeployment - - NetworkDeployment - - MyExtraConfigPost - -post_deployments_Compute: [] - -uuid: 1 diff --git a/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-1 b/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-1 deleted file mode 100644 index ccd90109a..000000000 --- a/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-1 +++ /dev/null @@ -1,10 +0,0 @@ -pre_network_Compute: [] - -pre_deployments_Compute: - - ComputeHostEntryDeployment - - NetworkDeployment - - MyExtraConfigPost - -post_deployments_Compute: [] - -uuid: 2 diff --git a/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-2 b/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-2 deleted file mode 100644 index eb15e09c9..000000000 --- a/tripleo_common/tests/utils/data/host_vars/overcloud-novacompute-2 +++ /dev/null @@ -1,11 +0,0 @@ -pre_network_Compute: [] - -pre_deployments_Compute: - - ComputeHostEntryDeployment - - NetworkDeployment - - MyExtraConfigPost - - AnsibleDeployment - -post_deployments_Compute: [] - -uuid: 3 diff --git a/tripleo_common/tests/utils/data/overcloud-controller-0/ControllerHostEntryDeployment b/tripleo_common/tests/utils/data/overcloud-controller-0/ControllerHostEntryDeployment deleted file mode 100644 index 4445a9d43..000000000 --- a/tripleo_common/tests/utils/data/overcloud-controller-0/ControllerHostEntryDeployment +++ /dev/null @@ -1,17 +0,0 @@ -ControllerHostEntryDeployment: - config: | - #!/bin/bash - echo "A script" - creation_time: "None" - deployment_name: ControllerHostEntryDeployment - group: script - id: 50fcb6ee-4ff2-4318-b734-d3a7b45a8d6d - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-controller-0/MyExtraConfigPost b/tripleo_common/tests/utils/data/overcloud-controller-0/MyExtraConfigPost deleted file mode 100644 index 2f0e63009..000000000 --- a/tripleo_common/tests/utils/data/overcloud-controller-0/MyExtraConfigPost +++ /dev/null @@ -1,18 +0,0 @@ -MyExtraConfigPost: - config: | - #!/bin/bash - echo "An ExtraConfigPost script" - creation_time: "None" - deployment_name: MyExtraConfigPost - group: script - id: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - name: None - options: None - outputs: - diff --git 
a/tripleo_common/tests/utils/data/overcloud-controller-0/MyPostConfig b/tripleo_common/tests/utils/data/overcloud-controller-0/MyPostConfig deleted file mode 100644 index 51c44340f..000000000 --- a/tripleo_common/tests/utils/data/overcloud-controller-0/MyPostConfig +++ /dev/null @@ -1,17 +0,0 @@ -MyPostConfig: - config: | - #!/bin/bash - echo "A PostConfig script" - creation_time: "None" - deployment_name: MyPostConfig - group: script - id: ac63372f-bddd-45a0-aca9-6f0950d581e2 - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-controller-0/NetworkDeployment b/tripleo_common/tests/utils/data/overcloud-controller-0/NetworkDeployment deleted file mode 100644 index d57932586..000000000 --- a/tripleo_common/tests/utils/data/overcloud-controller-0/NetworkDeployment +++ /dev/null @@ -1,17 +0,0 @@ -NetworkDeployment: - config: | - #!/bin/bash - echo "Another script" - creation_time: "None" - deployment_name: NetworkDeployment - group: script - id: 613b3009-7708-4b06-8c2a-56f8cd379aff - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 00b3a5e1-5e8e-4b55-878b-2fa2271f15ad - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-0/ComputeHostEntryDeployment b/tripleo_common/tests/utils/data/overcloud-novacompute-0/ComputeHostEntryDeployment deleted file mode 100644 index b910e77f8..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-0/ComputeHostEntryDeployment +++ /dev/null @@ -1,16 +0,0 @@ -ComputeHostEntryDeployment: - config: - {"hiera_key1": "hiera_value1", "hiera_key2": "hiera_value2"} - creation_time: "None" - deployment_name: ComputeHostEntryDeployment - group: hiera - id: 26db1678-fe18-4acb-86db-19885fccae4c - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - a7db3010-a51f-4ae0-a791-2364d629d20d - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-0/MyExtraConfigPost b/tripleo_common/tests/utils/data/overcloud-novacompute-0/MyExtraConfigPost deleted file mode 100644 index 6bf8dccf8..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-0/MyExtraConfigPost +++ /dev/null @@ -1,17 +0,0 @@ -MyExtraConfigPost: - config: | - #!/bin/bash - echo "An ExtraConfigPost script" - creation_time: "None" - deployment_name: MyExtraConfigPost - group: script - id: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - a7db3010-a51f-4ae0-a791-2364d629d20d - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-0/NetworkDeployment b/tripleo_common/tests/utils/data/overcloud-novacompute-0/NetworkDeployment deleted file mode 100644 index f16125d20..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-0/NetworkDeployment +++ /dev/null @@ -1,16 +0,0 @@ -NetworkDeployment: - config: - {"hiera_key3": "hiera_value3", "hiera_key4": {"hiera_key4_subkey1": "hiera_key4_subvalue1", "hiera_key4_subkey0": "hiera_key4_subvalue0"}} - creation_time: "None" - deployment_name: NetworkDeployment - group: hiera - id: d21aefd1-9d94-44c0-b73a-16c8d4bc605c - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - a7db3010-a51f-4ae0-a791-2364d629d20d - name: None - options: None - outputs: diff --git 
a/tripleo_common/tests/utils/data/overcloud-novacompute-1/ComputeHostEntryDeployment b/tripleo_common/tests/utils/data/overcloud-novacompute-1/ComputeHostEntryDeployment deleted file mode 100644 index da25d92eb..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-1/ComputeHostEntryDeployment +++ /dev/null @@ -1,16 +0,0 @@ -ComputeHostEntryDeployment: - config: - {"hiera_key1": "hiera_value1", "hiera_key2": "hiera_value2"} - creation_time: "None" - deployment_name: ComputeHostEntryDeployment - group: hiera - id: 26db1678-fe18-4acb-86db-19885fccae4c - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 8b07cd31-3083-4b88-a433-955f72039e2c - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-1/MyExtraConfigPost b/tripleo_common/tests/utils/data/overcloud-novacompute-1/MyExtraConfigPost deleted file mode 100644 index c6d6fff78..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-1/MyExtraConfigPost +++ /dev/null @@ -1,18 +0,0 @@ -MyExtraConfigPost: - config: | - #!/bin/bash - echo "An ExtraConfigPost script" - creation_time: "None" - deployment_name: MyExtraConfigPost - group: script - id: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 8b07cd31-3083-4b88-a433-955f72039e2c - name: None - options: None - outputs: - diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-1/NetworkDeployment b/tripleo_common/tests/utils/data/overcloud-novacompute-1/NetworkDeployment deleted file mode 100644 index 8ef0de079..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-1/NetworkDeployment +++ /dev/null @@ -1,16 +0,0 @@ -NetworkDeployment: - config: - {"hiera_key3": "hiera_value3", "hiera_key4": {"hiera_key4_subkey1": "hiera_key4_subvalue1", "hiera_key4_subkey0": "hiera_key4_subvalue0"}} - creation_time: "None" - deployment_name: NetworkDeployment - group: hiera - id: d21aefd1-9d94-44c0-b73a-16c8d4bc605c - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 8b07cd31-3083-4b88-a433-955f72039e2c - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-2/AnsibleDeployment b/tripleo_common/tests/utils/data/overcloud-novacompute-2/AnsibleDeployment deleted file mode 100644 index 8b165c737..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-2/AnsibleDeployment +++ /dev/null @@ -1,20 +0,0 @@ -AnsibleDeployment: - id: 391263ba-3d06-43b6-a47e-481808aaff20 - creation_time: "None" - deployment_name: AnsibleDeployment - name: None - options: None - group: ansible - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 169b46f8-1965-4d90-a7de-f36fb4a830fe - outputs: - config: | - tasks: - - name: An Ansible task - copy: - content: "{{ some_hostvar | to_json }}" - dest: /some/path diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-2/ComputeHostEntryDeployment b/tripleo_common/tests/utils/data/overcloud-novacompute-2/ComputeHostEntryDeployment deleted file mode 100644 index 83094bcf9..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-2/ComputeHostEntryDeployment +++ /dev/null @@ -1,16 +0,0 @@ -ComputeHostEntryDeployment: - config: - {"hiera_key1": "hiera_value1", "hiera_key2": "hiera_value2"} - creation_time: "None" - deployment_name: ComputeHostEntryDeployment - group: hiera - id: 26db1678-fe18-4acb-86db-19885fccae4c - inputs: - - name: 
deploy_server_id - description: None - type: None - value: |- - 169b46f8-1965-4d90-a7de-f36fb4a830fe - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-2/MyExtraConfigPost b/tripleo_common/tests/utils/data/overcloud-novacompute-2/MyExtraConfigPost deleted file mode 100644 index ad8be4f4b..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-2/MyExtraConfigPost +++ /dev/null @@ -1,17 +0,0 @@ -MyExtraConfigPost: - config: | - #!/bin/bash - echo "An ExtraConfigPost script" - creation_time: "None" - deployment_name: MyExtraConfigPost - group: script - id: 9ccb2f66-7cd8-4e3e-a034-4c1cfafd037e - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 169b46f8-1965-4d90-a7de-f36fb4a830fe - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/data/overcloud-novacompute-2/NetworkDeployment b/tripleo_common/tests/utils/data/overcloud-novacompute-2/NetworkDeployment deleted file mode 100644 index aa8fab359..000000000 --- a/tripleo_common/tests/utils/data/overcloud-novacompute-2/NetworkDeployment +++ /dev/null @@ -1,16 +0,0 @@ -NetworkDeployment: - config: - {"hiera_key3": "hiera_value3", "hiera_key4": {"hiera_key4_subkey1": "hiera_key4_subvalue1", "hiera_key4_subkey0": "hiera_key4_subvalue0"}} - creation_time: "None" - deployment_name: NetworkDeployment - group: hiera - id: d21aefd1-9d94-44c0-b73a-16c8d4bc605c - inputs: - - name: deploy_server_id - description: None - type: None - value: |- - 169b46f8-1965-4d90-a7de-f36fb4a830fe - name: None - options: None - outputs: diff --git a/tripleo_common/tests/utils/test_ansible.py b/tripleo_common/tests/utils/test_ansible.py deleted file mode 100644 index fd4112ff4..000000000 --- a/tripleo_common/tests/utils/test_ansible.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
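The AnsiblePlaybookTest cases below pin down the public call shape of tripleo_common.utils.ansible.run_ansible_playbook. A minimal usage sketch follows, with argument values taken from the test fixtures themselves and assuming the package is still importable; it is an illustration, not part of the removed file:

    from tripleo_common.utils import ansible

    # Shells out to "ansible-playbook" via oslo.concurrency's
    # processutils.execute, after writing a scratch ansible.cfg into a
    # temporary work_dir (values here mirror the test fixtures).
    ansible.run_ansible_playbook(
        playbook='/tmp/playbook.yaml',
        limit_hosts=None,          # or e.g. ['compute35']
        remote_user='fido',
        become=True,
        become_user='root',
        extra_vars={'var1': True, 'var2': 0},
        verbosity=2)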
- -import json -import os -import configparser -import shutil -import tempfile -from unittest import mock - -from oslo_concurrency import processutils - -from tripleo_common.utils import ansible -from tripleo_common.tests import base - - -class AnsiblePlaybookTest(base.TestCase): - - def setUp(self): - super(AnsiblePlaybookTest, self).setUp() - - self.limit_hosts = None - self.remote_user = 'fido' - self.become = True - self.become_user = 'root' - self.extra_vars = {"var1": True, "var2": 0} - self.verbosity = 2 - self.ctx = mock.MagicMock() - self.max_message_size = 1024 - self.work_dir = tempfile.mkdtemp('tripleo-ansible') - self.playbook = os.path.join(self.work_dir, "playbook.yaml") - - @mock.patch('tempfile.mkdtemp') - @mock.patch("tripleo_common.utils.ansible.write_default_ansible_cfg") - @mock.patch("oslo_concurrency.processutils.execute") - def test_run(self, mock_execute, mock_write_cfg, mock_work_dir): - - mock_execute.return_value = ('', '') - mock_work_dir.return_value = self.work_dir - ansible_config_path = os.path.join(self.work_dir, - 'ansible.cfg') - mock_write_cfg.return_value = ansible_config_path - ansible.run_ansible_playbook( - playbook=self.playbook, limit_hosts=self.limit_hosts, - remote_user=self.remote_user, become=self.become, - become_user=self.become_user, extra_vars=self.extra_vars, - verbosity=self.verbosity) - - mock_write_cfg.assert_called_once_with(self.work_dir, - self.remote_user, - ssh_private_key=None, - override_ansible_cfg=None) - - pb = os.path.join(self.work_dir, 'playbook.yaml') - env = { - 'HOME': self.work_dir, - 'ANSIBLE_LOCAL_TEMP': self.work_dir, - 'ANSIBLE_CONFIG': ansible_config_path, - 'ANSIBLE_CALLBACKS_ENABLED': - 'tripleo_dense,tripleo_profile_tasks,tripleo_states', - 'ANSIBLE_STDOUT_CALLBACK': 'tripleo_dense', - 'PROFILE_TASKS_TASK_OUTPUT_LIMIT': '20', - } - ansible_playbook_cmd = 'ansible-playbook' - mock_execute.assert_called_once_with( - ansible_playbook_cmd, '-v', pb, '--become', - '--become-user', - self.become_user, '--extra-vars', json.dumps(self.extra_vars), - env_variables=env, cwd=self.work_dir, - log_errors=processutils.LogErrors.ALL) - - @mock.patch('tempfile.mkdtemp') - @mock.patch("tripleo_common.utils.ansible.write_default_ansible_cfg") - @mock.patch("oslo_concurrency.processutils.execute") - def test_run_with_limit(self, mock_execute, mock_write_cfg, mock_work_dir): - - mock_execute.return_value = ('', '') - mock_work_dir.return_value = self.work_dir - ansible_config_path = os.path.join(self.work_dir, - 'ansible.cfg') - mock_write_cfg.return_value = ansible_config_path - - ansible.run_ansible_playbook( - playbook=self.playbook, limit_hosts=['compute35'], - blacklisted_hostnames=['compute21'], - remote_user=self.remote_user, become=self.become, - become_user=self.become_user, extra_vars=self.extra_vars, - verbosity=self.verbosity) - - mock_write_cfg.assert_called_once_with(self.work_dir, - self.remote_user, - ssh_private_key=None, - override_ansible_cfg=None) - - pb = os.path.join(self.work_dir, 'playbook.yaml') - env = { - 'HOME': self.work_dir, - 'ANSIBLE_LOCAL_TEMP': self.work_dir, - 'ANSIBLE_CONFIG': ansible_config_path, - 'ANSIBLE_CALLBACKS_ENABLED': - 'tripleo_dense,tripleo_profile_tasks,tripleo_states', - 'ANSIBLE_STDOUT_CALLBACK': 'tripleo_dense', - 'PROFILE_TASKS_TASK_OUTPUT_LIMIT': '20', - } - ansible_playbook_cmd = 'ansible-playbook' - mock_execute.assert_called_once_with( - ansible_playbook_cmd, '-v', pb, '--limit', "['compute35']", - '--become', '--become-user', - self.become_user, '--extra-vars', 
json.dumps(self.extra_vars), - env_variables=env, cwd=self.work_dir, - log_errors=processutils.LogErrors.ALL) - - @mock.patch('tempfile.mkdtemp') - @mock.patch("shutil.rmtree") - @mock.patch("tripleo_common.utils.ansible.write_default_ansible_cfg") - @mock.patch("oslo_concurrency.processutils.execute") - def test_work_dir_cleanup(self, mock_execute, mock_write_cfg, - mock_rmtree, mock_work_dir): - - mock_execute.return_value = ('', '') - mock_work_dir.return_value = self.work_dir - ansible_config_path = os.path.join(self.work_dir, - 'ansible.cfg') - mock_write_cfg.return_value = ansible_config_path - - try: - ansible.run_ansible_playbook( - playbook=self.playbook, limit_hosts=self.limit_hosts, - remote_user=self.remote_user, become=self.become, - become_user=self.become_user, extra_vars=self.extra_vars, - verbosity=0) - mock_rmtree.assert_called_once_with(self.work_dir) - finally: - # Since we mocked the delete, we need to clean up manually. - shutil.rmtree(self.work_dir) - - @mock.patch("shutil.rmtree") - @mock.patch("tripleo_common.utils.ansible.write_default_ansible_cfg") - @mock.patch("oslo_concurrency.processutils.execute") - def test_work_dir_no_cleanup(self, mock_execute, mock_write_cfg, - mock_rmtree): - - mock_execute.return_value = ('', '') - - # Specify a self.work_dir; this should not be deleted automatically. - work_dir = tempfile.mkdtemp() - ansible_config_path = os.path.join(work_dir, - 'ansible.cfg') - mock_write_cfg.return_value = ansible_config_path - - try: - ansible.run_ansible_playbook( - playbook=self.playbook, limit_hosts=self.limit_hosts, - remote_user=self.remote_user, become=self.become, - become_user=self.become_user, extra_vars=self.extra_vars, - verbosity=self.verbosity, work_dir=work_dir) - - # Verify that rmtree is not called. - mock_rmtree.assert_not_called() - finally: - shutil.rmtree(work_dir) - - -class CopyConfigFileTest(base.TestCase): - - def test_copy_config_file(self): - with tempfile.NamedTemporaryFile() as ansible_cfg_file: - ansible_cfg_path = ansible_cfg_file.name - work_dir = tempfile.mkdtemp(prefix='ansible-mistral-action-test') - # Needed for the configparser to be able to read this file. - ansible_cfg_file.write(b'[defaults]\n') - ansible_cfg_file.write(b'[ssh_connection]\n') - ansible_cfg_file.flush() - - resulting_ansible_config = ansible.write_default_ansible_cfg( - work_dir, None, None, None, base_ansible_cfg=ansible_cfg_path) - - self.assertEqual(resulting_ansible_config, - os.path.join(work_dir, 'ansible.cfg')) - - config = configparser.ConfigParser() - config.read(resulting_ansible_config) - - retry_files_enabled = config.get('defaults', 'retry_files_enabled') - self.assertEqual(retry_files_enabled, 'False') - - log_path = config.get('defaults', 'log_path') - self.assertEqual(log_path, - os.path.join(work_dir, 'ansible.log')) - - def test_override_ansible_cfg(self): - with tempfile.NamedTemporaryFile() as ansible_cfg_file: - ansible_cfg_path = ansible_cfg_file.name - work_dir = tempfile.mkdtemp(prefix='ansible-mistral-action-test') - # Needed for the configparser to be able to read this file.
- ansible_cfg_file.write(b'[defaults]\n') - ansible_cfg_file.write(b'[ssh_connection]\n') - ansible_cfg_file.flush() - - override_ansible_cfg = ( - "[defaults]\n" - "forks=10\n" - "[ssh_connection]\n" - "custom_option=custom_value\n" - ) - - resulting_ansible_config = ansible.write_default_ansible_cfg( - work_dir, None, None, None, base_ansible_cfg=ansible_cfg_path, - override_ansible_cfg=override_ansible_cfg) - - ansible_cfg = configparser.ConfigParser() - ansible_cfg.read(resulting_ansible_config) - - self.assertEqual('10', ansible_cfg.get('defaults', 'forks')) - self.assertEqual('custom_value', - ansible_cfg.get('ssh_connection', - 'custom_option')) - - @mock.patch("multiprocessing.cpu_count") - def test_override_ansible_cfg_empty(self, cpu_count): - with tempfile.NamedTemporaryFile() as ansible_cfg_file: - ansible_cfg_path = ansible_cfg_file.name - work_dir = tempfile.mkdtemp(prefix='ansible-mistral-action-test') - # Needed for the configparser to be able to read this file. - ansible_cfg_file.write(b'[defaults]\n') - ansible_cfg_file.write(b'[ssh_connection]\n') - ansible_cfg_file.flush() - cpu_count.return_value = 4 - override_ansible_cfg = "" - - resulting_ansible_config = ansible.write_default_ansible_cfg( - work_dir, None, None, base_ansible_cfg=ansible_cfg_path, - override_ansible_cfg=override_ansible_cfg) - - ansible_cfg = configparser.ConfigParser() - ansible_cfg.read(resulting_ansible_config) - - self.assertEqual('16', ansible_cfg.get('defaults', 'forks')) diff --git a/tripleo_common/tests/utils/test_config.py b/tripleo_common/tests/utils/test_config.py deleted file mode 100644 index 23bbfae78..000000000 --- a/tripleo_common/tests/utils/test_config.py +++ /dev/null @@ -1,998 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
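The TestConfig cases that follow drive tripleo_common.utils.config.Config end to end against a mocked heat client. A condensed sketch of the flow they exercise (paths are placeholders, and the heat client is mocked exactly as in the tests):

    from unittest import mock

    from tripleo_common.utils import config as ooo_config

    heat = mock.MagicMock()        # stand-in for a heatclient instance
    cfg = ooo_config.Config(heat)
    cfg.fetch_config('overcloud')  # caches stack outputs such as RoleData
    # Writes per-role config files under /tmp/tht and initializes a git
    # repo in that directory.
    cfg.download_config('overcloud', '/tmp/tht', ['config_settings'])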
- -import datetime -import fixtures -import git -import os -from unittest import mock -from unittest.mock import patch -from unittest.mock import call -import uuid -import warnings - -import yaml - - -from tripleo_common import constants -from tripleo_common.tests import base -from tripleo_common.tests.fake_config import fakes -from tripleo_common.utils import config as ooo_config - -RESOURCES_YAML_CONTENTS = """heat_template_version: 2016-04-08 -resources: - Controller: - type: OS::Heat::ResourceGroup - NotRoleContoller: - type: OS::Dummy::DummyGroup -""" - - -class TestConfig(base.TestCase): - - def setUp(self): - super(TestConfig, self).setUp() - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch.object(ooo_config.shutil, 'copyfile') - @patch.object(ooo_config.Config, '_mkdir') - @patch.object(ooo_config.Config, '_open_file') - @patch.object(ooo_config.shutil, 'rmtree') - def test_overcloud_config_generate_config(self, - mock_rmtree, - mock_open, - mock_mkdir, - mock_copyfile, - mock_git_init): - config_type_list = ['config_settings', 'global_config_settings', - 'logging_sources', 'monitoring_subscriptions', - 'service_config_settings', - 'service_metadata_settings', - 'service_names', - 'upgrade_batch_tasks', 'upgrade_tasks', - 'external_deploy_steps_tasks'] - - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.config.fetch_config('overcloud') - fake_role = list(self.config.stack_outputs.get('RoleData')) - self.config.download_config('overcloud', '/tmp/tht', config_type_list) - - mock_git_init.assert_called_once_with('/tmp/tht') - expected_mkdir_calls = [call('/tmp/tht/%s' % r) for r in fake_role] - mock_mkdir.assert_has_calls(expected_mkdir_calls, any_order=True) - mock_mkdir.assert_called() - expected_calls = [] - for config in config_type_list: - if 'external' in config: - for step in range(constants.DEFAULT_STEPS_MAX): - expected_calls += [call('/tmp/tht/%s_step%s.yaml' % - (config, step))] - - for role in fake_role: - if 'external' in config: - continue - if config == 'step_config': - expected_calls += [call('/tmp/tht/%s/%s.pp' % - (role, config))] - elif config == 'param_config': - expected_calls += [call('/tmp/tht/%s/%s.json' % - (role, config))] - else: - expected_calls += [call('/tmp/tht/%s/%s.yaml' % - (role, config))] - mock_open.assert_has_calls(expected_calls, any_order=True) - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch.object(ooo_config.shutil, 'copyfile') - @patch.object(ooo_config.Config, '_mkdir') - @patch.object(ooo_config.Config, '_open_file') - @patch.object(ooo_config.shutil, 'rmtree') - def test_overcloud_config_one_config_type(self, - mock_rmtree, - mock_open, - mock_mkdir, - mock_copyfile, - mock_git_init): - - expected_config_type = 'config_settings' - - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.config.fetch_config('overcloud') - fake_role = list(self.config.stack_outputs.get('RoleData')) - self.config.download_config('overcloud', '/tmp/tht', - ['config_settings']) - expected_mkdir_calls = [call('/tmp/tht/%s' % r) for r in fake_role] - expected_calls = [call('/tmp/tht/%s/%s.yaml' - % (r, expected_config_type)) - for r in fake_role] - mock_mkdir.assert_has_calls(expected_mkdir_calls, any_order=True) - mock_mkdir.assert_called() - mock_open.assert_has_calls(expected_calls, any_order=True) - mock_git_init.assert_called_once_with('/tmp/tht') - - 
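The expected_calls bookkeeping in the two tests above encodes the on-disk layout that download_config is expected to produce. Restated as a sketch (the helper name is hypothetical; the extension rules are copied from the assertions, while external_* configs are instead written per step at the top level):

    import os

    def config_file_path(config_dir, role, config):
        # Hypothetical restatement of the assertions above: puppet step
        # configs land in .pp files, param configs in .json, everything
        # else in .yaml, all under a per-role subdirectory.
        if config == 'step_config':
            return os.path.join(config_dir, role, config + '.pp')
        if config == 'param_config':
            return os.path.join(config_dir, role, config + '.json')
        return os.path.join(config_dir, role, config + '.yaml')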
@patch.object(ooo_config.git, 'Repo') - @mock.patch('os.mkdir') - @mock.patch('builtins.open') - @patch.object(ooo_config.shutil, 'rmtree') - def test_overcloud_config_wrong_config_type(self, mock_rmtree, - mock_open, mock_mkdir, - mock_repo): - args = {'name': 'overcloud', 'config_dir': '/tmp/tht', - 'config_type': ['bad_config']} - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.assertRaises( - KeyError, - self.config.download_config, *args) - - def test_overcloud_config_upgrade_tasks(self): - - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.config.fetch_config('overcloud') - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - fake_role = list(self.config.stack_outputs.get('RoleData')) - expected_tasks = {'FakeController': {0: [], - 1: [{'name': 'Stop fake service', - 'service': 'name=fake ' - 'state=stopped', - 'when': 'step|int == 1'}], - 2: [], - 3: [], - 4: [], - 5: []}, - 'FakeCompute': {0: [], - 1: [{'name': 'Stop fake service', - 'service': 'name=fake ' - 'state=stopped', - 'when': ['nova_api_enabled.rc' - ' == 0', False, - 'httpd_enabled.rc' - ' != 0', - 'step|int == 1']}], - 2: [{'name': 'Stop nova-compute ' - 'service', - 'service': 'name=openstack-' - 'nova-compute state=stopped', - 'when': ['nova_compute_' - 'enabled.rc == 0', - 'step|int == 2', - 'existing', - 'list']}], - 3: [], - 4: [], - 5: []}} - for role in fake_role: - filedir = os.path.join(self.tmp_dir, role) - os.makedirs(filedir) - for step in range(constants.DEFAULT_STEPS_MAX): - filepath = os.path.join(filedir, "upgrade_tasks_step%s.yaml" - % step) - playbook_tasks = self.config._write_tasks_per_step( - self.config.stack_outputs.get('RoleData')[role] - ['upgrade_tasks'], filepath, step) - self.assertTrue(os.path.isfile(filepath)) - self.assertEqual(expected_tasks[role][step], playbook_tasks) - - def test_get_server_names(self): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - self.config.stack_outputs = { - 'RoleNetHostnameMap': { - 'Controller': { - 'ctlplane': [ - 'c0.ctlplane.localdomain', - 'c1.ctlplane.localdomain', - 'c2.ctlplane.localdomain']}}, - 'ServerIdData': { - 'server_ids': { - 'Controller': [ - '8269f736', - '2af0a373', - 'c8479674']}}} - server_names = self.config.get_server_names() - expected = {'2af0a373': 'c1', '8269f736': 'c0', 'c8479674': 'c2'} - self.assertEqual(expected, server_names) - - def test_get_role_config(self): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - self.config.stack_outputs = {'RoleConfig': None} - role_config = self.config.get_role_config() - self.assertEqual({}, role_config) - - def test_get_deployment_data(self): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - stack = 'overcloud' - first = mock.MagicMock() - first.creation_time = datetime.datetime.now() - datetime.timedelta(2) - second = mock.MagicMock() - second.creation_time = datetime.datetime.now() - datetime.timedelta(1) - third = mock.MagicMock() - third.creation_time = datetime.datetime.now() - # Set return_value in a nonsorted order, as we expect the function to - # sort, so that's what we want to test - heat.resources.list.return_value = [second, third, first] - - deployment_data = self.config.get_deployment_data(stack) - self.assertTrue(heat.resources.list.called) - self.assertEqual( - heat.resources.list.call_args, - mock.call(stack, - 
filters=dict(name=constants.TRIPLEO_DEPLOYMENT_RESOURCE), - nested_depth=constants.NESTED_DEPTH, - with_detail=True)) - self.assertEqual(deployment_data, - [first, second, third]) - - def _get_config_data(self, datafile): - config_data_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'data', - datafile) - with open(config_data_path) as fin: - config_data = yaml.safe_load(fin.read()) - deployment_data = [] - - for deployment in config_data['deployments']: - deployment_mock = mock.MagicMock() - deployment_mock.id = deployment['deployment'] - deployment_mock.attributes = dict( - value=dict(server=deployment['server'], - deployment=deployment['deployment'], - config=deployment['config'], - name=deployment['name'])) - deployment_data.append(deployment_mock) - - configs = config_data['configs'] - - return deployment_data, configs - - def _get_deployment_id(self, deployment): - return deployment.attributes['value']['deployment'] - - def _get_config_dict(self, deployment_id): - deployment = list(filter( - lambda d: d.id == deployment_id, self.deployments))[0] - config = self.configs[deployment.attributes['value']['config']].copy() - config['inputs'] = [] - config['inputs'].append(dict( - name='deploy_server_id', - value=deployment.attributes['value']['server'])) - return config - - def _get_yaml_file(self, file_name): - file_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'data', - file_name) - with open(file_path) as fin: - return yaml.safe_load(fin.read()) - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch('tripleo_common.utils.config.Config.get_deployment_resource_id') - @patch('tripleo_common.utils.config.Config.get_config_dict') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - def test_config_download(self, mock_deployment_data, mock_config_dict, - mock_deployment_resource_id, - mock_git_init): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - stack = mock.MagicMock() - heat.stacks.get.return_value = stack - stack.outputs = [ - {'output_key': 'RoleNetHostnameMap', - 'output_value': { - 'Controller': { - 'ctlplane': [ - 'overcloud-controller-0.ctlplane.localdomain']}, - 'Compute': { - 'ctlplane': [ - 'overcloud-novacompute-0.ctlplane.localdomain', - 'overcloud-novacompute-1.ctlplane.localdomain', - 'overcloud-novacompute-2.ctlplane.localdomain']}}}, - {'output_key': 'ServerIdData', - 'output_value': { - 'server_ids': { - 'Controller': [ - '00b3a5e1-5e8e-4b55-878b-2fa2271f15ad'], - 'Compute': [ - 'a7db3010-a51f-4ae0-a791-2364d629d20d', - '8b07cd31-3083-4b88-a433-955f72039e2c', - '169b46f8-1965-4d90-a7de-f36fb4a830fe']}}}, - {'output_key': 'AnsibleHostVarsMap', - 'output_value': { - 'Controller': { - 'overcloud-controller-0': { - 'uuid': 0, - 'my_var': 'foo'}}, - 'Compute': { - 'overcloud-novacompute-0': { - 'uuid': 1}, - 'overcloud-novacompute-1': { - 'uuid': 2}, - 'overcloud-novacompute-2': { - 'uuid': 3}}}}, - {'output_key': 'RoleGroupVars', - 'output_value': { - 'Controller': { - 'any_errors_fatal': True, - 'chrony_host': '192.168.2.1', - 'chrony_foo': 'bar', - 'chrony_acl': 'none', - 'max_fail_percentage': 15}, - 'Compute': { - 'any_errors_fatal': True, - 'max_fail_percentage': 15}, - }}] - deployment_data, configs = \ - self._get_config_data('config_data.yaml') - self.configs = configs - self.deployments = deployment_data - mock_deployment_data.return_value = deployment_data - mock_deployment_resource_id.side_effect = self._get_deployment_id - mock_config_dict.side_effect = 
self._get_config_dict - - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - tmp_path = self.config.download_config(stack, self.tmp_dir) - - mock_git_init.assert_called_once_with(self.tmp_dir) - for f in ['Controller', - 'Compute', ]: - - with open(os.path.join(tmp_path, 'group_vars', f)) as fin: - self.assertEqual( - self._get_yaml_file(f), - yaml.safe_load(fin.read())) - - for f in ['overcloud-controller-0', - 'overcloud-novacompute-0', - 'overcloud-novacompute-1', - 'overcloud-novacompute-2']: - with open(os.path.join(tmp_path, 'host_vars', f)) as fin: - self.assertEqual( - self._get_yaml_file(os.path.join('host_vars', f)), - yaml.safe_load(fin.read())) - - for d in ['ControllerHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost', - 'MyPostConfig']: - with open(os.path.join(tmp_path, 'Controller', - 'overcloud-controller-0', - d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-controller-0', - d))) - - for d in ['ComputeHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost']: - - with open(os.path.join(tmp_path, 'Compute', - 'overcloud-novacompute-0', - d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-novacompute-0', - d))) - - for d in ['ComputeHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost']: - with open(os.path.join(tmp_path, 'Compute', - 'overcloud-novacompute-1', - d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-novacompute-1', - d))) - - for d in ['ComputeHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost', - 'AnsibleDeployment']: - with open(os.path.join(tmp_path, 'Compute', - 'overcloud-novacompute-2', - d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-novacompute-2', - d))) - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch('tripleo_common.utils.config.Config.get_deployment_resource_id') - @patch('tripleo_common.utils.config.Config.get_config_dict') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - def test_config_download_os_apply_config( - self, mock_deployment_data, mock_config_dict, - mock_deployment_resource_id, mock_git_init): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - stack = mock.MagicMock() - heat.stacks.get.return_value = stack - heat.resources.get.return_value = mock.MagicMock() - stack.outputs = [ - {'output_key': 'RoleNetHostnameMap', - 'output_value': { - 'Controller': { - 'ctlplane': [ - 'overcloud-controller-0.ctlplane.localdomain']}, - 'Compute': { - 'ctlplane': [ - 'overcloud-novacompute-0.ctlplane.localdomain', - 'overcloud-novacompute-1.ctlplane.localdomain', - 'overcloud-novacompute-2.ctlplane.localdomain']}}}, - {'output_key': 'ServerIdData', - 'output_value': { - 'server_ids': { - 'Controller': [ - '00b3a5e1-5e8e-4b55-878b-2fa2271f15ad'], - 'Compute': [ - 'a7db3010-a51f-4ae0-a791-2364d629d20d', - '8b07cd31-3083-4b88-a433-955f72039e2c', - '169b46f8-1965-4d90-a7de-f36fb4a830fe']}}}, - {'output_key': 'RoleGroupVars', - 'output_value': { - 'Controller': { - 'any_errors_fatal': 'yes', - 'max_fail_percentage': 15}, - 'Compute': { - 'any_errors_fatal': 'yes', - 'max_fail_percentage': 15}, - }}] - deployment_data, configs = \ - self._get_config_data('config_data.yaml') - - # Add a group:os-apply-config config and deployment - config_uuid = str(uuid.uuid4()) - configs[config_uuid] = dict( - 
id=config_uuid, - config=dict(a='a'), - group='os-apply-config', - outputs=[]) - - deployment_uuid = str(uuid.uuid4()) - deployment_mock = mock.MagicMock() - deployment_mock.id = deployment_uuid - deployment_mock.attributes = dict( - value=dict(server='00b3a5e1-5e8e-4b55-878b-2fa2271f15ad', - deployment=deployment_uuid, - config=config_uuid, - name='OsApplyConfigDeployment')) - deployment_data.append(deployment_mock) - - self.configs = configs - self.deployments = deployment_data - mock_deployment_data.return_value = deployment_data - mock_config_dict.side_effect = self._get_config_dict - mock_deployment_resource_id.side_effect = self._get_deployment_id - - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - with warnings.catch_warnings(record=True) as w: - self.config.download_config(stack, self.tmp_dir) - mock_git_init.assert_called_once_with(self.tmp_dir) - # Check that we got at least one of the warnings that we expected - # to be raised - self.assertGreaterEqual(len(w), 1) - self.assertGreaterEqual(len([x for x in w - if issubclass(x.category, - DeprecationWarning)]), - 1) - self.assertGreaterEqual(len([x for x in w - if "group:os-apply-config" - in str(x.message)]), - 1) - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch('tripleo_common.utils.config.Config.get_deployment_resource_id') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - def test_config_download_no_deployment_name( - self, mock_deployment_data, mock_deployment_resource_id, - mock_git_init): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - stack = mock.MagicMock() - heat.stacks.get.return_value = stack - heat.resources.get.return_value = mock.MagicMock() - - deployment_data, _ = self._get_config_data('config_data.yaml') - - # Delete the name of the first deployment and its parent.
- del deployment_data[0].attributes['value']['name'] - deployment_data[0].parent_resource = None - self.deployments = deployment_data - - mock_deployment_data.return_value = deployment_data - mock_deployment_resource_id.side_effect = self._get_deployment_id - - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - self.assertRaises(ValueError, - self.config.download_config, stack, self.tmp_dir) - mock_git_init.assert_called_once_with(self.tmp_dir) - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch('tripleo_common.utils.config.Config.get_deployment_resource_id') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - def test_config_download_warn_grandparent_resource_name( - self, mock_deployment_data, mock_deployment_resource_id, - mock_git_init): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - stack = mock.MagicMock() - heat.stacks.get.return_value = stack - heat.resources.get.return_value = mock.MagicMock() - - deployment_data, _ = self._get_config_data('config_data.yaml') - - # Set the name of the deployment to an integer to trigger looking up - # the grandparent resource name - deployment_data[0].attributes['value']['name'] = 1 - self.deployments = deployment_data - - mock_deployment_data.return_value = deployment_data - mock_deployment_resource_id.side_effect = self._get_deployment_id - - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - with warnings.catch_warnings(record=True) as w: - self.assertRaises(ValueError, - self.config.download_config, stack, self.tmp_dir) - self.assertGreaterEqual(len(w), 1) - self.assertGreaterEqual(len([x for x in w - if "grandparent" - in str(x.message)]), - 1) - - mock_git_init.assert_called_once_with(self.tmp_dir) - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch('tripleo_common.utils.config.Config.get_deployment_resource_id') - @patch('tripleo_common.utils.config.Config.get_config_dict') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - def test_config_download_no_deployment_uuid(self, mock_deployment_data, - mock_config_dict, - mock_deployment_resource_id, - mock_git_init): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - stack = mock.MagicMock() - heat.stacks.get.return_value = stack - heat.resources.get.return_value = mock.MagicMock() - - stack.outputs = [ - {'output_key': 'RoleNetHostnameMap', - 'output_value': { - 'Controller': { - 'ctlplane': [ - 'overcloud-controller-0.ctlplane.localdomain']}, - 'Compute': { - 'ctlplane': [ - 'overcloud-novacompute-0.ctlplane.localdomain', - 'overcloud-novacompute-1.ctlplane.localdomain', - 'overcloud-novacompute-2.ctlplane.localdomain']}}}, - {'output_key': 'ServerIdData', - 'output_value': { - 'server_ids': { - 'Controller': [ - '00b3a5e1-5e8e-4b55-878b-2fa2271f15ad'], - 'Compute': [ - 'a7db3010-a51f-4ae0-a791-2364d629d20d', - '8b07cd31-3083-4b88-a433-955f72039e2c', - '169b46f8-1965-4d90-a7de-f36fb4a830fe']}}}, - {'output_key': 'RoleGroupVars', - 'output_value': { - 'Controller': { - 'any_errors_fatal': 'yes', - 'max_fail_percentage': 15}, - 'Compute': { - 'any_errors_fatal': 'yes', - 'max_fail_percentage': 15}, - }}] - deployment_data, configs = self._get_config_data('config_data.yaml') - - # Set the deployment to TripleOSoftwareDeployment for the first - # deployment - deployment_data[0].attributes['value']['deployment'] = \ - 'TripleOSoftwareDeployment' - - # Set the physical_resource_id as '' for the second deployment - deployment_data[1].attributes['value']['deployment'] = '' - - 
self.configs = configs - self.deployments = deployment_data - mock_deployment_data.return_value = deployment_data - mock_config_dict.side_effect = self._get_config_dict - mock_deployment_resource_id.side_effect = self._get_deployment_id - - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - with warnings.catch_warnings(record=True) as w: - self.config.download_config(stack, self.tmp_dir) - assert "Skipping deployment" in str(w[-1].message) - assert "Skipping deployment" in str(w[-2].message) - - @patch.object(ooo_config.Config, 'initialize_git_repo') - @patch.object(ooo_config.git, 'Repo') - @patch.object(ooo_config.shutil, 'copyfile') - @patch.object(ooo_config.Config, '_mkdir') - @patch.object(ooo_config.Config, '_open_file') - @patch.object(ooo_config.shutil, 'rmtree') - @patch.object(ooo_config.os.path, 'exists') - def test_overcloud_config_dont_preserve_config(self, - mock_os_path_exists, - mock_rmtree, - mock_open, - mock_mkdir, - mock_copyfile, - mock_repo, - mock_git_init): - config_type_list = ['config_settings', 'global_config_settings', - 'logging_sources', 'monitoring_subscriptions', - 'service_config_settings', - 'service_metadata_settings', - 'service_names', - 'upgrade_batch_tasks', 'upgrade_tasks', - 'external_deploy_tasks'] - - mock_os_path_exists.get.return_value = True - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.config.fetch_config('overcloud') - fake_role = list(self.config.stack_outputs.get('RoleData')) - self.config.download_config('overcloud', '/tmp/tht', config_type_list, - False) - - mock_git_init.assert_called_once_with('/tmp/tht') - expected_rmtree_calls = [call('/tmp/tht')] - mock_rmtree.assert_has_calls(expected_rmtree_calls) - - expected_mkdir_calls = [call('/tmp/tht/%s' % r) for r in fake_role] - mock_mkdir.assert_has_calls(expected_mkdir_calls, any_order=True) - mock_mkdir.assert_called() - expected_calls = [] - for config in config_type_list: - for role in fake_role: - if 'external' in config: - continue - if config == 'step_config': - expected_calls += [call('/tmp/tht/%s/%s.pp' % - (role, config))] - elif config == 'param_config': - expected_calls += [call('/tmp/tht/%s/%s.json' % - (role, config))] - else: - expected_calls += [call('/tmp/tht/%s/%s.yaml' % - (role, config))] - mock_open.assert_has_calls(expected_calls, any_order=True) - - @patch.object(ooo_config.os, 'makedirs') - @patch.object(ooo_config.shutil, 'rmtree') - @patch.object(ooo_config.os.path, 'exists') - def test_create_config_dir(self, mock_os_path_exists, mock_rmtree, - mock_makedirs): - mock_os_path_exists.get.return_value = True - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.config.create_config_dir('/tmp/tht', False) - expected_rmtree_calls = [call('/tmp/tht')] - mock_rmtree.assert_has_calls(expected_rmtree_calls) - expected_makedirs_calls = [ - call('/tmp/tht', mode=0o700, exist_ok=True), - call('/tmp/tht/artifacts', mode=0o700, exist_ok=True), - call('/tmp/tht/env', mode=0o700, exist_ok=True), - call('/tmp/tht/inventory', mode=0o700, exist_ok=True), - call('/tmp/tht/profiling_data', mode=0o700, exist_ok=True), - call('/tmp/tht/project', mode=0o700, exist_ok=True), - call('/tmp/tht/roles', mode=0o700, exist_ok=True), - ] - mock_makedirs.assert_has_calls(expected_makedirs_calls) - - def test_initialize_git_repo(self): - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - 
self.config = ooo_config.Config(heat) - self.tmp_dir = self.useFixture(fixtures.TempDir()).path - repo = self.config.initialize_git_repo(self.tmp_dir) - self.assertIsInstance(repo, git.Repo) - - @patch('tripleo_common.utils.config.Config.get_config_dict') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - def test_write_config(self, mock_deployment_data, mock_config_dict): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - stack = mock.MagicMock() - heat.stacks.get.return_value = stack - - stack.outputs = [ - {'output_key': 'RoleNetHostnameMap', - 'output_value': { - 'Controller': { - 'ctlplane': [ - 'overcloud-controller-0.ctlplane.localdomain']}, - 'Compute': { - 'ctlplane': [ - 'overcloud-novacompute-0.ctlplane.localdomain', - 'overcloud-novacompute-1.ctlplane.localdomain', - 'overcloud-novacompute-2.ctlplane.localdomain']}}}, - {'output_key': 'ServerIdData', - 'output_value': { - 'server_ids': { - 'Controller': [ - '00b3a5e1-5e8e-4b55-878b-2fa2271f15ad'], - 'Compute': [ - 'a7db3010-a51f-4ae0-a791-2364d629d20d', - '8b07cd31-3083-4b88-a433-955f72039e2c', - '169b46f8-1965-4d90-a7de-f36fb4a830fe']}}}, - {'output_key': 'RoleGroupVars', - 'output_value': { - 'Controller': { - 'any_errors_fatal': True, - 'chrony_host': '192.168.2.1', - 'chrony_foo': 'bar', - 'chrony_acl': 'none', - 'max_fail_percentage': 15}, - 'Compute': { - 'any_errors_fatal': True, - 'max_fail_percentage': 15}}}, - ] - deployment_data, configs = \ - self._get_config_data('config_data.yaml') - self.configs = configs - self.deployments = deployment_data - - stack_data = self.config.fetch_config('overcloud') - mock_deployment_data.return_value = deployment_data - mock_config_dict.side_effect = self._get_config_dict - config_dir = self.useFixture(fixtures.TempDir()).path - - self.config.write_config(stack_data, 'overcloud', config_dir) - - for f in ['Controller', - 'Compute', ]: - with open(os.path.join(config_dir, 'group_vars', f)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(f)) - - for d in ['ControllerHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost', - 'MyPostConfig']: - with open(os.path.join(config_dir, 'Controller', - 'overcloud-controller-0', d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-controller-0', - d))) - - for d in ['ComputeHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost']: - with open(os.path.join(config_dir, 'Compute', - 'overcloud-novacompute-0', - d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-novacompute-0', - d))) - - for d in ['ComputeHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost']: - with open(os.path.join(config_dir, 'Compute', - 'overcloud-novacompute-1', - d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-novacompute-1', - d))) - - for d in ['ComputeHostEntryDeployment', - 'NetworkDeployment', - 'MyExtraConfigPost', - 'AnsibleDeployment']: - with open(os.path.join(config_dir, 'Compute', - 'overcloud-novacompute-2', d)) as fin: - self.assertEqual( - yaml.safe_load(fin.read()), - self._get_yaml_file(os.path.join( - 'overcloud-novacompute-2', - d))) - - @patch('tripleo_common.utils.config.Config.get_config_dict') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - @patch.object(ooo_config.yaml, 'safe_load') - def test_validate_config(self, mock_yaml, mock_deployment_data, - 
mock_config_dict): - stack_config = """ - Controller: - ctlplane: - overcloud-controller-0.ctlplane.localdomain - Compute: - ctlplane: - overcloud-novacompute-0.ctlplane.localdomain - overcloud-novacompute-1.ctlplane.localdomain - overcloud-novacompute-2.ctlplane.localdomain - """ - yaml_file = '/tmp/testfile.yaml' - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.config.validate_config(stack_config, yaml_file) - expected_yaml_safe_load_calls = [call(stack_config)] - mock_yaml.assert_has_calls(expected_yaml_safe_load_calls) - - @patch('tripleo_common.utils.config.Config.get_config_dict') - @patch('tripleo_common.utils.config.Config.get_deployment_data') - def test_validate_config_invalid_yaml(self, mock_deployment_data, - mock_config_dict): - # Use invalid YAML to assert that we properly handle the exception - stack_config = """ - Controller: - ctlplane: - overcloud-controller-0.ctlplane.localdomain - Compute: - ctlplane: - overcloud-novacompute-0.ctlplane.localdomain - overcloud-novacompute-1.ctlplane.localdomain - overcloud-novacompute-2.ctlplane.localdomain - """ - yaml_file = '/tmp/testfile.yaml' - heat = mock.MagicMock() - heat.stacks.get.return_value = fakes.create_tht_stack() - self.config = ooo_config.Config(heat) - self.assertRaises(yaml.scanner.ScannerError, - self.config.validate_config, stack_config, yaml_file) - - -class OvercloudConfigTest(base.TestCase): - - def setUp(self): - super(OvercloudConfigTest, self).setUp() - self.plan = 'overcloud' - self.config_container = 'config-overcloud' - - @mock.patch('tripleo_common.utils.config.Config.download_config') - def test_get_overcloud_config(self, mock_config): - heat = mock.MagicMock() - heat.stacks.get.return_value = mock.MagicMock( - stack_name='stack', id='stack_id') - mock_config.return_value = '/tmp/fake-path' - - ooo_config.get_overcloud_config( - None, heat, - self.plan, - self.config_container, - '/tmp/fake-path') - mock_config.assert_called_once_with('overcloud', '/tmp/fake-path', - None, commit_message=mock.ANY, - preserve_config_dir=True) - - @patch.object(ooo_config.Config, '_open_file') - def test_overcloud_config__write_tasks_per_step(self, mock_open_file): - heat = mock.MagicMock() - self.config = ooo_config.Config(heat) - - # TODO: share this tasks definition between tests as a fixture, - # so that we get several smaller test cases instead of - # a big one.
- tasks = [ - { - "when": "step|int == 0", - "name": "Simple check" - }, - { - "when": "(step|int == 0)", - "name": "Check within parenthesis" - }, - { - "when": ["step|int == 0", "test1", False], - "name": "Check with list with boolean" - }, - { - "when": ["test1", False, "step|int == 0"], - "name": "Check with list with boolean other order" - }, - { - "when": "step|int == 0 or step|int == 3", - "name": "Check with boolean expression" - }, - { - "when": "(step|int == 0 or step|int == 3) and other_cond", - "name": "Complex boolean expression" - }, - { - "name": "Task with no conditional" - } - ] - - # Everything should come back - tasks_per_step = self.config._write_tasks_per_step( - tasks, - 'Compute/update_tasks_step0.yaml', - 0 - ) - - self.assertEqual(tasks, tasks_per_step) - - # Using strict, the tasks with no conditional will be dropped - tasks_per_step = self.config._write_tasks_per_step( - tasks, - 'Compute/update_tasks_step0.yaml', - 0, - strict=True, - ) - - expected_tasks = [task for task in tasks - if task != {"name": "Task with no conditional"}] - self.assertEqual(expected_tasks, - tasks_per_step) - - # Some tasks will be filtered out for step 3. - tasks_per_step = self.config._write_tasks_per_step( - tasks, - 'Compute/update_tasks_step3.yaml', - 3 - ) - - self.assertEqual( - [ - { - "when": "step|int == 0 or step|int == 3", - "name": "Check with boolean expression" - }, - { - "when": "(step|int == 0 or step|int == 3) and other_cond", - "name": "Complex boolean expression" - }, - { - "name": "Task with no conditional" - } - ], - tasks_per_step) - - # Even more tasks will be filtered out for step 3 with strict. - tasks_per_step = self.config._write_tasks_per_step( - tasks, - 'Compute/update_tasks_step3.yaml', - 3, - strict=True, - ) - - self.assertEqual( - [ - { - "when": "step|int == 0 or step|int == 3", - "name": "Check with boolean expression" - }, - { - "when": "(step|int == 0 or step|int == 3) and other_cond", - "name": "Complex boolean expression" - }, - ], - tasks_per_step) diff --git a/tripleo_common/tests/utils/test_nodes.py b/tripleo_common/tests/utils/test_nodes.py deleted file mode 100644 index 93a01df03..000000000 --- a/tripleo_common/tests/utils/test_nodes.py +++ /dev/null @@ -1,1431 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2021 Dell Inc. or its subsidiaries. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License.
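The DriverInfo test classes below all revolve around one idea: translating instackenv-style pm_* keys into driver-specific Ironic driver_info keys, dropping anything unsupported. A toy restatement of that mapping logic, not the real implementation, for orientation:

    class ToyDriverInfo:
        # Toy model of the key translation exercised by the tests below.

        def __init__(self, prefix, mapping):
            self.prefix = prefix      # e.g. 'foo'
            self.mapping = mapping    # e.g. {'pm_addr': 'foo_address'}

        def convert_key(self, key):
            if key in self.mapping:
                return self.mapping[key]
            if key.startswith(self.prefix + '_'):
                return key            # already driver-specific, kept as-is
            return None               # unsupported key, dropped by convert()

        def convert(self, fields):
            converted = {}
            for key, value in fields.items():
                new_key = self.convert_key(key)
                if new_key is not None:
                    converted[new_key] = value
            return converted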
- -import collections -from unittest import mock - -from testtools import matchers - -from tripleo_common import exception -from tripleo_common.tests import base -from tripleo_common.utils import nodes - - -class DriverInfoTest(base.TestCase): - def setUp(self): - super(DriverInfoTest, self).setUp() - self.driver_info = nodes.DriverInfo( - 'foo', - mapping={ - 'pm_1': 'foo_1', - 'pm_2': 'foo_2' - }, - deprecated_mapping={ - 'pm_3': 'foo_3' - }) - - def test_convert_key(self): - self.assertEqual('foo_1', self.driver_info.convert_key('pm_1')) - self.assertEqual('foo_42', self.driver_info.convert_key('foo_42')) - self.assertIsNone(self.driver_info.convert_key('bar_baz')) - - @mock.patch.object(nodes.LOG, 'warning', autospec=True) - def test_convert_key_deprecated(self, mock_log): - self.assertEqual('foo_3', self.driver_info.convert_key('pm_3')) - self.assertTrue(mock_log.called) - - @mock.patch.object(nodes.LOG, 'warning', autospec=True) - def test_convert_key_pm_unsupported(self, mock_log): - self.assertIsNone(self.driver_info.convert_key('pm_42')) - self.assertTrue(mock_log.called) - - def test_convert(self): - result = self.driver_info.convert({'pm_1': 'val1', - 'foo_42': 42, - 'unknown': 'foo'}) - self.assertEqual({'foo_1': 'val1', 'foo_42': 42}, result) - - -class PrefixedDriverInfoTest(base.TestCase): - def setUp(self): - super(PrefixedDriverInfoTest, self).setUp() - self.driver_info = nodes.PrefixedDriverInfo( - 'foo', deprecated_mapping={'pm_d': 'foo_d'}) - - def test_convert_key(self): - keys = {'pm_addr': 'foo_address', - 'pm_user': 'foo_username', - 'pm_password': 'foo_password', - 'foo_something': 'foo_something', - 'pm_d': 'foo_d'} - for key, expected in keys.items(): - self.assertEqual(expected, self.driver_info.convert_key(key)) - - for key in ('unknown', 'pm_port'): - self.assertIsNone(self.driver_info.convert_key(key)) - - def test_unique_id_from_fields(self): - fields = {'pm_addr': 'localhost', - 'pm_user': 'user', - 'pm_password': '123456', - 'pm_port': 42} - self.assertEqual('localhost', - self.driver_info.unique_id_from_fields(fields)) - - def test_unique_id_from_node(self): - node = mock.Mock(driver_info={'foo_address': 'localhost', - 'foo_port': 42}) - self.assertEqual('localhost', - self.driver_info.unique_id_from_node(node)) - - -class PrefixedDriverInfoTestWithPort(base.TestCase): - def setUp(self): - super(PrefixedDriverInfoTestWithPort, self).setUp() - self.driver_info = nodes.PrefixedDriverInfo( - 'foo', deprecated_mapping={'pm_d': 'foo_d'}, - has_port=True) - - def test_convert_key_with_port(self): - keys = {'pm_addr': 'foo_address', - 'pm_user': 'foo_username', - 'pm_password': 'foo_password', - 'foo_something': 'foo_something', - 'pm_d': 'foo_d', - 'pm_port': 'foo_port'} - for key, expected in keys.items(): - self.assertEqual(expected, self.driver_info.convert_key(key)) - - self.assertIsNone(self.driver_info.convert_key('unknown')) - - def test_unique_id_from_fields(self): - fields = {'pm_addr': 'localhost', - 'pm_user': 'user', - 'pm_password': '123456', - 'pm_port': 42} - self.assertEqual('localhost:42', - self.driver_info.unique_id_from_fields(fields)) - - def test_unique_id_from_node(self): - node = mock.Mock(driver_info={'foo_address': 'localhost', - 'foo_port': 42}) - self.assertEqual('localhost:42', - self.driver_info.unique_id_from_node(node)) - - -class RedfishDriverInfoTest(base.TestCase): - driver_info = nodes.RedfishDriverInfo() - - def test_convert_key(self): - keys = {'pm_addr': 'redfish_address', - 'pm_user': 'redfish_username', - 'pm_password': 
'redfish_password', - 'pm_system_id': 'redfish_system_id', - 'redfish_verify_ca': 'redfish_verify_ca'} - for key, expected in keys.items(): - self.assertEqual(expected, self.driver_info.convert_key(key)) - - self.assertIsNone(self.driver_info.convert_key('unknown')) - - def test_unique_id_from_fields(self): - for address in ['example.com', - 'http://example.com/', - 'https://example.com/']: - fields = {'pm_addr': address, - 'pm_user': 'user', - 'pm_password': '123456', - 'pm_system_id': '/redfish/v1/Systems/1'} - self.assertEqual('example.com/redfish/v1/Systems/1', - self.driver_info.unique_id_from_fields(fields)) - - def test_unique_id_from_node(self): - for address in ['example.com', - 'http://example.com/', - 'https://example.com/']: - node = mock.Mock(driver_info={ - 'redfish_address': address, - 'redfish_system_id': '/redfish/v1/Systems/1'}) - self.assertEqual('example.com/redfish/v1/Systems/1', - self.driver_info.unique_id_from_node(node)) - - -class oVirtDriverInfoTest(base.TestCase): - driver_info = nodes.oVirtDriverInfo() - - def test_convert_key(self): - keys = {'pm_addr': 'ovirt_address', - 'pm_user': 'ovirt_username', - 'pm_password': 'ovirt_password', - 'pm_vm_name': 'ovirt_vm_name', - 'ovirt_insecure': 'ovirt_insecure'} - for key, expected in keys.items(): - self.assertEqual(expected, self.driver_info.convert_key(key)) - - self.assertIsNone(self.driver_info.convert_key('unknown')) - - def test_unique_id_from_fields(self): - fields = {'pm_addr': 'http://127.0.0.1', - 'pm_user': 'user', - 'pm_password': '123456', - 'pm_vm_name': 'My VM'} - self.assertEqual('http://127.0.0.1:My VM', - self.driver_info.unique_id_from_fields(fields)) - - def test_unique_id_from_node(self): - node = mock.Mock(driver_info={ - 'ovirt_address': 'http://127.0.0.1', - 'ovirt_vm_name': 'My VM'}) - self.assertEqual('http://127.0.0.1:My VM', - self.driver_info.unique_id_from_node(node)) - - -class iBootDriverInfoTest(base.TestCase): - def setUp(self): - super(iBootDriverInfoTest, self).setUp() - self.driver_info = nodes.iBootDriverInfo() - - def test_unique_id_from_fields(self): - fields = {'pm_addr': 'localhost', - 'pm_user': 'user', - 'pm_password': '123456', - 'pm_port': 42, - 'iboot_relay_id': 'r1'} - self.assertEqual('localhost:42#r1', - self.driver_info.unique_id_from_fields(fields)) - - def test_unique_id_from_fields_no_relay(self): - fields = {'pm_addr': 'localhost', - 'pm_user': 'user', - 'pm_password': '123456', - 'pm_port': 42} - self.assertEqual('localhost:42', - self.driver_info.unique_id_from_fields(fields)) - - def test_unique_id_from_node(self): - node = mock.Mock(driver_info={'iboot_address': 'localhost', - 'iboot_port': 42, - 'iboot_relay_id': 'r1'}) - self.assertEqual('localhost:42#r1', - self.driver_info.unique_id_from_node(node)) - - def test_unique_id_from_node_no_relay(self): - node = mock.Mock(driver_info={'iboot_address': 'localhost', - 'iboot_port': 42}) - self.assertEqual('localhost:42', - self.driver_info.unique_id_from_node(node)) - - -class iDRACDriverInfoTest(base.TestCase): - def setUp(self): - super(iDRACDriverInfoTest, self).setUp() - self.driver_info = nodes.iDRACDriverInfo() - - def test_convert_key(self): - keys = {'pm_addr': 'drac_address', - 'pm_user': 'drac_username', - 'pm_password': 'drac_password', - 'pm_port': 'drac_port', - 'pm_system_id': 'redfish_system_id', - 'redfish_verify_ca': 'redfish_verify_ca' - } - for key, expected in keys.items(): - self.assertEqual(expected, self.driver_info.convert_key(key)) - - 
self.assertIsNone(self.driver_info.convert_key('unknown')) - - def test_convert(self): - for address in ['foo.bar', - 'http://foo.bar/', - 'https://foo.bar/', - 'https://foo.bar:8080/']: - fields = {'pm_addr': address, - 'pm_user': 'test', - 'pm_password': 'random', - 'redfish_system_id': '/redfish/v1/Systems/1', - 'pm_port': 6230} - result = self.driver_info.convert(fields) - self.assertEqual({'drac_password': 'random', - 'drac_address': 'foo.bar', - 'drac_username': 'test', - 'redfish_password': 'random', - 'redfish_address': address, - 'redfish_username': 'test', - 'redfish_system_id': '/redfish/v1/Systems/1', - 'drac_port': 6230}, result) - - def test_unique_id_from_fields(self): - mock_drac = mock.Mock( - wraps=self.driver_info._drac_driverinfo.unique_id_from_fields) - self.driver_info._drac_driverinfo.unique_id_from_fields = mock_drac - mock_redfish = mock.Mock( - wraps=self.driver_info._redfish_driverinfo.unique_id_from_fields) - self.driver_info._redfish_driverinfo.unique_id_from_fields = ( - mock_redfish) - - fields = {'pm_addr': 'foo.bar', - 'pm_user': 'test', - 'pm_password': 'random', - 'pm_port': 6230} - self.assertEqual('foo.bar:6230', - self.driver_info.unique_id_from_fields(fields)) - - mock_drac.assert_called_once_with(fields) - mock_redfish.assert_not_called() - - def test_unique_id_from_fields_with_https(self): - fields = {'pm_addr': 'https://foo.bar:8080/', - 'pm_user': 'test', - 'pm_password': 'random', - 'pm_port': 6230} - self.assertEqual('foo.bar:6230', - self.driver_info.unique_id_from_fields(fields)) - - def test_unique_id_from_node(self): - mock_drac = mock.Mock( - wraps=self.driver_info._drac_driverinfo.unique_id_from_node) - self.driver_info._drac_driverinfo.unique_id_from_node = mock_drac - mock_redfish = mock.Mock( - wraps=self.driver_info._redfish_driverinfo.unique_id_from_node) - self.driver_info._redfish_driverinfo.unique_id_from_node = mock_redfish - - node = mock.Mock(driver_info={'drac_address': 'foo.bar', - 'drac_port': 6230}) - self.assertEqual('foo.bar:6230', - self.driver_info.unique_id_from_node(node)) - - mock_drac.assert_called_once_with(node) - mock_redfish.assert_not_called() - - -class FindNodeHandlerTest(base.TestCase): - def test_found(self): - test = [('fake', 'fake'), - ('fake_pxe', 'fake'), - ('pxe_ipmitool', 'ipmi'), - ('ipmi', 'ipmi'), - ('pxe_ilo', 'ilo'), - ('ilo', 'ilo'), - ('pxe_drac', 'drac'), - ('idrac', 'drac'), - ('agent_irmc', 'irmc'), - ('irmc', 'irmc')] - for driver, prefix in test: - handler = nodes._find_node_handler({'pm_type': driver}) - self.assertEqual(prefix, handler._prefix) - - def test_no_driver(self): - self.assertRaises(exception.InvalidNode, - nodes._find_node_handler, {}) - - def test_unknown_driver(self): - self.assertRaises(exception.InvalidNode, - nodes._find_node_handler, {'pm_type': 'foobar'}) - self.assertRaises(exception.InvalidNode, - nodes._find_node_handler, {'pm_type': 'ipmi_foo'}) - - -class NodesTest(base.TestCase): - - def _get_node(self): - return {'cpu': '1', 'memory': '2048', 'disk': '30', 'arch': 'amd64', - 'ports': [{'address': 'aaa'}], 'pm_addr': 'foo.bar', - 'pm_user': 'test', 'pm_password': 'random', 'pm_type': 'ipmi', - 'name': 'node1', 'capabilities': 'num_nics:6'} - - def test_register_all_nodes_ironic_no_hw_stats(self): - node_list = [self._get_node()] - - # Remove the hardware stats from the node dictionary - node_list[0].pop("cpu") - node_list[0].pop("memory") - node_list[0].pop("disk") - node_list[0].pop("arch") - - # Node properties should be created with empty string values for the 
- # hardware statistics - node_properties = {"capabilities": "num_nics:6"} - - ironic = mock.MagicMock() - new_nodes = nodes.register_all_nodes(node_list, client=ironic) - self.assertEqual([ironic.node.create.return_value], new_nodes) - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - name='node1', - driver_info=pxe_node_driver_info, - resource_class='baremetal', - properties=node_properties) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='ctlplane', - local_link_connection=None) - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_all_nodes(self): - node_list = [self._get_node()] - node_list[0]['root_device'] = {"serial": "abcdef"} - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6", - "root_device": {"serial": "abcdef"}} - ironic = mock.MagicMock() - nodes.register_all_nodes(node_list, client=ironic) - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - name='node1', - driver_info=pxe_node_driver_info, - resource_class='baremetal', - properties=node_properties) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='ctlplane', - local_link_connection=None) - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_all_nodes_with_platform(self): - node_list = [self._get_node()] - node_list[0]['root_device'] = {"serial": "abcdef"} - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6", - "root_device": {"serial": "abcdef"}} - node_list[0].update({'platform': 'SNB'}) - node_extra = {"tripleo_platform": "SNB"} - ironic = mock.MagicMock() - nodes.register_all_nodes(node_list, client=ironic) - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - name='node1', - driver_info=pxe_node_driver_info, - resource_class='baremetal', - properties=node_properties, - extra=node_extra) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='ctlplane', - local_link_connection=None) - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_all_nodes_kernel_ramdisk(self): - node_list = [self._get_node()] - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - ironic = mock.MagicMock() - nodes.register_all_nodes(node_list, client=ironic, - kernel_name='bm-kernel', - ramdisk_name='bm-ramdisk') - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - name='node1', - driver_info=pxe_node_driver_info, - resource_class='baremetal', - properties=node_properties) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='ctlplane', - local_link_connection=None) - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def 
test_register_all_nodes_uuid(self): - node_list = [self._get_node()] - node_list[0]['uuid'] = 'abcdef' - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - ironic = mock.MagicMock() - nodes.register_all_nodes(node_list, client=ironic) - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - name='node1', - driver_info=pxe_node_driver_info, - properties=node_properties, - resource_class='baremetal', - uuid="abcdef") - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='ctlplane', - local_link_connection=None) - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_all_nodes_caps_dict(self): - node_list = [self._get_node()] - node_list[0]['capabilities'] = { - 'num_nics': 7 - } - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:7"} - ironic = mock.MagicMock() - nodes.register_all_nodes(node_list, client=ironic) - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - name='node1', - driver_info=pxe_node_driver_info, - resource_class='baremetal', - properties=node_properties) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='ctlplane', - local_link_connection=None) - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_all_nodes_with_profile(self): - node_list = [self._get_node()] - node_list[0]['root_device'] = {"serial": "abcdef"} - node_list[0]['profile'] = "compute" - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6,profile:compute", - "root_device": {"serial": "abcdef"}} - ironic = mock.MagicMock() - nodes.register_all_nodes(node_list, client=ironic) - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - name='node1', - driver_info=pxe_node_driver_info, - resource_class='baremetal', - properties=node_properties) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='ctlplane', - local_link_connection=None) - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_all_nodes_with_interfaces(self): - interfaces = {'boot_interface': 'pxe', - 'console_interface': 'ipmitool-socat', - 'deploy_interface': 'direct', - 'inspect_interface': 'inspector', - 'management_interface': 'ipmitool', - 'network_interface': 'neutron', - 'power_interface': 'ipmitool', - 'raid_interface': 'agent', - 'rescue_interface': 'agent', - 'storage_interface': 'cinder', - 'vendor_interface': 'ipmitool'} - - node_list = [self._get_node()] - node_list[0].update(interfaces) - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - ironic = mock.MagicMock() - nodes.register_all_nodes(node_list, client=ironic) - pxe_node_driver_info = {"ipmi_address": "foo.bar", - "ipmi_username": "test", - "ipmi_password": "random"} - pxe_node = mock.call(driver="ipmi", - 
name='node1', - driver_info=pxe_node_driver_info, - properties=node_properties, - resource_class='baremetal', - **interfaces) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', local_link_connection=None, - physical_network='ctlplane') - ironic.node.create.assert_has_calls([pxe_node, mock.ANY]) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_update(self): - interfaces = {'boot_interface': 'pxe', - 'console_interface': 'ipmitool-socat', - 'deploy_interface': 'direct', - 'inspect_interface': 'inspector', - 'management_interface': 'ipmitool', - 'network_interface': 'neutron', - 'power_interface': 'ipmitool', - 'raid_interface': 'agent', - 'rescue_interface': 'agent', - 'storage_interface': 'cinder', - 'vendor_interface': 'ipmitool'} - - node = self._get_node() - node.update(interfaces) - node['root_device'] = {'serial': 'abcdef'} - ironic = mock.MagicMock() - node_map = {'mac': {'aaa': 1}} - - def side_effect(*args, **kwargs): - update_patch = [ - {'path': '/name', 'value': 'node1'}, - {'path': '/driver_info/ipmi_password', 'value': 'random'}, - {'path': '/driver_info/ipmi_address', 'value': 'foo.bar'}, - {'path': '/properties/memory_mb', 'value': '2048'}, - {'path': '/properties/local_gb', 'value': '30'}, - {'path': '/properties/cpu_arch', 'value': 'amd64'}, - {'path': '/properties/cpus', 'value': '1'}, - {'path': '/properties/capabilities', 'value': 'num_nics:6'}, - {'path': '/properties/root_device', - 'value': {'serial': 'abcdef'}}, - {'path': '/driver_info/ipmi_username', 'value': 'test'}] - for iface, value in interfaces.items(): - update_patch.append({'path': '/%s' % iface, 'value': value}) - for key in update_patch: - key['op'] = 'add' - self.assertThat(update_patch, - matchers.MatchesSetwise(*(map(matchers.Equals, - args[1])))) - return mock.Mock(uuid='uuid1') - - ironic.node.update.side_effect = side_effect - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with(1, mock.ANY) - - def test_register_update_caps_dict(self): - interfaces = {'boot_interface': 'pxe', - 'console_interface': 'ipmitool-socat', - 'deploy_interface': 'direct', - 'inspect_interface': 'inspector', - 'management_interface': 'ipmitool', - 'network_interface': 'neutron', - 'power_interface': 'ipmitool', - 'raid_interface': 'agent', - 'rescue_interface': 'agent', - 'storage_interface': 'cinder', - 'vendor_interface': 'ipmitool'} - - node = self._get_node() - node.update(interfaces) - node['root_device'] = {'serial': 'abcdef'} - node['capabilities'] = {'profile': 'compute', 'num_nics': 6} - ironic = mock.MagicMock() - node_map = {'mac': {'aaa': 1}} - - def side_effect(*args, **kwargs): - update_patch = [ - {'path': '/name', 'value': 'node1'}, - {'path': '/driver_info/ipmi_password', 'value': 'random'}, - {'path': '/driver_info/ipmi_address', 'value': 'foo.bar'}, - {'path': '/properties/memory_mb', 'value': '2048'}, - {'path': '/properties/local_gb', 'value': '30'}, - {'path': '/properties/cpu_arch', 'value': 'amd64'}, - {'path': '/properties/cpus', 'value': '1'}, - {'path': '/properties/capabilities', - 'value': 'num_nics:6,profile:compute'}, - {'path': '/properties/root_device', - 'value': {'serial': 'abcdef'}}, - {'path': '/driver_info/ipmi_username', 'value': 'test'}] - for iface, value in interfaces.items(): - update_patch.append({'path': '/%s' % iface, 'value': value}) - for key in update_patch: - key['op'] = 'add' - self.assertThat(update_patch, - 
matchers.MatchesSetwise(*(map(matchers.Equals, - args[1])))) - return mock.Mock(uuid='uuid1') - - ironic.node.update.side_effect = side_effect - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with(1, mock.ANY) - - def test_register_update_profile(self): - interfaces = {'boot_interface': 'pxe', - 'console_interface': 'ipmitool-socat', - 'deploy_interface': 'direct', - 'inspect_interface': 'inspector', - 'management_interface': 'ipmitool', - 'network_interface': 'neutron', - 'power_interface': 'ipmitool', - 'raid_interface': 'agent', - 'rescue_interface': 'agent', - 'storage_interface': 'cinder', - 'vendor_interface': 'ipmitool'} - - node = self._get_node() - node.update(interfaces) - node['root_device'] = {'serial': 'abcdef'} - node['profile'] = 'compute' - ironic = mock.MagicMock() - node_map = {'mac': {'aaa': 1}} - - def side_effect(*args, **kwargs): - update_patch = [ - {'path': '/name', 'value': 'node1'}, - {'path': '/driver_info/ipmi_password', 'value': 'random'}, - {'path': '/driver_info/ipmi_address', 'value': 'foo.bar'}, - {'path': '/properties/memory_mb', 'value': '2048'}, - {'path': '/properties/local_gb', 'value': '30'}, - {'path': '/properties/cpu_arch', 'value': 'amd64'}, - {'path': '/properties/cpus', 'value': '1'}, - {'path': '/properties/capabilities', - 'value': 'num_nics:6,profile:compute'}, - {'path': '/properties/root_device', - 'value': {'serial': 'abcdef'}}, - {'path': '/driver_info/ipmi_username', 'value': 'test'}] - for iface, value in interfaces.items(): - update_patch.append({'path': '/%s' % iface, 'value': value}) - for key in update_patch: - key['op'] = 'add' - self.assertThat(update_patch, - matchers.MatchesSetwise(*(map(matchers.Equals, - args[1])))) - return mock.Mock(uuid='uuid1') - - ironic.node.update.side_effect = side_effect - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with(1, mock.ANY) - - def test_register_update_with_images(self): - node = self._get_node() - node['kernel_id'] = 'image-k' - node['ramdisk_id'] = 'image-r' - ironic = mock.MagicMock() - node_map = {'mac': {'aaa': 1}} - - def side_effect(*args, **kwargs): - update_patch = [ - {'path': '/name', 'value': 'node1'}, - {'path': '/driver_info/ipmi_password', 'value': 'random'}, - {'path': '/driver_info/ipmi_address', 'value': 'foo.bar'}, - {'path': '/properties/memory_mb', 'value': '2048'}, - {'path': '/properties/local_gb', 'value': '30'}, - {'path': '/properties/cpu_arch', 'value': 'amd64'}, - {'path': '/properties/cpus', 'value': '1'}, - {'path': '/properties/capabilities', 'value': 'num_nics:6'}, - {'path': '/driver_info/deploy_kernel', 'value': 'image-k'}, - {'path': '/driver_info/deploy_ramdisk', 'value': 'image-r'}, - {'path': '/driver_info/rescue_kernel', 'value': 'image-k'}, - {'path': '/driver_info/rescue_ramdisk', 'value': 'image-r'}, - {'path': '/driver_info/ipmi_username', 'value': 'test'}] - for key in update_patch: - key['op'] = 'add' - self.assertThat(update_patch, - matchers.MatchesSetwise(*(map(matchers.Equals, - args[1])))) - return mock.Mock(uuid='uuid1') - - ironic.node.update.side_effect = side_effect - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with(1, mock.ANY) - - def test_register_update_with_interfaces(self): - node = self._get_node() - ironic = mock.MagicMock() - node_map = {'mac': {'aaa': 1}} - - def side_effect(*args, **kwargs): - update_patch = [ - {'path': '/name', 'value': 
'node1'}, - {'path': '/driver_info/ipmi_password', 'value': 'random'}, - {'path': '/driver_info/ipmi_address', 'value': 'foo.bar'}, - {'path': '/properties/memory_mb', 'value': '2048'}, - {'path': '/properties/local_gb', 'value': '30'}, - {'path': '/properties/cpu_arch', 'value': 'amd64'}, - {'path': '/properties/cpus', 'value': '1'}, - {'path': '/properties/capabilities', 'value': 'num_nics:6'}, - {'path': '/driver_info/ipmi_username', 'value': 'test'}] - for key in update_patch: - key['op'] = 'add' - self.assertThat(update_patch, - matchers.MatchesSetwise(*(map(matchers.Equals, - args[1])))) - return mock.Mock(uuid='uuid1') - - ironic.node.update.side_effect = side_effect - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with(1, mock.ANY) - - def _update_by_type(self, pm_type): - ironic = mock.MagicMock() - node_map = {'mac': {}, 'pm_addr': {}} - node = self._get_node() - node['pm_type'] = pm_type - node_map['pm_addr']['foo.bar'] = ironic.node.get.return_value.uuid - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with( - ironic.node.get.return_value.uuid, mock.ANY) - - def test_update_node_ironic_ipmi(self): - self._update_by_type('ipmi') - - def test_update_node_ironic_pxe_ipmitool(self): - self._update_by_type('pxe_ipmitool') - - def test_update_node_ironic_idrac(self): - self._update_by_type('idrac') - - def test_update_node_ironic_pxe_drac(self): - self._update_by_type('pxe_drac') - - def test_update_node_ironic_ilo(self): - self._update_by_type('ilo') - - def test_update_node_ironic_pxe_ilo(self): - self._update_by_type('pxe_ilo') - - def test_update_node_ironic_irmc(self): - self._update_by_type('irmc') - - def test_update_node_ironic_pxe_irmc(self): - self._update_by_type('pxe_irmc') - - def test_update_node_ironic_xclarity(self): - self._update_by_type('xclarity') - - def test_update_node_ironic_redfish(self): - ironic = mock.MagicMock() - node_map = {'mac': {}, 'pm_addr': {}} - node = self._get_node() - node.update({'pm_type': 'redfish', - 'pm_system_id': '/path'}) - node_map['pm_addr']['foo.bar/path'] = ironic.node.get.return_value.uuid - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with( - ironic.node.get.return_value.uuid, mock.ANY) - - def test_update_node_ironic_ovirt(self): - ironic = mock.MagicMock() - node_map = {'mac': {}, 'pm_addr': {}} - node = self._get_node() - node.update({'pm_type': 'staging-ovirt', - 'pm_vm_name': 'VM1'}) - node_map['pm_addr']['foo.bar:VM1'] = ironic.node.get.return_value.uuid - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with( - ironic.node.get.return_value.uuid, mock.ANY) - - def test_register_node_update(self): - node = self._get_node() - node['ports'][0]['address'] = node['ports'][0]['address'].upper() - ironic = mock.MagicMock() - node_map = {'mac': {'aaa': 1}} - - def side_effect(*args, **kwargs): - update_patch = [ - {'path': '/name', 'value': 'node1'}, - {'path': '/driver_info/ipmi_password', 'value': 'random'}, - {'path': '/driver_info/ipmi_address', 'value': 'foo.bar'}, - {'path': '/properties/memory_mb', 'value': '2048'}, - {'path': '/properties/local_gb', 'value': '30'}, - {'path': '/properties/cpu_arch', 'value': 'amd64'}, - {'path': '/properties/cpus', 'value': '1'}, - {'path': '/properties/capabilities', 'value': 'num_nics:6'}, - {'path': '/driver_info/ipmi_username', 'value': 
'test'}] - for key in update_patch: - key['op'] = 'add' - self.assertThat(update_patch, - matchers.MatchesSetwise(*(map(matchers.Equals, - args[1])))) - return mock.Mock(uuid='uuid1') - - ironic.node.update.side_effect = side_effect - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with(1, mock.ANY) - - def test_register_node_update_with_uuid(self): - node = self._get_node() - node['uuid'] = 'abcdef' - ironic = mock.MagicMock() - node_map = {'uuids': {'abcdef'}} - - def side_effect(*args, **kwargs): - update_patch = [ - {'path': '/name', 'value': 'node1'}, - {'path': '/driver_info/ipmi_password', 'value': 'random'}, - {'path': '/driver_info/ipmi_address', 'value': 'foo.bar'}, - {'path': '/properties/memory_mb', 'value': '2048'}, - {'path': '/properties/local_gb', 'value': '30'}, - {'path': '/properties/cpu_arch', 'value': 'amd64'}, - {'path': '/properties/cpus', 'value': '1'}, - {'path': '/properties/capabilities', 'value': 'num_nics:6'}, - {'path': '/driver_info/ipmi_username', 'value': 'test'}] - for key in update_patch: - key['op'] = 'add' - self.assertThat(update_patch, - matchers.MatchesSetwise(*(map(matchers.Equals, - args[1])))) - return mock.Mock(uuid='abcdef') - - ironic.node.update.side_effect = side_effect - nodes._update_or_register_ironic_node(node, node_map, client=ironic) - ironic.node.update.assert_called_once_with('abcdef', mock.ANY) - - def test_register_ironic_node_fake_pxe(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - for v in ('pm_addr', 'pm_user', 'pm_password'): - del node[v] - node['pm_type'] = 'fake_pxe' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with(driver='manual-management', - name='node1', - properties=node_properties, - resource_class='baremetal', - driver_info={}) - - def test_register_ironic_node_conductor_group(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['conductor_group'] = 'cg1' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='ipmi', name='node1', - properties=node_properties, - resource_class='baremetal', - driver_info={'ipmi_password': 'random', 'ipmi_address': 'foo.bar', - 'ipmi_username': 'test'}, - conductor_group='cg1') - - def test_register_ironic_node_ipmi(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'ipmi' - node['pm_port'] = '6230' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='ipmi', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'ipmi_password': 'random', 'ipmi_address': 'foo.bar', - 'ipmi_username': 'test', 'ipmi_port': '6230'}) - - def test_register_ironic_node_pxe_ipmitool(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'pxe_ipmitool' - node['pm_port'] = '6230' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( 
- driver='ipmi', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'ipmi_password': 'random', 'ipmi_address': 'foo.bar', - 'ipmi_username': 'test', 'ipmi_port': '6230'}) - - def test_register_ironic_node_idrac(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'idrac' - node['pm_system_id'] = '/redfish/v1/Systems/1' - node['pm_port'] = '6230' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='idrac', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'drac_password': 'random', 'drac_address': 'foo.bar', - 'drac_username': 'test', 'redfish_password': 'random', - 'redfish_address': 'foo.bar', - 'redfish_username': 'test', - 'redfish_system_id': '/redfish/v1/Systems/1', - 'drac_port': '6230'}) - - def test_register_ironic_node_ilo(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'ilo' - node['pm_port'] = '1234' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='ilo', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'ilo_password': 'random', 'ilo_address': 'foo.bar', - 'ilo_username': 'test', 'ilo_port': '1234'}) - - def test_register_ironic_node_pxe_drac(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'pxe_drac' - node['pm_port'] = '6230' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='idrac', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'drac_password': 'random', 'drac_address': 'foo.bar', - 'drac_username': 'test', 'drac_port': '6230'}) - - def test_register_ironic_node_pxe_ilo(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'pxe_ilo' - node['pm_port'] = '1234' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='ilo', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'ilo_password': 'random', 'ilo_address': 'foo.bar', - 'ilo_username': 'test', 'ilo_port': '1234'}) - - def test_register_ironic_node_redfish(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'redfish' - node['pm_system_id'] = '/redfish/v1/Systems/1' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='redfish', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'redfish_password': 'random', - 'redfish_address': 'foo.bar', - 'redfish_username': 'test', - 'redfish_system_id': '/redfish/v1/Systems/1'}) - - def test_register_ironic_node_redfish_without_credentials(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - 
"local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'redfish' - node['pm_system_id'] = '/redfish/v1/Systems/1' - del node['pm_user'] - del node['pm_password'] - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='redfish', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'redfish_address': 'foo.bar', - 'redfish_system_id': '/redfish/v1/Systems/1'}) - - def test_register_ironic_node_with_physical_network(self): - node = self._get_node() - node['ports'] = [{'physical_network': 'subnet1', 'address': 'aaa'}] - ironic = mock.MagicMock() - nodes.register_ironic_node(node, client=ironic) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='subnet1', - local_link_connection=None) - ironic.port.create.assert_has_calls([port_call]) - - def test_register_ironic_node_with_local_link_connection(self): - node = self._get_node() - node['ports'] = [ - { - 'local_link_connection': { - "switch_info": "switch", - "port_id": "port1", - "switch_id": "bbb" - }, - 'physical_network': 'subnet1', - 'address': 'aaa' - } - ] - ironic = mock.MagicMock() - nodes.register_ironic_node(node, client=ironic) - port_call = mock.call(node_uuid=ironic.node.create.return_value.uuid, - address='aaa', physical_network='subnet1', - local_link_connection={"switch_info": "switch", - "port_id": "port1", - "switch_id": "bbb"}) - ironic.port.create.assert_has_calls([port_call]) - - def test_clean_up_extra_nodes_ironic(self): - node = collections.namedtuple('node', ['uuid']) - client = mock.MagicMock() - client.node.list.return_value = [node('foobar')] - seen = [node('abcd')] - nodes._clean_up_extra_nodes(seen, client, remove=True) - client.node.delete.assert_called_once_with('foobar') - - def test__get_node_id_manual_management(self): - node = self._get_node() - node['pm_type'] = 'manual-management' - handler = nodes.find_driver_handler('manual-management') - node_map = {'mac': {'aaa': 'abcdef'}, 'pm_addr': {}} - self.assertEqual('abcdef', nodes._get_node_id(node, handler, node_map)) - - def test__get_node_id_conflict(self): - node = self._get_node() - handler = nodes.find_driver_handler('ipmi') - node_map = {'mac': {'aaa': 'abcdef'}, - 'pm_addr': {'foo.bar': 'defabc'}} - self.assertRaises(exception.InvalidNode, - nodes._get_node_id, - node, handler, node_map) - - def test_get_node_id_valid_duplicate(self): - node = self._get_node() - handler = nodes.find_driver_handler('ipmi') - node_map = {'mac': {'aaa': 'id'}, - 'pm_addr': {'foo.bar': 'id'}} - self.assertEqual('id', nodes._get_node_id(node, handler, node_map)) - - def test_register_ironic_node_xclarity(self): - node_properties = {"cpus": "1", - "memory_mb": "2048", - "local_gb": "30", - "cpu_arch": "amd64", - "capabilities": "num_nics:6"} - node = self._get_node() - node['pm_type'] = 'xclarity' - node['pm_port'] = '4444' - client = mock.MagicMock() - nodes.register_ironic_node(node, client=client) - client.node.create.assert_called_once_with( - driver='xclarity', name='node1', properties=node_properties, - resource_class='baremetal', - driver_info={'xclarity_password': 'random', - 'xclarity_address': 'foo.bar', - 'xclarity_username': 'test', - 'xclarity_port': '4444'}) - - -class TestPopulateNodeMapping(base.TestCase): - def test_populate_node_mapping_ironic(self): - client = mock.MagicMock() - ironic_node = collections.namedtuple('node', ['uuid', 
'driver', - 'driver_info']) - ironic_port = collections.namedtuple('port', ['address']) - node1 = ironic_node('abcdef', 'redfish', {}) - node2 = ironic_node('fedcba', 'pxe_ipmitool', - {'ipmi_address': '10.0.1.2'}) - node3 = ironic_node('xyz', 'ipmi', {'ipmi_address': '10.0.1.3'}) - client.node.list_ports.side_effect = ([ironic_port('aaa')], [], []) - client.node.list.return_value = [node1, node2, node3] - expected = {'mac': {'aaa': 'abcdef'}, - 'pm_addr': {'10.0.1.2': 'fedcba', '10.0.1.3': 'xyz'}, - 'uuids': {'abcdef', 'fedcba', 'xyz'}} - self.assertEqual(expected, nodes._populate_node_mapping(client)) - - def test_populate_node_mapping_ironic_manual_management(self): - client = mock.MagicMock() - ironic_node = collections.namedtuple('node', ['uuid', 'driver', - 'driver_info']) - ironic_port = collections.namedtuple('port', ['address']) - node = ironic_node('abcdef', 'manual-management', None) - client.node.list_ports.return_value = [ironic_port('aaa')] - client.node.list.return_value = [node] - expected = {'mac': {'aaa': 'abcdef'}, 'pm_addr': {}, - 'uuids': {'abcdef'}} - self.assertEqual(expected, nodes._populate_node_mapping(client)) - - -VALID_NODE_JSON = [ - {'_comment': 'This is a comment', - 'pm_type': 'pxe_ipmitool', - 'pm_addr': '192.168.0.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd'}, - {'pm_type': 'ipmi', - 'pm_addr': '192.168.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd'}, - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '192.168.0.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'pm_port': 1234, - 'ipmi_priv_level': 'USER', - 'ports': [ - {'address': 'aa:bb:cc:dd:ee:ff'}, - {'address': '11:22:33:44:55:66'} - ], - 'name': 'foobar1', - 'capabilities': {'foo': 'bar'}, - 'kernel_id': 'kernel1', - 'ramdisk_id': 'ramdisk1'}, - {'pm_type': 'ipmi', - 'pm_addr': '192.168.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'pm_port': 1234, - 'ipmi_priv_level': 'USER', - 'ports': [ - {'address': 'dd:ee:ff:aa:bb:cc'}, - {'address': '44:55:66:11:22:33'} - ], - 'name': 'foobar2', - 'capabilities': {'foo': 'bar'}, - 'kernel_id': 'kernel1', - 'ramdisk_id': 'ramdisk1'}, - {'pm_type': 'idrac', - 'pm_addr': '1.2.3.4', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'ports': [ - {'address': '22:22:22:22:22:22'} - ], - 'capabilities': 'foo:bar,foo1:bar1', - 'cpu': 2, - 'memory': 1024, - 'disk': 40, - 'arch': 'x86_64', - 'root_device': {'foo': 'bar'}}, - {'pm_type': 'redfish', - 'pm_addr': '1.2.3.4', - 'pm_user': 'root', - 'pm_password': 'foobar', - 'pm_system_id': '/redfish/v1/Systems/1'}, - {'pm_type': 'ipmi', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'arch': 'x86_64', - 'platform': 'SNB'}, -] - - -class TestValidateNodes(base.TestCase): - def test_valid(self): - nodes.validate_nodes(VALID_NODE_JSON) - - def test_unknown_driver(self): - nodes_json = [ - {'pm_type': 'pxe_foobar', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd'}, - ] - self.assertRaisesRegex(exception.InvalidNode, - 'unknown pm_type .* pxe_foobar', - nodes.validate_nodes, nodes_json) - - def test_duplicate_ipmi_address(self): - nodes_json = [ - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd'}, - {'pm_type': 'ipmi', - 'pm_addr': '1.1.1.1', - 'pm_user': 'user', - 'pm_password': 'p@$$w0rd'}, - ] - self.assertRaisesRegex(exception.InvalidNode, - 'Node identified by 1.1.1.1 is already present', - nodes.validate_nodes, nodes_json) - - def test_invalid_mac(self): - nodes_json = [ - {'pm_type': 
'pxe_ipmitool', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'ports': [ - {'address': '42'}] - }, - ] - self.assertRaisesRegex(exception.InvalidNode, - 'MAC address 42 is invalid', - nodes.validate_nodes, nodes_json) - - def test_duplicate_mac(self): - nodes_json = [ - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'ports': [ - {'address': '11:22:33:44:55:66'} - ]}, - {'pm_type': 'ipmi', - 'pm_addr': '1.2.1.1', - 'pm_user': 'user', - 'pm_password': 'p@$$w0rd', - 'ports': [ - {'address': '11:22:33:44:55:66'} - ]}, - ] - self.assertRaisesRegex(exception.InvalidNode, - 'MAC 11:22:33:44:55:66 is not unique', - nodes.validate_nodes, nodes_json) - - def test_duplicate_names(self): - nodes_json = [ - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'name': 'name'}, - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '1.2.1.2', - 'pm_user': 'user', - 'pm_password': 'p@$$w0rd', - 'name': 'name'}, - ] - self.assertRaisesRegex(exception.InvalidNode, - 'Name "name" is not unique', - nodes.validate_nodes, nodes_json) - - def test_invalid_capability(self): - nodes_json = [ - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'capabilities': '42'}, - ] - self.assertRaisesRegex(exception.InvalidNode, - 'Invalid capabilities: 42', - nodes.validate_nodes, nodes_json) - - def test_unexpected_fields(self): - nodes_json = [ - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'pm_foobar': '42'}, - ] - self.assertRaisesRegex(exception.InvalidNode, - 'Unknown field pm_foobar', - nodes.validate_nodes, nodes_json) - - def test_missing_fields(self): - for field in ('pm_addr', 'pm_user', 'pm_password'): - # NOTE(tonyb): We can't use ipmi here as it's fine with some of - # these fields being missing. 
- nodes_json = [ - {'pm_type': 'pxe_drac', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd'}, - ] - del nodes_json[0][field] - - self.assertRaisesRegex(exception.InvalidNode, - 'fields are missing: %s' % field, - nodes.validate_nodes, nodes_json) - - def test_missing_arch_with_platform_fail(self): - nodes_json = [ - {'pm_type': 'ipmi', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'platform': 'SNB'}, - ] - - msg = 'You have specified a platform without an architecture' - self.assertRaisesRegex(exception.InvalidNode, - msg, - nodes.validate_nodes, nodes_json) - - def test_ipmi_missing_user_ok(self): - nodes_json = [ - {'pm_type': 'ipmi', - 'pm_addr': '1.1.1.1', - 'pm_password': 'p@$$w0rd'}, - ] - - # validate_nodes() doesn't have an explicit return which means python - # gives us None - self.assertEqual(None, nodes.validate_nodes(nodes_json)) - - def test_duplicate_redfish_node(self): - nodes_json = [ - {'pm_type': 'redfish', - 'pm_addr': 'example.com', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'pm_system_id': '/redfish/v1/Systems/1'}, - {'pm_type': 'redfish', - 'pm_addr': 'https://example.com', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'pm_system_id': '/redfish/v1/Systems/1'}, - ] - self.assertRaisesRegex( - exception.InvalidNode, - 'Node identified by example.com/redfish/v1/Systems/1 ' - 'is already present', - nodes.validate_nodes, nodes_json) - - def test_redfish_missing_system_id(self): - nodes_json = [ - {'pm_type': 'redfish', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd'}, - ] - - self.assertRaisesRegex(exception.InvalidNode, - 'fields are missing: pm_system_id', - nodes.validate_nodes, nodes_json) - - def test_invalid_root_device(self): - nodes_json = [ - {'pm_type': 'pxe_ipmitool', - 'pm_addr': '1.1.1.1', - 'pm_user': 'root', - 'pm_password': 'p@$$w0rd', - 'root_device': 42} - ] - self.assertRaisesRegex(exception.InvalidNode, - 'Invalid root device', - nodes.validate_nodes, nodes_json) diff --git a/tripleo_common/tests/utils/test_overcloudrc.py b/tripleo_common/tests/utils/test_overcloudrc.py deleted file mode 100644 index e13df1c8e..000000000 --- a/tripleo_common/tests/utils/test_overcloudrc.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
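For context on the duplicate-detection assertions above (test_duplicate_redfish_node expects 'example.com' and 'https://example.com' with the same pm_system_id to collide as 'example.com/redfish/v1/Systems/1'): the unique-ID convention those tests pin down can be sketched as follows. This is a minimal reconstruction from the test expectations only; redfish_unique_id is a hypothetical name, not the tripleo_common API.

    import re

    def redfish_unique_id(pm_addr, pm_system_id):
        # Strip an optional http:// or https:// scheme plus any trailing
        # slash, then append the system id, so 'example.com',
        # 'http://example.com/' and 'https://example.com/' all reduce to
        # 'example.com/redfish/v1/Systems/1'.
        address = re.sub(r'^https?://', '', pm_addr).rstrip('/')
        return address + pm_system_id

    assert redfish_unique_id('https://example.com/',
                             '/redfish/v1/Systems/1') == \
        'example.com/redfish/v1/Systems/1'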
- -from unittest import mock - -from tripleo_common.tests import base -from tripleo_common.utils import overcloudrc - - -class OvercloudRcTest(base.TestCase): - - def test_generate_overcloudrc(self): - - stack = mock.MagicMock() - stack.stack_name = 'overcast' - stack.to_dict.return_value = { - "outputs": [ - {'output_key': 'KeystoneURL', - 'output_value': 'http://foo.com:8000/'}, - {'output_key': 'EndpointMap', - 'output_value': {'KeystoneAdmin': {'host': 'fd00::1'}}}, - ] - } - - result = overcloudrc._create_overcloudrc(stack, "foo", "AdminPassword", - "regionTwo") - - self.assertIn("export no_proxy='[fd00::1],foo,foo.com'", - result['overcloudrc']) - self.assertIn("OS_PASSWORD=AdminPassword", result['overcloudrc']) - - self.assertIn("export PYTHONWARNINGS='ignore:Certificate", - result['overcloudrc']) - self.assertIn("OS_IDENTITY_API_VERSION=3", result['overcloudrc']) - self.assertIn(overcloudrc.CLOUDPROMPT, result['overcloudrc']) - self.assertIn("OS_AUTH_TYPE=password", result['overcloudrc']) - self.assertIn("OS_AUTH_URL=http://foo.com:8000/", - result['overcloudrc']) - self.assertIn("OS_REGION_NAME=regionTwo", - result['overcloudrc']) - self.assertIn("OS_CLOUD=overcast", result['overcloudrc']) - - def test_generate_overcloudrc_with_duplicated_no_proxy(self): - - stack = mock.MagicMock() - stack.stack_name = 'overcast' - stack.to_dict.return_value = { - "outputs": [ - {'output_key': 'KeystoneURL', - 'output_value': 'http://foo.com:8000/'}, - {'output_key': 'EndpointMap', - 'output_value': {'KeystoneAdmin': {'host': 'fd00::1'}}}, - ] - } - - result = overcloudrc._create_overcloudrc( - stack, "foo,foo.com", "AdminPassword", "regionTwo") - - self.assertIn("export no_proxy='[fd00::1],foo,foo.com'", - result['overcloudrc']) - self.assertIn("OS_PASSWORD=AdminPassword", result['overcloudrc']) - - self.assertIn("export PYTHONWARNINGS='ignore:Certificate", - result['overcloudrc']) - self.assertIn("OS_IDENTITY_API_VERSION=3", result['overcloudrc']) - self.assertIn(overcloudrc.CLOUDPROMPT, result['overcloudrc']) - self.assertIn("OS_AUTH_TYPE=password", result['overcloudrc']) - self.assertIn("OS_AUTH_URL=http://foo.com:8000/", - result['overcloudrc']) - self.assertIn("OS_REGION_NAME=regionTwo", - result['overcloudrc']) - self.assertIn("OS_CLOUD=overcast", result['overcloudrc']) diff --git a/tripleo_common/tests/utils/test_parameters.py b/tripleo_common/tests/utils/test_parameters.py deleted file mode 100644 index 488ae0830..000000000 --- a/tripleo_common/tests/utils/test_parameters.py +++ /dev/null @@ -1,142 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
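For context on the no_proxy assertions above (both overcloudrc tests expect export no_proxy='[fd00::1],foo,foo.com', whether or not the caller already passed 'foo,foo.com'): the deduplication they imply can be sketched as below, assuming an order-preserving merge of endpoint hosts with the caller-supplied string. merge_no_proxy is a hypothetical helper for illustration, not the deleted overcloudrc implementation.

    def merge_no_proxy(current, hosts):
        # Merge the stack endpoint hosts with the caller-supplied
        # no_proxy string, dropping empty and duplicate entries while
        # preserving first-seen order.
        merged = []
        for entry in hosts + current.split(','):
            if entry and entry not in merged:
                merged.append(entry)
        return ','.join(merged)

    assert merge_no_proxy('foo,foo.com', ['[fd00::1]', 'foo', 'foo.com']) == \
        '[fd00::1],foo,foo.com'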
- -from unittest import mock - -from tripleo_common.tests import base -from tripleo_common.utils import parameters - - -class ParametersTest(base.TestCase): - - @mock.patch('tripleo_common.utils.parameters.get_node_count') - @mock.patch('tripleo_common.utils.parameters.get_flavor') - def test_set_count_and_flavor_params_for_controller(self, - mock_get_flavor, - mock_get_node_count): - mock_get_node_count.return_value = 1 - mock_get_flavor.return_value = 'control' - expected = { - 'ControllerCount': 1, - 'OvercloudControlFlavor': 'control' - } - params = parameters.set_count_and_flavor_params('control', 1, 1) - self.assertEqual(expected, params) - - @mock.patch('tripleo_common.utils.parameters.get_node_count') - @mock.patch('tripleo_common.utils.parameters.get_flavor') - def test_set_count_and_flavor_params_for_swift(self, - mock_get_flavor, - mock_get_node_count): - mock_get_node_count.return_value = 1 - mock_get_flavor.return_value = 'swift-storage' - expected = { - 'ObjectStorageCount': 1, - 'OvercloudSwiftStorageFlavor': 'swift-storage' - } - params = parameters.set_count_and_flavor_params('object-storage', 1, 1) - self.assertEqual(expected, params) - - @mock.patch('tripleo_common.utils.parameters.get_node_count') - @mock.patch('tripleo_common.utils.parameters.get_flavor') - def test_set_count_and_flavor_params_for_role(self, - mock_get_flavor, - mock_get_node_count): - mock_get_node_count.return_value = 1 - mock_get_flavor.return_value = 'ceph-storage' - expected = { - 'CephStorageCount': 1, - 'OvercloudCephStorageFlavor': 'ceph-storage' - } - params = parameters.set_count_and_flavor_params('ceph-storage', 1, 1) - self.assertEqual(expected, params) - - @mock.patch('tripleo_common.utils.parameters.get_node_count') - @mock.patch('tripleo_common.utils.parameters.get_flavor') - def test_set_count_and_flavor_params_for_custom_role(self, - mock_get_flavor, - mock_get_node_count): - mock_get_node_count.return_value = 1 - mock_get_flavor.return_value = 'custom-role' - expected = { - 'MyCustomRoleCount': 1, - 'OvercloudMyCustomRoleFlavor': 'custom-role' - } - params = parameters.set_count_and_flavor_params('my-custom-role', 1, 1) - self.assertEqual(expected, params) - - def test_swift_flavor_detected(self): - compute_client = mock.MagicMock() - - # Mock for a compute_client.flavors.list result item and - # compute_client.flavors.get - flavor = mock.MagicMock() - flavor.id = 1 - flavor.name = 'swift-storage' - - # Mock result of .get_keys() - flavor_keys = mock.MagicMock() - flavor_keys.get.side_effect = ('swift-storage', ) - - # Connecting the mock instances... - flavor.get_keys.side_effect = (flavor_keys, ) - compute_client.flavors.list.side_effect = ((flavor, ),) - compute_client.flavors.get.side_effect = (flavor, ) - - # Calling `get_flavor` with an 'object-storage' role should return - # the 'swift-storage' flavor. - self.assertEqual(parameters.get_flavor('object-storage', - compute_client), - 'swift-storage') - - def test_compute_flavor_detected(self): - compute_client = mock.MagicMock() - - # Mock for a compute_client.flavors.list result item and - # compute_client.flavors.get - flavor = mock.MagicMock() - flavor.id = 1 - flavor.name = 'compute' - - # Mock result of .get_keys() - flavor_keys = mock.MagicMock() - flavor_keys.get.side_effect = ('compute', ) - - # Connecting the mock instances... 
- flavor.get_keys.side_effect = (flavor_keys, ) - compute_client.flavors.list.side_effect = ((flavor, ),) - compute_client.flavors.get.side_effect = (flavor, ) - - # Calling `get_flavor` with a 'compute' role should return - # the 'compute' flavor. - self.assertEqual(parameters.get_flavor('compute', compute_client), - 'compute') - - def test_convert_docker_params(self): - - env = { - 'parameter_defaults': { - 'DockerFooImage': 'bar', - 'DockerNoOverwriteImage': 'zzzz', - 'ContainerNoOverwriteImage': 'boom', - 'ContainerNoChangeImage': 'bar', - 'DockerNoChangeImage': 'bar', - } - } - - parameters.convert_docker_params(env) - pd = env.get('parameter_defaults', {}) - self.assertEqual(pd['ContainerFooImage'], 'bar') - self.assertEqual(pd['ContainerNoOverwriteImage'], 'boom') - self.assertEqual(pd['ContainerNoChangeImage'], 'bar') - self.assertEqual(pd['DockerNoChangeImage'], 'bar') diff --git a/tripleo_common/tests/utils/test_passwords.py b/tripleo_common/tests/utils/test_passwords.py deleted file mode 100644 index 6cc66d90b..000000000 --- a/tripleo_common/tests/utils/test_passwords.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import sys -from unittest import mock - -from oslo_utils import uuidutils - -from tripleo_common.tests import base -from tripleo_common.utils import passwords as password_utils - - -class TestPasswords(base.TestCase): - - def setUp(self): - super(TestPasswords, self).setUp() - - if (sys.version_info > (3, 0)): - self.open_builtins = 'builtins.open' - else: - self.open_builtins = '__builtin__.open' - - self.snmp_test_pw = '78cbc32b858718267c355d4' - - def test_create_cephx_key(self): - key = password_utils.create_cephx_key() - self.assertEqual(len(key), 40) - - def test_get_snmpd_readonly_user_password(self): - with mock.patch(self.open_builtins, mock.mock_open(read_data="data")): - with mock.patch('yaml.safe_load') as mock_yaml: - with mock.patch('os.path.exists') as mock_exists: - mock_exists.return_value = True - mock_yaml.return_value = { - 'parameter_defaults': { - 'SnmpdReadonlyUserPassword': self.snmp_test_pw - } - } - value = password_utils.get_snmpd_readonly_user_password() - - self.assertEqual(value, self.snmp_test_pw) - - @mock.patch('tripleo_common.utils.passwords.create_keystone_credential') - def test_fernet_keys_and_credentials(self, mock_create_creds): - - keys = [uuidutils.generate_uuid(dashed=False), - uuidutils.generate_uuid(dashed=False), - uuidutils.generate_uuid(dashed=False), - uuidutils.generate_uuid(dashed=False), - uuidutils.generate_uuid(dashed=False)] - - # generate_passwords will be called multiple times - # but the order is based on how the strings are hashed, and thus - # not really predictable. 
So, make sure it is a unique one of the - # generated values - - mock_create_creds.side_effect = keys - with mock.patch(self.open_builtins, mock.mock_open(read_data="data")): - with mock.patch('yaml.load') as mock_yaml: - mock_yaml.return_value = { - 'parameter_defaults': { - 'SnmpdReadonlyUserPassword': self.snmp_test_pw - } - } - value = password_utils.generate_passwords() - self.assertIn(value['KeystoneCredential0'], keys) - self.assertIn(value['KeystoneCredential1'], keys) - self.assertIn(value['BarbicanSimpleCryptoKek'], keys) - - self.assertNotEqual(value['KeystoneCredential0'], - value['KeystoneCredential1']) - self.assertEqual(len(value['OctaviaServerCertsKeyPassphrase']), 32) - - def test_create_ssh_keypair(self): - - value = password_utils.create_ssh_keypair(comment="Foo") - self.assertEqual('ssh-rsa', value['public_key'][:7]) - self.assertEqual('Foo', value['public_key'][-3:]) diff --git a/tripleo_common/tests/utils/test_plan.py b/tripleo_common/tests/utils/test_plan.py deleted file mode 100644 index 50408a962..000000000 --- a/tripleo_common/tests/utils/test_plan.py +++ /dev/null @@ -1,534 +0,0 @@ -# Copyright (c) 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from tripleo_common import constants -from tripleo_common.tests import base -from tripleo_common.utils import passwords as password_utils -from tripleo_common.utils import plan as plan_utils - - -PLAN_ENV_CONTENTS = """ -version: 1.0 - -name: overcloud -template: overcloud.yaml -environments: -- path: overcloud-resource-registry-puppet.yaml -- path: environments/services/sahara.yaml -parameter_defaults: - BlockStorageCount: 42 - OvercloudControlFlavor: yummy -passwords: - AdminPassword: aaaa -""" - -USER_ENV_CONTENTS = """ -resource_registry: - OS::TripleO::Foo: bar.yaml -""" - -UNORDERED_PLAN_ENV_LIST = [ - {'path': 'overcloud-resource-registry-puppet.yaml'}, - {'path': 'environments/docker-ha.yaml'}, - {'path': 'environments/custom-environment-not-in-capabilities-map.yaml'}, - {'path': 'environments/containers-default-parameters.yaml'}, - {'path': 'environments/docker.yaml'} -] - -CAPABILITIES_DICT = { - 'topics': [{ - 'environment_groups': [{ - 'environments': [{ - 'file': 'overcloud-resource-registry-puppet.yaml'} - ]}, { - 'environments': [{ - 'file': 'environments/docker.yaml', - 'requires': ['overcloud-resource-registry-puppet.yaml'] - }, { - 'file': 'environments/containers-default-parameters.yaml', - 'requires': ['overcloud-resource-registry-puppet.yaml', - 'environments/docker.yaml'] - }]}, { - 'environments': [{ - 'file': 'environments/docker-ha.yaml', - 'requires': ['overcloud-resource-registry-puppet.yaml', - 'environments/docker.yaml'] - }]} - ] - }] -} - -_EXISTING_PASSWORDS = { - 'PlacementPassword': 'VFJeqBKbatYhQm9jja67hufft', - 'BarbicanPassword': 'MGGQBtgKT7FnywvkcdMwE9nhx', - 'BarbicanSimpleCryptoKek': 'dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=', - 'AdminPassword': 'jFmY8FTpvtF2e4d4ReXvmUP8k', - 'CeilometerMeteringSecret': 
'CbHTGK4md4Cc8P8ZyzTns6wry', - 'NovaPassword': '7dZATgVPwD7Ergs9kTTDMCr7F', - 'MysqlRootPassword': 'VqJYpEdKks', - 'RabbitCookie': 'BqJYpEdKksAqJYpEdKks', - 'HeatAuthEncryptionKey': '9xZXehsKc2HbmFFMKjuqxTJHn', - 'PcsdPassword': 'KjEzeitus8eu751a', - 'HorizonSecret': 'mjEzeitus8eu751B', - 'IronicPassword': '4hFDgn9ANeVfuqk84pHpD4ksa', - 'RedisPassword': 'xjj3QZDcUQmU6Q7NzWBHRUhGd', - 'CinderPassword': 'dcxC3xyUcrmvzfrrxpAd3REcm', - 'GlancePassword': 'VqJYNEdKKsGZtgnHct77XBtrV', - 'RabbitPassword': 'ahuHRXdPMx9rzCdjD9CJJNCgA', - 'RpcPassword': 'ahuHRXdPMx9rzCdjD9CJJNCgA', - 'NotifyPassword': 'ahuHRXdPMx9rzCdjD9CJJNCgA', - 'HAProxyStatsPassword': 'P8tbdK6n4YUkTaUyy8XgEVTe6', - 'CeilometerPassword': 'RRdpwK6qf2pbKz2UtzxqauAdk', - 'GnocchiPassword': 'cRYHcUkMuJeK3vyU9pCaznUZc', - 'HeatStackDomainAdminPassword': 'GgTRyWzKYsxK4mReTJ4CM6sMc', - 'CephRgwKey': b'AQCQXtlXAAAAABAAUKcqUMu6oMjAXMjoUV4/3A==', - 'AodhPassword': '8VZXehsKc2HbmFFMKYuqxTJHn', - 'OctaviaHeartbeatKey': 'oct-heartbeat-key', - 'OctaviaPassword': 'NMl7j3nKk1VVwMxUZC8Cgw==', - 'OctaviaServerCertsKeyPassphrase': 'aW5zZWN1cmUta2V5LWRvLW5vdC11c2U=', - 'OctaviaCaKeyPassphrase': 'SLj4c3uCk4DDxPwQOG1Heb==', - 'ManilaPassword': 'NYJN86Fua3X8AVFWmMhQa2zTH', - 'NeutronMetadataProxySharedSecret': 'Q2YgUCwmBkYdqsdhhCF4hbghu', - 'CephManilaClientKey': b'AQANOFFY1NW6AxAAu6jWI3YSOsp2QWusb5Y3DQ==', - 'CephGrafanaAdminPassword': 'NYJN86Fua3X8AVFWmMhQa2zTH', - 'CephDashboardAdminPassword': 'NYJN86Fua3X8AVFWmMhQa2zTH', - 'SwiftHashSuffix': 'td8mV6k7TYEGKCDvjVBwckpn9', - 'SnmpdReadonlyUserPassword': 'TestPassword', - 'SwiftPassword': 'z6EWAVfW7CuxvKdzjWTdrXCeg', - 'HeatPassword': 'bREnsXtMHKTHxt8XW6NXAYr48', - 'MysqlClustercheckPassword': 'jN4RMMWWJ4sycaRwh7UvrAtfX', - 'MysqlMariabackupPassword': 'w3qjjJDTKajthzuRYVd4X5YVU', - 'CephClientKey': b'AQCQXtlXAAAAABAAKyc+8St8i9onHyu2mPk+vg==', - 'NeutronPassword': 'ZxAjdU2UXCV4GM3WyPKrzAZXD', - 'DesignatePassword': 'wHYj7rftFzHMpJKnGxbjjR9CW', - 'DesignateRndcKey': 'hB8XaZRd2Tf00jKsyoXpyw==', - 'KeystoneCredential0': 'ftJNQ_XlDUK7Lgvv1kdWf3SyqVsrvNDgoNV4kJg3yzw=', - 'KeystoneCredential1': 'c4MFq82TQLFLKpiiUjrKkp15dafE2ALcD3jbaIu3rfE=', - 'KeystoneFernetKeys': { - '/etc/keystone/fernet-keys/0': {'content': 'IAMAVERYSAFEKEY'}, - '/etc/keystone/fernet-keys/1': {'content': 'IALSOAMAVERYSAFEKEY'} - }, - 'KeystonePassword': 'jq6G6HyZtj7dcZEvuyhAfjutM', - 'CephClusterFSID': u'97c16f44-b62c-11e6-aed3-185e0f73fdc5', - 'EtcdInitialClusterToken': 'fcVZXehsSc2KdmFFMKDudxTLKa', - 'PacemakerRemoteAuthkey': - 'bCfHQx4fX7FqENVBbDfBnKvf6FTH6mPfVdNjfzakEjuF4UbmZJHAxWdheEr6feEyZmtM' - 'XEd4w3qM8nMVrzjnDCmqAFDmMDQfKcuNgTnqGnkbVUDGpym67Ry4vNCPHyp9tGGyfjNX' - 't66csYZTYUHPv6jdJk4HWBjE66v8B3nRpc3FePQ8DRMWX4hcGFNNxapJu7v2frKwq4tD' - '78cc7aPPMGPn8kR3mj7kMP8Ah8VVGXJEtybEvRg4sQ67zEkAzfKggrpXYPK2Qvv9sHKp' - 't2VjwZBHTvWKarJjyeMTqbzJyW6JTbm62gqZCr9afZRFQug62pPRduvkUNfUYNPNpqjy' - 'yznmeAZPxVseU3jJVxKrxdrgzavKEMtW6BbTmw86j8wuUdaWgRccRGVUQvtQ4p9kXHAy' - 'eXVduZvpvxFtbKvfNTvf6qCuJ8qeQp2TwJQPHUYHkxZYrpAA7fZUzNCZR2tFFdZzWGt2' - 'PEnYvYts4m7Fp9XEmNm7Jyme38CBfnaVERmTMRvHkq3EE2Amsc72aDdzeVRjR3xRgMNJ' - '2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P' - 'q7NyZfRYrGcNDKJuzNWH8UNwGP68uQsUUrV9NVTVpB2sRPG2tJm3unYqekUg3KYXu46J' - 'mANxqgrqDv6vPx6NCPdUXZTXFaesQatKRkkf3nZFqZQJXZVbkudTmrPYyRQAjvWuAmrY' - '6RcFFmygeFnhAxhwXNdge9tEfsfPeQ4GMxa8Amj2fMjmNvQXFfQ8uxMUnusDmhbwCRKM' - 'CvN2dNE92MaQge34vtxsueyDEmbuVE9sNRD3EQBRwx8nktgRwKHfRZJ3BX8f9XMaQe2e' - 'ZfGjtUNkbgKdCyYgEwEybXKPfevDnxFvbZMpJx4fqqCAbAZud9RnAuvqHgFbKHXcVEE4' - 
'nRmgJmdqJsRsTkYPpYkKN9rssEDCXr9HFjbenkxXcUe8afrTvKAzwBvbDWcjYBEQKbuY' - '6Ptm9VJrjutUHCPmW2sh66qvq4C9vPhVEey7FpCZDEyYUPrjRfhKjxEFNBKWpcZzvmT2' - 'nRmgJmdqJsRsTkYPpYkKN9rssEDCXr9HFjbenkxXcUe8afrTvKAzwBvbDWcjYBEQKbuY' - '2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P' - 'VRE4aqMfuY72xFacxXHjvWagEGQEYtkMtQnsh7XAMGuazT3pkppeUTyDbKTY2Dz7Quc3' - '8UKaw8ece6fTXWpjX2EYrsd4qzvhC6eEPdgnpmzjqmuG8YqEAUZ7dYADgAhTkBQsNct8' - 'btQsQDYD4PBjxG2KWAZ9vgTsvBpjjEVcrPfWgwZKJTAZWfWq2u7nT4N2t39EYmQEzbEf' - '8UKaw8ece6fTXWpjX2EYrsd4qzvhC6eEPdgnpmzjqmuG8YqEAUZ7dYADgAhTkBQsNct8' - 'DkCF3DJ49jjZm9N4EKnKGGXD7XkFE79AFRGPUw4gXpeQCtUXyEugUErqMjqgJjC7ykdg' - 'zz7txnzYfRaKHNVs4r4GwNEHRHt7VcTuT3WBcbE4skQgjMnttgP7hts7dMU7PA8kRrfq' - 'BKdkPkUwqQ9Xn4zrysY4GvJQHWXxD6Tyqf9PZaz4xbUmsvtuY7NAz27U2aT3EA9XCgfn' - '2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P' - 'CEfTJQz342nwRMY4DCuhawz4cnrWwxgsnVPCbeXYH4RcgswVsk9edxKkYMkpTwpcKf6n' - 'nRmgJmdqJsRsTkYPpYkKN9rssEDCXr9HFjbenkxXcUe8afrTvKAzwBvbDWcjYBEQKbuY' - '6Ptm9VJrjutUHCPmW2sh66qvq4C9vPhVEey7FpCZDEyYUPrjRfhKjxEFNBKWpcZzvmT2' - 'VRE4aqMfuY72xFacxXHjvWagEGQEYtkMtQnsh7XAMGuazT3pkppeUTyDbKTY2Dz7Quc3' - '8UKaw8ece6fTXWpjX2EYrsd4qzvhC6eEPdgnpmzjqmuG8YqEAUZ7dYADgAhTkBQsNct8' - 'btQsQDYD4PBjxG2KWAZ9vgTsvBpjjEVcrPfWgwZKJTAZWfWq2u7nT4N2t39EYmQEzbEf' - 'DkCF3DJ49jjZm9N4EKnKGGXD7XkFE79AFRGPUw4gXpeQCtUXyEugUErqMjqgJjC7ykdg' - 'zz7txnzYfRaKHNVs4r4GwNEHRHt7VcTuT3WBcbE4skQgjMnttgP7hts7dMU7PA8kRrfq' - 'BKdkPkUwqQ9Xn4zrysY4GvJQHWXxD6Tyqf9PZaz4xbUmsvtuY7NAz27U2aT3EA9XCgfn' - '2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P' - 'CEfTJQz342nwRMY4DCuhawz4cnrWwxgsnVPCbeXYH4RcgswVsk9edxKkYMkpTwpcKf6n' - 'E2dhquqdKVTAYf7YKbTfFVsRwqykkPduKXuPwVDjbCqdEJPcmnRJAJkwkQCWgukpvzzm' - 'DKFVYxncxmzKgEN27VtgfpsXWBJ2jaxMeQCXb2rbjkVcaypyaETQ3Wkw98EptNAKRcjM' - 'E2dhquqdKVTAYf7YKbTfFVsRwqykkPduKXuPwVDjbCqdEJPcmnRJAJkwkQCWgukpvzzm' - 'zZJ2xFdfNYh7RZ7EgAAbY8Tqy3j2c9c6HNmXwAVV6dzPTrE4FHcKZGg76anGchczF9ev' - 'AG8RHQ7ea2sJhXqBmGsmEj6Q84TN9E7pgmtAtmVAA38AYsQBNZUMYdMcmBdpV9w7G3NZ' - 'mEU8R8uWqx6w3NzzqsMg78bnhCR7sdWDkhuEp2M8fYWmqujYFNYvzz6BcHNKQyrWETRD' - 'E2dhquqdKVTAYf7YKbTfFVsRwqykkPduKXuPwVDjbCqdEJPcmnRJAJkwkQCWgukpvzzm' - 'zaTdNWgM7wsXGkvgYVNdTWnReCPXJUN3yQwrvApZzdaF86QaeYwXW7qqEJrqmwpUUbw2' - 'JHkmvJB4AWtVhDc9etzUqfuTaqMyXwxFEWvht3RDTDx8dfQ3Ek8BD4QP4BtUQeQJpfsG' - 'FEJeQQYVcBxqVuK26xJrERUDmeNw8KWKBCrYPPy48cjCFdgZHz3cNet6bwJMdsgKMpZT' - 'erdYy9nqBw6FRZ37rRMtxmrcB4VsWHbf4HjdPRpu4xyJTqMThnXWa8nPDde3C9wCuKkQ' - '23k2zDYsMeHc6KD93vm7Ky48v3veYEuJvNNxQPyyCZ9XNnpGsWrqsVduCswR4MQpp6yJ' - 'RBmwbMYbuEjwJy9UuZxa9bQV4GqYFnVuETC6bXaT9uauWdaa2TrbuuXx3WWdmRGd4Rqh' - 'Z3NA9Kqx9pTQHe3KGZ2tFejsJqNvjJvFX94eVeMGDgHjtJzDdxp9NWYtG6v9zABGRzVF' - 'MqJX6nhhBPbsvjpswcgJq3ZXxzmWFJmvjECghGrbG6bKawtv4aYhMeaHagfMP8W6KrTy' - 'uGxWUhcEhfygjE4truAkjfKCtzzVtTcBArbWMny6HWMp6TAen3f6hEB6kBb7pgvKxkND' - '3JxueYBZvDeq4WWtRzUjcFF2qhEjwrtuCJhy3WMXX3MN6nFDtYRTHZGdPqyatW9Jcc8t' - '7gCMWMVzYyNuXZ2A6rwX6Umv8g3mBuwnrwKXEFTZkPCAZMxk3A6MTmMcJCVy3hw6MmRM' - 'eXKyhFxRcKWraysTQG7hd9kP8DeJZNDurYDJwqrh6cwDwaMhBfTgnxTBeyjwpbCJK2FD' - 'Jg2vFWPmTJ37gDMdwxWCMRQ9kyqz9PJZ4Xn2MPxMhNqT3Hb39YshryqnbvBagHbqYx9M' - 'r4ZKJpKya34JMaPambzg2pKRDd2WdFCZcdHTFyqxxzJbjXM2gjfBZ2strUNqWvQYNTw8' - 'QttkuxyeQTgHupKNaZF6y7rDyf7mbNR9DaPXpBQuZ7un6KDj2Dfh7yvfhPk8cHG7n9pb' - 'KEKD3sgbbKnQ8d9MsGhUtCQVed7dtjpYKsmGJmbYMvZjpGpqsfsHQfFRdCgJHnW3FdQ6' - 'sGhUtCQVed7dtj12', - 'MigrationSshKey': { - 'private_key': 'private_key', - 'public_key': 'public_key' - }, - 'LibvirtTLSPassword': 'xCdt9yeamKz8Fb6EGba9u82XU', -} - - -class 
PlanTest(base.TestCase): - def setUp(self): - super(PlanTest, self).setUp() - self.container = 'overcloud' - - def test_get_next_index(self): - keys_map = { - password_utils.KEYSTONE_FERNET_REPO + '0': { - 'content': 'Some key'}, - password_utils.KEYSTONE_FERNET_REPO + '1': { - 'content': 'Some other key'}, - } - next_index = plan_utils.get_next_index(keys_map) - self.assertEqual(next_index, 2) - - @mock.patch('tripleo_common.utils.passwords.' - 'create_keystone_credential') - def test_rotate_keys(self, mock_keystone_creds): - mock_keystone_creds.return_value = 'Some new key' - - staged_key_index = password_utils.KEYSTONE_FERNET_REPO + '0' - new_primary_key_index = password_utils.KEYSTONE_FERNET_REPO + '2' - keys_map = { - password_utils.KEYSTONE_FERNET_REPO + '0': { - 'content': 'Some key'}, - password_utils.KEYSTONE_FERNET_REPO + '1': { - 'content': 'Some other key'}, - } - new_keys_map = plan_utils.rotate_keys(keys_map, 2) - - # Staged key should be the new key - self.assertEqual('Some new key', - new_keys_map[staged_key_index]['content']) - # Primary key should be the previous staged key - self.assertEqual('Some key', - new_keys_map[new_primary_key_index]['content']) - - def test_purge_excess_keys_should_purge(self): - keys_map = { - password_utils.KEYSTONE_FERNET_REPO + '0': { - 'content': 'key0'}, - password_utils.KEYSTONE_FERNET_REPO + '1': { - 'content': 'key1'}, - password_utils.KEYSTONE_FERNET_REPO + '2': { - 'content': 'key2'}, - password_utils.KEYSTONE_FERNET_REPO + '3': { - 'content': 'key3'}, - password_utils.KEYSTONE_FERNET_REPO + '4': { - 'content': 'key4'}, - } - max_keys = 3 - keys_map = plan_utils.purge_excess_keys(max_keys, keys_map) - self.assertEqual(max_keys, len(keys_map)) - # It should keep index 0, 3 and 4 - self.assertIn(password_utils.KEYSTONE_FERNET_REPO + '0', keys_map) - self.assertIn(password_utils.KEYSTONE_FERNET_REPO + '3', keys_map) - self.assertIn(password_utils.KEYSTONE_FERNET_REPO + '4', keys_map) - # It should have removed index 1 and 2 - self.assertNotIn(password_utils.KEYSTONE_FERNET_REPO + '1', keys_map) - self.assertNotIn(password_utils.KEYSTONE_FERNET_REPO + '2', keys_map) - - def test_purge_excess_keys_should_not_purge_if_equal_to_max(self): - keys_map = { - password_utils.KEYSTONE_FERNET_REPO + '0': { - 'content': 'key0'}, - password_utils.KEYSTONE_FERNET_REPO + '1': { - 'content': 'key1'}, - password_utils.KEYSTONE_FERNET_REPO + '2': { - 'content': 'key2'}, - } - max_keys = 3 - keys_map = plan_utils.purge_excess_keys(max_keys, keys_map) - self.assertEqual(max_keys, len(keys_map)) - - def test_purge_excess_keys_should_not_purge_if_less_than_max(self): - keys_map = { - password_utils.KEYSTONE_FERNET_REPO + '0': { - 'content': 'key0'}, - password_utils.KEYSTONE_FERNET_REPO + '1': { - 'content': 'key1'}, - } - max_keys = 3 - keys_map = plan_utils.purge_excess_keys(max_keys, keys_map) - self.assertEqual(2, len(keys_map)) - - @mock.patch('tripleo_common.utils.passwords.' 
- 'get_snmpd_readonly_user_password') - def test_generate_password(self, mock_get_snmpd_readonly_user_password): - - mock_get_snmpd_readonly_user_password.return_value = "TestPassword" - - mock_orchestration = mock.MagicMock() - mock_orchestration.stacks.environment.return_value = { - 'parameter_defaults': {} - } - mock_resource = mock.MagicMock() - mock_resource.attributes = { - 'endpoint_map': { - 'PlacementPublic': {} - }, - } - mock_orchestration.resources.get.return_value = mock_resource - result = plan_utils.generate_passwords(None, mock_orchestration) - - for password_param_name in constants.PASSWORD_PARAMETER_NAMES: - self.assertTrue(password_param_name in result, - "%s is not in %s" % (password_param_name, result)) - - @mock.patch('tripleo_common.utils.passwords.' - 'create_ssh_keypair') - @mock.patch('tripleo_common.utils.passwords.' - 'create_fernet_keys_repo_structure_and_keys') - @mock.patch('tripleo_common.utils.passwords.' - 'get_snmpd_readonly_user_password') - def test_run_passwords_exist(self, mock_get_snmpd_readonly_user_password, - mock_fernet_keys_setup, - mock_create_ssh_keypair): - - mock_get_snmpd_readonly_user_password.return_value = "TestPassword" - mock_create_ssh_keypair.return_value = {'public_key': 'Foo', - 'private_key': 'Bar'} - mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'}, - '/tmp/bar': {'content': 'Bar'}} - - mock_orchestration = mock.MagicMock() - mock_orchestration.stacks.environment.return_value = { - 'parameter_defaults': _EXISTING_PASSWORDS.copy() - } - mock_resource = mock.MagicMock() - mock_resource.attributes = { - 'endpoint_map': { - 'PlacementPublic': {} - }, - } - mock_orchestration.resources.get.return_value = mock_resource - - result = plan_utils.generate_passwords(None, mock_orchestration) - - # ensure old passwords used and no new generation - self.assertEqual(_EXISTING_PASSWORDS, result) - - @mock.patch('tripleo_common.utils.passwords.' - 'create_ssh_keypair') - @mock.patch('tripleo_common.utils.passwords.' - 'create_fernet_keys_repo_structure_and_keys') - @mock.patch('tripleo_common.utils.passwords.' - 'get_snmpd_readonly_user_password') - def test_placement_passwords_upgrade(self, - mock_get_snmpd_readonly_user_password, - mock_fernet_keys_setup, - mock_create_ssh_keypair): - - mock_get_snmpd_readonly_user_password.return_value = "TestPassword" - mock_create_ssh_keypair.return_value = {'public_key': 'Foo', - 'private_key': 'Bar'} - mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'}, - '/tmp/bar': {'content': 'Bar'}} - - passwords = _EXISTING_PASSWORDS.copy() - - mock_orchestration = mock.MagicMock() - mock_orchestration.stacks.environment.return_value = { - 'parameter_defaults': passwords - } - mock_resource = mock.MagicMock() - mock_resource.attributes = { - 'endpoint_map': {}, - } - mock_orchestration.resources.get.return_value = mock_resource - result = plan_utils.generate_passwords(None, mock_orchestration) - self.assertEqual( - passwords['NovaPassword'], - result['PlacementPassword'] - ) - - @mock.patch('tripleo_common.utils.passwords.' - 'create_ssh_keypair') - @mock.patch('tripleo_common.utils.passwords.' - 'create_fernet_keys_repo_structure_and_keys') - @mock.patch('tripleo_common.utils.passwords.' 
- 'get_snmpd_readonly_user_password') - def test_keystone_passwords_upgrade(self, - mock_get_snmpd_readonly_user_password, - mock_fernet_keys_setup, - mock_create_ssh_keypair): - - mock_get_snmpd_readonly_user_password.return_value = "TestPassword" - mock_create_ssh_keypair.return_value = {'public_key': 'Foo', - 'private_key': 'Bar'} - mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'}, - '/tmp/bar': {'content': 'Bar'}} - - passwords = _EXISTING_PASSWORDS.copy() - keystone_password = passwords['KeystonePassword'] - passwords['AdminToken'] = keystone_password - del passwords['KeystonePassword'] - - mock_orchestration = mock.MagicMock() - mock_orchestration.stacks.environment.return_value = { - 'parameter_defaults': passwords - } - mock_resource = mock.MagicMock() - mock_resource.attributes = { - 'endpoint_map': {}, - } - mock_orchestration.resources.get.return_value = mock_resource - result = plan_utils.generate_passwords(None, mock_orchestration) - self.assertEqual( - keystone_password, - result['KeystonePassword'] - ) - self.assertNotIn('AdminToken', result) - - @mock.patch('tripleo_common.utils.passwords.' - 'create_ssh_keypair') - @mock.patch('tripleo_common.utils.passwords.' - 'create_fernet_keys_repo_structure_and_keys') - @mock.patch('tripleo_common.utils.passwords.' - 'get_snmpd_readonly_user_password') - def test_run_rotate_no_rotate_list( - self, mock_get_snmpd_readonly_user_password, - mock_fernet_keys_setup, mock_create_ssh_keypair): - - mock_get_snmpd_readonly_user_password.return_value = "TestPassword" - mock_create_ssh_keypair.return_value = {'public_key': 'Foo', - 'private_key': 'Bar'} - mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'}, - '/tmp/bar': {'content': 'Bar'}} - mock_orchestration = mock.MagicMock() - mock_orchestration.stacks.environment.return_value = { - 'parameter_defaults': _EXISTING_PASSWORDS.copy() - } - - mock_resource = mock.MagicMock() - mock_resource.attributes = { - 'endpoint_map': { - 'PlacementPublic': {} - }, - } - mock_orchestration.resources.get.return_value = mock_resource - - result = plan_utils.generate_passwords(None, mock_orchestration, - rotate_passwords=True) - - # ensure passwords in the DO_NOT_ROTATE_LIST are not modified - for name in constants.DO_NOT_ROTATE_LIST: - self.assertEqual(_EXISTING_PASSWORDS[name], result[name]) - - # ensure all passwords are generated - for name in constants.PASSWORD_PARAMETER_NAMES: - self.assertTrue(name in result, "%s is not in %s" % (name, result)) - - # ensure new passwords have been generated - self.assertNotEqual(_EXISTING_PASSWORDS, result) - - @mock.patch('tripleo_common.utils.passwords.' - 'create_ssh_keypair') - @mock.patch('tripleo_common.utils.passwords.' - 'create_fernet_keys_repo_structure_and_keys') - @mock.patch('tripleo_common.utils.passwords.' 
- 'get_snmpd_readonly_user_password') - def test_run_rotate_with_rotate_list( - self, mock_get_snmpd_readonly_user_password, - mock_fernet_keys_setup, mock_create_ssh_keypair): - - mock_get_snmpd_readonly_user_password.return_value = "TestPassword" - mock_create_ssh_keypair.return_value = {'public_key': 'Foo', - 'private_key': 'Bar'} - mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'}, - '/tmp/bar': {'content': 'Bar'}} - - mock_orchestration = mock.MagicMock() - mock_orchestration.stacks.environment.return_value = { - 'parameter_defaults': _EXISTING_PASSWORDS.copy() - } - - mock_resource = mock.MagicMock() - mock_resource.attributes = { - 'endpoint_map': { - 'PlacementPublic': {} - }, - } - mock_orchestration.resources.get.return_value = mock_resource - rotate_list = [ - 'BarbicanPassword', - 'AdminPassword', - 'CeilometerMeteringSecret', - 'NovaPassword', - 'MysqlRootPassword' - ] - - result = plan_utils.generate_passwords(None, mock_orchestration, - rotate_passwords=True, - rotate_pw_list=rotate_list) - - # ensure only specified passwords are regenerated - for name in constants.PASSWORD_PARAMETER_NAMES: - self.assertTrue(name in result, "%s is not in %s" % (name, result)) - if name in rotate_list: - self.assertNotEqual(_EXISTING_PASSWORDS[name], result[name]) - else: - self.assertEqual(_EXISTING_PASSWORDS[name], result[name]) - - @mock.patch('tripleo_common.utils.passwords.' - 'create_ssh_keypair') - @mock.patch('tripleo_common.utils.passwords.' - 'create_fernet_keys_repo_structure_and_keys') - @mock.patch('tripleo_common.utils.passwords.' - 'get_snmpd_readonly_user_password') - def test_passwords_exist_in_heat( - self, mock_get_snmpd_readonly_user_password, - mock_fernet_keys_setup, mock_create_ssh_keypair): - - mock_get_snmpd_readonly_user_password.return_value = "TestPassword" - mock_create_ssh_keypair.return_value = {'public_key': 'Foo', - 'private_key': 'Bar'} - mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'}, - '/tmp/bar': {'content': 'Bar'}} - - existing_passwords = _EXISTING_PASSWORDS.copy() - existing_passwords["AdminPassword"] = 'ExistingPasswordInHeat' - - mock_orchestration = mock.MagicMock() - mock_orchestration.stacks.environment.return_value = { - 'parameter_defaults': existing_passwords - } - - mock_resource = mock.MagicMock() - mock_resource.attributes = { - 'endpoint_map': { - 'PlacementPublic': {} - }, - } - mock_orchestration.resources.get.return_value = mock_resource - - result = plan_utils.generate_passwords(None, mock_orchestration) - self.assertEqual(existing_passwords, result) diff --git a/tripleo_common/tests/utils/test_process.py b/tripleo_common/tests/utils/test_process.py deleted file mode 100644 index 71c853568..000000000 --- a/tripleo_common/tests/utils/test_process.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
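The rotation tests above pin down a small contract: names in DO_NOT_ROTATE_LIST always keep their current value, and when a rotate list is supplied only the listed names are regenerated. A minimal sketch of that contract — illustrative only, not the deleted implementation; it models just the rotate_passwords=True path, and 'existing' and 'generate_new' are hypothetical stand-ins while the constants are the real names the tests reference:

    from tripleo_common import constants

    def rotate(existing, generate_new, rotate_pw_list=None):
        # Keep a current value unless this name is due for rotation.
        result = {}
        for name in constants.PASSWORD_PARAMETER_NAMES:
            rotate_this = (name not in constants.DO_NOT_ROTATE_LIST and
                           (rotate_pw_list is None or name in rotate_pw_list))
            if name in existing and not rotate_this:
                result[name] = existing[name]
            else:
                result[name] = generate_new(name)
        return result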
- -"""Unit tests for utils.process.""" - - -import os -from unittest import mock - -from oslo_concurrency import processutils - -from tripleo_common.tests import base -from tripleo_common.utils import process - - -class ExecuteTestCase(base.TestCase): - # Allow calls to process.execute() and related functions - block_execute = False - - @mock.patch.object(processutils, 'execute', autospec=True) - @mock.patch.object(os.environ, 'copy', return_value={}, autospec=True) - def test_execute_use_standard_locale_no_env_variables(self, env_mock, - execute_mock): - process.execute('foo', use_standard_locale=True) - execute_mock.assert_called_once_with('foo', - env_variables={'LC_ALL': 'C'}) - - @mock.patch.object(processutils, 'execute', autospec=True) - def test_execute_use_standard_locale_with_env_variables(self, - execute_mock): - process.execute('foo', use_standard_locale=True, - env_variables={'foo': 'bar'}) - execute_mock.assert_called_once_with('foo', - env_variables={'LC_ALL': 'C', - 'foo': 'bar'}) - - @mock.patch.object(processutils, 'execute', autospec=True) - def test_execute_not_use_standard_locale(self, execute_mock): - process.execute('foo', use_standard_locale=False, - env_variables={'foo': 'bar'}) - execute_mock.assert_called_once_with('foo', - env_variables={'foo': 'bar'}) - - @mock.patch.object(process, 'LOG', autospec=True) - def _test_execute_with_log_stdout(self, log_mock, log_stdout=None): - with mock.patch.object( - processutils, 'execute', autospec=True) as execute_mock: - execute_mock.return_value = ('stdout', 'stderr') - if log_stdout is not None: - process.execute('foo', log_stdout=log_stdout) - else: - process.execute('foo') - execute_mock.assert_called_once_with('foo') - name, args, kwargs = log_mock.debug.mock_calls[1] - if log_stdout is False: - self.assertEqual(2, log_mock.debug.call_count) - self.assertNotIn('stdout', args[0]) - else: - self.assertEqual(3, log_mock.debug.call_count) - self.assertIn('stdout', args[0]) - - def test_execute_with_log_stdout_default(self): - self._test_execute_with_log_stdout() - - def test_execute_with_log_stdout_true(self): - self._test_execute_with_log_stdout(log_stdout=True) - - def test_execute_with_log_stdout_false(self): - self._test_execute_with_log_stdout(log_stdout=False) diff --git a/tripleo_common/tests/utils/test_roles.py b/tripleo_common/tests/utils/test_roles.py deleted file mode 100644 index 87eaabe99..000000000 --- a/tripleo_common/tests/utils/test_roles.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
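The utils.process tests above assert one small behavior layered over oslo's processutils.execute: forcing a predictable locale when asked, while merging any caller-supplied environment. A sketch consistent with those assertions — not the deleted module's exact code:

    import os

    from oslo_concurrency import processutils

    def execute(*cmd, use_standard_locale=False, **kwargs):
        # Force LC_ALL=C for predictable command output, merging it
        # into any env_variables the caller passed (as the tests check).
        if use_standard_locale:
            env = kwargs.pop('env_variables', None) or os.environ.copy()
            env['LC_ALL'] = 'C'
            kwargs['env_variables'] = env
        return processutils.execute(*cmd, **kwargs)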
-# - -from unittest import mock - -import yaml - -from tripleo_common.exception import NotFound -from tripleo_common.exception import RoleMetadataError -from tripleo_common.tests import base -from tripleo_common.utils import roles as rolesutils - -SAMPLE_ROLE = """ -############################################################################### -# Role: sample # -############################################################################### -- name: sample - description: | - Sample! - networks: - - InternalApi - HostnameFormatDefault: '%stackname%-sample-%index%' - ServicesDefault: - - OS::TripleO::Services::Timesync -""" -SAMPLE_ROLE_NETWORK_DICT = """ -############################################################################### -# Role: sample # -############################################################################### -- name: sample - description: | - Sample! - networks: - InternalApi: - subnet: internal_api_subnet - HostnameFormatDefault: '%stackname%-sample-%index%' - ServicesDefault: - - OS::TripleO::Services::Timesync -""" -SAMPLE_GENERATED_ROLE = """ -############################################################################### -# Role: sample # -############################################################################### -- name: sampleA - description: | - Sample! - networks: - - InternalApi - HostnameFormatDefault: '%stackname%-sample-%index%' - ServicesDefault: - - OS::TripleO::Services::Timesync -""" -SAMPLE_ROLE_OBJ = { - 'HostnameFormatDefault': '%stackname%-sample-%index%', - 'ServicesDefault': ['OS::TripleO::Services::Timesync'], - 'description': 'Sample!\n', - 'name': 'sample', - 'networks': ['InternalApi'] -} -SAMPLE_ROLE_OBJ_NETWORK_DICT = { - 'HostnameFormatDefault': '%stackname%-sample-%index%', - 'ServicesDefault': ['OS::TripleO::Services::Timesync'], - 'description': 'Sample!\n', - 'name': 'sample', - 'networks': { - 'InternalApi': { - 'subnet': 'internal_api_subnet'} - } -} - -ROLES_DATA_YAML_CONTENTS = """ -- name: MyController - CountDefault: 1 - ServicesDefault: - - OS::TripleO::Services::CACerts - -- name: Compute - HostnameFormatDefault: '%stackname%-novacompute-%index%' - ServicesDefault: - - OS::TripleO::Services::NovaCompute - - OS::TripleO::Services::DummyService - -- name: CustomRole - ServicesDefault: - - OS::TripleO::Services::Kernel -""" - - -class TestRolesUtils(base.TestCase): - @mock.patch('os.listdir') - @mock.patch('os.path.exists') - def test_get_roles_from_directory(self, exists_mock, listdir_mock): - exists_mock.return_value = True - listdir_mock.return_value = ['b.yaml', 'a.yaml'] - self.assertEqual(rolesutils.get_roles_list_from_directory('/foo'), - ['a', 'b']) - - @mock.patch('os.listdir') - @mock.patch('os.path.exists') - def test_get_roles_from_directory_failure(self, exists_mock, listdir_mock): - exists_mock.return_value = False - self.assertRaises(ValueError, rolesutils.get_roles_list_from_directory, - ['/foo']) - - def test_validate_roles(self): - available_roles = ['a', 'b', 'c'] - requested_roles = ['b', 'c'] - try: - rolesutils.check_role_exists(available_roles, requested_roles) - except Exception: - self.fail('Exception raised') - - def test_validate_roles_with_invalid_role(self): - available_roles = ['a', 'b', 'c'] - requested_roles = ['b', 'd'] - self.assertRaises(NotFound, rolesutils.check_role_exists, - available_roles, requested_roles) - - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - def 
test_generate_roles_data_from_directory(self, get_roles_mock, - check_mock): - get_roles_mock.return_value = ['foo', 'bar', 'baz'] - m = mock.mock_open(read_data=SAMPLE_ROLE) - with mock.patch('tripleo_common.utils.roles.open', m) as open_mock: - r = rolesutils.generate_roles_data_from_directory('/foo', - ['foo', 'bar']) - open_mock.assert_any_call('/foo/foo.yaml', 'r') - open_mock.assert_any_call('/foo/bar.yaml', 'r') - - header = '\n'.join(["#" * 79, - "# File generated by TripleO", - "#" * 79, - ""]) - expected = header + SAMPLE_ROLE * 2 - self.assertEqual(expected, r) - get_roles_mock.assert_called_with('/foo') - check_mock.assert_called_with(['foo', 'bar', 'baz'], ['foo', 'bar']) - - def test_validate_role_yaml(self): - role = rolesutils.validate_role_yaml(SAMPLE_ROLE) - self.assertEqual(SAMPLE_ROLE_OBJ, role) - - def test_validate_role_with_network_dict(self): - role = rolesutils.validate_role_yaml(SAMPLE_ROLE_NETWORK_DICT) - self.assertEqual(SAMPLE_ROLE_OBJ_NETWORK_DICT, role) - - def test_validate_role_yaml_with_file(self): - m = mock.mock_open(read_data=SAMPLE_ROLE) - with mock.patch('tripleo_common.utils.roles.open', m): - r = rolesutils.validate_role_yaml(role_path='/foo.yaml') - self.assertEqual(SAMPLE_ROLE_OBJ, r) - - def test_validate_role_yaml_invalid_params(self): - self.assertRaises(ValueError, rolesutils.validate_role_yaml, 'foo', - 'bar') - - def test_validate_role_yaml_missing_name(self): - role = yaml.safe_load(SAMPLE_ROLE) - del role[0]['name'] - self.assertRaises(RoleMetadataError, rolesutils.validate_role_yaml, - yaml.safe_dump(role)) - - def test_validate_role_yaml_invalid_type(self): - role = yaml.safe_load(SAMPLE_ROLE) - role[0]['CountDefault'] = 'should not be a string' - self.assertRaises(RoleMetadataError, rolesutils.validate_role_yaml, - yaml.safe_dump(role)) - - def test_validate_role_yaml_invalid_network_type(self): - role = yaml.safe_load(SAMPLE_ROLE) - role[0]['networks'] = 'should not be a string' - self.assertRaises(RoleMetadataError, rolesutils.validate_role_yaml, - yaml.safe_dump(role)) - - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - def test_generate_roles_with_one_role_generated(self, get_roles_mock, - check_mock): - get_roles_mock.return_value = ['sample', 'bar', 'baz'] - m = mock.mock_open(read_data=SAMPLE_ROLE) - with mock.patch('tripleo_common.utils.roles.open', m) as open_mock: - r = rolesutils.generate_roles_data_from_directory( - '/roles', ['sample:sampleA']) - open_mock.assert_any_call('/roles/sample.yaml', 'r') - - header = '\n'.join(["#" * 79, - "# File generated by TripleO", - "#" * 79, - ""]) - expected = header + SAMPLE_GENERATED_ROLE - self.assertEqual(expected, r) - get_roles_mock.assert_called_with('/roles') - check_mock.assert_called_with(['sample', 'bar', 'baz'], - ['sample:sampleA']) - - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - def test_generate_roles_with_two_same_roles(self, get_roles_mock, - check_mock): - get_roles_mock.return_value = ['sample', 'bar', 'baz'] - m = mock.mock_open(read_data=SAMPLE_ROLE) - with mock.patch('tripleo_common.utils.roles.open', m) as open_mock: - r = rolesutils.generate_roles_data_from_directory( - '/roles', ['sample', 'sample:sampleA']) - open_mock.assert_any_call('/roles/sample.yaml', 'r') - - header = '\n'.join(["#" * 79, - "# File generated by TripleO", - "#" * 79, - ""]) - expected = header + SAMPLE_ROLE + 
SAMPLE_GENERATED_ROLE - self.assertEqual(expected, r) - get_roles_mock.assert_called_with('/roles') - check_mock.assert_called_with(['sample', 'bar', 'baz'], - ['sample', 'sample:sampleA']) - - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - def test_generate_roles_with_wrong_colon_format(self, get_roles_mock, - check_mock): - get_roles_mock.return_value = ['sample', 'bar', 'baz'] - m = mock.mock_open(read_data=SAMPLE_ROLE) - with mock.patch('tripleo_common.utils.roles.open', m) as open_mock: - self.assertRaises(ValueError, - rolesutils.generate_roles_data_from_directory, - '/roles', - ['sample', 'sample:A']) - open_mock.assert_any_call('/roles/sample.yaml', 'r') - - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - def test_generate_roles_with_invalid_role_name(self, get_roles_mock, - check_mock): - get_roles_mock.return_value = ['sample', 'bar', 'baz'] - m = mock.mock_open(read_data=SAMPLE_ROLE) - with mock.patch('tripleo_common.utils.roles.open', m) as open_mock: - self.assertRaises(ValueError, - rolesutils.generate_roles_data_from_directory, - '/roles', - ['sample', 'sampleA:sample']) - open_mock.assert_any_call('/roles/sample.yaml', 'r') - - @mock.patch('tripleo_common.utils.roles.check_role_exists') - @mock.patch('tripleo_common.utils.roles.get_roles_list_from_directory') - def test_generate_roles_with_invalid_colon_format(self, get_roles_mock, - check_mock): - get_roles_mock.return_value = ['sample', 'bar', 'baz'] - m = mock.mock_open(read_data=SAMPLE_ROLE) - with mock.patch('tripleo_common.utils.roles.open', m) as open_mock: - self.assertRaises(ValueError, - rolesutils.generate_roles_data_from_directory, - '/roles', - ['sample', 'sample:sample']) - open_mock.assert_any_call('/roles/sample.yaml', 'r') diff --git a/tripleo_common/tests/utils/test_stack_parameters.py b/tripleo_common/tests/utils/test_stack_parameters.py deleted file mode 100644 index bffbb55f1..000000000 --- a/tripleo_common/tests/utils/test_stack_parameters.py +++ /dev/null @@ -1,369 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from ironicclient import exceptions as ironicexceptions - -from tripleo_common.tests import base -from tripleo_common.utils import stack_parameters -from tripleo_common.utils import nodes - - -class StackParametersTest(base.TestCase): - - def test_generate_hostmap(self): - - # two instances in 'nova list'. 
- # vm1 with id=123 and vm2 with id=234 - server1 = mock.MagicMock() - server1.id = 123 - server1.name = 'vm1' - - server2 = mock.MagicMock() - server2.id = 234 - server2.name = 'vm2' - - servers = mock.MagicMock() - servers = [server1, server2] - - compute_client = mock.MagicMock() - compute_client.servers.list.side_effect = (servers, ) - - # we assume instance id=123 has been provisioned using bm node 'bm1' - # while instance id=234 is in error state, so no bm node has been used - - def side_effect(args): - if args == 123: - return bm1 - if args == 234: - raise ironicexceptions.NotFound - - baremetal_client = mock.MagicMock() - baremetal_client.node.get_by_instance_uuid = mock.MagicMock( - side_effect=side_effect) - - # bm server with name='bm1' and uuid='9876' - bm1 = mock.MagicMock() - bm1.uuid = 9876 - bm1.name = 'bm1' - - # 'bm1' has a single port with mac='aa:bb:cc:dd:ee:ff' - port1 = mock.MagicMock() - port1.address = 'aa:bb:cc:dd:ee:ff' - - def side_effect2(node, *args): - if node == 9876: - return [port1, ] - raise ironicexceptions.NotFound - - baremetal_client.port.list = mock.MagicMock(side_effect=side_effect2) - - expected_hostmap = { - 'aa:bb:cc:dd:ee:ff': { - 'compute_name': 'vm1', - 'baremetal_name': 'bm1' - } - } - - result = nodes.generate_hostmap(baremetal_client, compute_client) - self.assertEqual(result, expected_hostmap) - - @mock.patch('tripleo_common.utils.nodes.generate_hostmap') - def test_generate_fencing_parameters(self, mock_generate_hostmap): - test_hostmap = { - "00:11:22:33:44:55": { - "compute_name": "compute_name_0", - "baremetal_name": "baremetal_name_0" - }, - "11:22:33:44:55:66": { - "compute_name": "compute_name_1", - "baremetal_name": "baremetal_name_1" - }, - "aa:bb:cc:dd:ee:ff": { - "compute_name": "compute_name_4", - "baremetal_name": "baremetal_name_4" - }, - "bb:cc:dd:ee:ff:gg": { - "compute_name": "compute_name_5", - "baremetal_name": "baremetal_name_5" - } - } - mock_generate_hostmap.return_value = test_hostmap - - test_envjson = [{ - "name": "control-0", - "pm_password": "control-0-password", - "pm_type": "ipmi", - "pm_user": "control-0-admin", - "pm_addr": "0.1.2.3", - "pm_port": "0123", - "ports": [ - {"address": "00:11:22:33:44:55"}, - ] - }, { - "name": "control-1", - "pm_password": "control-1-password", - # Still support deprecated drivers - "pm_type": "pxe_ipmitool", - "pm_user": "control-1-admin", - "pm_addr": "1.2.3.4", - "ports": [ - {"address": "11:22:33:44:55:66"} - ] - }, { - # test node using redfish pm - "name": "compute-4", - "pm_password": "calvin", - "pm_type": "redfish", - "pm_user": "root", - "pm_addr": "172.16.0.1:8000", - "pm_port": "8000", - "redfish_verify_ca": "false", - "pm_system_id": "/redfish/v1/Systems/5678", - "ports": [ - {"address": "aa:bb:cc:dd:ee:ff"} - ] - }, { - # This is an extra node on oVirt/RHV - "name": "control-3", - "pm_password": "ovirt-password", - "pm_type": "staging-ovirt", - "pm_user": "admin@internal", - "pm_addr": "3.4.5.6", - "pm_vm_name": "control-3", - "ports": [ - {"address": "bb:cc:dd:ee:ff:gg"} - ] - }, { - # This is an extra node that is not in the hostmap, to ensure we - # cope with unprovisioned nodes - "name": "control-2", - "pm_password": "control-2-password", - "pm_type": "ipmi", - "pm_user": "control-2-admin", - "pm_addr": "2.3.4.5", - "ports": [ - {"address": "22:33:44:55:66:77"} - ] - } - ] - - result = stack_parameters.generate_fencing_parameters( - test_envjson, 28, 5, 0, True)['parameter_defaults'] - - self.assertTrue(result["EnableFencing"]) - 
self.assertEqual(len(result["FencingConfig"]["devices"]), 5) - self.assertEqual(result["FencingConfig"]["devices"][0], { - "agent": "fence_ipmilan", - "host_mac": "00:11:22:33:44:55", - "params": { - "delay": 28, - "ipaddr": "0.1.2.3", - "ipport": "0123", - "lanplus": True, - "privlvl": 5, - "login": "control-0-admin", - "passwd": "control-0-password", - } - }) - self.assertEqual(result["FencingConfig"]["devices"][1], { - "agent": "fence_ipmilan", - "host_mac": "11:22:33:44:55:66", - "params": { - "delay": 28, - "ipaddr": "1.2.3.4", - "lanplus": True, - "privlvl": 5, - "login": "control-1-admin", - "passwd": "control-1-password", - } - }) - self.assertEqual(result["FencingConfig"]["devices"][2], { - "agent": "fence_redfish", - "host_mac": "aa:bb:cc:dd:ee:ff", - "params": { - "delay": 28, - "ipaddr": "172.16.0.1:8000", - "ipport": "8000", - "privlvl": 5, - "login": "root", - "passwd": "calvin", - "systems_uri": "/redfish/v1/Systems/5678", - "ssl_insecure": "true", - } - }) - self.assertEqual(result["FencingConfig"]["devices"][3], { - "agent": "fence_rhevm", - "host_mac": "bb:cc:dd:ee:ff:gg", - "params": { - "delay": 28, - "ipaddr": "3.4.5.6", - "login": "admin@internal", - "passwd": "ovirt-password", - "port": "control-3", - "ssl": 1, - "ssl_insecure": 1, - } - }) - - def test_run_valid_network_config(self): - mock_env = { - 'template': {}, - 'files': {}, - 'environment': [{'path': 'environments/test.yaml'}] - } - - mock_heat = mock.MagicMock() - - mock_heat.stacks.preview.return_value = mock.Mock(resources=[{ - "resource_identity": {"stack_name": "overcloud-TEMP-Compute-0"}, - "resource_name": "OsNetConfigImpl", - "properties": {"config": "echo \'{\"network_config\": {}}\'" - " > /etc/os-net-config/config.json"} - }]) - - expected = {"network_config": {}} - # Test - result = stack_parameters.get_network_configs( - mock_heat, mock_env, container='overcloud', role_name='Compute') - self.assertEqual(expected, result) - mock_heat.stacks.preview.assert_called_once_with( - environment=[{'path': 'environments/test.yaml'}], - files={}, - template={}, - stack_name='overcloud-TEMP', - ) - - def test_run_invalid_network_config(self): - - mock_env = { - 'template': {}, - 'files': {}, - 'environment': [{'path': 'environments/test.yaml'}] - } - mock_heat = mock.MagicMock() - - mock_heat.stacks.preview.return_value = mock.Mock(resources=[{ - "resource_identity": {"stack_name": "overcloud-TEMP-Compute-0"}, - "resource_name": "OsNetConfigImpl", - "properties": {"config": ""} - }]) - - # Test - self.assertRaises(RuntimeError, - stack_parameters.get_network_configs, - mock_heat, mock_env, container='overcloud', - role_name='Compute') - mock_heat.stacks.preview.assert_called_once_with( - environment=[{'path': 'environments/test.yaml'}], - files={}, - template={}, - stack_name='overcloud-TEMP', - ) - - def test_run_valid_network_config_with_no_if_routes_inputs(self): - - mock_env = { - 'template': { - 'resources': { - 'ComputeGroupVars': { - 'properties': { - 'value': { - 'role_networks': ['InternalApi', - 'Storage']} - } - } - } - }, - 'files': {}, - 'environment': {'parameter_defaults': {}} - } - - mock_heat = mock.MagicMock() - - mock_heat.stacks.preview.return_value = mock.Mock(resources=[{ - "resource_identity": {"stack_name": "overcloud-TEMP-Compute-0"}, - "resource_name": "OsNetConfigImpl", - "properties": {"config": "echo \'{\"network_config\": {}}\'" - " > /etc/os-net-config/config.json"} - }]) - - expected = {"network_config": {}} - # Test - result = stack_parameters.get_network_configs( - mock_heat, 
mock_env, container='overcloud', role_name='Compute') - self.assertEqual(expected, result) - mock_heat.stacks.preview.assert_called_once_with( - environment={ - 'parameter_defaults': { - 'InternalApiInterfaceRoutes': [[]], - 'StorageInterfaceRoutes': [[]] - } - }, - files={}, - template={'resources': {'ComputeGroupVars': {'properties': { - 'value': {'role_networks': ['InternalApi', 'Storage']} - }}}}, - stack_name='overcloud-TEMP', - ) - - def test_run_valid_network_config_with_if_routes_inputs(self): - - mock_env = { - 'template': { - 'resources': { - 'ComputeGroupVars': { - 'properties': { - 'value': { - 'role_networks': ['InternalApi', - 'Storage']} - } - } - } - }, - 'files': {}, - 'environment': { - 'parameter_defaults': { - 'InternalApiInterfaceRoutes': ['test1'], - 'StorageInterfaceRoutes': ['test2'] - }} - } - - mock_heat = mock.MagicMock() - - mock_heat.stacks.preview.return_value = mock.Mock(resources=[{ - "resource_identity": {"stack_name": "overcloud-TEMP-Compute-0"}, - "resource_name": "OsNetConfigImpl", - "properties": {"config": "echo \'{\"network_config\": {}}\'" - " > /etc/os-net-config/config.json"} - }]) - - expected = {"network_config": {}} - # Test - result = stack_parameters.get_network_configs( - mock_heat, mock_env, container='overcloud', role_name='Compute') - self.assertEqual(expected, result) - mock_heat.stacks.preview.assert_called_once_with( - environment={ - 'parameter_defaults': { - 'InternalApiInterfaceRoutes': ['test1'], - 'StorageInterfaceRoutes': ['test2'] - } - }, - files={}, - template={'resources': {'ComputeGroupVars': {'properties': { - 'value': {'role_networks': ['InternalApi', 'Storage']} - }}}}, - stack_name='overcloud-TEMP', - ) diff --git a/tripleo_common/update.py b/tripleo_common/update.py deleted file mode 100644 index ba37f28c1..000000000 --- a/tripleo_common/update.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
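The two interface-routes tests just above reduce to one seeding rule: every '<Network>InterfaceRoutes' parameter defaults to [[]] unless the caller already supplied a value. A hypothetical helper capturing that rule (the name seed_interface_routes is invented for illustration; only the default and the don't-overwrite behavior come from the tests):

    def seed_interface_routes(environment, role_networks):
        # Default each '<Network>InterfaceRoutes' to [[]], leaving any
        # caller-provided routes untouched.
        defaults = environment.setdefault('parameter_defaults', {})
        for net in role_networks:
            defaults.setdefault('%sInterfaceRoutes' % net, [[]])

    # e.g. seeds InternalApiInterfaceRoutes and StorageInterfaceRoutes:
    seed_interface_routes({'parameter_defaults': {}}, ['InternalApi', 'Storage'])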
- -from heatclient.common import template_utils - -from tripleo_common import constants - - -def add_breakpoints_cleanup_into_env(env): - template_utils.deep_update(env, { - 'resource_registry': { - 'resources': {'*': {'*': { - constants.UPDATE_RESOURCE_NAME: {'hooks': []}}}} - } - }) - - -def search_stack(stack_data, key_name): - if isinstance(stack_data, list): - for item in stack_data: - result = search_stack(item, key_name) - if result: - return result - elif isinstance(stack_data, dict): - for k, v in stack_data.items(): - if k == key_name: - return v - result = search_stack(v, key_name) - if result: - return result - - -def get_exclusive_neutron_driver(drivers): - if not drivers: - return - mutually_exclusive_drivers = constants.EXCLUSIVE_NEUTRON_DRIVERS - if isinstance(drivers, str): - drivers = [drivers] - for d in mutually_exclusive_drivers: - if d in drivers: - return d - - -def check_neutron_mechanism_drivers(env, stack, plan_client, container): - force_update = env.get('parameter_defaults').get( - 'ForceNeutronDriverUpdate', False) - # Forcing an update and skipping the checks is needed to support - # migrating from one driver to another - if force_update: - return - - driver_key = 'NeutronMechanismDrivers' - current_drivers = search_stack(stack._info, driver_key) - # TODO(beagles): We may need to move or copy this check earlier - # to automagically pull in an openvswitch ML2 compatibility driver. - current_driver = get_exclusive_neutron_driver(current_drivers) - configured_drivers = env.get('parameter_defaults').get(driver_key) - new_driver = None - if configured_drivers: - new_driver = get_exclusive_neutron_driver(configured_drivers) - else: - # nothing is configured, so assume the most recent default (OVN) - new_driver = 'ovn' - if current_driver and new_driver and current_driver != new_driver: - msg = ("Unable to switch from {} to {} neutron " - "mechanism drivers on upgrade. Please consult the " - "documentation.").format(current_driver, new_driver) - return msg diff --git a/tripleo_common/utils/__init__.py b/tripleo_common/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/utils/ansible.py b/tripleo_common/utils/ansible.py deleted file mode 100644 index 84708449e..000000000 --- a/tripleo_common/utils/ansible.py +++ /dev/null @@ -1,375 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
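Before the ansible.py hunk continues, a short usage sketch for update.py's search_stack as defined just above: a depth-first lookup that returns the first value stored under key_name anywhere in a nested structure of dicts and lists. The sample data here is invented for illustration:

    stack_info = {
        'resources': [
            {'parameters': {'NeutronMechanismDrivers': ['ovn']}},
        ],
    }
    # Recurses through dicts and lists until the key is found.
    print(search_stack(stack_info, 'NeutronMechanismDrivers'))  # ['ovn']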
-from datetime import datetime -from io import StringIO -import json -import logging -import multiprocessing -import os -from pathlib import Path -import shutil -import configparser -import tempfile -import yaml - -from oslo_concurrency import processutils - -from tripleo_common import constants - -LOG = logging.getLogger(__name__) - - -def write_default_ansible_cfg(work_dir, - remote_user, - ssh_private_key=None, - transport=None, - base_ansible_cfg='/etc/ansible/ansible.cfg', - override_ansible_cfg=None): - ansible_config_path = os.path.join(work_dir, 'ansible.cfg') - shutil.copy(base_ansible_cfg, ansible_config_path) - - modules_path = ( - '/root/.ansible/plugins/modules:' - '/usr/share/ansible/tripleo-plugins/modules:' - '/usr/share/ansible/plugins/modules:' - '/usr/share/ansible-modules:' - '{}/library'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR)) - lookups_path = ( - '/root/.ansible/plugins/lookup:' - '/usr/share/ansible/tripleo-plugins/lookup:' - '/usr/share/ansible/plugins/lookup:' - '{}/lookup_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR)) - callbacks_path = ( - '~/.ansible/plugins/callback:' - '/usr/share/ansible/tripleo-plugins/callback:' - '/usr/share/ansible/plugins/callback:' - '{}/callback_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR)) - - callbacks_whitelist = ','.join(['tripleo_dense', 'tripleo_profile_tasks', - 'tripleo_states']) - action_plugins_path = ( - '~/.ansible/plugins/action:' - '/usr/share/ansible/plugins/action:' - '/usr/share/ansible/tripleo-plugins/action:' - '{}/action_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR)) - filter_plugins_path = ( - '~/.ansible/plugins/filter:' - '/usr/share/ansible/plugins/filter:' - '/usr/share/ansible/tripleo-plugins/filter:' - '{}/filter_plugins'.format( - constants.DEFAULT_VALIDATIONS_BASEDIR)) - roles_path = ('{work_dir!s}/roles:' - '/root/.ansible/roles:' - '/usr/share/ansible/tripleo-roles:' - '/usr/share/ansible/roles:' - '/etc/ansible/roles:' - '{work_dir!s}'.format(work_dir=work_dir)) - - config = configparser.ConfigParser() - config.read(ansible_config_path) - - # NOTE(dvd): since ansible 2.12, we need to create the sections - # because the base file is now empty. 
- for section in ['defaults', 'ssh_connection']: - if section not in config.sections(): - config.add_section(section) - - config.set('defaults', 'retry_files_enabled', 'False') - config.set('defaults', 'roles_path', roles_path) - config.set('defaults', 'library', modules_path) - config.set('defaults', 'callback_plugins', callbacks_path) - config.set('defaults', 'callback_whitelist', callbacks_whitelist) - config.set('defaults', 'stdout_callback', 'tripleo_dense') - config.set('defaults', 'action_plugins', action_plugins_path) - config.set('defaults', 'lookup_plugins', lookups_path) - config.set('defaults', 'filter_plugins', filter_plugins_path) - - log_path = os.path.join(work_dir, 'ansible.log') - config.set('defaults', 'log_path', log_path) - if os.path.exists(log_path): - new_path = (log_path + '-' + - datetime.now().strftime("%Y-%m-%dT%H:%M:%S")) - os.rename(log_path, new_path) - - # Create the log file, and set some rights on it in order to prevent - # unwanted access - Path(log_path).touch() - os.chmod(log_path, 0o640) - - config.set('defaults', 'forks', str(min( - multiprocessing.cpu_count() * 4, 100))) - config.set('defaults', 'timeout', '30') - config.set('defaults', 'gather_timeout', '30') - - # Set up the fact cache to improve playbook execution speed - config.set('defaults', 'gathering', 'smart') - config.set('defaults', 'fact_caching', 'jsonfile') - config.set('defaults', 'fact_caching_connection', - '~/.ansible/fact_cache') - # NOTE(mwhahaha): only gather the bare minimum facts because this has - # direct impact on how fast ansible can go. - config.set('defaults', 'gather_subset', '!all,min') - # NOTE(mwhahaha): this significantly affects performance per ansible#73654 - config.set('defaults', 'inject_facts_as_vars', 'false') - - # Set the poll interval to lower CPU overhead - config.set('defaults', 'internal_poll_interval', '0.01') - - # Set the interpreter discovery to auto mode. 
- config.set('defaults', 'interpreter_python', 'auto') - - # Expire facts in the fact cache after 7200s (2h) - config.set('defaults', 'fact_caching_timeout', '7200') - - # mistral user has no home dir set, so no place to save a known hosts file - config.set('ssh_connection', 'ssh_args', - '-o UserKnownHostsFile=/dev/null ' - '-o StrictHostKeyChecking=no ' - '-o ControlMaster=auto ' - '-o ControlPersist=30m ' - '-o ServerAliveInterval=5 ' - '-o ServerAliveCountMax=5 ' - '-o PreferredAuthentications=publickey') - config.set('ssh_connection', 'control_path_dir', - os.path.join(work_dir, 'ansible-ssh')) - config.set('ssh_connection', 'retries', '8') - config.set('ssh_connection', 'pipelining', 'True') - # Related to https://github.com/ansible/ansible/issues/22127 - config.set('ssh_connection', 'scp_if_ssh', 'True') - - if override_ansible_cfg: - sio_cfg = StringIO() - sio_cfg.write(override_ansible_cfg) - sio_cfg.seek(0) - config.read_file(sio_cfg) - sio_cfg.close() - - with open(ansible_config_path, 'w') as configfile: - config.write(configfile) - - return ansible_config_path - - -def _get_inventory(inventory, work_dir): - if not inventory: - return None - - if (isinstance(inventory, str) and - os.path.exists(inventory)): - return inventory - if not isinstance(inventory, str): - inventory = yaml.safe_dump(inventory) - - path = os.path.join(work_dir, 'inventory.yaml') - - with open(path, 'w') as inv: - inv.write(inventory) - - return path - - -def _get_ssh_private_key(ssh_private_key, work_dir): - if not ssh_private_key: - return None - - if (isinstance(ssh_private_key, str) and - os.path.exists(ssh_private_key)): - os.chmod(ssh_private_key, 0o600) - return ssh_private_key - - path = os.path.join(work_dir, 'ssh_private_key') - - with open(path, 'w') as ssh_key: - ssh_key.write(ssh_private_key) - os.chmod(path, 0o600) - - return path - - -def _get_playbook(playbook, work_dir): - if not playbook: - return None - - if (isinstance(playbook, str) and - os.path.exists(playbook)): - return playbook - if not isinstance(playbook, str): - playbook = yaml.safe_dump(playbook) - - path = os.path.join(work_dir, 'playbook.yaml') - - with open(path, 'w') as pb: - pb.write(playbook) - - return path - - -def run_ansible_playbook(playbook, work_dir=None, **kwargs): - verbosity = kwargs.get('verbosity', 5) - remove_work_dir = False - if not work_dir: - work_dir = tempfile.mkdtemp(prefix='tripleo-ansible') - remove_work_dir = True - - playbook = _get_playbook(playbook, work_dir) - ansible_playbook_cmd = "ansible-playbook" - if 1 < verbosity < 6: - verbosity_option = '-' + ('v' * (verbosity - 1)) - command = [ansible_playbook_cmd, verbosity_option, - playbook] - else: - command = [ansible_playbook_cmd, playbook] - - limit_hosts = kwargs.get('limit_hosts', None) - if limit_hosts: - command.extend(['--limit', limit_hosts]) - - module_path = kwargs.get('module_path', None) - if module_path: - command.extend(['--module-path', module_path]) - - become = kwargs.get('become', False) - if become: - command.extend(['--become']) - - become_user = kwargs.get('become_user', None) - if become_user: - command.extend(['--become-user', become_user]) - - extra_vars = kwargs.get('extra_vars', None) - if extra_vars: - extra_vars = json.dumps(extra_vars) - command.extend(['--extra-vars', extra_vars]) - - flush_cache = kwargs.get('flush_cache', False) - if flush_cache: - command.extend(['--flush-cache']) - - forks = kwargs.get('forks', None) - if forks: - command.extend(['--forks', forks]) - - ssh_common_args = 
kwargs.get('ssh_common_args', None) - if ssh_common_args: - command.extend(['--ssh-common-args', ssh_common_args]) - - ssh_extra_args = kwargs.get('ssh_extra_args', None) - if ssh_extra_args: - command.extend(['--ssh-extra-args', ssh_extra_args]) - - timeout = kwargs.get('timeout', None) - if timeout: - command.extend(['--timeout', timeout]) - - inventory = _get_inventory(kwargs.get('inventory', None), - work_dir) - if inventory: - command.extend(['--inventory-file', inventory]) - - tags = kwargs.get('tags', None) - if tags: - command.extend(['--tags', tags]) - - skip_tags = kwargs.get('skip_tags', None) - if skip_tags: - command.extend(['--skip-tags', skip_tags]) - - extra_env_variables = kwargs.get('extra_env_variables', None) - override_ansible_cfg = kwargs.get('override_ansible_cfg', None) - remote_user = kwargs.get('remote_user', None) - ssh_private_key = kwargs.get('ssh_private_key', None) - - if extra_env_variables: - if not isinstance(extra_env_variables, dict): - msg = "extra_env_variables must be a dict" - raise RuntimeError(msg) - for key, value in extra_env_variables.items(): - extra_env_variables[key] = str(value) - - try: - ansible_config_path = write_default_ansible_cfg( - work_dir, - remote_user, - ssh_private_key=_get_ssh_private_key( - ssh_private_key, work_dir), - override_ansible_cfg=override_ansible_cfg) - env_variables = { - 'HOME': work_dir, - 'ANSIBLE_LOCAL_TEMP': work_dir, - 'ANSIBLE_CONFIG': ansible_config_path, - } - - profile_tasks = kwargs.get('profile_tasks', True) - if profile_tasks: - profile_tasks_limit = kwargs.get('profile_tasks_limit', 20) - env_variables.update({ - # the whitelist could be collected from multiple - # arguments if we find a use case for it - 'ANSIBLE_CALLBACKS_ENABLED': - 'tripleo_dense,tripleo_profile_tasks,tripleo_states', - 'ANSIBLE_STDOUT_CALLBACK': 'tripleo_dense', - 'PROFILE_TASKS_TASK_OUTPUT_LIMIT': - str(profile_tasks_limit), - }) - - if extra_env_variables: - env_variables.update(extra_env_variables) - - command = [str(c) for c in command] - - reproduce_command = kwargs.get('reproduce_command', None) - command_timeout = kwargs.get('command_timeout', None) - trash_output = kwargs.get('trash_output', None) - if reproduce_command: - command_path = os.path.join(work_dir, - "ansible-playbook-command.sh") - with open(command_path, 'w') as f: - f.write('#!/bin/bash\n') - f.write('\n') - for var in env_variables: - f.write('%s="%s"\n' % (var, env_variables[var])) - f.write('\n') - f.write(' '.join(command)) - f.write(' "$@"') - f.write('\n') - - os.chmod(command_path, 0o750) - - if command_timeout: - command = ['timeout', '-s', 'KILL', - str(command_timeout)] + command - - LOG.info('Running ansible-playbook command: %s', command) - - # processutils.execute returns a (stdout, stderr) tuple - stdout, stderr = processutils.execute( - *command, cwd=work_dir, - env_variables=env_variables, - log_errors=processutils.LogErrors.ALL) - if trash_output: - stdout = "" - stderr = "" - return {"stderr": stderr, "stdout": stdout, - "log_path": os.path.join(work_dir, 'ansible.log')} - finally: - try: - if remove_work_dir: - shutil.rmtree(work_dir) - except Exception as e: - msg = ("An error happened while cleaning work directory: " + - str(e)) - raise RuntimeError(msg) diff --git a/tripleo_common/utils/common.py b/tripleo_common/utils/common.py deleted file mode 100644 index b4b3c3b1a..000000000 --- a/tripleo_common/utils/common.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2019 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket - - -def bracket_ipv6(address): - """Put a bracket around address if it is valid IPv6 - - Return it unchanged if it is a hostname or IPv4 address. - """ - try: - socket.inet_pton(socket.AF_INET6, address) - return "[%s]" % address - except (socket.error, TypeError): - return address diff --git a/tripleo_common/utils/config.py b/tripleo_common/utils/config.py deleted file mode 100644 index c6a49f4a2..000000000 --- a/tripleo_common/utils/config.py +++ /dev/null @@ -1,601 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import git -import json -import logging -import os -import re -import shutil -import tempfile -import warnings -import yaml - -import jinja2 - -from tripleo_common import constants - - -LOG = logging.getLogger(__name__) - -warnings.filterwarnings('once', category=DeprecationWarning) -warnings.filterwarnings('once', category=UserWarning) - - -class Config(object): - - def __init__(self, orchestration_client): - self.log = logging.getLogger(__name__ + ".Config") - self.client = orchestration_client - self.stack_outputs = {} - - def get_server_names(self): - servers = {} - role_node_id_map = self.stack_outputs.get('ServerIdData', {}) - role_net_hostname_map = self.stack_outputs.get( - 'RoleNetHostnameMap', {}) - for role, hostnames in role_net_hostname_map.items(): - if hostnames: - names = hostnames.get(constants.HOST_NETWORK) or [] - shortnames = [n.split(".%s." 
% constants.HOST_NETWORK)[0] - for n in names] - for idx, name in enumerate(shortnames): - if 'server_ids' in role_node_id_map: - server_id = role_node_id_map['server_ids'][role][idx] - if server_id is not None: - servers[server_id] = name.lower() - return servers - - def get_deployment_data(self, stack, - nested_depth=constants.NESTED_DEPTH): - deployments = self.client.resources.list( - stack, - nested_depth=nested_depth, - filters=dict(name=constants.TRIPLEO_DEPLOYMENT_RESOURCE), - with_detail=True) - # Sort by creation time - deployments = sorted(deployments, key=lambda d: d.creation_time) - return deployments - - def get_role_from_server_id(self, stack, server_id): - server_id_data = self.stack_outputs.get('ServerIdData', {} - ).get('server_ids', {}) - - for k, v in server_id_data.items(): - if server_id in v: - return k - - def get_deployment_resource_id(self, deployment): - if '/' in deployment.attributes['value']['deployment']: - deployment_stack_id = \ - deployment.attributes['value']['deployment'].split('/')[-1] - deployment_resource_id = self.client.resources.get( - deployment_stack_id, - 'TripleOSoftwareDeployment').physical_resource_id - else: - deployment_resource_id = \ - deployment.attributes['value']['deployment'] - return deployment_resource_id - - def get_config_dict(self, deployment_resource_id): - deployment_rsrc = self.client.software_deployments.get( - deployment_resource_id) - config = self.client.software_configs.get( - deployment_rsrc.config_id) - - return config.to_dict() - - def get_jinja_env(self, tmp_path): - templates_path = os.path.join( - os.path.dirname(__file__), '..', 'templates') - self._mkdir(os.path.join(tmp_path, 'templates')) - env = jinja2.Environment( - loader=jinja2.FileSystemLoader(templates_path)) - env.trim_blocks = True - return env, templates_path - - def get_role_config(self): - role_config = self.stack_outputs.get('RoleConfig', {}) - # RoleConfig can exist as a stack output but have a value of None - return role_config or {} - - @staticmethod - def _open_file(path): - return os.fdopen( - os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600), 'w') - - def _write_tasks_per_step(self, tasks, filepath, step, strict=False): - - def step_in_task(task, step, strict): - whenexpr = task.get('when', None) - if whenexpr is None: - if not strict: - # If no step is defined, it will be executed for all - # steps if strict is false - return True - # We only want the task with the step defined. 
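# In strict mode only tasks whose 'when' expression pins a step,
# e.g. "when: step|int == 2", are kept, so a task with no 'when'
# clause at all is dropped here.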
- return False - if not isinstance(whenexpr, list): - whenexpr = [whenexpr] - - # Filter out boolean values and remove blanks - flatten_when = "".join([re.sub(r'\s+', '', x) - for x in whenexpr - if isinstance(x, str)]) - # make \|int optional in case it was forgotten; use only the step digit: - # ()'s around step|int are also optional - steps_found = re.findall(r'\(?step(?:\|int)?\)?==(\d+)', - flatten_when) - if steps_found: - if str(step) in steps_found: - return True - return False - - # No step found - if strict: - return False - return True - - tasks_per_step = [task for task in tasks if step_in_task(task, - step, - strict)] - with self._open_file(filepath) as conf_file: - yaml.safe_dump(tasks_per_step, conf_file, default_flow_style=False) - return tasks_per_step - - def initialize_git_repo(self, dirname): - repo = git.Repo.init(dirname) - gitignore_path = os.path.join(dirname, '.gitignore') - - # Ignore tarballs, which we use for the export process - if not os.path.exists(gitignore_path): - with open(gitignore_path, 'w') as f: - f.write('*.tar.gz\n') - # For some reason using repo.index.add is not working, so go - # directly to the GitCmd interface. - repo.git.add('.gitignore') - - return repo - - def snapshot_config_dir(self, repo, commit_message): - if repo.is_dirty(untracked_files=True): - self.log.info('Snapshotting %s', repo.working_dir) - # Use repo.git.add directly as repo.index.add defaults to forcing - # commit of ignored files, which we don't want. - repo.git.add('.') - commit = repo.index.commit(commit_message) - self.log.info('Created commit %s', commit.hexsha) - else: - self.log.info('No changes to commit') - - def _mkdir(self, dirname): - if not os.path.exists(dirname): - try: - os.makedirs(dirname, 0o700) - except OSError as e: - message = 'Failed to create: %s, error: %s' % (dirname, - str(e)) - raise OSError(message) - - def create_config_dir(self, config_dir, preserve_config_dir=True): - # Create config directory - if os.path.exists(config_dir) and preserve_config_dir is False: - try: - self.log.info("Directory %s already exists, removing", - config_dir) - shutil.rmtree(config_dir) - except OSError as e: - message = 'Failed to remove: %s, error: %s' % (config_dir, - str(e)) - raise OSError(message) - - os.makedirs(config_dir, mode=0o700, exist_ok=True) - - # Create Runner friendly directory structure - # https://ansible-runner.readthedocs.io/en/stable/intro.html#runner-artifacts-directory-hierarchy - structure = [ - 'artifacts', - 'env', - 'inventory', - 'profiling_data', - 'project', - 'roles', - ] - for d in structure: - os.makedirs(os.path.join(config_dir, d), mode=0o700, - exist_ok=True) - - def fetch_config(self, name): - # Get the stack object - stack = self.client.stacks.get(name) - self.stack_outputs = {i['output_key']: i['output_value'] - for i in stack.outputs} - return stack - - def validate_config(self, template_data, yaml_file): - try: - yaml.safe_load(template_data) - except (yaml.scanner.ScannerError, yaml.YAMLError) as e: - self.log.error("Config for file %s contains invalid yaml, got " - "error %s", yaml_file, e) - raise e - - def write_config(self, stack, name, config_dir, config_type=None): - # Get role data: - role_data = self.stack_outputs.get('RoleData', {}) - role_group_vars = self.stack_outputs.get('RoleGroupVars', {}) - role_host_vars = self.stack_outputs.get('AnsibleHostVarsMap', {}) - for role_name, role in role_data.items(): - role_path = os.path.join(config_dir, role_name) - self._mkdir(role_path) - for config in config_type or role.keys(): - if 
config in constants.EXTERNAL_TASKS: - # external tasks are collected globally, not per-role - continue - if config == 'step_config': - filepath = os.path.join(role_path, 'step_config.pp') - with self._open_file(filepath) as step_config: - step_config.write(role[config]) - elif config == 'param_config': - filepath = os.path.join(role_path, 'param_config.json') - with self._open_file(filepath) as param_config: - param_config.write(json.dumps(role[config])) - else: - # NOTE(emilien): Move this condition to the - # upper level once THT is adapted for all tasks to be - # run per step. - # We include it here to allow CI to pass until the THT - # change is merged. - if config in constants.PER_STEP_TASKS.keys(): - for i in range(len(constants.PER_STEP_TASKS[config])): - filepath = os.path.join(role_path, '%s_step%s.yaml' - % (config, i)) - self._write_tasks_per_step( - role[config], filepath, i, - constants.PER_STEP_TASKS[config][i]) - - try: - data = role[config] - except KeyError as e: - message = 'Invalid key: %s, error: %s' % (config, - str(e)) - raise KeyError(message) - filepath = os.path.join(role_path, '%s.yaml' % config) - with self._open_file(filepath) as conf_file: - yaml.safe_dump(data, - conf_file, - default_flow_style=False) - - role_config = self.get_role_config() - for config_name, config in role_config.items(): - - # External tasks are in RoleConfig and not defined per role. - # So we don't use the RoleData to create the per step playbooks. - if config_name in constants.EXTERNAL_TASKS: - for i in range(constants.DEFAULT_STEPS_MAX): - filepath = os.path.join(config_dir, - '%s_step%s.yaml' - % (config_name, i)) - self._write_tasks_per_step(config, filepath, i) - - conf_path = os.path.join(config_dir, config_name) - # Add .yaml extension only if there's no extension already - if '.' not in conf_path: - conf_path = conf_path + ".yaml" - with self._open_file(conf_path) as conf_file: - if isinstance(config, list) or isinstance(config, dict): - yaml.safe_dump(config, conf_file, default_flow_style=False) - else: - conf_file.write(config) - - # Get deployment data - self.log.info("Getting deployment data from Heat...") - deployments_data = self.get_deployment_data(name) - - # server_deployments is a dict of server name to a list of deployments - # (dicts) associated with that server - server_deployments = {} - # server_names is a dict of server id to server_name for easier lookup - server_names = self.get_server_names() - server_ids = dict([(v, k) for (k, v) in server_names.items()]) - # server_deployment_names is a dict of server names to deployment names - # for that role. The deployment names are further separated in their - # own dict with keys of pre_deployment/post_deployment. - server_deployment_names = {} - # server_roles is a dict of server name to server role for easier - # lookup - server_roles = {} - - for deployment in deployments_data: - # Check if the deployment value is the resource name. If that's the - # case, Heat did not create a physical_resource_id for this - # deployment since it does not trigger on this stack action, such - # as a deployment that only triggers on DELETE while this is a stack - # create. If that's the case, just skip this deployment; otherwise - # it would result in a Not found error if we tried to query the - # deployment API for this deployment.
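# The marker for such a skipped deployment, checked below, is the
# literal resource name 'TripleOSoftwareDeployment' in the
# 'deployment' attribute instead of a stack id/uuid.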
dep_value_resource_name = deployment.attributes[ - 'value'].get('deployment') == 'TripleOSoftwareDeployment' - - # if not, check the physical_resource_id - if not dep_value_resource_name: - deployment_resource_id = self.get_deployment_resource_id( - deployment) - - if dep_value_resource_name or not deployment_resource_id: - warnings.warn('Skipping deployment %s because it has no ' - 'valid uuid (physical_resource_id) ' - 'associated.' % - deployment.physical_resource_id) - continue - - server_id = deployment.attributes['value']['server'] - config_dict = self.get_config_dict(deployment_resource_id) - - # deployment_name should be set via the name property on the - # Deployment resources in the templates, however, if it's None - # or an empty string, default to the name of the parent_resource. - deployment_name = deployment.attributes['value'].get( - 'name') or deployment.parent_resource - if not deployment_name: - message = "The deployment name cannot be determined. It " \ - "should be set via the name property on the " \ - "Deployment resources in the templates." - raise ValueError(message) - - try: - int(deployment_name) - except ValueError: - pass - else: - # We can't have an integer here, let's figure out the - # grandparent resource name - deployment_ref = deployment.attributes['value']['deployment'] - warnings.warn('Determining grandparent resource name for ' - 'deployment %s. Ensure the name property is ' - 'set on the deployment resource in the ' - 'templates.' % deployment_ref) - - if '/' in deployment_ref: - deployment_stack_id = deployment_ref.split('/')[-1] - else: - for link in deployment.links: - if link['rel'] == 'stack': - deployment_stack_id = link['href'].split('/')[-1] - break - else: - raise ValueError("Could not find parent stack") - deployment_stack = self.client.stacks.get( - deployment_stack_id, resolve_outputs=False) - parent_stack = deployment_stack.parent - grandparent_stack = self.client.stacks.get( - parent_stack, resolve_outputs=False).parent - resources = self.client.resources.list( - grandparent_stack, - filters=dict(physical_resource_id=parent_stack)) - if not resources: - message = "The deployment resource grandparent name " \ - "could not be determined." - raise ValueError(message) - deployment_name = resources[0].resource_name - config_dict['deployment_name'] = deployment_name - - # reset deploy_server_id to the actual server_id since we have to - # use a dummy server resource to create the deployment in the - # templates - deploy_server_id_input = \ - [i for i in config_dict['inputs'] - if i['name'] == 'deploy_server_id'].pop() - deploy_server_id_input['value'] = server_id - - # We don't want to fail if server_id can't be found, as it's - # most probably due to blacklisted nodes. However, we fail for - # other errors.
- try: - server_deployments.setdefault( - server_names[server_id], - []).append(config_dict) - except KeyError: - self.log.warning('Server with id %s is ignored from config ' - '(may be blacklisted)', server_id) - # continue the loop as this server_id is probably excluded - continue - except Exception as err: - err_msg = ('Error retrieving server name from this server_id: ' - '%s with this error: %s' % (server_id, err)) - raise Exception(err_msg) - - role = self.get_role_from_server_id(stack, server_id) - server_pre_network = server_deployment_names.setdefault( - server_names[server_id], {}).setdefault( - 'pre_network', []) - server_pre_deployments = server_deployment_names.setdefault( - server_names[server_id], {}).setdefault( - 'pre_deployments', []) - server_post_deployments = server_deployment_names.setdefault( - server_names[server_id], {}).setdefault( - 'post_deployments', []) - - server_roles[server_names[server_id]] = role - - # special handling of deployments that run after the deploy - # steps. We have to look these up based on the - # physical_resource_id, but these names should be consistent since - # they are consistent interfaces in our templates. - if 'ExtraConfigPost' in deployment.physical_resource_id or \ - 'PostConfig' in deployment.physical_resource_id: - if deployment_name not in server_post_deployments: - server_post_deployments.append(deployment_name) - elif 'PreNetworkConfig' in deployment.physical_resource_id: - if deployment_name not in server_pre_network: - server_pre_network.append(deployment_name) - else: - if deployment_name not in server_pre_deployments: - server_pre_deployments.append(deployment_name) - - # Make sure server_roles is populated b/c it won't be if there are no - # server deployments. - for name, server_id in server_ids.items(): - server_roles.setdefault( - name, - self.get_role_from_server_id(stack, server_id)) - - env, templates_path = self.get_jinja_env(config_dir) - - templates_dest = os.path.join(config_dir, 'templates') - self._mkdir(templates_dest) - shutil.copyfile(os.path.join(templates_path, 'heat-config.j2'), - os.path.join(templates_dest, 'heat-config.j2')) - - group_vars_dir = os.path.join(config_dir, 'group_vars') - self._mkdir(group_vars_dir) - - host_vars_dir = os.path.join(config_dir, 'host_vars') - self._mkdir(host_vars_dir) - - for server, deployments in server_deployments.items(): - deployment_template = env.get_template('deployment.j2') - - for d in deployments: - - server_deployment_dir = os.path.join( - config_dir, server_roles[server], server) - self._mkdir(server_deployment_dir) - deployment_path = os.path.join( - server_deployment_dir, d['deployment_name']) - - # See if the config can be loaded as a JSON data structure. - # In some cases, it may already be JSON (hiera), or it may just - # be a string (script). In those cases, just use the value - # as-is. - try: - data = json.loads(d['config']) - except Exception: - data = d['config'] - - # If the value is not a string already, pretty print it as a - # string so it's rendered in a readable format. - if not isinstance(data, str): - data = json.dumps(data, indent=2) - - d['config'] = data - - # The hiera Heat hook expects an actual dict for the config - # value, not a scalar. All other hooks expect a scalar. - if d['group'] == 'hiera': - d['scalar'] = False - else: - d['scalar'] = True - - if d['group'] == 'os-apply-config': - message = ("group:os-apply-config is deprecated. " - "Deployment %s will not be applied by " - "config-download."
% d['deployment_name']) - warnings.warn(message, DeprecationWarning) - - with open(deployment_path, 'wb') as f: - template_data = deployment_template.render( - deployment=d, - server_id=server_ids[server]) - self.validate_config(template_data, deployment_path) - f.write(template_data.encode('utf-8')) - - # Render group_vars - for role in set(server_roles.values()): - group_var_role_path = os.path.join(group_vars_dir, role) - # NOTE(aschultz): we just use yaml.safe_dump for the vars because - # the vars should already be in a hash for ansible. - # See LP#1801162 for previous issues around using jinja for this - with open(group_var_role_path, 'w') as group_vars_file: - yaml.safe_dump(role_group_vars[role], group_vars_file, - default_flow_style=False) - - # Render host_vars - for server in server_names.values(): - host_var_server_path = os.path.join(host_vars_dir, server) - host_var_server_template = env.get_template('host_var_server.j2') - role = server_roles[server] - ansible_host_vars = ( - yaml.safe_dump( - role_host_vars[role][server], - default_flow_style=False) if role_host_vars else None) - - pre_network = server_deployment_names.get( - server, {}).get('pre_network', []) - pre_deployments = server_deployment_names.get( - server, {}).get('pre_deployments', []) - post_deployments = server_deployment_names.get( - server, {}).get('post_deployments', []) - - with open(host_var_server_path, 'w') as f: - template_data = host_var_server_template.render( - role=role, - pre_network=pre_network, - pre_deployments=pre_deployments, - post_deployments=post_deployments, - ansible_host_vars=ansible_host_vars) - self.validate_config(template_data, host_var_server_path) - f.write(template_data) - - shutil.copyfile( - os.path.join(templates_path, 'deployments.yaml'), - os.path.join(config_dir, 'deployments.yaml')) - - self.log.info("The TripleO configuration has been successfully " - "generated into: %s", config_dir) - return config_dir - - def download_config(self, name, config_dir, config_type=None, - preserve_config_dir=True, commit_message=None): - - if commit_message is None: - commit_message = 'Automatic commit of config-download' - - # One step does it all - stack = self.fetch_config(name) - self.create_config_dir(config_dir, preserve_config_dir) - self._mkdir(config_dir) - git_repo = self.initialize_git_repo(config_dir) - self.log.info("Generating configuration under the directory: " - "%s", config_dir) - self.write_config(stack, name, config_dir, config_type) - self.snapshot_config_dir(git_repo, commit_message) - return config_dir - - -def get_overcloud_config(swift=None, heat=None, - container=constants.DEFAULT_CONTAINER_NAME, - container_config=constants.CONFIG_CONTAINER_NAME, - config_dir=None, config_type=None, - preserve_config=False): - if heat is None: - raise RuntimeError('Should provide orchestration client to fetch ' - 'TripleO config') - if not config_dir: - config_dir = tempfile.mkdtemp(prefix='tripleo-', - suffix='-config') - - config = Config(heat) - message = "Automatic commit with fresh config download\n\n" - - config_path = config.download_config(container, config_dir, - config_type, - preserve_config_dir=True, - commit_message=message) - - if not preserve_config: - if os.path.exists(config_path): - shutil.rmtree(config_path) diff --git a/tripleo_common/utils/heat.py b/tripleo_common/utils/heat.py deleted file mode 100644 index e6230ddb2..000000000 --- a/tripleo_common/utils/heat.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) 2021 Red Hat, Inc.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -from osc_lib import utils as osc_lib_utils - - -LOG = logging.getLogger(__name__) -heatclient = None - - -class EphemeralHeatClient(object): - """A Heat client shim class to be used with ephemeral Heat. - - When the heat client is used to talk to the Heat API, the environment will - be set with the correct variable configuration to configure Keystone for - auth type none and to use a direct endpoint. - - After the client is finished, the environment is restored. This is - necessary so that the entire system environment is not reconfigured for - auth_type=none for the duration of the tripleoclient execution. - - :param heat: Heat client - :type heat: `heatclient.heatclient` - - """ - - def __init__(self, heat, host, port): - self.heat = heat - self.host = host - self.port = port - os.environ['OS_HEAT_TYPE'] = 'ephemeral' - os.environ['OS_HEAT_HOST'] = host - os.environ['OS_HEAT_PORT'] = str(port) - - def save_environment(self): - self.environ = os.environ.copy() - for v in ('OS_USER_DOMAIN_NAME', - 'OS_PROJECT_DOMAIN_NAME', - 'OS_PROJECT_NAME', - 'OS_CLOUD'): - os.environ.pop(v, None) - - os.environ['OS_AUTH_TYPE'] = "none" - os.environ['OS_ENDPOINT'] = self.heat.http_client.endpoint - - def restore_environment(self): - os.environ = self.environ.copy() - - def __getattr__(self, attr): - self.save_environment() - try: - val = getattr(self.heat, attr) - finally: - self.restore_environment() - return val - - -def local_orchestration_client(host="127.0.0.1", api_port=8006): - """Returns a local orchestration service client""" - - API_VERSIONS = { - '1': 'heatclient.v1.client.Client', - } - - heat_client = osc_lib_utils.get_client_class( - 'tripleoclient', - '1', - API_VERSIONS) - LOG.debug('Instantiating local_orchestration client for ' - 'host %s, port %s: %s', - host, api_port, heat_client) - - endpoint = 'http://%s:%s/v1/admin' % (host, api_port) - client = heat_client( - endpoint=endpoint, - username='admin', - password='fake', - region_name='regionOne', - token='fake', - ) - - global heatclient - heatclient = EphemeralHeatClient(client, host, api_port) - return heatclient diff --git a/tripleo_common/utils/image.py b/tripleo_common/utils/image.py deleted file mode 100644 index 49f02b80f..000000000 --- a/tripleo_common/utils/image.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
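# A rough usage sketch of the helper below; the digest, path, and image
# reference are hypothetical, and uploaded_layers is assumed to map layer
# digests to per-scope metadata dicts, as the lookups imply:
#
#   uploaded = {'sha256:abc123': {'local': {'path': '/var/lib/layers/abc123',
#                                           'ref': 'nova-api:latest'}}}
#   path, image = uploaded_layers_details(uploaded, 'sha256:abc123', 'local')
#   # path == '/var/lib/layers/abc123', image == 'nova-api:latest'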
- - -def uploaded_layers_details(uploaded_layers, layer, scope): - known_path = None - known_layer = None - image = None - if layer: - known_layer = uploaded_layers.get(layer, None) - if known_layer and scope in known_layer: - known_path = known_layer[scope].get('path', None) - image = known_layer[scope].get('ref', None) - return (known_path, image) diff --git a/tripleo_common/utils/locks/__init__.py b/tripleo_common/utils/locks/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tripleo_common/utils/locks/base.py b/tripleo_common/utils/locks/base.py deleted file mode 100644 index fb7f5e5d2..000000000 --- a/tripleo_common/utils/locks/base.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class BaseLock(object): - def get_lock(self): - # pylint: disable=no-member - return self._lock - - def objects(self): - # pylint: disable=no-member - return self._objects - - def sessions(self): - # pylint: disable=no-member - return self._sessions diff --git a/tripleo_common/utils/locks/processlock.py b/tripleo_common/utils/locks/processlock.py deleted file mode 100644 index c30aee8c5..000000000 --- a/tripleo_common/utils/locks/processlock.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# NOTE(mwhahaha): this class cannot be imported under Mistral because the -# multiprocessor.Manager inclusion breaks things due to the service launching -# to handle the multiprocess work. - -import multiprocessing -from tripleo_common.utils.locks import base - - -class ProcessLock(base.BaseLock): - # the manager cannot live in __init__ - _mgr = multiprocessing.Manager() - _global_view = _mgr.dict() - - def __init__(self): - # https://github.com/PyCQA/pylint/issues/3313 - # pylint: disable=no-member - self._lock = self._mgr.Lock() - self._objects = self._mgr.list() - self._sessions = self._mgr.dict() diff --git a/tripleo_common/utils/locks/threadinglock.py b/tripleo_common/utils/locks/threadinglock.py deleted file mode 100644 index bacbefa8d..000000000 --- a/tripleo_common/utils/locks/threadinglock.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tripleo_common.utils.locks import base -import threading - - -class ThreadingLock(base.BaseLock): - def __init__(self): - self._lock = threading.Lock() - self._objects = [] - self._sessions = {} diff --git a/tripleo_common/utils/nodes.py b/tripleo_common/utils/nodes.py deleted file mode 100644 index a3973e8b4..000000000 --- a/tripleo_common/utils/nodes.py +++ /dev/null @@ -1,791 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2021 Dell Inc. or its subsidiaries. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import re - -from oslo_utils import netutils - -from ironicclient import exceptions as ironicexceptions -from oslo_concurrency import processutils -from tripleo_common import exception - -LOG = logging.getLogger(__name__) - -_KNOWN_INTERFACE_FIELDS = [ - '%s_interface' % field for field in ('boot', 'console', 'deploy', - 'inspect', 'management', 'network', - 'power', 'raid', 'rescue', 'storage', - 'vendor') -] - -CTLPLANE_NETWORK = 'ctlplane' - - -class DriverInfo(object): - """Class encapsulating field conversion logic.""" - DEFAULTS = {} - - def __init__(self, prefix, mapping, deprecated_mapping=None, - mandatory_fields=(), default_port=None, hardware_type=None): - self._prefix = prefix - self._mapping = mapping - self._deprecated_mapping = deprecated_mapping or {} - self._mandatory_fields = mandatory_fields - self._default_port = default_port - self._hardware_type = hardware_type - - @property - def default_port(self): - return self._default_port - - @property - def hardware_type(self): - return self._hardware_type - - def convert_key(self, key): - if key in self._mapping: - return self._mapping[key] - if key in self._deprecated_mapping: - real = self._deprecated_mapping[key] - LOG.warning('Key %s is deprecated, please use %s', - key, real) - return real - if key.startswith(self._prefix): - return key - if key != 'pm_type' and key.startswith('pm_'): - LOG.warning('Key %s is not supported and will not be passed', - key) - else: - LOG.debug('Skipping key %s not starting with prefix %s', - key, self._prefix) - - def convert(self, fields): - """Convert fields from instackenv.json format to ironic names.""" - result = self.DEFAULTS.copy() - for key, value in fields.items(): - new_key = self.convert_key(key) - if new_key is not None: - result[new_key] = value - return result - - def unique_id_from_fields(self, fields): - """Return a string uniquely identifying a node in instackenv.""" - - def unique_id_from_node(self, node): - """Return a string uniquely identifying a node in ironic db.""" - - def validate(self, 
node): - """Validate node record supplied by a user. - - :param node: node record before convert() - :raises: exception.InvalidNode - """ - missing = [] - for field in self._mandatory_fields: - if not node.get(field): - missing.append(field) - - if missing: - raise exception.InvalidNode( - 'The following fields are missing: %s' % ', '.join(missing)) - - -class PrefixedDriverInfo(DriverInfo): - def __init__(self, prefix, deprecated_mapping=None, - has_port=False, address_field='address', - default_port=None, hardware_type=None, - mandatory_fields=None): - mapping = { - 'pm_addr': '%s_%s' % (prefix, address_field), - 'pm_user': '%s_username' % prefix, - 'pm_password': '%s_password' % prefix, - } - mandatory_fields = mandatory_fields or list(mapping) - - if has_port: - mapping['pm_port'] = '%s_port' % prefix - self._has_port = has_port - - super(PrefixedDriverInfo, self).__init__( - prefix, mapping, - deprecated_mapping=deprecated_mapping, - mandatory_fields=mandatory_fields, - default_port=default_port, - hardware_type=hardware_type, - ) - - def unique_id_from_fields(self, fields): - try: - result = fields['pm_addr'] - except KeyError: - return - - if self._has_port and 'pm_port' in fields: - result = '%s:%s' % (result, fields['pm_port']) - return result - - def unique_id_from_node(self, node): - new_key = self.convert_key('pm_addr') - assert new_key is not None - try: - result = node.driver_info[new_key] - except KeyError: - # Node cannot be identified - return - - if self._has_port: - new_port = self.convert_key('pm_port') - assert new_port - try: - return '%s:%s' % (result, node.driver_info[new_port]) - except KeyError: - pass - - return result - - -class RedfishDriverInfo(DriverInfo): - def __init__(self): - mapping = { - 'pm_addr': 'redfish_address', - 'pm_user': 'redfish_username', - 'pm_password': 'redfish_password', - 'pm_system_id': 'redfish_system_id' - } - mandatory_fields = ['pm_addr', 'pm_system_id'] - - super(RedfishDriverInfo, self).__init__( - 'redfish', mapping, - deprecated_mapping=None, - mandatory_fields=mandatory_fields, - hardware_type='redfish', - ) - - def _build_id(self, address, system): - address = re.sub(r'https?://', '', address, count=1, flags=re.I) - return '%s/%s' % (address.rstrip('/'), system.lstrip('/')) - - def unique_id_from_fields(self, fields): - try: - return self._build_id(fields['pm_addr'], fields['pm_system_id']) - except KeyError: - return - - def unique_id_from_node(self, node): - try: - return self._build_id(node.driver_info['redfish_address'], - node.driver_info['redfish_system_id']) - except KeyError: - return - - -class oVirtDriverInfo(DriverInfo): - def __init__(self): - mapping = { - 'pm_addr': 'ovirt_address', - 'pm_user': 'ovirt_username', - 'pm_password': 'ovirt_password', - 'pm_vm_name': 'ovirt_vm_name' - } - - super(oVirtDriverInfo, self).__init__( - 'ovirt', mapping, - mandatory_fields=list(mapping), - hardware_type='staging-ovirt', - ) - - def unique_id_from_fields(self, fields): - try: - return '%s:%s' % (fields['pm_addr'], fields['pm_vm_name']) - except KeyError: - return - - def unique_id_from_node(self, node): - try: - return '%s:%s' % (node.driver_info['ovirt_address'], - node.driver_info['ovirt_vm_name']) - except KeyError: - return - - -class iBootDriverInfo(PrefixedDriverInfo): - def __init__(self): - super(iBootDriverInfo, self).__init__( - 'iboot', has_port=True, - deprecated_mapping={ - 'pm_relay_id': 'iboot_relay_id', - }, - hardware_type='staging-iboot', - ) - - def unique_id_from_fields(self, fields): - result = 
super(iBootDriverInfo, self).unique_id_from_fields(fields) - if 'iboot_relay_id' in fields: - result = '%s#%s' % (result, fields['iboot_relay_id']) - return result - - def unique_id_from_node(self, node): - try: - result = super(iBootDriverInfo, self).unique_id_from_node(node) - except IndexError: - return - - if node.driver_info.get('iboot_relay_id'): - result = '%s#%s' % (result, node.driver_info['iboot_relay_id']) - - return result - - -class iDRACDriverInfo(DriverInfo): - def __init__(self): - super(iDRACDriverInfo, self).__init__( - 'drac', mapping={}, - hardware_type='idrac', - ) - self._drac_driverinfo = PrefixedDriverInfo('drac', has_port=True, - hardware_type='idrac') - self._redfish_driverinfo = RedfishDriverInfo() - - def convert_key(self, key): - for driver_info in [self._drac_driverinfo, self._redfish_driverinfo]: - new_key = driver_info.convert_key(key) - if new_key: - return new_key - - def convert(self, fields): - """Convert fields from instackenv.json format to ironic names.""" - result = self.DEFAULTS.copy() - for key, value in fields.items(): - for driver_info in [self._drac_driverinfo, - self._redfish_driverinfo]: - new_key = driver_info.convert_key(key) - if new_key is not None: - if (key == 'pm_addr' and - driver_info is self._drac_driverinfo): - new_value = self._build_drac_address(value) - else: - new_value = value - result[new_key] = new_value - return result - - def _build_drac_address(self, value): - value = re.sub(r'https?://', '', value, count=1, flags=re.I) - result = value.split(':')[0] - return result.rstrip('/') - - def unique_id_from_fields(self, fields): - fields['pm_addr'] = self._build_drac_address(fields['pm_addr']) - return self._drac_driverinfo.unique_id_from_fields(fields) - - def unique_id_from_node(self, node): - return self._drac_driverinfo.unique_id_from_node(node) - - -DRIVER_INFO = { - # production drivers - r'^(ipmi|.*_ipmitool)$': PrefixedDriverInfo('ipmi', has_port=True, - default_port=623, - hardware_type='ipmi', - mandatory_fields=['pm_addr'] - ), - r'^.*_drac$': PrefixedDriverInfo('drac', has_port=True, - hardware_type='idrac'), - r'^idrac$': iDRACDriverInfo(), - r'^(ilo|.*_ilo)$': PrefixedDriverInfo('ilo', has_port=True, - hardware_type='ilo'), - r'^(irmc|.*_irmc)$': PrefixedDriverInfo('irmc', has_port=True, - hardware_type='irmc'), - r'^redfish$': RedfishDriverInfo(), - r'^xclarity$': PrefixedDriverInfo('xclarity', has_port=True), - # test drivers - r'^staging\-ovirt$': oVirtDriverInfo(), - r'^(staging\-iboot|.*_iboot)$': iBootDriverInfo(), - r'^(staging\-wol|.*wol)$': DriverInfo( - 'wol', - mapping={ - 'pm_addr': 'wol_host', - 'pm_port': 'wol_port', - }, - hardware_type='staging-wol'), - r'^(staging\-amt|.*_amt)$': PrefixedDriverInfo( - 'amt', hardware_type='staging-amt'), - # fake_pxe was used when no management interface was supported, now - # manual-management is used for the same purpose - r'^(manual\-management|fake_pxe|fake_agent)$': DriverInfo( - 'fake', mapping={}, hardware_type='manual-management'), - r'^fake(|\-hardware)$': DriverInfo('fake', mapping={}, - hardware_type='fake-hardware'), -} - - -def find_driver_handler(driver): - for driver_tpl, handler in DRIVER_INFO.items(): - if re.search(driver_tpl, driver) is not None: - return handler - - # FIXME(dtantsur): handle all drivers without hardcoding them - raise exception.InvalidNode('unknown pm_type (ironic driver to use): ' - '%s' % driver) - - -def _find_node_handler(fields): - try: - driver = fields['pm_type'] - except KeyError: - raise exception.InvalidNode('pm_type 
(ironic driver to use) is ' - 'required', node=fields) - return find_driver_handler(driver) - - -def register_ironic_node(node, client): - driver_info = {} - handler = _find_node_handler(node) - - if "kernel_id" in node: - driver_info["deploy_kernel"] = node["kernel_id"] - driver_info["rescue_kernel"] = node["kernel_id"] - if "ramdisk_id" in node: - driver_info["deploy_ramdisk"] = node["ramdisk_id"] - driver_info["rescue_ramdisk"] = node["ramdisk_id"] - - interface_fields = {field: node.pop(field) - for field in _KNOWN_INTERFACE_FIELDS - if field in node} - resource_class = node.pop('resource_class', 'baremetal') - if resource_class != 'baremetal': - LOG.warning('Resource class for a new node will be set to %s, which ' - 'is different from the default "baremetal". A custom ' - 'flavor will be required to deploy on such node', - resource_class) - - driver_info.update(handler.convert(node)) - - mapping = {'cpus': 'cpu', - 'memory_mb': 'memory', - 'local_gb': 'disk', - 'cpu_arch': 'arch', - 'root_device': 'root_device'} - properties = {k: node[v] - for k, v in mapping.items() - if node.get(v) is not None} - - extra = {} - platform = node.get('platform') - if platform: - extra = dict(tripleo_platform=platform) - - if 'capabilities' in node: - caps = capabilities_to_dict(node['capabilities']) - else: - caps = {} - - if 'profile' in node: - caps['profile'] = node['profile'] - - if caps: - properties["capabilities"] = dict_to_capabilities(caps) - - driver = node['pm_type'] - if handler.hardware_type and handler.hardware_type != driver: - LOG.warning('Replacing deprecated driver %(old)s with the ' - 'hardware type %(new)s, please update your inventory', - {'old': driver, 'new': handler.hardware_type}) - driver = handler.hardware_type - - create_map = {"driver": driver, - "properties": properties, - "driver_info": driver_info, - "resource_class": resource_class} - create_map.update(interface_fields) - if extra: - create_map["extra"] = extra - - for field in ('name', 'uuid'): - if field in node: - create_map.update({field: str(node[field])}) - - conductor_group = node.get("conductor_group") - if conductor_group: - create_map["conductor_group"] = conductor_group - node_id = handler.unique_id_from_fields(node) - LOG.debug('Registering node %s with ironic.', node_id) - ironic_node = client.node.create(**create_map) - - for port in node.get('ports', []): - LOG.debug( - 'Creating Bare Metal port for node: %s, with properties: %s.', - ironic_node.uuid, port) - client.port.create( - address=port.get('address'), - physical_network=port.get('physical_network', 'ctlplane'), - local_link_connection=port.get('local_link_connection'), - node_uuid=ironic_node.uuid) - - validation = client.node.validate(ironic_node.uuid) - if not validation.power['result']: - LOG.warning('Node %s did not pass power credentials validation: %s', - ironic_node.uuid, validation.power['reason']) - - return ironic_node - - -def _populate_node_mapping(client): - LOG.debug('Populating list of registered nodes.') - node_map = {'mac': {}, 'pm_addr': {}, 'uuids': set()} - nodes = client.node.list(detail=True) - for node in nodes: - for port in client.node.list_ports(node.uuid): - node_map['mac'][port.address] = node.uuid - - handler = find_driver_handler(node.driver) - unique_id = handler.unique_id_from_node(node) - if unique_id: - node_map['pm_addr'][unique_id] = node.uuid - - node_map['uuids'].add(node.uuid) - - return node_map - - -def _get_node_id(node, handler, node_map): - candidates = set() - for port in node.get('ports', []): - try: 
- candidates.add(node_map['mac'][port['address'].lower()]) - except AttributeError as e: - raise SystemExit( - "Node data has an unexpected value for the mac or port" - " address, or it is missing. If the mac or port address" - " is defined, make sure it is appropriately quoted." - " Error {} -- node {}".format(str(e), node) - ) - except KeyError: - pass - - unique_id = handler.unique_id_from_fields(node) - if unique_id: - try: - candidates.add(node_map['pm_addr'][unique_id]) - except KeyError: - pass - - uuid = node.get('uuid') - if uuid and uuid in node_map['uuids']: - candidates.add(uuid) - - if len(candidates) > 1: - raise exception.InvalidNode('Several candidates found for the same ' - 'node data: %s' % candidates, - node=node) - if candidates: - return list(candidates)[0] - - -_NON_DRIVER_FIELDS = {'cpu': '/properties/cpus', - 'memory': '/properties/memory_mb', - 'disk': '/properties/local_gb', - 'arch': '/properties/cpu_arch', - 'root_device': '/properties/root_device', - 'name': '/name', - 'resource_class': '/resource_class', - 'kernel_id': ['/driver_info/deploy_kernel', - '/driver_info/rescue_kernel'], - 'ramdisk_id': ['/driver_info/deploy_ramdisk', - '/driver_info/rescue_ramdisk'], - 'platform': '/extra/tripleo_platform', - 'conductor_group': '/conductor_group', - } - -_NON_DRIVER_FIELDS.update({field: '/%s' % field - for field in _KNOWN_INTERFACE_FIELDS}) - - -def _update_or_register_ironic_node(node, node_map, client): - handler = _find_node_handler(node) - node_uuid = _get_node_id(node, handler, node_map) - - if node_uuid: - LOG.info('Node %s already registered, updating details.', - node_uuid) - - patched = {} - for field, paths in _NON_DRIVER_FIELDS.items(): - if isinstance(paths, str): - paths = [paths] - - if field in node: - value = node.pop(field) - for path in paths: - patched[path] = value - - if 'capabilities' in node: - caps = capabilities_to_dict(node.pop('capabilities')) - else: - caps = {} - - if 'profile' in node: - caps['profile'] = node.pop('profile') - - if caps: - patched['/properties/capabilities'] = dict_to_capabilities(caps) - - driver_info = handler.convert(node) - for key, value in driver_info.items(): - patched['/driver_info/%s' % key] = value - - node_patch = [] - for key, value in patched.items(): - if key == 'uuid': - continue # not needed during update - node_patch.append({'path': key, 'value': value, 'op': 'add'}) - - ironic_node = client.node.update(node_uuid, node_patch) - else: - ironic_node = register_ironic_node(node, client) - - return ironic_node - - -def _clean_up_extra_nodes(seen, client, remove=False): - all_nodes = {n.uuid for n in client.node.list()} - remove_func = client.node.delete - extra_nodes = all_nodes - {n.uuid for n in seen} - for node in extra_nodes: - if remove: - LOG.debug('Removing extra registered node %s.', node) - remove_func(node) - else: - LOG.debug('Extra registered node %s found.', node) - - -def register_all_nodes(nodes_list, client, remove=False, glance_client=None, - kernel_name=None, ramdisk_name=None): - """Register all nodes in nodes_list in the baremetal service. - - :param nodes_list: The list of nodes to register. - :param client: An Ironic client object. - :param remove: Should nodes not in the list be removed? - :param glance_client: A Glance client object, for fetching ramdisk images. - :param kernel_name: Glance ID of the kernel to use for the nodes. - :param ramdisk_name: Glance ID of the ramdisk to use for the nodes. - :return: list of node objects representing the new nodes.
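    Existing nodes are matched by port MAC address, power management
    address, or UUID (via _populate_node_mapping and _get_node_id), so
    re-running registration updates matching nodes in place rather than
    enrolling duplicates.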
- """ - - LOG.debug('Registering all nodes.') - node_map = _populate_node_mapping(client) - - seen = [] - for node in nodes_list: - node = _update_or_register_ironic_node(node, node_map, client=client) - seen.append(node) - - _clean_up_extra_nodes(seen, client, remove=remove) - - return seen - - -# These fields are treated specially during enrolling/updating -_SPECIAL_NON_DRIVER_FIELDS = {'ports', 'pm_type', 'capabilities'} - - -def validate_nodes(nodes_list): - """Validate all nodes list. - - :param nodes_list: The list of nodes to register. - :raises: InvalidNode on one or more invalid nodes - """ - failures = [] - unique_ids = set() - names = set() - macs = set() - for index, node in enumerate(nodes_list): - # Remove any comment - node.pop("_comment", None) - - handler = _find_node_handler(node) - - try: - handler.validate(node) - except exception.InvalidNode as exc: - failures.append((index, exc)) - - if node.get('mac'): - failures.append((index, 'The "mac" field is not longer supported. ' - 'Please use the "ports" field instead.')) - - for port in node.get('ports', ()): - if not netutils.is_valid_mac(port['address']): - failures.append((index, 'MAC address %s is invalid' % - port['address'])) - - if port['address'] in macs: - failures.append( - (index, 'MAC %s is not unique' % port['address'])) - else: - macs.add(port['address']) - - unique_id = handler.unique_id_from_fields(node) - if unique_id: - if unique_id in unique_ids: - failures.append( - (index, - "Node identified by %s is already present" % unique_id)) - else: - unique_ids.add(unique_id) - - if node.get('name'): - if node['name'] in names: - failures.append( - (index, 'Name "%s" is not unique' % node['name'])) - else: - names.add(node['name']) - - if node.get('platform') and not node.get('arch'): - failures.append( - (index, - 'You have specified a platform without an architecture')) - - try: - capabilities_to_dict(node.get('capabilities')) - except (ValueError, TypeError): - failures.append( - (index, 'Invalid capabilities: %s' % node.get('capabilities'))) - - if node.get('root_device') is not None: - if not isinstance(node['root_device'], dict): - failures.append( - (index, - 'Invalid root device: expected dict, got %s' % - node['root_device'])) - - for field in node: - converted = handler.convert_key(field) - if (converted is None and field not in _NON_DRIVER_FIELDS and - field not in _SPECIAL_NON_DRIVER_FIELDS): - failures.append((index, 'Unknown field %s' % field)) - - if failures: - raise exception.InvalidNode( - '\n'.join('node #%d: %s' % tpl for tpl in failures)) - - -def dict_to_capabilities(caps_dict): - """Convert a dictionary into a string with the capabilities syntax.""" - if isinstance(caps_dict, str): - return caps_dict - - # NOTE(dtantsur): sort capabilities so that their order does not change - # between updates. 
- items = sorted(caps_dict.items(), key=lambda tpl: tpl[0]) - return ','.join(["%s:%s" % (key, value) - for key, value in items - if value is not None]) - - -def capabilities_to_dict(caps): - """Convert the Node's capabilities into a dictionary.""" - if not caps: - return {} - if isinstance(caps, dict): - return caps - return dict([key.split(':', 1) for key in caps.split(',')]) - - -def _get_capability_patch(node, capability, value): - """Return a JSON patch updating a node capability""" - capabilities = node.properties.get('capabilities') - capabilities_dict = capabilities_to_dict(capabilities) - - if value is None: - del capabilities_dict[capability] - else: - capabilities_dict[capability] = value - - capabilities = dict_to_capabilities(capabilities_dict) - - return [{ - "op": "replace", - "path": "/properties/capabilities", - "value": capabilities - }] - - -def update_node_capability(node_uuid, capability, value, client): - """Update a node's capability - - :param node_uuid: The UUID of the node - :param capability: The name of the capability to update - :param value: The new value of the capability (None removes it) - :param client: An Ironic client object - :return: Result of updating the node - """ - node = client.node.get(node_uuid) - patch = _get_capability_patch(node, capability, value) - return client.node.update(node_uuid, patch) - - -def generate_hostmap(baremetal_client, compute_client): - """Create a map between Compute nodes and Baremetal nodes""" - hostmap = {} - for node in compute_client.servers.list(): - try: - bm_node = baremetal_client.node.get_by_instance_uuid(node.id) - for port in baremetal_client.port.list(node=bm_node.uuid): - hostmap[port.address] = {"compute_name": node.name, - "baremetal_name": bm_node.name} - except ironicexceptions.NotFound: - LOG.warning('Baremetal node for server %s not found - skipping it', - node.id) - pass - - if hostmap == {}: - return None - return hostmap - - -def run_nova_cell_v2_discovery(): - return processutils.execute( - '/usr/bin/sudo', - '/bin/nova-manage', - 'cell_v2', - 'discover_hosts', - '--verbose' - ) - - -def get_node_profile(node): - """Return the profile associated with the node """ - - capabilities = node.get('properties').get('capabilities') - capabilities_dict = capabilities_to_dict(capabilities) - - if 'profile' in capabilities_dict: - return capabilities_dict['profile'] - - return None - - -def get_node_hint(node): - """Return the 'capabilities:node' hint associated with the node """ - - capabilities = node.get('properties').get('capabilities') - capabilities_dict = capabilities_to_dict(capabilities) - - if 'node' in capabilities_dict: - return capabilities_dict['node'] - - return None diff --git a/tripleo_common/utils/overcloudrc.py b/tripleo_common/utils/overcloudrc.py deleted file mode 100644 index 9f68e852f..000000000 --- a/tripleo_common/utils/overcloudrc.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
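# The helpers below assemble an overcloudrc shell snippet of roughly this
# shape (host, stack name, and password values are illustrative only):
#
#   export OS_USERNAME=admin
#   export OS_PROJECT_NAME=admin
#   export OS_CLOUD=overcloud
#   export OS_AUTH_URL=http://192.0.2.1:5000
#   export OS_PASSWORD=...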
- -import logging -import urllib - -from tripleo_common import constants -from tripleo_common.utils import common as common_utils - -try: # py3 - from shlex import quote -except ImportError: # py2 - from pipes import quote - -LOG = logging.getLogger(__name__) - - -def get_service_ips(stack): - service_ips = {} - for output in stack.to_dict().get('outputs', {}): - service_ips[output['output_key']] = output['output_value'] - return service_ips - - -def get_endpoint_map(stack): - endpoint_map = {} - for output in stack.to_dict().get('outputs', {}): - if output['output_key'] == 'EndpointMap': - endpoint_map = output['output_value'] - break - return endpoint_map - - -def get_endpoint(key, stack): - endpoint_map = get_endpoint_map(stack) - if endpoint_map: - return endpoint_map[key]['host'] - return get_service_ips(stack).get(key + 'Vip') - - -def get_overcloud_endpoint(stack): - for output in stack.to_dict().get('outputs', {}): - if output['output_key'] == 'KeystoneURL': - return output['output_value'] - - -CLEAR_ENV = """# Clear any old environment that may conflict. -for key in $( set | awk '{FS=\"=\"} /^OS_/ {print $1}' ); do unset $key ; done -""" -CLOUDPROMPT = """ -# Add OS_CLOUD to PS1 -if [ -z "${CLOUDPROMPT_ENABLED:-}" ]; then - export PS1=${PS1:-""} - export PS1=\\${OS_CLOUD:+"(\\$OS_CLOUD)"}\\ $PS1 - export CLOUDPROMPT_ENABLED=1 -fi -""" - - -def _create_overcloudrc(stack, no_proxy, admin_password, region_name): - """Given the stack and proxy settings, create the overcloudrc - - stack: Heat stack containing the deployed overcloud - no_proxy: a comma-separated string of hosts that shouldn't be proxied - """ - endpoint = get_overcloud_endpoint(stack) - admin_vip = get_endpoint('KeystoneAdmin', stack) - - return _create_overcloudrc_from_outputs( - stack.stack_name, endpoint, admin_vip, no_proxy, admin_password, - region_name) - - -def _create_overcloudrc_from_outputs( - stack_name, endpoint, admin_vip, no_proxy, admin_password, - region_name): - """Given the stack outputs and proxy settings, create the overcloudrc""" - - host = urllib.parse.urlparse(endpoint).hostname - no_proxy_list = no_proxy.split(',') - no_proxy_list.extend([host, admin_vip]) - no_proxy_list = map( - common_utils.bracket_ipv6, - [i for i in no_proxy_list if i] - ) - - # Remove duplicated entries - no_proxy_list = sorted(list(set(no_proxy_list))) - - rc_params = { - 'OS_USERNAME': 'admin', - 'OS_PROJECT_NAME': 'admin', - 'OS_USER_DOMAIN_NAME': 'Default', - 'OS_PROJECT_DOMAIN_NAME': 'Default', - 'OS_NO_CACHE': 'True', - 'OS_CLOUD': stack_name, - 'no_proxy': ','.join(no_proxy_list), - 'PYTHONWARNINGS': ('ignore:Certificate has no, ignore:A true ' - 'SSLContext object is not available'), - 'OS_AUTH_TYPE': 'password', - 'OS_PASSWORD': admin_password, - 'OS_AUTH_URL': endpoint.replace('/v2.0', ''), - 'OS_IDENTITY_API_VERSION': constants.DEFAULT_IDENTITY_API_VERSION, - 'OS_COMPUTE_API_VERSION': constants.DEFAULT_COMPUTE_API_VERSION, - 'OS_IMAGE_API_VERSION': constants.DEFAULT_IMAGE_API_VERSION, - 'OS_VOLUME_API_VERSION': constants.DEFAULT_VOLUME_API_VERSION, - 'OS_REGION_NAME': region_name or 'regionOne' - } - - overcloudrc = CLEAR_ENV - for key, value in rc_params.items(): - line = "export %(key)s=%(value)s\n" % {'key': key, - 'value': quote(value)} - overcloudrc = overcloudrc + line - overcloudrc = overcloudrc + CLOUDPROMPT - - return { - "overcloudrc": overcloudrc, - } diff --git a/tripleo_common/utils/parameters.py b/tripleo_common/utils/parameters.py deleted file mode 100644 index 056d0213e..000000000 --- 
a/tripleo_common/utils/parameters.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from tripleo_common.utils import nodes - - -FLAVOR_ROLE_EXCEPTIONS = { - 'object-storage': 'swift-storage' -} - -PARAM_EXCEPTIONS = { - 'control': { - 'count': 'ControllerCount', - 'flavor': 'OvercloudControlFlavor' - }, - 'object-storage': { - 'count': 'ObjectStorageCount', - 'flavor': 'OvercloudSwiftStorageFlavor' - } -} - - -def get_node_count(role, baremetal_client): - count = 0 - for n in baremetal_client.node.list(): - node = baremetal_client.node.get(n.uuid) - caps = nodes.capabilities_to_dict(node.properties['capabilities']) - - if caps.get('profile') == role: - count += 1 - return count - - -def get_flavor(role, compute_client): - for f in compute_client.flavors.list(): - flavor = compute_client.flavors.get(f.id) - # The flavor's capabilities:profile and the role must match, - # unless the role has a different profile name (as defined in - # FLAVOR_ROLE_EXCEPTIONS). - if (flavor.get_keys().get('capabilities:profile') == - FLAVOR_ROLE_EXCEPTIONS.get(role, role)): - return flavor.name - return 'baremetal' - - -def _get_count_key(role): - return '%sCount' % role.title().replace('-', '') - - -def _get_flavor_key(role): - return 'Overcloud%sFlavor' % role.title().replace('-', '') - - -def set_count_and_flavor_params(role, baremetal_client, compute_client): - """Returns the parameters for role count and flavor. - - The parameter names are derived from the role name: - - <Role>Count - Overcloud<Role>Flavor - - Exceptions from this rule (the control and object-storage roles) are - defined in the PARAM_EXCEPTIONS dict. - """ - node_count = get_node_count(role, baremetal_client) - - if node_count == 0: - flavor = 'baremetal' - else: - flavor = get_flavor(role, compute_client) - - if role in PARAM_EXCEPTIONS: - return { - PARAM_EXCEPTIONS[role]['count']: node_count, - PARAM_EXCEPTIONS[role]['flavor']: flavor - } - return { - _get_count_key(role): node_count, - _get_flavor_key(role): flavor - } - - -def convert_docker_params(stack_env=None): - """Convert Docker* params to "Container" variants for compatibility. - - """ - - if stack_env: - pd = stack_env.get('parameter_defaults', {}) - for k, v in pd.copy().items(): - if k.startswith('Docker') and k.endswith('Image'): - name = "Container%s" % k[6:] - pd.setdefault(name, v) - # TODO(dprince) add other Docker* conversions here once - # this is wired in diff --git a/tripleo_common/utils/passwords.py b/tripleo_common/utils/passwords.py deleted file mode 100644 index bfc7d6fb4..000000000 --- a/tripleo_common/utils/passwords.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
diff --git a/tripleo_common/utils/passwords.py b/tripleo_common/utils/passwords.py
deleted file mode 100644
index bfc7d6fb4..000000000
--- a/tripleo_common/utils/passwords.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2016 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import base64
-from cryptography.hazmat.primitives import serialization
-from cryptography.hazmat.primitives.asymmetric import rsa
-from cryptography.hazmat.primitives.asymmetric import ed25519
-from cryptography.hazmat.backends import default_backend
-import hashlib
-import hmac
-import logging
-import os
-import struct
-import time
-import uuid
-import urllib.parse
-
-import passlib.pwd
-import yaml
-
-from tripleo_common import constants
-
-
-_MIN_PASSWORD_SIZE = 25
-KEYSTONE_FERNET_REPO = '/etc/keystone/fernet-keys/'
-LOG = logging.getLogger(__name__)
-
-
-def generate_passwords(stack_env=None,
-                       rotate_passwords=False,
-                       rotate_pw_list=None):
-    """Create the passwords needed for deploying OpenStack via t-h-t.
-
-    This will create the set of passwords required by the undercloud and
-    overcloud installers that use tripleo-heat-templates and return them
-    as a dict.
-    """
-
-    if not stack_env:
-        stack_env = {}
-    passwords = {}
-    db_ed25519 = stack_env.get('parameter_defaults', {}) \
-        .get('EnableMysqlAuthEd25519', False)
-    if db_ed25519:
-        passwords['EnableMysqlAuthEd25519'] = True
-    for name in constants.PASSWORD_PARAMETER_NAMES:
-        no_rotate = (not rotate_passwords or (
-            rotate_pw_list and name not in rotate_pw_list)
-            or name in constants.DO_NOT_ROTATE_LIST)
-        if no_rotate and (
-                stack_env and name in stack_env.get(
-                    'parameter_defaults', {})):
-            current_password = stack_env['parameter_defaults'][name]
-            passwords[name] = current_password
-            if name in constants.DB_PASSWORD_PARAMETER_NAMES and db_ed25519:
-                db_uri = db_uri_from_ed25519_password(current_password)
-                passwords[name + 'Database'] = db_uri
-        elif (name == 'KeystonePassword' and stack_env and
-                'AdminToken' in stack_env.get('parameter_defaults', {})):
-            # NOTE(tkajinam): AdminToken was renamed to KeystonePassword
-            passwords[name] = stack_env['parameter_defaults']['AdminToken']
-        elif name in ('CephClientKey', 'CephManilaClientKey', 'CephRgwKey'):
-            # CephX keys aren't random strings
-            passwords[name] = create_cephx_key()
-        elif name == "CephClusterFSID":
-            # The FSID must be a UUID
-            passwords[name] = str(uuid.uuid4())
-        # Since by default passlib.pwd.genword uses all digits and ascii
-        # upper & lowercase letters, it provides ~5.95 bits of entropy per
-        # character. Make the length of the default authkey 4096 bytes,
-        # which should give us ~24000 bits of randomness.
-        elif name.startswith("PacemakerRemoteAuthkey"):
-            passwords[name] = passlib.pwd.genword(
-                length=4096)
-        # The undercloud's SnmpdReadonlyUserPassword is stored in a mistral
-        # env for the overcloud.
-        elif name == 'SnmpdReadonlyUserPassword':
-            passwords[name] = get_snmpd_readonly_user_password()
-        elif name in ('KeystoneCredential0', 'KeystoneCredential1'):
-            passwords[name] = create_keystone_credential()
-        elif name == 'KeystoneFernetKeys':
-            passwords[name] = create_fernet_keys_repo_structure_and_keys()
-        elif name == 'MigrationSshKey':
-            passwords[name] = create_ssh_keypair()
-        elif name == 'BarbicanSimpleCryptoKek':
-            passwords[name] = create_keystone_credential()
-        elif name in constants.DB_PASSWORD_PARAMETER_NAMES and db_ed25519:
-            # root and clustercheck passwords can't contain a null
-            # byte due to a mariadb limitation in config file
-            # TODO: bytes used as word separators can't be used either
-            # as long as password is used as a shell parameter
-            if name == 'MysqlRootPassword' or \
-                    name == 'MysqlClustercheckPassword':
-                skip_bytes = [0, ord(' '), ord('\t'), ord('\n')]
-                passwords[name] = create_ed25519_password(skip_bytes)
-            else:
-                ed25519_password = create_ed25519_password()
-                db_uri = db_uri_from_ed25519_password(ed25519_password)
-                passwords[name] = ed25519_password
-                passwords[name + 'Database'] = db_uri
-        elif name.startswith("MysqlRootPassword"):
-            passwords[name] = passlib.pwd.genword(length=10)
-        elif name.startswith("RabbitCookie"):
-            passwords[name] = passlib.pwd.genword(length=20)
-        elif name.startswith("PcsdPassword"):
-            passwords[name] = passlib.pwd.genword(length=16)
-        elif name.startswith("HorizonSecret"):
-            passwords[name] = passlib.pwd.genword(length=10)
-        elif name.startswith("HeatAuthEncryptionKey"):
-            passwords[name] = passlib.pwd.genword(length=32)
-        elif name.startswith("OctaviaServerCertsKeyPassphrase"):
-            passwords[name] = passlib.pwd.genword(length=32)
-        elif name.startswith("DesignateRndcKey"):
-            passwords[name] = create_rndc_key_secret()
-        else:
-            passwords[name] = passlib.pwd.genword(length=_MIN_PASSWORD_SIZE)
-    return passwords
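The default branch above leans on passlib's word generator; a minimal sketch
of the same call pattern (requires the passlib package; the assertion assumes
passlib's default alphanumeric character set):

import passlib.pwd

# Same call pattern as the fall-through branch above; 25 matches
# _MIN_PASSWORD_SIZE in the removed module.
password = passlib.pwd.genword(length=25)
assert len(password) == 25 and password.isalnum()
print(password)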
-
-
-def create_fernet_keys_repo_structure_and_keys():
-    return {
-        KEYSTONE_FERNET_REPO + '0': {
-            'content': create_keystone_credential()},
-        KEYSTONE_FERNET_REPO + '1': {
-            'content': create_keystone_credential()}
-    }
-
-
-def create_cephx_key():
-    # NOTE(gfidente): Taken from
-    # https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
-    key = os.urandom(16)
-    header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
-    return base64.b64encode(header + key).decode('utf-8')
-
-
-def check_role_exists(available_roles, requested_roles):
-    role_check = set([role.split(':')[0] for role in requested_roles]) - \
-        set(available_roles)
-    if len(role_check) > 0:
-        msg = "Invalid roles requested: {}\nValid Roles:\n{}".format(
-            ','.join(role_check), '\n'.join(available_roles)
-        )
-        raise NotFound(msg)
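The CephX helper packs a little-endian header (key type, creation seconds,
nanoseconds, key length) in front of 16 random bytes; a small self-contained
sketch that builds a key the same way and unpacks it to show the layout:

import base64
import os
import struct
import time

key = os.urandom(16)
# <hiih: le16 type, le32 created secs, le32 created nsecs, le16 key length
header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
cephx = base64.b64encode(header + key).decode('utf-8')

ktype, created, nsec, klen = struct.unpack(
    '<hiih', base64.b64decode(cephx)[:12])
assert (ktype, nsec, klen) == (1, 0, 16)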
-
-
-def generate_role_with_colon_format(content, defined_role, generated_role):
-    """Generate role data with input as Compute:ComputeA
-
-    In Compute:ComputeA, the defined role 'Compute' can be added to
-    roles_data.yaml by changing the name to 'ComputeA'. This allows
-    duplicating the defined roles so that hardware-specific nodes can be
-    targeted with specific roles.
-
-    :param content defined role file's content
-    :param defined_role defined role's name
-    :param generated_role role's name to generate from defined role
-    :exception ValueError if generated role name is of invalid format
-    """
-
-    # "Compute:Compute" is invalid format
-    if generated_role == defined_role:
-        msg = ("Generated role name cannot be the same as existing role "
-               "name ({}) with colon format".format(defined_role))
-        raise ValueError(msg)
-
-    # "Compute:A" is invalid format
-    if not generated_role.startswith(defined_role):
-        msg = ("Generated role name ({}) should start with existing role "
-               "name ({})".format(generated_role, defined_role))
-        raise ValueError(msg)
-
-    name_line = "name:%s" % defined_role
-    name_line_match = False
-    processed = []
-    for line in content.split('\n'):
-        stripped_line = line.replace(' ', '')
-        # Only 'name' needs to be replaced in the existing role
-        if name_line in stripped_line:
-            line = line.replace(defined_role, generated_role)
-            name_line_match = True
-        processed.append(line)
-
-    if not name_line_match:
-        raise ValueError("Unable to find the 'name' line in the role "
-                         "content")
-
-    return '\n'.join(processed)
-
-
-def generate_roles_data_from_directory(directory, roles, validate=True):
-    """Generate a roles data file using roles from a local path
-
-    :param directory local filesystem path to the roles
-    :param roles ordered list of roles
-    :param validate validate the metadata format in the role yaml files
-    :returns string contents of the roles_data.yaml
-    """
-    available_roles = get_roles_list_from_directory(directory)
-    check_role_exists(available_roles, roles)
-    output = StringIO()
-
-    header = ["#" * 79,
-              "# File generated by TripleO",
-              "#" * 79,
-              ""]
-    output.write("\n".join(header))
-
-    for role in roles:
-        defined_role = role.split(':')[0]
-        file_path = os.path.join(directory, "{}.yaml".format(defined_role))
-        if validate:
-            validate_role_yaml(role_path=file_path)
-        with open(file_path, "r") as f:
-            if ':' in role:
-                generated_role = role.split(':')[1]
-                content = generate_role_with_colon_format(f.read(),
                                                          defined_role,
                                                          generated_role)
-                output.write(content)
-            else:
-                shutil.copyfileobj(f, output)
-
-    return output.getvalue()
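A self-contained sketch of the colon-format rename implemented above, run
against a trimmed, hypothetical role definition; only the 'name' line is
rewritten, other occurrences of the role name are left untouched:

content = """- name: Compute
  description: Basic Compute Node role
  HostnameFormatDefault: '%stackname%-novacompute-%index%'
"""

defined_role, generated_role = 'Compute', 'ComputeA'
processed = []
for line in content.split('\n'):
    # Whitespace-insensitive match, as in the removed function
    if 'name:%s' % defined_role in line.replace(' ', ''):
        line = line.replace(defined_role, generated_role)
    processed.append(line)
print('\n'.join(processed))
# - name: ComputeA   (the description keeps the original role name)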
-
-
-def validate_role_yaml(role_data=None, role_path=None):
-    """Basic role yaml validation
-
-    :param role_data the role yaml in string form
-    :param role_path the path to the yaml file to validate.
-    :exception RoleMetadataError
-    :returns parsed role yaml object
-    """
-    if role_data and role_path or (not role_data and not role_path):
-        raise ValueError('Either role_data OR role_path must be specified')
-
-    if role_path:
-        with open(role_path, 'r') as f:
-            role_data = f.read()
-
-    try:
-        role = yaml.safe_load(role_data)[0]
-    except yaml.YAMLError:
-        raise RoleMetadataError('Unable to parse role yaml')
-
-    schema = {
-        'name': {'type': str},
-        'CountDefault': {'type': int},
-        'HostnameFormatDefault': {'type': str},
-        'upgrade_batch_size': {'type': int},
-        'ServicesDefault': {'type': list},
-        'tags': {'type': list},
-        'description': {'type': str},
-        'networks': {'type': [list, dict]},
-        'networks_skip_config': {'type': list},
-    }
-
-    if 'name' not in role:
-        raise RoleMetadataError('Role name is missing from the role')
-
-    # validate metadata fields are of the expected types
-    for k in schema:
-        if k in role:
-            if k == 'networks':
-                if not (isinstance(role[k], schema[k]['type'][0]) or
-                        isinstance(role[k], schema[k]['type'][1])):
-                    msg = "Role '{}': {} is not of expected type {}".format(
-                        role['name'], k, schema[k]['type'])
-                    raise RoleMetadataError(msg)
-            else:
-                if not isinstance(role[k], schema[k]['type']):
-                    msg = "Role '{}': {} is not of expected type {}".format(
-                        role['name'], k, schema[k]['type'])
-                    raise RoleMetadataError(msg)
-    return role
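A compact sketch of the same type-checking idea against a minimal,
hypothetical role document (requires PyYAML; the trimmed schema covers only
three of the keys listed above):

import yaml

role = yaml.safe_load("""
- name: Compute
  CountDefault: 1
  ServicesDefault:
    - OS::TripleO::Services::NovaCompute
""")[0]

schema = {'name': str, 'CountDefault': int, 'ServicesDefault': list}
for key, expected in schema.items():
    if key in role and not isinstance(role[key], expected):
        raise TypeError("Role '%s': %s is not of expected type %s"
                        % (role['name'], key, expected))
print('role metadata OK')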
diff --git a/tripleo_common/utils/stack.py b/tripleo_common/utils/stack.py
deleted file mode 100644
index 90e805416..000000000
--- a/tripleo_common/utils/stack.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright 2017 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import logging
-import uuid
-
-
-LOG = logging.getLogger(__name__)
-
-
-def _process_params(flattened, params):
-    for item in params:
-        if item not in flattened['parameters']:
-            param_obj = {}
-            for key, value in params.get(item).items():
-                camel_case_key = key[0].lower() + key[1:]
-                param_obj[camel_case_key] = value
-            param_obj['name'] = item
-            flattened['parameters'][item] = param_obj
-    return list(params)
-
-
-def _flat_it(flattened, name, data):
-    key = str(uuid.uuid4())
-    value = {}
-    value.update({
-        'name': name,
-        'id': key
-    })
-    if 'Type' in data:
-        value['type'] = data['Type']
-    if 'Description' in data:
-        value['description'] = data['Description']
-    if 'Parameters' in data:
-        value['parameters'] = _process_params(flattened,
-                                              data['Parameters'])
-    if 'ParameterGroups' in data:
-        value['parameter_groups'] = data['ParameterGroups']
-    if 'NestedParameters' in data:
-        nested = data['NestedParameters']
-        nested_ids = []
-        for nested_key in nested.keys():
-            nested_data = _flat_it(flattened, nested_key,
-                                   nested.get(nested_key))
-            # nested_data will always have one key (and only one)
-            nested_ids.append(list(nested_data)[0])
-
-        value['resources'] = nested_ids
-
-    flattened['resources'][key] = value
-    return {key: value}
-
-
-def preview_stack_and_network_configs(heat, processed_data,
-                                      container, role_name):
-    # The stacks.preview method raises a validation error if the stack is
-    # already deployed, so rename the container here to get preview data.
-    container_temp = container + "-TEMP"
-    fields = {
-        'template': processed_data['template'],
-        'files': processed_data['files'],
-        'environment': processed_data['environment'],
-        'stack_name': container_temp,
-    }
-    preview_data = heat.stacks.preview(**fields)
-    return get_network_config(preview_data, container_temp, role_name)
-
-
-def get_network_config(preview_data, stack_name, role_name):
-    result = None
-    if preview_data:
-        for res in preview_data.resources:
-            net_script = process_preview_list(res, stack_name,
-                                              role_name)
-            if net_script:
-                ns_len = len(net_script)
-                start_index = (net_script.find(
-                    "echo '{\"network_config\"", 0, ns_len) + 6)
-                # In file network/scripts/run-os-net-config.sh
-                end_str = "' > /etc/os-net-config/config.json"
-                end_index = net_script.find(end_str, start_index, ns_len)
-                if (end_index > start_index):
-                    net_config = net_script[start_index:end_index]
-                    if net_config:
-                        result = json.loads(net_config)
-                break
-    if not result:
-        err_msg = ("Unable to determine network config for role '%s'."
-                   % role_name)
-        LOG.exception(err_msg)
-        raise RuntimeError(err_msg)
-    return result
-
-
-def process_preview_list(res, stack_name, role_name):
-    if type(res) == list:
-        for item in res:
-            out = process_preview_list(item, stack_name, role_name)
-            if out:
-                return out
-    elif type(res) == dict:
-        res_stack_name = stack_name + '-' + role_name
-        if res['resource_name'] == "OsNetConfigImpl" and \
-                res['resource_identity'] and \
-                res_stack_name in res['resource_identity']['stack_name']:
-            return res['properties']['config']
-    return None
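The slicing in get_network_config is the subtle part: it cuts the JSON out of
a generated shell script between an echo and its redirect. A standalone
sketch with a hypothetical script body:

import json

net_script = """#!/bin/bash
echo '{"network_config": [{"type": "interface", "name": "nic1"}]}' > /etc/os-net-config/config.json
"""

# find() locates the echo; +6 skips past "echo '" to the opening brace
start = net_script.find("echo '{\"network_config\"") + 6
end = net_script.find("' > /etc/os-net-config/config.json", start)
config = json.loads(net_script[start:end])
print(config['network_config'][0]['name'])   # nic1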
diff --git a/tripleo_common/utils/stack_parameters.py b/tripleo_common/utils/stack_parameters.py
deleted file mode 100644
index 86182c6a6..000000000
--- a/tripleo_common/utils/stack_parameters.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2016 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-
-from tripleo_common.utils import stack as stack_utils
-
-LOG = logging.getLogger(__name__)
-
-
-def generate_fencing_parameters(nodes_json, delay,
-                                ipmi_level, ipmi_cipher, ipmi_lanplus):
-    fence_params = {"EnableFencing": True, "FencingConfig": {}}
-    devices = []
-
-    for node in nodes_json:
-        node_data = {}
-        params = {}
-        if "ports" in node:
-            # Not all Ironic drivers present a MAC address, so we only
-            # capture it if it's present
-            mac_addr = node['ports'][0]['address'].lower()
-            node_data["host_mac"] = mac_addr
-
-        # Build up fencing parameters based on which Ironic driver this
-        # node is using
-        try:
-            # Deprecated classic drivers (pxe_ipmitool, etc)
-            driver_proto = node['pm_type'].split('_')[1]
-        except IndexError:
-            # New-style hardware types (ipmi, etc)
-            driver_proto = node['pm_type']
-
-        if driver_proto in {'ipmi', 'ipmitool', 'drac', 'idrac', 'ilo',
-                            'redfish'}:
-            if driver_proto == "redfish":
-                node_data["agent"] = "fence_redfish"
-                params["systems_uri"] = node["pm_system_id"]
-            else:
-                node_data["agent"] = "fence_ipmilan"
-                if ipmi_lanplus:
-                    params["lanplus"] = ipmi_lanplus
-            params["ipaddr"] = node["pm_addr"]
-            params["passwd"] = node["pm_password"]
-            params["login"] = node["pm_user"]
-            if "pm_port" in node:
-                params["ipport"] = node["pm_port"]
-            if "redfish_verify_ca" in node:
-                if node["redfish_verify_ca"] == "false":
-                    params["ssl_insecure"] = "true"
-                else:
-                    params["ssl_insecure"] = "false"
-            if delay:
-                params["delay"] = delay
-            if ipmi_cipher:
-                params["cipher"] = ipmi_cipher
-            if ipmi_level:
-                params["privlvl"] = ipmi_level
-        elif driver_proto in {'staging-ovirt'}:
-            # fence_rhevm
-            node_data["agent"] = "fence_rhevm"
-            params["ipaddr"] = node["pm_addr"]
-            params["passwd"] = node["pm_password"]
-            params["login"] = node["pm_user"]
-            params["port"] = node["pm_vm_name"]
-            params["ssl"] = 1
-            params["ssl_insecure"] = 1
-            if delay:
-                params["delay"] = delay
-        else:
-            error = ("Unable to generate fencing parameters for %s" %
-                     node["pm_type"])
-            raise ValueError(error)
-
-        node_data["params"] = params
-        devices.append(node_data)
-
-    fence_params["FencingConfig"]["devices"] = devices
-    return {"parameter_defaults": fence_params}
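A minimal sketch of the device mapping produced above for a single,
hypothetical IPMI node; it mirrors the structure the removed helper returns
rather than calling it (all values are placeholders, not real credentials):

node = {'pm_type': 'ipmi', 'pm_addr': '192.0.2.20',
        'pm_user': 'admin', 'pm_password': 'secret'}

device = {'agent': 'fence_ipmilan',
          'params': {'ipaddr': node['pm_addr'],
                     'login': node['pm_user'],
                     'passwd': node['pm_password'],
                     'lanplus': True}}
fencing = {'parameter_defaults': {'EnableFencing': True,
                                  'FencingConfig': {'devices': [device]}}}
print(fencing['parameter_defaults']['FencingConfig']['devices'][0]['agent'])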
-
-
-def get_network_configs(heat, processed_data, container, role_name):
-    # A default temporary value is used when the user supplied no interface
-    # routes for the role networks, so the network config can still be found.
-    role_networks = processed_data['template'].get('resources', {}).get(
-        role_name + 'GroupVars', {}).get('properties', {}).get(
-            'value', {}).get('role_networks', [])
-    for nw in role_networks:
-        rt = nw + 'InterfaceRoutes'
-        if rt not in processed_data['environment']['parameter_defaults']:
-            processed_data['environment']['parameter_defaults'][rt] = [[]]
-
-    network_configs = stack_utils.preview_stack_and_network_configs(
-        heat, processed_data, container, role_name)
-    return network_configs
diff --git a/zuul.d/cross-jobs.yaml b/zuul.d/cross-jobs.yaml
deleted file mode 100644
index 1bf67e81a..000000000
--- a/zuul.d/cross-jobs.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-- job:
-    name: tripleo-cross-tripleoclient-py39
-    parent: openstack-tox-py39
-    description: Run cross-project tests on tripleoclient from HEAD with py39
-    vars:
-      zuul_work_dir: src/opendev.org/openstack/python-tripleoclient
-    required-projects:
-      - name: openstack/python-tripleoclient
-      - name: openstack/tripleo-common
diff --git a/zuul.d/layout.yaml b/zuul.d/layout.yaml
deleted file mode 100644
index 80084f6b8..000000000
--- a/zuul.d/layout.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-- project:
-    queue: tripleo
-    templates:
-      - check-requirements
-      - openstack-cover-jobs
-      - openstack-python3-jobs
-      - publish-openstack-docs-pti
-      - release-notes-jobs-python3
-      - tripleo-multinode-container-minimal-pipeline
-      - tripleo-undercloud-jobs-pipeline
-      - tripleo-buildimage-jobs
-      - tripleo-standalone-scenarios-pipeline
-      - tripleo-upgrades-master-pipeline
-    vars:
-      ensure_global_symlinks: true
-    check:
-      jobs:
-        - openstack-tox-linters
-        - tripleo-cross-tripleoclient-py39
-        - tripleo-ci-centos-9-content-provider:
-            dependencies: &deps_unit_lint
-              - openstack-tox-linters
-              - openstack-tox-py310
-        - tripleo-buildimage-overcloud-full-centos-9:
-            dependencies: *deps_unit_lint
-        - tripleo-buildimage-ironic-python-agent-centos-9:
-            dependencies: *deps_unit_lint
-        - tripleo-buildimage-overcloud-hardened-uefi-full-centos-9:
-            dependencies: *deps_unit_lint
-
-
-    gate:
-      jobs:
-        - openstack-tox-linters
-        - tripleo-cross-tripleoclient-py39