From 434b65c6ef669229ff69923d46583a0cb833897f Mon Sep 17 00:00:00 2001 From: Jeffrey Zhang Date: Mon, 30 Apr 2018 21:26:39 +0800 Subject: [PATCH] Retire kolla-kubernetes project - step 3 remove project content Depends-On: https://review.openstack.org/567779 Needed-By: https://review.openstack.org/568174 Change-Id: If5bdd602d5f5c8addba70235ac41c0a52cef2c11 --- .coveragerc | 7 - .gitignore | 60 - .mailmap | 3 - .testr.conf | 7 - .zuul.d/legacy.yaml | 351 -- CONTRIBUTING.rst | 17 - Dockerfile | 22 - HACKING.rst | 4 - LICENSE | 176 - README.rst | 90 +- ansible/action_plugins/merge_configs.py | 156 - ansible/action_plugins/merge_yaml.py | 96 - ansible/destroy.yml | 6 - ansible/group_vars/all.yml | 559 --- ansible/library/merge_configs.py | 51 - ansible/library/merge_yaml.py | 51 - ansible/roles/ceph/defaults/main.yml | 37 - ansible/roles/ceph/tasks/config.yml | 33 - ansible/roles/ceph/tasks/main.yml | 2 - ansible/roles/ceph/templates/ceph-mon.json.j2 | 43 - ansible/roles/ceph/templates/ceph-osd.json.j2 | 21 - ansible/roles/ceph/templates/ceph-rgw.json.j2 | 23 - ansible/roles/ceph/templates/ceph.conf.j2 | 46 - ansible/roles/cinder/defaults/main.yml | 160 - ansible/roles/cinder/tasks/config.yml | 65 - ansible/roles/cinder/tasks/main.yml | 2 - .../roles/cinder/templates/cinder-api.json.j2 | 30 - .../cinder/templates/cinder-backup.json.j2 | 37 - .../cinder/templates/cinder-scheduler.json.j2 | 30 - .../cinder/templates/cinder-volume.json.j2 | 51 - ansible/roles/cinder/templates/cinder.conf.j2 | 153 - ansible/roles/common/defaults/main.yml | 23 - .../roles/destroy/tasks/label_iterator.yml | 5 - ansible/roles/destroy/tasks/main.yml | 86 - ansible/roles/glance/defaults/main.yml | 62 - ansible/roles/glance/tasks/config.yml | 49 - ansible/roles/glance/tasks/main.yml | 2 - .../roles/glance/templates/glance-api.conf.j2 | 71 - .../roles/glance/templates/glance-api.json.j2 | 37 - .../glance/templates/glance-registry.conf.j2 | 48 - .../glance/templates/glance-registry.json.j2 | 25 - 
ansible/roles/heat/defaults/main.yml | 72 - ansible/roles/heat/tasks/config.yml | 55 - ansible/roles/heat/tasks/main.yml | 2 - ansible/roles/heat/templates/_deprecated.yaml | 4 - .../roles/heat/templates/heat-api-cfn.json.j2 | 25 - ansible/roles/heat/templates/heat-api.json.j2 | 25 - .../roles/heat/templates/heat-engine.json.j2 | 31 - ansible/roles/heat/templates/heat.conf.j2 | 83 - ansible/roles/horizon/defaults/main.yml | 53 - ansible/roles/horizon/tasks/config.yml | 88 - ansible/roles/horizon/tasks/main.yml | 2 - .../roles/horizon/templates/horizon.conf.j2 | 76 - .../roles/horizon/templates/horizon.json.j2 | 54 - .../roles/horizon/templates/local_settings.j2 | 815 ----- ansible/roles/ironic/defaults/main.yml | 66 - ansible/roles/ironic/tasks/config.yml | 90 - ansible/roles/ironic/tasks/main.yml | 2 - .../roles/ironic/templates/ironic-api.json.j2 | 25 - .../templates/ironic-conductor-tftp.json.j2 | 11 - .../ironic/templates/ironic-conductor.json.j2 | 35 - .../ironic/templates/ironic-dnsmasq.conf.j2 | 9 - .../ironic/templates/ironic-dnsmasq.json.j2 | 11 - .../templates/ironic-inspector-tftp.json.j2 | 18 - .../ironic/templates/ironic-inspector.conf.j2 | 41 - .../ironic/templates/ironic-inspector.json.j2 | 25 - ansible/roles/ironic/templates/ironic.conf.j2 | 63 - .../ironic/templates/pxelinux.default.j2 | 7 - ansible/roles/iscsi/defaults/main.yml | 13 - ansible/roles/iscsi/tasks/config.yml | 34 - ansible/roles/iscsi/tasks/main.yml | 2 - ansible/roles/iscsi/templates/iscsid.json.j2 | 4 - ansible/roles/iscsi/templates/tgtd.json.j2 | 4 - ansible/roles/keystone/defaults/main.yml | 38 - ansible/roles/keystone/tasks/config.yml | 111 - ansible/roles/keystone/tasks/main.yml | 2 - .../keystone/templates/keystone-paste.ini.j2 | 83 - .../roles/keystone/templates/keystone.conf.j2 | 55 - .../roles/keystone/templates/keystone.json.j2 | 49 - .../keystone/templates/wsgi-keystone.conf.j2 | 42 - ansible/roles/mariadb/defaults/main.yml | 15 - ansible/roles/mariadb/tasks/config.yml | 
27 - ansible/roles/mariadb/tasks/main.yml | 2 - ansible/roles/mariadb/templates/galera.cnf.j2 | 41 - .../roles/mariadb/templates/mariadb.json.j2 | 24 - ansible/roles/memcached/defaults/main.yml | 25 - ansible/roles/memcached/tasks/config.yml | 16 - ansible/roles/memcached/tasks/main.yml | 2 - .../memcached/templates/memcached.json.j2 | 4 - ansible/roles/neutron/defaults/main.yml | 262 -- .../neutron/tasks/config-neutron-fake.yml | 52 - ansible/roles/neutron/tasks/config.yml | 255 -- ansible/roles/neutron/tasks/main.yml | 2 - .../neutron/templates/bgp_dragent.ini.j2 | 3 - .../roles/neutron/templates/dhcp_agent.ini.j2 | 6 - .../roles/neutron/templates/dnsmasq.conf.j2 | 8 - .../neutron/templates/fwaas_driver.ini.j2 | 1 - .../roles/neutron/templates/l3_agent.ini.j2 | 16 - .../neutron/templates/lbaas_agent.ini.j2 | 6 - .../neutron/templates/metadata_agent.ini.j2 | 6 - .../roles/neutron/templates/ml2_conf.ini.j2 | 72 - .../templates/neutron-bgp-dragent.json.j2 | 36 - .../templates/neutron-dhcp-agent.json.j2 | 48 - .../templates/neutron-l3-agent.json.j2 | 48 - .../templates/neutron-lbaas-agent.json.j2 | 48 - .../neutron-linuxbridge-agent.json.j2 | 31 - .../templates/neutron-metadata-agent.json.j2 | 42 - .../neutron-openvswitch-agent.json.j2 | 31 - .../neutron/templates/neutron-server.json.j2 | 43 - .../templates/neutron-vpnaas-agent.json.j2 | 54 - .../roles/neutron/templates/neutron.conf.j2 | 134 - .../neutron/templates/neutron_lbaas.conf.j2 | 17 - .../neutron/templates/neutron_vpnaas.conf.j2 | 4 - .../templates/openvswitch-db-server.json.j2 | 4 - .../templates/openvswitch-vswitchd.json.j2 | 8 - .../neutron/templates/vpnaas_agent.ini.j2 | 11 - ansible/roles/nova/defaults/main.yml | 238 -- ansible/roles/nova/tasks/config.yml | 115 - ansible/roles/nova/tasks/main.yml | 2 - ansible/roles/nova/templates/id_rsa | 1 - ansible/roles/nova/templates/id_rsa.pub | 1 - ansible/roles/nova/templates/libvirtd.conf.j2 | 13 - ansible/roles/nova/templates/nova-api.json.j2 | 25 - 
.../templates/nova-compute-ironic.json.j2 | 25 - .../roles/nova/templates/nova-compute.json.j2 | 36 - .../nova/templates/nova-conductor.json.j2 | 25 - .../nova/templates/nova-consoleauth.json.j2 | 25 - .../roles/nova/templates/nova-libvirt.json.j2 | 29 - .../nova/templates/nova-novncproxy.json.j2 | 25 - .../nova/templates/nova-scheduler.json.j2 | 25 - .../nova/templates/nova-serialproxy.json.j2 | 18 - .../templates/nova-spicehtml5proxy.json.j2 | 25 - ansible/roles/nova/templates/nova-ssh.json.j2 | 29 - ansible/roles/nova/templates/nova.conf.j2 | 228 -- .../nova/templates/placement-api-wsgi.conf.j2 | 28 - .../nova/templates/placement-api.json.j2 | 33 - ansible/roles/nova/templates/qemu.conf.j2 | 13 - ansible/roles/nova/templates/secret.xml.j2 | 6 - ansible/roles/nova/templates/ssh_config.j2 | 4 - ansible/roles/nova/templates/sshd_config.j2 | 5 - ansible/roles/rabbitmq/defaults/main.yml | 17 - ansible/roles/rabbitmq/tasks/config.yml | 25 - ansible/roles/rabbitmq/tasks/main.yml | 2 - .../rabbitmq/templates/definitions.json.j2 | 14 - .../templates/rabbitmq-clusterer.config.j2 | 8 - .../rabbitmq/templates/rabbitmq-env.conf.j2 | 19 - .../rabbitmq/templates/rabbitmq.config.j2 | 19 - .../roles/rabbitmq/templates/rabbitmq.json.j2 | 41 - ansible/site.retry | 1 - ansible/site.yml | 18 - babel.cfg | 2 - bindep.txt | 27 - contrib/README.rst | 25 - contrib/orchestration/ko.py | 3042 ----------------- contrib/orchestration/ko.py.readme.rst | 365 -- doc/source/ceph-guide.rst | 74 - doc/source/conf.py | 83 - doc/source/contributing.rst | 58 - doc/source/deployment-guide.rst | 904 ----- doc/source/development-environment.rst | 485 --- doc/source/index.rst | 23 - doc/source/private-registry.rst | 118 - doc/source/running-tests.rst | 109 - doc/source/service-security.rst | 71 - etc/kolla-kubernetes/kolla-kubernetes.yml | 275 -- etc/kolla-kubernetes/service_resources.yml | 709 ---- etc/kolla/globals.yml | 348 -- etc/kolla/passwords.yml | 195 -- helm/all_values.yaml | 1345 -------- 
helm/compute-kits/compute-kit/Chart.yaml | 21 - .../compute-kit/requirements.yaml | 40 - helm/compute-kits/compute-kit/values.yaml | 13 - helm/kolla-common/.helmignore | 4 - helm/kolla-common/Chart.yaml | 11 - .../_common_api_apache_deployment.yaml | 86 - .../_common_api_python_deployment.yaml | 205 -- .../templates/_common_create_db_job.yaml | 81 - .../_common_create_keystone_endpoint.yaml | 63 - .../_common_create_keystone_service.yaml | 63 - .../_common_create_keystone_user.yaml | 86 - .../templates/_common_defines.yaml | 65 - .../templates/_common_delete_db_job.yaml | 79 - .../_common_delete_keystone_service.yaml | 63 - .../_common_delete_keystone_user.yaml | 67 - .../_common_dependency_container.yaml | 39 - .../templates/_common_disabled.yaml | 7 - helm/kolla-common/templates/_common_lib.yaml | 68 - .../templates/_common_manage_db_job.yaml | 42 - helm/kolla-common/templates/_common_pv.yaml | 53 - helm/kolla-common/templates/_common_pvc.yaml | 16 - .../templates/_common_statefulset.yaml | 58 - helm/kolla-common/templates/_common_svc.yaml | 40 - .../templates/_common_val_get.rst | 94 - .../templates/_common_val_get.yaml | 56 - helm/microservice/ceph-admin-pod/Chart.yaml | 11 - .../ceph-admin-pod/requirements.yaml | 4 - .../templates/ceph-admin-pod.yaml | 64 - .../ceph-rbd-daemonset/Chart.yaml | 11 - .../ceph-rbd-daemonset/requirements.yaml | 4 - .../templates/ceph-rbd-daemonset.yaml | 92 - .../cinder-api-deployment/Chart.yaml | 11 - .../cinder-api-deployment/requirements.yaml | 4 - .../templates/cinder-api.yaml | 21 - helm/microservice/cinder-api-svc/Chart.yaml | 11 - .../cinder-api-svc/requirements.yaml | 4 - .../templates/cinder-api-svc.yaml | 9 - .../cinder-backup-statefulset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/cinder-backup.yaml | 81 - .../cinder-create-db-job/Chart.yaml | 11 - .../cinder-create-db-job/requirements.yaml | 4 - .../templates/cinder-create-db.yaml | 7 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - 
...cinder-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...nder-create-keystone-endpoint-adminv2.yaml | 17 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...nder-create-keystone-endpoint-adminv3.yaml | 17 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...der-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...r-create-keystone-endpoint-internalv2.yaml | 16 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...r-create-keystone-endpoint-internalv3.yaml | 16 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...inder-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...der-create-keystone-endpoint-publicv2.yaml | 16 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...der-create-keystone-endpoint-publicv3.yaml | 16 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../cinder-create-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../cinder-create-keystone-servicev2.yaml | 12 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../cinder-create-keystone-servicev3.yaml | 12 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../cinder-create-keystone-user.yaml | 20 - .../cinder-delete-db-job/Chart.yaml | 12 - .../cinder-delete-db-job/requirements.yaml | 4 - .../templates/cinder-delete-db.yaml | 7 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../cinder-delete-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../cinder-delete-keystone-servicev2.yaml | 12 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../cinder-delete-keystone-servicev3.yaml | 12 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../cinder-delete-keystone-user.yaml | 20 - .../cinder-manage-db-job/Chart.yaml | 11 - .../cinder-manage-db-job/requirements.yaml | 4 - .../templates/cinder-manage-db.yaml | 12 - .../cinder-scheduler-statefulset/Chart.yaml | 11 - .../requirements.yaml | 
4 - .../templates/cinder-scheduler.yaml | 15 - .../cinder-volume-ceph-statefulset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/cinder-volume-ceph.yaml | 97 - .../cinder-volume-lvm-daemonset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../cinder-volume-lvm-daemonset.yaml | 159 - .../glance-api-deployment/Chart.yaml | 11 - .../glance-api-deployment/requirements.yaml | 4 - .../templates/glance-api.yaml | 193 -- helm/microservice/glance-api-svc/Chart.yaml | 11 - .../glance-api-svc/requirements.yaml | 4 - .../templates/glance-api-svc.yaml | 8 - .../glance-create-db-job/Chart.yaml | 12 - .../glance-create-db-job/requirements.yaml | 4 - .../templates/glance-create-db.yaml | 7 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...glance-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...nce-create-keystone-endpoint-internal.yaml | 16 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...lance-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../glance-create-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../glance-create-keystone-user.yaml | 20 - .../glance-delete-db-job/Chart.yaml | 12 - .../glance-delete-db-job/requirements.yaml | 4 - .../templates/glance-delete-db.yaml | 7 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../glance-delete-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../glance-delete-keystone-user.yaml | 20 - .../glance-manage-db-job/Chart.yaml | 12 - .../glance-manage-db-job/requirements.yaml | 4 - .../templates/glance-manage-db.yaml | 77 - helm/microservice/glance-pv/Chart.yaml | 11 - helm/microservice/glance-pv/requirements.yaml | 4 - .../glance-pv/templates/glance-pv.yaml | 9 - helm/microservice/glance-pvc/Chart.yaml | 11 - .../microservice/glance-pvc/requirements.yaml | 4 - .../glance-pvc/templates/glance-pvc.yaml | 7 - .../glance-registry-deployment/Chart.yaml | 11 - 
.../requirements.yaml | 4 - .../templates/glance-registry.yaml | 21 - .../glance-registry-svc/Chart.yaml | 11 - .../glance-registry-svc/requirements.yaml | 4 - .../templates/glance-registry-svc.yaml | 8 - .../heat-api-cfn-deployment/Chart.yaml | 11 - .../heat-api-cfn-deployment/requirements.yaml | 4 - .../templates/heat-api-cfn-deployment.yaml | 33 - .../heat-api-deployment/Chart.yaml | 11 - .../heat-api-deployment/requirements.yaml | 4 - .../templates/heat-api-deployment.yaml | 31 - helm/microservice/heat-api-svc/Chart.yaml | 12 - .../heat-api-svc/requirements.yaml | 4 - .../heat-api-svc/templates/heat-api-svc.yaml | 9 - helm/microservice/heat-cfn-api-svc/Chart.yaml | 12 - .../heat-cfn-api-svc/requirements.yaml | 4 - .../templates/heat-cfn-api-svc.yaml | 9 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...at-cfn-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...cfn-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...t-cfn-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../heat-cfn-create-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../heat-delete-keystone-service.yaml | 10 - .../heat-create-db-job/Chart.yaml | 12 - .../heat-create-db-job/requirements.yaml | 4 - .../templates/heat-create-db.yaml | 8 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../heat-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...eat-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../heat-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../heat-create-keystone-service.yaml | 10 - .../heat-create-keystone-user-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/heat_create_keystone_user.yaml | 20 - .../heat-delete-db-job/Chart.yaml | 12 - 
.../heat-delete-db-job/requirements.yaml | 4 - .../templates/heat-delete-db.yaml | 7 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../heat-delete-keystone-service.yaml | 10 - .../heat-delete-keystone-user-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/heat-delete-keystone-user.yaml | 20 - .../heat-engine-statefulset/Chart.yaml | 11 - .../heat-engine-statefulset/requirements.yaml | 4 - .../templates/heat-engine-statefulset.yaml | 15 - .../heat-manage-db-job/Chart.yaml | 12 - .../heat-manage-db-job/requirements.yaml | 4 - .../templates/heat-manage-db.yaml | 12 - .../helm-repo-deployment/Chart.yaml | 11 - .../helm-repo-deployment/requirements.yaml | 4 - .../templates/helm-repo.yaml | 112 - helm/microservice/helm-repo-pv/Chart.yaml | 11 - .../helm-repo-pv/requirements.yaml | 4 - .../helm-repo-pv/templates/helm-repo-pv.yaml | 10 - helm/microservice/helm-repo-pvc/Chart.yaml | 11 - .../helm-repo-pvc/requirements.yaml | 4 - .../templates/helm-repo-pvc.yaml | 8 - helm/microservice/helm-repo-svc/Chart.yaml | 11 - .../helm-repo-svc/requirements.yaml | 4 - .../templates/helm-repo-svc.yaml | 7 - .../horizon-deployment/Chart.yaml | 11 - .../horizon-deployment/requirements.yaml | 4 - .../templates/horizon-api.yaml | 59 - helm/microservice/horizon-svc/Chart.yaml | 11 - .../horizon-svc/requirements.yaml | 4 - .../horizon-svc/templates/horizon-svc.yaml | 7 - .../ironic-api-create-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/ironic-api-create-db.yaml | 7 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...ic-api-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...api-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...c-api-create-keystone-endpoint-public.yaml | 15 - .../ironic-api-delete-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/ironic-api-delete-db.yaml | 7 - .../ironic-api-deployment/Chart.yaml | 11 - 
.../ironic-api-deployment/requirements.yaml | 4 - .../templates/ironic-api.yaml | 21 - .../ironic-api-manage-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/ironic-api-manage-db.yaml | 12 - helm/microservice/ironic-api-svc/Chart.yaml | 12 - .../ironic-api-svc/requirements.yaml | 4 - .../templates/ironic-api-svc.yaml | 48 - .../ironic-conductor-daemonset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/ironic-conductor.yaml | 155 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../ironic-create-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../ironic-create-keystone-user.yaml | 19 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../ironic-delete-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../ironic-delete-keystone-user.yaml | 20 - .../ironic-inspector-create-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/ironic-inspector-create-db.yaml | 7 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...pector-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...tor-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...ector-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...nic-inspector-create-keystone-service.yaml | 10 - .../Chart.yaml | 13 - .../requirements.yaml | 4 - ...ironic-inspector-create-keystone-user.yaml | 19 - .../ironic-inspector-delete-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/ironic-inspector-delete-db.yaml | 7 - .../Chart.yaml | 13 - .../requirements.yaml | 4 - ...nic-inspector-delete-keystone-service.yaml | 10 - .../Chart.yaml | 13 - .../requirements.yaml | 4 - ...ironic-inspector-delete-keystone-user.yaml | 20 - .../ironic-inspector-deployment/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/ironic-inspector.yaml | 187 - 
.../ironic-inspector-manage-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/ironic-inspector-manage-db.yaml | 12 - .../ironic-inspector-svc/Chart.yaml | 12 - .../ironic-inspector-svc/requirements.yaml | 4 - .../templates/ironic-inspector-svc.yaml | 40 - .../iscsi-target-daemonset/Chart.yaml | 11 - .../iscsi-target-daemonset/requirements.yaml | 4 - .../templates/iscsi-target-daemonset.yaml | 125 - helm/microservice/iscsid-daemonset/Chart.yaml | 11 - .../iscsid-daemonset/requirements.yaml | 4 - .../templates/iscsid-daemonset.yaml | 119 - .../keepalived-daemonset/Chart.yaml | 11 - .../keepalived-daemonset/requirements.yaml | 4 - .../templates/keepalived-daemonset.yaml | 103 - .../keystone-admin-svc/Chart.yaml | 12 - .../keystone-admin-svc/requirements.yaml | 4 - .../templates/keystone-admin-svc.yaml | 10 - .../keystone-api-deployment/Chart.yaml | 11 - .../keystone-api-deployment/requirements.yaml | 4 - .../templates/keystone-api.yaml | 61 - .../keystone-create-db-job/Chart.yaml | 12 - .../keystone-create-db-job/requirements.yaml | 4 - .../templates/keystone-create-db.yaml | 7 - .../keystone-create-endpoints-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/keystone-create-endpoints.yaml | 84 - .../keystone-delete-db-job/Chart.yaml | 12 - .../keystone-delete-db-job/requirements.yaml | 4 - .../templates/keystone-delete-db.yaml | 7 - .../keystone-fernet-rotate-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/keystone-fernet-rotate-job.yaml | 121 - .../keystone-fernet-setup-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/keystone-fernet-setup-job.yaml | 80 - .../keystone-internal-svc/Chart.yaml | 12 - .../keystone-internal-svc/requirements.yaml | 4 - .../templates/keystone-internal-svc.yaml | 10 - .../keystone-manage-db-job/Chart.yaml | 12 - .../keystone-manage-db-job/requirements.yaml | 4 - .../templates/keystone-manage-db.yaml | 12 - .../keystone-public-svc/Chart.yaml | 12 - 
.../keystone-public-svc/requirements.yaml | 4 - .../templates/keystone-public-svc.yaml | 10 - .../mariadb-init-element-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/mariadb-init-element.yaml | 52 - helm/microservice/mariadb-pv/Chart.yaml | 12 - .../microservice/mariadb-pv/requirements.yaml | 4 - .../mariadb-pv/templates/mariadb-pv.yaml | 10 - helm/microservice/mariadb-pvc/Chart.yaml | 12 - .../mariadb-pvc/requirements.yaml | 4 - .../mariadb-pvc/templates/mariadb-pvc.yaml | 8 - .../mariadb-statefulset/Chart.yaml | 12 - .../mariadb-statefulset/requirements.yaml | 4 - .../templates/mariadb-pod.yaml | 68 - helm/microservice/mariadb-svc/Chart.yaml | 12 - .../mariadb-svc/requirements.yaml | 4 - .../mariadb-svc/templates/mariadb-svc.yaml | 8 - .../memcached-deployment/Chart.yaml | 10 - .../memcached-deployment/requirements.yaml | 4 - .../templates/memcached-deployment.yaml | 90 - helm/microservice/memcached-svc/Chart.yaml | 12 - .../memcached-svc/requirements.yaml | 4 - .../templates/memcached-svc.yaml | 15 - .../neutron-create-db-job/Chart.yaml | 12 - .../neutron-create-db-job/requirements.yaml | 4 - .../templates/neutron-create-db.yaml | 7 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...eutron-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...ron-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...utron-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../neutron-create-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../neutron-create-keystone-user.yaml | 20 - .../neutron-delete-db-job/Chart.yaml | 12 - .../neutron-delete-db-job/requirements.yaml | 4 - .../templates/neutron-delete-db.yaml | 7 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../neutron-delete-keystone-service.yaml | 10 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../neutron-delete-keystone-user.yaml | 
20 - .../neutron-dhcp-agent-daemonset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/dhcp-agent-daemonset.yaml | 124 - .../neutron-l3-agent-daemonset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/l3-agent-daemonset.yaml | 143 - .../neutron-manage-db-job/Chart.yaml | 12 - .../neutron-manage-db-job/requirements.yaml | 4 - .../templates/neutron-manage-db.yaml | 26 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/metadata-agent-daemonset.yaml | 78 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../openvswitch-agent-daemonset.yaml | 198 -- .../neutron-server-deployment/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/neutron-server.yaml | 35 - .../neutron-server-svc/Chart.yaml | 12 - .../neutron-server-svc/requirements.yaml | 4 - .../templates/neutron-server-svc.yaml | 9 - .../Chart.yaml | 14 - .../requirements.yaml | 4 - .../nova-api-cell-discover-host.yaml | 67 - .../nova-api-create-db-job/Chart.yaml | 13 - .../nova-api-create-db-job/requirements.yaml | 4 - .../templates/nova-api-create-db.yaml | 7 - .../Chart.yaml | 14 - .../requirements.yaml | 4 - .../templates/nova-api-create-cell.yaml | 125 - .../nova-api-delete-db-job/Chart.yaml | 12 - .../nova-api-delete-db-job/requirements.yaml | 4 - .../templates/nova-api-delete-db.yaml | 7 - .../nova-api-deployment/Chart.yaml | 11 - .../nova-api-deployment/requirements.yaml | 4 - .../templates/nova-api.yaml | 32 - .../nova-api-manage-db-job/Chart.yaml | 13 - .../nova-api-manage-db-job/requirements.yaml | 4 - .../templates/nova-api-manage-db.yaml | 12 - helm/microservice/nova-api-svc/Chart.yaml | 12 - .../nova-api-svc/requirements.yaml | 4 - .../nova-api-svc/templates/nova-api-svc.yaml | 9 - .../nova-cell0-create-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/nova-cell0-create-db.yaml | 7 - .../nova-cell0-delete-db-job/Chart.yaml | 13 - .../requirements.yaml | 4 - .../templates/nova-cell0-delete-db.yaml | 7 - .../nova-compute-daemonset/Chart.yaml | 
11 - .../nova-compute-daemonset/requirements.yaml | 4 - .../templates/nova-compute.yaml | 186 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/nova-compute-ironic.yaml | 79 - .../nova-conductor-statefulset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/nova-conductor.yaml | 15 - .../nova-consoleauth-statefulset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/nova-consoleauth.yaml | 15 - .../nova-create-db-job/Chart.yaml | 12 - .../nova-create-db-job/requirements.yaml | 4 - .../templates/nova-create-db.yaml | 7 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../nova-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - ...ova-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../nova-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 11 - .../requirements.yaml | 4 - .../nova-create-keystone-service.yaml | 10 - .../nova-create-keystone-user-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/nova-create-keystone-user.yaml | 20 - .../nova-delete-db-job/Chart.yaml | 12 - .../nova-delete-db-job/requirements.yaml | 4 - .../templates/nova-delete-db.yaml | 7 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - .../nova-delete-keystone-service.yaml | 10 - .../nova-delete-keystone-user-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/nova-delete-keystone-user.yaml | 20 - .../nova-libvirt-daemonset/Chart.yaml | 11 - .../nova-libvirt-daemonset/requirements.yaml | 4 - .../templates/nova-libvirt.yaml | 156 - .../microservice/nova-metadata-svc/Chart.yaml | 12 - .../nova-metadata-svc/requirements.yaml | 4 - .../templates/nova-metadata-svc.yaml | 9 - .../nova-novncproxy-deployment/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/nova-novncproxy.yaml | 21 - .../nova-novncproxy-svc/Chart.yaml | 12 - .../nova-novncproxy-svc/requirements.yaml | 4 - .../templates/nova-novncproxy-svc.yaml | 9 - .../Chart.yaml | 
12 - .../requirements.yaml | 4 - ...cement-create-keystone-endpoint-admin.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...ent-create-keystone-endpoint-internal.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...ement-create-keystone-endpoint-public.yaml | 15 - .../Chart.yaml | 12 - .../requirements.yaml | 4 - ...ova-placement-create-keystone-service.yaml | 10 - .../Chart.yaml | 13 - .../requirements.yaml | 4 - .../nova-placement-create-keystone-user.yaml | 20 - .../Chart.yaml | 13 - .../requirements.yaml | 4 - ...ova-placement-delete-keystone-service.yaml | 10 - .../Chart.yaml | 13 - .../requirements.yaml | 4 - .../nova-placement-delete-keystone-user.yaml | 20 - .../nova-placement-deployment/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/nova-placement.yaml | 39 - .../nova-placement-svc/Chart.yaml | 12 - .../nova-placement-svc/requirements.yaml | 4 - .../templates/nova-placement-svc.yaml | 9 - .../nova-scheduler-statefulset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../templates/nova-scheduler.yaml | 15 - .../openvswitch-ovsdb-daemonset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../openvswitch-ovsdb-daemonset.yaml | 95 - .../openvswitch-vswitchd-daemonset/Chart.yaml | 11 - .../requirements.yaml | 4 - .../openvswitch-vswitchd-daemonset.yaml | 139 - .../rabbitmq-init-element-job/Chart.yaml | 12 - .../requirements.yaml | 4 - .../templates/rabbitmq-init-element.yaml | 52 - helm/microservice/rabbitmq-pv/Chart.yaml | 12 - .../rabbitmq-pv/requirements.yaml | 4 - .../rabbitmq-pv/templates/rabbitmq-pv.yaml | 10 - helm/microservice/rabbitmq-pvc/Chart.yaml | 12 - .../rabbitmq-pvc/requirements.yaml | 4 - .../rabbitmq-pvc/templates/rabbitmq-pvc.yaml | 8 - .../rabbitmq-statefulset/Chart.yaml | 12 - .../rabbitmq-statefulset/requirements.yaml | 4 - .../templates/rabbitmq-pod.yaml | 77 - helm/microservice/rabbitmq-svc/Chart.yaml | 12 - .../rabbitmq-svc/requirements.yaml | 4 - .../templates/rabbitmq-mgmt-svc.yaml | 10 - 
.../rabbitmq-svc/templates/rabbitmq-svc.yaml | 8 - .../registry-deployment/Chart.yaml | 11 - .../templates/registry.yaml | 79 - .../registry-deployment/values.yaml | 12 - .../test-ceph-init-mon-job/Chart.yaml | 13 - .../test-ceph-init-mon-job/requirements.yaml | 4 - .../templates/test_ceph_init_mon_job.yaml | 86 - .../test-ceph-init-osd-job/Chart.yaml | 13 - .../test-ceph-init-osd-job/requirements.yaml | 4 - .../templates/test-ceph-init-osd-job.yaml | 118 - .../test-ceph-mon-daemonset/Chart.yaml | 13 - .../test-ceph-mon-daemonset/requirements.yaml | 4 - .../templates/test_ceph_mon_daemonset.yaml | 128 - .../microservice/test-ceph-osd-pod/Chart.yaml | 13 - .../test-ceph-osd-pod/requirements.yaml | 4 - .../templates/test-ceph-osd-pod.yaml | 108 - helm/microservice/tgtd-daemonset/Chart.yaml | 11 - .../tgtd-daemonset/requirements.yaml | 4 - .../templates/tgtd-daemonset.yaml | 107 - helm/service/cinder-cleanup/Chart.yaml | 13 - helm/service/cinder-cleanup/requirements.yaml | 16 - helm/service/cinder-cleanup/values.yaml | 30 - helm/service/cinder-control/Chart.yaml | 13 - helm/service/cinder-control/requirements.yaml | 55 - helm/service/cinder-control/values.yaml | 153 - helm/service/cinder-volume-lvm/Chart.yaml | 13 - .../cinder-volume-lvm/requirements.yaml | 10 - helm/service/cinder-volume-lvm/values.yaml | 22 - helm/service/glance-cleanup/Chart.yaml | 13 - helm/service/glance-cleanup/requirements.yaml | 10 - helm/service/glance-cleanup/values.yaml | 20 - helm/service/glance/Chart.yaml | 10 - helm/service/glance/requirements.yaml | 41 - helm/service/glance/values.yaml | 84 - helm/service/horizon/Chart.yaml | 10 - helm/service/horizon/requirements.yaml | 7 - helm/service/horizon/values.yaml | 10 - helm/service/ironic/Chart.yaml | 10 - helm/service/ironic/requirements.yaml | 31 - helm/service/ironic/values.yaml | 78 - helm/service/keystone-cleanup/Chart.yaml | 12 - .../keystone-cleanup/requirements.yaml | 4 - helm/service/keystone/Chart.yaml | 10 - 
helm/service/keystone/requirements.yaml | 25 - helm/service/keystone/values.yaml | 35 - helm/service/mariadb/Chart.yaml | 11 - helm/service/mariadb/requirements.yaml | 17 - helm/service/mariadb/values.yaml | 9 - helm/service/memcached/Chart.yaml | 10 - helm/service/memcached/requirements.yaml | 7 - helm/service/memcached/values.yaml | 4 - helm/service/neutron-cleanup/Chart.yaml | 13 - .../service/neutron-cleanup/requirements.yaml | 10 - helm/service/neutron-cleanup/values.yaml | 20 - helm/service/neutron/Chart.yaml | 11 - helm/service/neutron/requirements.yaml | 40 - helm/service/neutron/values.yaml | 69 - helm/service/nova-cleanup/Chart.yaml | 13 - helm/service/nova-cleanup/requirements.yaml | 17 - helm/service/nova-cleanup/values.yaml | 30 - helm/service/nova-compute-ironic/Chart.yaml | 13 - .../nova-compute-ironic/requirements.yaml | 4 - helm/service/nova-compute-ironic/values.yaml | 12 - helm/service/nova-compute/Chart.yaml | 12 - helm/service/nova-compute/requirements.yaml | 11 - helm/service/nova-compute/values.yaml | 28 - helm/service/nova-control/Chart.yaml | 12 - helm/service/nova-control/requirements.yaml | 81 - helm/service/nova-control/values.yaml | 155 - helm/service/openvswitch/Chart.yaml | 12 - helm/service/openvswitch/requirements.yaml | 7 - helm/service/openvswitch/values.yaml | 11 - helm/service/rabbitmq/Chart.yaml | 11 - helm/service/rabbitmq/requirements.yaml | 17 - helm/service/rabbitmq/values.yaml | 12 - helm/test/devenv/templates/ceph-conf.yaml | 27 - helm/test/devenv/templates/ceph-mon.yaml | 62 - helm/test/devenv/templates/ceph-osd.yaml | 40 - .../devenv/templates/cinder-api-haproxy.yaml | 44 - helm/test/devenv/templates/cinder-api.yaml | 100 - helm/test/devenv/templates/cinder-backup.yaml | 109 - .../devenv/templates/cinder-scheduler.yaml | 100 - helm/test/devenv/templates/cinder-volume.yaml | 128 - .../devenv/templates/glance-api-haproxy.yaml | 44 - helm/test/devenv/templates/glance-api.yaml | 92 - 
.../templates/glance-registry-haproxy.yaml | 44 - .../devenv/templates/glance-registry.yaml | 63 - helm/test/devenv/templates/horizon.yaml | 928 ----- helm/test/devenv/templates/keepalived.yaml | 33 - helm/test/devenv/templates/keystone.yaml | 179 - helm/test/devenv/templates/mariadb.yaml | 149 - helm/test/devenv/templates/memcached.yaml | 10 - .../devenv/templates/neutron-dhcp-agent.yaml | 146 - .../devenv/templates/neutron-l3-agent.yaml | 145 - .../templates/neutron-metadata-agent.yaml | 138 - .../templates/neutron-openvswitch-agent.yaml | 121 - .../templates/neutron-server-haproxy.yaml | 44 - .../test/devenv/templates/neutron-server.yaml | 135 - .../devenv/templates/nova-api-haproxy.yaml | 49 - helm/test/devenv/templates/nova-api.yaml | 133 - helm/test/devenv/templates/nova-compute.yaml | 140 - .../test/devenv/templates/nova-conductor.yaml | 133 - .../devenv/templates/nova-consoleauth.yaml | 133 - helm/test/devenv/templates/nova-libvirt.yaml | 31 - .../templates/nova-novncproxy-haproxy.yaml | 44 - .../devenv/templates/nova-novncproxy.yaml | 133 - .../test/devenv/templates/nova-scheduler.yaml | 133 - .../templates/openvswitch-db-server.yaml | 10 - .../templates/openvswitch-vswitchd.yaml | 10 - helm/test/devenv/templates/rabbitmq.yaml | 105 - helm/test/devenv/templates/resolv-conf.yaml | 10 - helm/test/selenium/Chart.yaml | 8 - .../templates/selenium-hub-deployment.yaml | 15 - .../selenium/templates/selenium-hub-svc.yaml | 13 - .../templates/selenium-node-firefox.yaml | 15 - kolla_kubernetes/__init__.py | 17 - kolla_kubernetes/app.py | 126 - kolla_kubernetes/commands/__init__.py | 0 kolla_kubernetes/commands/base_command.py | 22 - kolla_kubernetes/commands/cmd_resource.py | 434 --- kolla_kubernetes/commands/cmd_service.py | 71 - kolla_kubernetes/commands/genpwd.py | 131 - kolla_kubernetes/exception.py | 35 - kolla_kubernetes/kube_service_status.py | 362 -- kolla_kubernetes/pathfinder.py | 111 - kolla_kubernetes/service_resources.py | 410 --- 
kolla_kubernetes/tests/__init__.py | 0 kolla_kubernetes/tests/base.py | 41 - kolla_kubernetes/tests/test_helm_templates.py | 73 - kolla_kubernetes/tests/test_pathfinder.py | 35 - kolla_kubernetes/tests/test_templates.py | 191 -- kolla_kubernetes/tests/test_utils.py | 190 - kolla_kubernetes/utils.py | 279 -- kolla_kubernetes/version.py | 15 - orchestration/README.md | 62 - orchestration/deploy.yml | 6 - .../kolla-controller/tasks/kolla-config.yml | 97 - .../kolla-controller/tasks/kolla-deploy.yml | 123 - .../roles/kolla-controller/tasks/main.yml | 8 - .../kolla-controller/templates/cloud.yaml | 73 - .../templates/globals_config.j2 | 37 - releasenotes/notes/.placeholder | 0 ...nder_control_service-7eff1740903ad8ba.yaml | 4 - .../compute_kit_iscsi-897b109ecdd2648d.yaml | 7 - ...olidated-notes-0.4.0-18fcedafcfeb1647.yaml | 170 - .../destroy-workflow-0efbaa5a78822925.yaml | 5 - ...enable_placement_api-2690cfdc6e3b612b.yaml | 6 - ...fernet-token-support-abc0c9b496bd65e1.yaml | 4 - ...izon-service-package-7801a17f287ba5f9.yaml | 4 - .../notes/image-pull-4fc22fd41caf5904.yaml | 4 - .../notes/k8s-devenv-36777f058cf2229c.yaml | 4 - .../notes/keepalived-81c457d84c5910c5.yaml | 4 - .../keystone_service-1e9717d09e63de03.yaml | 4 - ...b-service-deployment-ba8063510b78ef49.yaml | 4 - ...ched-service-package-cdacd9315cfb3d2e.yaml | 4 - ...e-confs-to-kolla-k8s-e735bd379b17a494.yaml | 3 - ...tron-service-package-9c170d2caaabcf24.yaml | 4 - ...pute-service-package-2ffe16ecc27d9501.yaml | 4 - ...trol-service-package-58abbed9faf1997e.yaml | 4 - .../notes/prometheus-3937e3b8a8d85019.yaml | 4 - ...itmq_service_package-0ffba95048d24028.yaml | 4 - .../notes/selenium-d71bf318b83556a4.yaml | 4 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 releasenotes/source/conf.py | 255 -- releasenotes/source/index.rst | 17 - releasenotes/source/unreleased.rst | 5 - requirements.txt | 13 - services/WARNING.rst | 15 - services/ceph/ceph-admin-pod.yml.j2 | 56 
- .../ceph/ceph-bootstrap-initial-mon.yml.j2 | 86 - services/ceph/ceph-bootstrap-osd.yml.j2 | 115 - services/ceph/ceph-mon-pod.yml.j2 | 124 - services/ceph/ceph-osd-pod.yml.j2 | 105 - services/ceph/ceph-rbd-pod.yml.j2 | 82 - services/ceph/ceph-secret.yml.j2 | 23 - services/common/api-haproxy-configmap.yml.j2 | 50 - .../common-create-keystone-endpoint.yml.j2 | 53 - .../common/common-create-keystone-user.yml.j2 | 54 - services/common/common-deployment.yml.j2 | 174 - services/common/common-disk.sh.j2 | 39 - services/common/common-lib.yml.j2 | 61 - services/common/common-pv.yml.j2 | 50 - services/common/common-pvc.yml.j2 | 17 - services/common/generic-service.yml.j2 | 20 - services/common/logging-configmap.yml.j2 | 66 - .../elasticsearch/elasticsearch-pod.yml.j2 | 73 - .../keepalived/keepalived-configmap.yml.j2 | 34 - .../keepalived/keepalived-daemonset.yml.j2 | 87 - .../neutron-bootstrap-job-create-db.yml.j2 | 71 - services/nova/nova-compute-pod.yml.j2 | 149 - ...ol-bootstrap-job-create-nova-api-db.yml.j2 | 39 - ...ontrol-bootstrap-job-create-nova-db.yml.j2 | 84 - .../nova/nova-control-conductor-pod.yml.j2 | 66 - .../nova/nova-control-consoleauth-pod.yml.j2 | 60 - .../nova/nova-control-scheduler-pod.yml.j2 | 68 - services/nova/nova-libvirt-pod.yml.j2 | 142 - services/nova/nova-libvirt-secret.yml.j2 | 16 - .../openvswitch-ovsdb-daemonset.yml.j2 | 99 - .../openvswitch-set-external-ip-job.yml.j2 | 26 - .../openvswitch-vswitchd-daemonset.yml.j2 | 105 - services/swift/swift-account-pod.yml.j2 | 88 - services/swift/swift-account-service.yml.j2 | 10 - services/swift/swift-container-pod.yml.j2 | 89 - services/swift/swift-container-service.yml.j2 | 10 - services/swift/swift-object-pod.yml.j2 | 102 - services/swift/swift-object-service.yml.j2 | 10 - services/swift/swift-proxy-pod.yml.j2 | 39 - services/swift/swift-proxy-service.yml.j2 | 10 - services/swift/swift-rsync-service.yml.j2 | 10 - setup.cfg | 76 - setup.py | 29 - specs/README.rst | 17 - specs/ansible-deployment.rst | 
128 - specs/kolla-kubernetes-arch.rst | 391 --- test-requirements.txt | 16 - tests/bin/basic_tests.sh | 183 - tests/bin/build_docker_images.sh | 20 - tests/bin/build_test_ceph.sh | 111 - tests/bin/ceph_workflow.sh | 566 --- tests/bin/ceph_workflow_service.sh | 162 - tests/bin/cleanup_tests.sh | 38 - tests/bin/common_ceph_config.sh | 24 - tests/bin/common_iscsi_config.sh | 23 - tests/bin/common_iscsi_config_v4.sh | 44 - tests/bin/common_iscsi_config_v5.sh | 49 - tests/bin/common_workflow_config.sh | 65 - tests/bin/deploy_compute_kit.sh | 115 - tests/bin/deploy_ironic.sh | 148 - tests/bin/deploy_iscsi_common.sh | 569 --- tests/bin/destroy_tests.sh | 30 - tests/bin/endpoint_test.sh | 28 - tests/bin/fix_gate_iptables.sh | 21 - tests/bin/gate_capture_logs.sh | 157 - tests/bin/horizon_test.py | 110 - tests/bin/horizon_test.sh | 38 - tests/bin/ironic_deploy_tests.sh | 272 -- tests/bin/iscsi_generic_workflow.sh | 23 - tests/bin/iscsi_ironic_workflow.sh | 35 - tests/bin/prometheus_tests.sh | 20 - tests/bin/setup_canal.sh | 45 - tests/bin/setup_config.sh | 59 - tests/bin/setup_config_iscsi.sh | 41 - tests/bin/setup_gate_loopback.sh | 20 - tests/bin/setup_gate_loopback_lvm.sh | 16 - tests/conf/ceph-all-in-one/kolla_config | 76 - .../ceph-all-in-one/kolla_kubernetes_config | 18 - tests/conf/ironic/vm-1.xml | 70 - tests/conf/iscsi-all-in-one/kolla_config | 77 - .../iscsi-all-in-one/kolla_kubernetes_config | 11 - tests/conf/iscsid-tgtd-configmap.yaml | 23 - .../post.yaml | 15 - .../run.yaml | 68 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 68 - .../post.yaml | 15 - .../run.yaml | 68 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 68 
- .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 56 - .../post.yaml | 15 - .../run.yaml | 56 - .../post.yaml | 15 - .../run.yaml | 56 - .../post.yaml | 15 - .../run.yaml | 56 - .../post.yaml | 15 - .../run.yaml | 56 - .../post.yaml | 15 - .../run.yaml | 65 - .../post.yaml | 15 - .../run.yaml | 65 - tools/Dockerfile | 90 - tools/build_dev_image.sh | 50 - tools/build_example_yaml.py | 68 - tools/build_helm_templates.sh | 12 - tools/build_local_admin_keystonerc.sh | 33 - tools/cleanup-k8s.sh | 26 - tools/fix-mitaka-config.py | 45 - tools/generate_passwords.py | 1 - tools/get_arch.sh | 16 - tools/helm_build_all.sh | 35 - tools/helm_build_compute_kits.py | 55 - tools/helm_build_microservices.py | 64 - tools/helm_build_services.py | 55 - tools/helm_buildrepo.sh | 44 - tools/helm_prebuild_compute_kits.py | 48 - tools/helm_prebuild_microservices.py | 217 -- tools/helm_prebuild_services.py | 49 - tools/helm_versions.sh | 4 - tools/kolla-kubernetes | 1 - tools/pull_containers.sh | 15 - tools/run_dev_image.sh | 13 - tools/secret-generator.py | 70 - tools/setup-ceph-secrets.sh | 14 - tools/setup-kubectl.sh | 12 - tools/setup_dev_env.sh | 172 - tools/setup_gate.sh | 100 - tools/setup_gate_ceph.sh | 168 - tools/setup_gate_common.sh | 113 - tools/setup_gate_iscsi.sh | 104 - tools/setup_helm.sh | 18 - tools/setup_kube_AIO.sh | 351 -- tools/setup_kubernetes.sh | 90 - tools/setup_rbd_volumes.sh | 30 - tools/setup_registry.sh | 29 - tools/setup_simple_ceph_users.sh | 33 - tools/test-dns.yml | 17 - tools/test.sh | 29 - tools/test_kube_dns.sh | 63 - tools/wait_for_kube_control_plane.sh | 35 - tools/wait_for_pods.py | 61 - tools/wait_for_pods.sh | 28 - tools/wait_for_pods_termination.sh | 16 - tox.ini | 64 - 1060 files changed, 10 insertions(+), 46408 deletions(-) delete mode 100644 .coveragerc 
delete mode 100644 .gitignore delete mode 100644 .mailmap delete mode 100644 .testr.conf delete mode 100644 .zuul.d/legacy.yaml delete mode 100644 CONTRIBUTING.rst delete mode 100644 Dockerfile delete mode 100644 HACKING.rst delete mode 100644 LICENSE delete mode 100644 ansible/action_plugins/merge_configs.py delete mode 100755 ansible/action_plugins/merge_yaml.py delete mode 100644 ansible/destroy.yml delete mode 100644 ansible/group_vars/all.yml delete mode 100644 ansible/library/merge_configs.py delete mode 100644 ansible/library/merge_yaml.py delete mode 100644 ansible/roles/ceph/defaults/main.yml delete mode 100644 ansible/roles/ceph/tasks/config.yml delete mode 100644 ansible/roles/ceph/tasks/main.yml delete mode 100644 ansible/roles/ceph/templates/ceph-mon.json.j2 delete mode 100644 ansible/roles/ceph/templates/ceph-osd.json.j2 delete mode 100644 ansible/roles/ceph/templates/ceph-rgw.json.j2 delete mode 100644 ansible/roles/ceph/templates/ceph.conf.j2 delete mode 100644 ansible/roles/cinder/defaults/main.yml delete mode 100644 ansible/roles/cinder/tasks/config.yml delete mode 100644 ansible/roles/cinder/tasks/main.yml delete mode 100644 ansible/roles/cinder/templates/cinder-api.json.j2 delete mode 100644 ansible/roles/cinder/templates/cinder-backup.json.j2 delete mode 100644 ansible/roles/cinder/templates/cinder-scheduler.json.j2 delete mode 100644 ansible/roles/cinder/templates/cinder-volume.json.j2 delete mode 100644 ansible/roles/cinder/templates/cinder.conf.j2 delete mode 100644 ansible/roles/common/defaults/main.yml delete mode 100644 ansible/roles/destroy/tasks/label_iterator.yml delete mode 100644 ansible/roles/destroy/tasks/main.yml delete mode 100644 ansible/roles/glance/defaults/main.yml delete mode 100644 ansible/roles/glance/tasks/config.yml delete mode 100644 ansible/roles/glance/tasks/main.yml delete mode 100644 ansible/roles/glance/templates/glance-api.conf.j2 delete mode 100644 ansible/roles/glance/templates/glance-api.json.j2 delete mode 
100644 ansible/roles/glance/templates/glance-registry.conf.j2 delete mode 100644 ansible/roles/glance/templates/glance-registry.json.j2 delete mode 100644 ansible/roles/heat/defaults/main.yml delete mode 100644 ansible/roles/heat/tasks/config.yml delete mode 100644 ansible/roles/heat/tasks/main.yml delete mode 100644 ansible/roles/heat/templates/_deprecated.yaml delete mode 100644 ansible/roles/heat/templates/heat-api-cfn.json.j2 delete mode 100644 ansible/roles/heat/templates/heat-api.json.j2 delete mode 100644 ansible/roles/heat/templates/heat-engine.json.j2 delete mode 100644 ansible/roles/heat/templates/heat.conf.j2 delete mode 100644 ansible/roles/horizon/defaults/main.yml delete mode 100644 ansible/roles/horizon/tasks/config.yml delete mode 100644 ansible/roles/horizon/tasks/main.yml delete mode 100644 ansible/roles/horizon/templates/horizon.conf.j2 delete mode 100644 ansible/roles/horizon/templates/horizon.json.j2 delete mode 100644 ansible/roles/horizon/templates/local_settings.j2 delete mode 100644 ansible/roles/ironic/defaults/main.yml delete mode 100644 ansible/roles/ironic/tasks/config.yml delete mode 100644 ansible/roles/ironic/tasks/main.yml delete mode 100644 ansible/roles/ironic/templates/ironic-api.json.j2 delete mode 100644 ansible/roles/ironic/templates/ironic-conductor-tftp.json.j2 delete mode 100644 ansible/roles/ironic/templates/ironic-conductor.json.j2 delete mode 100644 ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2 delete mode 100644 ansible/roles/ironic/templates/ironic-dnsmasq.json.j2 delete mode 100644 ansible/roles/ironic/templates/ironic-inspector-tftp.json.j2 delete mode 100644 ansible/roles/ironic/templates/ironic-inspector.conf.j2 delete mode 100644 ansible/roles/ironic/templates/ironic-inspector.json.j2 delete mode 100644 ansible/roles/ironic/templates/ironic.conf.j2 delete mode 100644 ansible/roles/ironic/templates/pxelinux.default.j2 delete mode 100644 ansible/roles/iscsi/defaults/main.yml delete mode 100644 
ansible/roles/iscsi/tasks/config.yml delete mode 100644 ansible/roles/iscsi/tasks/main.yml delete mode 100644 ansible/roles/iscsi/templates/iscsid.json.j2 delete mode 100644 ansible/roles/iscsi/templates/tgtd.json.j2 delete mode 100644 ansible/roles/keystone/defaults/main.yml delete mode 100644 ansible/roles/keystone/tasks/config.yml delete mode 100644 ansible/roles/keystone/tasks/main.yml delete mode 100644 ansible/roles/keystone/templates/keystone-paste.ini.j2 delete mode 100644 ansible/roles/keystone/templates/keystone.conf.j2 delete mode 100644 ansible/roles/keystone/templates/keystone.json.j2 delete mode 100644 ansible/roles/keystone/templates/wsgi-keystone.conf.j2 delete mode 100644 ansible/roles/mariadb/defaults/main.yml delete mode 100644 ansible/roles/mariadb/tasks/config.yml delete mode 100644 ansible/roles/mariadb/tasks/main.yml delete mode 100644 ansible/roles/mariadb/templates/galera.cnf.j2 delete mode 100644 ansible/roles/mariadb/templates/mariadb.json.j2 delete mode 100644 ansible/roles/memcached/defaults/main.yml delete mode 100644 ansible/roles/memcached/tasks/config.yml delete mode 100644 ansible/roles/memcached/tasks/main.yml delete mode 100644 ansible/roles/memcached/templates/memcached.json.j2 delete mode 100644 ansible/roles/neutron/defaults/main.yml delete mode 100644 ansible/roles/neutron/tasks/config-neutron-fake.yml delete mode 100644 ansible/roles/neutron/tasks/config.yml delete mode 100644 ansible/roles/neutron/tasks/main.yml delete mode 100644 ansible/roles/neutron/templates/bgp_dragent.ini.j2 delete mode 100644 ansible/roles/neutron/templates/dhcp_agent.ini.j2 delete mode 100644 ansible/roles/neutron/templates/dnsmasq.conf.j2 delete mode 100644 ansible/roles/neutron/templates/fwaas_driver.ini.j2 delete mode 100644 ansible/roles/neutron/templates/l3_agent.ini.j2 delete mode 100644 ansible/roles/neutron/templates/lbaas_agent.ini.j2 delete mode 100644 ansible/roles/neutron/templates/metadata_agent.ini.j2 delete mode 100644 
ansible/roles/neutron/templates/ml2_conf.ini.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-l3-agent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-lbaas-agent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-metadata-agent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-server.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron-vpnaas-agent.json.j2 delete mode 100644 ansible/roles/neutron/templates/neutron.conf.j2 delete mode 100644 ansible/roles/neutron/templates/neutron_lbaas.conf.j2 delete mode 100644 ansible/roles/neutron/templates/neutron_vpnaas.conf.j2 delete mode 100644 ansible/roles/neutron/templates/openvswitch-db-server.json.j2 delete mode 100644 ansible/roles/neutron/templates/openvswitch-vswitchd.json.j2 delete mode 100644 ansible/roles/neutron/templates/vpnaas_agent.ini.j2 delete mode 100644 ansible/roles/nova/defaults/main.yml delete mode 100644 ansible/roles/nova/tasks/config.yml delete mode 100644 ansible/roles/nova/tasks/main.yml delete mode 100644 ansible/roles/nova/templates/id_rsa delete mode 100644 ansible/roles/nova/templates/id_rsa.pub delete mode 100644 ansible/roles/nova/templates/libvirtd.conf.j2 delete mode 100644 ansible/roles/nova/templates/nova-api.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-compute-ironic.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-compute.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-conductor.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-consoleauth.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-libvirt.json.j2 delete mode 100644 
ansible/roles/nova/templates/nova-novncproxy.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-scheduler.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-serialproxy.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-spicehtml5proxy.json.j2 delete mode 100644 ansible/roles/nova/templates/nova-ssh.json.j2 delete mode 100644 ansible/roles/nova/templates/nova.conf.j2 delete mode 100644 ansible/roles/nova/templates/placement-api-wsgi.conf.j2 delete mode 100644 ansible/roles/nova/templates/placement-api.json.j2 delete mode 100644 ansible/roles/nova/templates/qemu.conf.j2 delete mode 100644 ansible/roles/nova/templates/secret.xml.j2 delete mode 100644 ansible/roles/nova/templates/ssh_config.j2 delete mode 100644 ansible/roles/nova/templates/sshd_config.j2 delete mode 100644 ansible/roles/rabbitmq/defaults/main.yml delete mode 100644 ansible/roles/rabbitmq/tasks/config.yml delete mode 100644 ansible/roles/rabbitmq/tasks/main.yml delete mode 100644 ansible/roles/rabbitmq/templates/definitions.json.j2 delete mode 100644 ansible/roles/rabbitmq/templates/rabbitmq-clusterer.config.j2 delete mode 100644 ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2 delete mode 100644 ansible/roles/rabbitmq/templates/rabbitmq.config.j2 delete mode 100644 ansible/roles/rabbitmq/templates/rabbitmq.json.j2 delete mode 100644 ansible/site.retry delete mode 100644 ansible/site.yml delete mode 100644 babel.cfg delete mode 100644 bindep.txt delete mode 100644 contrib/README.rst delete mode 100755 contrib/orchestration/ko.py delete mode 100644 contrib/orchestration/ko.py.readme.rst delete mode 100644 doc/source/ceph-guide.rst delete mode 100755 doc/source/conf.py delete mode 100644 doc/source/contributing.rst delete mode 100644 doc/source/deployment-guide.rst delete mode 100644 doc/source/development-environment.rst delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/private-registry.rst delete mode 100644 doc/source/running-tests.rst delete 
mode 100644 doc/source/service-security.rst delete mode 100755 etc/kolla-kubernetes/kolla-kubernetes.yml delete mode 100755 etc/kolla-kubernetes/service_resources.yml delete mode 100644 etc/kolla/globals.yml delete mode 100644 etc/kolla/passwords.yml delete mode 100755 helm/all_values.yaml delete mode 100644 helm/compute-kits/compute-kit/Chart.yaml delete mode 100644 helm/compute-kits/compute-kit/requirements.yaml delete mode 100644 helm/compute-kits/compute-kit/values.yaml delete mode 100644 helm/kolla-common/.helmignore delete mode 100644 helm/kolla-common/Chart.yaml delete mode 100644 helm/kolla-common/templates/_common_api_apache_deployment.yaml delete mode 100644 helm/kolla-common/templates/_common_api_python_deployment.yaml delete mode 100644 helm/kolla-common/templates/_common_create_db_job.yaml delete mode 100644 helm/kolla-common/templates/_common_create_keystone_endpoint.yaml delete mode 100644 helm/kolla-common/templates/_common_create_keystone_service.yaml delete mode 100644 helm/kolla-common/templates/_common_create_keystone_user.yaml delete mode 100644 helm/kolla-common/templates/_common_defines.yaml delete mode 100644 helm/kolla-common/templates/_common_delete_db_job.yaml delete mode 100644 helm/kolla-common/templates/_common_delete_keystone_service.yaml delete mode 100644 helm/kolla-common/templates/_common_delete_keystone_user.yaml delete mode 100644 helm/kolla-common/templates/_common_dependency_container.yaml delete mode 100644 helm/kolla-common/templates/_common_disabled.yaml delete mode 100644 helm/kolla-common/templates/_common_lib.yaml delete mode 100644 helm/kolla-common/templates/_common_manage_db_job.yaml delete mode 100644 helm/kolla-common/templates/_common_pv.yaml delete mode 100644 helm/kolla-common/templates/_common_pvc.yaml delete mode 100644 helm/kolla-common/templates/_common_statefulset.yaml delete mode 100644 helm/kolla-common/templates/_common_svc.yaml delete mode 100644 helm/kolla-common/templates/_common_val_get.rst delete 
mode 100644 helm/kolla-common/templates/_common_val_get.yaml delete mode 100644 helm/microservice/ceph-admin-pod/Chart.yaml delete mode 100644 helm/microservice/ceph-admin-pod/requirements.yaml delete mode 100644 helm/microservice/ceph-admin-pod/templates/ceph-admin-pod.yaml delete mode 100644 helm/microservice/ceph-rbd-daemonset/Chart.yaml delete mode 100644 helm/microservice/ceph-rbd-daemonset/requirements.yaml delete mode 100644 helm/microservice/ceph-rbd-daemonset/templates/ceph-rbd-daemonset.yaml delete mode 100644 helm/microservice/cinder-api-deployment/Chart.yaml delete mode 100644 helm/microservice/cinder-api-deployment/requirements.yaml delete mode 100644 helm/microservice/cinder-api-deployment/templates/cinder-api.yaml delete mode 100644 helm/microservice/cinder-api-svc/Chart.yaml delete mode 100644 helm/microservice/cinder-api-svc/requirements.yaml delete mode 100644 helm/microservice/cinder-api-svc/templates/cinder-api-svc.yaml delete mode 100644 helm/microservice/cinder-backup-statefulset/Chart.yaml delete mode 100644 helm/microservice/cinder-backup-statefulset/requirements.yaml delete mode 100644 helm/microservice/cinder-backup-statefulset/templates/cinder-backup.yaml delete mode 100644 helm/microservice/cinder-create-db-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-db-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-db-job/templates/cinder-create-db.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-admin-job/templates/cinder-create-keystone-endpoint-admin.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-adminv2-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-adminv2-job/requirements.yaml delete mode 100644 
helm/microservice/cinder-create-keystone-endpoint-adminv2-job/templates/cinder-create-keystone-endpoint-adminv2.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-adminv3-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-adminv3-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-adminv3-job/templates/cinder-create-keystone-endpoint-adminv3.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internal-job/templates/cinder-create-keystone-endpoint-internal.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internalv2-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internalv2-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internalv2-job/templates/cinder-create-keystone-endpoint-internalv2.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internalv3-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internalv3-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-internalv3-job/templates/cinder-create-keystone-endpoint-internalv3.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-public-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-public-job/templates/cinder-create-keystone-endpoint-public.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-publicv2-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-publicv2-job/requirements.yaml delete mode 100644 
helm/microservice/cinder-create-keystone-endpoint-publicv2-job/templates/cinder-create-keystone-endpoint-publicv2.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-publicv3-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-publicv3-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-endpoint-publicv3-job/templates/cinder-create-keystone-endpoint-publicv3.yaml delete mode 100644 helm/microservice/cinder-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-service-job/templates/cinder-create-keystone-service.yaml delete mode 100644 helm/microservice/cinder-create-keystone-servicev2-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-servicev2-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-servicev2-job/templates/cinder-create-keystone-servicev2.yaml delete mode 100644 helm/microservice/cinder-create-keystone-servicev3-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-servicev3-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-servicev3-job/templates/cinder-create-keystone-servicev3.yaml delete mode 100644 helm/microservice/cinder-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/cinder-create-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/cinder-create-keystone-user-job/templates/cinder-create-keystone-user.yaml delete mode 100644 helm/microservice/cinder-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/cinder-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/cinder-delete-db-job/templates/cinder-delete-db.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-service-job/Chart.yaml delete mode 100644 
helm/microservice/cinder-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-service-job/templates/cinder-delete-keystone-service.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-servicev2-job/Chart.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-servicev2-job/requirements.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-servicev2-job/templates/cinder-delete-keystone-servicev2.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-servicev3-job/Chart.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-servicev3-job/requirements.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-servicev3-job/templates/cinder-delete-keystone-servicev3.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/cinder-delete-keystone-user-job/templates/cinder-delete-keystone-user.yaml delete mode 100644 helm/microservice/cinder-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/cinder-manage-db-job/requirements.yaml delete mode 100644 helm/microservice/cinder-manage-db-job/templates/cinder-manage-db.yaml delete mode 100644 helm/microservice/cinder-scheduler-statefulset/Chart.yaml delete mode 100644 helm/microservice/cinder-scheduler-statefulset/requirements.yaml delete mode 100644 helm/microservice/cinder-scheduler-statefulset/templates/cinder-scheduler.yaml delete mode 100644 helm/microservice/cinder-volume-ceph-statefulset/Chart.yaml delete mode 100644 helm/microservice/cinder-volume-ceph-statefulset/requirements.yaml delete mode 100644 helm/microservice/cinder-volume-ceph-statefulset/templates/cinder-volume-ceph.yaml delete mode 100644 helm/microservice/cinder-volume-lvm-daemonset/Chart.yaml delete mode 100644 helm/microservice/cinder-volume-lvm-daemonset/requirements.yaml delete mode 
100644 helm/microservice/cinder-volume-lvm-daemonset/templates/cinder-volume-lvm-daemonset.yaml delete mode 100644 helm/microservice/glance-api-deployment/Chart.yaml delete mode 100644 helm/microservice/glance-api-deployment/requirements.yaml delete mode 100644 helm/microservice/glance-api-deployment/templates/glance-api.yaml delete mode 100644 helm/microservice/glance-api-svc/Chart.yaml delete mode 100644 helm/microservice/glance-api-svc/requirements.yaml delete mode 100644 helm/microservice/glance-api-svc/templates/glance-api-svc.yaml delete mode 100644 helm/microservice/glance-create-db-job/Chart.yaml delete mode 100644 helm/microservice/glance-create-db-job/requirements.yaml delete mode 100644 helm/microservice/glance-create-db-job/templates/glance-create-db.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-admin-job/templates/glance-create-keystone-endpoint-admin.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-internal-job/templates/glance-create-keystone-endpoint-internal.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-public-job/requirements.yaml delete mode 100644 helm/microservice/glance-create-keystone-endpoint-public-job/templates/glance-create-keystone-endpoint-public.yaml delete mode 100644 helm/microservice/glance-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/glance-create-keystone-service-job/requirements.yaml delete mode 100644 
helm/microservice/glance-create-keystone-service-job/templates/glance-create-keystone-service.yaml delete mode 100644 helm/microservice/glance-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/glance-create-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/glance-create-keystone-user-job/templates/glance-create-keystone-user.yaml delete mode 100644 helm/microservice/glance-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/glance-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/glance-delete-db-job/templates/glance-delete-db.yaml delete mode 100644 helm/microservice/glance-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/glance-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/glance-delete-keystone-service-job/templates/glance-delete-keystone-service.yaml delete mode 100644 helm/microservice/glance-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/glance-delete-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/glance-delete-keystone-user-job/templates/glance-delete-keystone-user.yaml delete mode 100644 helm/microservice/glance-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/glance-manage-db-job/requirements.yaml delete mode 100644 helm/microservice/glance-manage-db-job/templates/glance-manage-db.yaml delete mode 100644 helm/microservice/glance-pv/Chart.yaml delete mode 100644 helm/microservice/glance-pv/requirements.yaml delete mode 100644 helm/microservice/glance-pv/templates/glance-pv.yaml delete mode 100644 helm/microservice/glance-pvc/Chart.yaml delete mode 100644 helm/microservice/glance-pvc/requirements.yaml delete mode 100644 helm/microservice/glance-pvc/templates/glance-pvc.yaml delete mode 100644 helm/microservice/glance-registry-deployment/Chart.yaml delete mode 100644 helm/microservice/glance-registry-deployment/requirements.yaml delete mode 100644 
helm/microservice/glance-registry-deployment/templates/glance-registry.yaml delete mode 100644 helm/microservice/glance-registry-svc/Chart.yaml delete mode 100644 helm/microservice/glance-registry-svc/requirements.yaml delete mode 100644 helm/microservice/glance-registry-svc/templates/glance-registry-svc.yaml delete mode 100644 helm/microservice/heat-api-cfn-deployment/Chart.yaml delete mode 100644 helm/microservice/heat-api-cfn-deployment/requirements.yaml delete mode 100644 helm/microservice/heat-api-cfn-deployment/templates/heat-api-cfn-deployment.yaml delete mode 100644 helm/microservice/heat-api-deployment/Chart.yaml delete mode 100644 helm/microservice/heat-api-deployment/requirements.yaml delete mode 100644 helm/microservice/heat-api-deployment/templates/heat-api-deployment.yaml delete mode 100755 helm/microservice/heat-api-svc/Chart.yaml delete mode 100644 helm/microservice/heat-api-svc/requirements.yaml delete mode 100755 helm/microservice/heat-api-svc/templates/heat-api-svc.yaml delete mode 100755 helm/microservice/heat-cfn-api-svc/Chart.yaml delete mode 100644 helm/microservice/heat-cfn-api-svc/requirements.yaml delete mode 100755 helm/microservice/heat-cfn-api-svc/templates/heat-cfn-api-svc.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/heat-cfn-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-endpoint-admin-job/templates/heat-cfn-create-keystone-endpoint-admin.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/heat-cfn-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-endpoint-internal-job/templates/heat-cfn-create-keystone-endpoint-internal.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-endpoint-public-job/Chart.yaml delete mode 
100644 helm/microservice/heat-cfn-create-keystone-endpoint-public-job/requirements.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-endpoint-public-job/templates/heat-cfn-create-keystone-endpoint-public.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/heat-cfn-create-keystone-service-job/requirements.yaml delete mode 100755 helm/microservice/heat-cfn-create-keystone-service-job/templates/heat-cfn-create-keystone-service.yaml delete mode 100644 helm/microservice/heat-cfn-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/heat-cfn-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/heat-cfn-delete-keystone-service-job/templates/heat-delete-keystone-service.yaml delete mode 100755 helm/microservice/heat-create-db-job/Chart.yaml delete mode 100644 helm/microservice/heat-create-db-job/requirements.yaml delete mode 100755 helm/microservice/heat-create-db-job/templates/heat-create-db.yaml delete mode 100755 helm/microservice/heat-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/heat-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100755 helm/microservice/heat-create-keystone-endpoint-admin-job/templates/heat-create-keystone-endpoint-admin.yaml delete mode 100755 helm/microservice/heat-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/heat-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100755 helm/microservice/heat-create-keystone-endpoint-internal-job/templates/heat-create-keystone-endpoint-internal.yaml delete mode 100755 helm/microservice/heat-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 helm/microservice/heat-create-keystone-endpoint-public-job/requirements.yaml delete mode 100755 helm/microservice/heat-create-keystone-endpoint-public-job/templates/heat-create-keystone-endpoint-public.yaml 
delete mode 100755 helm/microservice/heat-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/heat-create-keystone-service-job/requirements.yaml delete mode 100755 helm/microservice/heat-create-keystone-service-job/templates/heat-create-keystone-service.yaml delete mode 100755 helm/microservice/heat-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/heat-create-keystone-user-job/requirements.yaml delete mode 100755 helm/microservice/heat-create-keystone-user-job/templates/heat_create_keystone_user.yaml delete mode 100644 helm/microservice/heat-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/heat-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/heat-delete-db-job/templates/heat-delete-db.yaml delete mode 100644 helm/microservice/heat-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/heat-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/heat-delete-keystone-service-job/templates/heat-delete-keystone-service.yaml delete mode 100644 helm/microservice/heat-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/heat-delete-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/heat-delete-keystone-user-job/templates/heat-delete-keystone-user.yaml delete mode 100644 helm/microservice/heat-engine-statefulset/Chart.yaml delete mode 100644 helm/microservice/heat-engine-statefulset/requirements.yaml delete mode 100644 helm/microservice/heat-engine-statefulset/templates/heat-engine-statefulset.yaml delete mode 100755 helm/microservice/heat-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/heat-manage-db-job/requirements.yaml delete mode 100755 helm/microservice/heat-manage-db-job/templates/heat-manage-db.yaml delete mode 100644 helm/microservice/helm-repo-deployment/Chart.yaml delete mode 100644 helm/microservice/helm-repo-deployment/requirements.yaml delete mode 100644 
helm/microservice/helm-repo-deployment/templates/helm-repo.yaml delete mode 100644 helm/microservice/helm-repo-pv/Chart.yaml delete mode 100644 helm/microservice/helm-repo-pv/requirements.yaml delete mode 100644 helm/microservice/helm-repo-pv/templates/helm-repo-pv.yaml delete mode 100644 helm/microservice/helm-repo-pvc/Chart.yaml delete mode 100644 helm/microservice/helm-repo-pvc/requirements.yaml delete mode 100644 helm/microservice/helm-repo-pvc/templates/helm-repo-pvc.yaml delete mode 100644 helm/microservice/helm-repo-svc/Chart.yaml delete mode 100644 helm/microservice/helm-repo-svc/requirements.yaml delete mode 100644 helm/microservice/helm-repo-svc/templates/helm-repo-svc.yaml delete mode 100755 helm/microservice/horizon-deployment/Chart.yaml delete mode 100644 helm/microservice/horizon-deployment/requirements.yaml delete mode 100644 helm/microservice/horizon-deployment/templates/horizon-api.yaml delete mode 100755 helm/microservice/horizon-svc/Chart.yaml delete mode 100644 helm/microservice/horizon-svc/requirements.yaml delete mode 100644 helm/microservice/horizon-svc/templates/horizon-svc.yaml delete mode 100644 helm/microservice/ironic-api-create-db-job/Chart.yaml delete mode 100644 helm/microservice/ironic-api-create-db-job/requirements.yaml delete mode 100644 helm/microservice/ironic-api-create-db-job/templates/ironic-api-create-db.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-admin-job/templates/ironic-api-create-keystone-endpoint-admin.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100644 
helm/microservice/ironic-api-create-keystone-endpoint-internal-job/templates/ironic-api-create-keystone-endpoint-internal.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-public-job/requirements.yaml delete mode 100644 helm/microservice/ironic-api-create-keystone-endpoint-public-job/templates/ironic-api-create-keystone-endpoint-public.yaml delete mode 100644 helm/microservice/ironic-api-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/ironic-api-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/ironic-api-delete-db-job/templates/ironic-api-delete-db.yaml delete mode 100644 helm/microservice/ironic-api-deployment/Chart.yaml delete mode 100644 helm/microservice/ironic-api-deployment/requirements.yaml delete mode 100644 helm/microservice/ironic-api-deployment/templates/ironic-api.yaml delete mode 100644 helm/microservice/ironic-api-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/ironic-api-manage-db-job/requirements.yaml delete mode 100644 helm/microservice/ironic-api-manage-db-job/templates/ironic-api-manage-db.yaml delete mode 100644 helm/microservice/ironic-api-svc/Chart.yaml delete mode 100644 helm/microservice/ironic-api-svc/requirements.yaml delete mode 100644 helm/microservice/ironic-api-svc/templates/ironic-api-svc.yaml delete mode 100644 helm/microservice/ironic-conductor-daemonset/Chart.yaml delete mode 100644 helm/microservice/ironic-conductor-daemonset/requirements.yaml delete mode 100644 helm/microservice/ironic-conductor-daemonset/templates/ironic-conductor.yaml delete mode 100644 helm/microservice/ironic-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/ironic-create-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/ironic-create-keystone-service-job/templates/ironic-create-keystone-service.yaml delete mode 100644 
helm/microservice/ironic-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/ironic-create-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/ironic-create-keystone-user-job/templates/ironic-create-keystone-user.yaml delete mode 100644 helm/microservice/ironic-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/ironic-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/ironic-delete-keystone-service-job/templates/ironic-delete-keystone-service.yaml delete mode 100644 helm/microservice/ironic-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/ironic-delete-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/ironic-delete-keystone-user-job/templates/ironic-delete-keystone-user.yaml delete mode 100644 helm/microservice/ironic-inspector-create-db-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-create-db-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-create-db-job/templates/ironic-inspector-create-db.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/templates/ironic-inspector-create-keystone-endpoint-admin.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/templates/ironic-inspector-create-keystone-endpoint-internal.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 
helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/templates/ironic-inspector-create-keystone-endpoint-public.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-service-job/templates/ironic-inspector-create-keystone-service.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-create-keystone-user-job/templates/ironic-inspector-create-keystone-user.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-db-job/templates/ironic-inspector-delete-db.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-keystone-service-job/templates/ironic-inspector-delete-keystone-service.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-delete-keystone-user-job/templates/ironic-inspector-delete-keystone-user.yaml delete mode 100644 helm/microservice/ironic-inspector-deployment/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-deployment/requirements.yaml delete mode 100644 
helm/microservice/ironic-inspector-deployment/templates/ironic-inspector.yaml delete mode 100644 helm/microservice/ironic-inspector-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-manage-db-job/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-manage-db-job/templates/ironic-inspector-manage-db.yaml delete mode 100644 helm/microservice/ironic-inspector-svc/Chart.yaml delete mode 100644 helm/microservice/ironic-inspector-svc/requirements.yaml delete mode 100644 helm/microservice/ironic-inspector-svc/templates/ironic-inspector-svc.yaml delete mode 100644 helm/microservice/iscsi-target-daemonset/Chart.yaml delete mode 100644 helm/microservice/iscsi-target-daemonset/requirements.yaml delete mode 100644 helm/microservice/iscsi-target-daemonset/templates/iscsi-target-daemonset.yaml delete mode 100644 helm/microservice/iscsid-daemonset/Chart.yaml delete mode 100644 helm/microservice/iscsid-daemonset/requirements.yaml delete mode 100644 helm/microservice/iscsid-daemonset/templates/iscsid-daemonset.yaml delete mode 100644 helm/microservice/keepalived-daemonset/Chart.yaml delete mode 100644 helm/microservice/keepalived-daemonset/requirements.yaml delete mode 100644 helm/microservice/keepalived-daemonset/templates/keepalived-daemonset.yaml delete mode 100644 helm/microservice/keystone-admin-svc/Chart.yaml delete mode 100644 helm/microservice/keystone-admin-svc/requirements.yaml delete mode 100644 helm/microservice/keystone-admin-svc/templates/keystone-admin-svc.yaml delete mode 100644 helm/microservice/keystone-api-deployment/Chart.yaml delete mode 100644 helm/microservice/keystone-api-deployment/requirements.yaml delete mode 100644 helm/microservice/keystone-api-deployment/templates/keystone-api.yaml delete mode 100644 helm/microservice/keystone-create-db-job/Chart.yaml delete mode 100644 helm/microservice/keystone-create-db-job/requirements.yaml delete mode 100644 
helm/microservice/keystone-create-db-job/templates/keystone-create-db.yaml delete mode 100644 helm/microservice/keystone-create-endpoints-job/Chart.yaml delete mode 100644 helm/microservice/keystone-create-endpoints-job/requirements.yaml delete mode 100644 helm/microservice/keystone-create-endpoints-job/templates/keystone-create-endpoints.yaml delete mode 100644 helm/microservice/keystone-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/keystone-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/keystone-delete-db-job/templates/keystone-delete-db.yaml delete mode 100644 helm/microservice/keystone-fernet-rotate-job/Chart.yaml delete mode 100644 helm/microservice/keystone-fernet-rotate-job/requirements.yaml delete mode 100644 helm/microservice/keystone-fernet-rotate-job/templates/keystone-fernet-rotate-job.yaml delete mode 100644 helm/microservice/keystone-fernet-setup-job/Chart.yaml delete mode 100644 helm/microservice/keystone-fernet-setup-job/requirements.yaml delete mode 100644 helm/microservice/keystone-fernet-setup-job/templates/keystone-fernet-setup-job.yaml delete mode 100644 helm/microservice/keystone-internal-svc/Chart.yaml delete mode 100644 helm/microservice/keystone-internal-svc/requirements.yaml delete mode 100644 helm/microservice/keystone-internal-svc/templates/keystone-internal-svc.yaml delete mode 100644 helm/microservice/keystone-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/keystone-manage-db-job/requirements.yaml delete mode 100644 helm/microservice/keystone-manage-db-job/templates/keystone-manage-db.yaml delete mode 100644 helm/microservice/keystone-public-svc/Chart.yaml delete mode 100644 helm/microservice/keystone-public-svc/requirements.yaml delete mode 100644 helm/microservice/keystone-public-svc/templates/keystone-public-svc.yaml delete mode 100755 helm/microservice/mariadb-init-element-job/Chart.yaml delete mode 100644 helm/microservice/mariadb-init-element-job/requirements.yaml delete mode 
100644 helm/microservice/mariadb-init-element-job/templates/mariadb-init-element.yaml delete mode 100755 helm/microservice/mariadb-pv/Chart.yaml delete mode 100644 helm/microservice/mariadb-pv/requirements.yaml delete mode 100644 helm/microservice/mariadb-pv/templates/mariadb-pv.yaml delete mode 100755 helm/microservice/mariadb-pvc/Chart.yaml delete mode 100644 helm/microservice/mariadb-pvc/requirements.yaml delete mode 100644 helm/microservice/mariadb-pvc/templates/mariadb-pvc.yaml delete mode 100755 helm/microservice/mariadb-statefulset/Chart.yaml delete mode 100644 helm/microservice/mariadb-statefulset/requirements.yaml delete mode 100644 helm/microservice/mariadb-statefulset/templates/mariadb-pod.yaml delete mode 100755 helm/microservice/mariadb-svc/Chart.yaml delete mode 100644 helm/microservice/mariadb-svc/requirements.yaml delete mode 100644 helm/microservice/mariadb-svc/templates/mariadb-svc.yaml delete mode 100644 helm/microservice/memcached-deployment/Chart.yaml delete mode 100644 helm/microservice/memcached-deployment/requirements.yaml delete mode 100644 helm/microservice/memcached-deployment/templates/memcached-deployment.yaml delete mode 100755 helm/microservice/memcached-svc/Chart.yaml delete mode 100644 helm/microservice/memcached-svc/requirements.yaml delete mode 100644 helm/microservice/memcached-svc/templates/memcached-svc.yaml delete mode 100644 helm/microservice/neutron-create-db-job/Chart.yaml delete mode 100644 helm/microservice/neutron-create-db-job/requirements.yaml delete mode 100644 helm/microservice/neutron-create-db-job/templates/neutron-create-db.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-admin-job/templates/neutron-create-keystone-endpoint-admin.yaml delete mode 100644 
helm/microservice/neutron-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-internal-job/templates/neutron-create-keystone-endpoint-internal.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-public-job/requirements.yaml delete mode 100644 helm/microservice/neutron-create-keystone-endpoint-public-job/templates/neutron-create-keystone-endpoint-public.yaml delete mode 100644 helm/microservice/neutron-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/neutron-create-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/neutron-create-keystone-service-job/templates/neutron-create-keystone-service.yaml delete mode 100644 helm/microservice/neutron-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/neutron-create-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/neutron-create-keystone-user-job/templates/neutron-create-keystone-user.yaml delete mode 100644 helm/microservice/neutron-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/neutron-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/neutron-delete-db-job/templates/neutron-delete-db.yaml delete mode 100644 helm/microservice/neutron-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/neutron-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/neutron-delete-keystone-service-job/templates/neutron-delete-keystone-service.yaml delete mode 100644 helm/microservice/neutron-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/neutron-delete-keystone-user-job/requirements.yaml delete mode 100644 
helm/microservice/neutron-delete-keystone-user-job/templates/neutron-delete-keystone-user.yaml delete mode 100644 helm/microservice/neutron-dhcp-agent-daemonset/Chart.yaml delete mode 100644 helm/microservice/neutron-dhcp-agent-daemonset/requirements.yaml delete mode 100644 helm/microservice/neutron-dhcp-agent-daemonset/templates/dhcp-agent-daemonset.yaml delete mode 100644 helm/microservice/neutron-l3-agent-daemonset/Chart.yaml delete mode 100644 helm/microservice/neutron-l3-agent-daemonset/requirements.yaml delete mode 100644 helm/microservice/neutron-l3-agent-daemonset/templates/l3-agent-daemonset.yaml delete mode 100644 helm/microservice/neutron-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/neutron-manage-db-job/requirements.yaml delete mode 100644 helm/microservice/neutron-manage-db-job/templates/neutron-manage-db.yaml delete mode 100644 helm/microservice/neutron-metadata-agent-daemonset/Chart.yaml delete mode 100644 helm/microservice/neutron-metadata-agent-daemonset/requirements.yaml delete mode 100644 helm/microservice/neutron-metadata-agent-daemonset/templates/metadata-agent-daemonset.yaml delete mode 100644 helm/microservice/neutron-openvswitch-agent-daemonset/Chart.yaml delete mode 100644 helm/microservice/neutron-openvswitch-agent-daemonset/requirements.yaml delete mode 100644 helm/microservice/neutron-openvswitch-agent-daemonset/templates/openvswitch-agent-daemonset.yaml delete mode 100644 helm/microservice/neutron-server-deployment/Chart.yaml delete mode 100644 helm/microservice/neutron-server-deployment/requirements.yaml delete mode 100644 helm/microservice/neutron-server-deployment/templates/neutron-server.yaml delete mode 100644 helm/microservice/neutron-server-svc/Chart.yaml delete mode 100644 helm/microservice/neutron-server-svc/requirements.yaml delete mode 100644 helm/microservice/neutron-server-svc/templates/neutron-server-svc.yaml delete mode 100644 helm/microservice/nova-api-cell-discover-host-job/Chart.yaml delete mode 100644 
helm/microservice/nova-api-cell-discover-host-job/requirements.yaml delete mode 100644 helm/microservice/nova-api-cell-discover-host-job/templates/nova-api-cell-discover-host.yaml delete mode 100644 helm/microservice/nova-api-create-db-job/Chart.yaml delete mode 100644 helm/microservice/nova-api-create-db-job/requirements.yaml delete mode 100644 helm/microservice/nova-api-create-db-job/templates/nova-api-create-db.yaml delete mode 100644 helm/microservice/nova-api-create-simple-cell-job/Chart.yaml delete mode 100644 helm/microservice/nova-api-create-simple-cell-job/requirements.yaml delete mode 100644 helm/microservice/nova-api-create-simple-cell-job/templates/nova-api-create-cell.yaml delete mode 100644 helm/microservice/nova-api-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/nova-api-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/nova-api-delete-db-job/templates/nova-api-delete-db.yaml delete mode 100644 helm/microservice/nova-api-deployment/Chart.yaml delete mode 100644 helm/microservice/nova-api-deployment/requirements.yaml delete mode 100644 helm/microservice/nova-api-deployment/templates/nova-api.yaml delete mode 100644 helm/microservice/nova-api-manage-db-job/Chart.yaml delete mode 100644 helm/microservice/nova-api-manage-db-job/requirements.yaml delete mode 100644 helm/microservice/nova-api-manage-db-job/templates/nova-api-manage-db.yaml delete mode 100644 helm/microservice/nova-api-svc/Chart.yaml delete mode 100644 helm/microservice/nova-api-svc/requirements.yaml delete mode 100644 helm/microservice/nova-api-svc/templates/nova-api-svc.yaml delete mode 100644 helm/microservice/nova-cell0-create-db-job/Chart.yaml delete mode 100644 helm/microservice/nova-cell0-create-db-job/requirements.yaml delete mode 100644 helm/microservice/nova-cell0-create-db-job/templates/nova-cell0-create-db.yaml delete mode 100644 helm/microservice/nova-cell0-delete-db-job/Chart.yaml delete mode 100644 
helm/microservice/nova-cell0-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/nova-cell0-delete-db-job/templates/nova-cell0-delete-db.yaml delete mode 100644 helm/microservice/nova-compute-daemonset/Chart.yaml delete mode 100644 helm/microservice/nova-compute-daemonset/requirements.yaml delete mode 100644 helm/microservice/nova-compute-daemonset/templates/nova-compute.yaml delete mode 100644 helm/microservice/nova-compute-ironic-statefulset/Chart.yaml delete mode 100644 helm/microservice/nova-compute-ironic-statefulset/requirements.yaml delete mode 100644 helm/microservice/nova-compute-ironic-statefulset/templates/nova-compute-ironic.yaml delete mode 100644 helm/microservice/nova-conductor-statefulset/Chart.yaml delete mode 100644 helm/microservice/nova-conductor-statefulset/requirements.yaml delete mode 100644 helm/microservice/nova-conductor-statefulset/templates/nova-conductor.yaml delete mode 100644 helm/microservice/nova-consoleauth-statefulset/Chart.yaml delete mode 100644 helm/microservice/nova-consoleauth-statefulset/requirements.yaml delete mode 100644 helm/microservice/nova-consoleauth-statefulset/templates/nova-consoleauth.yaml delete mode 100644 helm/microservice/nova-create-db-job/Chart.yaml delete mode 100644 helm/microservice/nova-create-db-job/requirements.yaml delete mode 100644 helm/microservice/nova-create-db-job/templates/nova-create-db.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-admin-job/templates/nova-create-keystone-endpoint-admin.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100644 
helm/microservice/nova-create-keystone-endpoint-internal-job/templates/nova-create-keystone-endpoint-internal.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-public-job/requirements.yaml delete mode 100644 helm/microservice/nova-create-keystone-endpoint-public-job/templates/nova-create-keystone-endpoint-public.yaml delete mode 100644 helm/microservice/nova-create-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/nova-create-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/nova-create-keystone-service-job/templates/nova-create-keystone-service.yaml delete mode 100644 helm/microservice/nova-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/nova-create-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/nova-create-keystone-user-job/templates/nova-create-keystone-user.yaml delete mode 100644 helm/microservice/nova-delete-db-job/Chart.yaml delete mode 100644 helm/microservice/nova-delete-db-job/requirements.yaml delete mode 100644 helm/microservice/nova-delete-db-job/templates/nova-delete-db.yaml delete mode 100644 helm/microservice/nova-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/nova-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/nova-delete-keystone-service-job/templates/nova-delete-keystone-service.yaml delete mode 100644 helm/microservice/nova-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/nova-delete-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/nova-delete-keystone-user-job/templates/nova-delete-keystone-user.yaml delete mode 100644 helm/microservice/nova-libvirt-daemonset/Chart.yaml delete mode 100644 helm/microservice/nova-libvirt-daemonset/requirements.yaml delete mode 100644 
helm/microservice/nova-libvirt-daemonset/templates/nova-libvirt.yaml delete mode 100644 helm/microservice/nova-metadata-svc/Chart.yaml delete mode 100644 helm/microservice/nova-metadata-svc/requirements.yaml delete mode 100644 helm/microservice/nova-metadata-svc/templates/nova-metadata-svc.yaml delete mode 100644 helm/microservice/nova-novncproxy-deployment/Chart.yaml delete mode 100644 helm/microservice/nova-novncproxy-deployment/requirements.yaml delete mode 100644 helm/microservice/nova-novncproxy-deployment/templates/nova-novncproxy.yaml delete mode 100644 helm/microservice/nova-novncproxy-svc/Chart.yaml delete mode 100644 helm/microservice/nova-novncproxy-svc/requirements.yaml delete mode 100644 helm/microservice/nova-novncproxy-svc/templates/nova-novncproxy-svc.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-admin-job/Chart.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-admin-job/requirements.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-admin-job/templates/nova-placement-create-keystone-endpoint-admin.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-internal-job/Chart.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-internal-job/requirements.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-internal-job/templates/nova-placement-create-keystone-endpoint-internal.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-public-job/Chart.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-public-job/requirements.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-endpoint-public-job/templates/nova-placement-create-keystone-endpoint-public.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-service-job/Chart.yaml delete mode 100644 
helm/microservice/nova-placement-create-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-service-job/templates/nova-placement-create-keystone-service.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/nova-placement-create-keystone-user-job/templates/nova-placement-create-keystone-user.yaml delete mode 100644 helm/microservice/nova-placement-delete-keystone-service-job/Chart.yaml delete mode 100644 helm/microservice/nova-placement-delete-keystone-service-job/requirements.yaml delete mode 100644 helm/microservice/nova-placement-delete-keystone-service-job/templates/nova-placement-delete-keystone-service.yaml delete mode 100644 helm/microservice/nova-placement-delete-keystone-user-job/Chart.yaml delete mode 100644 helm/microservice/nova-placement-delete-keystone-user-job/requirements.yaml delete mode 100644 helm/microservice/nova-placement-delete-keystone-user-job/templates/nova-placement-delete-keystone-user.yaml delete mode 100644 helm/microservice/nova-placement-deployment/Chart.yaml delete mode 100644 helm/microservice/nova-placement-deployment/requirements.yaml delete mode 100644 helm/microservice/nova-placement-deployment/templates/nova-placement.yaml delete mode 100644 helm/microservice/nova-placement-svc/Chart.yaml delete mode 100644 helm/microservice/nova-placement-svc/requirements.yaml delete mode 100644 helm/microservice/nova-placement-svc/templates/nova-placement-svc.yaml delete mode 100644 helm/microservice/nova-scheduler-statefulset/Chart.yaml delete mode 100644 helm/microservice/nova-scheduler-statefulset/requirements.yaml delete mode 100644 helm/microservice/nova-scheduler-statefulset/templates/nova-scheduler.yaml delete mode 100644 helm/microservice/openvswitch-ovsdb-daemonset/Chart.yaml delete mode 100644 
helm/microservice/openvswitch-ovsdb-daemonset/requirements.yaml delete mode 100644 helm/microservice/openvswitch-ovsdb-daemonset/templates/openvswitch-ovsdb-daemonset.yaml delete mode 100644 helm/microservice/openvswitch-vswitchd-daemonset/Chart.yaml delete mode 100644 helm/microservice/openvswitch-vswitchd-daemonset/requirements.yaml delete mode 100644 helm/microservice/openvswitch-vswitchd-daemonset/templates/openvswitch-vswitchd-daemonset.yaml delete mode 100755 helm/microservice/rabbitmq-init-element-job/Chart.yaml delete mode 100644 helm/microservice/rabbitmq-init-element-job/requirements.yaml delete mode 100644 helm/microservice/rabbitmq-init-element-job/templates/rabbitmq-init-element.yaml delete mode 100755 helm/microservice/rabbitmq-pv/Chart.yaml delete mode 100644 helm/microservice/rabbitmq-pv/requirements.yaml delete mode 100644 helm/microservice/rabbitmq-pv/templates/rabbitmq-pv.yaml delete mode 100755 helm/microservice/rabbitmq-pvc/Chart.yaml delete mode 100644 helm/microservice/rabbitmq-pvc/requirements.yaml delete mode 100644 helm/microservice/rabbitmq-pvc/templates/rabbitmq-pvc.yaml delete mode 100755 helm/microservice/rabbitmq-statefulset/Chart.yaml delete mode 100644 helm/microservice/rabbitmq-statefulset/requirements.yaml delete mode 100644 helm/microservice/rabbitmq-statefulset/templates/rabbitmq-pod.yaml delete mode 100755 helm/microservice/rabbitmq-svc/Chart.yaml delete mode 100644 helm/microservice/rabbitmq-svc/requirements.yaml delete mode 100644 helm/microservice/rabbitmq-svc/templates/rabbitmq-mgmt-svc.yaml delete mode 100644 helm/microservice/rabbitmq-svc/templates/rabbitmq-svc.yaml delete mode 100644 helm/microservice/registry-deployment/Chart.yaml delete mode 100644 helm/microservice/registry-deployment/templates/registry.yaml delete mode 100644 helm/microservice/registry-deployment/values.yaml delete mode 100644 helm/microservice/test-ceph-init-mon-job/Chart.yaml delete mode 100644 
helm/microservice/test-ceph-init-mon-job/requirements.yaml delete mode 100644 helm/microservice/test-ceph-init-mon-job/templates/test_ceph_init_mon_job.yaml delete mode 100644 helm/microservice/test-ceph-init-osd-job/Chart.yaml delete mode 100644 helm/microservice/test-ceph-init-osd-job/requirements.yaml delete mode 100644 helm/microservice/test-ceph-init-osd-job/templates/test-ceph-init-osd-job.yaml delete mode 100644 helm/microservice/test-ceph-mon-daemonset/Chart.yaml delete mode 100644 helm/microservice/test-ceph-mon-daemonset/requirements.yaml delete mode 100644 helm/microservice/test-ceph-mon-daemonset/templates/test_ceph_mon_daemonset.yaml delete mode 100644 helm/microservice/test-ceph-osd-pod/Chart.yaml delete mode 100644 helm/microservice/test-ceph-osd-pod/requirements.yaml delete mode 100644 helm/microservice/test-ceph-osd-pod/templates/test-ceph-osd-pod.yaml delete mode 100644 helm/microservice/tgtd-daemonset/Chart.yaml delete mode 100644 helm/microservice/tgtd-daemonset/requirements.yaml delete mode 100644 helm/microservice/tgtd-daemonset/templates/tgtd-daemonset.yaml delete mode 100644 helm/service/cinder-cleanup/Chart.yaml delete mode 100644 helm/service/cinder-cleanup/requirements.yaml delete mode 100644 helm/service/cinder-cleanup/values.yaml delete mode 100644 helm/service/cinder-control/Chart.yaml delete mode 100644 helm/service/cinder-control/requirements.yaml delete mode 100644 helm/service/cinder-control/values.yaml delete mode 100644 helm/service/cinder-volume-lvm/Chart.yaml delete mode 100644 helm/service/cinder-volume-lvm/requirements.yaml delete mode 100644 helm/service/cinder-volume-lvm/values.yaml delete mode 100644 helm/service/glance-cleanup/Chart.yaml delete mode 100644 helm/service/glance-cleanup/requirements.yaml delete mode 100644 helm/service/glance-cleanup/values.yaml delete mode 100644 helm/service/glance/Chart.yaml delete mode 100644 helm/service/glance/requirements.yaml delete mode 100644 helm/service/glance/values.yaml delete 
mode 100644 helm/service/horizon/Chart.yaml delete mode 100644 helm/service/horizon/requirements.yaml delete mode 100644 helm/service/horizon/values.yaml delete mode 100644 helm/service/ironic/Chart.yaml delete mode 100644 helm/service/ironic/requirements.yaml delete mode 100644 helm/service/ironic/values.yaml delete mode 100644 helm/service/keystone-cleanup/Chart.yaml delete mode 100644 helm/service/keystone-cleanup/requirements.yaml delete mode 100644 helm/service/keystone/Chart.yaml delete mode 100644 helm/service/keystone/requirements.yaml delete mode 100644 helm/service/keystone/values.yaml delete mode 100755 helm/service/mariadb/Chart.yaml delete mode 100644 helm/service/mariadb/requirements.yaml delete mode 100644 helm/service/mariadb/values.yaml delete mode 100644 helm/service/memcached/Chart.yaml delete mode 100644 helm/service/memcached/requirements.yaml delete mode 100644 helm/service/memcached/values.yaml delete mode 100644 helm/service/neutron-cleanup/Chart.yaml delete mode 100644 helm/service/neutron-cleanup/requirements.yaml delete mode 100644 helm/service/neutron-cleanup/values.yaml delete mode 100644 helm/service/neutron/Chart.yaml delete mode 100644 helm/service/neutron/requirements.yaml delete mode 100644 helm/service/neutron/values.yaml delete mode 100644 helm/service/nova-cleanup/Chart.yaml delete mode 100644 helm/service/nova-cleanup/requirements.yaml delete mode 100644 helm/service/nova-cleanup/values.yaml delete mode 100644 helm/service/nova-compute-ironic/Chart.yaml delete mode 100644 helm/service/nova-compute-ironic/requirements.yaml delete mode 100644 helm/service/nova-compute-ironic/values.yaml delete mode 100644 helm/service/nova-compute/Chart.yaml delete mode 100644 helm/service/nova-compute/requirements.yaml delete mode 100644 helm/service/nova-compute/values.yaml delete mode 100644 helm/service/nova-control/Chart.yaml delete mode 100644 helm/service/nova-control/requirements.yaml delete mode 100644 
helm/service/nova-control/values.yaml delete mode 100755 helm/service/openvswitch/Chart.yaml delete mode 100644 helm/service/openvswitch/requirements.yaml delete mode 100644 helm/service/openvswitch/values.yaml delete mode 100755 helm/service/rabbitmq/Chart.yaml delete mode 100644 helm/service/rabbitmq/requirements.yaml delete mode 100644 helm/service/rabbitmq/values.yaml delete mode 100644 helm/test/devenv/templates/ceph-conf.yaml delete mode 100644 helm/test/devenv/templates/ceph-mon.yaml delete mode 100644 helm/test/devenv/templates/ceph-osd.yaml delete mode 100644 helm/test/devenv/templates/cinder-api-haproxy.yaml delete mode 100644 helm/test/devenv/templates/cinder-api.yaml delete mode 100644 helm/test/devenv/templates/cinder-backup.yaml delete mode 100644 helm/test/devenv/templates/cinder-scheduler.yaml delete mode 100644 helm/test/devenv/templates/cinder-volume.yaml delete mode 100644 helm/test/devenv/templates/glance-api-haproxy.yaml delete mode 100644 helm/test/devenv/templates/glance-api.yaml delete mode 100644 helm/test/devenv/templates/glance-registry-haproxy.yaml delete mode 100644 helm/test/devenv/templates/glance-registry.yaml delete mode 100644 helm/test/devenv/templates/horizon.yaml delete mode 100644 helm/test/devenv/templates/keepalived.yaml delete mode 100644 helm/test/devenv/templates/keystone.yaml delete mode 100644 helm/test/devenv/templates/mariadb.yaml delete mode 100644 helm/test/devenv/templates/memcached.yaml delete mode 100644 helm/test/devenv/templates/neutron-dhcp-agent.yaml delete mode 100644 helm/test/devenv/templates/neutron-l3-agent.yaml delete mode 100644 helm/test/devenv/templates/neutron-metadata-agent.yaml delete mode 100644 helm/test/devenv/templates/neutron-openvswitch-agent.yaml delete mode 100644 helm/test/devenv/templates/neutron-server-haproxy.yaml delete mode 100644 helm/test/devenv/templates/neutron-server.yaml delete mode 100644 helm/test/devenv/templates/nova-api-haproxy.yaml delete mode 100644 
helm/test/devenv/templates/nova-api.yaml delete mode 100644 helm/test/devenv/templates/nova-compute.yaml delete mode 100644 helm/test/devenv/templates/nova-conductor.yaml delete mode 100644 helm/test/devenv/templates/nova-consoleauth.yaml delete mode 100644 helm/test/devenv/templates/nova-libvirt.yaml delete mode 100644 helm/test/devenv/templates/nova-novncproxy-haproxy.yaml delete mode 100644 helm/test/devenv/templates/nova-novncproxy.yaml delete mode 100644 helm/test/devenv/templates/nova-scheduler.yaml delete mode 100644 helm/test/devenv/templates/openvswitch-db-server.yaml delete mode 100644 helm/test/devenv/templates/openvswitch-vswitchd.yaml delete mode 100644 helm/test/devenv/templates/rabbitmq.yaml delete mode 100644 helm/test/devenv/templates/resolv-conf.yaml delete mode 100644 helm/test/selenium/Chart.yaml delete mode 100644 helm/test/selenium/templates/selenium-hub-deployment.yaml delete mode 100644 helm/test/selenium/templates/selenium-hub-svc.yaml delete mode 100644 helm/test/selenium/templates/selenium-node-firefox.yaml delete mode 100644 kolla_kubernetes/__init__.py delete mode 100755 kolla_kubernetes/app.py delete mode 100644 kolla_kubernetes/commands/__init__.py delete mode 100644 kolla_kubernetes/commands/base_command.py delete mode 100644 kolla_kubernetes/commands/cmd_resource.py delete mode 100644 kolla_kubernetes/commands/cmd_service.py delete mode 100755 kolla_kubernetes/commands/genpwd.py delete mode 100644 kolla_kubernetes/exception.py delete mode 100644 kolla_kubernetes/kube_service_status.py delete mode 100644 kolla_kubernetes/pathfinder.py delete mode 100644 kolla_kubernetes/service_resources.py delete mode 100644 kolla_kubernetes/tests/__init__.py delete mode 100644 kolla_kubernetes/tests/base.py delete mode 100644 kolla_kubernetes/tests/test_helm_templates.py delete mode 100644 kolla_kubernetes/tests/test_pathfinder.py delete mode 100644 kolla_kubernetes/tests/test_templates.py delete mode 100644 kolla_kubernetes/tests/test_utils.py 
delete mode 100644 kolla_kubernetes/utils.py delete mode 100644 kolla_kubernetes/version.py delete mode 100644 orchestration/README.md delete mode 100644 orchestration/deploy.yml delete mode 100644 orchestration/roles/kolla-controller/tasks/kolla-config.yml delete mode 100644 orchestration/roles/kolla-controller/tasks/kolla-deploy.yml delete mode 100644 orchestration/roles/kolla-controller/tasks/main.yml delete mode 100644 orchestration/roles/kolla-controller/templates/cloud.yaml delete mode 100644 orchestration/roles/kolla-controller/templates/globals_config.j2 delete mode 100644 releasenotes/notes/.placeholder delete mode 100644 releasenotes/notes/cinder_control_service-7eff1740903ad8ba.yaml delete mode 100644 releasenotes/notes/compute_kit_iscsi-897b109ecdd2648d.yaml delete mode 100644 releasenotes/notes/consolidated-notes-0.4.0-18fcedafcfeb1647.yaml delete mode 100644 releasenotes/notes/destroy-workflow-0efbaa5a78822925.yaml delete mode 100644 releasenotes/notes/enable_placement_api-2690cfdc6e3b612b.yaml delete mode 100644 releasenotes/notes/fernet-token-support-abc0c9b496bd65e1.yaml delete mode 100644 releasenotes/notes/horizon-service-package-7801a17f287ba5f9.yaml delete mode 100644 releasenotes/notes/image-pull-4fc22fd41caf5904.yaml delete mode 100644 releasenotes/notes/k8s-devenv-36777f058cf2229c.yaml delete mode 100644 releasenotes/notes/keepalived-81c457d84c5910c5.yaml delete mode 100644 releasenotes/notes/keystone_service-1e9717d09e63de03.yaml delete mode 100644 releasenotes/notes/mariadb-service-deployment-ba8063510b78ef49.yaml delete mode 100644 releasenotes/notes/memcached-service-package-cdacd9315cfb3d2e.yaml delete mode 100644 releasenotes/notes/move-confs-to-kolla-k8s-e735bd379b17a494.yaml delete mode 100644 releasenotes/notes/neutron-service-package-9c170d2caaabcf24.yaml delete mode 100644 releasenotes/notes/nova-compute-service-package-2ffe16ecc27d9501.yaml delete mode 100644 releasenotes/notes/nova-control-service-package-58abbed9faf1997e.yaml 
delete mode 100644 releasenotes/notes/prometheus-3937e3b8a8d85019.yaml delete mode 100644 releasenotes/notes/rabbitmq_service_package-0ffba95048d24028.yaml delete mode 100644 releasenotes/notes/selenium-d71bf318b83556a4.yaml delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/_templates/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 requirements.txt delete mode 100644 services/WARNING.rst delete mode 100644 services/ceph/ceph-admin-pod.yml.j2 delete mode 100644 services/ceph/ceph-bootstrap-initial-mon.yml.j2 delete mode 100644 services/ceph/ceph-bootstrap-osd.yml.j2 delete mode 100644 services/ceph/ceph-mon-pod.yml.j2 delete mode 100644 services/ceph/ceph-osd-pod.yml.j2 delete mode 100644 services/ceph/ceph-rbd-pod.yml.j2 delete mode 100644 services/ceph/ceph-secret.yml.j2 delete mode 100644 services/common/api-haproxy-configmap.yml.j2 delete mode 100644 services/common/common-create-keystone-endpoint.yml.j2 delete mode 100644 services/common/common-create-keystone-user.yml.j2 delete mode 100755 services/common/common-deployment.yml.j2 delete mode 100644 services/common/common-disk.sh.j2 delete mode 100644 services/common/common-lib.yml.j2 delete mode 100644 services/common/common-pv.yml.j2 delete mode 100644 services/common/common-pvc.yml.j2 delete mode 100644 services/common/generic-service.yml.j2 delete mode 100644 services/common/logging-configmap.yml.j2 delete mode 100644 services/elasticsearch/elasticsearch-pod.yml.j2 delete mode 100644 services/keepalived/keepalived-configmap.yml.j2 delete mode 100644 services/keepalived/keepalived-daemonset.yml.j2 delete mode 100644 services/neutron/neutron-bootstrap-job-create-db.yml.j2 delete mode 100755 services/nova/nova-compute-pod.yml.j2 delete mode 100644 services/nova/nova-control-bootstrap-job-create-nova-api-db.yml.j2 delete mode 
100644 services/nova/nova-control-bootstrap-job-create-nova-db.yml.j2 delete mode 100755 services/nova/nova-control-conductor-pod.yml.j2 delete mode 100755 services/nova/nova-control-consoleauth-pod.yml.j2 delete mode 100644 services/nova/nova-control-scheduler-pod.yml.j2 delete mode 100644 services/nova/nova-libvirt-pod.yml.j2 delete mode 100644 services/nova/nova-libvirt-secret.yml.j2 delete mode 100644 services/openvswitch/openvswitch-ovsdb-daemonset.yml.j2 delete mode 100644 services/openvswitch/openvswitch-set-external-ip-job.yml.j2 delete mode 100644 services/openvswitch/openvswitch-vswitchd-daemonset.yml.j2 delete mode 100644 services/swift/swift-account-pod.yml.j2 delete mode 100644 services/swift/swift-account-service.yml.j2 delete mode 100644 services/swift/swift-container-pod.yml.j2 delete mode 100644 services/swift/swift-container-service.yml.j2 delete mode 100644 services/swift/swift-object-pod.yml.j2 delete mode 100644 services/swift/swift-object-service.yml.j2 delete mode 100644 services/swift/swift-proxy-pod.yml.j2 delete mode 100644 services/swift/swift-proxy-service.yml.j2 delete mode 100644 services/swift/swift-rsync-service.yml.j2 delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 specs/README.rst delete mode 100644 specs/ansible-deployment.rst delete mode 100644 specs/kolla-kubernetes-arch.rst delete mode 100644 test-requirements.txt delete mode 100755 tests/bin/basic_tests.sh delete mode 100755 tests/bin/build_docker_images.sh delete mode 100755 tests/bin/build_test_ceph.sh delete mode 100755 tests/bin/ceph_workflow.sh delete mode 100755 tests/bin/ceph_workflow_service.sh delete mode 100755 tests/bin/cleanup_tests.sh delete mode 100755 tests/bin/common_ceph_config.sh delete mode 100755 tests/bin/common_iscsi_config.sh delete mode 100755 tests/bin/common_iscsi_config_v4.sh delete mode 100755 tests/bin/common_iscsi_config_v5.sh delete mode 100755 tests/bin/common_workflow_config.sh delete mode 100755 
tests/bin/deploy_compute_kit.sh delete mode 100755 tests/bin/deploy_ironic.sh delete mode 100755 tests/bin/deploy_iscsi_common.sh delete mode 100755 tests/bin/destroy_tests.sh delete mode 100755 tests/bin/endpoint_test.sh delete mode 100755 tests/bin/fix_gate_iptables.sh delete mode 100755 tests/bin/gate_capture_logs.sh delete mode 100755 tests/bin/horizon_test.py delete mode 100755 tests/bin/horizon_test.sh delete mode 100755 tests/bin/ironic_deploy_tests.sh delete mode 100755 tests/bin/iscsi_generic_workflow.sh delete mode 100755 tests/bin/iscsi_ironic_workflow.sh delete mode 100755 tests/bin/prometheus_tests.sh delete mode 100755 tests/bin/setup_canal.sh delete mode 100755 tests/bin/setup_config.sh delete mode 100755 tests/bin/setup_config_iscsi.sh delete mode 100755 tests/bin/setup_gate_loopback.sh delete mode 100755 tests/bin/setup_gate_loopback_lvm.sh delete mode 100644 tests/conf/ceph-all-in-one/kolla_config delete mode 100644 tests/conf/ceph-all-in-one/kolla_kubernetes_config delete mode 100644 tests/conf/ironic/vm-1.xml delete mode 100644 tests/conf/iscsi-all-in-one/kolla_config delete mode 100644 tests/conf/iscsi-all-in-one/kolla_kubernetes_config delete mode 100644 tests/conf/iscsid-tgtd-configmap.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-multi/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-multi/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-reboot/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-reboot/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-external-ovs/post.yaml delete mode 100644 
tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-external-ovs/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-operator/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-operator/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-iscsi/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-iscsi/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-3-ceph-multi/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-3-ceph-multi/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-ceph-multi/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-ceph-multi/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-t-ceph-multi/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-t-ceph-multi/run.yaml delete mode 100644 
tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-compute-kit/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-compute-kit/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-entrypoint/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-entrypoint/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-ironic/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-ironic/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-microchart-ansible/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-microchart-ansible/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-t-iscsi/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-t-iscsi/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-ceph/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-ceph/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-iscsi/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-iscsi/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-ironic/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-ironic/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-t-iscsi/post.yaml delete mode 100644 
tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-t-iscsi/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-2-ceph/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-2-ceph/run.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-3-ceph/post.yaml delete mode 100644 tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-3-ceph/run.yaml delete mode 100644 tools/Dockerfile delete mode 100755 tools/build_dev_image.sh delete mode 100755 tools/build_example_yaml.py delete mode 100644 tools/build_helm_templates.sh delete mode 100755 tools/build_local_admin_keystonerc.sh delete mode 100755 tools/cleanup-k8s.sh delete mode 100755 tools/fix-mitaka-config.py delete mode 120000 tools/generate_passwords.py delete mode 100755 tools/get_arch.sh delete mode 100755 tools/helm_build_all.sh delete mode 100755 tools/helm_build_compute_kits.py delete mode 100755 tools/helm_build_microservices.py delete mode 100755 tools/helm_build_services.py delete mode 100755 tools/helm_buildrepo.sh delete mode 100755 tools/helm_prebuild_compute_kits.py delete mode 100755 tools/helm_prebuild_microservices.py delete mode 100755 tools/helm_prebuild_services.py delete mode 100644 tools/helm_versions.sh delete mode 120000 tools/kolla-kubernetes delete mode 100755 tools/pull_containers.sh delete mode 100755 tools/run_dev_image.sh delete mode 100755 tools/secret-generator.py delete mode 100755 tools/setup-ceph-secrets.sh delete mode 100755 tools/setup-kubectl.sh delete mode 100755 tools/setup_dev_env.sh delete mode 100755 tools/setup_gate.sh delete mode 100755 tools/setup_gate_ceph.sh delete mode 100755 tools/setup_gate_common.sh delete mode 100755 tools/setup_gate_iscsi.sh delete mode 100755 tools/setup_helm.sh delete mode 100755 tools/setup_kube_AIO.sh delete mode 100755 tools/setup_kubernetes.sh delete mode 100755 tools/setup_rbd_volumes.sh delete mode 100755 
tools/setup_registry.sh delete mode 100755 tools/setup_simple_ceph_users.sh delete mode 100644 tools/test-dns.yml delete mode 100755 tools/test.sh delete mode 100755 tools/test_kube_dns.sh delete mode 100755 tools/wait_for_kube_control_plane.sh delete mode 100755 tools/wait_for_pods.py delete mode 100755 tools/wait_for_pods.sh delete mode 100755 tools/wait_for_pods_termination.sh delete mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 7fffe7cd2..000000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = kolla_kubernetes -omit = kolla_kubernetes/tests/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index a2f48b039..000000000 --- a/.gitignore +++ /dev/null @@ -1,60 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# helm / chart -values.yaml -**/charts -helm/**/*.lock - -# Packages -*.egg* -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -cover/ -.coverage* -!.coveragerc -.tox -nosetests.xml -.testrepository -.venv - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6fe0..000000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 1b1a8e693..000000000 --- a/.testr.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-240} \ - ${PYTHON:-python} -m subunit.run discover -t ./ . 
$LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/.zuul.d/legacy.yaml b/.zuul.d/legacy.yaml deleted file mode 100644 index b056d220a..000000000 --- a/.zuul.d/legacy.yaml +++ /dev/null @@ -1,351 +0,0 @@ -- project: - check: - jobs: - - kolla-kubernetes-deploy-centos-binary-2-ceph-multi: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-2-ceph: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-2-external-ovs: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-2-iscsi: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-ubuntu-binary-2-iscsi: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-ubuntu-binary-2-ceph: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-3-ceph-multi: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-source-4-ironic: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible: - voting: false - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-source-4-microchart-ansible: - voting: false - irrelevant-files: - - ^.*\.rst$ - - 
^deploy-guide/source/.* - - ^doc/.* - experimental: - jobs: - - kolla-kubernetes-deploy-centos-binary-2-helm-operator: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-2-ceph-reboot: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-4-ceph-multi: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-source-4-helm-compute-kit: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-source-4-helm-entrypoint: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-ubuntu-source-4-ironic: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-upgrade-centos-binary-2-ceph: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-upgrade-centos-binary-3-ceph: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-binary-t-ceph-multi: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-centos-source-t-iscsi: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - - kolla-kubernetes-deploy-ubuntu-source-t-iscsi: - irrelevant-files: - - ^.*\.rst$ - - ^deploy-guide/source/.* - - ^doc/.* - -- job: - name: kolla-kubernetes-base - description: | - This job runs provides the base required projects for - legacy kolla-kubernetes jobs. 
- parent: legacy-base - required-projects: - - openstack/requirements - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-ceph - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-ceph-multi - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-multi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-multi/post.yaml - nodeset: legacy-centos-7-2-node - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-ceph-reboot - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-reboot/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-ceph-reboot/post.yaml - nodeset: legacy-centos-7-2-node - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-external-ovs - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-external-ovs/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-external-ovs/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-compute-kit/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint/run.yaml - post-run: 
tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-entrypoint/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-helm-operator - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-operator/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-helm-operator/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-2-iscsi - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-iscsi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-2-iscsi/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-3-ceph-multi - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-3-ceph-multi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-3-ceph-multi/post.yaml - nodeset: legacy-centos-7-2-node - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-4-ceph-multi - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-ceph-multi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-ceph-multi/post.yaml - nodeset: legacy-centos-7-2-node - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-compute-kit/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint - parent: kolla-kubernetes-base - run: 
tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-4-helm-entrypoint/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-binary-t-ceph-multi - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-t-ceph-multi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-binary-t-ceph-multi/post.yaml - nodeset: legacy-centos-7-2-node - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-source-4-helm-compute-kit - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-compute-kit/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-compute-kit/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-source-4-helm-entrypoint - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-entrypoint/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-helm-entrypoint/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-source-4-ironic - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-ironic/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-ironic/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-source-4-microchart-ansible - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-microchart-ansible/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-4-microchart-ansible/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-centos-source-t-iscsi - 
parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-t-iscsi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-centos-source-t-iscsi/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-ubuntu-binary-2-ceph - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-ceph/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-ceph/post.yaml - nodeset: legacy-ubuntu-xenial - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-ubuntu-binary-2-iscsi - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-iscsi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-binary-2-iscsi/post.yaml - nodeset: legacy-ubuntu-xenial - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-ubuntu-source-4-ironic - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-ironic/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-ironic/post.yaml - nodeset: legacy-ubuntu-xenial - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-4-microchart-ansible/post.yaml - nodeset: legacy-ubuntu-xenial - timeout: 3600 - -- job: - name: kolla-kubernetes-deploy-ubuntu-source-t-iscsi - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-t-iscsi/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-deploy-ubuntu-source-t-iscsi/post.yaml - nodeset: legacy-ubuntu-xenial - timeout: 3600 - -- job: - name: kolla-kubernetes-upgrade-centos-binary-2-ceph - parent: kolla-kubernetes-base - run: 
tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-2-ceph/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-2-ceph/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 - -- job: - name: kolla-kubernetes-upgrade-centos-binary-3-ceph - parent: kolla-kubernetes-base - run: tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-3-ceph/run.yaml - post-run: tests/playbooks/legacy/kolla-kubernetes-upgrade-centos-binary-3-ceph/post.yaml - nodeset: legacy-centos-7 - timeout: 3600 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 3fc13eaef..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,17 +0,0 @@ -If you would like to contribute to the development of OpenStack, you must -follow the steps in this page: - - http://docs.openstack.org/infra/manual/developers.html - -If you already have a good understanding of how the system works and your -OpenStack accounts are set up, you can skip to the development workflow -section of this documentation to learn how changes to OpenStack should be -submitted for review via the Gerrit tool: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. 
- -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/kolla-kubernetes diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 54d7c0123..000000000 --- a/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -FROM ubuntu:16.04 - -RUN apt-get update && apt-get -y install python-dev curl libffi-dev gcc libssl-dev sshpass wget crudini git vim -RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py \ - && python get-pip.py \ - && rm get-pip.py -RUN pip install ansible==2.2.* oslo_config - -ENV HELM_LATEST_VERSION="v2.7.2" -ENV KUBE_LATEST_VERSION="v1.8.4" - -RUN wget http://storage.googleapis.com/kubernetes-helm/helm-${HELM_LATEST_VERSION}-linux-amd64.tar.gz \ - && tar -xvf helm-${HELM_LATEST_VERSION}-linux-amd64.tar.gz \ - && mv linux-amd64/helm /usr/local/bin \ - && rm -f /helm-${HELM_LATEST_VERSION}-linux-amd64.tar.gz -RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \ - && chmod +x /usr/local/bin/kubectl - -ADD . /kolla-kubernetes - -RUN pip install -U /kolla-kubernetes/ -RUN cp -a /kolla-kubernetes/etc/* /etc diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 48a0a1502..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -kolla-kubernetes Style Commandments -=============================================== - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a09..000000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- diff --git a/README.rst b/README.rst index 6f8ff94c6..25d66c3bb 100644 --- a/README.rst +++ b/README.rst @@ -1,83 +1,13 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. image:: http://governance.openstack.org/badges/kolla-kubernetes.svg - :target: http://governance.openstack.org/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout 22ed0c232d7666afb6e288001b8814deea664992". -.. Change things from this point on +For an alternative, consider checking the project at +http://github.com/openstack/openstack-helm. - -============== -Kolla Overview -============== - -The Kolla project is a member of the OpenStack `Big Tent -Governance `__. - -Kolla's mission statement is: - -:: - - To provide production-ready containers and deployment tools for operating - OpenStack clouds. - -================ -kolla-kubernetes -================ - -The kolla-kubernetes deliverable deploys OpenStack on top of Kubernetes. -This work is experimental at this time. A 1.0.0 version or later signals -this work is ready for evaluation. - -The kolla-kubernetes Repository -=============================== - -The kolla-kubernetes repository is one of three deliverables of the -OpenStack Kolla project. 
The three deliverables that make up the Kolla -project are: - -================ ========================================================= -Deliverable Repository -================ ========================================================= -kolla https://git.openstack.org/openstack/kolla -kolla-ansible https://git.openstack.org/openstack/kolla-ansible -kolla-kubernetes https://git.openstack.org/openstack/kolla-kubernetes -================ ========================================================= - -The kolla deliverable maintains container images and container build tools. - -The kolla-ansible deliverable maintains an Ansible deployment for Kolla -containers on bare metal. - -The kolla-kubernetes deliverable maintains a cloud-native implementation -of deployment of OpenStack on Kubernetes. - -Getting Involved -================ - -Need a feature? Find a bug? Let us know! Contributions are much -appreciated and should follow the standard `Gerrit -workflow `__. - -- We communicate using the #openstack-kolla irc channel. -- File bugs, blueprints, track releases, etc on - `Launchpad `__. -- Attend weekly - `meetings `__. -- Contribute `code `__. - -General Information -=================== - -* Free software: Apache license -* Documentation: http://docs.openstack.org/developer/kolla-kubernetes -* Source: http://git.openstack.org/cgit/openstack/kolla-kubernetes -* Bugs: http://bugs.launchpad.net/kolla-kubernetes - -Notices -======= - -Docker and the Docker logo are trademarks or registered trademarks of -Docker, Inc. in the United States and/or other countries. Docker, Inc. -and other parties may also have trademark rights in other terms used herein. +For any further questions, please email +openstack-dev@lists.openstack.org with the tagline [kolla] or join +#openstack-kolla on Freenode. 
diff --git a/ansible/action_plugins/merge_configs.py b/ansible/action_plugins/merge_configs.py deleted file mode 100644 index 507774901..000000000 --- a/ansible/action_plugins/merge_configs.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2015 Sam Yaple -# Copyright 2017 99Cloud Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import collections -import inspect -import os - -from ansible.plugins import action -from six import StringIO - -from oslo_config import iniparser - - -class OverrideConfigParser(iniparser.BaseParser): - - def __init__(self): - self._cur_sections = collections.OrderedDict() - self._sections = collections.OrderedDict() - self._cur_section = None - - def assignment(self, key, value): - cur_value = self._cur_section.get(key) - if len(value) == 1 and value[0] == '': - value = [] - if not cur_value: - self._cur_section[key] = [value] - else: - self._cur_section[key].append(value) - - def parse(self, lineiter): - self._cur_sections = collections.OrderedDict() - super(OverrideConfigParser, self).parse(lineiter) - - # merge _cur_sections into _sections - for section, values in self._cur_sections.items(): - if section not in self._sections: - self._sections[section] = collections.OrderedDict() - for key, value in values.items(): - self._sections[section][key] = value - - def new_section(self, section): - cur_section = self._cur_sections.get(section) - if not cur_section: - cur_section = collections.OrderedDict() - 
self._cur_sections[section] = cur_section - self._cur_section = cur_section - return cur_section - - def write(self, fp): - def write_key_value(key, values): - for v in values: - if not v: - fp.write('{} =\n'.format(key)) - for index, value in enumerate(v): - if index == 0: - fp.write('{} = {}\n'.format(key, value)) - else: - fp.write('{} {}\n'.format(len(key) * ' ', value)) - - def write_section(section): - for key, values in section.items(): - write_key_value(key, values) - - for section in self._sections: - fp.write('[{}]\n'.format(section)) - write_section(self._sections[section]) - fp.write('\n') - - -class ActionModule(action.ActionBase): - - TRANSFERS_FILES = True - - def read_config(self, source, config): - # Only use config if present - if os.access(source, os.R_OK): - with open(source, 'r') as f: - template_data = f.read() - result = self._templar.template(template_data) - fakefile = StringIO(result) - config.parse(fakefile) - fakefile.close() - - def run(self, tmp=None, task_vars=None): - - if task_vars is None: - task_vars = dict() - result = super(ActionModule, self).run(tmp, task_vars) - - # NOTE(jeffrey4l): Ansible 2.1 add a remote_user param to the - # _make_tmp_path function. inspect the number of the args here. 
In - # this way, ansible 2.0 and ansible 2.1 are both supported - make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0] - if not tmp and len(make_tmp_path_args) == 1: - tmp = self._make_tmp_path() - if not tmp and len(make_tmp_path_args) == 2: - remote_user = (task_vars.get('ansible_user') - or self._play_context.remote_user) - tmp = self._make_tmp_path(remote_user) - - sources = self._task.args.get('sources', None) - extra_vars = self._task.args.get('vars', list()) - - if not isinstance(sources, list): - sources = [sources] - - temp_vars = task_vars.copy() - temp_vars.update(extra_vars) - - config = OverrideConfigParser() - old_vars = self._templar._available_variables - self._templar.set_available_variables(temp_vars) - - for source in sources: - self.read_config(source, config) - - self._templar.set_available_variables(old_vars) - # Dump configparser to string via an emulated file - - fakefile = StringIO() - config.write(fakefile) - - remote_path = self._connection._shell.join_path(tmp, 'src') - xfered = self._transfer_data(remote_path, fakefile.getvalue()) - fakefile.close() - - new_module_args = self._task.args.copy() - new_module_args.pop('vars', None) - new_module_args.pop('sources', None) - - new_module_args.update( - dict( - src=xfered - ) - ) - - result.update(self._execute_module(module_name='copy', - module_args=new_module_args, - task_vars=task_vars, - tmp=tmp)) - return result diff --git a/ansible/action_plugins/merge_yaml.py b/ansible/action_plugins/merge_yaml.py deleted file mode 100755 index 6ad232af8..000000000 --- a/ansible/action_plugins/merge_yaml.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2015 Sam Yaple -# Copyright 2016 intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import os - -from yaml import dump -from yaml import safe_load -try: - from yaml import CDumper as Dumper # noqa: F401 - from yaml import CLoader as Loader # noqa: F401 -except ImportError: - from yaml import Dumper # noqa: F401 - from yaml import Loader # noqa: F401 - - -from ansible.plugins import action - - -class ActionModule(action.ActionBase): - - TRANSFERS_FILES = True - - def read_config(self, source): - result = None - # Only use config if present - if os.access(source, os.R_OK): - with open(source, 'r') as f: - template_data = f.read() - template_data = self._templar.template(template_data) - result = safe_load(template_data) - return result or {} - - def run(self, tmp=None, task_vars=None): - if task_vars is None: - task_vars = dict() - result = super(ActionModule, self).run(tmp, task_vars) - - # NOTE(jeffrey4l): Ansible 2.1 add a remote_user param to the - # _make_tmp_path function. inspect the number of the args here. In - # this way, ansible 2.0 and ansible 2.1 are both supported - make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0] - if not tmp and len(make_tmp_path_args) == 1: - tmp = self._make_tmp_path() - if not tmp and len(make_tmp_path_args) == 2: - remote_user = (task_vars.get('ansible_user') - or self._play_context.remote_user) - tmp = self._make_tmp_path(remote_user) - # save template args. 
- extra_vars = self._task.args.get('vars', list()) - old_vars = self._templar._available_variables - - temp_vars = task_vars.copy() - temp_vars.update(extra_vars) - self._templar.set_available_variables(temp_vars) - - output = {} - sources = self._task.args.get('sources', None) - if not isinstance(sources, list): - sources = [sources] - for source in sources: - output.update(self.read_config(source)) - - # restore original vars - self._templar.set_available_variables(old_vars) - - remote_path = self._connection._shell.join_path(tmp, 'src') - xfered = self._transfer_data(remote_path, - dump(output, - default_flow_style=False)) - new_module_args = self._task.args.copy() - new_module_args.update( - dict( - src=xfered - ) - ) - del new_module_args['sources'] - result.update(self._execute_module(module_name='copy', - module_args=new_module_args, - task_vars=task_vars, - tmp=tmp)) - return result diff --git a/ansible/destroy.yml b/ansible/destroy.yml deleted file mode 100644 index 527d46788..000000000 --- a/ansible/destroy.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Destroy the existing Kolla-Kubernetes deployment - hosts: localhost - connection: local - roles: - - destroy diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml deleted file mode 100644 index 806aff1fc..000000000 --- a/ansible/group_vars/all.yml +++ /dev/null @@ -1,559 +0,0 @@ ---- -# The options in this file can be overridden in 'globals.yml' - -# The "temp" files that are created before merge need to stay persistent due -# to the fact that ansible will register a "change" if it has to create them -# again. 
Persistent files allow for idempotency -container_config_directory: "/var/lib/kolla/config_files" - -# The directory to merge custom config files the kolla's config files -node_custom_config: "/etc/kolla/config" - -# The project to generate configuration files for -project: "" - -# The directory to store the config files on the destination node -node_config_directory: "/etc/kolla/{{ project }}" - - -################### -# Kolla options -################### - -std_logger: true - -# Which orchestration engine to use. Valid options are [ ANSIBLE, KUBERNETES ] -orchestration_engine: "KUBERNETES" - -# Valid options are [ COPY_ONCE, COPY_ALWAYS ] -config_strategy: "COPY_ALWAYS" - -# Valid options are [ centos, oraclelinux, ubuntu ] -kolla_base_distro: "centos" -# Valid options are [ binary, source ] -kolla_install_type: "binary" - -kolla_internal_vip_address: "{{ kolla_internal_address }}" -kolla_internal_fqdn: "{{ kolla_internal_vip_address }}" -kolla_external_vip_address: "{{ kolla_internal_vip_address }}" -kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_external_vip_address == kolla_internal_vip_address else kolla_external_vip_address }}" - -kolla_enable_sanity_checks: "no" - -kolla_enable_sanity_keystone: "{{ kolla_enable_sanity_checks }}" -kolla_enable_sanity_glance: "{{ kolla_enable_sanity_checks }}" -kolla_enable_sanity_cinder: "{{ kolla_enable_sanity_checks }}" -kolla_enable_sanity_swift: "{{ kolla_enable_sanity_checks }}" - - -#################### -# kolla-kubernetes -#################### -# By default, Kolla API services bind to the network address assigned -# to the api_interface. Allow the bind address to be an override. In -# some cases (Kubernetes), the api_interface address is not known -# until container runtime, and thus it is necessary to bind to all -# interfaces "0.0.0.0". When used outside of Kubernetes, binding to -# all interfaces may present a security issue, and thus is not -# recommended. 
-api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}" - -################ -# Chrony options -################ -# A list contains ntp servers -external_ntp_servers: - - 0.pool.ntp.org - - 1.pool.ntp.org - - 2.pool.ntp.org - - 3.pool.ntp.org - -#################### -# Database options -#################### -database_address: "{{ kolla_internal_fqdn }}" -database_user: "root" -database_port: "3306" -keystone_database_address: "mariadb" -glance_database_address: "mariadb" -nova_database_address: "mariadb" -nova_api_database_address: "mariadb" -neutron_database_address: "mariadb" -cinder_database_address: "mariadb" -ironic_database_address: "mariadb" -placement_database_address: "mariadb" - -#################### -# Docker options -#################### -docker_registry_email: -docker_registry: -docker_namespace: "kolla" -docker_registry_username: - -# Valid options are [ never, on-failure, always, unless-stopped ] -docker_restart_policy: "unless-stopped" - -# '0' means unlimited retries -docker_restart_policy_retry: "10" - -# Common options used throughout Docker -docker_common_options: - auth_email: "{{ docker_registry_email }}" - auth_password: "{{ docker_registry_password }}" - auth_registry: "{{ docker_registry }}" - auth_username: "{{ docker_registry_username }}" - environment: - KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" - restart_policy: "{{ docker_restart_policy }}" - restart_retries: "{{ docker_restart_policy_retry }}" - - -#################### -# keepalived options -#################### -# Arbitrary unique number from 0..255 -keepalived_virtual_router_id: "51" - - -#################### -# Networking options -#################### -network_interface: "eth0" -neutron_external_interface: "eth1" -kolla_external_vip_interface: "{{ network_interface }}" -api_interface: "{{ network_interface }}" -storage_interface: "{{ network_interface }}" -cluster_interface: 
"{{ network_interface }}" -tunnel_interface: "{{ network_interface }}" -bifrost_network_interface: "{{ network_interface }}" -dns_interface: "{{ network_interface }}" -tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + tunnel_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}" - -# Valid options are [ openvswitch, linuxbridge, sfc ] -neutron_plugin_agent: "openvswitch" - -# The default ports used by each service. -aodh_api_port: "8042" - -barbican_api_port: "9311" - -ceilometer_api_port: "8777" - -congress_api_port: "1789" - -cloudkitty_api_port: "8889" - -designate_api_port: "9001" -designate_bind_port: "53" -designate_mdns_port: "5354" -designate_rndc_port: "953" - -freezer_api_port: "9090" - -iscsi_port: "3260" - -gnocchi_api_port: "8041" - -mariadb_port: "{{ database_port }}" -mariadb_wsrep_port: "4567" -mariadb_ist_port: "4568" -mariadb_sst_port: "4444" - -panko_api_port: "8977" - -rabbitmq_port: "5672" -rabbitmq_management_port: "15672" -rabbitmq_cluster_port: "25672" -rabbitmq_epmd_port: "4369" -rabbitmq_servers: "rabbitmq" - -mongodb_port: "27017" -mongodb_web_port: "28017" - -haproxy_stats_port: "1984" - -keystone_public_port: "5000" -keystone_admin_port: "35357" -keystone_ssh_port: "8023" - -glance_api_port: "9292" -glance_registry_port: "9191" - -octavia_api_port: "9876" -octavia_health_manager_port: "5555" - -placement_api_port: "8780" - -nova_api_port: "8774" -nova_metadata_port: "8775" -nova_novncproxy_port: "6080" -nova_spicehtml5proxy_port: "6082" -nova_serialproxy_port: "6083" - -neutron_server_port: "9696" - -cinder_api_port: "8776" - -memcached_servers: "memcached" -memcached_port: "11211" - -swift_proxy_server_port: "8080" -swift_object_server_port: "6000" -swift_account_server_port: "6001" -swift_container_server_port: "6002" -swift_rsync_port: "10873" - -sahara_api_port: "8386" - -heat_api_port: "8004" -heat_api_cfn_port: "8000" - -horizon_port: "80" - -murano_api_port: "8082" - 
-ironic_api_port: "6385" -ironic_inspector_port: "5050" - -magnum_api_port: "9511" - -solum_application_deployment_port: "9777" - -solum_image_builder_port: "9778" - -rgw_port: "6780" - -mistral_api_port: "8989" - -kibana_server_port: "5601" - -elasticsearch_port: "9200" - -manila_api_port: "8786" - -watcher_api_port: "9322" - -influxdb_admin_port: "8083" -influxdb_http_port: "8086" - -senlin_api_port: "8778" - -trove_api_port: "8779" - -etcd_client_port: "2379" -etcd_peer_port: "2380" - -karbor_api_port: "8799" - -kuryr_port: "23750" - -searchlight_api_port: "9393" - -grafana_server_port: "3000" - -tacker_server_port: "9890" - -fluentd_syslog_port: "5140" - -public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}" -internal_protocol: "http" -admin_protocol: "http" - -#################### -# OpenStack options -#################### -openstack_release: "auto" -openstack_logging_debug: "False" - -openstack_region_name: "RegionOne" - -# In the context of multi-regions, list here the name of all your regions. -multiple_regions_names: - - "{{ openstack_region_name }}" - -openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min if orchestration_engine == 'ANSIBLE' else '1'}}" - -# Optionally allow Kolla to set sysctl values -set_sysctl: "yes" - -# Valid options are [ novnc, spice ] -nova_console: "novnc" - -# OpenStack authentication string. You should only need to override these if you -# are changing the admin tenant/project or user. -openstack_auth: - auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}" - username: "admin" - password: "{{ keystone_admin_password }}" - project_name: "admin" - domain_name: "default" - -# These roles are required for Kolla to be operation, however a savvy deployer -# could disable some of these required roles and run their own services. 
-enable_glance: "yes" -enable_haproxy: "yes" -enable_keystone: "yes" -enable_mariadb: "yes" -enable_memcached: "yes" -enable_neutron: "yes" -enable_nova: "yes" -enable_rabbitmq: "yes" - -# Additional optional OpenStack features and services are specified here -enable_aodh: "no" -enable_barbican: "no" -enable_cadf_notifications: "no" -enable_ceilometer: "no" -enable_central_logging: "no" -enable_ceph: "no" -enable_ceph_rgw: "no" -enable_chrony: "no" -enable_cinder: "no" -enable_cinder_backend_hnas_iscsi: "no" -enable_cinder_backend_hnas_nfs: "no" -enable_cinder_backend_iscsi: "no" -enable_cinder_backend_lvm: "no" -enable_cinder_backend_nfs: "no" -enable_cloudkitty: "no" -enable_congress: "no" -enable_designate: "no" -enable_etcd: "no" -enable_freezer: "no" -enable_gnocchi: "no" -enable_grafana: "no" -enable_heat: "yes" -enable_horizon: "yes" -enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}" -enable_horizon_freezer: "{{ enable_freezer | bool }}" -enable_horizon_ironic: "{{ enable_ironic | bool }}" -enable_horizon_karbor: "{{ enable_karbor | bool }}" -enable_horizon_magnum: "{{ enable_magnum | bool }}" -enable_horizon_manila: "{{ enable_manila | bool }}" -enable_horizon_mistral: "{{ enable_mistral | bool }}" -enable_horizon_murano: "{{ enable_murano | bool }}" -enable_horizon_neutron_lbaas: "{{ enable_neutron_lbaas | bool }}" -enable_horizon_sahara: "{{ enable_sahara | bool }}" -enable_horizon_searchlight: "{{ enable_searchlight | bool }}" -enable_horizon_senlin: "{{ enable_senlin | bool }}" -enable_horizon_solum: "{{ enable_solum | bool }}" -enable_horizon_tacker: "{{ enable_tacker | bool }}" -enable_horizon_trove: "{{ enable_trove | bool }}" -enable_horizon_watcher: "{{ enable_watcher | bool }}" -enable_influxdb: "no" -enable_ironic: "no" -enable_iscsid: "{{ enable_cinder_backend_iscsi | bool or enable_cinder_backend_lvm | bool or enable_ironic | bool }}" -enable_karbor: "no" -enable_kuryr: "no" -enable_magnum: "no" -enable_manila: "no" 
-enable_manila_backend_generic: "no" -enable_manila_backend_hnas: "no" -enable_mistral: "no" -enable_mongodb: "no" -enable_multipathd: "no" -enable_murano: "no" -enable_neutron_vpnaas: "no" -enable_neutron_dvr: "no" -enable_neutron_lbaas: "no" -enable_neutron_fwaas: "no" -enable_neutron_qos: "no" -enable_neutron_agent_ha: "no" -enable_neutron_bgp_dragent: "no" -enable_nova_serialconsole_proxy: "no" -enable_octavia: "no" -enable_panko: "no" -enable_rally: "no" -enable_sahara: "no" -enable_searchlight: "no" -enable_senlin: "no" -enable_solum: "no" -enable_swift: "no" -enable_tacker: "no" -enable_telegraf: "no" -enable_tempest: "no" -enable_trove: "no" -enable_vmtp: "no" -enable_watcher: "no" -enable_placement: "yes" - -ironic_keystone_user: "ironic" -neutron_keystone_user: "neutron" -nova_keystone_user: "nova" -designate_keystone_user: "designate" - -# Nova fake driver and the number of fake driver per compute node -enable_nova_fake: "no" -num_nova_fake_per_node: 5 - -# Monitoring options are specified here -enable_collectd: "no" - -# Clean images options are specified here -enable_destroy_images: "no" - -#################### -# Logging options -#################### - -elasticsearch_address: "{{ kolla_internal_vip_address }}" -elasticsearch_protocol: "{{ internal_protocol }}" - -enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_freezer | bool else 'no' }}" -enable_kibana: "{{ 'yes' if enable_central_logging | bool else 'no' }}" - -#################### -# RabbitMQ options -#################### -rabbitmq_user: "openstack" -rabbitmq_version: "rabbitmq_server-3.6/plugins/rabbitmq_clusterer-3.6.x.ez/rabbitmq_clusterer-3.6.x-667f92b0/ebin" - -#################### -# HAProxy options -#################### -haproxy_user: "openstack" -haproxy_enable_external_vip: "{{ 'no' if kolla_external_vip_address == kolla_internal_vip_address else 'yes' }}" -kolla_enable_tls_external: "no" -kolla_external_fqdn_cert: "{{ node_config_directory 
}}/certificates/haproxy.pem" -kolla_external_fqdn_cacert: "{{ node_config_directory }}/certificates/haproxy-ca.crt" - - -#################### -# Kibana options -#################### -kibana_user: "kibana" - - -#################### -# Keystone options -#################### -keystone_admin_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3" -keystone_internal_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3" -keystone_public_url: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ keystone_public_port }}/v3" - -# Valid options are [ uuid, fernet ] -keystone_token_provider: "fernet" -fernet_token_expiry: 86400 - -keystone_default_user_role: "_member_" - -####################### -# Glance options -####################### -glance_backend_file: "{{ not enable_ceph | bool }}" -glance_backend_ceph: "{{ enable_ceph }}" -glance_registry_host: "glance-registry" - - -####################### -# Ceilometer options -####################### -# Valid options are [ mongodb, mysql, gnocchi ] -ceilometer_database_type: "mongodb" - -# Valid options are [ mongodb, gnocchi, panko ] -ceilometer_event_type: "mongodb" - - -######################## -### Panko options -######################## -# Valid options are [ mongodb, mysql ] -panko_database_type: "mysql" - - -################# -# Gnocchi options -################# -# Vaid options are [file, ceph] -gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}" - - -################################# -# Cinder options -################################# -cinder_backend_ceph: "{{ enable_ceph }}" -cinder_volume_group: "cinder-volumes" -cinder_backup_driver: "nfs" -cinder_backup_share: "" -cinder_backup_mount_options_nfs: "" - -####################### -# Cloudkitty options -####################### -# Valid options are [ ceilometer, gnocchi ] -cloudkitty_collector_backend: "ceilometer" - -####################### -# Designate options -####################### -# 
Valid options are [ bind9 ] -designate_backend: "bind9" -designate_ns_record: "sample.openstack.org" - - -####################### -# Neutron options -####################### -neutron_bgp_router_id: "1.1.1.1" -neutron_host: "neutron" - - -####################### -# Nova options -####################### -nova_backend_ceph: "{{ enable_ceph }}" -nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}" -# Valid options are [ kvm, qemu ] -nova_compute_virt_type: "kvm" - - -####################### -# Horizon options -####################### -horizon_backend_database: "{{ enable_murano | bool }}" - -################# -# Octavia options -################# -# Load balancer topology options are [ SINGLE, ACTIVE_STANDBY ] -octavia_loadbalancer_topology: "SINGLE" -octavia_amp_boot_network_list: -octavia_amp_secgroup_list: -octavia_amp_flavor_id: - -################### -# Ceph options -################### -# Ceph can be setup with a caching to improve performance. To use the cache you -# must provide separate disks than those for the OSDs -ceph_enable_cache: "no" -# Valid options are [ forward, none, writeback ] -ceph_cache_mode: "writeback" - -# Valid options are [ ext4, btrfs, xfs ] -ceph_osd_filesystem: "xfs" - -# Set to 'yes-i-really-really-mean-it' to force wipe disks with existing partitions for OSDs. Only -# set if you understand the consequences! -ceph_osd_wipe_disk: "" - -# These are /etc/fstab options. 
Comma separated, no spaces (see fstab(8)) -ceph_osd_mount_options: "defaults,noatime" - -# A requirement for using the erasure-coded pools is you must setup a cache tier -# Valid options are [ erasure, replicated ] -ceph_pool_type: "replicated" - -# Integrate Ceph Rados Object Gateway with OpenStack keystone -enable_ceph_rgw_keystone: "no" - -ceph_cinder_pool_name: "volumes" -ceph_cinder_backup_pool_name: "backups" -ceph_glance_pool_name: "images" -ceph_gnocchi_pool_name: "gnocchi" -ceph_nova_pool_name: "vms" - -ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host" -ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}" -ceph_cache_rule: "cache host firstn" diff --git a/ansible/library/merge_configs.py b/ansible/library/merge_configs.py deleted file mode 100644 index 1a73e3101..000000000 --- a/ansible/library/merge_configs.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2015 Sam Yaple -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -DOCUMENTATION = ''' ---- -module: merge_configs -short_description: Merge ini-style configs -description: - - ConfigParser is used to merge several ini-style configs into one -options: - dest: - description: - - The destination file name - required: True - type: str - sources: - description: - - A list of files on the destination node to merge together - default: None - required: True - type: str -author: Sam Yaple -''' - -EXAMPLES = ''' -Merge multiple configs: - -- hosts: database - tasks: - - name: Merge configs - merge_configs: - sources: - - "/tmp/config_1.cnf" - - "/tmp/config_2.cnf" - - "/tmp/config_3.cnf" - dest: - - "/etc/mysql/my.cnf" -''' diff --git a/ansible/library/merge_yaml.py b/ansible/library/merge_yaml.py deleted file mode 100644 index 4d54b437d..000000000 --- a/ansible/library/merge_yaml.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2015 Sam Yaple -# Copyright 2016 intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -DOCUMENTATION = ''' ---- -module: merge_yaml -short_description: Merge yaml-style configs -description: - - PyYAML is used to merge several yaml files into one -options: - dest: - description: - - The destination file name - required: True - type: str - sources: - description: - - A list of files on the destination node to merge together - default: None - required: True - type: str -author: Sean Mooney -''' - -EXAMPLES = ''' -Merge multiple yaml files: - -- hosts: localhost - tasks: - - name: Merge yaml files - merge_yaml: - sources: - - "/tmp/default.yml" - - "/tmp/override.yml" - dest: - - "/tmp/out.yml" -''' diff --git a/ansible/roles/ceph/defaults/main.yml b/ansible/roles/ceph/defaults/main.yml deleted file mode 100644 index 456cf4356..000000000 --- a/ansible/roles/ceph/defaults/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -project_name: "ceph" - - -#################### -# Docker -#################### -ceph_mon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-mon" -ceph_mon_tag: "{{ openstack_release }}" -ceph_mon_image_full: "{{ ceph_mon_image }}:{{ ceph_mon_tag }}" - -ceph_osd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-osd" -ceph_osd_tag: "{{ openstack_release }}" -ceph_osd_image_full: "{{ ceph_osd_image }}:{{ ceph_osd_tag }}" - -ceph_rgw_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-rgw" -ceph_rgw_tag: "{{ openstack_release }}" -ceph_rgw_image_full: "{{ ceph_rgw_image }}:{{ ceph_rgw_tag }}" - -#################### -# Ceph -#################### -osd_initial_weight: "1" - -#################### -## Ceph_rgw_keystone -#################### -swift_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ rgw_port }}/swift/v1" -swift_internal_endpoint: "{{ internal_protocol 
}}://{{ kolla_internal_fqdn }}:{{ rgw_port }}/swift/v1" -swift_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ rgw_port }}/swift/v1" - -openstack_swift_auth: "{{ openstack_auth }}" - -#################### -# Kolla -#################### -kolla_ceph_use_udev: True diff --git a/ansible/roles/ceph/tasks/config.yml b/ansible/roles/ceph/tasks/config.yml deleted file mode 100644 index d88e9645d..000000000 --- a/ansible/roles/ceph/tasks/config.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item }}" - state: "directory" - recurse: yes - with_items: - - "ceph-mon" - - "ceph-osd" - - "ceph-rgw" - -- name: Copying over config.json files for services - template: - src: "{{ item }}.json.j2" - dest: "{{ node_config_directory }}/{{ item }}/config.json" - with_items: - - "ceph-mon" - - "ceph-osd" - - "ceph-rgw" - -- name: Copying over ceph.conf - merge_configs: - vars: - service_name: "{{ item }}" - sources: - - "{{ role_path }}/templates/ceph.conf.j2" - - "{{ node_custom_config }}/ceph.conf" - - "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf" - dest: "{{ node_config_directory }}/{{ item }}/ceph.conf" - with_items: - - "ceph-mon" - - "ceph-osd" - - "ceph-rgw" diff --git a/ansible/roles/ceph/tasks/main.yml b/ansible/roles/ceph/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/ceph/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/ceph/templates/ceph-mon.json.j2 b/ansible/roles/ceph/templates/ceph-mon.json.j2 deleted file mode 100644 index 11dfccf6e..000000000 --- a/ansible/roles/ceph/templates/ceph-mon.json.j2 +++ /dev/null @@ -1,43 +0,0 @@ -{ -{%- if orchestration_engine == 'KUBERNETES' %} - "command": "/usr/bin/ceph-mon -f -i @MONID@ --public-addr @MONADDR@:6789", -{%- else %} - "command": "/usr/bin/ceph-mon -f -i {{ hostvars[inventory_hostname]['ansible_' + 
storage_interface]['ipv4']['address'] }} --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}:6789", -{%- endif %} - "config_files": [ - { - "source": "{{ container_config_directory }}/ceph.conf", - "dest": "/etc/ceph/ceph.conf", - "owner": "ceph", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ceph.client.admin.keyring", - "dest": "/etc/ceph/ceph.client.admin.keyring", - "owner": "ceph", - "perm": "0600", - "optional": true - }, - { - "source": "{{ container_config_directory }}/ceph.client.mon.keyring", - "dest": "/etc/ceph/ceph.client.mon.keyring", - "owner": "ceph", - "perm": "0600", - "optional": true - }, - { - "source": "{{ container_config_directory }}/ceph.client.radosgw.keyring", - "dest": "/etc/ceph/ceph.client.radosgw.keyring", - "owner": "ceph", - "perm": "0600", - "optional": true - }, - { - "source": "{{ container_config_directory }}/ceph.monmap", - "dest": "/etc/ceph/ceph.monmap", - "owner": "ceph", - "perm": "0600", - "optional": true - } - ] -} diff --git a/ansible/roles/ceph/templates/ceph-osd.json.j2 b/ansible/roles/ceph/templates/ceph-osd.json.j2 deleted file mode 100644 index 7118f1b16..000000000 --- a/ansible/roles/ceph/templates/ceph-osd.json.j2 +++ /dev/null @@ -1,21 +0,0 @@ -{ -{%- if orchestration_engine == 'KUBERNETES' %} - "command": "/usr/bin/ceph-osd -f --public-addr @HOSTADDR@ --cluster-addr @CLUSTERADDR@", -{%- else %} - "command": "/usr/bin/ceph-osd -f --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }} --cluster-addr {{ hostvars[inventory_hostname]['ansible_' + cluster_interface]['ipv4']['address'] }}", -{%- endif %} - "config_files": [ - { - "source": "{{ container_config_directory }}/ceph.conf", - "dest": "/etc/ceph/ceph.conf", - "owner": "ceph", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ceph.client.admin.keyring", - "dest": "/etc/ceph/ceph.client.admin.keyring", - "owner": "ceph", 
- "perm": "0600" - } - ] -} diff --git a/ansible/roles/ceph/templates/ceph-rgw.json.j2 b/ansible/roles/ceph/templates/ceph-rgw.json.j2 deleted file mode 100644 index 9789651da..000000000 --- a/ansible/roles/ceph/templates/ceph-rgw.json.j2 +++ /dev/null @@ -1,23 +0,0 @@ -{ - "command": "/usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway -f", - "config_files": [ - { - "source": "{{ container_config_directory }}/ceph.conf", - "dest": "/etc/ceph/ceph.conf", - "owner": "ceph", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ceph.client.admin.keyring", - "dest": "/etc/ceph/ceph.client.admin.keyring", - "owner": "ceph", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ceph.client.radosgw.keyring", - "dest": "/etc/ceph/ceph.client.radosgw.keyring", - "owner": "ceph", - "perm": "0600" - } - ] -} diff --git a/ansible/roles/ceph/templates/ceph.conf.j2 b/ansible/roles/ceph/templates/ceph.conf.j2 deleted file mode 100644 index c716ea723..000000000 --- a/ansible/roles/ceph/templates/ceph.conf.j2 +++ /dev/null @@ -1,46 +0,0 @@ -[global] -{% if std_logger %} -log to syslog = false -err to syslog = false -log to stderr = true -err to stderr = true -{% else %} -log file = /var/log/kolla/ceph/$cluster-$name.log -log to syslog = false -err to syslog = false -log to stderr = false -err to stderr = false -{% endif %} - -fsid = {{ ceph_cluster_fsid }} - -auth cluster required = cephx -auth service required = cephx -auth client required = cephx - -# NOTE(inc0): This line will mean that if ceph was upgraded, it will run as root -# until contents of /var/lib/ceph are chowned to ceph user. -# This change was introduced in Jewel version and we should include -# chown operation in upgrade procedure. https://bugs.launchpad.net/kolla/+bug/1620702 -setuser match path = /var/lib/ceph/$type/$cluster-$id - -[mon] -# NOTE(SamYaple): The monitor files have been known to grow very large. The -# only fix for that is to compact the files. 
-mon compact on start = true -mon cluster log file = /var/log/kolla/ceph/$cluster.log - -{% if service_name is defined and service_name == 'ceph-rgw' %} -[client.radosgw.gateway] -{% if enable_ceph_rgw_keystone | bool %} -rgw_keystone_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }} -rgw_keystone_admin_user = {{ openstack_auth.username }} -rgw_keystone_admin_password = {{ openstack_auth.password }} -rgw_keystone_admin_project = {{ openstack_auth.project_name }} -rgw_keystone_admin_domain = default -rgw_keystone_api_version = 3 -rgw_keystone_accepted_roles = admin, {{ keystone_default_user_role }} -{% endif %} -keyring = /etc/ceph/ceph.client.radosgw.keyring -log file = /var/log/kolla/ceph/client.radosgw.gateway.log -{% endif %} diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml deleted file mode 100644 index 3c4825280..000000000 --- a/ansible/roles/cinder/defaults/main.yml +++ /dev/null @@ -1,160 +0,0 @@ ---- -project_name: "cinder" - -cinder_services: - cinder-api: - container_name: cinder_api - group: cinder-api - enabled: true - image: "{{ cinder_api_image_full }}" - volumes: - - "{{ node_config_directory }}/cinder-api/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - cinder-scheduler: - container_name: cinder_scheduler - group: cinder-scheduler - enabled: true - image: "{{ cinder_scheduler_image_full }}" - volumes: - - "{{ node_config_directory }}/cinder-scheduler/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - cinder-volume: - container_name: cinder_volume - group: cinder-volume - enabled: true - image: "{{ cinder_volume_image_full }}" - privileged: True - ipc_mode: "host" - volumes: - - "{{ node_config_directory }}/cinder-volume/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/dev/:/dev/" - - "/run/:/run/:shared" - - "{% if 
enable_iscsid | bool %}cinder:/var/lib/cinder{% endif %}" - - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}" - - "kolla_logs:/var/log/kolla/" - cinder-backup: - container_name: cinder_backup - group: cinder-backup - enabled: true - image: "{{ cinder_backup_image_full }}" - privileged: True - volumes: - - "{{ node_config_directory }}/cinder-backup/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/dev/:/dev/" - - "/run/:/run/:shared" - - "{% if enable_cinder_backend_lvm | bool %}cinder:/var/lib/cinder{% endif %}" - - "{% if enable_cinder_backend_lvm | bool %}iscsi_info:/etc/iscsi{% endif %}" - - "kolla_logs:/var/log/kolla/" - -#################### -# Ceph -#################### -ceph_cinder_pool_type: "{{ ceph_pool_type }}" -ceph_cinder_cache_mode: "{{ ceph_cache_mode }}" -ceph_cinder_backup_pool_type: "{{ ceph_pool_type }}" -ceph_cinder_backup_cache_mode: "{{ ceph_cache_mode }}" - -# Due to Ansible issues on include, you cannot override these variables. Please -# override the variables they reference instead. 
-cinder_pool_name: "{{ ceph_cinder_pool_name }}" -cinder_pool_type: "{{ ceph_cinder_pool_type }}" -cinder_cache_mode: "{{ ceph_cinder_cache_mode }}" -cinder_backup_pool_name: "{{ ceph_cinder_backup_pool_name }}" -cinder_backup_pool_type: "{{ ceph_cinder_backup_pool_type }}" -cinder_backup_cache_mode: "{{ ceph_cinder_backup_cache_mode }}" - - -#################### -# Database -#################### -cinder_database_name: "cinder" -cinder_database_user: "cinder" -cinder_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - - -#################### -# Docker -#################### -cinder_volume_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-volume" -cinder_volume_tag: "{{ openstack_release }}" -cinder_volume_image_full: "{{ cinder_volume_image }}:{{ cinder_volume_tag }}" - -cinder_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-scheduler" -cinder_scheduler_tag: "{{ openstack_release }}" -cinder_scheduler_image_full: "{{ cinder_scheduler_image }}:{{ cinder_scheduler_tag }}" - -cinder_backup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-backup" -cinder_backup_tag: "{{ openstack_release }}" -cinder_backup_image_full: "{{ cinder_backup_image }}:{{ cinder_backup_tag }}" - -cinder_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-api" -cinder_api_tag: "{{ openstack_release }}" -cinder_api_image_full: "{{ cinder_api_image }}:{{ cinder_api_tag }}" - - -#################### -# OpenStack -#################### -cinder_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s" -cinder_internal_endpoint: "{{ internal_protocol }}://{{ 
kolla_internal_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s" -cinder_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s" -cinder_v2_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s" -cinder_v2_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s" -cinder_v2_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s" -cinder_v3_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v3/%(tenant_id)s" -cinder_v3_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v3/%(tenant_id)s" -cinder_v3_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v3/%(tenant_id)s" - -cinder_logging_debug: "{{ openstack_logging_debug }}" - -cinder_keystone_user: "cinder" - -openstack_cinder_auth: "{{ openstack_auth }}" - - -#################### -# Cinder -#################### -cinder_backends: - - name: "rbd-1" - driver: "ceph" - enabled: "{{ enable_ceph | bool and cinder_backend_ceph | bool }}" - - name: "lvm-1" - driver: "lvm" - enabled: "{{ enable_cinder_backend_lvm | bool }}" - - name: "nfs-1" - driver: "nfs" - enabled: "{{ enable_cinder_backend_nfs | bool }}" - - name: "hnas-iscsi" - driver: "hnas_iscsi" - enabled: "{{ enable_cinder_backend_hnas_iscsi | bool }}" - - name: "hnas-nfs" - driver: "hnas_nfs" - enabled: "{{ enable_cinder_backend_hnas_nfs | bool }}" - -cinder_enabled_backends: "{{ cinder_backends|selectattr('enabled', 'equalto', true)|list }}" - - -############################################# -# Hitachi NAS Platform iSCSI and NFS drivers -############################################# -# iscsi -hnas_iscsi_backend: "hnas_iscsi_backend" -hnas_iscsi_username: -hnas_iscsi_password: -hnas_iscsi_mgmt_ip0: -hnas_iscsi_svc0_volume_type: 
-hnas_iscsi_svc0_hdp: -hnas_iscsi_svc0_ip: - -# nfs -hnas_nfs_backend: "hnas_nfs_backend" -hnas_nfs_username: -hnas_nfs_password: -hnas_nfs_mgmt_ip0: -hnas_nfs_svc0_volume_type: -hnas_nfs_svc0_hdp: diff --git a/ansible/roles/cinder/tasks/config.yml b/ansible/roles/cinder/tasks/config.yml deleted file mode 100644 index d1a86fb24..000000000 --- a/ansible/roles/cinder/tasks/config.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - recurse: yes - when: - - item.value.enabled | bool - with_dict: "{{ cinder_services }}" - -- name: Copying over config.json files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - register: cinder_config_jsons - when: - - item.value.enabled | bool - with_dict: "{{ cinder_services }}" - -- name: Copying over cinder.conf - merge_configs: - vars: - service_name: "{{ item.key }}" - sources: - - "{{ role_path }}/templates/cinder.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/cinder.conf" - - "{{ node_custom_config }}/cinder/{{ item.key }}.conf" - - "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/cinder.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/cinder.conf" - register: cinder_confs - when: - - item.value.enabled | bool - with_dict: "{{ cinder_services }}" - -- name: Check if policies shall be overwritten - local_action: stat path="{{ node_custom_config }}/cinder/policy.json" - register: cinder_policy - -- name: Copying over existing policy.json - template: - src: "{{ node_custom_config }}/cinder/policy.json" - dest: "{{ node_config_directory }}/{{ item.key }}/policy.json" - register: cinder_policy_jsons - when: - - cinder_policy.stat.exists - with_dict: "{{ cinder_services }}" - -- name: Copying over nfs_shares 
files for cinder_volume - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/cinder-volume/nfs_shares" - with_first_found: - - files: - - "{{ node_custom_config }}/nfs_shares.j2" - - "{{ node_custom_config }}/cinder/nfs_shares.j2" - - "{{ node_custom_config }}/cinder/cinder-volume/nfs_shares.j2" - - "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/nfs_shares.j2" - - "{{ node_custom_config }}/nfs_shares" - - "{{ node_custom_config }}/cinder/nfs_shares" - - "{{ node_custom_config }}/cinder/cinder-volume/nfs_shares" - - "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/nfs_shares" - skip: "{{ not enable_cinder_backend_nfs | bool and not enable_cinder_backend_hnas_nfs | bool }}" diff --git a/ansible/roles/cinder/tasks/main.yml b/ansible/roles/cinder/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/cinder/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/cinder/templates/cinder-api.json.j2 b/ansible/roles/cinder/templates/cinder-api.json.j2 deleted file mode 100644 index 473368125..000000000 --- a/ansible/roles/cinder/templates/cinder-api.json.j2 +++ /dev/null @@ -1,30 +0,0 @@ -{ - "command": "cinder-api --config-file /etc/cinder/cinder.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/cinder.conf", - "dest": "/etc/cinder/cinder.conf", - "owner": "cinder", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/cinder/policy.json", - "owner": "cinder", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/lib/cinder", - "owner": "cinder:cinder", - "recurse": true - }, - { - "path": "/var/log/kolla/cinder", - "owner": "cinder:cinder", - "recurse": true - } - ] -} diff --git a/ansible/roles/cinder/templates/cinder-backup.json.j2 b/ansible/roles/cinder/templates/cinder-backup.json.j2 deleted file mode 100644 index a24b2f062..000000000 --- 
a/ansible/roles/cinder/templates/cinder-backup.json.j2 +++ /dev/null @@ -1,37 +0,0 @@ -{ - "command": "cinder-backup --config-file /etc/cinder/cinder.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/cinder.conf", - "dest": "/etc/cinder/cinder.conf", - "owner": "cinder", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/cinder/policy.json", - "owner": "cinder", - "perm": "0600", - "optional": true - }{% if cinder_backend_ceph | bool %}, - { - "source": "{{ container_config_directory }}/ceph.*", - "dest": "/etc/ceph/", - "owner": "cinder", - "perm": "0700", - "optional": {{ (not cinder_backend_ceph | bool) | string | lower }} - }{% endif %} - ], - "permissions": [ - { - "path": "/var/lib/cinder", - "owner": "cinder:cinder", - "recurse": true - }, - { - "path": "/var/log/kolla/cinder", - "owner": "cinder:cinder", - "recurse": true - } - ] -} diff --git a/ansible/roles/cinder/templates/cinder-scheduler.json.j2 b/ansible/roles/cinder/templates/cinder-scheduler.json.j2 deleted file mode 100644 index 84fdfe3d4..000000000 --- a/ansible/roles/cinder/templates/cinder-scheduler.json.j2 +++ /dev/null @@ -1,30 +0,0 @@ -{ - "command": "cinder-scheduler --config-file /etc/cinder/cinder.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/cinder.conf", - "dest": "/etc/cinder/cinder.conf", - "owner": "cinder", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/cinder/policy.json", - "owner": "cinder", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/lib/cinder", - "owner": "cinder:cinder", - "recurse": true - }, - { - "path": "/var/log/kolla/cinder", - "owner": "cinder:cinder", - "recurse": true - } - ] -} diff --git a/ansible/roles/cinder/templates/cinder-volume.json.j2 b/ansible/roles/cinder/templates/cinder-volume.json.j2 deleted file mode 100644 index c00ea1a01..000000000 --- 
a/ansible/roles/cinder/templates/cinder-volume.json.j2 +++ /dev/null @@ -1,51 +0,0 @@ -{ - "command": "cinder-volume --config-file /etc/cinder/cinder.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/cinder.conf", - "dest": "/etc/cinder/cinder.conf", - "owner": "cinder", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ceph.*", - "dest": "/etc/ceph/", - "owner": "cinder", - "perm": "0700", - "optional": {{ (not cinder_backend_ceph | bool) | string | lower }} - }, - { - "source": "{{ container_config_directory }}/ceph.conf", - "dest": "/etc/ceph/ceph.conf", - "owner": "cinder", - "perm": "0600", - "optional": {{ (not cinder_backend_ceph | bool) | string | lower }} - }, - { - "source": "{{ container_config_directory }}/nfs_shares", - "dest": "/etc/cinder/nfs_shares", - "owner": "cinder", - "perm": "0600", - "optional": {{ (not enable_cinder_backend_nfs | bool) | string | lower }} - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/cinder/policy.json", - "owner": "cinder", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/lib/cinder", - "owner": "cinder:cinder", - "recurse": true - }, - { - "path": "/var/log/kolla/cinder", - "owner": "cinder:cinder", - "recurse": true - } - ] -} diff --git a/ansible/roles/cinder/templates/cinder.conf.j2 b/ansible/roles/cinder/templates/cinder.conf.j2 deleted file mode 100644 index 5af22cc02..000000000 --- a/ansible/roles/cinder/templates/cinder.conf.j2 +++ /dev/null @@ -1,153 +0,0 @@ -[DEFAULT] -debug = {{ cinder_logging_debug }} - -use_forwarded_for = true - -{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -log_dir = /var/log/kolla/cinder - -# Set use_stderr to False or the logs will also be sent to stderr -# and collected by Docker -use_stderr = False -{% endif %} - -enable_v1_api=false -osapi_volume_workers = {{ openstack_service_workers }} -volume_name_template = volume-%s - 
-glance_api_servers = {{ internal_protocol }}://glance-api:{{ glance_api_port }} - -glance_api_version = 2 - -os_region_name = {{ openstack_region_name }} - -{% if cinder_enabled_backends %} -enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }} -{% endif %} - -{% if service_name == "cinder-backup" and enable_ceph | bool and cinder_backend_ceph | bool %} -backup_driver = cinder.backup.drivers.ceph -backup_ceph_conf = /etc/ceph/ceph.conf -backup_ceph_user = cinder-backup -backup_ceph_chunk_size = 134217728 -backup_ceph_pool = {{ ceph_cinder_backup_pool_name }} -backup_ceph_stripe_unit = 0 -backup_ceph_stripe_count = 0 -restore_discard_excess_bytes = true -{% elif cinder_backup_driver == "nfs"%} -backup_driver = cinder.backup.drivers.nfs -backup_mount_options = {{ cinder_backup_mount_options_nfs }} -backup_mount_point_base = /var/lib/cinder/backup -backup_share = {{ cinder_backup_share }} -backup_file_size = 327680000 -{% elif cinder_backup_driver == "swift"%} -backup_driver = cinder.backup.drivers.swift -backup_swift_url = http://{{ kolla_internal_vip_address }}:{{ swift_proxy_server_port }}/v1/AUTH_ -backup_swift_auth = per_user -backup_swift_auth_version = 1 -backup_swift_user = -backup_swift_key = -{% endif %} - -osapi_volume_listen = {{ api_interface_address }} -osapi_volume_listen_port = {{ cinder_api_port }} - -api_paste_config = /etc/cinder/api-paste.ini -nova_catalog_info = compute:nova:internalURL - -auth_strategy = keystone - -transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }} - -[oslo_messaging_notifications] -{% if enable_ceilometer | bool or enable_searchlight | bool %} -driver = messagingv2 -topics = notifications -{% else %} -driver = noop -{% endif %} - -[database] -connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }} -max_retries = -1 - -[keystone_authtoken] -auth_uri = {{ 
keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ cinder_keystone_user }} -password = {{ cinder_keystone_password }} - -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} -memcache_servers = {{ memcached_servers }}:{{ memcached_port }} - -[oslo_concurrency] -lock_path = /var/lib/cinder/tmp - - -{% if enable_cinder_backend_lvm | bool %} -[lvm-1] -volume_group = {{ cinder_volume_group }} -volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver -volume_backend_name = lvm-1 -iscsi_helper = tgtadm -iscsi_protocol = iscsi -{% endif %} - -{% if enable_ceph | bool and cinder_backend_ceph | bool %} -[rbd-1] -volume_driver = cinder.volume.drivers.rbd.RBDDriver -rbd_pool = {{ ceph_cinder_pool_name }} -rbd_ceph_conf = /etc/ceph/ceph.conf -rbd_flatten_volume_from_snapshot = false -rbd_max_clone_depth = 5 -rbd_store_chunk_size = 4 -rados_connect_timeout = -1 -rbd_user = cinder -rbd_secret_uuid = {{ cinder_rbd_secret_uuid }} -report_discard_supported = True -{% endif %} - -{% if enable_cinder_backend_nfs | bool %} -[nfs-1] -volume_driver = cinder.volume.drivers.nfs.NfsDriver -volume_backend_name = nfs-1 -nfs_shares_config = /etc/cinder/nfs_shares -{% endif %} - -{% if enable_cinder_backend_hnas_iscsi | bool %} -[hnas-iscsi] -volume_driver = cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver -volume_backend_name = {{ hnas_iscsi_backend }} -hnas_username = {{ hnas_iscsi_username }} -hnas_password = {{ hnas_iscsi_password }} -hnas_mgmt_ip0 = {{ hnas_iscsi_mgmt_ip0 }} -hnas_chap_enabled = True - -hnas_svc0_volume_type = {{ hnas_iscsi_svc0_volume_type }} -hnas_svc0_hdp = {{ hnas_iscsi_svc0_hdp }} -hnas_svc0_iscsi_ip = {{ hnas_iscsi_svc0_ip }} -{% endif %} - -{% if enable_cinder_backend_hnas_nfs | bool %} -[hnas-nfs] -volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver -nfs_shares_config = 
/home/cinder/nfs_shares -volume_backend_name = {{ hnas_nfs_backend }} -hnas_username = {{ hnas_nfs_username }} -hnas_password = {{ hnas_nfs_password }} -hnas_mgmt_ip0 = {{ hnas_nfs_mgmt_ip0 }} - -hnas_svc0_volume_type = {{ hnas_nfs_svc0_volume_type }} -hnas_svc0_hdp = {{ hnas_nfs_svc0_hdp }} -{% endif %} - -[privsep_entrypoint] -helper_command=sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml deleted file mode 100644 index 11db34b92..000000000 --- a/ansible/roles/common/defaults/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Due to the way we do our inventory, ansible does not pick up on the fact that -# this role has already run. We can track what has run with host facts. -common_run: False - -#################### -# Docker -#################### -kolla_toolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kolla-toolbox" -kolla_toolbox_tag: "{{ openstack_release }}" -kolla_toolbox_image_full: "{{ kolla_toolbox_image }}:{{ kolla_toolbox_tag }}" - -cron_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cron" -cron_tag: "{{ openstack_release }}" -cron_image_full: "{{ cron_image }}:{{ cron_tag }}" - -fluentd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-fluentd" -fluentd_tag: "{{ openstack_release }}" -fluentd_image_full: "{{ fluentd_image }}:{{ fluentd_tag }}" - -kubetoolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kubetoolbox" -kubetoolbox_tag: "{{ openstack_release }}" -kubetoolbox_image_full: "{{ kubetoolbox_image }}:{{ kubetoolbox_tag }}" diff --git 
a/ansible/roles/destroy/tasks/label_iterator.yml b/ansible/roles/destroy/tasks/label_iterator.yml deleted file mode 100644 index d69fccbc9..000000000 --- a/ansible/roles/destroy/tasks/label_iterator.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- name: "Removing label on node {{ host['metadata']['name'] }}" - command: "kubectl label node {{ host['metadata']['name'] }} {{ item.key }}-" - when: item.key.startswith('kolla_') - with_dict: "{{ host['metadata']['labels'] }}" diff --git a/ansible/roles/destroy/tasks/main.yml b/ansible/roles/destroy/tasks/main.yml deleted file mode 100644 index cb47a8b38..000000000 --- a/ansible/roles/destroy/tasks/main.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- -- name: Obtain node information - command: kubectl get nodes -o json - register: kubectl_nodes - -- name: Set node facts - set_fact: - kubectl_dict: "{{ kubectl_nodes.stdout|from_json }}" - -- name: Obtain list of Kolla PVs - command: "kubectl get pvc -n kolla -o jsonpath={.items[*].spec.volumeName}" - register: pv_list - failed_when: - - pv_list.rc != 0 - -- name: Obtain list of Kolla configmaps - command: "kubectl get configmaps -n kolla -o name" - register: configmaps_list - changed_when: - - configmaps_list | success - failed_when: - - configmaps_list.rc != 0 - -- name: Obtain list of Kolla secrets - command: "kubectl get secrets -n kolla -o name" - register: secrets_list - changed_when: - - secrets_list | success - failed_when: - - secrets_list.rc != 0 - -- name: Obtain list of Kolla Helm charts - command: "helm list --namespace kolla --all -q" - register: helm_list - changed_when: - - helm_list | success - failed_when: - - helm_list.rc != 0 - -- name: Delete existing Kolla Helm charts - command: "helm delete {{ item }} --purge" - when: helm_list.stdout | length != 0 - with_items: - - "{{ helm_list.stdout }}" - -- name: Delete existing Kolla secrets - command: "kubectl delete -n kolla {{ item }}" - when: secrets_list.stdout | length != 0 - with_items: - - "{{ secrets_list.stdout }}" - 
-- name: Delete existing Kolla configmaps - command: "kubectl delete -n kolla {{ item }}" - when: configmaps_list.stdout | length != 0 - with_items: - - "{{ configmaps_list.stdout }}" - -- name: "Delete existing Kolla labels" - include: "label_iterator.yml" - with_items: "{{ kubectl_dict['items'] }}" - loop_control: - loop_var: host - -- name: Obtain list of Kolla PVCs - shell: "kubectl get pvc -n kolla -o jsonpath={.items[*].metadata.name}" - register: pvc_list - failed_when: - - pvc_list.rc != 0 - -- name: Delete existing Kolla PVCs - command: "kubectl delete pvc -n kolla {{ item }}" - when: pvc_list.stdout | length != 0 - with_items: - - "{{ pvc_list.stdout }}" - -- name: Delete existing Kolla PVs - command: "kubectl delete pv {{ item }}" - when: pv_list.stdout | length != 0 - with_items: - - "{{ pv_list.stdout }}" - -- name: Delete Kolla namespace - command: "kubectl delete namespace kolla" - register: namespace_delete - failed_when: - - namespace_delete.rc != 0 diff --git a/ansible/roles/glance/defaults/main.yml b/ansible/roles/glance/defaults/main.yml deleted file mode 100644 index 0cc9764dc..000000000 --- a/ansible/roles/glance/defaults/main.yml +++ /dev/null @@ -1,62 +0,0 @@ ---- -project_name: "glance" - -glance_services: - glance-api: - container_name: glance_api - group: glance-api - enabled: true - image: "{{ glance_api_image_full }}" - glance-registry: - container_name: glance_registry - group: glance-registry - enabled: true - image: "{{ glance_registry_image_full }}" - -#################### -# Ceph -#################### -ceph_glance_pool_type: "{{ ceph_pool_type }}" -ceph_glance_cache_mode: "{{ ceph_cache_mode }}" - -# Due to Ansible issues on include, you cannot override these variables. Please -# override the variables they reference instead. 
-glance_pool_name: "{{ ceph_glance_pool_name }}" -glance_pool_type: "{{ ceph_glance_pool_type }}" -glance_cache_mode: "{{ ceph_glance_cache_mode }}" - - -#################### -# Database -#################### -glance_database_name: "glance" -glance_database_user: "glance" -glance_database_address: "{{ kolla_external_fqdn }}:{{ database_port }}" - - -#################### -# Docker -#################### -glance_registry_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-glance-registry" -glance_registry_tag: "{{ openstack_release }}" -glance_registry_image_full: "{{ glance_registry_image }}:{{ glance_registry_tag }}" - -glance_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-glance-api" -glance_api_tag: "{{ openstack_release }}" -glance_api_image_full: "{{ glance_api_image }}:{{ glance_api_tag }}" - - -#################### -# OpenStack -#################### -glance_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ glance_api_port }}" -glance_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ glance_api_port }}" -glance_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ glance_api_port }}" - -glance_logging_debug: "{{ openstack_logging_debug }}" - -glance_keystone_user: "glance" - -openstack_glance_auth: "{{ openstack_auth }}" - -glance_registry_host: "glance-registry" diff --git a/ansible/roles/glance/tasks/config.yml b/ansible/roles/glance/tasks/config.yml deleted file mode 100644 index 913c9cb37..000000000 --- a/ansible/roles/glance/tasks/config.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - recurse: yes - when: - - item.value.enabled | bool - with_dict: "{{ glance_services }}" - -- name: Copying over config.json 
files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - register: glance_config_jsons - when: - - item.value.enabled | bool - with_dict: "{{ glance_services }}" - -- name: Copying over glance-*.conf - merge_configs: - vars: - service_name: "{{ item.key }}" - sources: - - "{{ role_path }}/templates/{{ item.key }}.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/glance.conf" - - "{{ node_custom_config }}/glance/{{ item.key }}.conf" - - "{{ node_custom_config }}/glance/{{ inventory_hostname }}/{{ item.key }}.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/{{ item.key }}.conf" - register: glance_confs - when: - - item.value.enabled | bool - with_dict: "{{ glance_services }}" - -- name: Check if policies shall be overwritten - local_action: stat path="{{ node_custom_config }}/glance/policy.json" - register: glance_policy - -- name: Copying over existing policy.json - template: - src: "{{ node_custom_config }}/glance/policy.json" - dest: "{{ node_config_directory }}/{{ item.key }}/policy.json" - register: glance_policy_jsons - when: - - glance_policy.stat.exists - with_dict: "{{ glance_services }}" diff --git a/ansible/roles/glance/tasks/main.yml b/ansible/roles/glance/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/glance/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/glance/templates/glance-api.conf.j2 b/ansible/roles/glance/templates/glance-api.conf.j2 deleted file mode 100644 index 39248c663..000000000 --- a/ansible/roles/glance/templates/glance-api.conf.j2 +++ /dev/null @@ -1,71 +0,0 @@ -[DEFAULT] -debug = {{ glance_logging_debug }} - -use_forwarded_for = true - -{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -# NOTE(elemoine) log_dir alone does 
not work for Glance -log_file = /var/log/kolla/glance/api.log -{% endif %} - -bind_host = {{ api_interface_address }} -bind_port = {{ glance_api_port }} -workers = {{ openstack_service_workers }} - -registry_host = {{ glance_registry_host }} - -{% if enable_ceph | bool %} -show_image_direct_url= True -show_multiple_locations = True -{% endif %} - -cinder_catalog_info = volume:cinder:internalURL - -transport_url = rabbit://rabbitmq:{{ rabbitmq_port }} - -[database] -connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }} -max_retries = -1 - -[keystone_authtoken] -auth_uri = {{ keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ glance_keystone_user }} -password = {{ glance_keystone_password }} - -{# For Kolla-Ansible, generate the memcache servers based on the list of -memcached servers in the inventory and memcached_servers should be un-set. -For Kolla-Kubernetes, it is necessary to define the memcached_servers -variable in globals.yml to set it to the Kubernetes service for memcached. 
#} -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} -memcache_servers = {{ memcached_servers }} - -[paste_deploy] -flavor = keystone - -[glance_store] -{% if enable_ceph | bool and glance_backend_ceph | bool %} -default_store = rbd -stores = rbd,http -rbd_store_user = glance -rbd_store_pool = {{ ceph_glance_pool_name }} -rbd_store_chunk_size = 8 -{% else %} -default_store = file -filesystem_store_datadir = /var/lib/glance/images/ -{% endif %} - -[oslo_messaging_notifications] -{% if enable_ceilometer | bool or enable_searchlight | bool %} -driver = messagingv2 -{% else %} -driver = noop -{% endif %} diff --git a/ansible/roles/glance/templates/glance-api.json.j2 b/ansible/roles/glance/templates/glance-api.json.j2 deleted file mode 100644 index fd15198c0..000000000 --- a/ansible/roles/glance/templates/glance-api.json.j2 +++ /dev/null @@ -1,37 +0,0 @@ -{ - "command": "glance-api", - "config_files": [ - { - "source": "{{ container_config_directory }}/glance-api.conf", - "dest": "/etc/glance/glance-api.conf", - "owner": "glance", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/glance/policy.json", - "owner": "glance", - "perm": "0600", - "optional": true - }{% if glance_backend_ceph | bool %}, - { - "source": "{{ container_config_directory }}/ceph.*", - "dest": "/etc/ceph/", - "owner": "glance", - "perm": "0700" - } - {% endif %} - ], - "permissions": [ - { - "path": "/var/lib/glance", - "owner": "glance:glance", - "recurse": true - }, - { - "path": "/var/log/kolla/glance", - "owner": "glance:glance", - "recurse": true - } - ] -} diff --git a/ansible/roles/glance/templates/glance-registry.conf.j2 b/ansible/roles/glance/templates/glance-registry.conf.j2 deleted file mode 100644 index d40dc9223..000000000 --- a/ansible/roles/glance/templates/glance-registry.conf.j2 +++ /dev/null @@ -1,48 +0,0 @@ -[DEFAULT] -debug = {{ glance_logging_debug }} - -{% if std_logger %} -use_syslog = 
False -use_stderr = True -{% else %} -# NOTE(elemoine) log_dir alone does not work for Glance -log_file = /var/log/kolla/glance/registry.log -{% endif %} - -bind_host = {{ api_interface_address }} -bind_port = {{ glance_registry_port }} -workers = {{ openstack_service_workers }} - -transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq - -[database] -connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }} -max_retries = -1 - -[keystone_authtoken] -auth_uri = {{ keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ glance_keystone_user }} -password = {{ glance_keystone_password }} - -{# For Kolla-Ansible, generate the memcache servers based on the list of -memcached servers in the inventory and memcached_servers should be un-set. -For Kolla-Kubernetes, it is necessary to define the memcached_servers -variable in globals.yml to set it to the Kubernetes service for memcached. 
#} -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} -memcache_servers = {{ memcached_servers }} - -[paste_deploy] -flavor = keystone - -[oslo_messaging_notifications] -{% if enable_ceilometer | bool or enable_searchlight | bool %} -driver = messagingv2 -{% else %} -driver = noop -{% endif %} diff --git a/ansible/roles/glance/templates/glance-registry.json.j2 b/ansible/roles/glance/templates/glance-registry.json.j2 deleted file mode 100644 index 46dd51736..000000000 --- a/ansible/roles/glance/templates/glance-registry.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "glance-registry", - "config_files": [ - { - "source": "{{ container_config_directory }}/glance-registry.conf", - "dest": "/etc/glance/glance-registry.conf", - "owner": "glance", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/glance/policy.json", - "owner": "glance", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/glance", - "owner": "glance:glance", - "recurse": true - } - ] -} diff --git a/ansible/roles/heat/defaults/main.yml b/ansible/roles/heat/defaults/main.yml deleted file mode 100644 index 2a4f80e21..000000000 --- a/ansible/roles/heat/defaults/main.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -project_name: "heat" - -heat_services: - heat-api: - container_name: heat_api - group: heat-api - enabled: true - image: "{{ heat_api_image_full }}" - volumes: - - "{{ node_config_directory }}/heat-api/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - heat-api-cfn: - container_name: heat_api_cfn - group: heat-api-cfn - enabled: true - image: "{{ heat_api_cfn_image_full }}" - volumes: - - "{{ node_config_directory }}/heat-api-cfn/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - heat-engine: - container_name: heat_engine - group: heat-engine - 
enabled: true - image: "{{ heat_engine_image_full }}" - volumes: - - "{{ node_config_directory }}/heat-engine/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - -#################### -# Database -#################### -heat_database_name: "heat" -heat_database_user: "heat" -heat_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - - -#################### -# Docker -#################### -heat_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-api" -heat_api_tag: "{{ openstack_release }}" -heat_api_image_full: "{{ heat_api_image }}:{{ heat_api_tag }}" - -heat_api_cfn_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-api-cfn" -heat_api_cfn_tag: "{{ openstack_release }}" -heat_api_cfn_image_full: "{{ heat_api_cfn_image }}:{{ heat_api_cfn_tag }}" - -heat_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-engine" -heat_engine_tag: "{{ openstack_release }}" -heat_engine_image_full: "{{ heat_engine_image }}:{{ heat_engine_tag }}" - -#################### -# OpenStack -#################### -heat_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s" -heat_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s" -heat_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s" -heat_cfn_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_cfn_port }}/v1" -heat_cfn_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_cfn_port }}/v1" -heat_cfn_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port 
}}/v1" - -heat_logging_debug: "{{ openstack_logging_debug }}" - -heat_keystone_user: "heat" -heat_stack_user_role: "heat_stack_user" -heat_stack_owner_role: "heat_stack_owner" - -openstack_heat_auth: "{{ openstack_auth }}" diff --git a/ansible/roles/heat/tasks/config.yml b/ansible/roles/heat/tasks/config.yml deleted file mode 100644 index adb48be74..000000000 --- a/ansible/roles/heat/tasks/config.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - recurse: yes - when: - - item.value.enabled | bool - with_dict: "{{ heat_services }}" - -- name: Copying over config.json files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - when: - - item.value.enabled | bool - with_dict: "{{ heat_services }}" - -- name: Copying over the heat-engine environment file - template: - src: "_deprecated.yaml" - dest: "{{ node_config_directory }}/{{ item }}/_deprecated.yaml" - with_items: - - "heat-engine" - -- name: Copying over heat.conf - merge_configs: - vars: - service_name: "{{ item.key }}" - sources: - - "{{ role_path }}/templates/heat.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/heat.conf" - - "{{ node_custom_config }}/heat/{{ item.key }}.conf" - - "{{ node_custom_config }}/heat/{{ inventory_hostname }}/heat.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/heat.conf" - register: heat_confs - when: - - item.value.enabled | bool - with_dict: "{{ heat_services }}" - -- name: Check if policies shall be overwritten - local_action: stat path="{{ node_custom_config }}/heat/policy.json" - register: heat_policy - -- name: Copying over existing policy.json - template: - src: "{{ node_custom_config }}/heat/policy.json" - dest: "{{ node_config_directory }}/{{ item.key 
}}/policy.json" - register: heat_policy_jsons - when: - - heat_policy.stat.exists - with_dict: "{{ heat_services }}" diff --git a/ansible/roles/heat/tasks/main.yml b/ansible/roles/heat/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/heat/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/heat/templates/_deprecated.yaml b/ansible/roles/heat/templates/_deprecated.yaml deleted file mode 100644 index 76cc8e387..000000000 --- a/ansible/roles/heat/templates/_deprecated.yaml +++ /dev/null @@ -1,4 +0,0 @@ -resource_registry: - "OS::Heat::HARestarter": - "OS::Heat::SoftwareDeployments": - "OS::Heat::StructuredDeployments": diff --git a/ansible/roles/heat/templates/heat-api-cfn.json.j2 b/ansible/roles/heat/templates/heat-api-cfn.json.j2 deleted file mode 100644 index 40d7987c9..000000000 --- a/ansible/roles/heat/templates/heat-api-cfn.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "heat-api-cfn", - "config_files": [ - { - "source": "{{ container_config_directory }}/heat.conf", - "dest": "/etc/heat/heat.conf", - "owner": "heat", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/heat/policy.json", - "owner": "heat", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/heat", - "owner": "heat:heat", - "recurse": true - } - ] -} diff --git a/ansible/roles/heat/templates/heat-api.json.j2 b/ansible/roles/heat/templates/heat-api.json.j2 deleted file mode 100644 index bc11a53e0..000000000 --- a/ansible/roles/heat/templates/heat-api.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "heat-api", - "config_files": [ - { - "source": "{{ container_config_directory }}/heat.conf", - "dest": "/etc/heat/heat.conf", - "owner": "heat", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/heat/policy.json", - "owner": "heat", - "perm": "0600", - 
"optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/heat", - "owner": "heat:heat", - "recurse": true - } - ] -} diff --git a/ansible/roles/heat/templates/heat-engine.json.j2 b/ansible/roles/heat/templates/heat-engine.json.j2 deleted file mode 100644 index c9bda6aaf..000000000 --- a/ansible/roles/heat/templates/heat-engine.json.j2 +++ /dev/null @@ -1,31 +0,0 @@ -{ - "command": "heat-engine", - "config_files": [ - { - "source": "{{ container_config_directory }}/heat.conf", - "dest": "/etc/heat/heat.conf", - "owner": "heat", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/_deprecated.yaml", - "dest": "/etc/heat/environment.d/_deprecated.yaml", - "owner": "heat", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/heat/policy.json", - "owner": "heat", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/heat", - "owner": "heat:heat", - "recurse": true - } - ] -} diff --git a/ansible/roles/heat/templates/heat.conf.j2 b/ansible/roles/heat/templates/heat.conf.j2 deleted file mode 100644 index b0d046437..000000000 --- a/ansible/roles/heat/templates/heat.conf.j2 +++ /dev/null @@ -1,83 +0,0 @@ -[DEFAULT] -debug = {{ heat_logging_debug }} - -{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -log_dir = /var/log/kolla/heat -{% endif %} - -heat_watch_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }} -heat_metadata_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }} -heat_waitcondition_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}/v1/waitcondition - -heat_stack_user_role = {{ heat_stack_user_role }} - -stack_domain_admin = heat_domain_admin -stack_domain_admin_password = {{ heat_domain_admin_password }} -stack_user_domain_name = heat_user_domain - -deferred_auth_method = trusts -trusts_delegated_roles = 
heat_stack_owner -num_engine_workers = {{ openstack_service_workers }} - -transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }} - -{% if service_name == 'heat-api' %} -[heat_api] -bind_host = {{ api_interface_address }} -bind_port = {{ heat_api_port }} -workers = {{ openstack_service_workers }} -{% endif %} - -{% if service_name == 'heat-api-cfn' %} -[heat_api_cfn] -bind_host = {{ api_interface_address }} -bind_port = {{ heat_api_cfn_port }} -{% endif %} - -[database] -connection = mysql+pymysql://{{ heat_database_user }}:{{ heat_database_password }}@{{ heat_database_address }}/{{ heat_database_name }} -max_retries = -1 - -[keystone_authtoken] -auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }} -auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ heat_keystone_user }} -password = {{ heat_keystone_password }} - -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} -memcache_servers = {{ memcached_servers }}:{{ memcached_port }} - - -[cache] -backend = oslo_cache.memcache_pool -enabled = True -memcache_servers = {{ memcached_servers }}:{{ memcached_port }} - - -[trustee] -auth_uri = {{ keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -user_domain_id = default -username = {{ heat_keystone_user }} -password = {{ heat_keystone_password }} - -[ec2authtoken] -auth_uri = {{ keystone_internal_url }} - -[clients_keystone] -auth_uri = {{ keystone_internal_url }} - -[oslo_messaging_notifications] -driver = noop - -[clients] -endpoint_type = internalURL diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml deleted file mode 100644 index ed12ef204..000000000 --- a/ansible/roles/horizon/defaults/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- 
-project_name: "horizon" - -horizon_services: - horizon: - container_name: horizon - group: horizon - enabled: true - image: "{{ horizon_image_full }}" - environment: - ENABLE_CLOUDKITTY: "{{ 'yes' if enable_horizon_cloudkitty | bool else 'no' }}" - ENABLE_FREEZER: "{{ 'yes' if enable_horizon_freezer | bool else 'no' }}" - ENABLE_IRONIC: "{{ 'yes' if enable_horizon_ironic | bool else 'no' }}" - ENABLE_KARBOR: "{{ 'yes' if enable_horizon_karbor | bool else 'no' }}" - ENABLE_MAGNUM: "{{ 'yes' if enable_horizon_magnum | bool else 'no' }}" - ENABLE_MANILA: "{{ 'yes' if enable_horizon_manila | bool else 'no' }}" - ENABLE_MISTRAL: "{{ 'yes' if enable_horizon_mistral | bool else 'no' }}" - ENABLE_MURANO: "{{ 'yes' if enable_horizon_murano | bool else 'no' }}" - ENABLE_NEUTRON_LBAAS: "{{ 'yes' if enable_horizon_neutron_lbaas | bool else 'no' }}" - ENABLE_SAHARA: "{{ 'yes' if enable_horizon_sahara | bool else 'no' }}" - ENABLE_SEARCHLIGHT: "{{ 'yes' if enable_horizon_searchlight | bool else 'no' }}" - ENABLE_SENLIN: "{{ 'yes' if enable_horizon_senlin | bool else 'no' }}" - ENABLE_SOLUM: "{{ 'yes' if enable_horizon_solum | bool else 'no' }}" - ENABLE_TACKER: "{{ 'yes' if enable_horizon_tacker | bool else 'no' }}" - ENABLE_TROVE: "{{ 'yes' if enable_horizon_trove | bool else 'no' }}" - ENABLE_WATCHER: "{{ 'yes' if enable_horizon_watcher | bool else 'no' }}" - volumes: - - "{{ node_config_directory }}/horizon/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - - -#################### -# Database -#################### -horizon_database_name: "horizon" -horizon_database_user: "horizon" -horizon_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - -#################### -# Docker -#################### -horizon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-horizon" -horizon_tag: "{{ openstack_release }}" 
-horizon_image_full: "{{ horizon_image }}:{{ horizon_tag }}" - - -#################### -# OpenStack -#################### -openstack_horizon_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}" - -horizon_logging_debug: "{{ openstack_logging_debug }}" diff --git a/ansible/roles/horizon/tasks/config.yml b/ansible/roles/horizon/tasks/config.yml deleted file mode 100644 index f5e12ac22..000000000 --- a/ansible/roles/horizon/tasks/config.yml +++ /dev/null @@ -1,88 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - recurse: yes - when: - - item.value.enabled | bool - with_dict: "{{ horizon_services }}" - -- name: Copying over config.json files for services - vars: - horizon: "{{ horizon_services['horizon'] }}" - template: - src: "horizon.json.j2" - dest: "{{ node_config_directory }}/horizon/config.json" - register: horizon_config_json - when: - - horizon.enabled | bool - -- name: Copying over horizon.conf - vars: - horizon: "{{ horizon_services['horizon'] }}" - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/horizon/horizon.conf" - register: horizon_conf - with_first_found: - - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/horizon.conf" - - "{{ node_custom_config }}/horizon/horizon.conf" - - "horizon.conf.j2" - when: - - horizon.enabled | bool - -- name: Copying over local_settings - vars: - horizon: "{{ horizon_services['horizon'] }}" - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/horizon/local_settings" - with_first_found: - - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/local_settings" - - "{{ node_custom_config }}/horizon/local_settings" - - "local_settings.j2" - register: horizon_local_settings - when: - - horizon.enabled | bool - -- name: Check if policies shall be 
overwritten - local_action: stat path="{{ node_custom_config }}/horizon/{{ item.name }}_policy.json" - register: custom_policy - when: item.enabled | bool - with_items: - - { name: "ceilometer", enabled: "{{ enable_ceilometer }}" } - - { name: "cinder", enabled: "{{ enable_cinder }}" } - - { name: "cloudkitty", enabled: "{{ enable_horizon_cloudkitty }}" } - - { name: "freezer", enabled: "{{ enable_horizon_freezer }}" } - - { name: "glance", enabled: "{{ enable_glance }}" } - - { name: "heat", enabled: "{{ enable_heat }}" } - - { name: "ironic", enabled: "{{ enable_horizon_ironic }}" } - - { name: "keystone", enabled: "{{ enable_keystone }}" } - - { name: "karbor", enabled: "{{ enable_horizon_karbor }}" } - - { name: "magnum", enabled: "{{ enable_horizon_magnum }}" } - - { name: "manila", enabled: "{{ enable_horizon_manila }}" } - - { name: "mistral", enabled: "{{ enable_horizon_mistral }}" } - - { name: "murano", enabled: "{{ enable_horizon_murano }}" } - - { name: "neutron", enabled: "{{ enable_neutron }}" } - - { name: "nova", enabled: "{{ enable_nova }}" } - - { name: "sahara", enabled: "{{ enable_horizon_sahara }}" } - - { name: "searchlight", enabled: "{{ enable_horizon_searchlight }}" } - - { name: "senlin", enabled: "{{ enable_horizon_senlin }}" } - - { name: "solum", enabled: "{{ enable_horizon_solum }}" } - - { name: "tacker", enabled: "{{ enable_horizon_tacker }}" } - - { name: "trove", enabled: "{{ enable_horizon_trove }}" } - - { name: "watcher", enabled: "{{ enable_horizon_watcher }}" } - -- name: Copying over existing policy.json - vars: - horizon: "{{ horizon_services['horizon'] }}" - template: - src: "{{ node_custom_config }}/horizon/{{ item[0]['name'] }}_policy.json" - dest: "{{ node_config_directory }}/horizon/{{ item[0]['name'] }}_policy.json" - register: policy_jsons - when: - - horizon.enabled | bool - - item.item.enabled | bool - - item.stat.exists - with_items: "{{ custom_policy.results }}" diff --git a/ansible/roles/horizon/tasks/main.yml 
b/ansible/roles/horizon/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/horizon/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/horizon/templates/horizon.conf.j2 b/ansible/roles/horizon/templates/horizon.conf.j2 deleted file mode 100644 index db44fa82b..000000000 --- a/ansible/roles/horizon/templates/horizon.conf.j2 +++ /dev/null @@ -1,76 +0,0 @@ -{% set python_path = '/usr/share/openstack-dashboard' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %} -Listen {{ api_interface_address }}:{{ horizon_port }} - - - LogLevel warn -{% if std_logger %} - ErrorLog /proc/self/fd/2 - CustomLog /proc/self/fd/1 combined -{% else %} - ErrorLog /var/log/kolla/horizon/horizon.log - CustomLog /var/log/kolla/horizon/horizon-access.log combined -{% endif %} - - WSGIScriptReloading On - WSGIDaemonProcess horizon-http processes={{ openstack_service_workers }} threads=1 user=horizon group=horizon display-name=%{GROUP} python-path={{ python_path }} - WSGIProcessGroup horizon-http - WSGIScriptAlias / {{ python_path }}/openstack_dashboard/wsgi/django.wsgi - WSGIPassAuthorization On - - - Require all granted - - - Alias /static {{ python_path }}/static - - SetHandler None - - - -{% if kolla_enable_tls_external | bool %} -Header edit Location ^http://(.*)$ https://$1 -{% else %} -# NOTE(Jeffrey4l): Only enable deflate when tls is disabled until the -# OSSN-0037 is fixed. -# see https://wiki.openstack.org/wiki/OSSN/OSSN-0037 for more information. 
- - # Compress HTML, CSS, JavaScript, Text, XML and fonts - AddOutputFilterByType DEFLATE application/javascript - AddOutputFilterByType DEFLATE application/rss+xml - AddOutputFilterByType DEFLATE application/vnd.ms-fontobject - AddOutputFilterByType DEFLATE application/x-font - AddOutputFilterByType DEFLATE application/x-font-opentype - AddOutputFilterByType DEFLATE application/x-font-otf - AddOutputFilterByType DEFLATE application/x-font-truetype - AddOutputFilterByType DEFLATE application/x-font-ttf - AddOutputFilterByType DEFLATE application/x-javascript - AddOutputFilterByType DEFLATE application/xhtml+xml - AddOutputFilterByType DEFLATE application/xml - AddOutputFilterByType DEFLATE font/opentype - AddOutputFilterByType DEFLATE font/otf - AddOutputFilterByType DEFLATE font/ttf - AddOutputFilterByType DEFLATE image/svg+xml - AddOutputFilterByType DEFLATE image/x-icon - AddOutputFilterByType DEFLATE text/css - AddOutputFilterByType DEFLATE text/html - AddOutputFilterByType DEFLATE text/javascript - AddOutputFilterByType DEFLATE text/plain - AddOutputFilterByType DEFLATE text/xml - -{% endif %} - - - - ExpiresActive on - ExpiresDefault "access plus 1 month" - ExpiresByType application/javascript "access plus 1 year" - ExpiresByType text/css "access plus 1 year" - ExpiresByType image/x-ico "access plus 1 year" - ExpiresByType image/jpg "access plus 1 year" - ExpiresByType image/jpeg "access plus 1 year" - ExpiresByType image/gif "access plus 1 year" - ExpiresByType image/png "access plus 1 year" - Header merge Cache-Control public - Header unset ETag - - diff --git a/ansible/roles/horizon/templates/horizon.json.j2 b/ansible/roles/horizon/templates/horizon.json.j2 deleted file mode 100644 index 95e2fca2f..000000000 --- a/ansible/roles/horizon/templates/horizon.json.j2 +++ /dev/null @@ -1,54 +0,0 @@ -{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} -{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in 
['ubuntu', 'debian'] else 'httpd/conf.d' %} -{% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'horizon.conf' %} -{% set services = [ - ( 'ceilometer', enable_ceilometer ), - ( 'cinder', enable_cinder ), - ( 'cloudkitty', enable_horizon_cloudkitty ), - ( 'freezer', enable_horizon_freezer ), - ( 'glance', enable_glance ), - ( 'heat', enable_heat ), - ( 'ironic', enable_horizon_ironic ), - ( 'keystone', enable_keystone ), - ( 'karbor', enable_horizon_karbor ), - ( 'magnum', enable_horizon_magnum ), - ( 'manila', enable_horizon_manila ), - ( 'mistral', enable_horizon_mistral ), - ( 'murano', enable_horizon_murano ), - ( 'neutron', enable_neutron ), - ( 'nova', enable_nova ), - ( 'sahara', enable_horizon_sahara ), - ( 'searchlight', enable_horizon_searchlight ), - ( 'senlin', enable_horizon_senlin ), - ( 'solum', enable_horizon_solum ), - ( 'tacker', enable_horizon_tacker ), - ( 'trove', enable_horizon_trove ), - ( 'watcher', enable_horizon_watcher ) -] %} - -{ - "command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND", - "config_files": [ - { - "source": "{{ container_config_directory }}/horizon.conf", - "dest": "/etc/{{ apache_dir }}/{{ apache_file }}", - "owner": "horizon", - "perm": "0644" - }, -{% for service, enabled in services if enabled | bool %} - { - "source": "{{ container_config_directory }}/horizon/{{ service }}_policy.json", - "dest": "/etc/openstack-dashboard/{{ service }}_policy.json", - "owner": "horizon", - "perm": "0600", - "optional": true - }, -{% endfor %} - { - "source": "{{ container_config_directory }}/local_settings", - "dest": "/etc/openstack-dashboard/local_settings", - "owner": "horizon", - "perm": "0644" - } - ] -} diff --git a/ansible/roles/horizon/templates/local_settings.j2 b/ansible/roles/horizon/templates/local_settings.j2 deleted file mode 100644 index 462470d5c..000000000 --- a/ansible/roles/horizon/templates/local_settings.j2 +++ /dev/null @@ -1,815 +0,0 @@ -# -*- coding: utf-8 -*- - -import os 
- -from django.utils.translation import ugettext_lazy as _ - -from openstack_dashboard import exceptions -from openstack_dashboard.settings import HORIZON_CONFIG - -DEBUG = {{ horizon_logging_debug }} -TEMPLATE_DEBUG = DEBUG - -COMPRESS_OFFLINE = True - -# WEBROOT is the location relative to Webserver root -# should end with a slash. -WEBROOT = '/' -#LOGIN_URL = WEBROOT + 'auth/login/' -#LOGOUT_URL = WEBROOT + 'auth/logout/' -# -# LOGIN_REDIRECT_URL can be used as an alternative for -# HORIZON_CONFIG.user_home, if user_home is not set. -# Do not set it to '/home/', as this will cause circular redirect loop -#LOGIN_REDIRECT_URL = WEBROOT - -# If horizon is running in production (DEBUG is False), set this -# with the list of host/domain names that the application can serve. -# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts -ALLOWED_HOSTS = ['*'] - -{% if horizon_backend_database | bool %} -SESSION_ENGINE = 'django.contrib.sessions.backends.db' -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.mysql', - 'NAME': '{{ horizon_database_name }}', - 'USER': '{{ horizon_database_user }}', - 'PASSWORD': '{{ horizon_database_password }}', - 'HOST': '{{ database_address }}', - 'PORT': '{{ database_port }}' - } -} -{% endif %} - -# Set SSL proxy settings: -# Pass this header from the proxy after terminating the SSL, -# and don't forget to strip it from the client's request. 
-# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header -#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') - -# If Horizon is being served through SSL, then uncomment the following two -# settings to better secure the cookies from security exploits -#CSRF_COOKIE_SECURE = True -#SESSION_COOKIE_SECURE = True - -{% if kolla_enable_tls_external | bool %} -SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') -CSRF_COOKIE_SECURE = True -SESSION_COOKIE_SECURE = True -{% endif %} - -# The absolute path to the directory where message files are collected. -# The message file must have a .json file extension. When the user logins to -# horizon, the message files collected are processed and displayed to the user. -#MESSAGES_PATH=None - -# Overrides for OpenStack API versions. Use this setting to force the -# OpenStack dashboard to use a specific API version for a given service API. -# Versions specified here should be integers or floats, not strings. -# NOTE: The version should be formatted as it appears in the URL for the -# service API. For example, The identity service APIs have inconsistent -# use of the decimal point, so valid options would be 2.0 or 3. -# Minimum compute version to get the instance locked status is 2.9. -#OPENSTACK_API_VERSIONS = { -# "data-processing": 1.1, -# "identity": 3, -# "volume": 2, -# "compute": 2, -#} - -OPENSTACK_API_VERSIONS = { - "identity": 3, - "volume": 2, -} - -# Set this to True if running on a multi-domain model. When this is enabled, it -# will require the user to enter the Domain name in addition to the username -# for login. -#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False - -# Overrides the default domain used when running on single-domain model -# with Keystone V3. All entities will be created in the default domain. -# NOTE: This value must be the ID of the default domain, NOT the name. 
-# Also, you will most likely have a value in the keystone policy file like this -# "cloud_admin": "rule:admin_required and domain_id:" -# This value must match the domain id specified there. -#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default' - -# Set this to True to enable panels that provide the ability for users to -# manage Identity Providers (IdPs) and establish a set of rules to map -# federation protocol attributes to Identity API attributes. -# This extension requires v3.0+ of the Identity API. -#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False - -# Set Console type: -# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None -# Set to None explicitly if you want to deactivate the console. -#CONSOLE_TYPE = "AUTO" - -# If provided, a "Report Bug" link will be displayed in the site header -# which links to the value of this setting (ideally a URL containing -# information on how to report issues). -#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com" - -# Show backdrop element outside the modal, do not close the modal -# after clicking on backdrop. -#HORIZON_CONFIG["modal_backdrop"] = "static" - -# Specify a regular expression to validate user passwords. -#HORIZON_CONFIG["password_validator"] = { -# "regex": '.*', -# "help_text": _("Your password does not meet the requirements."), -#} - -# Disable simplified floating IP address management for deployments with -# multiple floating IP pools or complex network requirements. -#HORIZON_CONFIG["simple_ip_management"] = False - -# Turn off browser autocompletion for forms including the login form and -# the database creation workflow if so desired. -#HORIZON_CONFIG["password_autocomplete"] = "off" - -# Setting this to True will disable the reveal button for password fields, -# including on the login form. 
-#HORIZON_CONFIG["disable_password_reveal"] = False - -LOCAL_PATH = '/tmp' - -# Set custom secret key: -# You can either set it to a specific value or you can let horizon generate a -# default secret key that is unique on this machine, e.i. regardless of the -# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, -# there may be situations where you would want to set this explicitly, e.g. -# when multiple dashboard instances are distributed on different machines -# (usually behind a load-balancer). Either you have to make sure that a session -# gets all requests routed to the same dashboard instance or you set the same -# SECRET_KEY for all of them. -SECRET_KEY='{{ horizon_secret_key }}' - -# We recommend you use memcached for development; otherwise after every reload -# of the django development server, you will have to login again. To use -# memcached set CACHES to something like -#CACHES = { -# 'default': { -# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', -# 'LOCATION': '127.0.0.1:11211', -# }, -#} - -{% if horizon_backend_database | bool == False %} -SESSION_ENGINE = 'django.contrib.sessions.backends.cache' -CACHES = { - 'default': { - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', - - 'LOCATION': '{{ memcached_servers }}' - } -} -{% endif %} - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# Configure these for your outgoing email host -#EMAIL_HOST = 'smtp.my-company.com' -#EMAIL_PORT = 25 -#EMAIL_HOST_USER = 'djangomail' -#EMAIL_HOST_PASSWORD = 'top-secret!' - -# For multiple regions uncomment this configuration, and add (endpoint, title). 
-#AVAILABLE_REGIONS = [ -# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), -# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), -#] - -OPENSTACK_HOST = "{{ api_interface_address }}" - -OPENSTACK_KEYSTONE_URL = "{{ keystone_internal_url }}" -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}" - -# Enables keystone web single-sign-on if set to True. -#WEBSSO_ENABLED = False - -# Determines which authentication choice to show as default. -#WEBSSO_INITIAL_CHOICE = "credentials" - -# The list of authentication mechanisms which include keystone -# federation protocols and identity provider/federation protocol -# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol -# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID -# Connect respectively. -# Do not remove the mandatory credentials mechanism. -# Note: The last two tuples are sample mapping keys to a identity provider -# and federation protocol combination (WEBSSO_IDP_MAPPING). -#WEBSSO_CHOICES = ( -# ("credentials", _("Keystone Credentials")), -# ("oidc", _("OpenID Connect")), -# ("saml2", _("Security Assertion Markup Language")), -# ("acme_oidc", "ACME - OpenID Connect"), -# ("acme_saml2", "ACME - SAML2"), -#) - -# A dictionary of specific identity provider and federation protocol -# combinations. From the selected authentication mechanism, the value -# will be looked up as keys in the dictionary. If a match is found, -# it will redirect the user to a identity provider and federation protocol -# specific WebSSO endpoint in keystone, otherwise it will use the value -# as the protocol_id when redirecting to the WebSSO by protocol endpoint. -# NOTE: The value is expected to be a tuple formatted as: (, ). 
-#WEBSSO_IDP_MAPPING = { -# "acme_oidc": ("acme", "oidc"), -# "acme_saml2": ("acme", "saml2"), -#} - -# Disable SSL certificate checks (useful for self-signed certificates): -#OPENSTACK_SSL_NO_VERIFY = True - -# The CA certificate to use to verify SSL connections -#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' - -# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the -# capabilities of the auth backend for Keystone. -# If Keystone has been configured to use LDAP as the auth backend then set -# can_edit_user to False and name to 'ldap'. -# -# TODO(tres): Remove these once Keystone has an API to identify auth backend. -OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True, - 'can_edit_group': True, - 'can_edit_project': True, - 'can_edit_domain': True, - 'can_edit_role': True, -} - -# Setting this to True, will add a new "Retrieve Password" action on instance, -# allowing Admin session password retrieval/decryption. -#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False - -# The Launch Instance user experience has been significantly enhanced. -# You can choose whether to enable the new launch instance experience, -# the legacy experience, or both. The legacy experience will be removed -# in a future release, but is available as a temporary backup setting to ensure -# compatibility with existing deployments. Further development will not be -# done on the legacy experience. Please report any problems with the new -# experience via the Launchpad tracking system. -# -# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to -# determine the experience to enable. Set them both to true to enable -# both. -#LAUNCH_INSTANCE_LEGACY_ENABLED = True -#LAUNCH_INSTANCE_NG_ENABLED = False - -# A dictionary of settings which can be used to provide the default values for -# properties found in the Launch Instance modal. 
-#LAUNCH_INSTANCE_DEFAULTS = { -# 'config_drive': False, -# 'enable_scheduler_hints': True -#} - -# The Xen Hypervisor has the ability to set the mount point for volumes -# attached to instances (other Hypervisors currently do not). Setting -# can_set_mount_point to True will add the option to set the mount point -# from the UI. -OPENSTACK_HYPERVISOR_FEATURES = { - 'can_set_mount_point': False, - 'can_set_password': False, - 'requires_keypair': False, - 'enable_quotas': True -} - -# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional -# services provided by cinder that is not exposed by its extension API. -OPENSTACK_CINDER_FEATURES = { - 'enable_backup': True, -} - -# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional -# services provided by neutron. Options currently available are load -# balancer service, security groups, quotas, VPN service. -OPENSTACK_NEUTRON_NETWORK = { - 'enable_router': True, - 'enable_quotas': True, - 'enable_ipv6': True, - 'enable_distributed_router': False, - 'enable_ha_router': False, - 'enable_lb': True, - 'enable_firewall': True, - 'enable_vpn': True, - 'enable_fip_topology_check': True, - - # Default dns servers you would like to use when a subnet is - # created. This is only a default, users can still choose a different - # list of dns servers when creating a new subnet. - # The entries below are examples only, and are not appropriate for - # real deployments - # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"], - - # The profile_support option is used to detect if an external router can be - # configured via the dashboard. When using specific plugins the - # profile_support can be turned on if needed. - 'profile_support': None, - #'profile_support': 'cisco', - - # Set which provider network types are supported. Only the network types - # in this list will be available to choose from when creating a network. - # Network types include local, flat, vlan, gre, vxlan and geneve. 
- # 'supported_provider_types': ['*'], - - # You can configure available segmentation ID range per network type - # in your deployment. - # 'segmentation_id_range': { - # 'vlan': [1024, 2048], - # 'vxlan': [4094, 65536], - # }, - - # You can define additional provider network types here. - # 'extra_provider_types': { - # 'awesome_type': { - # 'display_name': 'Awesome New Type', - # 'require_physical_network': False, - # 'require_segmentation_id': True, - # } - # }, - - # Set which VNIC types are supported for port binding. Only the VNIC - # types in this list will be available to choose from when creating a - # port. - # VNIC types include 'normal', 'macvtap' and 'direct'. - # Set to empty list or None to disable VNIC type selection. - 'supported_vnic_types': ['*'], -} - -# The OPENSTACK_HEAT_STACK settings can be used to disable password -# field required while launching the stack. -OPENSTACK_HEAT_STACK = { - 'enable_user_pass': True, -} - -# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features -# in the OpenStack Dashboard related to the Image service, such as the list -# of supported image formats. -#OPENSTACK_IMAGE_BACKEND = { -# 'image_formats': [ -# ('', _('Select format')), -# ('aki', _('AKI - Amazon Kernel Image')), -# ('ami', _('AMI - Amazon Machine Image')), -# ('ari', _('ARI - Amazon Ramdisk Image')), -# ('docker', _('Docker')), -# ('iso', _('ISO - Optical Disk Image')), -# ('ova', _('OVA - Open Virtual Appliance')), -# ('qcow2', _('QCOW2 - QEMU Emulator')), -# ('raw', _('Raw')), -# ('vdi', _('VDI - Virtual Disk Image')), -# ('vhd', _('VHD - Virtual Hard Disk')), -# ('vmdk', _('VMDK - Virtual Machine Disk')), -# ], -#} - -# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for -# image custom property attributes that appear on image detail pages. 
-IMAGE_CUSTOM_PROPERTY_TITLES = { - "architecture": _("Architecture"), - "kernel_id": _("Kernel ID"), - "ramdisk_id": _("Ramdisk ID"), - "image_state": _("Euca2ools state"), - "project_id": _("Project ID"), - "image_type": _("Image Type"), -} - -# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image -# custom properties should not be displayed in the Image Custom Properties -# table. -IMAGE_RESERVED_CUSTOM_PROPERTIES = [] - -# Set to 'legacy' or 'direct' to allow users to upload images to glance via -# Horizon server. When enabled, a file form field will appear on the create -# image form. If set to 'off', there will be no file form field on the create -# image form. See documentation for deployment considerations. -#HORIZON_IMAGES_UPLOAD_MODE = 'legacy' - -# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is 'publicURL'. -OPENSTACK_ENDPOINT_TYPE = "internalURL" - -# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the -# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is None. This -# value should differ from OPENSTACK_ENDPOINT_TYPE if used. -#SECONDARY_ENDPOINT_TYPE = None - -# The number of objects (Swift containers/objects or images) to display -# on a single page before providing a paging element (a "more" link) -# to paginate results. -API_RESULT_LIMIT = 1000 -API_RESULT_PAGE_SIZE = 20 - -# The size of chunk in bytes for downloading objects from Swift -SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024 - -# Specify a maximum number of items to display in a dropdown. -DROPDOWN_MAX_ITEMS = 30 - -# The timezone of the server. 
This should correspond with the timezone -# of your entire OpenStack installation, and hopefully be in UTC. -TIME_ZONE = "UTC" - -# When launching an instance, the menu of available flavors is -# sorted by RAM usage, ascending. If you would like a different sort order, -# you can provide another flavor attribute as sorting key. Alternatively, you -# can provide a custom callback method to use for sorting. You can also provide -# a flag for reverse sort. For more info, see -# http://docs.python.org/2/library/functions.html#sorted -#CREATE_INSTANCE_FLAVOR_SORT = { -# 'key': 'name', -# # or -# 'key': my_awesome_callback_method, -# 'reverse': False, -#} - -# Set this to True to display an 'Admin Password' field on the Change Password -# form to verify that it is indeed the admin logged-in who wants to change -# the password. -#ENFORCE_PASSWORD_CHECK = False - -# Modules that provide /auth routes that can be used to handle different types -# of user authentication. Add auth plugins that require extra route handling to -# this list. -#AUTHENTICATION_URLS = [ -# 'openstack_auth.urls', -#] - -# The Horizon Policy Enforcement engine uses these values to load per service -# policy rule files. The content of these files should match the files the -# OpenStack services are using to determine role based access control in the -# target installation. - -# Path to directory containing policy.json files -POLICY_FILES_PATH = '/etc/openstack-dashboard' - -# Map of local copy of service policy files. -# Please insure that your identity policy file matches the one being used on -# your keystone servers. There is an alternate policy file that may be used -# in the Keystone v3 multi-domain case, policy.v3cloudsample.json. 
-# This file is not included in the Horizon repository by default but can be -# found at -# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \ -# policy.v3cloudsample.json -# Having matching policy files on the Horizon and Keystone servers is essential -# for normal operation. This holds true for all services and their policy files. -#POLICY_FILES = { -# 'identity': 'keystone_policy.json', -# 'compute': 'nova_policy.json', -# 'volume': 'cinder_policy.json', -# 'image': 'glance_policy.json', -# 'orchestration': 'heat_policy.json', -# 'network': 'neutron_policy.json', -# 'telemetry': 'ceilometer_policy.json', -#} - -# TODO: (david-lyle) remove when plugins support adding settings. -# Note: Only used when trove-dashboard plugin is configured to be used by -# Horizon. -# Trove user and database extension support. By default support for -# creating users and databases on database instances is turned on. -# To disable these extensions set the permission here to something -# unusable such as ["!"]. -#TROVE_ADD_USER_PERMS = [] -#TROVE_ADD_DATABASE_PERMS = [] - -# Change this patch to the appropriate list of tuples containing -# a key, label and static directory containing two files: -# _variables.scss and _styles.scss -#AVAILABLE_THEMES = [ -# ('default', 'Default', 'themes/default'), -# ('material', 'Material', 'themes/material'), -#] - -LOGGING = { - 'version': 1, - # When set to True this will disable all logging except - # for loggers specified in this configuration dictionary. Note that - # if nothing is specified here and disable_existing_loggers is True, - # django.db.backends will still log unless it is disabled explicitly. 
- 'disable_existing_loggers': False, - 'formatters': { - 'operation': { - # The format of "%(message)s" is defined by - # OPERATION_LOG_OPTIONS['format'] - 'format': '%(asctime)s %(message)s' - }, - }, - 'handlers': { - 'null': { - 'level': 'DEBUG', - 'class': 'logging.NullHandler', - }, - 'console': { - # Set the level to "DEBUG" for verbose output logging. - 'level': 'INFO', - 'class': 'logging.StreamHandler', - }, - 'operation': { - 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'operation', - }, - }, - 'loggers': { - # Logging from django.db.backends is VERY verbose, send to null - # by default. - 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - }, - 'requests': { - 'handlers': ['null'], - 'propagate': False, - }, - 'horizon': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'horizon.operation_log': { - 'handlers': ['operation'], - 'level': 'INFO', - 'propagate': False, - }, - 'openstack_dashboard': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'novaclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'cinderclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'glanceclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'neutronclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'heatclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'ceilometerclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'swiftclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_auth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'nose.plugins.manager': { - 'handlers': ['console'], - 'level': 'DEBUG', - 
'propagate': False, - }, - 'django': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'iso8601': { - 'handlers': ['null'], - 'propagate': False, - }, - 'scss': { - 'handlers': ['null'], - 'propagate': False, - }, - }, -} - -# 'direction' should not be specified for all_tcp/udp/icmp. -# It is specified in the form. -SECURITY_GROUP_RULES = { - 'all_tcp': { - 'name': _('All TCP'), - 'ip_protocol': 'tcp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_udp': { - 'name': _('All UDP'), - 'ip_protocol': 'udp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_icmp': { - 'name': _('All ICMP'), - 'ip_protocol': 'icmp', - 'from_port': '-1', - 'to_port': '-1', - }, - 'ssh': { - 'name': 'SSH', - 'ip_protocol': 'tcp', - 'from_port': '22', - 'to_port': '22', - }, - 'smtp': { - 'name': 'SMTP', - 'ip_protocol': 'tcp', - 'from_port': '25', - 'to_port': '25', - }, - 'dns': { - 'name': 'DNS', - 'ip_protocol': 'tcp', - 'from_port': '53', - 'to_port': '53', - }, - 'http': { - 'name': 'HTTP', - 'ip_protocol': 'tcp', - 'from_port': '80', - 'to_port': '80', - }, - 'pop3': { - 'name': 'POP3', - 'ip_protocol': 'tcp', - 'from_port': '110', - 'to_port': '110', - }, - 'imap': { - 'name': 'IMAP', - 'ip_protocol': 'tcp', - 'from_port': '143', - 'to_port': '143', - }, - 'ldap': { - 'name': 'LDAP', - 'ip_protocol': 'tcp', - 'from_port': '389', - 'to_port': '389', - }, - 'https': { - 'name': 'HTTPS', - 'ip_protocol': 'tcp', - 'from_port': '443', - 'to_port': '443', - }, - 'smtps': { - 'name': 'SMTPS', - 'ip_protocol': 'tcp', - 'from_port': '465', - 'to_port': '465', - }, - 'imaps': { - 'name': 'IMAPS', - 'ip_protocol': 'tcp', - 'from_port': '993', - 'to_port': '993', - }, - 'pop3s': { - 'name': 'POP3S', - 'ip_protocol': 'tcp', - 'from_port': '995', - 'to_port': '995', - }, - 'ms_sql': { - 'name': 'MS SQL', - 'ip_protocol': 'tcp', - 'from_port': '1433', - 'to_port': '1433', - }, - 'mysql': { - 'name': 'MYSQL', - 'ip_protocol': 'tcp', - 'from_port': '3306', - 
'to_port': '3306', - }, - 'rdp': { - 'name': 'RDP', - 'ip_protocol': 'tcp', - 'from_port': '3389', - 'to_port': '3389', - }, -} - -# Deprecation Notice: -# -# The setting FLAVOR_EXTRA_KEYS has been deprecated. -# Please load extra spec metadata into the Glance Metadata Definition Catalog. -# -# The sample quota definitions can be found in: -# /etc/metadefs/compute-quota.json -# -# The metadata definition catalog supports CLI and API: -# $glance --os-image-api-version 2 help md-namespace-import -# $glance-manage db_load_metadefs -# -# See Metadata Definitions on: http://docs.openstack.org/developer/glance/ - -# TODO: (david-lyle) remove when plugins support settings natively -# Note: This is only used when the Sahara plugin is configured and enabled -# for use in Horizon. -# Indicate to the Sahara data processing service whether or not -# automatic floating IP allocation is in effect. If it is not -# in effect, the user will be prompted to choose a floating IP -# pool for use in their cluster. False by default. You would want -# to set this to True if you were running Nova Networking with -# auto_assign_floating_ip = True. -#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False - -# The hash algorithm to use for authentication tokens. This must -# match the hash algorithm that the identity server and the -# auth_token middleware are using. Allowed values are the -# algorithms supported by Python's hashlib library. -#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' - -# AngularJS requires some settings to be made available to -# the client side. Some settings are required by in-tree / built-in horizon -# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the -# form of ['SETTING_1','SETTING_2'], etc. -# -# You may remove settings from this list for security purposes, but do so at -# the risk of breaking a built-in horizon feature. These settings are required -# for horizon to function properly. Only remove them if you know what you -# are doing. 
These settings may in the future be moved to be defined within -# the enabled panel configuration. -# You should not add settings to this list for out of tree extensions. -# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI -REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES', - 'LAUNCH_INSTANCE_DEFAULTS', - 'OPENSTACK_IMAGE_FORMATS'] - -# Additional settings can be made available to the client side for -# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS -# !! Please use extreme caution as the settings are transferred via HTTP/S -# and are not encrypted on the browser. This is an experimental API and -# may be deprecated in the future without notice. -#REST_API_ADDITIONAL_SETTINGS = [] - -# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded -# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame -# Scripting (XFS) vulnerability, so this option allows extra security hardening -# where iframes are not used in deployment. Default setting is True. -# For more information see: -# http://tinyurl.com/anticlickjack -#DISALLOW_IFRAME_EMBED = True - -# Help URL can be made available for the client. To provide a help URL, edit the -# following attribute to the URL of your choice. -#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org" - -# Settings for OperationLogMiddleware -# OPERATION_LOG_ENABLED is flag to use the function to log an operation on -# Horizon. -# mask_targets is arrangement for appointing a target to mask. -# method_targets is arrangement of HTTP method to output log. -# format is the log contents. 
-#OPERATION_LOG_ENABLED = False -#OPERATION_LOG_OPTIONS = { -# 'mask_fields': ['password'], -# 'target_methods': ['POST'], -# 'format': ("[%(domain_name)s] [%(domain_id)s] [%(project_name)s]" -# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]" -# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]" -# " [%(http_status)s] [%(param)s]"), -#} - -# The default date range in the Overview panel meters - either minus N -# days (if the value is integer N), or from the beginning of the current month -# until today (if set to None). This setting should be used to limit the amount -# of data fetched by default when rendering the Overview panel. -#OVERVIEW_DAYS_RANGE = 1 - -# To allow operators to require admin users provide a search criteria first -# before loading any data into the admin views, set the following attribute to -# True -#ADMIN_FILTER_DATA_FIRST=False diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml deleted file mode 100644 index 6fe4db94b..000000000 --- a/ansible/roles/ironic/defaults/main.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -project_name: "ironic" - -#################### -# Database -#################### -ironic_database_name: "ironic" -ironic_database_user: "ironic" -ironic_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - -ironic_inspector_database_name: "ironic_inspector" -ironic_inspector_database_user: "ironic_inspector" -ironic_inspector_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - - -#################### -# Docker -#################### -ironic_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-api" -ironic_api_tag: "{{ openstack_release }}" -ironic_api_image_full: "{{ ironic_api_image }}:{{ ironic_api_tag }}" - -ironic_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ 
kolla_install_type }}-ironic-conductor" -ironic_conductor_tag: "{{ openstack_release }}" -ironic_conductor_image_full: "{{ ironic_conductor_image }}:{{ ironic_conductor_tag }}" - -ironic_pxe_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-pxe" -ironic_pxe_tag: "{{ openstack_release }}" -ironic_pxe_image_full: "{{ ironic_pxe_image }}:{{ ironic_pxe_tag }}" - -ironic_inspector_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-inspector" -ironic_inspector_tag: "{{ openstack_release }}" -ironic_inspector_image_full: "{{ ironic_inspector_image }}:{{ ironic_inspector_tag }}" - -ironic_dnsmasq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-dnsmasq" -ironic_dnsmasq_tag: "{{ openstack_release }}" -ironic_dnsmasq_image_full: "{{ ironic_dnsmasq_image }}:{{ ironic_dnsmasq_tag }}" - - -#################### -# OpenStack -#################### -ironic_inspector_keystone_user: "ironic-inspector" - -ironic_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}" -ironic_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}" -ironic_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ ironic_api_port }}" - -ironic_inspector_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_inspector_port }}" -ironic_inspector_internal_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_inspector_port }}" -ironic_inspector_public_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_inspector_port }}" - -ironic_logging_debug: "{{ openstack_logging_debug }}" - -openstack_ironic_auth: "{{ openstack_auth }}" - -openstack_ironic_inspector_auth: "{{ openstack_auth }}" - - -######### -# Ironic 
-######### - -ironic_dnsmasq_interface: "{{ api_interface }}" -ironic_dnsmasq_dhcp_range: -ironic_cleaning_network: diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml deleted file mode 100644 index 4db18aaa7..000000000 --- a/ansible/roles/ironic/tasks/config.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item }}" - state: "directory" - recurse: yes - with_items: - - "ironic-api" - - "ironic-conductor" - - "ironic-conductor-tftp" - - "ironic-inspector" - - "ironic-inspector-tftp" - - "ironic-dnsmasq" - -- name: Copying over config.json files for services - template: - src: "{{ item }}.json.j2" - dest: "{{ node_config_directory }}/{{ item }}/config.json" - with_items: - - "ironic-api" - - "ironic-conductor" - - "ironic-conductor-tftp" - - "ironic-inspector" - - "ironic-inspector-tftp" - - "ironic-dnsmasq" - -- name: Copying over ironic.conf - merge_configs: - vars: - service_name: "{{ item }}" - sources: - - "{{ role_path }}/templates/ironic.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/ironic.conf" - - "{{ node_custom_config }}/ironic/{{ item }}.conf" - - "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/ironic.conf" - dest: "{{ node_config_directory }}/{{ item }}/ironic.conf" - with_items: - - "ironic-api" - - "ironic-conductor" - -- name: Copying over inspector.conf - merge_configs: - vars: - service_name: "ironic-inspector" - sources: - - "{{ role_path }}/templates/ironic-inspector.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/ironic-inspector.conf" - - "{{ node_custom_config }}/ironic-inspector/inspector.conf" - - "{{ node_custom_config }}/ironic-inspector/{{ inventory_hostname 
}}/inspector.conf" - dest: "{{ node_config_directory }}/ironic-inspector/inspector.conf" - -- name: Copying over dnsmasq.conf - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/ironic-dnsmasq/dnsmasq.conf" - with_first_found: - - "{{ node_custom_config }}/ironic/ironic-dnsmasq.conf" - - "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/ironic-dnsmasq.conf" - - "ironic-dnsmasq.conf.j2" - -- name: Copying pxelinux.cfg default - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/ironic-inspector-tftp/default" - with_first_found: - - "{{ node_custom_config }}/ironic/pxelinux.default" - - "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/pxelinux.default" - - "pxelinux.default.j2" - -- name: Check if policies shall be overwritten - local_action: stat path="{{ node_custom_config }}/ironic/policy.json" - register: ironic_policy - -- name: Copying over existing policy.json - template: - src: "{{ node_custom_config }}/ironic/policy.json" - dest: "{{ node_config_directory }}/{{ item }}/policy.json" - with_items: - - "ironic-api" - - "ironic-conductor" - - "ironic-inspector" - - "ironic-inspector-tftp" - when: - ironic_policy.stat.exists diff --git a/ansible/roles/ironic/tasks/main.yml b/ansible/roles/ironic/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/ironic/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/ironic/templates/ironic-api.json.j2 b/ansible/roles/ironic/templates/ironic-api.json.j2 deleted file mode 100644 index ff0917118..000000000 --- a/ansible/roles/ironic/templates/ironic-api.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "ironic-api", - "config_files": [ - { - "source": "{{ container_config_directory }}/ironic.conf", - "dest": "/etc/ironic/ironic.conf", - "owner": "ironic", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/ironic/policy.json", - "owner": 
"ironic", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/ironic", - "owner": "ironic:ironic", - "recurse": true - } - ] -} diff --git a/ansible/roles/ironic/templates/ironic-conductor-tftp.json.j2 b/ansible/roles/ironic/templates/ironic-conductor-tftp.json.j2 deleted file mode 100644 index 86fc3d043..000000000 --- a/ansible/roles/ironic/templates/ironic-conductor-tftp.json.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{ - "command": "/usr/sbin/in.tftpd --verbose --foreground --user root --address 0.0.0.0:69 --map-file /map-file /tftpboot", - "config_files": [], - "permissions": [ - { - "path": "/tftpboot/pxelinux.cfg", - "owner": "ironic:ironic", - "recurse": true - } - ] -} diff --git a/ansible/roles/ironic/templates/ironic-conductor.json.j2 b/ansible/roles/ironic/templates/ironic-conductor.json.j2 deleted file mode 100644 index 969b1f749..000000000 --- a/ansible/roles/ironic/templates/ironic-conductor.json.j2 +++ /dev/null @@ -1,35 +0,0 @@ -{ - "command": "ironic-conductor", - "config_files": [ - { - "source": "{{ container_config_directory }}/ironic.conf", - "dest": "/etc/ironic/ironic.conf", - "owner": "ironic", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/ironic/policy.json", - "owner": "ironic", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/ironic", - "owner": "ironic:ironic", - "recurse": true - }, - { - "path": "/var/lib/ironic", - "owner": "ironic:ironic", - "recurse": true - }, - { - "path": "/tftpboot", - "owner": "ironic:ironic", - "recurse": true - } - ] -} diff --git a/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2 b/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2 deleted file mode 100644 index 5d339e59b..000000000 --- a/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2 +++ /dev/null @@ -1,9 +0,0 @@ -port=0 -interface={{ api_interface }} -dhcp-range={{ ironic_dnsmasq_dhcp_range }} 
-dhcp-option=option:tftp-server,{{ kolla_internal_vip_address }} -dhcp-option=option:server-ip-address,{{ kolla_internal_vip_address }} -bind-interfaces -dhcp-sequential-ip -dhcp-option=option:bootfile-name,pxelinux.0 -dhcp-option=210,/tftpboot/ diff --git a/ansible/roles/ironic/templates/ironic-dnsmasq.json.j2 b/ansible/roles/ironic/templates/ironic-dnsmasq.json.j2 deleted file mode 100644 index baab50528..000000000 --- a/ansible/roles/ironic/templates/ironic-dnsmasq.json.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{ - "command": "dnsmasq --no-daemon --conf-file=/etc/dnsmasq.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/dnsmasq.conf", - "dest": "/etc/dnsmasq.conf", - "owner": "root", - "perm": "0600" - } - ] -} diff --git a/ansible/roles/ironic/templates/ironic-inspector-tftp.json.j2 b/ansible/roles/ironic/templates/ironic-inspector-tftp.json.j2 deleted file mode 100644 index 3d253d283..000000000 --- a/ansible/roles/ironic/templates/ironic-inspector-tftp.json.j2 +++ /dev/null @@ -1,18 +0,0 @@ -{ - "command": "/usr/sbin/in.tftpd --verbose --foreground --user root --address 0.0.0.0:69 --map-file /map-file /tftpboot", - "config_files": [ - { - "source": "{{ container_config_directory }}/default", - "dest": "/tftpboot/pxelinux.cfg/default", - "owner": "root", - "perm": "0644" - } - ], - "permissions": [ - { - "path": "/tftpboot/pxelinux.cfg", - "owner": "ironic:ironic", - "recurse": true - } - ] -} diff --git a/ansible/roles/ironic/templates/ironic-inspector.conf.j2 b/ansible/roles/ironic/templates/ironic-inspector.conf.j2 deleted file mode 100644 index 00e1c57d3..000000000 --- a/ansible/roles/ironic/templates/ironic-inspector.conf.j2 +++ /dev/null @@ -1,41 +0,0 @@ -[DEFAULT] -debug = {{ ironic_logging_debug }} - -{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -log_dir = /var/log/kolla/ironic -{% endif %} - -listen_address = 0.0.0.0 -listen_port = {{ ironic_inspector_port }} - -[ironic] -auth_url = {{ keystone_admin_url }} 
-auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ ironic_inspector_keystone_user }} -password = {{ ironic_inspector_keystone_password }} - -[keystone_authtoken] -auth_uri = {{ keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ ironic_inspector_keystone_user }} -password = {{ ironic_inspector_keystone_password }} - -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} -memcache_servers = {{ memcached_servers }} - -[firewall] -dnsmasq_interface = undefined - -[database] -connection = mysql+pymysql://{{ ironic_inspector_database_user }}:{{ ironic_inspector_database_password }}@{{ ironic_inspector_database_address }}/{{ ironic_inspector_database_name }} diff --git a/ansible/roles/ironic/templates/ironic-inspector.json.j2 b/ansible/roles/ironic/templates/ironic-inspector.json.j2 deleted file mode 100644 index 2ca381fdc..000000000 --- a/ansible/roles/ironic/templates/ironic-inspector.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "ironic-inspector --config-file /etc/ironic-inspector/inspector.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/inspector.conf", - "dest": "/etc/ironic-inspector/inspector.conf", - "owner": "ironic", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/ironic/policy.json", - "owner": "ironic", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/ironic", - "owner": "ironic:ironic", - "recurse": true - } - ] -} diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2 deleted file mode 100644 index 01f066069..000000000 --- a/ansible/roles/ironic/templates/ironic.conf.j2 +++ /dev/null @@ -1,63 +0,0 @@ -[DEFAULT] -debug = {{ ironic_logging_debug }} - 
-{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -log_dir = /var/log/kolla/ironic -{% endif %} - -admin_user = {{ openstack_auth.username }} -admin_password = {{ keystone_admin_password }} - -enabled_drivers = pxe_ipmitool - -transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }} - -{% if service_name == 'ironic-api' %} -[api] - -host_ip = 0.0.0.0 -api_workers = {{ openstack_service_workers }} -{% endif %} - -{% if service_name == 'ironic-conductor' %} -[conductor] -api_url = {{ internal_protocol }}://ironic-api:{{ ironic_api_port }} -automated_clean=false -{% endif %} - -[database] -connection = mysql+pymysql://{{ ironic_database_user }}:{{ ironic_database_password }}@{{ ironic_database_address }}/{{ ironic_database_name }} -max_retries = -1 - -[keystone_authtoken] -auth_uri = {{ keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ ironic_keystone_user }} -password = {{ ironic_keystone_password }} - -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} -memcache_servers = {{ memcached_servers }} - - -[glance] -glance_host = {{ internal_protocol }}://glance-api:{{ glance_api_port }} - -[neutron] -url = {{ internal_protocol }}://neutron-server:{{ neutron_server_port }} -cleaning_network = {{ ironic_cleaning_network }} - -[inspector] -enabled = true - -[agent] -deploy_logs_local_path = /var/log/kolla/ironic -deploy_logs_storage_backend = local -deploy_logs_collect = always diff --git a/ansible/roles/ironic/templates/pxelinux.default.j2 b/ansible/roles/ironic/templates/pxelinux.default.j2 deleted file mode 100644 index 6df316b80..000000000 --- a/ansible/roles/ironic/templates/pxelinux.default.j2 +++ /dev/null @@ -1,7 +0,0 @@ -default introspect - -label introspect -kernel ironic-agent.kernel -append initrd=ironic-agent.initramfs 
ipa-inspection-callback-url=http://ironic-inspector:{{ ironic_inspector_port }}/v1/continue systemd.journald.forward_to_console=yes - -ipappend 3 diff --git a/ansible/roles/iscsi/defaults/main.yml b/ansible/roles/iscsi/defaults/main.yml deleted file mode 100644 index 9af4cbbd3..000000000 --- a/ansible/roles/iscsi/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -project_name: "iscsi" - -#################### -# Docker -#################### -iscsid_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-iscsid" -iscsid_tag: "{{ openstack_release }}" -iscsid_image_full: "{{ iscsid_image }}:{{ iscsid_tag }}" - -tgtd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-tgtd" -tgtd_tag: "{{ openstack_release }}" -tgtd_image_full: "{{ tgtd_image }}:{{ tgtd_tag }}" diff --git a/ansible/roles/iscsi/tasks/config.yml b/ansible/roles/iscsi/tasks/config.yml deleted file mode 100644 index 44e2c67e1..000000000 --- a/ansible/roles/iscsi/tasks/config.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item }}" - state: "directory" - recurse: yes - with_items: - - "iscsid" - -- name: Copying over config.json files for services - template: - src: "{{ item }}.json.j2" - dest: "{{ node_config_directory }}/{{ item }}/config.json" - with_items: - - "iscsid" - -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item }}" - state: "directory" - recurse: yes - when: - - enable_cinder_backend_lvm | bool - with_items: - - "tgtd" - -- name: Copying over config.json files for services - template: - src: "{{ item }}.json.j2" - dest: "{{ node_config_directory }}/{{ item }}/config.json" - when: - - enable_cinder_backend_lvm | bool - with_items: - - "tgtd" diff --git a/ansible/roles/iscsi/tasks/main.yml 
b/ansible/roles/iscsi/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/iscsi/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/iscsi/templates/iscsid.json.j2 b/ansible/roles/iscsi/templates/iscsid.json.j2 deleted file mode 100644 index f44cf16c9..000000000 --- a/ansible/roles/iscsi/templates/iscsid.json.j2 +++ /dev/null @@ -1,4 +0,0 @@ -{ - "command": "iscsid -d 8 -f --pid=/run/iscsid.pid", - "config_files": [] -} diff --git a/ansible/roles/iscsi/templates/tgtd.json.j2 b/ansible/roles/iscsi/templates/tgtd.json.j2 deleted file mode 100644 index 3f38ef996..000000000 --- a/ansible/roles/iscsi/templates/tgtd.json.j2 +++ /dev/null @@ -1,4 +0,0 @@ -{ - "command": "tgtd -d 1 -f --iscsi portal={{ api_interface_address }}:{{ iscsi_port }}", - "config_files": [] -} diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml deleted file mode 100644 index dbcf5651a..000000000 --- a/ansible/roles/keystone/defaults/main.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -project_name: "keystone" - -keystone_services: - keystone: - container_name: "keystone" - group: "keystone" - enabled: true - image: "{{ keystone_image_full }}" - -#################### -# Database -#################### -keystone_database_name: "keystone" -keystone_database_user: "keystone" -keystone_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - - -#################### -# Fernet -#################### -keystone_username: "keystone" -keystone_groupname: "keystone" - - -#################### -# Docker -#################### -keystone_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-keystone" -keystone_tag: "{{ openstack_release }}" -keystone_image_full: "{{ keystone_image }}:{{ keystone_tag }}" - -#################### -# OpenStack -#################### -keystone_logging_debug: "{{ 
openstack_logging_debug }}" - -openstack_keystone_auth: "{{ openstack_auth }}" diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml deleted file mode 100644 index 34f97dc51..000000000 --- a/ansible/roles/keystone/tasks/config.yml +++ /dev/null @@ -1,111 +0,0 @@ ---- -- name: Check if policies shall be overwritten - local_action: stat path="{{ node_custom_config }}/keystone/policy.json" - register: keystone_policy - -- name: Check if Keystone Domain specific settings enabled - local_action: stat path="{{ node_custom_config }}/keystone/domains" - register: keystone_domain_directory - -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - recurse: yes - when: - - item.value.enabled | bool - with_dict: "{{ keystone_services }}" - -- name: Copying over config.json files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - register: keystone_config_jsons - with_dict: "{{ keystone_services }}" - when: - - item.value.enabled | bool - -- name: Copying over keystone.conf - merge_configs: - vars: - service_name: "{{ item.key }}" - sources: - - "{{ role_path }}/templates/keystone.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/keystone.conf" - - "{{ node_custom_config }}/keystone/{{ item.key }}.conf" - - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/keystone.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/keystone.conf" - register: keystone_confs - with_dict: "{{ keystone_services }}" - when: - - item.key == "keystone" - - item.value.enabled | bool - -- name: Creating Keystone Domain directory - vars: - keystone: "{{ keystone_services.keystone }}" - file: - dest: "{{ node_config_directory }}/keystone/domains/" - state: "directory" - when: - - 
keystone.enabled | bool - - keystone_domain_directory.stat.exists - -- name: Get file list in custom domains folder - local_action: find path="{{ node_custom_config }}/keystone/domains" recurse=no file_type=file - register: keystone_domains - when: keystone_domain_directory.stat.exists - -- name: Copying Keystone Domain specific settings - vars: - keystone: "{{ keystone_services.keystone }}" - copy: - src: "{{ item.path }}" - dest: "{{ node_config_directory }}/keystone/domains/" - register: keystone_domains - when: - - keystone.enabled | bool - - keystone_domain_directory.stat.exists - with_items: "{{ keystone_domains.files|default([]) }}" - -- name: Copying over existing policy.json - template: - src: "{{ node_custom_config }}/keystone/policy.json" - dest: "{{ node_config_directory }}/{{ item.key }}/policy.json" - register: keystone_policy_jsons - when: - - item.key == "keystone" - - item.value.enabled | bool - - keystone_policy.stat.exists - with_dict: "{{ keystone_services }}" - -- name: Copying over wsgi-keystone.conf - vars: - keystone: "{{ keystone_services.keystone }}" - template: - src: "{{ item }}" - dest: "{{ node_config_directory }}/keystone/wsgi-keystone.conf" - register: keystone_wsgi - when: - - keystone.enabled | bool - with_first_found: - - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/wsgi-keystone.conf" - - "{{ node_custom_config }}/keystone/wsgi-keystone.conf" - - "wsgi-keystone.conf.j2" - -- name: Copying over keystone-paste.ini - vars: - keystone: "{{ keystone_services.keystone }}" - merge_configs: - sources: - - "{{ role_path }}/templates/keystone-paste.ini.j2" - - "{{ node_custom_config }}/keystone/keystone-paste.ini" - - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/keystone-paste.ini" - dest: "{{ node_config_directory }}/keystone/keystone-paste.ini" - register: keystone_paste_ini - when: - - keystone.enabled | bool - diff --git a/ansible/roles/keystone/tasks/main.yml b/ansible/roles/keystone/tasks/main.yml 
deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/keystone/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/keystone/templates/keystone-paste.ini.j2 b/ansible/roles/keystone/templates/keystone-paste.ini.j2 deleted file mode 100644 index 0e2ee368e..000000000 --- a/ansible/roles/keystone/templates/keystone-paste.ini.j2 +++ /dev/null @@ -1,83 +0,0 @@ -# Keystone PasteDeploy configuration file. - -[filter:debug] -use = egg:oslo.middleware#debug - -[filter:request_id] -use = egg:oslo.middleware#request_id - -[filter:build_auth_context] -use = egg:keystone#build_auth_context - -[filter:token_auth] -use = egg:keystone#token_auth - -[filter:json_body] -use = egg:keystone#json_body - -[filter:cors] -use = egg:oslo.middleware#cors -oslo_config_project = keystone - -[filter:ec2_extension] -use = egg:keystone#ec2_extension - -[filter:ec2_extension_v3] -use = egg:keystone#ec2_extension_v3 - -[filter:s3_extension] -use = egg:keystone#s3_extension - -[filter:url_normalize] -use = egg:keystone#url_normalize - -[filter:sizelimit] -use = egg:oslo.middleware#sizelimit - -[app:public_service] -use = egg:keystone#public_service - -[app:service_v3] -use = egg:keystone#service_v3 - -[app:admin_service] -use = egg:keystone#admin_service - -[pipeline:public_api] -# The last item in this pipeline must be public_service or an equivalent -# application. It cannot be a filter. -pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension public_service - -[pipeline:admin_api] -# The last item in this pipeline must be admin_service or an equivalent -# application. It cannot be a filter. -pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension s3_extension admin_service - -[pipeline:api_v3] -# The last item in this pipeline must be service_v3 or an equivalent -# application. It cannot be a filter. 
-pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3 - -[app:public_version_service] -use = egg:keystone#public_version_service - -[app:admin_version_service] -use = egg:keystone#admin_version_service - -[pipeline:public_version_api] -pipeline = cors sizelimit url_normalize public_version_service - -[pipeline:admin_version_api] -pipeline = cors sizelimit url_normalize admin_version_service - -[composite:main] -use = egg:Paste#urlmap -/v2.0 = public_api -/v3 = api_v3 -/ = public_version_api - -[composite:admin] -use = egg:Paste#urlmap -/v2.0 = admin_api -/v3 = api_v3 -/ = admin_version_api diff --git a/ansible/roles/keystone/templates/keystone.conf.j2 b/ansible/roles/keystone/templates/keystone.conf.j2 deleted file mode 100644 index 3eb04be65..000000000 --- a/ansible/roles/keystone/templates/keystone.conf.j2 +++ /dev/null @@ -1,55 +0,0 @@ -[DEFAULT] -debug = {{ keystone_logging_debug }} -{% if enable_cadf_notifications | bool %} -notification_format = cadf -transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }} -{% endif %} - -{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -# NOTE(elemoine) log_dir alone does not work for Keystone -log_file = /var/log/kolla/keystone/keystone.log -use_stderr = True -{% endif %} - -secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO - -[database] -connection = mysql+pymysql://{{ keystone_database_user }}:{{ keystone_database_password }}@{{ keystone_database_address }}/{{ keystone_database_name }} -max_retries = -1 - -{% if keystone_domain_directory.stat.exists %} -[identity] -domain_specific_drivers_enabled = true -domain_config_dir = /etc/keystone/domains -{% endif %} - -[token] -revoke_by_id = False -{% if keystone_token_provider == 'uuid' %} -provider = uuid -{% elif keystone_token_provider == 'fernet' %} -provider = {{ keystone_token_provider }} -expiration = {{ fernet_token_expiry }} - 
-[fernet_tokens] -max_active_keys = 3 -{% endif %} - -[cache] -backend = oslo_cache.memcache_pool -enabled = True - -{# For Kolla-Ansible, generate the memcache servers based on the list of -memcached servers in the inventory and memcached_servers should be un-set. -For Kolla-Kubernetes, it is necessary to define the memcached_servers -variable in globals.yml to set it to the Kubernetes service for memcached. #} - -memcache_servers = {{ memcached_servers }}:{{ memcached_port }} - -{% if enable_cadf_notifications | bool %} -[oslo_messaging_notifications] -driver = messagingv2 -{% endif %} diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2 deleted file mode 100644 index 3cd792111..000000000 --- a/ansible/roles/keystone/templates/keystone.json.j2 +++ /dev/null @@ -1,49 +0,0 @@ -{% set keystone_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} -{% set keystone_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} -{ - "command": "/usr/sbin/{{ keystone_cmd }}", - "config_files": [ - { - "source": "{{ container_config_directory }}/keystone.conf", - "dest": "/etc/keystone/keystone.conf", - "owner": "keystone", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/keystone-paste.ini", - "dest": "/etc/keystone/keystone-paste.ini", - "owner": "keystone", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/domains", - "dest": "/etc/keystone/domains", - "owner": "keystone", - "perm": "0700", - "optional": true - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/keystone/policy.json", - "owner": "keystone", - "perm": "0600", - "optional": true - }, - { - "source": "{{ container_config_directory }}/wsgi-keystone.conf", - "dest": "/etc/{{ keystone_dir }}/wsgi-keystone.conf", - "owner": "keystone", - "perm": "0644" - } - ], - "permissions": [ - { - "path": "/var/log/kolla", - 
"owner": "keystone:kolla" - }, - { - "path": "/var/log/kolla/keystone/keystone.log", - "owner": "keystone:keystone" - } - ] -} diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 deleted file mode 100644 index ae1616251..000000000 --- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 +++ /dev/null @@ -1,42 +0,0 @@ -{% set keystone_log_dir = '/var/log/kolla/keystone' %} -{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %} -Listen {{ api_interface_address }}:{{ keystone_public_port }} -Listen {{ api_interface_address }}:{{ keystone_admin_port }} - - - WSGIDaemonProcess keystone-public processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=%{GROUP} python-path={{ python_path }} - WSGIProcessGroup keystone-public - WSGIScriptAlias / /var/www/cgi-bin/keystone/main - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat -{% if std_logger %} - ErrorLog /proc/self/fd/2 - CustomLog /proc/self/fd/1 combined -{% else %} - ErrorLog "{{ keystone_log_dir }}/keystone-apache-public-error.log" - CustomLog "{{ keystone_log_dir }}/keystone-apache-public-access.log" logformat -{% endif %} - - - - WSGIDaemonProcess keystone-admin processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=%{GROUP} python-path={{ python_path }} - WSGIProcessGroup keystone-admin - WSGIScriptAlias / /var/www/cgi-bin/keystone/admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat -{% if std_logger %} - ErrorLog /proc/self/fd/2 - CustomLog /proc/self/fd/1 combined 
-{% else %} - ErrorLog "{{ keystone_log_dir }}/keystone-apache-admin-error.log" - CustomLog "{{ keystone_log_dir }}/keystone-apache-admin-access.log" logformat -{% endif %} - diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml deleted file mode 100644 index 410e86d3e..000000000 --- a/ansible/roles/mariadb/defaults/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -project_name: "mariadb" - -#################### -# Database -#################### -database_cluster_name: "openstack" -database_max_timeout: 60 - -#################### -# Docker -#################### -mariadb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-mariadb" -mariadb_tag: "{{ openstack_release }}" -mariadb_image_full: "{{ mariadb_image }}:{{ mariadb_tag }}" diff --git a/ansible/roles/mariadb/tasks/config.yml b/ansible/roles/mariadb/tasks/config.yml deleted file mode 100644 index 39a186c8c..000000000 --- a/ansible/roles/mariadb/tasks/config.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item }}" - state: "directory" - recurse: yes - with_items: - - "mariadb" - -- name: Copying over config.json files for services - template: - src: "{{ item }}.json.j2" - dest: "{{ node_config_directory }}/{{ item }}/config.json" - with_items: - - "mariadb" - -- name: Copying over galera.cnf - merge_configs: - vars: - service_name: "{{ item }}" - sources: - - "{{ role_path }}/templates/galera.cnf.j2" - - "{{ node_custom_config }}/galera.cnf" - - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf" - dest: "{{ node_config_directory }}/{{ item }}/galera.cnf" - with_items: - - "mariadb" diff --git a/ansible/roles/mariadb/tasks/main.yml b/ansible/roles/mariadb/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/mariadb/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ 
---- -- include: "config.yml" diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2 deleted file mode 100644 index b5a65341e..000000000 --- a/ansible/roles/mariadb/templates/galera.cnf.j2 +++ /dev/null @@ -1,41 +0,0 @@ -[client] -default-character-set=utf8 - -[mysql] -default-character-set=utf8 - -[mysqld] -bind-address={{ api_interface_address }} -port={{ mariadb_port }} - -{% if not std_logger %} -log-error=/var/log/kolla/mariadb/mariadb.log -{% endif %} - -log-bin=mysql-bin -binlog_format=ROW -default-storage-engine=innodb -innodb_autoinc_lock_mode=2 - -collation-server = utf8_general_ci -init-connect='SET NAMES utf8' -character-set-server = utf8 - -datadir=/var/lib/mysql/ - -wsrep_provider=none - -max_connections=10000 - -key_buffer_size = '64M' -max_heap_table_size = '64M' -tmp_table_size = '64M' -{% set dynamic_pool_size_mb = (hostvars[inventory_hostname]['ansible_memtotal_mb'] * 0.4) | round | int %} -{% if dynamic_pool_size_mb < 8192 %} -innodb_buffer_pool_size = '{{ dynamic_pool_size_mb }}M' -{% else %} -innodb_buffer_pool_size = '8192M' -{% endif %} - -[server] -pid-file=/var/lib/mysql/mariadb.pid diff --git a/ansible/roles/mariadb/templates/mariadb.json.j2 b/ansible/roles/mariadb/templates/mariadb.json.j2 deleted file mode 100644 index ac1b5bf27..000000000 --- a/ansible/roles/mariadb/templates/mariadb.json.j2 +++ /dev/null @@ -1,24 +0,0 @@ -{% set mysql_dir = 'mysql' if kolla_base_distro in ['ubuntu', 'debian'] else '' %} -{ - "command": "/usr/bin/mysqld_safe", - "config_files": [ - { - "source": "{{ container_config_directory }}/galera.cnf", - "dest": "/etc/{{ mysql_dir }}/my.cnf", - "owner": "mysql", - "perm": "0600" - } - ], - "permissions": [ - { - "path": "/var/log/kolla/mariadb", - "owner": "mysql:mysql", - "recurse": true - }, - { - "path": "/var/lib/mysql", - "owner": "mysql:mysql", - "recurse": true - } - ] -} diff --git a/ansible/roles/memcached/defaults/main.yml 
b/ansible/roles/memcached/defaults/main.yml deleted file mode 100644 index 2e02b716d..000000000 --- a/ansible/roles/memcached/defaults/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -project_name: "memcached" - -memcached_services: - memcached: - container_name: "memcached" - image: "{{ memcached_image_full }}" - enabled: true - group: "memcached" - volumes: - - "{{ node_config_directory }}/memcached/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - -#################### -# Docker -#################### -memcached_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-memcached" -memcached_tag: "{{ openstack_release }}" -memcached_image_full: "{{ memcached_image }}:{{ memcached_tag }}" - -#################### -# Memcached options -#################### -memcached_connection_limit: "5000" diff --git a/ansible/roles/memcached/tasks/config.yml b/ansible/roles/memcached/tasks/config.yml deleted file mode 100644 index 104759e1a..000000000 --- a/ansible/roles/memcached/tasks/config.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item }}" - state: "directory" - recurse: yes - with_items: - - "memcached" - -- name: Copying over config.json files for services - template: - src: "{{ item }}.json.j2" - dest: "{{ node_config_directory }}/{{ item }}/config.json" - register: memcached_config_json - with_items: - - "memcached" diff --git a/ansible/roles/memcached/tasks/main.yml b/ansible/roles/memcached/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/memcached/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/memcached/templates/memcached.json.j2 b/ansible/roles/memcached/templates/memcached.json.j2 deleted file mode 100644 index f1ffc5d5b..000000000 --- 
a/ansible/roles/memcached/templates/memcached.json.j2 +++ /dev/null @@ -1,4 +0,0 @@ -{ - "command": "/usr/bin/memcached -vv -l {{ api_interface_address }} -p {{ memcached_port }} -c {{ memcached_connection_limit }} -P /memcached/memcached.pid", - "config_files": [] -} diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml deleted file mode 100644 index 038dcfe33..000000000 --- a/ansible/roles/neutron/defaults/main.yml +++ /dev/null @@ -1,262 +0,0 @@ ---- -project_name: "neutron" - -neutron_services: - openvswitch-db-server: - container_name: "openvswitch_db" - image: "{{ openvswitch_db_image_full }}" - enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" - volumes: - - "{{ node_config_directory }}/openvswitch-db-server/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/run:/run:shared" - - "kolla_logs:/var/log/kolla/" - - "openvswitch_db:/var/lib/openvswitch/" - openvswitch-vswitchd: - container_name: "openvswitch_vswitchd" - image: "{{ openvswitch_vswitchd_image_full }}" - enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" - privileged: True - volumes: - - "{{ node_config_directory }}/openvswitch-vswitchd/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/lib/modules:/lib/modules:ro" - - "/run:/run:shared" - - "kolla_logs:/var/log/kolla/" - neutron-server: - container_name: "neutron_server" - image: "{{ neutron_server_image_full }}" - enabled: true - group: "neutron-server" - volumes: - - "{{ node_config_directory }}/neutron-server/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - neutron-openvswitch-agent: - container_name: "neutron_openvswitch_agent" - image: "{{ neutron_openvswitch_agent_image_full }}" - enabled: "{{ neutron_plugin_agent == 'openvswitch' }}" - privileged: True - volumes: - - "{{ node_config_directory }}/neutron-openvswitch-agent/:{{ container_config_directory }}/:ro" - - 
"/etc/localtime:/etc/localtime:ro" - - "/lib/modules:/lib/modules:ro" - - "/run:/run:shared" - - "kolla_logs:/var/log/kolla/" - neutron-sfc-agent: - container_name: "neutron_sfc_agent" - image: "{{ neutron_sfc_agent_image_full }}" - enabled: "{{ neutron_plugin_agent == 'sfc' }}" - privileged: True - volumes: - - "{{ node_config_directory }}/neutron-sfc-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/lib/modules:/lib/modules:ro" - - "/run:/run:shared" - - "kolla_logs:/var/log/kolla/" - neutron-linuxbridge-agent: - container_name: "neutron_linuxbridge_agent" - image: "{{ neutron_linuxbridge_agent_image_full }}" - privileged: True - enabled: "{{ neutron_plugin_agent == 'linuxbridge' }}" - environment: - KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}" - NEUTRON_BRIDGE: "br-ex" - NEUTRON_INTERFACE: "{{ neutron_external_interface }}" - volumes: - - "{{ node_config_directory }}/neutron-linuxbridge-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/lib/modules:/lib/modules:ro" - - "/run:/run:shared" - - "kolla_logs:/var/log/kolla/" - neutron-dhcp-agent: - container_name: "neutron_dhcp_agent" - image: "{{ neutron_dhcp_agent_image_full }}" - privileged: True - enabled: True - group: "neutron-dhcp-agent" - volumes: - - "{{ node_config_directory }}/neutron-dhcp-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/run/:/run/:shared" - - "/run/netns/:/run/netns/:shared" - - "neutron_metadata_socket:/var/lib/neutron/kolla/" - - "kolla_logs:/var/log/kolla/" - neutron-l3-agent: - container_name: "neutron_l3_agent" - image: "{{ neutron_l3_agent_image_full }}" - privileged: True - enabled: "{{ not enable_neutron_vpnaas | bool }}" - volumes: - - "{{ node_config_directory }}/neutron-l3-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/run:/run:shared" - - "/run/netns/:/run/netns/:shared" - - 
"neutron_metadata_socket:/var/lib/neutron/kolla/" - - "kolla_logs:/var/log/kolla/" - neutron-lbaas-agent: - container_name: "neutron_lbaas_agent" - image: "{{ neutron_lbaas_agent_image_full }}" - privileged: True - enabled: "{{ enable_neutron_lbaas | bool }}" - group: "neutron-lbaas-agent" - volumes: - - "{{ node_config_directory }}/neutron-lbaas-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/run:/run:shared" - - "/run/netns/:/run/netns/:shared" - - "neutron_metadata_socket:/var/lib/neutron/kolla/" - - "kolla_logs:/var/log/kolla/" - neutron-metadata-agent: - container_name: "neutron_metadata_agent" - image: "{{ neutron_metadata_agent_image_full }}" - privileged: True - enabled: true - volumes: - - "{{ node_config_directory }}/neutron-metadata-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/run/netns/:/run/netns/:shared" - - "neutron_metadata_socket:/var/lib/neutron/kolla/" - - "kolla_logs:/var/log/kolla/" - neutron-vpnaas-agent: - container_name: "neutron_vpnaas_agent" - image: "{{ neutron_vpnaas_agent_image_full }}" - privileged: True - enabled: "{{ enable_neutron_vpnaas | bool }}" - group: "neutron-vpnaas-agent" - volumes: - - "{{ node_config_directory }}/neutron-vpnaas-agent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/run:/run:shared" - - "/run/netns/:/run/netns/:shared" - - "/lib/modules:/lib/modules:ro" - - "neutron_metadata_socket:/var/lib/neutron/kolla/" - - "kolla_logs:/var/log/kolla/" - neutron-bgp-dragent: - container_name: "neutron_bgp_dragent" - image: "{{ neutron_bgp_dragent_image_full }}" - privileged: True - enabled: "{{ enable_neutron_bgp_dragent | bool }}" - group: "neutron-bgp-dragent" - volumes: - - "{{ node_config_directory }}/neutron-bgp-dragent/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/run:/run:shared" - - "/run/netns/:/run/netns/:shared" - - "/lib/modules:/lib/modules:ro" - - 
"neutron_metadata_socket:/var/lib/neutron/kolla/" - - "kolla_logs:/var/log/kolla/" - - -#################### -# Database -#################### -neutron_database_name: "neutron" -neutron_database_user: "neutron" -neutron_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - - -#################### -# Docker -#################### -neutron_dhcp_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-dhcp-agent" -neutron_dhcp_agent_tag: "{{ openstack_release }}" -neutron_dhcp_agent_image_full: "{{ neutron_dhcp_agent_image }}:{{ neutron_dhcp_agent_tag }}" - -neutron_l3_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-l3-agent" -neutron_l3_agent_tag: "{{ openstack_release }}" -neutron_l3_agent_image_full: "{{ neutron_l3_agent_image }}:{{ neutron_l3_agent_tag }}" - -neutron_lbaas_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-lbaas-agent" -neutron_lbaas_agent_tag: "{{ openstack_release }}" -neutron_lbaas_agent_image_full: "{{ neutron_lbaas_agent_image }}:{{ neutron_lbaas_agent_tag }}" - -neutron_linuxbridge_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-linuxbridge-agent" -neutron_linuxbridge_agent_tag: "{{ openstack_release }}" -neutron_linuxbridge_agent_image_full: "{{ neutron_linuxbridge_agent_image }}:{{ neutron_linuxbridge_agent_tag }}" - -neutron_metadata_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-metadata-agent" -neutron_metadata_agent_tag: "{{ openstack_release }}" -neutron_metadata_agent_image_full: "{{ neutron_metadata_agent_image }}:{{ 
neutron_metadata_agent_tag }}" - -neutron_openvswitch_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-openvswitch-agent" -neutron_openvswitch_agent_tag: "{{ openstack_release }}" -neutron_openvswitch_agent_image_full: "{{ neutron_openvswitch_agent_image }}:{{ neutron_openvswitch_agent_tag }}" - -neutron_sfc_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-sfc-agent" -neutron_sfc_agent_tag: "{{ openstack_release }}" -neutron_sfc_agent_image_full: "{{ neutron_sfc_agent_image }}:{{ neutron_sfc_agent_tag }}" - -neutron_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-server" -neutron_server_tag: "{{ openstack_release }}" -neutron_server_image_full: "{{ neutron_server_image }}:{{ neutron_server_tag }}" - -neutron_vpnaas_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-vpnaas-agent" -neutron_vpnaas_agent_tag: "{{ openstack_release }}" -neutron_vpnaas_agent_image_full: "{{ neutron_vpnaas_agent_image }}:{{ neutron_vpnaas_agent_tag }}" - -neutron_bgp_dragent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-bgp-dragent" -neutron_bgp_dragent_tag: "{{ openstack_release }}" -neutron_bgp_dragent_image_full: "{{ neutron_bgp_dragent_image }}:{{ neutron_bgp_dragent_tag }}" - -openvswitch_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-openvswitch-db-server" -openvswitch_db_tag: "{{ openstack_release }}" -openvswitch_db_image_full: "{{ openvswitch_db_image }}:{{ openvswitch_db_tag }}" - 
-openvswitch_vswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-openvswitch-vswitchd" -openvswitch_vswitchd_tag: "{{ openstack_release }}" -openvswitch_vswitchd_image_full: "{{ openvswitch_vswitchd_image }}:{{ openvswitch_vswitchd_tag }}" - - -#################### -# OpenStack -#################### -dhcp_agents_per_network: 2 -min_l3_agents_per_router: 2 -max_l3_agents_per_router: 3 - -neutron_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}" -neutron_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}" -neutron_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ neutron_server_port }}" - -neutron_logging_debug: "{{ openstack_logging_debug }}" - -neutron_bridge_name: "br-ex" - -openstack_neutron_auth: "{{ openstack_auth }}" - -#################### -# Extension drivers -#################### -extension_drivers: - - name: "qos" - enabled: "{{ enable_neutron_qos | bool }}" - - name: "port_security" - enabled: "{{ enable_tacker | bool or enable_designate | bool }}" - - name: "dns" - enabled: "{{ enable_designate | bool }}" - -neutron_extension_drivers: "{{ extension_drivers|selectattr('enabled', 'equalto', true)|list }}" - -#################### -# Service Plugins -#################### -service_plugins: - - name: "flow_classifier" - enabled: "{{ neutron_plugin_agent == 'sfc' }}" - - name: "neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2" - enabled: "{{ enable_neutron_lbaas | bool }}" - - name: "neutron.services.firewall.fwaas_plugin.FirewallPlugin" - enabled: "{{ enable_neutron_fwaas | bool }}" - - name: "neutron_vpnaas.services.vpn.plugin.VPNDriverPlugin" - enabled: "{{ enable_neutron_vpnaas | bool }}" - - name: "qos" - enabled: "{{ enable_neutron_qos | bool }}" - - name: "router" - enabled: true - - name: "sfc" - enabled: "{{ neutron_plugin_agent == 'sfc' 
}}" - - name: "neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin" - enabled: "{{ enable_neutron_bgp_dragent | bool }}" - -neutron_service_plugins: "{{ service_plugins|selectattr('enabled', 'equalto', true)|list }}" diff --git a/ansible/roles/neutron/tasks/config-neutron-fake.yml b/ansible/roles/neutron/tasks/config-neutron-fake.yml deleted file mode 100644 index cb6788b70..000000000 --- a/ansible/roles/neutron/tasks/config-neutron-fake.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}" - state: "directory" - recurse: yes - with_sequence: start=1 end={{ num_nova_fake_per_node }} - when: inventory_hostname in groups['compute'] - -- name: Copying over config.json files for services - template: - src: "neutron-openvswitch-agent.json.j2" - dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/config.json" - register: fake_config_json - with_sequence: start=1 end={{ num_nova_fake_per_node }} - when: - - inventory_hostname in groups['compute'] - - neutron_plugin_agent == "openvswitch" - -- name: Copying over neutron.conf - merge_configs: - vars: - service_name: "{{ item }}" - sources: - - "{{ role_path }}/templates/neutron.conf.j2" - - "{{ node_config_directory }}/config/global.conf" - - "{{ node_config_directory }}/config/database.conf" - - "{{ node_config_directory }}/config/messaging.conf" - - "{{ node_config_directory }}/config/neutron.conf" - - "{{ node_config_directory }}/config/neutron/{{ item }}.conf" - - "{{ node_config_directory }}/config/neutron/{{ inventory_hostname }}/neutron.conf" - dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/neutron.conf" - register: fake_neutron_conf - with_sequence: start=1 end={{ num_nova_fake_per_node }} - when: - - inventory_hostname in groups['compute'] - - neutron_plugin_agent == "openvswitch" - -- name: Copying over ml2_conf.ini - merge_configs: - 
vars: - service_name: "{{ item }}" - sources: - - "{{ role_path }}/templates/ml2_conf.ini.j2" - - "{{ node_config_directory }}/config/neutron/ml2_conf.ini" - - "{{ node_config_directory }}/config/neutron/{{ inventory_hostname }}/neutron.conf" - dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/ml2_conf.ini" - register: fake_neutron_ml2_conf_ini - with_sequence: start=1 end={{ num_nova_fake_per_node }} - when: - - inventory_hostname in groups['compute'] - - neutron_plugin_agent == "openvswitch" diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml deleted file mode 100644 index 0d9ae7270..000000000 --- a/ansible/roles/neutron/tasks/config.yml +++ /dev/null @@ -1,255 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - recurse: yes - when: - - item.value.enabled | bool - with_dict: "{{ neutron_services }}" - -- name: Copying over config.json files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - register: neutron_config_jsons - when: - - item.value.enabled | bool - with_dict: "{{ neutron_services }}" - -- name: Copying over neutron.conf - vars: - service_name: "{{ item.key }}" - services_need_neutron_conf: - - "neutron-dhcp-agent" - - "neutron-l3-agent" - - "neutron-linuxbridge-agent" - - "neutron-metadata-agent" - - "neutron-openvswitch-agent" - - "neutron-server" - - "neutron-lbaas-agent" - - "neutron-vpnaas-agent" - - "neutron-bgp-dragent" - merge_configs: - sources: - - "{{ role_path }}/templates/neutron.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/neutron.conf" - - "{{ node_custom_config }}/neutron/{{ item.key }}.conf" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron.conf" - dest: "{{ 
node_config_directory }}/{{ item.key }}/neutron.conf" - register: neutron_confs - when: - - item.value.enabled | bool - - - item.key in services_need_neutron_conf - with_dict: "{{ neutron_services }}" - -- name: Copying over neutron_lbaas.conf - vars: - service_name: "{{ item.key }}" - services_need_neutron_lbaas_conf: - - "neutron-server" - - "neutron-lbaas-agent" - merge_configs: - sources: - - "{{ role_path }}/templates/neutron_lbaas.conf.j2" - - "{{ node_custom_config }}/neutron/neutron_lbaas.conf" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron_lbaas.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/neutron_lbaas.conf" - register: neutron_lbaas_confs - when: - - item.value.enabled | bool - - - item.key in services_need_neutron_lbaas_conf - with_dict: "{{ neutron_services }}" - -- name: Copying over neutron_vpnaas.conf - vars: - service_name: "neutron-server" - neutron_server: "{{ neutron_services[service_name] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/neutron_vpnaas.conf.j2" - - "{{ node_custom_config }}/neutron/neutron_vpnaas.conf" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron_vpnaas.conf" - dest: "{{ node_config_directory }}/{{ service_name }}/neutron_vpnaas.conf" - register: neutron_vpnaas_conf - when: - - neutron_server.enabled | bool - - -- name: Copying over ml2_conf.ini - vars: - service_name: "{{ item.key }}" - services_need_ml2_conf_ini: - - "neutron-dhcp-agent" - - "neutron-l3-agent" - - "neutron-linuxbridge-agent" - - "neutron-lbaas-agent" - - "neutron-metadata-agent" - - "neutron-openvswitch-agent" - - "neutron-server" - - "neutron-vpnaas-agent" - merge_configs: - sources: - - "{{ role_path }}/templates/ml2_conf.ini.j2" - - "{{ node_custom_config }}/neutron/ml2_conf.ini" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/ml2_conf.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/ml2_conf.ini" - register: neutron_ml2_confs - when: - - item.key in 
services_need_ml2_conf_ini - - item.value.enabled | bool - - with_dict: "{{ neutron_services }}" - -- name: Copying over dhcp_agent.ini - vars: - service_name: "neutron-dhcp-agent" - neutron_dhcp_agent: "{{ neutron_services[service_name] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/dhcp_agent.ini.j2" - - "{{ node_custom_config }}/neutron/dhcp_agent.ini" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/dhcp_agent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/dhcp_agent.ini" - register: dhcp_agent_ini - when: - - neutron_dhcp_agent.enabled | bool - - -- name: Copying over dnsmasq.conf - vars: - service_name: "neutron-dhcp-agent" - neutron_dhcp_agent: "{{ neutron_services[service_name] }}" - template: - src: "dnsmasq.conf.j2" - dest: "{{ node_config_directory }}/{{ service_name }}/dnsmasq.conf" - register: dnsmasq_conf - when: - - neutron_dhcp_agent.enabled | bool - - -- name: Copying over l3_agent.ini - vars: - service_name: "{{ item.key }}" - services_need_l3_agent_ini: - - "neutron-l3-agent" - - "neutron-vpnaas-agent" - merge_configs: - sources: - - "{{ role_path }}/templates/l3_agent.ini.j2" - - "{{ node_custom_config }}/neutron/l3_agent.ini" - - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/l3_agent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/l3_agent.ini" - register: neutron_l3_agent_inis - when: - - item.key in services_need_l3_agent_ini - - item.value.enabled | bool - - with_dict: "{{ neutron_services }}" - -- name: Copying over fwaas_driver.ini - vars: - service_name: "{{ item.key }}" - services_need_fwaas_driver_ini: - - "neutron-l3-agent" - - "neutron-vpnaas-agent" - merge_configs: - sources: - - "{{ role_path }}/templates/fwaas_driver.ini.j2" - - "{{ node_custom_config }}/neutron/fwaas_driver.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/fwaas_driver.ini" - register: neutron_fwaas_driver_inis - when: - - item.key in services_need_fwaas_driver_ini - - 
item.value.enabled | bool - - with_dict: "{{ neutron_services }}" - -- name: Copying over metadata_agent.ini - vars: - service_name: "neutron-metadata-agent" - neutron_metadata_agent: "{{ neutron_services[service_name] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/metadata_agent.ini.j2" - - "{{ node_custom_config }}/neutron/metadata_agent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/metadata_agent.ini" - register: neutron_metadata_agent_ini - when: - - neutron_metadata_agent.enabled | bool - - -- name: Copying over lbaas_agent.ini - vars: - service_name: "neutron-lbaas-agent" - neutron_lbaas_agent: "{{ neutron_services['neutron-lbaas-agent'] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/lbaas_agent.ini.j2" - - "{{ node_custom_config }}/neutron/lbaas_agent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/lbaas_agent.ini" - register: neutron_lbaas_agent_ini - when: - - neutron_lbaas_agent.enabled | bool - - -- name: Copying over vpnaas_agent.ini - vars: - service_name: "neutron-vpnaas-agent" - neutron_vpnaas_agent: "{{ neutron_services['neutron-vpnaas-agent'] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/vpnaas_agent.ini.j2" - - "{{ node_custom_config }}/neutron/vpnaas_agent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/vpnaas_agent.ini" - register: neutron_vpnaas_agent_ini - when: - - neutron_vpnaas_agent.enabled | bool - - -- name: Copying over bgp_dragent.ini - vars: - service_name: "neutron-bgp-dragent" - neutron_bgp_dragent: "{{ neutron_services['neutron-bgp-dragent'] }}" - merge_configs: - sources: - - "{{ role_path }}/templates/bgp_dragent.ini.j2" - - "{{ node_custom_config }}/neutron/bgp_dragent.ini" - dest: "{{ node_config_directory }}/{{ service_name }}/bgp_dragent.ini" - register: neutron_bgp_dragent_ini - when: - - neutron_bgp_dragent.enabled | bool - - -- name: Check if policies shall be overwritten - local_action: stat path="{{ node_custom_config 
}}/neutron/policy.json" - register: neutron_policy - -- name: Copying over existing policy.json - vars: - service_name: "{{ item.key }}" - services_need_policy_json: - - "neutron-dhcp-agent" - - "neutron-l3-agent" - - "neutron-linuxbridge-agent" - - "neutron-metadata-agent" - - "neutron-openvswitch-agent" - - "neutron-server" - - "neutron-lbaas-agent" - - "neutron-vpnaas-agent" - - "neutron-bgp-dragent" - template: - src: "{{ node_custom_config }}/neutron/policy.json" - dest: "{{ node_config_directory }}/{{ service_name }}/policy.json" - register: policy_jsons - when: - - neutron_policy.stat.exists | bool - - item.value.enabled | bool - - with_dict: "{{ neutron_services }}" - diff --git a/ansible/roles/neutron/tasks/main.yml b/ansible/roles/neutron/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/neutron/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/neutron/templates/bgp_dragent.ini.j2 b/ansible/roles/neutron/templates/bgp_dragent.ini.j2 deleted file mode 100644 index 89ffbe45a..000000000 --- a/ansible/roles/neutron/templates/bgp_dragent.ini.j2 +++ /dev/null @@ -1,3 +0,0 @@ -[BGP] -bgp_speaker_driver = neutron_dynamic_routing.services.bgp.agent.driver.ryu.driver.RyuBgpDriver -bgp_router_id = {{ neutron_bgp_router_id }} diff --git a/ansible/roles/neutron/templates/dhcp_agent.ini.j2 b/ansible/roles/neutron/templates/dhcp_agent.ini.j2 deleted file mode 100644 index 0b2667b33..000000000 --- a/ansible/roles/neutron/templates/dhcp_agent.ini.j2 +++ /dev/null @@ -1,6 +0,0 @@ -# dhcp_agent.ini -[DEFAULT] -dnsmasq_config_file = /etc/neutron/dnsmasq.conf -enable_isolated_metadata = true -force_metadata = true -dnsmasq_dns_servers = 8.8.8.8,8.8.4.4 diff --git a/ansible/roles/neutron/templates/dnsmasq.conf.j2 b/ansible/roles/neutron/templates/dnsmasq.conf.j2 deleted file mode 100644 index 843bfa058..000000000 --- a/ansible/roles/neutron/templates/dnsmasq.conf.j2 +++ /dev/null @@ -1,8 
+0,0 @@ -{% if std_logger %} -#FIXME(kfox1111) This breaks things for some reason... I suspect a parent process is -#reading stdout. Come up with an alternate solution in a follow on PS for this issue. -#log-facility=- -log-facility=/var/log/kolla/neutron/dnsmasq.log -{% else %} -log-facility=/var/log/kolla/neutron/dnsmasq.log -{% endif %} diff --git a/ansible/roles/neutron/templates/fwaas_driver.ini.j2 b/ansible/roles/neutron/templates/fwaas_driver.ini.j2 deleted file mode 100644 index b020e6bbd..000000000 --- a/ansible/roles/neutron/templates/fwaas_driver.ini.j2 +++ /dev/null @@ -1 +0,0 @@ -[fwaas] diff --git a/ansible/roles/neutron/templates/l3_agent.ini.j2 b/ansible/roles/neutron/templates/l3_agent.ini.j2 deleted file mode 100644 index c9dea04a5..000000000 --- a/ansible/roles/neutron/templates/l3_agent.ini.j2 +++ /dev/null @@ -1,16 +0,0 @@ -#jinja2: trim_blocks: False -[DEFAULT] -{% if enable_neutron_dvr | bool %} -{% if inventory_hostname in groups['network'] %} -agent_mode = dvr_snat -{% elif inventory_hostname in groups['compute'] %} -agent_mode = dvr -{% endif %} -{% else %} -agent_mode = legacy -{% endif %} -{% if enable_neutron_fwaas | bool %} -[fwaas] -driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver -enabled = True -{% endif %} diff --git a/ansible/roles/neutron/templates/lbaas_agent.ini.j2 b/ansible/roles/neutron/templates/lbaas_agent.ini.j2 deleted file mode 100644 index 973bd6180..000000000 --- a/ansible/roles/neutron/templates/lbaas_agent.ini.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[DEFAULT] -debug = {{ neutron_logging_debug }} -device_driver = neutron_lbaas.drivers.haproxy.namespace_driver.HaproxyNSDriver - -[haproxy] -user_group = haproxy diff --git a/ansible/roles/neutron/templates/metadata_agent.ini.j2 b/ansible/roles/neutron/templates/metadata_agent.ini.j2 deleted file mode 100644 index 79f6aa230..000000000 --- a/ansible/roles/neutron/templates/metadata_agent.ini.j2 +++ /dev/null @@ -1,6 +0,0 @@ -# 
metadata_agent.ini -[DEFAULT] -nova_metadata_ip = {% if orchestration_engine == 'KUBERNETES' %}nova-metadata{% else %}{{ kolla_internal_fqdn }}{% endif %} - -nova_metadata_port = {{ nova_metadata_port }} -metadata_proxy_shared_secret = {{ metadata_secret }} diff --git a/ansible/roles/neutron/templates/ml2_conf.ini.j2 b/ansible/roles/neutron/templates/ml2_conf.ini.j2 deleted file mode 100644 index 6ece2a72b..000000000 --- a/ansible/roles/neutron/templates/ml2_conf.ini.j2 +++ /dev/null @@ -1,72 +0,0 @@ -# ml2_conf.ini -[ml2] -{% if enable_ironic | bool %} -tenant_network_types = vxlan, flat -mechanism_drivers = openvswitch -{% else %} -# Changing type_drivers after bootstrap can lead to database inconsistencies -type_drivers = flat,vlan,vxlan -tenant_network_types = vxlan -{% endif %} - -{% if neutron_plugin_agent == "openvswitch" %} -mechanism_drivers = openvswitch,l2population -{% elif neutron_plugin_agent == "linuxbridge" %} -mechanism_drivers = linuxbridge,l2population -{% endif %} - -{% if neutron_extension_drivers %} -extension_drivers = {{ neutron_extension_drivers|map(attribute='name')|join(',') }} -{% endif %} - -[ml2_type_vlan] -{% if enable_ironic | bool %} -network_vlan_ranges = physnet1 -{% else %} -network_vlan_ranges = -{% endif %} - -[ml2_type_flat] -{% if enable_ironic | bool %} -flat_networks = * -{% else %} -flat_networks = {% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}{% if not loop.last %},{% endif %}{% endfor %} -{% endif %} - -[ml2_type_vxlan] -vni_ranges = 1:1000 -vxlan_group = 239.1.1.1 - -[securitygroup] -{% if neutron_plugin_agent == "openvswitch" %} -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -{% elif neutron_plugin_agent == "linuxbridge" %} -firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver -{% endif %} - -{% if neutron_plugin_agent == "openvswitch" %} -[agent] -tunnel_types = vxlan -l2_population = true -arp_responder = true - -{% if 
enable_neutron_dvr | bool %} -enable_distributed_routing = True -{% endif %} - -[ovs] -bridge_mappings = {% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %} - -ovsdb_connection = tcp:{{ api_interface_address }}:6640 -{% if enable_nova_fake | bool %} -integration_bridge = br-int-{{ item }} -{% endif %} -{% elif neutron_plugin_agent == "linuxbridge" %} -[linux_bridge] -physical_interface_mappings = physnet1:{{ neutron_external_interface }} - - -[vxlan] -l2_population = true -{% endif %} -local_ip = {{ tunnel_interface_address }} diff --git a/ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2 b/ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2 deleted file mode 100644 index bfc514ffb..000000000 --- a/ansible/roles/neutron/templates/neutron-bgp-dragent.json.j2 +++ /dev/null @@ -1,36 +0,0 @@ -{ - "command": "neutron-bgp-dragent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/bgp_dragent.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/bgp_dragent.ini", - "dest": "/etc/neutron/bgp_dragent.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - }, - { - "path": "/var/lib/neutron/kolla", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2 b/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2 deleted file mode 100644 index 5244a5b7e..000000000 --- a/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2 +++ /dev/null @@ 
-1,48 +0,0 @@ -{ - "command": "neutron-dhcp-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/dhcp_agent.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/dhcp_agent.ini", - "dest": "/etc/neutron/dhcp_agent.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/dnsmasq.conf", - "dest": "/etc/neutron/dnsmasq.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - }, - { - "path": "/var/lib/neutron/kolla", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-l3-agent.json.j2 b/ansible/roles/neutron/templates/neutron-l3-agent.json.j2 deleted file mode 100644 index 03fcadb12..000000000 --- a/ansible/roles/neutron/templates/neutron-l3-agent.json.j2 +++ /dev/null @@ -1,48 +0,0 @@ -{ - "command": "neutron-l3-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini --config-file /etc/neutron/fwaas_driver.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "owner": "neutron", - "perm": 
"0600" - }, - { - "source": "{{ container_config_directory }}/fwaas_driver.ini", - "dest": "/etc/neutron/fwaas_driver.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/l3_agent.ini", - "dest": "/etc/neutron/l3_agent.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - }, - { - "path": "/var/lib/neutron/kolla", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-lbaas-agent.json.j2 b/ansible/roles/neutron/templates/neutron-lbaas-agent.json.j2 deleted file mode 100644 index 9b966898a..000000000 --- a/ansible/roles/neutron/templates/neutron-lbaas-agent.json.j2 +++ /dev/null @@ -1,48 +0,0 @@ -{ - "command": "neutron-lbaasv2-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/lbaas_agent.ini --config-file /etc/neutron/neutron_lbaas.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/lbaas_agent.ini", - "dest": "/etc/neutron/lbaas_agent.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/neutron_lbaas.conf", - "dest": "/etc/neutron/neutron_lbaas.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - 
"optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - }, - { - "path": "/var/lib/neutron/kolla", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 b/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 deleted file mode 100644 index 6dfd44811..000000000 --- a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 +++ /dev/null @@ -1,31 +0,0 @@ -{ - "command": "neutron-linuxbridge-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2 b/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2 deleted file mode 100644 index f4b48ac76..000000000 --- a/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2 +++ /dev/null @@ -1,42 +0,0 @@ -{ - "command": "neutron-metadata-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - 
"owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/metadata_agent.ini", - "dest": "/etc/neutron/metadata_agent.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - }, - { - "path": "/var/lib/neutron/kolla", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2 b/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2 deleted file mode 100644 index e5dfd784c..000000000 --- a/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2 +++ /dev/null @@ -1,31 +0,0 @@ -{ - "command": "neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-server.json.j2 b/ansible/roles/neutron/templates/neutron-server.json.j2 deleted file mode 100644 index 3305eb4fa..000000000 --- a/ansible/roles/neutron/templates/neutron-server.json.j2 +++ /dev/null @@ -1,43 +0,0 @@ -{ - "command": "neutron-server --config-file /etc/neutron/neutron.conf --config-file 
/etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/neutron_vpnaas.conf", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/neutron_lbaas.conf", - "dest": "/etc/neutron/neutron_lbaas.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/neutron_vpnaas.conf", - "dest": "/etc/neutron/neutron_vpnaas.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron-vpnaas-agent.json.j2 b/ansible/roles/neutron/templates/neutron-vpnaas-agent.json.j2 deleted file mode 100644 index 265c935a6..000000000 --- a/ansible/roles/neutron/templates/neutron-vpnaas-agent.json.j2 +++ /dev/null @@ -1,54 +0,0 @@ -{ - "command": "neutron-vpn-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/fwaas_driver.ini --config-file /etc/neutron/l3_agent.ini --config-file /etc/neutron/vpnaas_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini", - "config_files": [ - { - "source": "{{ container_config_directory }}/neutron.conf", - "dest": "/etc/neutron/neutron.conf", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ml2_conf.ini", - "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory 
}}/fwaas_driver.ini", - "dest": "/etc/neutron/fwaas_driver.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/l3_agent.ini", - "dest": "/etc/neutron/l3_agent.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/vpnaas_agent.ini", - "dest": "/etc/neutron/vpnaas_agent.ini", - "owner": "neutron", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/neutron/policy.json", - "owner": "neutron", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/neutron", - "owner": "neutron:neutron", - "recurse": true - }, - { - "path": "/var/lib/neutron/kolla", - "owner": "neutron:neutron", - "recurse": true - } - ] -} diff --git a/ansible/roles/neutron/templates/neutron.conf.j2 b/ansible/roles/neutron/templates/neutron.conf.j2 deleted file mode 100644 index 8a32b17d1..000000000 --- a/ansible/roles/neutron/templates/neutron.conf.j2 +++ /dev/null @@ -1,134 +0,0 @@ -# neutron.conf -[DEFAULT] -debug = {{ neutron_logging_debug }} - -{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -# NOTE(elemoine): set use_stderr to False or the logs will also be sent to -# stderr and collected by Docker -use_stderr = False - -log_dir = /var/log/kolla/neutron -{% endif %} - -bind_host = {{ api_interface_address }} -bind_port = {{ neutron_server_port }} - -api_paste_config = /usr/share/neutron/api-paste.ini -endpoint_type = internalURL - -api_workers = {{ openstack_service_workers }} -metadata_workers = {{ openstack_service_workers }} - -# NOTE(SamYaple): We must specify this value here rather than the metadata conf -# because it is used by the l3 and dhcp agents. 
The reason the path has 'kolla' -# in it is because we are sharing this socket in a volume which is it's own dir -metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy - -{% if neutron_plugin_agent == "openvswitch" %} -interface_driver = openvswitch -{% elif neutron_plugin_agent == "linuxbridge" %} -interface_driver = linuxbridge -{% endif %} - -{% if enable_nova_fake | bool %} -ovs_integration_bridge = br-int-{{ item }} -host = {{ ansible_hostname }}_{{ item }} -{% endif %} - -allow_overlapping_ips = true -core_plugin = ml2 - -service_plugins = {{ neutron_service_plugins|map(attribute='name')|join(',') }} - -{% if enable_neutron_agent_ha | bool %} -dhcp_agents_per_network = {{ dhcp_agents_per_network }} -l3_ha = true -max_l3_agents_per_router = {{ max_l3_agents_per_router }} -min_l3_agents_per_router = {{ min_l3_agents_per_router }} -{% endif %} - -transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }} - -{% if enable_neutron_dvr | bool %} -router_distributed = True -{% endif %} - -{% if enable_designate | bool %} -dns_domain = {{ designate_ns_record }}. 
-external_dns_driver = designate -{% endif %} - -[nova] -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -region_name = {{ openstack_region_name }} -project_name = service -username = {{ nova_keystone_user }} -password = {{ nova_keystone_password }} -endpoint_type = internal - -[oslo_concurrency] -lock_path = /var/lib/neutron/tmp - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf - -[database] -connection = mysql+pymysql://{{ neutron_database_user }}:{{ neutron_database_password }}@{{ neutron_database_address }}/{{ neutron_database_name }} -max_retries = -1 - -[keystone_authtoken] -auth_uri = {{ keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ neutron_keystone_user }} -password = {{ neutron_keystone_password }} - -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} - -memcache_servers = {{ memcached_servers }}:{{ memcached_port }} - -[oslo_messaging_notifications] -{% if enable_ceilometer | bool or enable_searchlight | bool or enable_designate | bool %} -driver = messagingv2 -{% set topics=["notifications" if enable_ceilometer | bool else "", "notifications_designate" if enable_designate | bool else ""] %} -topics = {{ topics|reject("equalto", "")|list|join(",") }} -{% else %} -driver = noop -{% endif %} - -{% if neutron_plugin_agent == "sfc" %} -[sfc] -drivers = ovs -[flowclassifier] -{% endif %} - -{% if enable_octavia | bool %} -[octavia] -base_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ octavia_api_port }} -{% endif %} - -{% if enable_designate | bool %} -[designate] -url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ designate_api_port }}/v2 -auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }} -auth_url = {{ admin_protocol }}://{{ 
kolla_internal_fqdn }}:{{ keystone_admin_port }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ designate_keystone_user }} -password = {{ designate_keystone_password }} -allow_reverse_dns_lookup = True -ipv4_ptr_zone_prefix_size = 24 -ipv6_ptr_zone_prefix_size = 116 -{% endif %} diff --git a/ansible/roles/neutron/templates/neutron_lbaas.conf.j2 b/ansible/roles/neutron/templates/neutron_lbaas.conf.j2 deleted file mode 100644 index 98c9045c8..000000000 --- a/ansible/roles/neutron/templates/neutron_lbaas.conf.j2 +++ /dev/null @@ -1,17 +0,0 @@ -{% if enable_neutron_lbaas | bool %} -[service_providers] -{% if enable_octavia | bool %} -service_provider = LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default -{% else %} -service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -{% endif %} - -[service_auth] -auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v2.0 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ neutron_keystone_password }} -auth_version = 2 -region = {{ openstack_region_name }} -endpoint_type = internal -{% endif %} diff --git a/ansible/roles/neutron/templates/neutron_vpnaas.conf.j2 b/ansible/roles/neutron/templates/neutron_vpnaas.conf.j2 deleted file mode 100644 index 47eeefb4a..000000000 --- a/ansible/roles/neutron/templates/neutron_vpnaas.conf.j2 +++ /dev/null @@ -1,4 +0,0 @@ -{% if enable_neutron_vpnaas | bool %} -[service_providers] -service_provider = VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -{% endif %} diff --git a/ansible/roles/neutron/templates/openvswitch-db-server.json.j2 b/ansible/roles/neutron/templates/openvswitch-db-server.json.j2 deleted file mode 100644 index 147871ae1..000000000 --- a/ansible/roles/neutron/templates/openvswitch-db-server.json.j2 +++ /dev/null @@ -1,4 +0,0 @@ 
-{ - "command": "start-ovsdb-server {{ api_interface_address }} {{ neutron_bridge_name }} {{ neutron_external_interface }}", - "config_files": [] -} diff --git a/ansible/roles/neutron/templates/openvswitch-vswitchd.json.j2 b/ansible/roles/neutron/templates/openvswitch-vswitchd.json.j2 deleted file mode 100644 index eb916fa5f..000000000 --- a/ansible/roles/neutron/templates/openvswitch-vswitchd.json.j2 +++ /dev/null @@ -1,8 +0,0 @@ -{ -{% if std_logger %} - "command": "/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall", -{% else %} - "command": "/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log", -{% endif %} - "config_files": [] -} diff --git a/ansible/roles/neutron/templates/vpnaas_agent.ini.j2 b/ansible/roles/neutron/templates/vpnaas_agent.ini.j2 deleted file mode 100644 index 5647ac204..000000000 --- a/ansible/roles/neutron/templates/vpnaas_agent.ini.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{% set vpn_device_driver = 'neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver' if kolla_base_distro in ['ubuntu', 'debian'] else 'neutron_vpnaas.services.vpn.device_drivers.libreswan_ipsec.LibreSwanDriver'%} -[DEFAULT] - -[ipsec] -enable_detailed_logging = {{ neutron_logging_debug }} - -[service_providers] -service_provider = VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default - -[vpnagent] -vpn_device_driver = {{ vpn_device_driver }} diff --git a/ansible/roles/nova/defaults/main.yml b/ansible/roles/nova/defaults/main.yml deleted file mode 100644 index bd0b5913d..000000000 --- a/ansible/roles/nova/defaults/main.yml +++ /dev/null @@ -1,238 +0,0 @@ ---- -project_name: "nova" - -nova_services: - nova-libvirt: - container_name: nova_libvirt - group: compute - enabled: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}" - image: "{{ nova_libvirt_image_full }}" - 
pid_mode: "host" - privileged: True - volumes: - - "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/lib/modules:/lib/modules:ro" - - "/run/:/run/:shared" - - "/dev:/dev" - - "/sys/fs/cgroup:/sys/fs/cgroup" - - "kolla_logs:/var/log/kolla/" - - "libvirtd:/var/lib/libvirt" - - "nova_compute:/var/lib/nova/" - - "{% if enable_cinder_backend_nfs | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}" - - "nova_libvirt_qemu:/etc/libvirt/qemu" - nova-ssh: - container_name: "nova_ssh" - group: "compute" - image: "{{ nova_ssh_image_full }}" - enabled: True - volumes: - - "{{ node_config_directory }}/nova-ssh/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla" - - "nova_compute:/var/lib/nova" - - "{% if enable_cinder_backend_nfs | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}" - placement-api: - container_name: "placement_api" - group: "placement-api" - image: "{{ placement_api_image_full }}" - enabled: True - volumes: - - "{{ node_config_directory }}/placement-api/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - nova-api: - container_name: "nova_api" - group: "nova-api" - image: "{{ nova_api_image_full }}" - enabled: True - privileged: True - volumes: - - "{{ node_config_directory }}/nova-api/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/lib/modules:/lib/modules:ro" - - "kolla_logs:/var/log/kolla/" - nova-consoleauth: - container_name: "nova_consoleauth" - group: "nova-consoleauth" - image: "{{ nova_consoleauth_image_full }}" - enabled: True - volumes: - - "{{ node_config_directory }}/nova-consoleauth/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - nova-novncproxy: - container_name: "nova_novncproxy" - group: "nova-novncproxy" - image: "{{ 
nova_novncproxy_image_full }}" - enabled: "{{ nova_console == 'novnc' }}" - volumes: - - "{{ node_config_directory }}/nova-novncproxy/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - nova-scheduler: - container_name: "nova_scheduler" - group: "nova-scheduler" - image: "{{ nova_scheduler_image_full }}" - enabled: True - volumes: - - "{{ node_config_directory }}/nova-scheduler/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - nova-spicehtml5proxy: - container_name: "nova_spicehtml5proxy" - group: "nova-spicehtml5proxy" - image: "{{ nova_spicehtml5proxy_image_full }}" - enabled: "{{ nova_console == 'spice' }}" - volumes: - - "{{ node_config_directory }}/nova-spicehtml5proxy/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - nova-serialproxy: - container_name: "nova_serialproxy" - group: "nova-serialproxy" - image: "{{ nova_serialproxy_image_full }}" - enabled: "{{ enable_nova_serialconsole_proxy | bool }}" - volumes: - - "{{ node_config_directory }}/nova-serialproxy/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - nova-conductor: - container_name: "nova_conductor" - group: "nova-conductor" - enabled: True - image: "{{ nova_conductor_image_full }}" - volumes: - - "{{ node_config_directory }}/nova-conductor/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - nova-compute: - container_name: "nova_compute" - group: "compute" - image: "{{ nova_compute_image_full }}" - privileged: True - enabled: "{{ True if orchestration_engine == 'KUBERNETES' else not enable_nova_fake | bool }}" - volumes: - - "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "/lib/modules:/lib/modules:ro" - - 
"/run:/run:shared" - - "/dev:/dev" - - "kolla_logs:/var/log/kolla/" - - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}" - - "libvirtd:/var/lib/libvirt" - - "nova_compute:/var/lib/nova/" - - "{% if enable_cinder_backend_nfs | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}" - nova-compute-ironic: - container_name: "nova_compute_ironic" - group: "nova-compute-ironic" - image: "{{ nova_compute_ironic_image_full }}" - enabled: "{{ enable_ironic | bool }}" - volumes: - - "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro" - - "/etc/localtime:/etc/localtime:ro" - - "kolla_logs:/var/log/kolla/" - -#################### -# Ceph -#################### -ceph_nova_pool_type: "{{ ceph_pool_type }}" -ceph_nova_cache_mode: "{{ ceph_cache_mode }}" - -# Due to Ansible issues on include, you cannot override these variables. Please -# override the variables they reference instead. -nova_pool_name: "{{ ceph_nova_pool_name }}" -nova_pool_type: "{{ ceph_nova_pool_type }}" -nova_cache_mode: "{{ ceph_nova_cache_mode }}" - -# Discard option for nova managed disks. Requires libvirt (1, 0, 6) or later and -# qemu (1, 6, 0) or later. Set to "" to disable. 
-nova_hw_disk_discard: "unmap" - - -#################### -# Database -#################### -nova_database_name: "nova" -nova_database_user: "nova" -nova_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - -nova_api_database_name: "nova_api" -nova_api_database_user: "nova_api" -nova_api_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}" - -#################### -# Docker -#################### -nova_libvirt_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-libvirt" -nova_libvirt_tag: "{{ openstack_release }}" -nova_libvirt_image_full: "{{ nova_libvirt_image }}:{{ nova_libvirt_tag }}" - -nova_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-ssh" -nova_ssh_tag: "{{ openstack_release }}" -nova_ssh_image_full: "{{ nova_ssh_image }}:{{ nova_ssh_tag }}" - -nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-conductor" -nova_conductor_tag: "{{ openstack_release }}" -nova_conductor_image_full: "{{ nova_conductor_image }}:{{ nova_conductor_tag }}" - -nova_consoleauth_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-consoleauth" -nova_consoleauth_tag: "{{ openstack_release }}" -nova_consoleauth_image_full: "{{ nova_consoleauth_image }}:{{ nova_consoleauth_tag }}" - -nova_novncproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-novncproxy" -nova_novncproxy_tag: "{{ openstack_release }}" -nova_novncproxy_image_full: "{{ nova_novncproxy_image }}:{{ nova_novncproxy_tag }}" - -nova_spicehtml5proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ 
kolla_base_distro }}-{{ kolla_install_type }}-nova-spicehtml5proxy" -nova_spicehtml5proxy_tag: "{{ openstack_release }}" -nova_spicehtml5proxy_image_full: "{{ nova_spicehtml5proxy_image }}:{{ nova_spicehtml5proxy_tag }}" - -nova_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-scheduler" -nova_scheduler_tag: "{{ openstack_release }}" -nova_scheduler_image_full: "{{ nova_scheduler_image }}:{{ nova_scheduler_tag }}" - -nova_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-compute" -nova_compute_tag: "{{ openstack_release }}" -nova_compute_image_full: "{{ nova_compute_image }}:{{ nova_compute_tag }}" - -nova_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-api" -nova_api_tag: "{{ openstack_release }}" -nova_api_image_full: "{{ nova_api_image }}:{{ nova_api_tag }}" - -nova_compute_ironic_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-compute-ironic" -nova_compute_ironic_tag: "{{ openstack_release }}" -nova_compute_ironic_image_full: "{{ nova_compute_ironic_image }}:{{ nova_compute_ironic_tag }}" - -nova_serialproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-serialproxy" -nova_serialproxy_tag: "{{ openstack_release }}" -nova_serialproxy_image_full: "{{ nova_serialproxy_image }}:{{ nova_serialproxy_tag }}" - -placement_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-placement-api" -placement_api_tag: "{{ openstack_release }}" -placement_api_image_full: "{{ placement_api_image }}:{{ placement_api_tag }}" - 
-#################### -# OpenStack -#################### -nova_legacy_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s" -nova_legacy_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s" -nova_legacy_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s" - -nova_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2.1/%(tenant_id)s" -nova_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2.1/%(tenant_id)s" -nova_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_api_port }}/v2.1/%(tenant_id)s" - -placement_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ placement_api_port }}" -placement_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ placement_api_port }}" -placement_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ placement_api_port }}" - -nova_logging_debug: "{{ openstack_logging_debug }}" - -placement_keystone_user: "placement" - -openstack_nova_auth: "{{ openstack_auth }}" -openstack_placement_auth: "{{ openstack_auth }}" - - -nova_ssh_port: "8022" diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml deleted file mode 100644 index 055f27080..000000000 --- a/ansible/roles/nova/tasks/config.yml +++ /dev/null @@ -1,115 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item.key }}" - state: "directory" - recurse: yes - when: - - item.value.enabled | bool - with_dict: "{{ nova_services }}" - -- name: Copying over config.json files for services - template: - src: "{{ item.key }}.json.j2" - dest: "{{ node_config_directory }}/{{ item.key }}/config.json" - register: config_jsons - when: - - item.value.enabled | bool - with_dict: "{{ nova_services }}" - -- 
name: Copying over nova.conf - vars: - services_require_nova_conf: - - placement-api - - nova-api - - nova-compute - - nova-compute-ironic - - nova-conductor - - nova-consoleauth - - nova-novncproxy - - nova-serialproxy - - nova-scheduler - - nova-spicehtml5proxy - merge_configs: - vars: - service_name: "{{ item.key }}" - sources: - - "{{ role_path }}/templates/nova.conf.j2" - - "{{ node_custom_config }}/global.conf" - - "{{ node_custom_config }}/database.conf" - - "{{ node_custom_config }}/messaging.conf" - - "{{ node_custom_config }}/nova.conf" - - "{{ node_custom_config }}/nova/{{ item.key }}.conf" - - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf" - dest: "{{ node_config_directory }}/{{ item.key }}/nova.conf" - register: nova_confs - when: - - item.value.enabled | bool - - item.key in services_require_nova_conf - with_dict: "{{ nova_services }}" - -- name: Copying over libvirt configuration - vars: - service: "{{ nova_services['nova-libvirt'] }}" - template: - src: "{{ item.src }}" - dest: "{{ node_config_directory }}/nova-libvirt/{{ item.dest }}" - register: nova_libvirt_confs - when: - - service.enabled | bool - with_items: - - { src: "qemu.conf.j2", dest: "qemu.conf" } - - { src: "libvirtd.conf.j2", dest: "libvirtd.conf" } - -- name: Copying over placement-api wsgi configuration - vars: - service: "{{ nova_services['placement-api'] }}" - template: - src: "placement-api-wsgi.conf.j2" - dest: "{{ node_config_directory }}/placement-api/placement-api-wsgi.conf" - register: placement_api_wsgi_conf - when: - - service.enabled | bool - -- name: Copying files for nova-ssh - vars: - service: "{{ nova_services['nova-ssh'] }}" - template: - src: "{{ item.src }}" - dest: "{{ node_config_directory }}/nova-ssh/{{ item.dest }}" - register: nova_ssh_confs - when: - - service.enabled | bool - with_items: - - { src: "sshd_config.j2", dest: "sshd_config" } - - { src: "id_rsa", dest: "id_rsa" } - - { src: "id_rsa.pub", dest: "id_rsa.pub" } - - { src: 
"ssh_config.j2", dest: "ssh_config" } - -- name: Check if policies shall be overwritten - local_action: stat path="{{ node_custom_config }}/nova/policy.json" - register: nova_policy - -- name: Copying over existing policy.json - vars: - services_require_policy_json: - - placement-api - - nova-api - - nova-compute - - nova-compute-ironic - - nova-conductor - - nova-consoleauth - - nova-novncproxy - - nova-serialproxy - - nova-scheduler - - nova-spicehtml5proxy - template: - src: "{{ node_custom_config }}/nova/policy.json" - dest: "{{ node_config_directory }}/{{ item.key }}/policy.json" - register: policy_jsons - when: - - item.value.enabled | bool - - nova_policy.stat.exists | bool - - item.key in services_require_policy_json - with_dict: "{{ nova_services }}" - diff --git a/ansible/roles/nova/tasks/main.yml b/ansible/roles/nova/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/nova/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/nova/templates/id_rsa b/ansible/roles/nova/templates/id_rsa deleted file mode 100644 index 173a4b3e1..000000000 --- a/ansible/roles/nova/templates/id_rsa +++ /dev/null @@ -1 +0,0 @@ -{{ nova_ssh_key.private_key }} diff --git a/ansible/roles/nova/templates/id_rsa.pub b/ansible/roles/nova/templates/id_rsa.pub deleted file mode 100644 index 16bd674f2..000000000 --- a/ansible/roles/nova/templates/id_rsa.pub +++ /dev/null @@ -1 +0,0 @@ -{{ nova_ssh_key.public_key }} diff --git a/ansible/roles/nova/templates/libvirtd.conf.j2 b/ansible/roles/nova/templates/libvirtd.conf.j2 deleted file mode 100644 index 356ae3614..000000000 --- a/ansible/roles/nova/templates/libvirtd.conf.j2 +++ /dev/null @@ -1,13 +0,0 @@ -listen_tcp = 1 -listen_tls = 0 -auth_tcp = "none" -ca_file = "" -log_level = 3 - -{% if std_logger %} -log_outputs = "3:stderr" -{% else %} -log_outputs = "3:file:/var/log/kolla/libvirt/libvirtd.log" -{% endif %} - -listen_addr = "{{ api_interface_address 
}}" diff --git a/ansible/roles/nova/templates/nova-api.json.j2 b/ansible/roles/nova/templates/nova-api.json.j2 deleted file mode 100644 index d669bfdaf..000000000 --- a/ansible/roles/nova/templates/nova-api.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "nova-api", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-compute-ironic.json.j2 b/ansible/roles/nova/templates/nova-compute-ironic.json.j2 deleted file mode 100644 index 92c0ee71b..000000000 --- a/ansible/roles/nova/templates/nova-compute-ironic.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "nova-compute", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-compute.json.j2 b/ansible/roles/nova/templates/nova-compute.json.j2 deleted file mode 100644 index d37f07157..000000000 --- a/ansible/roles/nova/templates/nova-compute.json.j2 +++ /dev/null @@ -1,36 +0,0 @@ -{ - "command": "nova-compute", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", 
- "perm": "0600", - "optional": true - }{% if nova_backend == "rbd" %}, - { - "source": "{{ container_config_directory }}/ceph.*", - "dest": "/etc/ceph/", - "owner": "nova", - "perm": "0700" - }{% endif %} - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - }, - { - "path": "/var/lib/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-conductor.json.j2 b/ansible/roles/nova/templates/nova-conductor.json.j2 deleted file mode 100644 index 50bcd5369..000000000 --- a/ansible/roles/nova/templates/nova-conductor.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "nova-conductor", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-consoleauth.json.j2 b/ansible/roles/nova/templates/nova-consoleauth.json.j2 deleted file mode 100644 index af6a6c992..000000000 --- a/ansible/roles/nova/templates/nova-consoleauth.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "nova-consoleauth", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-libvirt.json.j2 b/ansible/roles/nova/templates/nova-libvirt.json.j2 deleted file mode 100644 index 
aa19f7a39..000000000 --- a/ansible/roles/nova/templates/nova-libvirt.json.j2 +++ /dev/null @@ -1,29 +0,0 @@ -{ - "command": "/usr/sbin/libvirtd --listen", - "config_files": [ - { - "source": "{{ container_config_directory }}/libvirtd.conf", - "dest": "/etc/libvirt/libvirtd.conf", - "owner": "root", - "perm": "0644" - }, - { - "source": "{{ container_config_directory }}/qemu.conf", - "dest": "/etc/libvirt/qemu.conf", - "owner": "root", - "perm": "0644" - }{% if nova_backend == "rbd" %}, - { - "source": "{{ container_config_directory }}/secrets", - "dest": "/etc/libvirt/secrets", - "owner": "root", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/ceph.conf", - "dest": "/etc/ceph/ceph.conf", - "owner": "root", - "perm": "0600" - }{% endif %} - ] -} diff --git a/ansible/roles/nova/templates/nova-novncproxy.json.j2 b/ansible/roles/nova/templates/nova-novncproxy.json.j2 deleted file mode 100644 index 11e2bbf06..000000000 --- a/ansible/roles/nova/templates/nova-novncproxy.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "nova-novncproxy", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-scheduler.json.j2 b/ansible/roles/nova/templates/nova-scheduler.json.j2 deleted file mode 100644 index b59f2f0e4..000000000 --- a/ansible/roles/nova/templates/nova-scheduler.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "nova-scheduler", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ 
container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-serialproxy.json.j2 b/ansible/roles/nova/templates/nova-serialproxy.json.j2 deleted file mode 100644 index 3aac72591..000000000 --- a/ansible/roles/nova/templates/nova-serialproxy.json.j2 +++ /dev/null @@ -1,18 +0,0 @@ -{ - "command": "nova-serialproxy", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-spicehtml5proxy.json.j2 b/ansible/roles/nova/templates/nova-spicehtml5proxy.json.j2 deleted file mode 100644 index b1a218bb8..000000000 --- a/ansible/roles/nova/templates/nova-spicehtml5proxy.json.j2 +++ /dev/null @@ -1,25 +0,0 @@ -{ - "command": "nova-spicehtml5proxy", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/nova-ssh.json.j2 b/ansible/roles/nova/templates/nova-ssh.json.j2 deleted file mode 100644 index 1fb041ecc..000000000 --- a/ansible/roles/nova/templates/nova-ssh.json.j2 +++ /dev/null @@ -1,29 +0,0 @@ -{ - "command": "/usr/sbin/sshd -D", - "config_files": [ - { - "source": "{{ container_config_directory }}/sshd_config", - "dest": "/etc/ssh/sshd_config", - "owner": "root", - "perm": 
"0644" - }, - { - "source": "{{ container_config_directory }}/ssh_config", - "dest": "/var/lib/nova/.ssh/config", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/id_rsa", - "dest": "/var/lib/nova/.ssh/id_rsa", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/id_rsa.pub", - "dest": "/var/lib/nova/.ssh/authorized_keys", - "owner": "nova", - "perm": "0600" - } - ] -} diff --git a/ansible/roles/nova/templates/nova.conf.j2 b/ansible/roles/nova/templates/nova.conf.j2 deleted file mode 100644 index 578006bcf..000000000 --- a/ansible/roles/nova/templates/nova.conf.j2 +++ /dev/null @@ -1,228 +0,0 @@ -# nova.conf -[DEFAULT] -debug = {{ nova_logging_debug }} - -{% if std_logger %} -use_syslog = False -use_stderr = True -{% else %} -log_dir = /var/log/kolla/nova -{% endif %} - -state_path = /var/lib/nova - -{% if kolla_enable_tls_external | bool %} -secure_proxy_ssl_header = X-Forwarded-Proto -{% endif %} - -osapi_compute_listen = {{ api_interface_address }} -osapi_compute_listen_port = {{ nova_api_port }} -osapi_compute_workers = {{ openstack_service_workers }} -metadata_workers = {{ openstack_service_workers }} - -metadata_listen = {{ api_interface_address }} -metadata_listen_port = {{ nova_metadata_port }} - -firewall_driver = nova.virt.firewall.NoopFirewallDriver - -{% if neutron_plugin_agent == "openvswitch" %} -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -{% elif neutron_plugin_agent == "linuxbridge" %} -linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver -{% endif %} - -allow_resize_to_same_host = true - -{% if enable_ironic | bool %} -scheduler_host_manager = ironic_host_manager -{% endif %} - -{% if service_name == "nova-compute-ironic" %} -host={{ ansible_hostname }}-ironic -log_file = /var/log/kolla/nova/nova-compute-ironic.log -compute_driver = ironic.IronicDriver -vnc_enabled = False -ram_allocation_ratio = 1.0 
-reserved_host_memory_mb = 0 -{% elif enable_nova_fake | bool %} -scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter -host = empty -compute_driver = fake.FakeDriver -{% else %} -compute_driver = libvirt.LibvirtDriver -{% endif %} - -# Though my_ip is not used directly, lots of other variables use $my_ip -my_ip = {{ api_interface_address }} - -{% if enable_ceilometer | bool or enable_searchlight | bool or enable_designate | bool %} -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -{% if enable_watcher | bool %} -compute_monitors=nova.compute.monitors.cpu.virt_driver -{% endif %} -{% endif %} - -transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}@rabbitmq:{{ rabbitmq_port }} - -[api] -use_forwarded_for = true - -[conductor] -workers = {{ openstack_service_workers }} - -{% if nova_console == 'novnc' %} -[vnc] -novncproxy_host = {{ api_interface_address }} -novncproxy_port = {{ nova_novncproxy_port }} -vncserver_listen = {{ api_interface_address }} -vncserver_proxyclient_address = {{ api_interface_address }} -novncproxy_base_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_novncproxy_port }}/vnc_auto.html -{% elif nova_console == 'spice' %} -[vnc] -# We have to turn off vnc to use spice -enabled = false -[spice] -enabled = true -server_listen = {{ api_interface_address }} -server_proxyclient_address = {{ api_interface_address }} -html5proxy_base_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html -html5proxy_host = {{ api_interface_address }} -html5proxy_port = {{ nova_spicehtml5proxy_port }} -{% endif %} - -{% if service_name == "nova-compute-ironic" %} -[ironic] -username = {{ ironic_keystone_user }} -password = {{ ironic_keystone_password }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_name = service -user_domain_name = default 
-project_domain_name = default -api_endpoint = {{ internal_protocol }}://ironic-api:{{ ironic_api_port }}/v1 -{% endif %} - -[oslo_concurrency] -lock_path = /var/lib/nova/tmp - -[glance] -api_servers = {{ internal_protocol }}://glance-api:{{ glance_api_port }} - -num_retries = 3 - -[cinder] -catalog_info = volumev2:cinderv2:internalURL - -[neutron] -url = {{ internal_protocol }}://neutron-server:{{ neutron_server_port }} -metadata_proxy_shared_secret = {{ metadata_secret }} -service_metadata_proxy = true - -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_name = default -user_domain_id = default -project_name = service -username = {{ neutron_keystone_user }} -password = {{ neutron_keystone_password }} - -[database] -connection = mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }} -max_pool_size = 50 -max_overflow = 1000 -max_retries = -1 - -[api_database] -connection = mysql+pymysql://{{ nova_api_database_user }}:{{ nova_api_database_password }}@{{ nova_api_database_address }}/{{ nova_api_database_name }} -max_retries = -1 - -[cache] -backend = oslo_cache.memcache_pool -enabled = True -memcache_servers = memcached:{{ memcached_port }} - - -[keystone_authtoken] -auth_uri = {{ keystone_internal_url }} -auth_url = {{ keystone_admin_url }} -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ nova_keystone_user }} -password = {{ nova_keystone_password }} - -memcache_security_strategy = ENCRYPT -memcache_secret_key = {{ memcache_secret_key }} -memcached_servers = memcached:{{ memcached_port }} - -{% if nova_compute_virt_type in ['kvm', 'qemu'] %} -[libvirt] -connection_uri = "qemu+tcp://{{ api_interface_address }}/system" -{% if enable_ceph | bool and nova_backend == "rbd" %} -images_type = rbd -images_rbd_pool = {{ ceph_nova_pool_name }} -images_rbd_ceph_conf = /etc/ceph/ceph.conf -rbd_user = cinder 
-disk_cachemodes="network=writeback" -{% if nova_hw_disk_discard != '' %} -hw_disk_discard = {{ nova_hw_disk_discard }} -{% endif %} -{% endif %} -{% if nova_backend == "rbd" %} -rbd_secret_uuid = {{ rbd_secret_uuid }} -{% endif %} -virt_type = {{ nova_compute_virt_type }} -{% endif %} - -[upgrade_levels] -compute = auto - -[oslo_messaging_notifications] -{% if enable_ceilometer | bool or enable_searchlight | bool or enable_designate | bool %} -driver = messagingv2 -{% set topics=["notifications" if enable_ceilometer | bool else "", "notifications_designate" if enable_designate | bool else ""] %} -topics = {{ topics|reject("equalto", "")|list|join(",") }} -{% else %} -driver = noop -{% endif %} - -[privsep_entrypoint] -helper_command=sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file /etc/nova/nova.conf - -[glance] -debug = {{ nova_logging_debug }} - -[guestfs] -debug = {{ nova_logging_debug }} - -[wsgi] -api_paste_config = /etc/nova/api-paste.ini -{% if kolla_enable_tls_external | bool %} -secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO -{% endif %} - -[scheduler] -max_attempts = 10 -discover_hosts_in_cells_interval = 60 - -{% if enable_placement | bool %} -[placement] -auth_type = password -auth_url = {{ keystone_admin_url }} -username = {{ placement_keystone_user }} -password = {{ placement_keystone_password }} -user_domain_name = default -project_name = service -project_domain_name = default -os_region_name = {{ openstack_region_name }} -os_interface = internal -{% endif %} - -osapi_compute_listen = {{ api_interface_address }} -osapi_compute_listen_port = {{ nova_api_port }} -osapi_compute_workers = {{ openstack_service_workers }} -metadata_workers = {{ openstack_service_workers }} - diff --git a/ansible/roles/nova/templates/placement-api-wsgi.conf.j2 b/ansible/roles/nova/templates/placement-api-wsgi.conf.j2 deleted file mode 100644 index d5954478d..000000000 --- a/ansible/roles/nova/templates/placement-api-wsgi.conf.j2 +++ /dev/null @@ 
-1,28 +0,0 @@ -{% set log_dir = '/var/log/kolla/nova' %} -{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %} -{% set wsgi_directory = '/usr/bin' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/bin' %} -Listen {{ api_interface_address }}:{{ placement_api_port }} - - - WSGIDaemonProcess placement-api processes={{ openstack_service_workers }} threads=1 user=nova group=nova display-name=%{GROUP} python-path={{ python_path }} - WSGIProcessGroup placement-api - WSGIScriptAlias / {{ wsgi_directory }}/nova-placement-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat -{% if std_logger %} - ErrorLog /proc/self/fd/2 - CustomLog /proc/self/fd/1 combined -{% else %} - ErrorLog "{{ log_dir }}/placement-api.log" - CustomLog "{{ log_dir }}/placement-api-access.log" logformat -{% endif %} - - - Require all granted - - - diff --git a/ansible/roles/nova/templates/placement-api.json.j2 b/ansible/roles/nova/templates/placement-api.json.j2 deleted file mode 100644 index 10ff1cfaa..000000000 --- a/ansible/roles/nova/templates/placement-api.json.j2 +++ /dev/null @@ -1,33 +0,0 @@ -{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %} -{% set apache_conf_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %} -{ - "command": "/usr/sbin/{{ apache_binary }} -DFOREGROUND", - "config_files": [ - { - "source": "{{ container_config_directory }}/nova.conf", - "dest": "/etc/nova/nova.conf", - "owner": "nova", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/policy.json", - "dest": "/etc/nova/policy.json", - "owner": "nova", - "perm": "0600", - "optional": true - }, - { - "source": "{{ container_config_directory 
}}/placement-api-wsgi.conf", - "dest": "/etc/{{ apache_conf_dir }}/placement-api-wsgi.conf", - "owner": "nova", - "perm": "0644" - } - ], - "permissions": [ - { - "path": "/var/log/kolla/nova", - "owner": "nova:nova", - "recurse": true - } - ] -} diff --git a/ansible/roles/nova/templates/qemu.conf.j2 b/ansible/roles/nova/templates/qemu.conf.j2 deleted file mode 100644 index 1127ef0a2..000000000 --- a/ansible/roles/nova/templates/qemu.conf.j2 +++ /dev/null @@ -1,13 +0,0 @@ -stdio_handler = "file" - -{% if kolla_base_distro in ['ubuntu', 'centos', 'oraclelinux']%} -# TODO: this workaround need to be fixed in Pike -# libvirt-bin latest version which runs vms as libvirt-qemu user. -# This locks access to nova created files for vms which have -# permissions 722 for nova user. We need to force qemu to use this user -# to be able to access vm files. -# see https://bugs.launchpad.net/kolla-ansible/+bug/1668654 -# see https://bugs.launchpad.net/kolla/+bug/1718541 -user = "nova" -group = "nova" -{% endif %} diff --git a/ansible/roles/nova/templates/secret.xml.j2 b/ansible/roles/nova/templates/secret.xml.j2 deleted file mode 100644 index 9f63543a2..000000000 --- a/ansible/roles/nova/templates/secret.xml.j2 +++ /dev/null @@ -1,6 +0,0 @@ - - {{ item.uuid }} - - {{ item.name }} - - diff --git a/ansible/roles/nova/templates/ssh_config.j2 b/ansible/roles/nova/templates/ssh_config.j2 deleted file mode 100644 index 7c5c962f9..000000000 --- a/ansible/roles/nova/templates/ssh_config.j2 +++ /dev/null @@ -1,4 +0,0 @@ -Host * - StrictHostKeyChecking no - UserKnownHostsFile /dev/null - port {{ nova_ssh_port }} diff --git a/ansible/roles/nova/templates/sshd_config.j2 b/ansible/roles/nova/templates/sshd_config.j2 deleted file mode 100644 index 29bea8dea..000000000 --- a/ansible/roles/nova/templates/sshd_config.j2 +++ /dev/null @@ -1,5 +0,0 @@ -Port {{ nova_ssh_port }} -ListenAddress {{ api_interface_address }} - -SyslogFacility AUTHPRIV -UsePAM yes diff --git 
a/ansible/roles/rabbitmq/defaults/main.yml b/ansible/roles/rabbitmq/defaults/main.yml deleted file mode 100644 index 4fb19feb4..000000000 --- a/ansible/roles/rabbitmq/defaults/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -project_name: "rabbitmq" - -#################### -# Docker -#################### -rabbitmq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-rabbitmq" -rabbitmq_tag: "{{ openstack_release }}" -rabbitmq_image_full: "{{ rabbitmq_image }}:{{ rabbitmq_tag }}" - - -#################### -# Message-Broker -#################### -rabbitmq_user: "openstack" -rabbitmq_cluster_name: "openstack" -rabbitmq_hostname: "{{ ansible_hostname }}" diff --git a/ansible/roles/rabbitmq/tasks/config.yml b/ansible/roles/rabbitmq/tasks/config.yml deleted file mode 100644 index e104fb8bc..000000000 --- a/ansible/roles/rabbitmq/tasks/config.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Ensuring config directories exist - file: - path: "{{ node_config_directory }}/{{ item }}" - state: "directory" - recurse: yes - with_items: - - "rabbitmq" - -- name: Copying over config.json files for services - template: - src: "{{ item }}.json.j2" - dest: "{{ node_config_directory }}/{{ item }}/config.json" - with_items: - - "rabbitmq" - -- name: Copying over rabbitmq configs - template: - src: "{{ item }}.j2" - dest: "{{ node_config_directory }}/rabbitmq/{{ item }}" - with_items: - - "rabbitmq-env.conf" - - "rabbitmq.config" - - "rabbitmq-clusterer.config" - - "definitions.json" diff --git a/ansible/roles/rabbitmq/tasks/main.yml b/ansible/roles/rabbitmq/tasks/main.yml deleted file mode 100644 index b5216113e..000000000 --- a/ansible/roles/rabbitmq/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: "config.yml" diff --git a/ansible/roles/rabbitmq/templates/definitions.json.j2 b/ansible/roles/rabbitmq/templates/definitions.json.j2 deleted file mode 100644 index fdb7267b5..000000000 --- 
a/ansible/roles/rabbitmq/templates/definitions.json.j2 +++ /dev/null @@ -1,14 +0,0 @@ -{ - "vhosts": [ - {"name": "/"} - ], - "users": [ - {"name": "{{ rabbitmq_user }}", "password": "{{ rabbitmq_password }}", "tags": "administrator"} - ], - "permissions": [ - {"user": "{{ rabbitmq_user }}", "vhost": "/", "configure": ".*", "write": ".*", "read": ".*"} - ], - "policies":[ - {"vhost": "/", "name": "ha-all", "pattern": ".*", "apply-to": "all", "definition": {"ha-mode":"all"}, "priority":0} - ] -} diff --git a/ansible/roles/rabbitmq/templates/rabbitmq-clusterer.config.j2 b/ansible/roles/rabbitmq/templates/rabbitmq-clusterer.config.j2 deleted file mode 100644 index bbb083123..000000000 --- a/ansible/roles/rabbitmq/templates/rabbitmq-clusterer.config.j2 +++ /dev/null @@ -1,8 +0,0 @@ -[ - {version, 1}, - {nodes, [ - {'rabbit@rabbitmq', disc} - ]}, - {gospel, - {node, 'rabbit@rabbitmq'}} -]. diff --git a/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2 b/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2 deleted file mode 100644 index 2281ccf00..000000000 --- a/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2 +++ /dev/null @@ -1,19 +0,0 @@ -RABBITMQ_NODENAME=rabbit - -{% if std_logger %} -RABBITMQ_LOGS=- -{% else %} -RABBITMQ_LOG_BASE=/var/log/kolla/rabbitmq -{% endif %} - -# TODO(sdake, vhosakot) -# erlang by default binds to wildcard (all interfaces) and can potentially -# interfere with the neutron external or tenant networks. We should in theory -# bind epmd to the host's IPv4 address to address the issue however this also -# has issues and can crash erlang when it is compiled with IPv6 support. 
-# See bugs: -# https://bugs.launchpad.net/ubuntu/+source/erlang/+bug/1374109 -# https://bugs.launchpad.net/kolla/+bug/1562701 -# https://bugzilla.redhat.com/show_bug.cgi?id=1324922 -#export ERL_EPMD_ADDRESS={{ api_interface_address }} -export ERL_EPMD_PORT={{ rabbitmq_epmd_port }} diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.config.j2 b/ansible/roles/rabbitmq/templates/rabbitmq.config.j2 deleted file mode 100644 index df13dfb65..000000000 --- a/ansible/roles/rabbitmq/templates/rabbitmq.config.j2 +++ /dev/null @@ -1,19 +0,0 @@ -[ - {kernel, [ - {inet_dist_use_interface, {% raw %}{{% endraw %}{{ api_interface_address | regex_replace('\.', ',') }}}}, - {inet_dist_listen_min, {{ rabbitmq_cluster_port }}}, - {inet_dist_listen_max, {{ rabbitmq_cluster_port }}} - ]}, - {rabbit, [ - {tcp_listeners, [ - {"{{ api_interface_address }}", {{ rabbitmq_port }}} - ]} - ]}, - {rabbitmq_management, [ - {listener, [ - {ip, "{{ api_interface_address }}"}, - {port, {{ rabbitmq_management_port }}} - ]}, - {load_definitions, "/etc/rabbitmq/definitions.json"} - ]} -]. 
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.json.j2 b/ansible/roles/rabbitmq/templates/rabbitmq.json.j2 deleted file mode 100644 index 07987def3..000000000 --- a/ansible/roles/rabbitmq/templates/rabbitmq.json.j2 +++ /dev/null @@ -1,41 +0,0 @@ -{ - "command": "/usr/sbin/rabbitmq-server", - "config_files": [ - { - "source": "{{ container_config_directory }}/rabbitmq-env.conf", - "dest": "/etc/rabbitmq/rabbitmq-env.conf", - "owner": "rabbitmq", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/rabbitmq.config", - "dest": "/etc/rabbitmq/rabbitmq.config", - "owner": "rabbitmq", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/rabbitmq-clusterer.config", - "dest": "/etc/rabbitmq/rabbitmq-clusterer.config", - "owner": "rabbitmq", - "perm": "0600" - }, - { - "source": "{{ container_config_directory }}/definitions.json", - "dest": "/etc/rabbitmq/definitions.json", - "owner": "rabbitmq", - "perm": "0600" - } - ], - "permissions": [ - { - "path": "/var/lib/rabbitmq", - "owner": "rabbitmq:rabbitmq", - "recurse": true - }, - { - "path": "/var/log/kolla/rabbitmq", - "owner": "rabbitmq:rabbitmq", - "recurse": true - } - ] -} diff --git a/ansible/site.retry b/ansible/site.retry deleted file mode 100644 index 2fbb50c4a..000000000 --- a/ansible/site.retry +++ /dev/null @@ -1 +0,0 @@ -localhost diff --git a/ansible/site.yml b/ansible/site.yml deleted file mode 100644 index 57e313f94..000000000 --- a/ansible/site.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Generate configuration files - hosts: localhost - connection: local - roles: - - rabbitmq - - mariadb - - iscsi - - ceph - - nova - - neutron - - keystone - - glance - - horizon - - memcached - - cinder - - heat - - ironic diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb76..000000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 6ee37100b..000000000 --- 
a/bindep.txt +++ /dev/null @@ -1,27 +0,0 @@ -build-essential [platform:dpkg] -curl [!platform:gentoo] -net-misc/curl [platform:gentoo] -dev-libs/cyrus-sasl [platform:gentoo] -cyrus-sasl-devel [platform:rpm] -gawk -libcurl-devel [platform:rpm] -libcurl4-gnutls-dev [platform:dpkg] -libffi-dev [platform:dpkg] -libffi-devel [platform:rpm] -virtual/libffi [platform:gentoo] -libsasl2-dev [platform:dpkg] -pypy [platform:ubuntu !platform:ubuntu-precise] -pypy-dev [platform:ubuntu !platform:ubuntu-precise] -python-dev [platform:dpkg] -python-devel [platform:rpm] -dev-lang/python [platform:gentoo] -# Note that python3-all-dev includes python3-all, added -# both here for documentary purpose. -python3-all [platform:dpkg !platform:ubuntu-precise] -python3-all-dev [platform:dpkg !platform:ubuntu-precise] -python3-devel [platform:fedora] -python34-devel [platform:centos] -crudini -jq -openssl-devel [platform:rpm] -openssl-dev [platform:dpgk] diff --git a/contrib/README.rst b/contrib/README.rst deleted file mode 100644 index 9b9ab0e29..000000000 --- a/contrib/README.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. README: - -======= -contrib -======= - -Contributing -============ - -This is a location for user contributed code that is not necessarily -tested and not part of any gate code. As such, contributions are -strictly not supported however they have been deemed useful to the -community. - -Entries -======= - -orchestartion/ko.py -------------------- - -This is a python script to orchestrate kubernetes and -kolla-kubernetes. It follows the bare metal deployment guide quite -strictly. Because it is not data-driven and basically does too much it -belongs in contrib. It has been used by some in the community to gain -a quick appreciation of kolla-kubernetes. 
diff --git a/contrib/orchestration/ko.py b/contrib/orchestration/ko.py deleted file mode 100755 index 05fbcdae1..000000000 --- a/contrib/orchestration/ko.py +++ /dev/null @@ -1,3042 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2017-present, Lenovo -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' -ko.py - Kolla Kubernetes Openstack deployer - -Purpose -======= - -This is a tool to deploy OpenStack on a Kubernetes Cluster using Kolla images -and Kolla-Kubernetes on bare metal servers or virtual machines. - -It sticks to the methods outlined in the kolla-kubernetes Bare Metal -Deployment Guide: - -https://docs.openstack.org/developer/kolla-kubernetes/deployment-guide.html - -This tool exists primarily to: - -1. Provide an easy way to run kolla-kubernetes which is a development project -to deploy Kolla OpenStack images on a Kubernetes Cluster. - -2. Ease development of kolla-kubernetes. - -3. Provide an OpenStack environment that is Production Level. - -Features -======== -1. Supports both Centos and Ubuntu natively. - -2. Requires just a VM with two NIC's, low congnitive overhead: -'ko.py int1 int2'. - -4. Options to change the versions of all the tools, like helm, kubernetes etc. - -5. Options to change the image version (openstack release) and image tag -(micro-version) of OpenStack as needed. The user can quickly play with Ocata, -or Pike or Master(Queens). - -6. Easy on the eye output, with optional verbose mode for more information. - -7. 
Contains a demo mode that walks the user through each step with additional -information and instruction. - -8. Verifies its completeness by generating a VM in the OpenStack Cluster. - -9. Leaves the user with a working OpenStack Cluster with all the basic -services. - -10. Lots of options to customize - even edit globals.yaml and cloud.yaml before -deploying. - -11. Cleans up previous deployment with --cc option - -12. Select a different docker registry to the default (kolla) to run with -custom images. - -13. Select between Canal and Weave CNI's for inter-pod communications. - -14. Optionally installs a fluent-bit container for log aggregation to ELK. - -15. Option to create a kubernetes minion to add to existing deployment. - -16. Option to create a kubernetes cluster only - no OpenStack - but another -option to install OpenStack over an existing Kubernetes cluster. - -17. Option to not overwrite kolla-kubenetes directory for development of -kolla-kubernetes code. - -Host machine requirements -========================= - -The host machine must satisfy the following minimum requirements: - -- 2 network interfaces -- 8GB min, 16GB preferred RAM -- 40G min, 80GB preferred disk space -- 2 CPU's Min, 4 preferred CPU's -- Root access to the deployment host machine - -Prerequisites -============= - -Verify the state of network interfaces. If using a VM spawned on OpenStack as -the host machine, the state of the second interface will be DOWN on booting -the VM. - - ip addr show - -Bring up the second network interface if it is down. - - ip link set ens4 up - -However as this interface will be used for Neutron External, this Interface -should not have an IP Address. Verify this with. - - ip addr show - - -Mandatory Inputs -================ - -1. mgmt_int (network_interface): -Name of the interface to be used for management operations. - -The `network_interface` variable is the interface to which Kolla binds API -services. 
For example, when starting Mariadb, it will bind to the IP on the -interface list in the ``network_interface`` variable. - -2. neutron_int (neutron_external_interface): -Name of the interface to be used for Neutron operations. - -The `neutron_external_interface` variable is the interface that will be used -for the external bridge in Neutron. Without this bridge the deployment instance -traffic will be unable to access the rest of the Internet. - -To create two interfaces like this in Ubuntu, for example: - -Edit /etc/network/interfaces: - -# The primary network interface -auto ens3 -iface ens3 inet dhcp - -# Neutron network interface (up but no ip address) -auto ens4 -iface ens4 inet manual -ifconfig ens4 up - -TODO -==== - -1. Convert to using https://github.com/kubernetes-incubator/client-python -2. Note there are various todo's scattered inline as well. - -Recomendations -============== -1. Due to the length the script can run for, recomend disabling sudo timeout: - -sudo visudo -Add: 'Defaults timestamp_timeout=-1' - -2. Due to the length of time the script can run for, I recommend using nohup - -E.g. nohup python -u k8s.py eth0 eth1 - -Then in another window: - -tail -f nohup.out - -3. 
Can be run remotely with: - -curl https://raw.githubusercontent.com/RichWellum/k8s/master/ko.py \ -| python - ens3 ens4 --image_version master -cni weave -''' - -from __future__ import print_function -import argparse -from argparse import RawDescriptionHelpFormatter -import logging -import os -import platform -import random -import re -import subprocess -import sys -import tarfile -import time - - -logger = logging.getLogger(__name__) - -# Nasty globals but used universally -global PROGRESS -PROGRESS = 0 - -global K8S_FINAL_PROGRESS -K8S_FINAL_PROGRESS = 0 - -# Set these both to 0 as they get set later depending on what is configured -global KOLLA_FINAL_PROGRESS -KOLLA_FINAL_PROGRESS = 0 - -global K8S_CLEANUP_PROGRESS -K8S_CLEANUP_PROGRESS = 0 - - -def set_logging(): - '''Set basic logging format.''' - - FORMAT = "[%(asctime)s.%(msecs)03d %(levelname)8s: "\ - "%(funcName)20s:%(lineno)s] %(message)s" - logging.basicConfig(format=FORMAT, datefmt="%H:%M:%S") - - -class AbortScriptException(Exception): - '''Abort the script and clean up before exiting.''' - - -def parse_args(): - '''Parse sys.argv and return args''' - - parser = argparse.ArgumentParser( - formatter_class=RawDescriptionHelpFormatter, - description='This tool provides a method to deploy OpenStack on a ' - 'Kubernetes Cluster using Kolla\nand Kolla-Kubernetes on bare metal ' - 'servers or virtual machines.\nVirtual machines supported are Ubuntu ' - 'and Centos. 
\nUsage as simple as: "ko.py eth0 eth1"\n' - 'The host machine must satisfy the following minimum requirements:\n' - '- 2 network interfaces\n' - '- 8GB min, 16GB preferred - main memory\n' - '- 40G min, 80GB preferred - disk space\n' - '- 2 CPUs Min, 4 preferred - CPUs\n' - 'Root access to the deployment host machine is required.', - epilog='E.g.: ko.py eth0 eth1 -iv master -cni weave --logs\n') - parser.add_argument('MGMT_INT', - help='The interface to which Kolla binds ' - 'API services, E.g: eth0') - parser.add_argument('NEUTRON_INT', - help='The interface that will be used for the ' - 'external bridge in Neutron, E.g: eth1') - parser.add_argument('-mi', '--mgmt_ip', type=str, default='None', - help='Provide own MGMT ip address Address, ' - 'E.g: 10.240.83.111') - parser.add_argument('-vi', '--vip_ip', type=str, default='None', - help='Provide own Keepalived VIP, used with ' - 'keepalived, should be an unused IP on management ' - 'NIC subnet, E.g: 10.240.83.112') - parser.add_argument('-iv', '--image_version', type=str, default='ocata', - help='Specify a different Kolla image version to ' - 'the default (ocata)') - parser.add_argument('-it', '--image_tag', type=str, - help='Specify a different Kolla tag version to ' - 'the default which is the same as the image_version ' - 'by default') - parser.add_argument('-hv', '--helm_version', type=str, default='2.7.2', - help='Specify a different helm version to the ' - 'default(2.7.2)') - parser.add_argument('-kv', '--k8s_version', type=str, default='1.9.1', - help='Specify a different kubernetes version to ' - 'the default(1.9.1) - note 1.8.0 is the minimum ' - 'supported') - parser.add_argument('-av', '--ansible_version', type=str, - default='2.4.2.0', - help='Specify a different ansible version to ' - 'the default(2.4.2.0)') - parser.add_argument('-jv', '--jinja2_version', type=str, default='2.10', - help='Specify a different jinja2 version to ' - 'the default(2.10)') - parser.add_argument('-dr', '--docker_repo', 
type=str, default='kolla', - help='Specify a different docker repo from ' - 'the default(kolla), for example "rwellum" has ' - 'the latest pike images') - parser.add_argument('-cni', '--cni', type=str, default='canal', - help='Specify a different CNI/SDN to ' - 'the default(canal), like "weave"') - parser.add_argument('-l', '--logs', action='store_true', - help='Install fluent-bit container') - parser.add_argument('-k8s', '--kubernetes', action='store_true', - help='Stop after bringing up kubernetes, ' - 'do not install OpenStack') - parser.add_argument('-cm', '--create_minion', action='store_true', - help='Do not install Kubernetes or OpenStack, ' - 'useful for preparing a multi-node minion') - parser.add_argument('-os', '--openstack', action='store_true', - help='Build OpenStack on an existing ' - 'Kubernetes Cluster') - parser.add_argument('-eg', '--edit_globals', action='store_true', - help='Pause to allow the user to edit the ' - 'globals.yaml file - for custom configuration') - parser.add_argument('-ec', '--edit_cloud', action='store_true', - help='Pause to allow the user to edit the ' - 'cloud.yaml file - for custom configuration') - parser.add_argument('-v', '--verbose', action='store_const', - const=logging.DEBUG, default=logging.INFO, - help='Turn on verbose messages') - parser.add_argument('-d', '--demo', action='store_true', - help='Display some demo information and ' - 'offer to move on') - parser.add_argument('-f', '--force', action='store_true', - help='When used in conjunction with --demo - it ' - 'will proceed without user input.') - parser.add_argument('-nn', '--no_network', action='store_true', - help='Do not run init-runonce, no networking created.') - parser.add_argument('-dm', '--dev_mode', action='store_true', - help='Adds option to modify kolla and more info') - parser.add_argument('-ng', '--no_git', action='store_true', - help='Select this to not override git repos ' - 'previously downloaded') - parser.add_argument('-bd', '--base_distro', 
type=str, default='centos', - help='Specify a base container image to ' - 'the default(centos), like "ubuntu"') - parser.add_argument('-c', '--cleanup', action='store_true', - help='YMMV: Cleanup existing Kubernetes cluster ' - 'before creating a new one. Because LVM is not ' - 'cleaned up, space will be used up. ' - '"-cc" is far more reliable but requires a reboot') - parser.add_argument('-cc', '--complete_cleanup', action='store_true', - help='Cleanup existing Kubernetes cluster ' - 'then exit, rebooting host is advised') - - return parser.parse_args() - - -def run_shell(args, cmd): - '''Run a shell command and return the output - - Print the output and errors if debug is enabled - Not using logger.debug as a bit noisy for this info - ''' - - p = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - out, err = p.communicate() - - if args.demo: - if not re.search('kubectl get pods', cmd): - print('DEMO: CMD: "%s"' % cmd) - - out = out.rstrip() - err = err.rstrip() - - if args.verbose == 10: # Hack - debug enabled - if str(out) is not '0' and str(out) is not '1' and out: - print("Shell STDOUT output: \n'%s'\n" % out) - if err: - print("Shell STDERR output: \n'%s'\n" % err) - - return(out) - - -def untar(fname): - '''Untar a tarred and compressed file''' - - if (fname.endswith("tar.gz")): - tar = tarfile.open(fname, "r:gz") - tar.extractall() - tar.close() - elif (fname.endswith("tar")): - tar = tarfile.open(fname, "r:") - tar.extractall() - tar.close() - - -def pause_tool_execution(str): - '''Pause the script for manual debugging of the VM before continuing''' - - print('Pause: "%s"' % str) - raw_input('Press Enter to continue\n') - - -def banner(description): - '''Display a bannerized print''' - - banner = len(description) - if banner > 100: - banner = 100 - - # First banner - print('\n') - for c in range(banner): - print('*', end='') - - # Add description - print('\n%s' % description) - - # Final banner - for c in 
range(banner): - print('*', end='') - print('\n') - - -def demo(args, title, description): - '''Pause the script to provide demo information''' - - if not args.demo: - return - - banner = len(description) - if banner > 100: - banner = 100 - - # First banner - print('\n') - for c in range(banner): - print('*', end='') - - # Add DEMO string - print('\n%s'.ljust(banner - len('DEMO')) % 'DEMO') - - # Add title formatted to banner length - print('%s'.ljust(banner - len(title)) % title) - - # Add description - print('%s' % description) - - # Final banner - for c in range(banner): - print('*', end='') - print('\n') - - if not args.force: - raw_input('Press Enter to continue with demo...') - else: - print('Demo: Continuing with Demo') - - -def curl(*args): - '''Use curl to retrieve a file from a URI''' - - curl_path = '/usr/bin/curl' - curl_list = [curl_path] - for arg in args: - curl_list.append(arg) - curl_result = subprocess.Popen( - curl_list, - stderr=subprocess.PIPE, - stdout=subprocess.PIPE).communicate()[0] - return curl_result - - -def linux_ver(): - '''Determine Linux version - Ubuntu or Centos - - Fail if it is not one of those. - Return the long string for output - ''' - - find_os = platform.linux_distribution() - if re.search('Centos', find_os[0], re.IGNORECASE): - linux = 'centos' - elif re.search('Ubuntu', find_os[0], re.IGNORECASE): - linux = 'ubuntu' - else: - print('Linux "%s" is not supported yet' % find_os[0]) - sys.exit(1) - - return(linux) - - -def linux_ver_det(): - '''Determine Linux version - Ubuntu or Centos - - Return the long string for output - ''' - - return(str(platform.linux_distribution())) - - -def docker_ver(args): - '''Display docker version''' - - oldstr = run_shell(args, "docker --version | awk '{print $3}'") - newstr = oldstr.replace(",", "") - return(newstr.rstrip()) - - -def tools_versions(args, str): - '''A Dictionary of tools and their versions - - Defaults are populated by tested well known versions. 
- - User can then overide each individual tool. - - Return a Version for a string. - ''' - - tools = [ - "kolla", - "helm", - "kubernetes", - "ansible", - "jinja2"] - - # This should match up with the defaults set in parse_args - # kolla helm k8s ansible jinja2 - versions = ["ocata", "2.7.2", "1.9.1", "2.4.2.0", "2.10"] - - tools_dict = {} - # Generate dictionary - for i in range(len(tools)): - tools_dict[tools[i]] = versions[i] - - # Now overide based on user input - first - if tools_dict["kolla"] is not args.image_version: - tools_dict["kolla"] = args.image_version - if tools_dict["helm"] is not args.helm_version: - tools_dict["helm"] = args.helm_version - if tools_dict["kubernetes"] is not args.k8s_version: - tools_dict["kubernetes"] = args.k8s_version - if tools_dict["ansible"] is not args.ansible_version: - tools_dict["ansible"] = args.ansible_version - if tools_dict["jinja2"] is not args.jinja2_version: - tools_dict["jinja2"] = args.jinja2_version - - return(tools_dict[str]) - - -def print_versions(args): - '''Print out lots of information - - Tool versions, networking, user options and more - ''' - - banner('Kubernetes - Bring up a Kubernetes Cluster') - if args.edit_globals: - print(' *globals.yaml will be editable with this option*\n') - - if args.edit_cloud: - print(' *cloud.yaml will be editable with this option*\n') - - # This a good place to install docker - as it's always needed and we - # need the version anyway - - # Note later versions of ubuntu require a change: - # https://github.com/moby/moby/issues/15651 - # sudo vi /lib/systemd/system/docker.service - # ExecStart=/usr/bin/dockerd -H fd:// $DOCKER_OPTS -s overlay2 - # sudo systemctl daemon-reload - # sudo systemctl restart docker - # sudo docker info - if linux_ver() == 'centos': - run_shell(args, 'sudo yum install -y docker') - else: - run_shell(args, 'sudo apt autoremove -y && sudo apt autoclean') - run_shell(args, 'sudo apt-get install -y docker.io') - - print('\nLinux Host Info: %s' % 
linux_ver_det()) - - print('\nNetworking Info:') - print(' Management Int: %s' % args.MGMT_INT) - print(' Neutron Int: %s' % args.NEUTRON_INT) - print(' Management IP: %s' % args.mgmt_ip) - print(' VIP Keepalive: %s' % args.vip_ip) - print(' CNI/SDN: %s' % args.cni) - - print('\nTool Versions:') - print(' Docker version: %s' % docker_ver(args)) - print(' Helm version: %s' % tools_versions(args, 'helm')) - print(' K8s version: %s' - % tools_versions(args, 'kubernetes').rstrip()) - print(' Ansible version: %s' % - tools_versions(args, 'ansible').rstrip()) - print(' Jinja2 version: %s' % - tools_versions(args, 'jinja2').rstrip()) - - print('\nOpenStack Versions:') - print(' Base image version: %s' % args.base_distro) - print(' Docker repo: %s' % args.docker_repo) - print(' Openstack version: %s' % tools_versions(args, 'kolla')) - print(' Image Tag version: %s' % kolla_get_image_tag(args)) - - print('\nOptions:') - print(' Logging enabled: %s' % args.logs) - print(' Dev mode enabled: %s' % args.dev_mode) - print(' No Network: %s' % args.no_network) - print(' Demo mode: %s' % args.demo) - print(' Edit Cloud: %s' % args.edit_cloud) - print(' Edit Globals: %s' % args.edit_globals) - print('\n') - time.sleep(2) - - -def populate_ip_addresses(args): - '''Populate the management and vip ip addresses - - By either finding the user input or finding them from - the users system - ''' - - if linux_ver() == 'centos': - run_shell(args, 'sudo yum install -y nmap') - else: - run_shell(args, 'sudo apt-get install -y nmap') - - # Populate Management IP Address - if args.mgmt_ip is 'None': - mgt = run_shell( - args, - "ip add show %s | awk ' / inet / {print $2}' | cut -f1 -d'/'" - % args.MGMT_INT) - args.mgmt_ip = mgt.strip() - if args.mgmt_ip is None: - print(' *Kubernetes - No IP Address found on %s*') - sys.exit(1) - - # Populate VIP IP Address - by finding an unused IP on MGMT subnet - if args.vip_ip is 'None': - start_ip = args.mgmt_ip[:args.mgmt_ip.rfind(".")] - - r = 
list(range(2, 253)) - random.shuffle(r) - for k in r: - vip = run_shell(args, 'sudo nmap -sP -PR %s.%s' % (start_ip, k)) - if "Host seems down" in vip: - args.vip_ip = start_ip + '.' + str(k) - break - - -def k8s_create_repo(args): - '''Create a k8s repository file''' - - if linux_ver() == 'centos': - name = './kubernetes.repo' - repo = '/etc/yum.repos.d/kubernetes.repo' - with open(name, "w") as w: - w.write("""\ -[kubernetes] -name=Kubernetes -baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64 -enabled=1 -gpgcheck=0 -repo_gpgcheck=1 -gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg -""") - # todo: add -H to all sudo's see if it works in both envs - run_shell(args, 'sudo mv ./kubernetes.repo %s' % repo) - else: - run_shell(args, - 'curl -s https://packages.cloud.google.com' - '/apt/doc/apt-key.gpg ' - '| sudo -E apt-key add -') - name = './kubernetes.list' - repo = '/etc/apt/sources.list.d/kubernetes.list' - with open(name, "w") as w: - w.write("""\ -deb http://apt.kubernetes.io/ kubernetes-xenial main -""") - run_shell(args, 'sudo mv ./kubernetes.list %s' % repo) - run_shell(args, 'sudo apt-get update') - - -def k8s_wait_for_kube_system(args): - '''Wait for basic k8s to come up''' - - TIMEOUT = 2000 # Give k8s 2000s to come up - RETRY_INTERVAL = 10 - elapsed_time = 0 - prev_cnt = 0 - base_pods = 6 - - print('(%02d/%d) Kubernetes - Wait for basic ' - 'Kubernetes (6 pods) infrastructure' - % (PROGRESS, K8S_FINAL_PROGRESS)) - - while True: - pod_status = run_shell(args, - 'kubectl get pods -n kube-system --no-headers') - nlines = len(pod_status.splitlines()) - if nlines == 6: - print( - ' *All pods %s/%s are started, continuing*' % - (nlines, base_pods)) - run_shell(args, 'kubectl get pods -n kube-system') - break - elif elapsed_time < TIMEOUT: - if nlines < 0: - cnt = 0 - else: - cnt = nlines - - if elapsed_time is not 0: - if cnt is not prev_cnt: - print( - " *Running pod(s) 
status after %d seconds %s:%s*" - % (elapsed_time, cnt, base_pods)) - prev_cnt = cnt - time.sleep(RETRY_INTERVAL) - elapsed_time = elapsed_time + RETRY_INTERVAL - continue - else: - # Dump verbose output in case it helps... - print(pod_status) - raise AbortScriptException( - "Kubernetes - did not come up after {0} seconds!" - .format(elapsed_time)) - add_one_to_progress() - - -def k8s_wait_for_pod_start(args, chart): - '''Wait for a chart to start''' - - # Useful for debugging issues when Service fails to start - return - - if 'cinder' in chart: - chart = 'cinder' - - if 'nova' in chart: - chart = 'nova' - - time.sleep(3) - - while True: - chart_up = run_shell(args, - 'kubectl get pods --no-headers --all-namespaces' - ' | grep -i "%s" | wc -l' % chart) - if int(chart_up) == 0: - print(' *Kubernetes - chart "%s" not started yet*' % chart) - time.sleep(3) - continue - else: - print(' *Kubernetes - chart "%s" is started*' % chart) - break - - -def k8s_wait_for_running_negate(args, timeout=None): - '''Query get pods until only state is Running''' - - if timeout is None: - TIMEOUT = 1000 - else: - TIMEOUT = timeout - - RETRY_INTERVAL = 3 - - print(' Wait for all pods to be in Running state:') - - elapsed_time = 0 - prev_not_running = 0 - while True: - etcd_check = run_shell(args, - 'kubectl get pods --no-headers --all-namespaces' - ' | grep -i "request timed out" | wc -l') - - if int(etcd_check) != 0: - print('Kubernetes - etcdserver is busy - ' - 'retrying after brief pause') - time.sleep(15) - continue - - not_running = run_shell( - args, - 'kubectl get pods --no-headers --all-namespaces | ' - 'grep -v "Running" | wc -l') - - if int(not_running) != 0: - if prev_not_running != not_running: - print(" *%02d pod(s) are not in Running state*" - % int(not_running)) - time.sleep(RETRY_INTERVAL) - elapsed_time = elapsed_time + RETRY_INTERVAL - prev_not_running = not_running - continue - else: - print(' *All pods are in Running state*') - time.sleep(1) - break - - if 
elapsed_time > TIMEOUT: - # Dump verbose output in case it helps... - print(int(not_running)) - raise AbortScriptException( - "Kubernetes did not come up after {0} 1econds!" - .format(elapsed_time)) - sys.exit(1) - - -def k8s_wait_for_vm(args, vm): - """Wait for a vm to be listed as running in nova list""" - - TIMEOUT = 50 - RETRY_INTERVAL = 5 - - print(" Kubernetes - Wait for VM %s to be in running state:" % vm) - elapsed_time = 0 - - while True: - nova_out = run_shell(args, - '. ~/keystonerc_admin; nova list | grep %s' % vm) - if not re.search('Running', nova_out): - print(' *Kubernetes - VM %s is not Running yet - ' - 'wait 15s*' % vm) - time.sleep(RETRY_INTERVAL) - elapsed_time = elapsed_time + RETRY_INTERVAL - if elapsed_time > TIMEOUT: - print('VM %s did not come up after %s seconds! ' - 'This is probably not in a healthy state' % - (vm, int(elapsed_time))) - break - continue - else: - print(' *Kubernetes - VM %s is Running*' % vm) - break - - -def add_one_to_progress(): - '''Add one to progress meter''' - - global PROGRESS - PROGRESS += 1 - - -def clean_progress(): - '''Reset progress meter to zero''' - - global PROGRESS - PROGRESS = 0 - - -def print_progress(process, msg, finalctr, add_one=False): - '''Print a message with a progress account''' - - if add_one: - add_one_to_progress() - print("(%02d/%02d) %s - %s" % (PROGRESS, finalctr, process, msg)) - add_one_to_progress() - - -def k8s_install_tools(args): - '''Basic tools needed for first pass''' - - # Reset kubeadm if it's a new installation - if not args.openstack: - run_shell(args, 'sudo kubeadm reset') - - print_progress('Kubernetes', - 'Installing environment', - K8S_FINAL_PROGRESS) - - if linux_ver() == 'centos': - run_shell(args, 'sudo yum update -y; sudo yum upgrade -y') - run_shell(args, 'sudo yum install -y qemu epel-release bridge-utils') - run_shell(args, - 'sudo yum install -y python-pip python-devel libffi-devel ' - 'gcc openssl-devel sshpass') - run_shell(args, 'sudo yum install -y git 
def k8s_setup_ntp(args):
    '''Install and start NTP so all cluster nodes agree on time.

    Uses the ntpd service on CentOS and the ntp package/service on Ubuntu.
    '''

    print_progress('Kubernetes',
                   'Setup NTP',
                   K8S_FINAL_PROGRESS)

    if linux_ver() == 'centos':
        run_shell(args, 'sudo yum install -y ntp')
        run_shell(args, 'sudo systemctl enable ntpd.service')
        run_shell(args, 'sudo systemctl start ntpd.service')
    else:
        run_shell(args, 'sudo apt-get install -y ntp')
        run_shell(args, 'sudo systemctl restart ntp')


def k8s_turn_things_off(args):
    '''Currently turn off SELinux and Firewall'''

    # Evaluate once; the original called linux_ver() for each branch.
    centos = linux_ver() == 'centos'

    if centos:
        print_progress('Kubernetes',
                       'Turn off SELinux',
                       K8S_FINAL_PROGRESS)

        run_shell(args, 'sudo setenforce 0')
        run_shell(args,
                  'sudo sed -i s/enforcing/permissive/g /etc/selinux/config')

    print_progress('Kubernetes',
                   'Turn off firewall and ISCSID',
                   K8S_FINAL_PROGRESS)

    if centos:
        run_shell(args, 'sudo systemctl stop firewalld')
        run_shell(args, 'sudo systemctl disable firewalld')
    else:
        run_shell(args, 'sudo ufw disable')
        # FIX: the original stopped the same unit twice ('iscsid' and
        # 'iscsid.service' resolve to one systemd unit); stop it once.
        run_shell(args, 'sudo systemctl stop iscsid.service')


def k8s_install_k8s(args):
    '''Necessary repo to install kubernetes and tools

    This is often broken and may need to be more programatic
    '''

    print_progress('Kubernetes',
                   'Create Kubernetes repo and install Kubernetes ',
                   K8S_FINAL_PROGRESS)

    run_shell(args, 'sudo -H pip install --upgrade pip')
    k8s_create_repo(args)

    # kubelet, kubeadm and kubectl are all pinned to the same requested
    # kubernetes version; look it up once instead of three times per use.
    k8s_version = tools_versions(args, 'kubernetes')

    demo(args, 'Installing Kubernetes', 'Installing docker ebtables '
         'kubelet-%s kubeadm-%s kubectl-%s kubernetes-cni' %
         (k8s_version, k8s_version, k8s_version))

    if linux_ver() == 'centos':
        run_shell(args,
                  'sudo yum install -y ebtables kubelet-%s '
                  'kubeadm-%s kubectl-%s kubernetes-cni'
                  % (k8s_version, k8s_version, k8s_version))
    else:
        # todo - this breaks when ubuntu steps up a revision to -01 etc
        run_shell(args,
                  'sudo apt-get install -y --allow-downgrades '
                  'ebtables kubelet=%s-00 kubeadm=%s-00 kubectl=%s-00 '
                  'kubernetes-cni' % (k8s_version, k8s_version, k8s_version))


def k8s_setup_dns(args):
    '''DNS services and kubectl fixups'''

    print_progress('Kubernetes',
                   'Start docker and setup the DNS server with '
                   'the service CIDR',
                   K8S_FINAL_PROGRESS)

    run_shell(args, 'sudo systemctl enable docker')
    run_shell(args, 'sudo systemctl start docker')
    # Edit a copy of the kubeadm drop-in in /tmp and move it back, since
    # sudo does not apply to the shell redirections below.
    run_shell(
        args,
        'sudo cp /etc/systemd/system/kubelet.service.d/10-kubeadm.conf /tmp')
    run_shell(args, 'sudo chmod 777 /tmp/10-kubeadm.conf')
    run_shell(args,
              'sudo sed -i s/10.96.0.10/10.3.3.10/g /tmp/10-kubeadm.conf')

    # https://github.com/kubernetes/kubernetes/issues/53333#issuecomment-339793601
    # https://stackoverflow.com/questions/46726216/kubelet-fails-to-get-cgroup-stats-for-docker-and-kubelet-services
    run_shell(
        args,
        'sudo echo Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd" '
        '>> /tmp/10-kubeadm.conf')
    run_shell(
        args,
        'sudo echo Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false" '
        '>> /tmp/10-kubeadm.conf')
    run_shell(
        args,
        'sudo echo Environment="KUBELET_DOS_ARGS=--runtime-cgroups=/systemd'
        '/system.slice --kubelet-cgroups=/systemd/system.slice --hostname-'
        'override=$(hostname) --fail-swap-on=false" >> /tmp/10-kubeadm.conf')

    run_shell(args, 'sudo mv /tmp/10-kubeadm.conf '
              '/etc/systemd/system/kubelet.service.d/10-kubeadm.conf')


def k8s_reload_service_files(args):
    '''Service files where modified so bring them up again'''

    print_progress('Kubernetes',
                   'Reload the hand-modified service files',
                   K8S_FINAL_PROGRESS)

    run_shell(args, 'sudo systemctl daemon-reload')


def k8s_start_kubelet(args):
    '''Start kubelet'''

    print_progress('Kubernetes',
                   'Enable and start kubelet',
                   K8S_FINAL_PROGRESS)

    demo(args, 'Enable and start kubelet',
         'kubelet is a command line interface for running commands '
         'against Kubernetes clusters')

    run_shell(args, 'sudo systemctl enable kubelet')
    run_shell(args, 'sudo systemctl start kubelet')
def k8s_deploy_k8s(args):
    '''Start the kubernetes master

    Runs 'kubeadm init' and, in non-demo mode, echoes the generated
    'kubeadm join' command so additional nodes can be added.
    '''

    print_progress('Kubernetes',
                   'Deploying Kubernetes with kubeadm (Slow!)',
                   K8S_FINAL_PROGRESS)

    demo(args, 'Initializes your Kubernetes Master',
         'One of the most frequent criticisms of Kubernetes is that it is '
         'hard to install.\n'
         'Kubeadm is a new tool that is part of the Kubernetes distribution '
         'that makes this easier')
    demo(args, 'The Kubernetes Control Plane',
         'The Kubernetes control plane consists of the Kubernetes '
         'API server\n'
         '(kube-apiserver), controller manager (kube-controller-manager),\n'
         'and scheduler (kube-scheduler). The API server depends '
         'on etcd so\nan etcd cluster is also required.\n'
         'https://www.ianlewis.org/en/how-kubeadm-initializes-'
         'your-kubernetes-master')
    demo(args, 'kubeadm and the kubelet',
         'Kubernetes has a component called the Kubelet which '
         'manages containers\nrunning on a single host. It allows us to '
         'use Kubelet to manage the\ncontrol plane components. This is '
         'exactly what kubeadm sets us up to do.\n'
         'We run:\n'
         'kubeadm init --pod-network-cidr=10.1.0.0/16 '
         '--service-cidr=10.3.3.0/24 --ignore-preflight-errors=all '
         'and check output\n'
         'Run: "watch -d sudo docker ps" in another window')
    demo(args, 'Monitoring Kubernetes',
         'What monitors Kubelet and make sure it is always running? This '
         'is where we use systemd.\n Systemd is started as PID 1 so the OS\n'
         'will make sure it is always running, systemd makes sure the '
         'Kubelet is running, and the\nKubelet makes sure our containers '
         'with the control plane components are running.')

    # The identical init invocation is used in both branches below;
    # keep it in one place so the CIDRs cannot drift apart.
    init_cmd = ('sudo kubeadm init --pod-network-cidr=10.1.0.0/16 '
                '--service-cidr=10.3.3.0/24 '
                '--ignore-preflight-errors=all')

    if args.demo:
        print(run_shell(args, init_cmd))
        demo(args, 'What happened?',
             'We can see above that kubeadm created the necessary '
             'certificates for\n'
             'the API, started the control plane components, '
             'and installed the essential addons.\n'
             'The join command is important - it allows other nodes '
             'to be added to the existing resources\n'
             'Kubeadm does not mention anything about the Kubelet but '
             'we can verify that it is running:')
        print(run_shell(args,
                        'sudo ps aux | grep /usr/bin/kubelet | grep -v grep'))
        demo(args,
             'Kubelet was started. But what is it doing? ',
             'The Kubelet will monitor the control plane components '
             'but what monitors Kubelet and make sure\n'
             'it is always running? This is where we use systemd. '
             'Systemd is started as PID 1 so the OS\n'
             'will make sure it is always running, systemd makes '
             'sure the Kubelet is running, and the\nKubelet '
             'makes sure our containers with the control plane '
             'components are running.')
    else:
        out = run_shell(args, init_cmd)
        # Even in no-verbose mode, we need to display the join command to
        # enabled multi-node
        for line in out.splitlines():
            if re.search('kubeadm join', line):
                print('  You can now join any number of machines by '
                      'running the following on each node as root:')
                # FIX: the original did "line += ' ' * 2", appending two
                # invisible spaces to the END of the join command; indent
                # it instead so it lines up under the message above.
                print(' ' * 2 + line)
The pki\n' - 'directory contains the certificate authority certs, ' - 'API server\ncerts, and tokens:') - print(run_shell(args, 'ls -lh /etc/kubernetes/pki')) - demo(args, 'The manifests directory ', - 'This directory is where things get interesting. In the\n' - 'manifests directory we have a number of json files for our\n' - 'control plane components.') - print(run_shell(args, 'sudo ls -lh /etc/kubernetes/manifests/')) - demo(args, 'Pod Manifests', - 'If you noticed earlier the Kubelet was passed the\n' - '--pod-manifest-path=/etc/kubernetes/manifests flag ' - 'which tells\nit to monitor the files in the ' - '/etc/kubernetes/manifests directory\n' - 'and makes sure the components defined therein are ' - 'always running.\nWe can see that they are running my ' - 'checking with the local Docker\nto list the running containers.') - print( - run_shell(args, - 'sudo docker ps --format="table {{.ID}}\t{{.Image}}"')) - demo(args, 'Note above containers', - 'We can see that etcd, kube-apiserver, ' - 'kube-controller-manager, and\nkube-scheduler are running.') - demo(args, 'How can we connect to containers?', - 'If we look at each of the json files in the ' - '/etc/kubernetes/manifests\ndirectory we can see that they ' - 'each use the hostNetwork: true option\nwhich allows the ' - 'applications to bind to ports on the host just as\n' - 'if they were running outside of a container.') - demo(args, 'Connect to the API', - 'So we can connect to the API servers insecure local port.\n' - 'curl http://127.0.0.1:8080/version') - print(run_shell(args, 'sudo curl http://127.0.0.1:8080/version')) - demo(args, 'Secure port?', 'The API server also binds a secure' - 'port 443 which\nrequires a client cert and authentication. 
' - 'Be careful to use the\npublic IP for your master here.\n' - 'curl --cacert /etc/kubernetes/pki/ca.pem ' - 'https://10.240.0.2/version') - print(run_shell(args, 'curl --cacert /etc/kubernetes/pki/ca.pem ' - 'https://10.240.0.2/version')) - print(' Note "kubectl get pods --all-namespaces" should work now') - - -def k8s_deploy_cni(args): - '''Deploy CNI/SDN to K8s cluster''' - - if args.cni == 'weave': - print_progress('Kubernetes', - 'Deploy pod network SDN using Weave CNI', - K8S_FINAL_PROGRESS) - - weave_ver = run_shell(args, - "echo $(kubectl version | base64 | tr -d '\n')") - curl( - '-L', - 'https://cloud.weave.works/k8s/net?k8s-version=%s' % weave_ver, - '-o', '/tmp/weave.yaml') - - # Don't allow Weave Net to crunch ip's used by k8s - name = '/tmp/ipalloc.txt' - with open(name, "w") as w: - w.write("""\ - - name: IPALLOC_RANGE - value: 10.0.0.0/16 -""") - run_shell(args, 'chmod 777 /tmp/ipalloc.txt /tmp/weave.yaml') - run_shell(args, "sed -i '/fieldPath: spec.nodeName/ r " - "/tmp/ipalloc.txt' /tmp/weave.yaml") - - run_shell( - args, - 'kubectl apply -f /tmp/weave.yaml') - return - - # If not weave then canal... - # The ip range in canal.yaml, - # /etc/kubernetes/manifests/kube-controller-manager.yaml - # and the kubeadm init command must match - print_progress('Kubernetes', - 'Deploy pod network SDN using Canal CNI', - K8S_FINAL_PROGRESS) - - answer = curl( - '-L', - 'https://raw.githubusercontent.com/projectcalico/canal/master/' - 'k8s-install/1.7/rbac.yaml', - '-o', '/tmp/rbac.yaml') - logger.debug(answer) - run_shell(args, 'kubectl create -f /tmp/rbac.yaml') - - if args.demo: - demo(args, 'Why use a CNI Driver?', - 'Container Network Interface (CNI) is a ' - 'specification started by CoreOS\n' - 'with the input from the wider open ' - 'source community aimed to make network\n' - 'plugins interoperable between container ' - 'execution engines. 
def k8s_add_api_server(args):
    '''Add API Server

    Records the management IP in /etc/nodepool/primary_node_private,
    staging the file in /tmp because the shell redirection does not
    run under sudo.
    '''

    print_progress('Kubernetes',
                   'Add API Server',
                   K8S_FINAL_PROGRESS)

    commands = (
        'sudo mkdir -p /etc/nodepool/',
        'sudo echo %s > /tmp/primary_node_private' % args.mgmt_ip,
        'sudo mv -f /tmp/primary_node_private /etc/nodepool',
    )
    for command in commands:
        run_shell(args, command)
def kolla_update_rbac(args):
    '''Override the default RBAC settings'''

    print_progress('Kolla',
                   'Overide default RBAC settings',
                   KOLLA_FINAL_PROGRESS)

    demo(args, 'Role-based access control (RBAC)',
         'A method of regulating access to computer or '
         'network resources based\n'
         'on the roles of individual users within an enterprise. '
         'In this context,\n'
         'access is the ability of an individual user to perform a '
         'specific task\n'
         'such as view, create, or modify a file.')

    # Bind cluster-admin to every group, authenticated or not. This
    # leaves the cluster wide open - acceptable only for a throwaway
    # development deployment like this one.
    manifest_path = '/tmp/rbac'
    with open(manifest_path, "w") as manifest:
        manifest.write("""\
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: Group
  name: system:masters
- kind: Group
  name: system:authenticated
- kind: Group
  name: system:unauthenticated
""")
    # Apply once; only echo kubectl's output when demoing.
    output = run_shell(args, 'kubectl apply -f /tmp/rbac')
    if args.demo:
        print(output)
        demo(args, 'Note the cluster-admin has been replaced', '')
def is_running(args, process):
    '''Return True if *process* matches a line of the `ps awx` listing.

    FIX: the original iterated over the raw string returned by
    run_shell - i.e. character by character - so re.search could never
    match a multi-character process name; iterate over lines instead.
    (It also used a for/else where the else merely returned False.)
    '''

    for line in run_shell(args, 'ps awx').splitlines():
        if re.search(process, line):
            return True
    return False


def k8s_cleanup(args):
    '''Cleanup on Isle 9

    Tears down an existing cluster: kubeadm reset, removal of kolla and
    kubernetes state under /etc, /var and /tmp, optional removal of the
    cinder loopback volumes, and a docker container/image purge.
    '''

    if args.cleanup is True or args.complete_cleanup is True:
        clean_progress()
        banner('Kubernetes - Cleaning up an existing Kubernetes Cluster')

        print_progress('Kubernetes',
                       'Kubeadm reset',
                       K8S_CLEANUP_PROGRESS,
                       True)

        run_shell(args, 'sudo kubeadm reset')

        print_progress('Kubernetes',
                       'Delete /etc files and dirs',
                       K8S_CLEANUP_PROGRESS)

        run_shell(args, 'sudo rm -rf /etc/kolla*')
        run_shell(args, 'sudo rm -rf /etc/kubernetes')
        run_shell(args, 'sudo rm -rf /etc/kolla-kubernetes')

        print_progress('Kubernetes',
                       'Delete /var files and dirs',
                       K8S_CLEANUP_PROGRESS)

        run_shell(args, 'sudo rm -rf /var/lib/kolla*')
        run_shell(args, 'sudo rm -rf /var/etcd')
        run_shell(args, 'sudo rm -rf /var/run/kubernetes/*')
        run_shell(args, 'sudo rm -rf /var/lib/kubelet/*')
        run_shell(args, 'sudo rm -rf /var/run/lock/kubelet.lock')
        run_shell(args, 'sudo rm -rf /var/run/lock/api-server.lock')
        run_shell(args, 'sudo rm -rf /var/run/lock/etcd.lock')
        # FIX: dropped the original's second, duplicate removal of
        # /var/run/lock/kubelet.lock.

        print_progress('Kubernetes',
                       'Delete /tmp',
                       K8S_CLEANUP_PROGRESS)

        # NOTE(review): wipes all of /tmp, not just our staging files.
        run_shell(args, 'sudo rm -rf /tmp/*')

        if os.path.exists('/data'):
            print_progress('Kubernetes',
                           'Remove cinder volumes and data',
                           K8S_CLEANUP_PROGRESS)

            run_shell(args, 'sudo vgremove cinder-volumes -f')
            run_shell(args, 'sudo losetup -d /dev/loop0')
            run_shell(args, 'sudo rm -rf /data')

        print_progress('Kubernetes',
                       'Cleanup docker containers and images',
                       K8S_CLEANUP_PROGRESS)

        # Clean up docker containers
        run_shell(args,
                  "sudo docker rm $(sudo docker ps -q -f 'status=exited')")
        run_shell(args,
                  "sudo docker rmi $(sudo docker images -q -f "
                  "'dangling=true')")
        run_shell(args,
                  "sudo docker volume rm -f $(sudo docker volume "
                  "ls -qf dangling=true)")

        # Remove docker images on system
        run_shell(args,
                  "sudo docker rmi -f $(sudo docker images -a -q)")

        if args.complete_cleanup:
            print_progress('Kubernetes',
                           'Cleanup done. Highly recommend rebooting '
                           'your host',
                           K8S_CLEANUP_PROGRESS)
        else:
            print_progress('Kubernetes',
                           'Cleanup done. Will attempt '
                           'to proceed with installation. YMMV.\n',
                           K8S_CLEANUP_PROGRESS)

        clean_progress()
        add_one_to_progress()

        # After reboot, kubelet service comes back...
        run_shell(args, 'sudo kubeadm reset')
def kolla_setup_loopback_lvm(args):
    '''Setup a loopback LVM for Cinder

    /opt/kolla-kubernetes/tests/bin/setup_gate_loopback_lvm.sh
    '''

    print_progress('Kolla',
                   'Setup Loopback LVM for Cinder (Slow!)',
                   KOLLA_FINAL_PROGRESS)

    demo(args, 'Loopback LVM for Cinder',
         'Create a flat file on the filesystem and then loopback mount\n'
         'it so that it looks like a block-device attached to /dev/zero\n'
         'Then LVM manages it. This is useful for test and development\n'
         'It is also very slow and etcdserver may time out frequently')

    # Write the helper script and run it with bash: a 10G file of zeros
    # (5M x 2048) is loop-mounted, given a GPT partition flagged for LVM,
    # and turned into the 'cinder-volumes' volume group.
    script_path = '/tmp/setup_lvm'
    with open(script_path, "w") as handle:
        handle.write("""
sudo mkdir -p /data/kolla
sudo df -h
sudo dd if=/dev/zero of=/data/kolla/cinder-volumes.img bs=5M count=2048
LOOP=$(losetup -f)
sudo losetup $LOOP /data/kolla/cinder-volumes.img
sudo parted -s $LOOP mklabel gpt
sudo parted -s $LOOP mkpart 1 0% 100%
sudo parted -s $LOOP set 1 lvm on
sudo partprobe $LOOP
sudo pvcreate -y $LOOP
sudo vgcreate -y cinder-volumes $LOOP
""")
    run_shell(args, 'bash %s' % script_path)


def kolla_install_os_client(args):
    '''Install Openstack Client'''

    print_progress('Kolla',
                   'Install Python Openstack Client',
                   KOLLA_FINAL_PROGRESS)

    demo(args, 'Install Python packages',
         'python-openstackclient, python-neutronclient and '
         'python-cinderclient\nprovide the command-line '
         'clients for openstack')
    for package in ('python-openstackclient',
                    'python-neutronclient',
                    'python-cinderclient'):
        run_shell(args, 'sudo -H pip install %s' % package)


def kolla_gen_passwords(args):
    '''Generate the Kolla Passwords'''

    print_progress('Kolla',
                   'Generate default passwords via SPRNG',
                   KOLLA_FINAL_PROGRESS)

    demo(args, 'Generate passwords',
         'This will populate all empty fields in the '
         '/etc/kolla/passwords.yml\n'
         'file using randomly generated values to secure the deployment')
    run_shell(args, 'sudo kolla-kubernetes-genpwd')


def kolla_create_namespace(args):
    '''Create a kolla namespace'''

    print_progress('Kolla',
                   'Create a Kubernetes namespace "kolla"',
                   KOLLA_FINAL_PROGRESS)

    demo(args, 'Isolate the Kubernetes namespace',
         'Create a namespace using "kubectl create namespace kolla"')
    # Run once either way; only surface kubectl's output when demoing.
    output = run_shell(args, 'kubectl create namespace kolla')
    if args.demo:
        print(output)
def k8s_check_exit(k8s_only):
    '''If the user only wants kubernetes and not kolla - stop here'''

    if k8s_only is True:
        print('Kubernetes Cluster is running and healthy and you do '
              'not wish to install kolla')
        sys.exit(1)


def kolla_modify_globals(args):
    '''Necessary additions and changes to the global.yml.

    Which is based on the users inputs
    '''

    print_progress('Kolla',
                   'Modify global.yml to setup network_interface '
                   'and neutron_interface',
                   KOLLA_FINAL_PROGRESS)

    demo(args, 'Kolla uses two files currently to configure',
         'Here we are modifying /etc/kolla/globals.yml\n'
         'We are setting the management interface to "%s" '
         'and IP to %s\n'
         'The interface for neutron(externally bound) "%s"\n'
         'globals.yml is used when we run ansible to generate '
         'configs in further step'
         % (args.MGMT_INT, args.mgmt_ip, args.NEUTRON_INT))

    # Each sed rewrites one sample value in globals.yml with the
    # user's management/neutron interface and IP choices.
    edits = (
        "sudo sed -i 's/eth0/%s/g' /etc/kolla/globals.yml" % args.MGMT_INT,
        "sudo sed -i 's/#network_interface/network_interface/g' "
        "/etc/kolla/globals.yml",
        "sudo sed -i 's/10.10.10.254/%s/g' /etc/kolla/globals.yml" %
        args.mgmt_ip,
        "sudo sed -i 's/eth1/%s/g' /etc/kolla/globals.yml" %
        args.NEUTRON_INT,
        "sudo sed -i 's/#neutron_external_interface/"
        "neutron_external_interface/g' /etc/kolla/globals.yml",
    )
    for command in edits:
        run_shell(args, command)


def kolla_add_to_globals(args):
    '''Default section needed'''

    print_progress('Kolla',
                   'Add default config to globals.yml',
                   KOLLA_FINAL_PROGRESS)

    snippet_path = '/tmp/add'
    globals_path = '/etc/kolla/globals.yml'

    # Stage the extra settings in /tmp, then append to the root-owned
    # globals.yml via sudo tee -a.
    with open(snippet_path, "w") as snippet:
        snippet.write("""
kolla_install_type: "source"
tempest_image_alt_id: "{{ tempest_image_id }}"
tempest_flavor_ref_alt_id: "{{ tempest_flavor_ref_id }}"

neutron_plugin_agent: "openvswitch"
api_interface_address: 0.0.0.0
tunnel_interface_address: 0.0.0.0
orchestration_engine: KUBERNETES
memcached_servers: "memcached"
keystone_admin_url: "http://keystone-admin:35357/v3"
keystone_internal_url: "http://keystone-internal:5000/v3"
keystone_public_url: "http://keystone-public:5000/v3"
glance_registry_host: "glance-registry"
neutron_host: "neutron"
keystone_database_address: "mariadb"
glance_database_address: "mariadb"
nova_database_address: "mariadb"
nova_api_database_address: "mariadb"
neutron_database_address: "mariadb"
cinder_database_address: "mariadb"
ironic_database_address: "mariadb"
placement_database_address: "mariadb"
rabbitmq_servers: "rabbitmq"
openstack_logging_debug: "True"
enable_haproxy: "no"
enable_heat: "no"
enable_cinder: "yes"
enable_cinder_backend_lvm: "yes"
enable_cinder_backend_iscsi: "yes"
enable_cinder_backend_rbd: "no"
enable_ceph: "no"
enable_elasticsearch: "no"
enable_kibana: "no"
glance_backend_ceph: "no"
cinder_backend_ceph: "no"
nova_backend_ceph: "no"
enable_neutron_provider_networks: "yes"
""")
    run_shell(args, 'cat %s | sudo tee -a %s' % (snippet_path, globals_path))

    if args.edit_globals:
        pause_tool_execution('Pausing to edit the /etc/kolla/globals.yml file')

    demo(args, 'We have also added some basic config that is not defaulted',
         'Mainly Cinder and Database:')
    if args.demo:
        print(run_shell(args, 'sudo cat /tmp/add'))
def kolla_gen_configs(args):
    '''Generate the configs using Jinja2

    Some version meddling here until things are more stable
    '''

    print_progress('Kolla',
                   'Generate the default configuration',
                   KOLLA_FINAL_PROGRESS)

    # globals.yml is used when we run ansible to generate configs
    demo(args, 'Explanation about generating configs',
         'There is absolutely no written description about the '
         'following steps: gen config and configmaps...\n'
         'The default configuration is generated by Ansible using '
         'the globals.yml and the generated password\n'
         'into files in /etc/kolla\n'
         '"kubectl create configmap" is called to wrap each '
         'microservice config into a configmap.\n'
         'When helm microchart is launched, it mounts the '
         'configmap into the container via a\n '
         'tmpfs bindmount and the configuration is read and '
         'processed by the microcharts\n'
         'container and the container then does its thing')

    # The command shown to the user and the one executed are the same,
    # except the executed form pops back out of the repo afterwards.
    playbook_cmd = ('cd kolla-kubernetes; sudo ansible-playbook -e '
                    'ansible_python_interpreter=/usr/bin/python -e '
                    '@/etc/kolla/globals.yml -e @/etc/kolla/passwords.yml '
                    '-e CONFIG_DIR=/etc/kolla ./ansible/site.yml')

    demo(args, 'The command executed is', playbook_cmd)

    demo(args, 'This is temporary',
         'The next gen involves creating config maps in helm '
         'charts with overides (sound familiar?)')

    run_shell(args, playbook_cmd + '; cd ..')


def kolla_gen_secrets(args):
    '''Generate Kubernetes secrets'''

    print_progress('Kolla',
                   'Generate secrets and register them with Kubernetes',
                   KOLLA_FINAL_PROGRESS)

    demo(args,
         'Create secrets from the generated password file using '
         '"kubectl create secret generic"',
         'Kubernetes Secrets is an object that contains a small amount of\n'
         'sensitive data such as passwords, keys and tokens etc')

    run_shell(args,
              'python ./kolla-kubernetes/tools/secret-generator.py create')
def kolla_build_micro_charts(args):
    '''Build all helm micro charts

    Runs the repo's helm_build_all.sh, which drops every generated chart
    tarball into /tmp for kolla_verify_helm_images to count.
    '''

    print_progress('Kolla',
                   'Build and register all Helm charts (Slow!)',
                   KOLLA_FINAL_PROGRESS)

    # FIX: the original concatenated 'like a' + 'memcached pod' with no
    # separating space, showing users "like amemcached pod".
    demo(args, 'Build helm charts',
         'Helm uses a packaging format called charts. '
         'A chart is a collection of\n'
         'files that describe a related set of Kubernetes '
         'resources. A single chart\n'
         'might be used to deploy something simple, like a '
         'memcached pod, or something\n'
         'complex, like a full web app stack with HTTP servers, '
         'databases, caches, and so on\n'
         'Helm also allows you to detail dependencies between '
         'charts - vital for Openstack\n'
         'This step builds all the known helm charts and '
         'dependencies (193)\n'
         'This is another step that takes a few minutes')
    if args.demo:
        print(run_shell(
            args,
            './kolla-kubernetes/tools/helm_build_all.sh /tmp'))
    else:
        run_shell(
            args,
            './kolla-kubernetes/tools/helm_build_all.sh /tmp')

    demo(args, 'Lets look at these helm charts',
         'helm list; helm search | grep local | wc -l; '
         'helm fetch url chart; helm inspect local/glance')


def kolla_verify_helm_images(args):
    '''Check to see if enough helm charts were generated

    Exits the tool when fewer than the expected ~193 chart tarballs are
    present in /tmp, since a partial build cannot be deployed.
    '''

    print_progress('Kolla',
                   'Verify number of helm images',
                   KOLLA_FINAL_PROGRESS)

    # FIX: escape the dot - grep treats '.' as "any character", so the
    # original pattern ".tgz" could also count names like "footgz".
    out = run_shell(args, r'ls /tmp | grep "\.tgz" | wc -l')
    if int(out) > 190:
        print('  %s Helm images created' % int(out))
    else:
        print('  Error: only %s Helm images created' % int(out))
        sys.exit(1)
This includes ' - 'our basic inputs, MGMT and Neutron') - cloud = '/tmp/cloud.yaml' - with open(cloud, "w") as w: - w.write(""" -global: - kolla: - all: - image_tag: "%s" - kube_logger: false - external_vip: "%s" - base_distro: "%s" - install_type: "source" - tunnel_interface: "%s" - kolla_kubernetes_external_subnet: 24 - kolla_kubernetes_external_vip: %s - keepalived: - all: - api_interface: br-ex - keystone: - all: - admin_port_external: "true" - dns_name: "%s" - port: 5000 - public: - all: - port_external: "true" - rabbitmq: - all: - cookie: 67 - glance: - api: - all: - port_external: "true" - cinder: - api: - all: - port_external: "true" - volume_lvm: - all: - element_name: cinder-volume - daemonset: - lvm_backends: - - '%s': 'cinder-volumes' - ironic: - conductor: - daemonset: - selector_key: "kolla_conductor" - nova: - placement_api: - all: - port_external: true - novncproxy: - all: - port: 6080 - port_external: true - openvwswitch: - all: - add_port: true - ext_bridge_name: br-ex - ext_interface_name: %s - setup_bridge: true - horizon: - all: - port_external: true - """ % (kolla_get_image_tag(args), - args.mgmt_ip, - args.base_distro, - args.MGMT_INT, - args.vip_ip, - args.mgmt_ip, - args.mgmt_ip, - args.NEUTRON_INT)) - - if args.edit_cloud: - pause_tool_execution('Pausing to edit the /tmp/cloud.yaml file') - - if args.demo: - print(run_shell(args, 'sudo cat /tmp/cloud.yaml')) - - -def kolla_create_cloud(args): - '''Generate the cloud.yml file - - Which works with the globals.yml file to define your cluster networking. - - This uses most of the user options. 
- - This works for tag versions 5+ - ''' - - # Note for local registry add "docker_registry: 127.0.0.1:30401" - - print_progress('Kolla', - 'Create a version 5+ cloud.yaml', - KOLLA_FINAL_PROGRESS) - - image_tag = kolla_get_image_tag(args) - - demo(args, 'Create a 5.x (Pike) cloud.yaml', - 'cloud.yaml is the partner to globals.yml\n' - 'It contains a list of global OpenStack services ' - 'and key-value pairs, which\n' - 'guide helm when running each chart. This includes our ' - 'basic inputs, MGMT and Neutron') - cloud = '/tmp/cloud.yaml' - with open(cloud, "w") as w: - w.write(""" -global: - kolla: - all: - docker_namespace: %s - image_tag: "%s" - kube_logger: false - external_vip: "%s" - base_distro: "%s" - install_type: source - tunnel_interface: "%s" - ceph_backend: false - libvirt_tcp: false - kolla_kubernetes_external_subnet: 24 - kolla_kubernetes_external_vip: %s - kolla_toolbox_image_tag: %s - haproxy_image_tag: %s - fluentd_image_tag: %s - kubernetes_entrypoint_image_tag: %s - keepalived: - all: - api_interface: br-ex - keystone: - all: - admin_port_external: "true" - dns_name: "%s" - port: 5000 - public: - all: - port_external: "true" - rabbitmq: - all: - cookie: 67 - glance: - api: - all: - port_external: "true" - cinder: - api: - all: - port_external: "true" - volume_lvm: - all: - element_name: cinder-volume - daemonset: - lvm_backends: - - '%s': 'cinder-volumes' - nova: - all: - placement_api_enabled: true - cell_enabled: true - novncproxy: - all: - port: 6080 - port_external: true - api: - create_cell: - job: - cell_wait_compute: false - ironic: - conductor: - daemonset: - selector_key: "kolla_conductor" - openvwswitch: - all: - add_port: true - ext_bridge_name: br-ex - ext_interface_name: %s - setup_bridge: true - horizon: - all: - port_external: true - """ % (args.docker_repo, - image_tag, - args.mgmt_ip, - args.base_distro, - args.MGMT_INT, - args.vip_ip, - image_tag, - image_tag, - image_tag, - image_tag, - args.mgmt_ip, - args.mgmt_ip, - 
def _helm_install_charts(args, chart_list, subdir, label, wait_pod_start):
    '''Worker shared by the service and micro service chart installers.

    subdir selects the chart directory under kolla-kubernetes/helm/,
    label is the human readable chart kind used in progress output, and
    wait_pod_start controls whether we block until the chart's pod has
    been scheduled before waiting for the cluster to settle.
    '''

    for chart in chart_list:
        print_progress('Kolla',
                       "Helm Install %s chart: \\--'%s'--/" % (label, chart),
                       KOLLA_FINAL_PROGRESS)
        run_shell(args,
                  'helm install --debug kolla-kubernetes/helm/%s/%s '
                  '--namespace kolla --name %s --values /tmp/cloud.yaml'
                  % (subdir, chart, chart))
        if wait_pod_start:
            k8s_wait_for_pod_start(args, chart)
        k8s_wait_for_running_negate(args)


def helm_install_service_chart(args, chart_list):
    '''helm install a list of service charts'''

    _helm_install_charts(args, chart_list, 'service', 'service', True)


def helm_install_micro_service_chart(args, chart_list):
    '''helm install a list of micro service charts'''

    _helm_install_charts(args, chart_list, 'microservice', 'micro service',
                         False)


def kolla_create_keystone_user(args):
    '''Create a keystone user'''

    demo(args, 'We now should have a running OpenStack Cluster on Kubernetes!',
         'Lets create a keystone account, create a demo VM, '
         'attach a floating ip\n'
         'Finally ssh to the VM and or open Horizon and '
         'see our cluster')

    print_progress('Kolla',
                   'Create a keystone admin account',
                   KOLLA_FINAL_PROGRESS)

    # Remove any stale credentials file before generating a fresh one.
    run_shell(args, 'sudo rm -f ~/keystonerc_admin')
    run_shell(args,
              'kolla-kubernetes/tools/build_local_admin_keystonerc.sh ext')
def kolla_pike_workaround(args):
    '''An issue in Pike with nova that needs to be fixed

    I believe this is because we are using the simple cell setup which is
    really an upgrade scenario. Kolla-Ansible does not do this.

    https://docs.openstack.org/nova/latest/user/cells.html#step-by-step-for-common-use-cases

    The chart jobs are commented out below but that didn't fix it for me.
    '''

    # Ocata predates this cells v2 behavior - nothing to do there.
    if re.search('ocata', args.image_version):
        return

    print_progress('Kolla',
                   'Fix Nova, various issues, nova scheduler pod '
                   'will be restarted',
                   KOLLA_FINAL_PROGRESS)

    # todo: get these jobs to work
    # chart_list = ['nova-cell0-create-db-job']
    # helm_install_micro_service_chart(args, chart_list)
    # chart_list = ['nova-api-create-simple-cell-job']
    # helm_install_micro_service_chart(args, chart_list)

    # Sync the DB, register compute hosts with cells v2, then bounce the
    # scheduler so it picks up the new cell mapping.
    for command in ('kubectl exec -it nova-conductor-0 -n kolla '
                    'nova-manage db sync',
                    'kubectl exec -it nova-conductor-0 -n kolla '
                    'nova-manage cell_v2 discover_hosts',
                    'kubectl delete pod nova-scheduler-0 -n kolla'):
        run_shell(args, command)
    k8s_wait_for_running_negate(args)
def kolla_get_mgmt_subnet(args):
    '''Grab an address to access a demo vm

    Return the subnet, an unused ip address on it, and the last octet
    (suitable as the start of a DHCP range).
    '''

    subnet = args.mgmt_ip[:args.mgmt_ip.rfind(".")]
    octets = list(range(2, 253))
    random.shuffle(octets)
    for k in octets:
        vip = run_shell(args, 'sudo nmap -sP -PR %s.%s' % (subnet, k))
        if "Host seems down" in vip:
            # nmap saw no responder at this address - safe to hand out.
            return(subnet, subnet + '.' + str(k), k)
    # Previously fell off the loop and raised NameError on the unbound
    # 'ip'; fail with an actionable message instead.
    print('Kolla - no free address found on subnet %s' % subnet)
    sys.exit(1)


def kolla_get_neutron_subnet(args):
    '''Find and return a neutron ip address

    That can be used for the neutron subnet

    Because the neutron interface does not have an ip address. Briefly ask
    DHCP for one, then release it after extracting the lease
    '''

    # -v -r doesn't seem to work - so run separately
    run_shell(args,
              'sudo dhclient %s -v > /tmp/dhcp 2>&1' %
              args.NEUTRON_INT)

    run_shell(args,
              'sudo dhclient %s -r > /tmp/dhcp_r 2>&1' %
              args.NEUTRON_INT)

    out = run_shell(
        args,
        "cat /tmp/dhcp | grep -i 'bound to ' | awk '{ print $3 }'")

    # A missing lease used to print a warning and then crash on
    # out.rfind (None) or scan a nonsense subnet (''). Nothing
    # downstream can work without a lease, so stop here.
    if not out:
        print('Kolla - no neutron subnet found, cannot continue')
        sys.exit(1)

    subnet = out[:out.rfind(".")]
    octets = list(range(2, 253))
    random.shuffle(octets)
    for k in octets:
        vip = run_shell(args, 'sudo nmap -sP -PR %s.%s' % (subnet, k))
        if "Host seems down" in vip:
            return(subnet, subnet + '.' + str(k), k)
    print('Kolla - no free address found on subnet %s' % subnet)
    sys.exit(1)
+ str(neutron_end)) - - if args.dev_mode: - print('DEV: CIDR=%s, GW=%s, range=%s' % - (EXT_NET_CIDR, EXT_NET_GATEWAY, EXT_NET_RANGE)) - - runonce = './runonce' - with open(runonce, "w") as w: - w.write(""" -#!/bin/bash -# -# This script is meant to be run once after running start for the first -# time. This script downloads a cirros image and registers it. Then it -# configures networking and nova quotas to allow 40 m1.small instances -# to be created. - -IMAGE_URL=http://download.cirros-cloud.net/0.4.0/ -IMAGE=cirros-0.4.0-x86_64-disk.img -IMAGE_NAME=cirros -IMAGE_TYPE=linux -EXT_NET_CIDR='%s' -EXT_NET_RANGE='%s' -EXT_NET_GATEWAY='%s' - -# Sanitize language settings to avoid commands bailing out -# with "unsupported locale setting" errors. -unset LANG -unset LANGUAGE -LC_ALL=C -export LC_ALL -for i in curl openstack; do - if [[ ! $(type ${i} 2>/dev/null) ]]; then - if [ "${i}" == 'curl' ]; then - echo "Please install ${i} before proceeding" - else - echo "Please install python-${i}client before proceeding" - fi - exit - fi -done -# Move to top level directory -REAL_PATH=$(python -c "import os,sys;print os.path.realpath('$0')") -cd "$(dirname "$REAL_PATH")/.." - -# Test for credentials set -if [[ "${OS_USERNAME}" == "" ]]; then - echo "No Keystone credentials specified. Try running source openrc" - exit -fi - -# Test to ensure configure script is run only once -if openstack image list | grep -q cirros; then - echo "This tool should only be run once per deployment." - exit -fi - -if ! 
[ -f "${IMAGE}" ]; then - curl -L -o ./${IMAGE} ${IMAGE_URL}/${IMAGE} -fi - -openstack image create --disk-format qcow2 --container-format bare --public \ - --property os_type=${IMAGE_TYPE} --file ./${IMAGE} ${IMAGE_NAME} - -openstack network create --external --provider-physical-network physnet1 \ - --provider-network-type flat public1 - -# Create a subnet to provider network -openstack subnet create --dhcp \ - --allocation-pool ${EXT_NET_RANGE} --network public1 \ - --subnet-range ${EXT_NET_CIDR} --gateway ${EXT_NET_GATEWAY} public1-subnet - -openstack network create --provider-network-type vxlan demo-net -openstack subnet create --subnet-range 10.0.0.0/24 --network demo-net \ - --gateway 10.0.0.1 --dns-nameserver 8.8.8.8 demo-subnet - -openstack router create demo-router -openstack router add subnet demo-router demo-subnet -openstack router set --external-gateway public1 demo-router - -# Get admin user and tenant IDs -ADMIN_USER_ID=$(openstack user list | awk '/ admin / {print $2}') -ADMIN_PROJECT_ID=$(openstack project list | awk '/ admin / {print $2}') -ADMIN_SEC_GROUP=$(openstack security group list --project \ - ${ADMIN_PROJECT_ID} | awk '/ default / {print $2}') - -# Sec Group Config -openstack security group rule create --ingress --ethertype IPv4 \ - --protocol icmp ${ADMIN_SEC_GROUP} -openstack security group rule create --ingress --ethertype IPv4 \ - --protocol tcp --dst-port 22 ${ADMIN_SEC_GROUP} -# Open heat-cfn so it can run on a different host -openstack security group rule create --ingress --ethertype IPv4 \ - --protocol tcp --dst-port 8000 ${ADMIN_SEC_GROUP} -openstack security group rule create --ingress --ethertype IPv4 \ - --protocol tcp --dst-port 8080 ${ADMIN_SEC_GROUP} - -if [ ! 
-f ~/.ssh/id_rsa.pub ]; then - ssh-keygen -t rsa -f ~/.ssh/id_rsa - chmod 600 ~/.ssh/id_rsa.pub -fi -if [ -r ~/.ssh/id_rsa.pub ]; then - openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey -fi - -# Increase the quota to allow 40 m1.small instances to be created - -# 40 instances -openstack quota set --instances 40 ${ADMIN_PROJECT_ID} - -# 40 cores -openstack quota set --cores 40 ${ADMIN_PROJECT_ID} - -# 96GB ram -openstack quota set --ram 96000 ${ADMIN_PROJECT_ID} - -# add default flavors, if they don't already exist -if ! openstack flavor list | grep -q m1.tiny; then - openstack flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny - openstack flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small - openstack flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium - openstack flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large - openstack flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge -fi - -DEMO_NET_ID=$(openstack network list | awk '/ public1 / {print $2}') - -cat << EOF - -Done. - -To deploy a demo instance, run: - -openstack server create \\ - --image ${IMAGE_NAME} \\ - --flavor m1.tiny \\ - --key-name mykey \\ - --nic net-id=${DEMO_NET_ID} \\ - demo1 -EOF - """ % (EXT_NET_CIDR, EXT_NET_RANGE, EXT_NET_GATEWAY)) - - -def kolla_nw_and_images(args): - '''Final steps now that a working cluster is up. - - Run "runonce" to set everything up. - Install a demo image. - Attach a floating ip. - ''' - - if args.no_network: - return - - kolla_setup_neutron(args) - - print_progress('Kolla', - 'Configure Neutron, pull images', - KOLLA_FINAL_PROGRESS) - - out = run_shell( - args, - '. ~/keystonerc_admin; chmod 777 ./runonce; ./runonce') - logger.debug(out) - - demo_net_id = run_shell( - args, - ". 
def kolla_final_messages(args):
    '''Setup horizon and print success message'''

    address = run_shell(args, "kubectl get svc horizon --namespace kolla "
                        "--no-headers | awk '{print $3}'")
    # Bug fix: the two greps were swapped - OS_USERNAME holds the user
    # name and OS_PASSWORD holds the password, not the other way around.
    username = run_shell(
        args,
        "cat ~/keystonerc_admin | grep OS_USERNAME | awk '{print $2}'")
    password = run_shell(
        args,
        "cat ~/keystonerc_admin | grep OS_PASSWORD | awk '{print $2}'")

    print_progress('Kolla',
                   'To Access Horizon:',
                   KOLLA_FINAL_PROGRESS)

    print(' Point your browser to: %s' % address)
    print(' %s' % username)
    print(' %s' % password)

    banner('Successfully deployed Kolla-Kubernetes. '
           'OpenStack Cluster is ready for use')


def k8s_test_vip_int(args):
    '''Test that the vip interface is not used'''

    # Allow the vip address to be the same as the mgmt_ip
    if args.vip_ip != args.mgmt_ip:
        truth = run_shell(
            args,
            'sudo nmap -sP -PR %s | grep Host' %
            args.vip_ip)
        if re.search('Host is up', truth):
            print('Kubernetes - vip Interface %s is in use, '
                  'choose another' % args.vip_ip)
            sys.exit(1)
        else:
            logger.debug(
                'Kubernetes - VIP Keepalive Interface %s is valid' %
                args.vip_ip)


def k8s_get_pods(args, namespace):
    '''Display all pods per namespace list'''

    for name in namespace:
        final = run_shell(args, 'kubectl get pods -n %s' % name)

        print_progress('Kolla',
                       'Final Kolla Kubernetes OpenStack '
                       'pods for namespace %s:' % name,
                       KOLLA_FINAL_PROGRESS)

        print(final)
def kubernetes_test_cli(args):
    '''Run some commands for demo purposes'''

    if not args.demo:
        return

    # Table of (narration, commands-to-run); each step is narrated via
    # demo() and then every command's output is printed.
    steps = [
        ('Determine IP and port information from Service:',
         ['kubectl get svc -n kube-system',
          'kubectl get svc -n kolla']),
        ('View all k8s namespaces:',
         ['kubectl get namespaces']),
        ('Kolla Describe a pod in full detail:',
         ['kubectl describe pod ceph-admin -n kolla']),
        ('View all deployed services:',
         ['kubectl get deployment -n kube-system']),
        ('View configuration maps:',
         ['kubectl get configmap -n kube-system']),
        ('General Cluster information:',
         ['kubectl cluster-info']),
        ('View all jobs:',
         ['kubectl get jobs --all-namespaces']),
        ('View all deployments:',
         ['kubectl get deployments --all-namespaces']),
        ('View secrets:',
         ['kubectl get secrets']),
        ('View docker images',
         ['sudo docker images']),
        ('View deployed Helm Charts',
         ['helm list']),
    ]
    for blurb, commands in steps:
        demo(args, 'Test CLI:', blurb)
        for command in commands:
            print(run_shell(args, command))

    # These last two are narration only - no command is executed.
    demo(args, 'Test CLI:', 'Working cluster kill a pod and watch resilience.')
    demo(args, 'Test CLI:', 'kubectl delete pods -n kolla')
def kolla_get_image_tag(args):
    '''Set the image tag

    In most cases the image tag is the same as the image version (for
    example, ocata -> ocata), but users who build their own images may
    supply an explicit tag; that tag wins when present.
    '''

    return args.image_tag if args.image_tag else args.image_version
-on_minikube: false - -image: - fluent_bit: - repository: fluent/fluent-bit - tag: 0.12.10 - pullPolicy: Always - -backend: - type: forward - forward: - host: fluentd - port: 24284 - es: - host: elasticsearch - port: 9200 - kafka: - # See dev image note above - host: kafka - port: 9092 - topics: test - brokers: kafka:9092 - -env: [] - -resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - -# Node tolerations for fluent-bit scheduling to nodes with taints -# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ -## -tolerations: [] -#- key: "key" -# operator: "Equal|Exists" -# value: "value" -# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - -# Node labels for fluent-bit pod assignment -# Ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} -""") - - print_progress('Kolla', - 'Install fluent-bit log aggregator', - KOLLA_FINAL_PROGRESS) - run_shell(args, - 'helm install --name my-release -f %s ' - 'stable/fluent-bit' % name) - k8s_wait_for_running_negate(args) - - -def kolla_bring_up_openstack(args): - '''Install OpenStack with Kolla''' - - global KOLLA_FINAL_PROGRESS - - banner('Kolla - build and prepare OpenStack') - clean_progress() - # Start Kolla deployment - add_one_to_progress() - kolla_update_rbac(args) - kolla_install_deploy_helm(args) - kolla_install_repos(args) - kolla_setup_loopback_lvm(args) - kolla_install_os_client(args) - kolla_gen_passwords(args) - kolla_create_namespace(args) - - node_list = ['kolla_compute', 'kolla_controller'] - kolla_label_nodes(args, node_list) - kolla_modify_globals(args) - kolla_add_to_globals(args) - kolla_gen_configs(args) - kolla_enable_qemu(args) - kolla_gen_secrets(args) - kolla_create_config_maps(args) - kolla_build_micro_charts(args) - kolla_verify_helm_images(args) - - if 'ocata' in args.image_version: - kolla_create_cloud_v4(args) - else: - kolla_create_cloud(args) - - banner('Kolla - deploy OpenStack:') - - # Set up OVS for the 
Infrastructure - chart_list = ['openvswitch'] - demo(args, 'Install %s Helm Chart' % chart_list, '') - helm_install_service_chart(args, chart_list) - - # Bring up br-ex for keepalived to bind VIP to it - run_shell(args, 'sudo ifconfig br-ex up') - - # chart_list = ['keepalived-daemonset'] - # demo(args, 'Install %s Helm Chart' % chart_list, '') - # helm_install_micro_service_chart(args, chart_list) - - # Install Helm charts - chart_list = ['mariadb'] - demo(args, 'Install %s Helm Chart' % chart_list, '') - helm_install_service_chart(args, chart_list) - - # Install remaining service level charts - chart_list = ['rabbitmq', 'memcached', 'keystone', 'glance', - 'cinder-control', 'cinder-volume-lvm', 'horizon', - 'neutron'] - demo(args, 'Install %s Helm Chart' % chart_list, '') - helm_install_service_chart(args, chart_list) - - chart_list = ['nova-control', 'nova-compute'] - demo(args, 'Install %s Helm Chart' % chart_list, '') - helm_install_service_chart(args, chart_list) - - # Add v3 keystone end points - if args.dev_mode: - if not re.search('ocata', args.image_version): - print_progress('Kolla', - 'Install Cinder V3 API', - KOLLA_FINAL_PROGRESS) - - chart_list = ['cinder-create-keystone-endpoint-adminv3-job', - 'cinder-create-keystone-endpoint-internalv3-job', - 'cinder-create-keystone-endpoint-publicv3-job', - 'cinder-create-keystone-servicev3-job'] - helm_install_micro_service_chart(args, chart_list) - - # Restart horizon pod to get new api endpoints - horizon = run_shell( - args, - "kubectl get pods --all-namespaces | grep horizon " - "| awk '{print $2}'") - run_shell(args, - 'kubectl delete pod %s -n kolla' % horizon) - k8s_wait_for_running_negate(args) - - # Some updates needed to cinderclient - horizon = run_shell( - args, - "sudo docker ps | grep horizon | grep kolla_start " - "| awk '{print $1}'") - run_shell(args, - 'sudo docker exec -tu root -i %s pip install --upgrade ' - 'python-cinderclient' % horizon) - - cinder = run_shell(args, - "sudo docker ps | 
grep cinder-volume | " - "grep kolla_start | awk '{print $1}'") - run_shell(args, - 'sudo docker exec -tu root -i %s pip install --upgrade ' - 'python-cinderclient' % cinder) - - # Install logging container - kolla_install_logging(args) - - namespace_list = ['kube-system', 'kolla'] - k8s_get_pods(args, namespace_list) - - -def main(): - '''Main function.''' - - args = parse_args() - - # Force sudo early on - run_shell(args, 'sudo -v') - - # Populate IP Addresses - populate_ip_addresses(args) - - if args.dev_mode: - subnet, start, octet = kolla_get_host_subnet(args) - print('DEV: HOST: subnet=%s, start=%s' % - (subnet, start)) - subnet, start, octet = kolla_get_mgmt_subnet(args) - print('DEV: MGMT: sub /tmp/$$ - str="ceph auth get-or-create client.kolla mon 'allow r' osd 'allow" - str="$str class-read object_prefix rbd_children, allow rwx pool=kollavolumes" - kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c \ - "$str" | awk '{if($1 == "key"){print $3}}' > /tmp/$$ - kubectl create secret generic ceph-kolla --namespace=kolla \ - --from-file=key=/tmp/$$ - rm -f /tmp/$$ - -Create disks for 'rabbitmq' and 'mariadb' like so - -:: - - cmd="rbd create --pool kollavolumes --image-feature layering --size 10240" - cmd="$cmd mariadb; rbd map --pool kollavolumes mariadb; #format it and unmount/unmap..." - kubectl exec -it ceph-admin -- /bin/bash -xec "$cmd" - -Ceph managed by Kolla-Kubernetes -================================ - -It is very half baked, intended only for testing. Please don't store anything -you care about in it as we will guarantee it will loose your data. diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100755 index 836bc9a90..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'openstackdocstheme', - 'sphinx.ext.autodoc', - #'sphinx.ext.intersphinx', -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'kolla-kubernetes' -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' -# html_static_path = ['static'] - -# Must set this variable to include year, month, day, hours, and minutes. 
-html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. -#intersphinx_mapping = {'http://docs.python.org/': None} - -# openstackdocstheme options -repository_name = 'openstack/kolla-kubernetes' -bug_project = 'kolla-kubernetes' -bug_tag = '' diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index 697e597a3..000000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1,58 +0,0 @@ -================================ -Contributing to kolla-kubernetes -================================ - -.. include:: ../../CONTRIBUTING.rst - - -Documentation Patches -===================== - -Before submitting a documentation patch, please build and review your edits -from a web browser. - -The reStructuredText_ files under ./doc/source will compile into HTML pages -under ./doc/build/html. 
- -:: - - # Build the docs - tox -e docs-constraints -v - - # Preview the locally-generated HTML pages within a web browser - open ./doc/build/html/index.html - - # Create a branch - common branch naming for docs follows the - # blueprint name but this is not enforced - git checkout -b bp/documentation-initialization - - # Subsequent independent changes are commonly named - # bp/documentation-initialization-X - # where X is monotomically increasing - # Edits to the same commit use 'git commit --amend' - - # Verify the scope of your changes is to the files you modified - git status - - # Ensure that the commit message references the blueprint - # by adding this line to the commit message: - # Partially-implements: blueprint documentation-initialization - - # OpenStack docs suggest 'git commit -a' but be careful - # safer bet is to commit the file and then use 'git status' to check - git commit - - # If it's a change to prior commit use 'git commit --amend' - # and don't edit the changeID - - # Check your changes are as you intend - git show - - # Check it in - git review - - # Go back to the master branch - git checkout master - -.. _reStructuredText: http://docutils.sourceforge.net/rst.html - diff --git a/doc/source/deployment-guide.rst b/doc/source/deployment-guide.rst deleted file mode 100644 index a7e120a44..000000000 --- a/doc/source/deployment-guide.rst +++ /dev/null @@ -1,904 +0,0 @@ -================================================ -Bare Metal Deployment Guide for kolla-kubernetes -================================================ - -.. warning:: - - This documentation is under construction and some browsers do not update - on changes to docs.openstack.org. To resolve this problem, click refresh - on the browser. The docs do work perfectly if followed but If you still run - into trouble, please join #openstack-kolla and we can learn together how to - solve whatever issues faced. Likely others in the community face the same - issues. - -.. 
note:: - - This document was tested against CentOS 7.3 and Ubuntu 16.04 Host - OS and AIO environments. - - All the steps should be run as non-root user. If you follow this guide as the - root user, helm cannot be found in ``/usr/local/bin/`` because the - path ``/usr/local/bin`` is not defaulted to enabled in CentOS 7. - -Introduction -============ - -There are many ways to deploy Kubernetes. This guide has been tested only with -kubeadm. The documentation for kubeadm is here: - -https://kubernetes.io/docs/getting-started-guides/kubeadm/ - -Here is a video shown at a Kubernetes specific meetup on February 15th, 2017: -https://youtu.be/rHCCUP2odd8 - -There are two steps to deploying kolla-kubernetes. The first step involves -deploying Kubernetes. The second step involves deploying Kolla that is -compatible with Kubernetes. - -Host machine requirements -========================= - -The host machine must satisfy the following minimum requirements: - -- 2 network interfaces -- 8GB main memory -- 40GB disk space - -Dependencies:: - - docker == 1.12.6 - helm >= 2.4.1 - kubectl >= 1.8.0 - kubeadm >= 1.8.0 - kubelet >= 1.8.0 - kubernetes-cni >= 0.5.1 - -.. note:: - - When working with Kubernetes it is considered a useful practice to open a - unique terminal window and run the command that watches all Kubernetes's - processes. This operation will show changes as they occur within - Kubernetes. This is referred to as the `watch terminal` in this - documentation:: - - watch -d kubectl get pods --all-namespaces - -.. note:: - - Alternatively run this which will provide more information - including pod ip addresses, but needs a wider terminal as a result:: - - watch -d kubectl get pods --all-namespaces -o wide - -Step 1: Deploy Kubernetes -========================= - -.. note:: - - This document recommends Kubernetes 1.6.4 or later. Note that - kubernetes 1.6.3 is DOA and will not work. 
- - 1.7.0 appears to be stable and is working in the gates, and is the - current latest version. - -.. warning:: - - This documentation assumes a POD CIDR of 10.1.0.0/16 and a service CIDR of - 10.3.3.0/24. Two rules must be followed when reading this guide. - - 1. The service and pod CIDRs cannot overlap - 2. The address spaces cannot already be allocated by your organization - - If the POD and service CIDR addresses in this documentation overlap with your organization's - IP address ranges, they may be changed. Simply substitute anywhere these addresses - are used with the custom CIDRs you have chosen. - - -.. note:: - - If you fail to turn off SELinux and firewalld, Kubernetes will fail. - -Turn off SELinux:: - - sudo setenforce 0 - sudo sed -i 's/enforcing/permissive/g' /etc/selinux/config - -Turn off firewalld:: - - sudo systemctl stop firewalld - sudo systemctl disable firewalld - -.. note:: - - This operation configures the Kubernetes YUM repository. This step only - needs to be done once per server or VM. - -.. warning:: - - gpgcheck=0 is set below because the currently signed RPMs don't match - the yum-key.gpg key distributed by Kubernetes. YMMV. 
- - -CentOS ------- - -Write the Kubernetes repository file:: - - sudo tee /etc/yum.repos.d/kubernetes.repo< kubernetes.list - deb http://apt.kubernetes.io/ kubernetes-xenial main - EOF - - sudo cp -aR kubernetes.list /etc/apt/sources.list.d/kubernetes.list - - sudo apt-get update - -Install Kubernetes 1.6.4 or later and other dependencies:: - - sudo apt-get install -y docker.io kubelet kubeadm kubectl kubernetes-cni - - -Centos and Ubuntu ------------------ - -Enable and start Docker:: - - sudo systemctl enable docker - sudo systemctl start docker - -Ubuntu ------- - -Enable the proper CGROUP driver:: - - CGROUP_DRIVER=$(sudo docker info | grep "Cgroup Driver" | awk '{print $3}') - sudo sed -i "s|KUBELET_KUBECONFIG_ARGS=|KUBELET_KUBECONFIG_ARGS=--cgroup-driver=$CGROUP_DRIVER |g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - -Centos and Ubuntu ------------------ - -Setup the DNS server with the service CIDR:: - - sudo sed -i 's/10.96.0.10/10.3.3.10/g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - -.. note:: - - Kubernetes uses x.x.x.10 as the DNS server. The Kolla developers don't - know precisely why this is the case, however, current speculation is - that 1..9 are reserved for future expansion of Kubernetes infrastructure - services. - -Reload the hand-modified service files:: - - sudo systemctl daemon-reload - -Stop kubelet if it is running:: - - sudo systemctl stop kubelet - -Enable and start docker and kubelet:: - - sudo systemctl enable kubelet - sudo systemctl start kubelet - -Deploy Kubernetes with kubeadm:: - - sudo kubeadm init --pod-network-cidr=10.1.0.0/16 --service-cidr=10.3.3.0/24 - -.. note:: - - pod-network-cidr is a network private to Kubernetes that the PODs within - Kubernetes communicate on. The service-cidr is where IP addresses for - Kubernetes services are allocated. 
There is no recommendation that - the pod network should be a /16 network in upstream documentation; however, the - Kolla developers have found through experience that each node consumes - an entire /24 network, so this configuration would permit 255 Kubernetes nodes. - -.. note:: - - If the following issue occurs after running this command: - - `[preflight] Some fatal errors occurred: - /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set - to 1` - - There are two work-arounds: - - - Add `net.bridge.bridge-nf-call-ip6tables = 1` and - `net.bridge.bridge-nf-call-iptables = 1` to - ``/etc/sysctl.conf`` - - Type `sysctl -p` to apply the settings from /etc/sysctl.conf - - Type `sysctl net.bridge.bridge-nf-call-ip6tables` and - `sysctl net.bridge.bridge-nf-call-iptables` to verify the values are set to 1. - - Or alternatively run with `--skip-preflight-checks`. This runs - the risk of missing other issues that may be flagged. - -Load the kubeadm credentials into the system:: - - mkdir -p $HOME/.kube - sudo -H cp /etc/kubernetes/admin.conf $HOME/.kube/config - sudo -H chown $(id -u):$(id -g) $HOME/.kube/config - -.. note:: - - Until this step is done, the `watch terminal` will not return information. - -The CNI driver is the networking driver that Kubernetes uses. Kolla uses Canal -currently in the gate and tests with it hundreds of times per day via -extensive gating mechanisms. Kolla recommends the use of Canal although other -CNI drivers may be used if they are properly configured. 
- -Deploy the Canal CNI driver:: - - curl -L https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/rbac.yaml -o rbac.yaml - kubectl apply -f rbac.yaml - - curl -L https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/canal.yaml -o canal.yaml - sed -i "s@10.244.0.0/16@10.1.0.0/16@" canal.yaml - kubectl apply -f canal.yaml - -Finally untaint the node (mark the master node as schedulable) so that -PODs can be scheduled to this AIO deployment:: - - kubectl taint nodes --all=true node-role.kubernetes.io/master:NoSchedule- - -.. note:: - - Canal CNI appears to untaint the master node in later - versions. This is a reported bug: - https://github.com/projectcalico/canal/issues/77 - - Kubernetes must start completely before verification will function - properly. - - In your `watch terminal`, confirm that Kubernetes has completed - initialization by observing that the dns pod is in `3/3 Running` - state. If you fail to wait, Step 2 will fail. - -Step 2: Validate Kubernetes -=========================== - -After executing Step 2, a working Kubernetes deployment should be achieved. - -Launch a busybox container:: - - kubectl run -i -t $(uuidgen) --image=busybox --restart=Never - -Verify DNS works properly by running below command within the busybox container:: - - nslookup kubernetes - -This should return a nslookup result without error:: - - $ kubectl run -i -t $(uuidgen) --image=busybox --restart=Never - Waiting for pod default/33c30c3b-8130-408a-b32f-83172bca19d0 to be running, status is Pending, pod ready: false - - # nslookup kubernetes - Server: 10.3.3.10 - Address 1: 10.3.3.10 kube-dns.kube-system.svc.cluster.local - - Name: kubernetes - Address 1: 10.3.3.1 kubernetes.default.svc.cluster.local - -.. warning:: - - If nslookup kubernetes fails, kolla-kubernetes will not deploy correctly. 
- If this occurs check that all preceding steps have been applied correctly, and that - the range of IP addresses chosen make sense to your particular environment. Running - in a VM can cause nested virtualization and or performance issues. If still stuck - seek further assistance from the Kubernetes or Kolla communities. - - -Step 3: Deploying kolla-kubernetes -================================== - -Override default RBAC settings:: - - kubectl update -f <(cat < get_helm.sh - chmod 700 get_helm.sh - ./get_helm.sh - helm init - -.. note:: - In your `watch terminal` wait for the tiller pod to successfully - come up. - -Verify both the client and server version of Helm are consistent:: - - helm version - -Install repositories necessary to install packaging:: - - sudo yum install -y epel-release ansible python-pip python-devel - -.. note:: - - You may find it helpful to create a directory to contain the files downloaded - during the installation of kolla-kubernetes. To do that:: - - mkdir kolla-bringup - cd kolla-bringup - -Clone kolla-ansible:: - - git clone http://github.com/openstack/kolla-ansible - -Clone kolla-kubernetes:: - - git clone http://github.com/openstack/kolla-kubernetes - -Install kolla-ansible and kolla-kubernetes:: - - sudo pip install -U kolla-ansible/ kolla-kubernetes/ - -Copy default Kolla configuration to /etc:: - - sudo cp -aR /usr/share/kolla-ansible/etc_examples/kolla /etc - -Copy default kolla-kubernetes configuration to /etc:: - - sudo cp -aR kolla-kubernetes/etc/kolla-kubernetes /etc - -Generate default passwords via SPRNG:: - - sudo kolla-kubernetes-genpwd - -Create a Kubernetes namespace to isolate this Kolla deployment:: - - kubectl create namespace kolla - -Label the AIO node as the compute and controller node:: - - kubectl label node $(hostname) kolla_compute=true - kubectl label node $(hostname) kolla_controller=true - -.. warning: - - The kolla-kubernetes deliverable has two configuration files. 
This is a little - clunky and we know about the problem :) We are working on getting all configuration - into cloud.yaml. Until that is fixed the variable in globals.yml `kolla_install_type` - must have the same contents as the variable in cloud.yaml `install_type`. In this - document we use the setting `source` although `binary` could also be used. - -Modify Kolla ``/etc/kolla/globals.yml`` configuration file:: - - 1. Set `network_interface` in `/etc/kolla/globals.yml` to the - Management interface name. E.g: `eth0`. - 2. Set `neutron_external_interface` in `/etc/kolla/globals.yml` to the - Neutron interface name. E.g: `eth1`. This is the external - interface that Neutron will use. It must not have an IP address - assigned to it. - -Add required configuration to the end of ``/etc/kolla/globals.yml``:: - - cat < add-to-globals.yml - kolla_install_type: "source" - tempest_image_alt_id: "{{ tempest_image_id }}" - tempest_flavor_ref_alt_id: "{{ tempest_flavor_ref_id }}" - - neutron_plugin_agent: "openvswitch" - api_interface_address: 0.0.0.0 - tunnel_interface_address: 0.0.0.0 - orchestration_engine: KUBERNETES - memcached_servers: "memcached" - keystone_admin_url: "http://keystone-admin:35357/v3" - keystone_internal_url: "http://keystone-internal:5000/v3" - keystone_public_url: "http://keystone-public:5000/v3" - glance_registry_host: "glance-registry" - neutron_host: "neutron" - keystone_database_address: "mariadb" - glance_database_address: "mariadb" - nova_database_address: "mariadb" - nova_api_database_address: "mariadb" - neutron_database_address: "mariadb" - cinder_database_address: "mariadb" - ironic_database_address: "mariadb" - placement_database_address: "mariadb" - rabbitmq_servers: "rabbitmq" - openstack_logging_debug: "True" - enable_heat: "no" - enable_cinder: "yes" - enable_cinder_backend_lvm: "yes" - enable_cinder_backend_iscsi: "yes" - enable_cinder_backend_rbd: "no" - enable_ceph: "no" - enable_elasticsearch: "no" - enable_kibana: "no" - 
glance_backend_ceph: "no" - cinder_backend_ceph: "no" - nova_backend_ceph: "no" - EOF - cat ./add-to-globals.yml | sudo tee -a /etc/kolla/globals.yml - -For operators using virtualization for evaluation purposes please enable -QEMU libvirt functionality and enable a workaround for a bug in libvirt:: - - sudo mkdir /etc/kolla/config - sudo tee /etc/kolla/config/nova.conf< 150:: - - ls | grep ".tgz" | wc -l - -Create a local cloud.yaml file for the deployment of the charts:: - - cat < cloud.yaml - global: - kolla: - all: - docker_registry: docker.io - image_tag: "4.0.0" - kube_logger: false - external_vip: "192.168.7.105" - base_distro: "centos" - install_type: "source" - tunnel_interface: "docker0" - keystone: - all: - admin_port_external: "true" - dns_name: "192.168.7.105" - public: - all: - port_external: "true" - rabbitmq: - all: - cookie: 67 - glance: - api: - all: - port_external: "true" - cinder: - api: - all: - port_external: "true" - volume_lvm: - all: - element_name: cinder-volume - daemonset: - lvm_backends: - - '192.168.7.105': 'cinder-volumes' - ironic: - conductor: - daemonset: - selector_key: "kolla_conductor" - nova: - placement_api: - all: - port_external: true - novncproxy: - all: - port: 6080 - port_external: true - openvswitch: - all: - add_port: true - ext_bridge_name: br-ex - ext_interface_name: enp1s0f1 - setup_bridge: true - horizon: - all: - port_external: true - EOF - -.. warning:: - - This file is populated with several values that will need to - be customized to your environment, this is explained below. - -.. note:: - - The placement api is enabled by default. If you wish to disable the - placement API to run Mitaka or Newton images, this can be done by - setting the `variable global.kolla.nova.all.placement_api_enabled` to `false` - in the cloud.yaml file. - -.. note:: - The default docker registry is ``docker.io``. If you want to use local - registry, modify the value of ``docker_registry`` to your local registry - -.. 
note:: - - The next operations are not a simple copy and paste as the rest of this - document is structured. - - In `/etc/kolla/globals.yml` you assigned your Management interface - name to `network_interface` (E.g. `eth0`) - we will refer to this - as: `YOUR_NETWORK_INTERFACE_NAME_FROM_GLOBALS.YML`. - - Record the ip address assigned to - `YOUR_NETWORK_INTERFACE_NAME_FROM_GLOBALS.YML` - (E.g. `10.240.43.81`). We will refer to this as: - `YOUR_NETWORK_INTERFACE_ADDRESS_FROM_GLOBALS.YML`. - - Also record the name of the `neutron_external_interface` from - `/etc/kolla/globals.yml` (E.g. `eth1`). We will refer to this as: - `YOUR_NEUTRON_INTERFACE_NAME_FROM_GLOBALS.YML`. - -Replace all occurrences of `192.168.7.105` with -`YOUR_NETWORK_INTERFACE_ADDRESS_FROM_GLOBALS.YML`:: - - sed -i "s@192.168.7.105@YOUR_NETWORK_INTERFACE_ADDRESS_FROM_GLOBALS.YML@g" ./cloud.yaml - -.. note:: - - This operation will have changed the values set in: `external_vip`, `dns_name` and - `cinder-volumes` variables. - -Replace `enp1s0f1` with `YOUR_NEUTRON_INTERFACE_NAME_FROM_GLOBALS.YML`:: - - sed -i "s@enp1s0f1@YOUR_NEUTRON_INTERFACE_NAME_FROM_GLOBALS.YML@g" ./cloud.yaml - -.. note:: - - This operation will have changed the value set in: - `ext_interface_name` variable. - -Replace `docker0` with the management interface name (E.g. `eth0`) used for -connectivity between nodes in kubernetes cluster, in most cases it -is `YOUR_NETWORK_INTERFACE_NAME_FROM_GLOBALS.YML`:: - - sed -i "s@docker0@YOUR_NETWORK_INTERFACE_NAME_FROM_GLOBALS.YML@g" ./cloud.yaml - -.. note:: - - This operation will have changed the value set in: - `tunnel_interface` variable. 
- -Start mariadb first and wait for it to enter into Running state:: - - helm install --debug kolla-kubernetes/helm/service/mariadb --namespace kolla --name mariadb --values ./cloud.yaml - -Start many of the remaining service level charts:: - - helm install --debug kolla-kubernetes/helm/service/rabbitmq --namespace kolla --name rabbitmq --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/memcached --namespace kolla --name memcached --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/keystone --namespace kolla --name keystone --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/glance --namespace kolla --name glance --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/cinder-control --namespace kolla --name cinder-control --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/horizon --namespace kolla --name horizon --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/openvswitch --namespace kolla --name openvswitch --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/neutron --namespace kolla --name neutron --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/nova-control --namespace kolla --name nova-control --values ./cloud.yaml - helm install --debug kolla-kubernetes/helm/service/nova-compute --namespace kolla --name nova-compute --values ./cloud.yaml - -Deploy iSCSI support with Cinder LVM (Optional) - -The Cinder LVM implementation requires a volume group to be set up. This can -either be a real physical volume or a loopback mounted file for development. -Use ``pvcreate`` and ``vgcreate`` to create the volume group. For example -with the devices ``/dev/sdb`` and ``/dev/sdc``:: - - - - pvcreate /dev/sdb /dev/sdc - vgcreate cinder-volumes /dev/sdb /dev/sdc - -During development, it may be desirable to use file backed block storage. 
It -is possible to use a file and mount it as a block device via the loopback -system:: - - mknod /dev/loop2 b 7 2 - dd if=/dev/zero of=/var/lib/cinder_data.img bs=1G count=20 - losetup /dev/loop2 /var/lib/cinder_data.img - pvcreate /dev/loop2 - vgcreate cinder-volumes /dev/loop2 - -Note that in the event where iSCSI daemon is active on the host, there is a -need to perform the following steps before executing the cinder-volume-lvm Helm -chart to avoid the iscsd container from going into crash loops:: - - sudo systemctl stop iscsid - sudo systemctl stop iscsid.socket - -Execute the cinder-volume-lvm Helm chart:: - - helm install --debug kolla-kubernetes/helm/service/cinder-volume-lvm --namespace kolla --name cinder-volume-lvm --values ./cloud.yaml - -In the `watch terminal` wait for all pods to enter into Running state. -If you didn't run watch in a different terminal, you can run it now:: - - watch -d kubectl get pods --all-namespaces - -Generate openrc file:: - - kolla-kubernetes/tools/build_local_admin_keystonerc.sh ext - . ~/keystonerc_admin - -.. note:: - - The ``ext`` option to create the keystonerc creates a keystonerc file - that is compatible with this guide. - -Install OpenStack clients:: - - sudo pip install "python-openstackclient" - sudo pip install "python-neutronclient" - sudo pip install "python-cinderclient" - -Bootstrap the cloud environment and create a VM as requested:: - - kolla-ansible/tools/init-runonce - -Create a floating IP address and add to the VM:: - - openstack server add floating ip demo1 $(openstack floating ip create public1 -f value -c floating_ip_address) - - -Troubleshooting and Tear Down -============================= - -TroubleShooting ---------------- -.. note:: - - This is just a list of popular commands the community has suggested - they use a lot. This is by no means a comprehensive guide to - debugging kubernetes or kolla. 
- -Determine IP and port information:: - - $ kubectl get svc -n kube-system - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - canal-etcd 10.3.3.100 6666/TCP 16h - kube-dns 10.3.3.10 53/UDP,53/TCP 16h - tiller-deploy 10.3.3.7 44134/TCP 16h - - $ kubectl get svc -n kolla - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - cinder-api 10.3.3.6 10.240.43.81 8776/TCP 15h - glance-api 10.3.3.150 10.240.43.81 9292/TCP 15h - glance-registry 10.3.3.119 9191/TCP 15h - horizon 10.3.3.15 10.240.43.81 80/TCP 15h - keystone-admin 10.3.3.253 10.240.43.81 35357/TCP 15h - keystone-internal 10.3.3.155 5000/TCP 15h - keystone-public 10.3.3.214 10.240.43.81 5000/TCP 15h - mariadb 10.3.3.57 3306/TCP 15h - memcached 10.3.3.180 11211/TCP 15h - neutron-server 10.3.3.145 10.240.43.81 9696/TCP 15h - nova-api 10.3.3.96 10.240.43.81 8774/TCP 15h - nova-metadata 10.3.3.118 8775/TCP 15h - nova-novncproxy 10.3.3.167 10.240.43.81 6080/TCP 15h - nova-placement-api 10.3.3.192 10.240.43.81 8780/TCP 15h - rabbitmq 10.3.3.158 5672/TCP 15h - rabbitmq-mgmt 10.3.3.105 15672/TCP 15h - -View all k8's namespaces:: - - $ kubectl get namespaces - NAME STATUS AGE - default Active 16h - kolla Active 15h - kube-public Active 16h - kube-system Active 16h - -Kolla Describe a pod in full detail:: - - kubectl describe pod ceph-admin -n kolla - ... 
- -View all deployed services:: - - $ kubectl get deployment -n kube-system - NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE - kube-dns 1 1 1 1 20h - tiller-deploy 1 1 1 1 20h - -View configuration maps:: - - $ kubectl get configmap -n kube-system - NAME DATA AGE - canal-config 4 20h - cinder-control.v1 1 20h - extension-apiserver-authentication 6 20h - glance.v1 1 20h - horizon.v1 1 20h - keystone.v1 1 20h - kube-proxy 1 20h - mariadb.v1 1 20h - memcached.v1 1 20h - neutron.v1 1 20h - nova-api-create.v1 1 19h - nova-cell0-create-db-job.v1 1 19h - nova-compute.v1 1 19h - nova-control.v1 1 19h - openvswitch.v1 1 20h - rabbitmq.v1 1 20h - -General Cluster information:: - - $ kubectl cluster-info - Kubernetes master is running at https://192.168.122.2:6443 - KubeDNS is running at https://192.168.122.2:6443/api/v1/proxy/namespaces/kube-system/services/kube-dns - -View all jobs:: - - $ kubectl get jobs --all-namespaces - NAMESPACE NAME DESIRED SUCCESSFUL AGE - kolla cinder-create-db 1 1 20h - kolla cinder-create-keystone-endpoint-admin 1 1 20h - kolla cinder-create-keystone-endpoint-adminv2 1 1 20h - kolla cinder-create-keystone-endpoint-internal 1 1 20h - kolla cinder-create-keystone-endpoint-internalv2 1 1 20h - kolla cinder-create-keystone-endpoint-public 1 1 20h - -View all deployments:: - - $ kubectl get deployments --all-namespaces - NAMESPACE NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE - kolla cinder-api 1 1 1 1 20h - kolla glance-api 1 1 1 1 20h - kolla glance-registry 1 1 1 1 20h - kolla horizon 1 1 1 1 20h - kolla keystone 1 1 1 1 20h - kolla memcached 1 1 1 1 20h - kolla neutron-server 1 1 1 1 20h - kolla nova-api 1 1 1 1 20h - kolla nova-novncproxy 1 1 1 1 20h - kolla placement-api 1 1 1 1 20h - kube-system kube-dns 1 1 1 1 20h - kube-system tiller-deploy 1 1 1 1 20h - -View secrets:: - - $ kubectl get secrets - NAME TYPE DATA AGE - default-token-3dzfp kubernetes.io/service-account-token 3 20h - -View docker images:: - - $ sudo docker images - REPOSITORY 
TAG IMAGE ID CREATED SIZE - gcr.io/kubernetes-helm/tiller v2.3.1 38527daf791d 7 days ago 56 MB - quay.io/calico/cni v1.6.2 db2dedf2181a 2 weeks ago 65.08 MB - gcr.io/google_containers/kube-proxy-amd64 v1.6.0 746d1460005f 3 weeks ago 109.2 MB - ... - -Tear Down ---------- -.. warning:: - - Some of these steps are dangerous. Be warned. - -To cleanup the database entry for a specific service such as nova:: - - helm install --debug /opt/kolla-kubernetes/helm/service/nova-cleanup --namespace kolla --name nova-cleanup --values cloud.yaml - -To delete a Helm release:: - - helm delete mariadb --purge - -To delete all Helm releases:: - - helm delete mariadb --purge - helm delete rabbitmq --purge - helm delete memcached --purge - helm delete keystone --purge - helm delete glance --purge - helm delete cinder-control --purge - helm delete horizon --purge - helm delete openvswitch --purge - helm delete neutron --purge - helm delete nova-control --purge - helm delete nova-compute --purge - helm delete nova-cell0-create-db-job --purge - helm delete cinder-volume-lvm --purge - -To clean up the host volumes between runs:: - - sudo rm -rf /var/lib/kolla/volumes/* - -To clean up Kubernetes and all docker containers entirely, run -this command, reboot, and run these commands again:: - - sudo kubeadm reset - -Other cleanups if your environment is corrup that may be useful:: - - sudo rm -rf /etc/kolla - sudo rm -rf /etc/kubernetes - sudo rm -rf /etc/kolla-kubernetes - - -Using OpenStack -=============== - -If you were able to successfully reach the end of this guide and -`demo1` was successfully deployed, here is a fun list of things you -can do with your new cluster. - -Access Horizon GUI ------------------- -1. Determine Horizon `EXTERNAL IP` Address:: - - $ kubectl get svc horizon --namespace=kolla - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - horizon 10.3.3.237 10.240.43.175 80/TCP 1d - -2. 
Determine username and password from keystone:: - - $ cat ~/keystonerc_admin | grep OS_USERNAME - export OS_USERNAME=admin - - $ cat ~/keystonerc_admin | grep OS_PASSWORD - export OS_PASSWORD=Sr6XMFXvbvxQCJ3Cib1xb0gZ3lOtBOD8FCxOcodU - -3. Run a browser that has access to your network, and access Horizon - GUI with the `EXTERNAL IP` from Step 1, using the credentials from Step 2. diff --git a/doc/source/development-environment.rst b/doc/source/development-environment.rst deleted file mode 100644 index f6f8fa753..000000000 --- a/doc/source/development-environment.rst +++ /dev/null @@ -1,485 +0,0 @@ -.. development_environment: - -========================================== -Kolla Kubernetes Development Environment -========================================== - -.. warning:: - - The development environment guide is outdated. It no longer - works with Kubernetes master. We are debating what to do about that - but until that time, please use the deployment guide for development. - -Overview -======== - -The kolla-kubernetes development environment is intended to run on a dedicated -development machine such as a workstation or laptop. This development -environment is not intended to run on a virtual machine although that -is feasible. Following this guide will have a minimal impact to the host -operating system. Some software and libraries will be installed and some -configuration changes will be required. - -Install Vagrant and Ansible -=========================== - -You can use Halcyon-Vagrant-Kubernetes with the VirtualBox, Libvirt or OpenStack -vagrant providers. The documentation here describes the Libvirt provider for -Linux hosts, but VirtualBox is perfectly acceptable as well if preferred. For -more information about Halcyon-Kubernetes, please refer to the Github -repositories: - -* https://github.com/att-comdev/halcyon-vagrant-kubernetes (Vagrant components) - -* https://github.com/att-comdev/halcyon-kubernetes (Ansible Playbooks) - - -.. 
note:: - - Currently, the following versions are tested and required: - * ansible >= 2.2.0 - * helm >= 2.2.0 - * kubernetes >= 1.5.2 - * vagrant <1.9.0 - - - -.. note:: - - The official Ubuntu image is currently incompatible with the vagrant-libvirt - provider, but works without issues using either the VirtualBox or OpenStack - providers. - - -CentOS 7.2 with Libvirt ------------------------ - -Firstly install Vagrant: - -.. path . -.. code-block:: console - - sudo yum install -y \ - https://releases.hashicorp.com/vagrant/1.8.1/vagrant_1.8.1_x86_64.rpm - -.. end - -Then install the deps for vagrant libvirt and ensure git-review is present: - -.. path . -.. code-block:: console - - sudo yum install -y libvirt \ - libxslt-devel \ - libxml2-devel \ - libvirt-devel \ - libguestfs-tools-c \ - ruby-devel \ - gcc \ - git \ - git-review \ - gcc-c++ - -.. end - -Now we can install the libvirt plugin itself: - -.. path . -.. code-block:: console - - vagrant plugin install vagrant-libvirt - -.. end - -Now you can setup Libvirt for use without requiring root privileges: - -.. path . -.. code-block:: console - - sudo bash -c 'cat << EOF > /etc/polkit-1/rules.d/80-libvirt-manage.rules - polkit.addRule(function(action, subject) { - if (action.id == "org.libvirt.unix.manage" && subject.local && subject.active && subject.isInGroup("wheel")) { - return polkit.Result.YES; - } - }); - EOF' - - sudo usermod -aG libvirt $USER - -.. end - -Once both Libvirt and Vagrant have been prepared, you should now start and enable Libvirt: - -.. path . -.. code-block:: console - - sudo systemctl start libvirtd - sudo systemctl enable libvirtd - -.. end - -Finally install Ansible to allow Halcyon Kubernetes to provision the cluster: - -.. path . -.. code-block:: console - - sudo yum install -y epel-release - sudo yum install -y ansible - -.. end - -Before continuing, log out and back in again for your session to have the correct -permissions applied. 
- - -Ubuntu 16.04 with Libvirt -------------------------- - -Firstly install Vagrant: - -.. path . -.. code-block:: console - - sudo apt-get update - # Note that theres is a packaging bug in ubuntu so the upstream package must - # be used: https://github.com/vagrant-libvirt/vagrant-libvirt/issues/575 - curl -L https://releases.hashicorp.com/vagrant/1.8.1/vagrant_1.8.1_x86_64.deb > /tmp/vagrant_1.8.1_x86_64.deb - sudo apt-get -y install /tmp/vagrant_1.8.1_x86_64.deb - -.. end - -Then install the dependencies for vagrant-libvirt and ensure git-review is present: - -.. path . -.. code-block:: console - - sudo sed -i 's/^# deb-src/deb-src/g' /etc/apt/sources.list - sudo apt-get update - sudo apt-get -y build-dep vagrant ruby-libvirt - sudo apt-get install -y \ - qemu-kvm \ - libvirt-bin \ - ebtables \ - dnsmasq \ - libxslt-dev \ - libxml2-dev \ - libvirt-dev \ - zlib1g-dev \ - ruby-dev \ - git \ - git-review \ - g++ \ - qemu-utils - -.. end - -Now we can install the libvirt plugin itself: - -.. path . -.. code-block:: console - - vagrant plugin install vagrant-libvirt - -.. end - -Now you can setup Libvirt for use without requiring root privileges: - -.. path . -.. code-block:: console - - sudo adduser $USER libvirtd - -.. end - -Finally, install Ansible to allow Halcyon Kubernetes to provision the cluster: - -.. path . -.. code-block:: console - - sudo apt-get install -y software-properties-common - sudo apt-add-repository -y ppa:ansible/ansible - sudo apt-get update - sudo apt-get install -y ansible - -.. end - -Before continuing, log out and back in again for your session to have the correct -permissions applied. - - -MacOS ----------- - -Install the CLI Developer tools by opening a terminal and running: - -.. path . -.. code-block:: console - - xcode-select --install - -.. 
end - -Download and install VirtualBox from: - * https://www.virtualbox.org/wiki/Downloads - -Download and install vagrant using the following URL to obtain the package: - * https://releases.hashicorp.com/vagrant/1.8.7/vagrant_1.8.7.dmg -There is a bug in Vagrant 1.8.7's embedded curl that prevents boxes being -downloaded, as described in: https://github.com/mitchellh/vagrant/issues/7997. -This can be resolved by running the following command: - -.. path . -.. code-block:: console - - sudo rm -f /opt/vagrant/embedded/bin/curl - -.. end - - -If your version of MacOS doesn't include git in the CLI Developer tools -installed above, you can download and install git from: - * https://git-scm.com/download/mac - -Now we can install Ansible: - -.. path . -.. code-block:: console - - easy_install --user pip - printf 'if [ -f ~/.bashrc ]; then\n . ~/.bashrc\nfi\n' >> $HOME/.profile - printf 'export PATH=$PATH:$HOME/Library/Python/2.7/bin\n' >> $HOME/.bashrc - . $HOME/.profile - pip install --user --upgrade ansible - sudo mkdir /etc/ansible - sudo curl -L https://raw.githubusercontent.com/ansible/ansible/devel/examples/ansible.cfg -o /etc/ansible/ansible.cfg - -.. end - - - -.. note:: - - Under MacOS, you may encounter an error during ``vagrant up``, complaining - that too many files are open. This is because recent versions of MacOS limit the - number of file descriptors per application to 200. A simple way to resolve - this is by running ``ulimit -n 4048`` from the CLI before bringing the - environment up. - -Install Kubernetes and Helm clients -=================================== - -To complete the development environment setup, it is mandatory to have -both a kubernetes client (kubectl) and a helm client (helm) installed on -the host operating system. - -Installing Clients on CentOS or Ubuntu -------------------------------------- - -To install the kubernetes clients: - -.. 
code-block:: console - - curl -L https://dl.k8s.io/v1.5.2/kubernetes-client-linux-amd64.tar.gz | tar -xzv - sudo cp kubernetes/client/bin/* /usr/local/bin - sudo chmod 755 /usr/local/bin/kubefed /usr/local/bin/kubectl - sudo chown root: /usr/local/bin/kubefed /usr/local/bin/kubectl - -.. end - -To install the helm client: - -.. code-block:: console - - curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.2.2-linux-amd64.tar.gz | tar -xzv - sudo cp linux-amd64/helm /usr/local/bin/helm - sudo chmod 755 /usr/local/bin/helm - sudo chown root: /usr/local/bin/helm - -.. end - -Installing Clients on MacOS ---------------------------- - -To install the kubernetes clients: - -.. code-block:: console - - curl -L https://dl.k8s.io/v1.5.2/kubernetes-client-darwin-amd64.tar.gz | tar -xzv - sudo cp kubernetes/client/bin/* /usr/local/bin - sudo chmod 755 /usr/local/bin/kubefed /usr/local/bin/kubectl - sudo chown root: /usr/local/bin/kubefed /usr/local/bin/kubectl - -.. end - -To install the helm client: - -.. code-block:: console - - curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.2.2-darwin-amd64.tar.gz | tar -xzv - sudo cp darwin-amd64/helm /usr/local/bin/helm - sudo chmod 755 /usr/local/bin/helm - sudo chown root: /usr/local/bin/helm - -.. end - -Setup environment -================= - -Clone the repo containing the dev environment: - -.. path . -.. code-block:: console - - git clone https://github.com/att-comdev/halcyon-vagrant-kubernetes - -.. end - - -Initialize the ```halcyon-vagrant-kubernetes``` repository: - -.. path . -.. code-block:: console - - cd halcyon-vagrant-kubernetes - git submodule init - git submodule update - -.. end - -You can then setup Halcyon Vagrant for Kolla. You can select either ``centos`` -or ``ubuntu`` as a guest operating system though currently Ubuntu is only -supported by the Vagrant VirtualBox and OpenStack providers. - -.. path . -.. 
code-block:: console - - ./setup-halcyon.sh \ - --k8s-config kolla \ - --k8s-version v1.5.2 \ - --guest-os centos - -.. end - - -.. note:: - - If you need to use a proxy then you should also edit the ``config.rb`` file - as follows: - * Set ``proxy_enable = true`` - * Set ``proxy_http`` and ``proxy_https`` values for your proxy - * Configure ``proxy_no`` as appropriate. ``proxy_no`` should also include - the ip's of all kube cluster members. - (i.e. 172.16.35.11,172.16.35.12,172.16.35.13,172.16.35.14) - * Edit the no_proxy environment variable on your host to include the kube - master IP (172.16.35.11) - - -Managing and interacting with the environment -============================================= - -The kube2 system in your halcyon-vagrant environment should have a minimum -of 4gb of ram and all others should be set to 2gb of ram. In your -config.rb script kube_vcpus should be set to 2 and kube_count should be -set to 4. - -Once the environment's dependencies have been resolved and configuration -completed, you can run the following commands to interact with it: - -.. path . -.. code-block:: console - - vagrant up # To create and start your halcyon-kubernetes cluster. - # You can also use --provider=libvirt - - ./get-k8s-creds.sh # To get the k8s credentials for the cluster and setup - # kubectl on your host to access it, if you have the helm - # client installed on your host this script will also set - # up the client to enable you to perform all development - # outside of the cluster. - - vagrant ssh kube1 # To ssh into the master node. - - vagrant destroy # To make it all go away. - - -.. end - - -Note that it will take a few minutes for everything to be operational, typically -between 2-5 mins after vagrant/ansible has finished for all services to be -online for my machine (Xeon E3-1240 v3, 32GB, SSD), primarily dependent on -network performance. 
This is as it takes time for the images to be pulled, and -CNI networking to come up, DNS being usually the last service to become active. - - -Testing the deployed environment -================================ - -Test everything works by starting a container with an interactive terminal: - -.. path . -.. code-block:: console - - kubectl run -i -t $(uuidgen) --image=busybox --restart=Never - -.. end - -Once that pod has started and your terminal has connected to it, you can then -test the Kubernetes DNS service (and by extension the CNI SDN layer) by running: - -.. path . -.. code-block:: console - - nslookup kubernetes - -.. end - -To test that helm is working you can run the following: - -.. path . -.. code-block:: console - - helm init --client-only - helm repo update - helm install stable/memcached --name helm-test - # check the deployment has succeeded - helm ls - # and to check via kubectl - kubectl get all - # and finally remove the test memcached chart - helm delete helm-test --purge - -.. end - -.. note:: - - If you receive the error ```Error: could not find a ready tiller pod``` - helm is likely pulling the image to the kubernetes cluster. This error - may also be returned if you have a proxy server environment and the - development environment is not setup properly for the proxy server. - - -Containerized development environment requirements and usage -===================================================== - -Make sure to run the ./get-k8s-creds.sh script or the development environment -container will not be able to connect to the vagrant kubernetes cluster. - -The kolla-kubernetes and kolla-ansible project should be checked out into -the same base directory as halcyon-vagrant-kubernetes. The default assumed -in kolla-kubernetes/tools/build_dev_image.sh is ~/devel. If that is not the -case in your environment then set the environment variable dev_path to the -path appropriate for you. - -.. path . -.. 
code-block:: console - - git clone https://github.com/openstack/kolla-kubernetes.git - git clone https://github.com/openstack/kolla-ansible.git - - # Set dev_path environment variable to match your development base dir - - kolla-kubernetes/tools/build_dev_image.sh - kolla-kubernetes/tools/run_dev_image.sh - -.. end diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 98b8d6f7b..000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,23 +0,0 @@ -Welcome to kolla-kubernetes's documentation! -============================================ - -Contents -======== - -.. toctree:: - :maxdepth: 1 - - ceph-guide - contributing - deployment-guide - development-environment - private-registry - running-tests - service-security - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/source/private-registry.rst b/doc/source/private-registry.rst deleted file mode 100644 index dc1c2a56c..000000000 --- a/doc/source/private-registry.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. private-registry: - -============================================== -Kolla Kubernetes Private Docker Registry Guide -============================================== - -This guide documents how to configure the authentication and use of a -private registry within a Kubernetes cluster. The official Kubernetes -documentation may be found here -``_. -Please note that several methods exist, and more than one may work for -your setup. - -`Specifying ImagePullSecrets on a Pod -`_ -is the one method which will work across all Kubernetes installations, -regardless of the cloud provider or mechanism for automatic node -replacement. This is the recommended configuration. - - -How It Works -============ - -There are two steps: - -- Create an ImagePullSecret. These instructions may differ based on - the Docker registry provider. 
The two types of registry providers - currently covered by this guide include: - - - Standard Docker Registry with Username/Password Authentication - - GCR Google Container Registry - -- Patch the Kubernetes default service-account to add a reference to - the ImagePullSecret. By default and unless configured otherwise, - all Kubernetes pods are created under the default service-account. - Pods under the default service-account use the ImagePullSecret - credentials to authenticate and access the private Docker registry. - - -Create the ImagePullSecret -========================== - -Based on the Docker registry provider, follow the appropriate section -below to create the ImagePullSecret. - - -Standard Docker Registry with Username/Password Authentication --------------------------------------------------------------- - -A typical Docker registry requires only username/password -authentication, without any other API keys or tokens (e.g. Docker -Hub). - -The Kubernetes official documentation for Creating a Secret with a -Docker Config may be found `here -`_. - -For the purposes of these instructions, create the ImagePullSecret to -be named ```private-docker-registry-secret```. - -:: - - # Create the ImagePullSecret named private-docker-registry-secret - # Be sure to replace the uppercase variables with your own. - kubectl create secret docker-registry private-docker-registry-secret \ - --docker-server=DOCKER_REGISTRY_SERVER \ - --docker-username=DOCKER_USER \ - --docker-password=DOCKER_PASSWORD \ - --docker-email=DOCKER_EMAIL - - -GCR Registry with Google Service Account Authentication ------------------------------------------------------- - -To allow any kubernetes cluster outside of Google Cloud to access the -GCR registry, the instructions are a little more complex. These -instructions have been modified from `stackoverflow -`_. 
- -- Go to the Google Developer Console > Api Manager > Credentials, - click "Create credentials", and select "Service account key" -- Under "service account" select "new service account", name the new - key "gcr", and select JSON for the key type. -- Click on "Create" and the service-account key will be downloaded to your disk. -- You may want to save the key file, since there is no way to - re-download it from google. -- Rename the keyfile to be gcr-sa-key.json (GCR service account key), - for the purposes of these instructions. -- Using the keyfile, create the kubernetes secret named ```private-docker-registry-secret```:: - - # Create the docker-password from the file by stripping all - # newlines and squeezing whitespace. - DOCKER_PASSWORD=`cat gcr-sa-key.json | tr -s '[:space:]' | tr -d '\n'` - - # Create a Kubernetes secret named "private-docker-registry-secret" - kubectl create secret docker-registry private-docker-registry-secret \ - --docker-server "https://gcr.io" \ - --docker-username _json_key \ - --docker-email not@val.id \ - --docker-password="$DOCKER_PASSWORD" - - -Patch the Default Service-Account -================================= - -Patch the Kubernetes default service-account to add a reference to the -ImagePullSecret, after which pods under the default service-account -use the ImagePullSecret credentials to authenticate and access the -private Docker registry. - -:: - - # Patch the default service account to include the new - # ImagePullSecret - kubectl patch serviceaccount default -p '{"imagePullSecrets":[{"name":"private-docker-registry-secret"}]}' - -Now, your kubernetes cluster should have access to the private Docker registry. - diff --git a/doc/source/running-tests.rst b/doc/source/running-tests.rst deleted file mode 100644 index c5f72d6b2..000000000 --- a/doc/source/running-tests.rst +++ /dev/null @@ -1,109 +0,0 @@ -.. 
_running-tests: - -============= -Running tests -============= - -Kolla-kubernetes contains a suite of tests in the ``kolla_kubernetes/tests`` -directories. - -Any proposed code change in gerrit is automatically rejected by the OpenStack -Jenkins server [#f1]_ if the change causes test failures. - -It is recommended for developers to run the test suite before submitting patch -for review. This allows to catch errors as early as possible. - -Preferred way to run the tests -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The preferred way to run the unit tests is using ``tox``. It executes tests in -isolated environment, by creating separate virtualenv and installing -dependencies from the ``requirements.txt`` and ``test-requirements.txt`` files, -so the only package you install is ``tox`` itself and it is safe to install tox in venv: - -.. code-block:: console - - $virtualenv .venv - $. .venv/bin/activate - $ pip install tox - -See `the unit testing section of the Testing wiki page`_ for more information. -Following are some simple examples. - -To run the Python 2.7 tests: - -.. code-block:: console - - $ tox -e py27 - -To run the style tests: - -.. code-block:: console - - $ tox -e pep8 - -To run multiple tests separate items by commas: - -.. code-block:: console - - $ tox -e py27,py35,pep8 - -.. _the unit testing section of the Testing wiki page: https://wiki.openstack.org/wiki/Testing#Unit_Tests - -Running a subset of tests -------------------------- - -Instead of running all tests, you can specify an individual directory, file, -class or method that contains test code, i.e. filter full names of tests by a -string. - -To run the tests located only in the ``kolla_kubernetes/tests`` -directory use: - -.. code-block:: console - - $ tox -e py27 kolla_kubernetes.tests - -To run the tests of a specific file say ``kolla_kubernetes/tests/test_utils.py``: - -.. 
code-block:: console - - $ tox -e py27 test_utils - -To run the tests in the ``TestJinjaUtils`` class in -the ``kolla_kubernetes/tests/test_utils.py`` file: - -.. code-block:: console - - $ tox -e py27 test_utils.TestJinjaUtils - -To run the ``TestJinjaUtils.test_merge_configs_and_self_render`` test method in -the ``kolla_kubernetes/tests/test_utils.py`` file: - -.. code-block:: console - - $ tox -e py27 test_kolla_docker.ModuleArgsTest.test_module_args - -Debugging unit tests ------------------------- - -In order to break into the debugger from a unit test we need to insert -a breaking point to the code: - -.. code-block:: python - - import pdb; pdb.set_trace() - -Then run ``tox`` with the debug environment as one of the following:: - - tox -e debug - tox -e debug test_file_name.TestClass.test_name - -For more information see the `oslotest documentation -`_. - - -.. rubric:: Footnotes - -.. [#f1] See http://docs.openstack.org/infra/system-config/jenkins.html - diff --git a/doc/source/service-security.rst b/doc/source/service-security.rst deleted file mode 100644 index ef981037b..000000000 --- a/doc/source/service-security.rst +++ /dev/null @@ -1,71 +0,0 @@ -========================================================================= -Kubernetes Service Security... or, "Why is everything binding to 0.0.0.0" -========================================================================= - -Traditional OpenStack installs have generally used split networks (either VLAN -segments or multi-port NICs and independent networks). Kubernetes is designed -with the assumption that users are going to have a SDN fabric installed, of -which there are several different options using the CNI (Container Networking -Interface) API. Both underlay and overlay networking options are available as -CNI services. - -The Kubernetes fabric is mediated by the ``kube-proxy`` executable, thus even -software running on the node outside of a container is able to see Kubernetes -services. 
- -How are ports exposed? -====================== - -While using ``HostNetwork=True`` (``Net=Host`` in Docker parlance), processes -running inside of a container are using the network namespace of the host, -meaning that network operations are not containerized and, as far as the TCP/IP -stack is concerned, the process is running in the parent host. This means -that any process needs to be just as careful about what ports are accessible -and how they are managing them as a process running outside of the container. -Thus, they must be careful which interface they listen to, who is allowed to -connect, etc. - -In Kubernetes, containers default to ``HostNetwork=False`` and thus work -inside of the Kubernetes network framework. They have no inbound ports -accessible by default unless you have set them to be exposed. - -The normal way of exposing ports is via a Kubernetes Service. A service has a -DNS alias exposed via SkyDNS (e.g. you are able to use ``mariadb`` to access -MariaDB) that points to the service IP address which is generally backed by a -Kubernetes Virtual IP. Services can be either internal services or external -services. Only services specifically marked as external services and -configured with either a LoadBalancer or an Ingress controller will be -accessible outside of the cluster. - -Services can be exposed with a type of ``NodePort``, which means that a port -from a configurable range will be allocated for the service, and each node -will be configured to proxy that port, which is intended for users to be able to -configure their own external load balancers. - -Thus, a server running inside of a container that doesn't have any services -exposed as ``NodePort`` can safely bind to 0.0.0.0 and rely on the underlying -network layer ensuring that attackers are unable to probe for it. - -Containers that need to run as ``HostNetwork=True`` are unable to be exposed -as services but are still able to connect to other Kubernetes services. 
- -What about other services running inside of the Kubernetes cluster? -=================================================================== - -By default, processes running on compute nodes within the cluster are part of -the same unrestricted network fabric. - -Certain processes, Nova Compute nodes, for example, are running user workloads -out of the control of the cluster administrator and thus should not have -unrestricted access to the cluster. There are two alternatives: - -First, compute nodes can be provisioned outside of the Kubernetes cluster. -This is necessary if you are using compute nodes with KVM or Ironic and often -times the easiest approach. - -Second, some of the CNI drivers (Calico being one example) can be configured -with NetworkPolicy objects to block access from certain nodes, which can -prevent compute nodes from seeing the internal services. However, as -currently implemented, pods will still be accessible from the host on which -they are running, it is also necessary to schedule any containers with -``HostNetworking=True`` on dedicated hosts. diff --git a/etc/kolla-kubernetes/kolla-kubernetes.yml b/etc/kolla-kubernetes/kolla-kubernetes.yml deleted file mode 100755 index 563df45e4..000000000 --- a/etc/kolla-kubernetes/kolla-kubernetes.yml +++ /dev/null @@ -1,275 +0,0 @@ ---- -# Any config options specified here will overwrite anything in globals.yml -# at run time. - -############################## -# Kolla Kubernetes options -############################## -# For now, set kolla_internal_vip_address in /etc/kolla/globals.yml to use as -# the ip address for all the services. -# kolla_internal_vip_address: "10.10.10.254" -# This address is used in ALL public endpoints and it serves as an entry point -# into kolla kubernetes cluster, needs to be changed by the operator. 
-kolla_kubernetes_external_vip: "10.57.120.254" -kolla_kubernetes_external_subnet: "24" -kolla_kubernetes_namespace: "kolla" - -############################## -# Kolla Kubernetes labels -############################## -# Labels can be overridden like so -#kolla_kubernetes_hostlabel_specific_thing: -# key: kolla_other -# value: 'true' - -# To split your network nodes off from -# the controllers, do something like -# this: -#kolla_kubernetes_hostlabel_network: -# key: kolla_network -# value: 'true' - -kolla_kubernetes_hostlabel_controller: - key: kolla_controller - value: 'true' - -kolla_kubernetes_hostlabel_compute: - key: kolla_compute - value: 'true' - -#kolla_kubernetes_hostlabel_storage_lvm: -# key: kolla_storage -# value: 'true' - -######################## -# Kubernetes Cluster -######################## -keystone_replicas: "1" # may be > 1 -memcached_replicas: "1" # must == 1 even for multinode -rabbitmq_replicas: "1" # must == 1 even for multinode -horizon_replicas: "1" # may be > 1 -glance_api_replicas: "1" # may be > 1 -glance_registry_replicas: "1" # must == 1 even for multinode -neutron_server_replicas: "1" # may be > 1 -nova_api_replicas: "1" -nova_conductor_replicas: "1" -nova_scheduler_replicas: "1" -cinder_api_replicas: "1" -cinder_scheduler_replicas: "1" -# !!!ALERT!!! Changing the number of replicas for the cinder -# volume manager backed with ceph is not safe unless you are -# running at least newton and have configured a lock manager. -# This is not done out of the box currently. -cinder_volume_ceph_replicas: "1" -nova_consoleauth_replicas: "1" -nova_novncproxy_replicas: "1" -# !!!ALERT!!! Changing number of replicas for elasticsearch -# might cause issues and possible data corruption as currently -# Kubernetes does not support multi-write access. It might -# change in future. 
-elasticsearch_replicas: "1" -kibana_replicas: "1" - -enable_openvswitch_tcp: "no" -enable_libvirt_tcp: "no" - -################################# -# Kubernetes Cluster DNS setting -################################# -dns_replicas: "1" -#dns_server_ip: "" -dns_domain_name: "openstack.kolla" -######################## -# Persistent Storage -######################## -storage_provider: "host" # host, ceph, gce, aws -storage_provider_fstype: "ext4" -storage_ceph: - # - WARNING: These sample defaults configure ceph access using the - # ceph "admin" user/key, because it involves the least amount of - # work for the a user to get ceph volumes working. However, it is - # highly recommended that the operator create a dedicated ceph - # user/key with access only to the ceph pool to be used by this - # Kubernetes cluster. - # - # Kubernetes nodes act as ceph clients because they must mount ceph - # volumes on behalf of pods. For a particular ceph user, there - # are two ways to pass the ceph secret keyring to Kubernetes. - # - # 1) The ceph user secret keyring may be loaded as a kubernetes - # secret. The base64-encoded secret must be referenced by - # storage_ceph.key. To disable this method, comment out the - # storage_ceph.key definition or set the value to "". The - # encoded secret may be created with this command: - # $ ssh ceph-mon cat /etc/ceph/path/to/ceph.client.keyring \ - # | grep key | awk '{print $3}' | base64 - # - # 2) The ceph user secret keyring may be stored on the Kubernetes - # node's filesystem, and then referenced by - # storage_ceph.keyring. To disable this method, comment out - # the storage_ceph.keyring definition or set the value to "". - # - # If both configurations are defined, Method 1) above takes - # precedence over method 2). Prefer using Method 1) to avoid - # provisioning the ceph key on every node, which is difficult if - # using cloud provider auto-provisioning. 
- # - # List of ceph monitor nodes - monitors: - - x.x.x.x - - y.y.y.y - # Default ceph user for authenticated access - user: admin - # The default pool to locate ceph volumes - pool: rbd - # Default user to use in order to run remote SSH commands - # e.g. kolla-kubernetes may execute: - # ssh root@ceph-mon rbd create pool/resourceName --size 1024 - ssh_user: root - # Any unique secret string within the kube env - secretName: ceph-secret - # The base64-encoded secret key which nodes need for mounting ceph volumes - key: EXAMPLEEXAMPLEEXAMPLEEXAMPLEEXAMPLEEXAMPLE= - # The ceph keyring file location on each kubernetes node's filesystem - keyring: /etc/ceph/ceph.client.admin.keyring - initial_mon: minikube - -################################ -# Persistent volumes sizes in GB -################################ -#glance_volume_size: "" -keystone_auth_url: "http://keystone-admin:35357" - -######################## -# Glance variables -######################## -openstack_glance_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}" -glance_admin_endpoint: "http://glance-api:{{ glance_api_port }}" -glance_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ glance_api_port }}" -glance_internal_endpoint: "http://glance-api:{{ glance_api_port }}" - -######################## -# Neutron variables -######################## -openstack_neutron_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}" -neutron_admin_endpoint: "http://neutron-server:{{ neutron_server_port }}" -neutron_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ neutron_server_port }}" -neutron_internal_endpoint: "http://neutron-server:{{ neutron_server_port }}" - -######################## -# 
Keystone variables -######################## -keystone_admin_url: "{{ admin_protocol }}://keystone-admin:{{ keystone_admin_port }}/v3" -keystone_internal_url: "{{ internal_protocol }}://keystone-internal:{{ keystone_public_port }}/v3" -keystone_public_url: "{{ public_protocol }}://{{ kolla_kubernetes_external_vip }}:{{ keystone_public_port }}/v3" -keystone_database_address: "mariadb" - -######################## -# NOVA variables -######################## -openstack_nova_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}" -nova_admin_endpoint: "http://nova-api:{{ nova_api_port }}/v2/%(tenant_id)s" -nova_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ nova_api_port }}/v2/%(tenant_id)s" -nova_internal_endpoint: "http://nova-api:{{ nova_api_port }}/v2/%(tenant_id)s" -novncproxy_base_url: "{{ public_protocol }}://{{ kolla_kubernetes_external_vip }}:{{ nova_novncproxy_port }}/vnc_auto.html" - - -######################## -# Cinder variables -######################## -openstack_cinder_auth: "{'auth_url':'{{ keystone_auth_url }}','username':'{{ openstack_auth.username }}','password':'$KEYSTONE_ADMIN_PASSWORD','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}" -cinder_admin_endpoint: "http://cinder-api:{{ cinder_api_port }}/v1/%(tenant_id)s" -cinder_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ cinder_api_port }}/v1/%(tenant_id)s" -cinder_internal_endpoint: "http://cinder-api:{{ cinder_api_port }}/v1/%(tenant_id)s" -cinder_v2_admin_endpoint: "http://cinder-api:{{ cinder_api_port }}/v2/%(tenant_id)s" -cinder_v2_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ cinder_api_port }}/v2/%(tenant_id)s" -cinder_v2_internal_endpoint: "http://cinder-api:{{ cinder_api_port }}/v2/%(tenant_id)s" -cinder_v3_admin_endpoint: "http://cinder-api:{{ cinder_api_port 
}}/v3/%(tenant_id)s" -cinder_v3_public_endpoint: "http://{{ kolla_kubernetes_external_vip }}:{{ cinder_api_port }}/v3/%(tenant_id)s" -cinder_v3_internal_endpoint: "http://cinder-api:{{ cinder_api_port }}/v3/%(tenant_id)s" - -####################### -# Configuration below allows configuring multiple backends for Cinder. -# At this point only iSCSI/LVM2 backend is supported. For Cinder to use -# iSCSI/LVM2 backend, IP address of the server hosting VG group as well -# as VG group name must be specified in the following format. -# iscsi_target_1 and iscsi_target_2 are just names and could be changed -# as long as they are unique. For each ip/vg_name pair, cinder.conf will have -# a new section created and an autogenerated backend name consisting of: -# "ip"_"vgname" will be added to "enabled_backends" parameter. Example for these -# two iscsi targets, cinder.conf will automatically get these lines: -#[DEFAULT] -#enabled_backends = {already existing backends},10.57.120.14_cinder-volumes,10.57.120.13_local-volumes -# -#[10.57.120.14_cinder-volumes] -#volume_group = cinder-volumes -#volume_backend_name = 10.57.120.14_cinder-volumes -#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver -#iscsi_helper = tgtadm -#iscsi_protocol = iscsi -# -#[10.57.120.13_local-volumes] -#volume_group = local-volumes -#volume_backend_name = 10.57.120.13_local-volumes -#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver -#iscsi_helper = tgtadm -#iscsi_protocol = iscsi -####################### -cinder_storage_list: -- lvm_backends: - - iscsi_target_1: - ip: "10.57.120.14" - vg_name: "cinder-volumes" - - iscsi_target_2: - ip: "10.57.120.13" - vg_name: "local-volumes" -storage_interface: "eth0" - -######################## -# Workaround variables -######################## -kolla_kubernetes_ovs_setup_bridge: "yes" -kolla_kubernetes_ovs_add_port: "yes" - -######################## -# Ceph variables -######################## -# WARNING! reminder, this ceph setup is only intended for testing. 
- -ceph_osd_journal_dev: -- "/dev/loop0p1" -- "/dev/loop1p1" - -ceph_osd_data_dev: -- "/dev/loop0p2" -- "/dev/loop1p2" - -######################## -# Rabbitmq variables -######################## - -rabbitmq_management_external: "no" - -######################## -# Nodeport variables -######################## -#horizon_http_nodeport: "30080" -#horizon_https_nodeport: "30433" -#cinder_api_port_nodeport: "30877" -#rabbitmq_management_nodeport: "30877" -######################## -# List os PODs with disabled logging -######################## -log_disable: -- memcached -- nova-consoleauth -- openvswitch-ovsdb-compute -- openvswitch-ovsdb-network -- openvswitch-vswitchd-compute -- openvswitch-vswitchd-network -- neutron-dhcp-agent -- kube-dns-v11 -- iscsi-iscsid -- iscsi-tgtd -- elasticsearch -- keepalived diff --git a/etc/kolla-kubernetes/service_resources.yml b/etc/kolla-kubernetes/service_resources.yml deleted file mode 100755 index cd575cc24..000000000 --- a/etc/kolla-kubernetes/service_resources.yml +++ /dev/null @@ -1,709 +0,0 @@ -# YAML mapping of service->pods->containers -# This structure leaves room for adding configuration parameters. -# ORDER MATTERS: Kolla-Kubernetes will evaluate these list-item -# resources in order. -# The (configmap) resources are not defined here, since they come -# directly from kolla. -# The (disk, pv, pvc, and svc) resources hold volume or network state, -# and are more dangerous to delete. These resources are stateful. -# Deleting a disk, pv, or pvc will lose data on a volume. Deleting -# a svc will lose a load-balancer IP. -# The (bootstrap) resources should be run only once, to setup -# persistent state. -# The (pod) resources may be destroyed willy-nilly, since they hold no -# state. -# Template.vars are additional vars in the form of a dict passed to -# the jinja templating engine. Templates may access this dict with -# "kolla_kubernetes.template.vars.". 
This enables arguments -# to be passed to templates so that they may be re-used or -# configured. These template.vars may not contain any other nested -# jinja references, and are passed unmodified directly to the -# template. - -kolla-kubernetes: - services: - - name: ceph - pods: - resources: - configmap: - - name: ceph-mon - - name: ceph-osd - secret: - - name: ceph - template: services/ceph/ceph-secret.yml.j2 - disk: - pv: - pvc: - svc: - bootstrap: - - name: ceph-bootstrap-initial-mon - template: services/ceph/ceph-bootstrap-initial-mon.yml.j2 - pod: - - name: ceph-bootstrap-osd0 - template: services/ceph/ceph-bootstrap-osd.yml.j2 - vars: - index: '0' - - name: ceph-bootstrap-osd1 - template: services/ceph/ceph-bootstrap-osd.yml.j2 - vars: - index: '1' - - name: ceph-mon - template: services/ceph/ceph-mon-pod.yml.j2 - - name: ceph-osd0 - template: services/ceph/ceph-osd-pod.yml.j2 - vars: - index: '0' - - name: ceph-osd1 - template: services/ceph/ceph-osd-pod.yml.j2 - vars: - index: '1' - - name: ceph-admin - template: services/ceph/ceph-admin-pod.yml.j2 - - name: ceph-rbd - template: services/ceph/ceph-rbd-pod.yml.j2 - - name: mariadb - resources: - configmap: - - name: mariadb - - name: mariadb-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: mariadb-logging - log_format: 'mariadb' - secret: - - name: memcached - resources: - configmap: - - name: memcached - secret: - - name: keystone - resources: - configmap: - - name: keystone - - name: keystone-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: keystone-logging - log_format: 'openstack' - secret: - - name: horizon - resources: - configmap: - - name: horizon - - name: horizon-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: horizon-logging - log_format: 'horizon' - secret: - - name: rabbitmq - resources: - configmap: - - name: rabbitmq - - name: rabbitmq-logging - template: 
services/common/logging-configmap.yml.j2 - vars: - configmap_name: rabbitmq-logging - log_format: 'rabbitmq' - secret: - - name: glance - resources: - configmap: - - name: glance-api - - name: glance-registry - - name: glance-api-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: glance-api-haproxy - port_name: glance_api_port - - name: glance-api-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: glance-api-logging - - name: glance-registry-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: glance-registry-haproxy - port_name: glance_registry_port - - name: glance-registry-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: glance-registry-logging - log_format: 'openstack' - secret: - - name: nova - pods: - - name: nova-compute - containers: - - name: nova-compute - - name: nova-libvirt - - name: nova-compute-ironic - - name: nova-control - containers: - - name: nova-api - - name: nova-scheduler - - name: nova-conductor - - name: nova-consoleauth - - name: nova-novncproxy - resources: - configmap: - - name: nova-api - - name: nova-api-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: nova-api-haproxy - port_name: nova_api_port - - name: nova-api-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: nova-api-logging - log_format: 'openstack' - - name: nova-compute-ironic - - name: nova-compute - - name: nova-compute-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: nova-compute-logging - log_format: 'openstack' - - name: nova-conductor - - name: nova-conductor-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: nova-conductor-logging - log_format: 'openstack' - - name: nova-libvirt - - name: nova-libvirt-logging - template: services/common/logging-configmap.yml.j2 - vars: - 
configmap_name: nova-libvirt-logging - log_format: 'openstack' - - name: nova-scheduler - - name: nova-scheduler-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: nova-scheduler-logging - log_format: 'openstack' - - name: nova-consoleauth - - name: nova-novncproxy - - name: nova-novncproxy-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: nova-novncproxy-haproxy - port_name: nova_novncproxy_port - - name: nova-novncproxy-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: nova-novncproxy-logging - log_format: 'openstack' - - name: placement-api - - name: placement-api-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: placement-api-haproxy - port_name: placement_api_port - secret: - - name: nova-libvirt - template: services/nova/nova-libvirt-secret.yml.j2 - disk: - pv: - pvc: - svc: - - name: nova-novncproxy - template: services/common/generic-service.yml.j2 - vars: - port_name: nova_novncproxy_port - service: nova - type: novncproxy - name: nova-novncproxy - - name: nova-api - template: services/common/generic-service.yml.j2 - vars: - port_name: nova_api_port - service: nova - type: api - name: nova-api - - name: nova-metadata - template: services/common/generic-service.yml.j2 - vars: - port_name: nova_metadata_port - service: nova - type: api - name: nova-metadata - bootstrap: - - name: nova-create-api-db - template: services/nova/nova-control-bootstrap-job-create-nova-api-db.yml.j2 - - name: nova-create-keystone-endpoint-public - template: services/common/common-create-keystone-endpoint.yml.j2 - vars: - service_name: nova - service_type: compute - interface: public - service_auth: openstack_nova_auth - description: Openstack Compute - endpoint: nova_public_endpoint - - name: nova-create-keystone-endpoint-internal - template: services/common/common-create-keystone-endpoint.yml.j2 - vars: - service_name: nova - 
service_type: compute - interface: internal - service_auth: openstack_nova_auth - description: Openstack Compute - endpoint: nova_internal_endpoint - - name: nova-create-keystone-endpoint-admin - template: services/common/common-create-keystone-endpoint.yml.j2 - vars: - service_name: nova - service_type: compute - interface: admin - service_auth: openstack_nova_auth - description: Openstack Compute - endpoint: nova_admin_endpoint - - name: nova-create-keystone-user - template: services/common/common-create-keystone-user.yml.j2 - vars: - user: nova - role: admin - service_auth: openstack_nova_auth - secret: nova-keystone-password - - name: nova-create-db - template: services/nova/nova-control-bootstrap-job-create-nova-db.yml.j2 - pod: - - name: nova-compute - template: services/nova/nova-compute-pod.yml.j2 - - name: nova-libvirt - template: services/nova/nova-libvirt-pod.yml.j2 - - name: nova-api - template: services/common/common-deployment.yml.j2 - vars: - replicas: nova_api_replicas - service_name: nova - service_type: api - service_location: kolla_kubernetes_hostlabel_nova_api - config_file_name: nova.conf - config_section_name: DEFAULT - config_listen_host_parameter: osapi_compute_listen - config_listen_port_parameter: osapi_compute_listen_port - port: nova_api_port - port_name: nova-api - image: nova_api_image_full - check_path: /healthcheck - extra_volumes: - - name: lib-modules - hostPath: - path: /lib/modules - extra_volume_mounts: - - mountPath: /lib/modules - name: lib-modules - privileged: true - - name: nova-conductor - template: services/nova/nova-control-conductor-pod.yml.j2 - - name: nova-scheduler - template: services/nova/nova-control-scheduler-pod.yml.j2 - - name: nova-consoleauth - template: services/nova/nova-control-consoleauth-pod.yml.j2 - - name: nova-novncproxy - template: services/common/common-deployment.yml.j2 - vars: - replicas: nova_novncproxy_replicas - service_name: nova - service_type: novncproxy - service_location: 
kolla_kubernetes_hostlabel_nova_novncproxy - config_file_name: nova.conf - config_section_name: vnc - config_listen_host_parameter: novncproxy_host - config_listen_port_parameter: novncproxy_port - port: nova_novncproxy_port - port_name: nova-novncproxy - image: nova_novncproxy_image_full - check_path: /vnc_auto.html - - name: openvswitch - pods: - - name: openvswitch-ovsdb - containers: - - name: openvswitch-ovsdb - - name: openvswitch-vswitchd - containers: - - name: openvswitch-vswitchd - resources: - configmap: - - name: openvswitch-db-server - - name: openvswitch-vswitchd - secret: - disk: - pv: - pvc: - svc: - bootstrap: - - name: openvswitch-set-external-ip - template: services/openvswitch/openvswitch-set-external-ip-job.yml.j2 - pod: - - name: openvswitch-ovsdb-compute - template: services/openvswitch/openvswitch-ovsdb-daemonset.yml.j2 - vars: - type: compute - - name: openvswitch-ovsdb-network - template: services/openvswitch/openvswitch-ovsdb-daemonset.yml.j2 - vars: - type: network - - name: openvswitch-vswitchd-compute - template: services/openvswitch/openvswitch-vswitchd-daemonset.yml.j2 - vars: - type: compute - - name: openvswitch-vswitchd-network - template: services/openvswitch/openvswitch-vswitchd-daemonset.yml.j2 - vars: - type: network - - name: neutron - resources: - configmap: - - name: neutron-server - - name: neutron-server-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: neutron-server-haproxy - port_name: neutron_server_port - - name: neutron-server-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: neutron-server-logging - log_format: 'openstack' - - name: neutron-dhcp-agent - - name: neutron-l3-agent - - name: neutron-l3-agent-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: neutron-l3-agent-logging - log_format: 'openstack' - - name: neutron-metadata-agent - - name: neutron-metadata-agent-logging - template: 
services/common/logging-configmap.yml.j2 - vars: - configmap_name: neutron-metadata-agent-logging - log_format: 'openstack' - - name: neutron-openvswitch-agent - - name: neutron-openvswitch-agent-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: neutron-openvswitch-agent-logging - log_format: 'openstack' - secret: - bootstrap: - - name: neutron-create-db - template: services/neutron/neutron-bootstrap-job-create-db.yml.j2 - - name: swift - pods: - - name: swift-account - containers: - - name: swift-rsyncd - - name: swift-account-server - - name: swift-account-auditor - - name: swift-account-replicator - - name: swift-account-reaper' - - name: swift-container - containers: - - name: swift-rsyncd - - name: swift-container-server - - name: swift-container-auditor - - name: swift-container-replicator - - name: swift-container-updater - - name: swift-object - containers: - - name: swift-rsyncd - - name: swift-object-server - - name: swift-object-auditor - - name: swift-object-replicator - - name: swift-object-updater - - name: swift-object-expirer - - name: swift-proxy - containers: - - name: swift-proxy-server - resources: - configmap: - - name: swift-rsyncd - - name: swift-account-server - - name: swift-account-auditor - - name: swift-account-replicator - - name: swift-account-reaper - - name: swift-rsyncd - - name: swift-container-server - - name: swift-container-auditor - - name: swift-container-replicator - - name: swift-container-updater - - name: swift-rsyncd - - name: swift-object-server - - name: swift-object-auditor - - name: swift-object-replicator - - name: swift-object-updater - - name: swift-object-expirer - - name: swift-proxy-server - secret: - disk: - pv: - pvc: - svc: - - name: swift-account - template: services/swift/swift-account-service.yml.j2 - - name: swift-container - template: services/swift/swift-container-service.yml.j2 - - name: swift-object - template: services/swift/swift-object-service.yml.j2 - - name: 
swift-proxy - template: services/swift/swift-proxy-service.yml.j2 - - name: swift-rsyncd - template: services/swift/swift-rsync-service.yml.j2 - bootstrap: - pod: - - name: swift-account - template: services/swift/swift-account-pod.yml.j2 - - name: swift-container - template: services/swift/swift-container-pod.yml.j2 - - name: swift-object - template: services/swift/swift-object-pod.yml.j2 - - name: swift-proxy - template: services/swift/swift-proxy-pod.yml.j2 - - name: iscsi - resources: - configmap: - - name: iscsid - - name: tgtd - secret: - - name: cinder - resources: - configmap: - - name: cinder-api - - name: cinder-api-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: cinder-api-haproxy - port_name: cinder_api_port - - name: cinder-api-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: cinder-api-logging - log_format: 'openstack' - - name: cinder-backup - - name: cinder-backup-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: cinder-backup-logging - log_format: 'openstack' - - name: cinder-scheduler - - name: cinder-scheduler-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: cinder-scheduler-logging - log_format: 'openstack' - - name: cinder-volume - - name: cinder-volume-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: cinder-volume-logging - log_format: 'openstack' - secret: - - name: elasticsearch - pods: - - name: elasticsearch - containers: - - name: elasticsearch - resources: - configmap: - - name: elasticsearch - secret: - disk: - - name: elasticsearch - template: services/common/common-disk.sh.j2 - vars: - size_in_gb: 10 - pv: - - name: elasticsearch - template: services/common/common-pv.yml.j2 - vars: - name: elasticsearch - size_in_gb: 10 - pvc: - - name: elasticsearch - template: services/common/common-pvc.yml.j2 - vars: - name: elasticsearch - size_in_gb: 10 - 
svc: - - name: elasticsearch - template: services/common/generic-service.yml.j2 - vars: - port_name: elasticsearch_port - type: elasticsearch - service: elasticsearch - name: elasticsearch - bootstrap: - pod: - - name: elasticsearch - template: services/elasticsearch/elasticsearch-pod.yml.j2 - - name: kibana - pods: - - name: kibana - containers: - - name: kibana - resources: - configmap: - - name: kibana - - name: kibana-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: kibana-haproxy - port_name: kibana_server_port - - name: kibana-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: kibana-logging - secret: - disk: - pv: - pvc: - svc: - - name: kibana - template: services/common/generic-service.yml.j2 - vars: - port_name: kibana_server_port - service: kibana - type: kibana - name: kibana - bootstrap: - pod: - - name: kibana - template: services/common/common-deployment.yml.j2 - vars: - replicas: kibana_replicas - service_name: kibana - service_type: kibana - service_location: kolla_kubernetes_hostlabel_kibana - config_file_name: glance-registry.conf - config_section_name: DEFAULT - config_listen_host_parameter: bind_host - config_listen_port_parameter: bind_port - port: kibana_server_port - port_name: kibana - image: kibana_image_full - check_path: / - - name: keepalived - pods: - - name: keepalived - containers: - - name: keepalived - - name: keepalived - containers: - - name: keepalived - resources: - configmap: - - name: keepalived - template: services/keepalived/keepalived-configmap.yml.j2 - secret: - disk: - pv: - pvc: - svc: - bootstrap: - pod: - - name: keepalived - template: services/keepalived/keepalived-daemonset.yml.j2 - - name: ironic - pods: - - name: ironic-conductor - containers: - - name: ironic-conductor - - name: ironic-pxe - containers: - - name: ironic-pxe - - name: ironic-api - containers: - - name: ironic-api - - name: ironic-dnsmasq - containers: - - name: 
ironic-dnsmasq - - name: ironic-inspector - containers: - - name: ironic-inspector - resources: - configmap: - - name: ironic-api - - name: ironic-api-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: ironic-api-haproxy - port_name: ironic_api_port - - name: ironic-api-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: ironic-api-logging - log_format: 'openstack' - - name: ironic-conductor - - name: ironic-conductor-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: ironic-conductor-logging - log_format: 'openstack' - - name: ironic-inspector-tftp - - name: ironic-conductor-tftp - - name: ironic-dnsmasq - - name: ironic-dnsmasq-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: ironic-conductor-logging - log_format: 'openstack' - - name: ironic-inspector - - name: ironic-inspector-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: ironic-inspector-logging - log_format: 'openstack' - - name: ironic-inspector-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: ironic-inspector-haproxy - port_name: ironic_inspector_port - - name: heat - pods: - - name: heat-all - containers: - - name: heat-all - - name: heat-api - containers: - - name: heat-api - - name: heat-api-cfn - containers: - - name: heat-api-cfn - - name: heat-engine - containers: - - name: heat-engine - resources: - configmap: - - name: heat - - name: heat-all - - name: heat-api - - name: heat-api-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - vars: - configmap_name: heat-api-haproxy - port_name: heat_api_port - - name: heat-api-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: heat-api-logging - log_format: 'openstack' - - name: heat-api-cfn - - name: heat-api-cfn-haproxy - template: services/common/api-haproxy-configmap.yml.j2 - 
vars: - configmap_name: heat-api-cfn-haproxy - port_name: heat_api_cfn_port - - name: heat-api-cfn-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: heat-api-cfn-logging - log_format: 'openstack' - - name: heat-engine - - name: heat-engine-logging - template: services/common/logging-configmap.yml.j2 - vars: - configmap_name: heat-engine-logging - log_format: 'openstack' diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml deleted file mode 100644 index adb315c55..000000000 --- a/etc/kolla/globals.yml +++ /dev/null @@ -1,348 +0,0 @@ ---- -# You can use this file to override _any_ variable throughout Kolla. -# Additional options can be found in the -# 'kolla-ansible/ansible/group_vars/all.yml' file. Default value of all the -# commented parameters are shown here, To override the default value uncomment -# the parameter and change its value. - -################### -# Kolla options -################### -# Valid options are [ COPY_ONCE, COPY_ALWAYS ] -#config_strategy: "COPY_ALWAYS" - -# Valid options are [ centos, oraclelinux, ubuntu ] -#kolla_base_distro: "centos" - -# Valid options are [ binary, source ] -#kolla_install_type: "binary" - -# Valid option is Docker repository tag -#openstack_release: "" - -# Location of configuration overrides -#node_custom_config: "/etc/kolla/config" - -# This should be a VIP, an unused IP on your network that will float between -# the hosts running keepalived for high-availability. When running an All-In-One -# without haproxy and keepalived, this should be the first IP on your -# 'network_interface' as set in the Networking section below. -kolla_internal_vip_address: "10.10.10.254" - -# This is the DNS name that maps to the kolla_internal_vip_address VIP. By -# default it is the same as kolla_internal_vip_address. 
-#kolla_internal_fqdn: "{{ kolla_internal_vip_address }}" - -# This should be a VIP, an unused IP on your network that will float between -# the hosts running keepalived for high-availability. It defaults to the -# kolla_internal_vip_address, allowing internal and external communication to -# share the same address. Specify a kolla_external_vip_address to separate -# internal and external requests between two VIPs. -#kolla_external_vip_address: "{{ kolla_internal_vip_address }}" - -# The Public address used to communicate with OpenStack as set in the public_url -# for the endpoints that will be created. This DNS name should map to -# kolla_external_vip_address. -#kolla_external_fqdn: "{{ kolla_external_vip_address }}" - -#################### -# Docker options -#################### -# Below is an example of a private repository with authentication. Note the -# Docker registry password can also be set in the passwords.yml file. - -#docker_registry: "172.16.0.10:4000" -#docker_namespace: "companyname" -#docker_registry_username: "sam" -#docker_registry_password: "correcthorsebatterystaple" - - -############################### -# Neutron - Networking Options -############################### -# This interface is what all your api services will be bound to by default. -# Additionally, all vxlan/tunnel and storage network traffic will go over this -# interface by default. This interface must contain an IPv4 address. -# It is possible for hosts to have non-matching names of interfaces - these can -# be set in an inventory file per host or per group or stored separately, see -# http://docs.ansible.com/ansible/intro_inventory.html -# Yet another way to workaround the naming problem is to create a bond for the -# interface on all hosts and give the bond name here. Similar strategy can be -# followed for other types of interfaces. -#network_interface: "eth0" - -# These can be adjusted for even more customization. The default is the same as -# the 'network_interface'. 
These interfaces must contain an IPv4 address. -#kolla_external_vip_interface: "{{ network_interface }}" -#api_interface: "{{ network_interface }}" -#storage_interface: "{{ network_interface }}" -#cluster_interface: "{{ network_interface }}" -#tunnel_interface: "{{ network_interface }}" -#dns_interface: "{{ network_interface }}" - -# This is the raw interface given to neutron as its external network port. Even -# though an IP address can exist on this interface, it will be unusable in most -# configurations. It is recommended this interface not be configured with any IP -# addresses for that reason. -#neutron_external_interface: "eth1" - -# Valid options are [ openvswitch, linuxbridge ] -#neutron_plugin_agent: "openvswitch" - - -#################### -# keepalived options -#################### -# Arbitrary unique number from 0..255 -#keepalived_virtual_router_id: "51" - - -#################### -# TLS options -#################### -# To provide encryption and authentication on the kolla_external_vip_interface, -# TLS can be enabled. When TLS is enabled, certificates must be provided to -# allow clients to perform authentication. 
-#kolla_enable_tls_external: "no" -#kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem" - - -#################### -# OpenStack options -#################### -# Use these options to set the various log levels across all OpenStack projects -# Valid options are [ True, False ] -#openstack_logging_debug: "False" - -# Valid options are [ novnc, spice ] -#nova_console: "novnc" - -# OpenStack services can be enabled or disabled with these options -#enable_aodh: "no" -#enable_barbican: "no" -#enable_ceilometer: "no" -#enable_central_logging: "no" -#enable_ceph: "no" -#enable_ceph_rgw: "no" -#enable_chrony: "no" -#enable_cinder: "no" -#enable_cinder_backend_hnas_iscsi: "no" -#enable_cinder_backend_hnas_nfs: "no" -#enable_cinder_backend_iscsi: "no" -#enable_cinder_backend_lvm: "no" -#enable_cinder_backend_nfs: "no" -#enable_cloudkitty: "no" -#enable_collectd: "no" -#enable_congress: "no" -#enable_designate: "no" -#enable_destroy_images: "no" -#enable_etcd: "no" -#enable_freezer: "no" -#enable_gnocchi: "no" -#enable_grafana: "no" -#enable_heat: "yes" -#enable_horizon: "yes" -#enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}" -#enable_horizon_freezer: "{{ enable_freezer | bool }}" -#enable_horizon_ironic: "{{ enable_ironic | bool }}" -#enable_horizon_karbor: "{{ enable_karbor | bool }}" -#enable_horizon_magnum: "{{ enable_magnum | bool }}" -#enable_horizon_manila: "{{ enable_manila | bool }}" -#enable_horizon_mistral: "{{ enable_mistral | bool }}" -#enable_horizon_murano: "{{ enable_murano | bool }}" -#enable_horizon_neutron_lbaas: "{{ enable_neutron_lbaas | bool }}" -#enable_horizon_sahara: "{{ enable_sahara | bool }}" -#enable_horizon_searchlight: "{{ enable_searchlight | bool }}" -#enable_horizon_senlin: "{{ enable_senlin | bool }}" -#enable_horizon_solum: "{{ enable_solum | bool }}" -#enable_horizon_tacker: "{{ enable_tacker | bool }}" -#enable_horizon_trove: "{{ enable_trove | bool }}" -#enable_horizon_watcher: "{{ enable_watcher | 
bool }}" -#enable_influxdb: "no" -#enable_ironic: "no" -#enable_karbor: "no" -#enable_kuryr: "no" -#enable_magnum: "no" -#enable_manila: "no" -#enable_manila_backend_generic: "no" -#enable_manila_backend_hnas: "no" -#enable_mistral: "no" -#enable_mongodb: "no" -#enable_murano: "no" -#enable_multipathd: "no" -#enable_neutron_bgp_dragent: "no" -#enable_neutron_dvr: "no" -#enable_neutron_lbaas: "no" -#enable_neutron_fwaas: "no" -#enable_neutron_qos: "no" -#enable_neutron_agent_ha: "no" -#enable_neutron_vpnaas: "no" -#enable_nova_serialconsole_proxy: "no" -#enable_octavia: "no" -#enable_openvswitch: "{{ neutron_plugin_agent != 'linuxbridge' | bool }}" -#enable_panko: "no" -#enable_rally: "no" -#enable_sahara: "no" -#enable_searchlight: "no" -#enable_senlin: "no" -#enable_solum: "no" -#enable_swift: "no" -#enable_telegraf: "no" -#enable_tacker: "no" -#enable_tempest: "no" -#enable_trove: "no" -#enable_vmtp: "no" -#enable_watcher: "no" -#enable_zun: "no" - -################### -# Ceph options -################### -# Ceph can be setup with a caching to improve performance. To use the cache you -# must provide separate disks than those for the OSDs -#ceph_enable_cache: "no" - -# Ceph is not able to determine the size of a cache pool automatically, -# so the configuration on the absolute size is required here, otherwise the flush/evict will not work. -#ceph_target_max_bytes: "" -#ceph_target_max_objects: "" - -# Valid options are [ forward, none, writeback ] -#ceph_cache_mode: "writeback" - -# A requirement for using the erasure-coded pools is you must setup a cache tier -# Valid options are [ erasure, replicated ] -#ceph_pool_type: "replicated" - -# Integrate ceph rados object gateway with openstack keystone -#enable_ceph_rgw_keystone: "no" - - -############################## -# Keystone - Identity Options -############################## - -# Valid options are [ uuid, fernet ] -#keystone_token_provider: 'uuid' - -# Interval to rotate fernet keys by (in seconds). 
Must be an interval of -# 60(1 min), 120(2 min), 180(3 min), 240(4 min), 300(5 min), 360(6 min), -# 600(10 min), 720(12 min), 900(15 min), 1200(20 min), 1800(30 min), -# 3600(1 hour), 7200(2 hour), 10800(3 hour), 14400(4 hour), 21600(6 hour), -# 28800(8 hour), 43200(12 hour), 86400(1 day), 604800(1 week). -#fernet_token_expiry: 86400 - - -######################### -# Glance - Image Options -######################### -# Configure image backend. -#glance_backend_file: "yes" -#glance_backend_ceph: "no" - -####################### -# Ceilometer options -####################### -# Valid options are [ mongodb, mysql, gnocchi ] -#ceilometer_database_type: "mongodb" - -# Valid options are [ mongodb, gnocchi, panko ] -#ceilometer_event_type: "mongodb" - - -####################### -# Barbican options -####################### -# Valid options are [ simple_crypto, p11_crypto ] -#barbican_crypto_plugin: "simple_crypto" -#barbican_library_path: "/usr/lib/libCryptoki2_64.so" - -####################### -## Panko options -####################### -# Valid options are [ mongodb, mysql ] -#panko_database_type: "mysql" - -####################### -# Gnocchi options -####################### -# Valid options are [ file, ceph ] -#gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}" - - -################################# -# Cinder - Block Storage Options -################################# -# Enable / disable Cinder backends -#cinder_backend_ceph: "{{ enable_ceph }}" -#cinder_volume_group: "cinder-volumes" -#cinder_backup_driver: "nfs" -#cinder_backup_share: "" -#cinder_backup_mount_options_nfs: "" - - -####################### -# Designate options -####################### -# Valid options are [ bind9 ] -designate_backend: "bind9" -designate_ns_record: "sample.openstack.org" - -######################### -# Nova - Compute Options -######################### -#nova_backend_ceph: "{{ enable_ceph }}" - -# Valid options are [ qemu, kvm ] -#nova_compute_virt_type: "kvm" - 
-############################## -# Horizon - Dashboard Options -############################## -#horizon_backend_database: "{{ enable_murano | bool }}" - - -####################################### -# Manila - Shared File Systems Options -####################################### -# HNAS backend configuration -#hnas_ip: -#hnas_user: -#hnas_password: -#hnas_evs_id: -#hnas_evs_ip: -#hnas_file_system_name: - -################################## -# Swift - Object Storage Options -################################## -# Swift expects block devices to be available for storage. Two types of storage -# are supported: 1 - storage device with a special partition name and filesystem -# label, 2 - unpartitioned disk with a filesystem. The label of this filesystem -# is used to detect the disk which Swift will be using. - -# Swift support two mathcing modes, valid options are [ prefix, strict ] -#swift_devices_match_mode: "strict" - -# This parameter defines matching pattern: if "strict" mode was selected, -# for swift_devices_match_mode then swift_device_name should specify the name of -# the special swift partition for example: "KOLLA_SWIFT_DATA", if "prefix" mode was -# selected then swift_devices_name should specify a pattern which would match to -# filesystems' labels prepared for swift. 
-#swift_devices_name: "KOLLA_SWIFT_DATA" - - -################################################ -# Tempest - The OpenStack Integration Test Suite -################################################ -# following value must be set when enable tempest -tempest_image_id: -tempest_flavor_ref_id: -tempest_public_network_id: -tempest_floating_network_name: - -# tempest_image_alt_id: "{{ tempest_image_id }}" -# tempest_flavor_ref_alt_id: "{{ tempest_flavor_ref_id }}" -api_interface_address: 0.0.0.0 -tunnel_interface_address: 0.0.0.0 -orchestration_engine: KUBERNETES diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml deleted file mode 100644 index 20850cb27..000000000 --- a/etc/kolla/passwords.yml +++ /dev/null @@ -1,195 +0,0 @@ ---- -################### -# Ceph options -#################### -# These options must be UUID4 values in string format -# XXXXXXXX-XXXX-4XXX-XXXX-XXXXXXXXXXXX -ceph_cluster_fsid: -# for backward compatible consideration, rbd_secret_uuid is only used for nova, -# cinder_rbd_secret_uuid is used for cinder -rbd_secret_uuid: -cinder_rbd_secret_uuid: - -################### -# Database options -#################### -database_password: - -#################### -# Docker options -#################### -# This should only be set if you require a password for your Docker registry -docker_registry_password: - -#################### -# OpenStack options -#################### -aodh_database_password: -aodh_keystone_password: - -barbican_database_password: -barbican_keystone_password: -barbican_p11_password: -barbican_crypto_key: -barbican_crypto_password: - -keystone_admin_password: -keystone_database_password: - -grafana_database_password: -grafana_admin_password: - -glance_database_password: -glance_keystone_password: - -gnocchi_database_password: -gnocchi_keystone_password: - -karbor_database_password: -karbor_keystone_password: -karbor_openstack_infra_id: - -kuryr_keystone_password: - -nova_database_password: -nova_api_database_password: 
-nova_keystone_password: - -placement_keystone_password: - -neutron_database_password: -neutron_keystone_password: -metadata_secret: - -cinder_database_password: -cinder_keystone_password: - -cloudkitty_database_password: -cloudkitty_keystone_password: - -panko_database_password: -panko_keystone_password: - -freezer_database_password: -freezer_keystone_password: - -sahara_database_password: -sahara_keystone_password: - -designate_database_password: -designate_pool_manager_database_password: -designate_keystone_password: -# This option must be UUID4 value in string format -designate_pool_id: -# This option must be HMAC-MD5 value in string format -designate_rndc_key: - -swift_keystone_password: -swift_hash_path_suffix: -swift_hash_path_prefix: - -heat_database_password: -heat_keystone_password: -heat_domain_admin_password: - -murano_database_password: -murano_keystone_password: - -ironic_database_password: -ironic_keystone_password: - -ironic_inspector_database_password: -ironic_inspector_keystone_password: - -magnum_database_password: -magnum_keystone_password: - -mistral_database_password: -mistral_keystone_password: - -trove_database_password: -trove_keystone_password: - -ceilometer_database_password: -ceilometer_keystone_password: - -watcher_database_password: -watcher_keystone_password: - -congress_database_password: -congress_keystone_password: - -rally_database_password: - -senlin_database_password: -senlin_keystone_password: - -solum_database_password: -solum_keystone_password: - -horizon_secret_key: -horizon_database_password: - -telemetry_secret_key: - -manila_database_password: -manila_keystone_password: - -octavia_database_password: -octavia_keystone_password: -octavia_ca_password: - -searchlight_keystone_password: - -tacker_database_password: -tacker_keystone_password: - -zun_database_password: -zun_keystone_password: - -memcache_secret_key: - -nova_ssh_key: - private_key: - public_key: - -kolla_ssh_key: - private_key: - public_key: - -keystone_ssh_key: 
- private_key: - public_key: - -bifrost_ssh_key: - private_key: - public_key: - -#################### -# Gnocchi options -#################### -gnocchi_project_id: -gnocchi_resource_id: -gnocchi_user_id: - -#################### -# RabbitMQ options -#################### -rabbitmq_password: -rabbitmq_cluster_cookie: - -#################### -# HAProxy options -#################### -haproxy_password: -keepalived_password: - -#################### -# Kibana options -#################### -kibana_password: - -#################### -# etcd options -#################### -etcd_cluster_token: diff --git a/helm/all_values.yaml b/helm/all_values.yaml deleted file mode 100755 index 0d6133b84..000000000 --- a/helm/all_values.yaml +++ /dev/null @@ -1,1345 +0,0 @@ -common: - global: - kolla: - all: - replicas: 1 - docker_registry: docker.io - docker_namespace: kolla - base_distro: centos - install_type: binary - container_config_directory: /var/lib/kolla/config_files - image_tag: 2.0.2 - fluentd_image_tag: 3.0.2 - kolla_toolbox_image_tag: 3.0.2 - kubernetes_entrypoint_image_tag: 4.0.0 - openvswitch_tcp: false - selector_key: kolla_controller - selector_value: "true" - image_pull_policy: IfNotPresent - container_config_directory: /var/lib/kolla/config_files - kubernetes_entrypoint: false - kolla_kubernetes_external_vip: 172.18.0.1 - kolla_kubernetes_external_subnet: 24 - kube_logger: true - prometheus_exporter: true - prometheus_port: 9101 - pv_enabled: true - -common-mariadb: - global: - kolla: - all: - database_host: mariadb - database_port: 3306 - -common-create-keystone-admin: - global: - kolla: - all: - keystone_admin_protocol: http - keystone_admin_svcname: keystone-admin - keystone_admin_port: 35357 - keystone_admin_project: admin - keystone_admin_username: admin - keystone_admin_domain_name: Default - region: RegionOne - -pod-http-termination: - global: - kolla: - all: - haproxy_python_termination: true - haproxy_image_tag: 3.0.2 - haproxy_prometheus_image_full: 
"prom/haproxy-exporter:v0.8.0" - grace_period_seconds: 172800 - -stateful-service: - global: - kolla: - all: - storage_provider: host - storage_provider_fstype: xfs - size_gb: 10 - ceph: - monitors: [] - pool: kollavolumes - secret_name: ceph-kolla - user: kolla - -test-ceph-initial-mon-job: - global: - kolla: - all: - storage_interface: eth0 - -test-ceph-mon-daemonset: - global: - kolla: - all: - storage_interface: eth0 - -test-ceph-initial-osd-job: - global: - kolla: - all: - storage_interface: eth0 - -keystone-admin-svc: - global: - kolla: - keystone: - all: - admin_port: 35357 - admin_node_port: 35357 - admin_port_external: false - admin_node_port_enabled: false - -keystone-internal-svc: - global: - kolla: - keystone: - all: - port: 5000 - node_port: 5000 - port_external: false - node_port_enabled: false - -keystone-public-svc: - global: - kolla: - keystone: - all: - port: 5000 - node_port: 5000 - port_external: true - node_port_enabled: false - -keystone-create-db-job: - global: - kolla: - keystone: - all: - database_name: "keystone" - database_user: "keystone" - -keystone-delete-db-job: - global: - kolla: - keystone: - all: - database_name: "keystone" - database_user: "keystone" - -keystone-create-endpoints-job: - global: - kolla: - all: - region: RegionOne - keystone: - all: - port: 5000 - admin_port: 35357 - admin_port_external: false - -keystone-api-deployment: - global: - kolla: - keystone: - all: - port: 5000 - admin_port: 35357 - -neutron-server-svc: - global: - kolla: - neutron: - server: - all: - port: 9696 - node_port: 9696 - port_external: true - node_port_enabled: false - -neutron-create-db-job: - global: - kolla: - neutron: - all: - database_name: "neutron" - database_user: "neutron" - -neutron-delete-db-job: - global: - kolla: - neutron: - all: - database_name: "neutron" - database_user: "neutron" - -neutron-create-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - 
keystone_user_domain: Default - keystone_user_role: admin - neutron: - all: - keystone_user_name: neutron - -neutron-delete-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - neutron: - all: - keystone_user_name: neutron - -neutron-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - neutron: - server: - all: - port: 9696 - -neutron-create-keystone-endpoint-internal-job: - global: - kolla: - neutron: - server: - all: - port: 9696 - create_keystone_endpoint: - all: - service: neutron-server - -neutron-create-keystone-endpoint-admin-job: - global: - kolla: - neutron: - server: - all: - port: 9696 - create_keystone_endpoint: - all: - service: neutron-server - -neutron-server-deployment: - global: - kolla: - neutron: - server: - all: - port: 9696 - -neutron-openvswitch-agent-daemonset: - type: network - global: - kolla: - all: - tunnel_interface: eth0 - neutron: - openvswitch_agent: - daemonset: - logger_configmap_name: neutron-openvswitch-agent-logger - -neutron-l3-agent-daemonset: - type: network - global: - kolla: - all: - tunnel_interface: eth0 - neutron: - all: - dvr: false - l3_agent: - daemonset: - logger_configmap_name: neutron-l3-agent-logger - -neutron-dhcp-agent-daemonset: - global: - kolla: - all: - tunnel_interface: eth0 - neutron: - dhcp_agent: - daemonset: - logger_configmap_name: neutron-dhcp-agent-logger - - -neutron-metadata-agent-daemonset: - type: network - global: - kolla: - neutron: - dhcp_agent: - daemonset: - logger_configmap_name: neutron-metadata-agent-logger - -openvswitch-ovsdb-daemonset: - type: network - -openvswitch-vswitchd-daemonset: - type: network - global: - kolla: - openvswitch: - all: - setup_bridge: true - add_port: false - ext_bridge_name: br-ex - ext_bridge_up: false - ext_interface_name: eth1 - -keepalived-daemonset: - global: - kolla: - keepalived: - all: - api_interface: 
br-ex - -rabbitmq-init-element-job: - global: - element_name: rabbitmq - kolla: - rabbitmq: - all: - cookie: - -rabbitmq-statefulset: - global: - element_name: rabbitmq - kolla: - rabbitmq: - all: - port: 5672 - mgmt_port: 15672 - statefulset: - dependencies: - jobs: - - rabbitmq-init-element - -rabbitmq-svc: - global: - element_name: rabbitmq - kolla: - rabbitmq: - all: - port: 5672 - node_port: 9696 - port_external: false - node_port_enabled: false - mgmt_port: 15672 - mgmt_node_port: 15672 - mgmt_port_external: false - mgmt_node_port_enabled: false - -memcached-svc: - global: - element_name: memcached - kolla: - memcached: - all: - enabled: true - port: 11211 - node_port: 11211 - port_external: false - node_port_enabled: false - -memcached-deployment: - global: - element_name: memcached - kolla: - memcached: - all: - enabled: true - port: 11211 - memcached_prometheus_image_full: "prom/memcached-exporter:v0.3.0" - -mariadb-init-element-job: - global: - element_name: mariadb - -mariadb-statefulset: - global: - element_name: mariadb - kolla: - mariadb: - all: - database_user: root - port: 3306 - -mariadb-svc: - global: - element_name: mariadb - kolla: - mariadb: - all: - port: 3306 - node_port: 3306 - port_external: false - node_port_enabled: false - -nova-api-svc: - global: - kolla: - nova: - api: - all: - port: 8774 - node_port: 8774 - port_external: true - node_port_enabled: false - -nova-placement-svc: - global: - kolla: - nova: - placement_api: - all: - port: 8780 - node_port: 8780 - port_external: true - node_port_enabled: false -nova-metadata-svc: - global: - kolla: - nova: - metadata: - all: - port: 8775 - node_port: 8775 - port_external: false - node_port_enabled: false - -nova-novncproxy-svc: - global: - kolla: - nova: - novncproxy: - all: - port: 6080 - node_port: 6080 - port_external: false - node_port_enabled: false - -nova-create-db-job: - global: - kolla: - nova: - all: - database_name: "nova" - database_user: "nova" - -nova-delete-db-job: - global: 
- kolla: - nova: - all: - database_name: "nova" - database_user: "nova" - -nova-api-create-db-job: - global: - kolla: - nova: - api: - all: - database_name: "nova_api" - database_user: "nova_api" - -nova-api-delete-db-job: - global: - kolla: - nova: - api: - all: - database_name: "nova_api" - database_user: "nova_api" - -nova-api-create-simple-cell-job: - global: - kolla: - nova: - api: - create_cell: - all: - cell_wait_compute: true - cell_wait_compute_sleep: 5 - -nova-cell0-create-db-job: - global: - kolla: - nova: - cell0: - all: - database_name: "nova_cell0" - database_user: "nova" - -nova-cell0-delete-db-job: - global: - kolla: - nova: - cell0: - all: - database_name: "nova_cell0" - database_user: "nova" - -nova-api-deployment: - global: - kolla: - nova: - api: - all: - port: 8774 - metadata: - all: - port: 8775 - -nova-placement-deployment: - global: - kolla: - nova: - placement_api: - all: - port: 8780 - -nova-novncproxy-deployment: - global: - kolla: - nova: - novncproxy: - all: - port: 6080 - -nova-compute-daemonset: - global: - kolla: - all: - tunnel_interface: eth0 - ceph_backend: false - nova: - novncproxy: - all: - host: nova-novncproxy - port: 6080 - compute: - all: - selector_key: kolla_compute - libvirt_tcp: false - -nova-libvirt-daemonset: - global: - kolla: - all: - ceph_backend: false - nova: - compute: - all: - selector_key: kolla_compute - libvirt_tcp: false - -nova-create-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - nova: - all: - keystone_user_name: nova - -nova-delete-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - nova: - all: - keystone_user_name: nova - -nova-placement-create-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - 
keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - nova: - placement_api: - all: - keystone_user_name: placement - -nova-placement-delete-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - nova: - placement_api: - all: - keystone_user_name: placement - -nova-create-keystone-endpoint-admin-job: - global: - kolla: - nova: - api: - all: - port: 8774 - create_keystone_endpoint: - all: - service: nova-api - -nova-create-keystone-endpoint-internal-job: - global: - kolla: - nova: - api: - all: - port: 8774 - create_keystone_endpoint: - all: - service: nova-api - -nova-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - nova: - api: - all: - port: 8774 - -nova-placement-create-keystone-endpoint-admin-job: - global: - kolla: - nova: - placement_api: - all: - port: 8780 - create_keystone_endpoint: - all: - service: placement-api - -nova-placement-create-keystone-endpoint-internal-job: - global: - kolla: - nova: - placement_api: - all: - port: 8780 - create_keystone_endpoint: - all: - service: placement-api - -nova-placement-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - nova: - placement_api: - all: - port: 8780 - -heat-api-svc: - global: - kolla: - heat: - api: - all: - port: 8004 - node_port: 8004 - port_external: false - node_port_enabled: false - -heat-cfn-api-svc: - global: - kolla: - heat: - api_cfn: - all: - port: 8000 - node_port: 8004 - port_external: false - node_port_enabled: false - -heat-api-deployment: - global: - kolla: - heat: - api: - all: - port: 8004 - -heat-api-cfn-deployment: - global: - kolla: - heat: - api_cfn: - all: - port: 8000 - -heat-delete-db-job: - global: - kolla: - heat: - all: - database_name: "heat" - database_user: "heat" - -heat-create-keystone-user-job: - global: - kolla: - all: - 
keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - heat: - all: - keystone_user_name: heat - -heat-delete-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - heat: - all: - keystone_user_name: heat - -heat-create-keystone-endpoint-admin-job: - global: - kolla: - heat: - api: - all: - port: 8004 - create_keystone_endpoint: - all: - service: heat-api - -heat-create-keystone-endpoint-internal-job: - global: - kolla: - heat: - api: - all: - port: 8004 - create_keystone_endpoint: - all: - service: heat-api - -heat-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - heat: - api: - all: - port: 8004 - -heat-cfn-create-keystone-endpoint-admin-job: - global: - kolla: - heat: - api_cfn: - all: - port: 8000 - create_keystone_endpoint: - all: - service: heat-api - -heat-cfn-create-keystone-endpoint-internal-job: - global: - kolla: - heat: - api_cfn: - all: - port: 8000 - create_keystone_endpoint: - all: - service: heat-api - -heat-cfn-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - heat: - api_cfn: - all: - port: 8000 - -heat-create-db-job: - global: - kolla: - heat: - all: - database_name: "heat" - database_user: "heat" - -glance-api-svc: - global: - kolla: - glance: - api: - all: - port: 9292 - node_port: 9292 - port_external: false - node_port_enabled: false - -glance-registry-svc: - global: - kolla: - glance: - registry: - all: - port: 9191 - node_port: 9191 - port_external: false - node_port_enabled: false - -cinder-create-db-job: - global: - kolla: - cinder: - all: - database_name: "cinder" - database_user: "cinder" - -cinder-delete-db-job: - global: - kolla: - cinder: - all: - database_name: "cinder" - database_user: "cinder" - -cinder-api-svc: - global: - kolla: - cinder: - api: - all: - port: 8776 - 
node_port: 8776 - port_external: false - node_port_enabled: false - -cinder-api-deployment: - global: - kolla: - cinder: - api: - all: - port: 8776 - -horizon-svc: - global: - kolla: - horizon: - all: - port: 80 - node_port: 80 - port_external: false - node_port_enabled: false - -glance-create-db-job: - global: - kolla: - glance: - all: - database_name: "glance" - database_user: "glance" - -glance-delete-db-job: - global: - kolla: - glance: - all: - database_name: "glance" - database_user: "glance" - -glance-manage-db-job: - global: - kolla: - all: - ceph_backend: false - glance: - api: - all: - pvc_name: glance - -glance-create-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - glance: - all: - keystone_user_name: glance - -glance-delete-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - glance: - all: - keystone_user_name: glance - -glance-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - glance: - api: - all: - port: 9292 - -glance-create-keystone-endpoint-internal-job: - global: - kolla: - glance: - api: - all: - port: 9292 - create_keystone_endpoint: - all: - service: glance-api - -glance-create-keystone-endpoint-admin-job: - global: - kolla: - glance: - api: - all: - port: 9292 - create_keystone_endpoint: - all: - service: glance-api - -glance-api-deployment: - global: - kolla: - all: - ceph_backend: false - glance: - api: - all: - port: 9292 - pvc_name: glance - -glance-registry-deployment: - global: - kolla: - glance: - registry: - all: - port: 9191 - -cinder-create-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - cinder: - all: - 
keystone_user_name: cinder - -cinder-delete-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - cinder: - all: - keystone_user_name: cinder - -cinder-create-keystone-endpoint-admin-job: - global: - kolla: - cinder: - api: - all: - port: 8776 - create_keystone_endpoint: - all: - service: cinder-api - -cinder-create-keystone-endpoint-adminv2-job: - global: - kolla: - cinder: - api: - all: - port: 8776 - create_keystone_endpoint: - all: - service: cinder-api - -cinder-create-keystone-endpoint-adminv3-job: - global: - kolla: - cinder: - api: - all: - port: 8776 - create_keystone_endpoint: - all: - service: cinder-api - -cinder-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - cinder: - api: - all: - port: 8776 - -cinder-create-keystone-endpoint-publicv2-job: - global: - kolla: - all: - external_vip: - cinder: - api: - all: - port: 8776 - -cinder-create-keystone-endpoint-publicv3-job: - global: - kolla: - all: - external_vip: - cinder: - api: - all: - port: 8776 - -cinder-create-keystone-endpoint-internal-job: - global: - kolla: - cinder: - api: - all: - port: 8776 - create_keystone_endpoint: - all: - service: cinder-api - -cinder-create-keystone-endpoint-internalv2-job: - global: - kolla: - cinder: - api: - all: - port: 8776 - create_keystone_endpoint: - all: - service: cinder-api - -cinder-create-keystone-endpoint-internalv3-job: - global: - kolla: - cinder: - api: - all: - port: 8776 - create_keystone_endpoint: - all: - service: cinder-api - -tgtd-daemonset: - global: - kolla: - all: - storage_interface: eth0 - -helm-repo-svc: - global: - kolla: - helm-repo: - all: - port: 8879 - -helm-repo-deployment: - global: - kolla: - helm-repo: - all: - port: 8879 - address: 0.0.0.0 - pvc_name: "helm-repo" - persistent: true - method: "git" - url: "https://github.com/openstack/kolla-kubernetes" - -ironic-api-svc: - 
global: - kolla: - ironic: - api: - all: - port: 6385 - port_external: true - node_port_enabled: false - ironic_provision: true - ironic_provision_vip: 172.21.0.10 - -ironic-inspector-svc: - global: - kolla: - ironic: - inspector: - all: - port: 5050 - port_external: true - node_port_enabled: false - ironic_provision: true - -ironic-api-create-db-job: - global: - kolla: - ironic: - api: - all: - database_name: "ironic" - database_user: "ironic" - -ironic-api-delete-db-job: - global: - kolla: - ironic: - api: - all: - database_name: "ironic" - database_user: "ironic" - -ironic-api-manage-db-job: - global: - kolla: - ironic: - api: - all: - database_name: "ironic" - database_user: "ironic" - -ironic-api-deployment: - global: - kolla: - ironic: - api: - all: - port: 6385 - - -ironic-conductor-daemonset: - global: - kolla: - ironic: - all: - ironic_api_ip: 172.21.0.10 - ironic_provision_cidr: 172.21.0.0/24 - -ironic-inspector-deployment: - global: - kolla: - ironic: - inspector: - all: - port: 5050 - ironic_interface: net2 - initramfs_url: - kernel_url: - ironic_dhcp_range: - ironic_inspection_cidr: 172.22.0.0/24 - inspect_lldp: false - -ironic-create-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - ironic: - all: - keystone_user_name: ironic - -ironic-delete-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - ironic: - all: - keystone_user_name: ironic - -ironic-inspector-create-keystone-user-job: - global: - kolla: - all: - keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - ironic: - inspector: - all: - keystone_user_name: ironic-inspector - -ironic-inspector-delete-keystone-user-job: - global: - kolla: - all: - 
keystone_user_project: service - keystone_user_project_domain: Default - keystone_user_domain: Default - keystone_user_role: admin - ironic: - inspector: - all: - keystone_user_name: ironic-inspector - -ironic-api-create-keystone-endpoint-admin-job: - global: - kolla: - ironic: - api: - all: - port: 6385 - create_keystone_endpoint: - all: - service: ironic-api - -ironic-api-create-keystone-endpoint-internal-job: - global: - kolla: - ironic: - api: - all: - port: 6385 - create_keystone_endpoint: - all: - service: ironic-api - -ironic-api-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - ironic: - api: - all: - port: 6385 - create_keystone_endpoint: - all: - service: ironic-api - -ironic-inspector-create-keystone-endpoint-admin-job: - global: - kolla: - ironic: - inspector: - all: - port: 5050 - create_keystone_endpoint: - all: - service: ironic-inspector - -ironic-inspector-create-keystone-endpoint-internal-job: - global: - kolla: - ironic: - inspector: - all: - port: 5050 - create_keystone_endpoint: - all: - service: ironic-inspector - -ironic-inspector-create-keystone-endpoint-public-job: - global: - kolla: - all: - external_vip: - ironic: - inspector: - all: - port: 5050 - create_keystone_endpoint: - all: - service: ironic-inspector - -ironic-inspector-create-db-job: - global: - kolla: - ironic: - inspector: - all: - database_name: "ironic_inspector" - database_user: "ironic_inspector" - -ironic-inspector-delete-db-job: - global: - kolla: - ironic: - inspector: - all: - database_name: "ironic_inspector" - database_user: "ironic_inspector" - -ironic-inspector-manage-db-job: - global: - kolla: - ironic: - inspector: - all: - database_name: "ironic_inspector" - database_user: "ironic_inspector" diff --git a/helm/compute-kits/compute-kit/Chart.yaml b/helm/compute-kits/compute-kit/Chart.yaml deleted file mode 100644 index eb8687659..000000000 --- a/helm/compute-kits/compute-kit/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -name: compute-kit 
-version: 0.7.0-1 -description: Helm chart for openstack compute kit deployment -keywords: - - openstack - - mariadb - - rabbitmq - - memcached - - openvswitch - - keystone - - glance - - neutron - - cinder-control - - cinder-volume-ceph - - nova-control - - nova-compute - - horizon -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/compute-kits/compute-kit/requirements.yaml b/helm/compute-kits/compute-kit/requirements.yaml deleted file mode 100644 index afded4f67..000000000 --- a/helm/compute-kits/compute-kit/requirements.yaml +++ /dev/null @@ -1,40 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 - - name: mariadb - repository: file://../../service/mariadb - version: 0.7.0-1 - - name: rabbitmq - repository: file://../../service/rabbitmq - version: 0.7.0-1 - - name: memcached - repository: file://../../service/memcached - version: 0.7.0-1 - - name: keystone - repository: file://../../service/keystone - version: 0.7.0-1 - - name: glance - repository: file://../../service/glance - version: 0.7.0-1 - - name: openvswitch - repository: file://../../service/openvswitch - version: 0.7.0-1 - - name: neutron - repository: file://../../service/neutron - version: 0.7.0-1 - - name: cinder-control - repository: file://../../service/cinder-control - version: 0.7.0-1 - - name: cinder-volume-lvm - repository: file://../../service/cinder-volume-lvm - version: 0.7.0-1 - - name: nova-compute - repository: file://../../service/nova-compute - version: 0.7.0-1 - - name: nova-control - repository: file://../../service/nova-control - version: 0.7.0-1 - - name: horizon - repository: file://../../service/horizon - version: 0.7.0-1 diff --git a/helm/compute-kits/compute-kit/values.yaml b/helm/compute-kits/compute-kit/values.yaml deleted file mode 100644 index cf7997865..000000000 --- 
a/helm/compute-kits/compute-kit/values.yaml +++ /dev/null @@ -1,13 +0,0 @@ -global: - kolla: - all: - kubernetes_entrypoint: true - mariadb: - all: - element_name: mariadb - memcached: - all: - element_name: memcached - rabbitmq: - all: - element_name: rabbitmq diff --git a/helm/kolla-common/.helmignore b/helm/kolla-common/.helmignore deleted file mode 100644 index 0277311d0..000000000 --- a/helm/kolla-common/.helmignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -.git -*.rst -*.RST diff --git a/helm/kolla-common/Chart.yaml b/helm/kolla-common/Chart.yaml deleted file mode 100644 index 93532381d..000000000 --- a/helm/kolla-common/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: kolla-common -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - kolla - - common -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/kolla-common/templates/_common_api_apache_deployment.yaml b/helm/kolla-common/templates/_common_api_apache_deployment.yaml deleted file mode 100644 index 32c9b558a..000000000 --- a/helm/kolla-common/templates/_common_api_apache_deployment.yaml +++ /dev/null @@ -1,86 +0,0 @@ -{{- define "common_api_apache_deployment" }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" .searchPath "Values" .Values ) }} -{{- $containerConfigDirectory := include "kolla_val_get_str" (dict "key" "container_config_directory" "searchPath" .searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" .searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" .searchPath "Values" .Values ) }} -{{- $replicas := include "kolla_val_get_str" (dict "key" "replicas" "searchPath" .searchPath "Values" .Values ) }} -{{- $port := include 
"kolla_val_get_str" (dict "key" "port" "searchPath" .searchPath "Values" .Values ) }} -{{- $adminPort := include "kolla_val_get_str" (dict "key" "admin_port" "searchPath" .searchPath "Values" .Values ) }} -{{- $gracePeriodSeconds := include "kolla_val_get_str" (dict "key" "grace_period_seconds" "searchPath" .searchPath "Values" .Values ) }} -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: {{ .resourceName }} -spec: - replicas: {{ $replicas }} - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - service: {{ .serviceName }} - type: {{ .serviceType }} - annotations: - kolla_upgrade: minor_rolling_safe -{{- if .extraAnnotations }} -{{- include .extraAnnotations . | indent 8 }} -{{- end }} - spec: -#You've got 2 days to drain or figure out why it won't. - terminationGracePeriodSeconds: {{ $gracePeriodSeconds }} - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} - initContainers: -{{- include "common_dependency_container" . | indent 8 }} -{{- if .initContainers }} -{{- include .initContainers . | indent 8 }} -{{- end }} - containers: - - name: main - image: {{ .imageFull | quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - lifecycle: - preStop: - exec: - command: - - apachectl -k graceful-stop - volumeMounts: -{{ include "common_volume_mounts" . | indent 12 }} - - mountPath: /var/lib/kolla-kubernetes/event - name: kolla-kubernetes-events - - mountPath: {{ $containerConfigDirectory }} - name: service-configmap -{{- if .extraVolumeMounts }} -{{- include .extraVolumeMounts . | indent 12 }} -{{- end }} -{{- if .privileged }} - securityContext: - privileged: true -{{- end }} - env: -{{ include "common_env_vars" . | indent 12 }} - ports: - - containerPort: {{ $port }} - name: {{ .portName }} -{{- if eq .resourceName "keystone" }} - - containerPort: {{ $adminPort }} - name: keystone-admin -{{- end }} -{{- include "common_containers" . 
| indent 8 }} - volumes: -{{- include "common_volumes" . | indent 8 }} - - name: kolla-kubernetes-events - emptyDir: {} - - name: service-configmap - configMap: - name: {{ .resourceName }} -{{- if .extraConfigmapConfig }} -{{- include .extraConfigmapConfig . | indent 12 }} -{{- end }} -{{- if .extraVolume }} -{{- include .extraVolume . | indent 8 }} -{{- end }} -{{- end }} diff --git a/helm/kolla-common/templates/_common_api_python_deployment.yaml b/helm/kolla-common/templates/_common_api_python_deployment.yaml deleted file mode 100644 index cb5d08a9f..000000000 --- a/helm/kolla-common/templates/_common_api_python_deployment.yaml +++ /dev/null @@ -1,205 +0,0 @@ -{{- define "common_api_python_deployment" }} -{{- $replicas := include "kolla_val_get_str" (dict "key" "replicas" "searchPath" .searchPath "Values" .Values ) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" .searchPath "Values" .Values ) }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" .searchPath "Values" .Values ) }} -{{- $containerConfigDirectory := include "kolla_val_get_str" (dict "key" "container_config_directory" "searchPath" .searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" .searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" .searchPath "Values" .Values ) }} -{{- $novaMetadataSearchPath := ":global.kolla.nova.metadata.deployment:global.kolla.nova.metadata.all:global.kolla.nova.all:global.kolla.all" }} -{{- $novaMetadataPort := include "kolla_val_get_str" (dict "key" "port" "searchPath" $novaMetadataSearchPath "Values" .Values ) }} -{{- $gracePeriodSeconds := include "kolla_val_get_str" (dict "key" "grace_period_seconds" "searchPath" .searchPath "Values" .Values ) }} -{{- $localVals := dict }} -{{- $c := dict "searchPath" .searchPath "key" "haproxy_python_termination" 
"retDict" $localVals "retKey" "haproxy" "Values" .Values }} -{{- include "kolla_val_get_raw" $c }} -{{- $c := dict "searchPath" .searchPath "key" "prometheus_exporter" "retDict" $localVals "retKey" "prometheus" "Values" .Values }} -{{- include "kolla_val_get_raw" $c }} -{{- $c := dict "searchPath" .searchPath "key" "prometheus_port" "retDict" $localVals "retKey" "prometheusPort" "Values" .Values }} -{{- include "kolla_val_get_raw" $c }} -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: {{ .resourceName }} -spec: - replicas: {{ $replicas }} - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - service: {{ .serviceName }} - type: {{ .serviceType }} - annotations: -{{- if $localVals.haproxy }} - kolla_upgrade: minor_rolling_safe -{{- if $localVals.prometheus }} - "prometheus.io/port": {{ $localVals.prometheusPort | quote }} - "prometheus.io/scrape": "true" -{{- end }} -{{- end }} - spec: -#You've got 2 days to drain or figure out why it won't. - terminationGracePeriodSeconds: {{ $gracePeriodSeconds }} - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} - initContainers: -#FIXME once all services are converted, bind_host can be dropped here and changed to default to 127.0.0.1 instead of 0.0.0.0. -# The init container overrides the listen address and port to ensure it does not conflict with haproxy and prevent -# other containers from directly accessing the service -{{- include "common_dependency_container" . | indent 8 }} - - name: update-config - image: {{ include "kolla_toolbox_image_full" . 
| quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - command: - - /bin/sh - - -c - - | - cp -a /srv/configmap/..data/* /srv/pod-main-config/; -{{- if eq .resourceName "kibana" }} - IP=\"127.0.0.1\"; - PORT=\"8080\"; - sed -i 's|^elasticsearch.url:.*|elasticsearch.url: \"http://elasticsearch:9200\"|g' /srv/pod-main-config/kibana.yml; - sed -i 's|^server.host:.*|server.host: \"'$IP'\"|g' /srv/pod-main-config/kibana.yml; - sed -i 's|^server.port:.*|server.port: \"'$PORT'\"|g' /srv/pod-main-config/kibana.yml; -{{- else }} - CONF=/srv/pod-main-config/{{ .configFileName }}; -{{- if $localVals.haproxy }} -{{- if eq .resourceName "nova-api" }} - crudini --set $CONF DEFAULT metadata_listen 127.0.0.1; - crudini --set $CONF DEFAULT metadata_listen_port 8081; -{{- end }} - crudini --set $CONF {{ .configSectionName }} {{ .configListenHostParameter }} 127.0.0.1; - crudini --set $CONF {{ .configSectionName }} {{ .configListenPortParameter }} 8080; -{{- end }} -{{- end }} - volumeMounts: - - name: service-configmap - mountPath: /srv/configmap - - name: pod-main-config - mountPath: /srv/pod-main-config -{{- if .initContainers }} -{{- include .initContainers . | indent 8 }} -{{- end }} - containers: -{{- if $localVals.haproxy }} - - name: haproxy - image: {{ include "haproxy_image_full" . | quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - command: - - /bin/bash - - -c - - | - kolla_start; - touch /var/lib/kolla-kubernetes/event/shutdown; - readinessProbe: - httpGet: - path: {{ .checkPath }} - port: {{ $port }} - initialDelaySeconds: 5 - timeoutSeconds: 5 - lifecycle: - preStop: - exec: -#FIXME move script into haproxy container -#NOTE this only works if you arn't doing a haproxy reconfigure too. -#But shouldn't ever have to do that in a setup like this. 
- command: - - /bin/bash - - -c - - | - kill -USR1 $(/values.yaml - -The user could override the setting with their own values.yaml in 3 different -ways: - * global.kolla.all.enable_logging = True - affects all kolla packages. - * global.kolla.mariadb.enable_logging = True - True only for mariadb related - microservices. - * enable_logging=True for the specific microservice. - -kolla_val_get_str -================= - -Definition Description: - Takes a search path of vals to look for, and a key and returns the first - defined value. - - *note* This is generally the function you want to use unless you must retain - the datatype for some reason. - -Inputs: - Values - a dictionary tree to search. - searchPath - a dictionary or a string with ':' seperated doted paths to - crawl through Values - key - an optional string that is appended to each searchPath item. -Outputs: - the string of the value requested -Example: - {{- $valPath := ":global.kolla.a:global.kolla.b:global.kolla.c" }} - {{- $c := dict "searchPath" $valPath "key" "my_setting" "Values" .Values }} - {{- include "kolla_val_get_str $c }} - - -kolla_val_get_raw -=================== - -Definition Description: - Takes a search path of vals to look for, and a key and returns the first - defined value. - - *note* Use this if you need to maintain the datatype. - -Inputs: - Values - a dictionary tree to search. - searchPath - a dictionary or a string with ':' seperated doted paths to - crawl through Values - key - an optional string that is appended to each searchPath item. - retDict - Dictionary to store returned value in. - retKey - Key in the retDict to store the returned value in. -Outputs: - retDict. is set to the first found value. 
-Example: - {{- $valPath := tuple "" "global.kolla.a" "global.kolla.b" "global.kolla.c" }} - {{- $localVals := dict }} - {{- $c := dict "searchPath" $valPath "key" "enable_some_feature" "retDict" $localVals "retKey" "bar" "Values" .Values }} - {{- include "kolla_val_get_raw" $c }} - {{- if $localVals.bar }} - someone set enable_some_feature to true! - {{- end }} - - -kolla_val_get_single -==================== - -Definition Description: - Takes a doted path of a val to look for, and returns the value. - - *note* You probably should be using one of the other functions instead. - -Inputs: - Values - a dictionary tree to search. - key - a string with the doted path to search for - retDict - Dictionary to store returned value in. - retKey - Key in the retDict to store the returned value in. -Outputs: - retDict. is set to the found value. - retval in the calling dictionary is set to the found value. -Example: - {{- $localVals := dict }} - {{- $c := dict "key" "global.kolla.a.enable_some_feature" "retDict" $localVals "retKey" "bar" "Values" .Values }} - {{- include "kolla_val_get_single" $c }} - {{- if $localVals.bar }} - someone set enable_some_feature to true! - {{- end }} diff --git a/helm/kolla-common/templates/_common_val_get.yaml b/helm/kolla-common/templates/_common_val_get.yaml deleted file mode 100644 index f51cae9d5..000000000 --- a/helm/kolla-common/templates/_common_val_get.yaml +++ /dev/null @@ -1,56 +0,0 @@ -{{- define "kolla_val_get_str" }} -{{- $c := dict "searchPath" .searchPath "retKey" "retval" "Values" .Values }} -{{- $_ := set $c "retDict" $c }} -{{- if hasKey . "key" }} -{{- $_ := set $c "key" .key }} -{{- end }} -{{- include "kolla_val_get_raw" $c }} -{{- if hasKey $c "retval" }} -{{- $c.retval }} -{{- end }} -{{- end }} - -{{- define "kolla_val_get_raw" }} -{{- $env := . 
}} -{{- $localVals := dict "searchPath" .searchPath "found" false }} -{{- if eq (typeOf .searchPath) "string" }} -{{- $_ := set $localVals "searchPath" (split ":" .searchPath) }} -{{- end }} -{{- range $localVals.searchPath }} -{{- if not $localVals.found }} -{{- $loopVals := dict "key" . }} -{{- if hasKey $env "key" }} -{{- if eq . "" }} -{{- $_ := set $loopVals "key" $env.key }} -{{- else }} -{{- $_ := set $loopVals "key" (printf "%s.%s" . $env.key) }} -{{- end }} -{{- end }} -{{- $c := dict "key" $loopVals.key "retDict" $env.retDict "retKey" $env.retKey "Values" $env.Values }} -{{- include "kolla_val_get_single" $c }} -{{- if hasKey $c "retval" }} -{{- $_ := set $localVals "found" true }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} - -{{- define "kolla_val_get_single" }} -{{- $localVals := dict }} -{{- $env := . }} -{{- $_ := set $localVals "item" $env.Values }} -{{- $_ := set $localVals "allFound" true }} -{{- range (split "." $env.key) }} -{{- if and ($localVals.allFound) (hasKey $localVals.item .) }} -{{- $_ := set $localVals "item" (index $localVals.item .) }} -{{- else }} -{{- $_ := set $localVals "allFound" false }} -{{- end }} -{{- end }} -{{- if $localVals.allFound }} -{{- $_ := set $env "retval" $localVals.item }} -{{- end }} -{{- if and $localVals.allFound (hasKey $env "retDict") (hasKey $env "retKey") }} -{{- $_ := set $env.retDict $env.retKey $localVals.item }} -{{- end }} -{{- end }} diff --git a/helm/microservice/ceph-admin-pod/Chart.yaml b/helm/microservice/ceph-admin-pod/Chart.yaml deleted file mode 100644 index 5819fbe4d..000000000 --- a/helm/microservice/ceph-admin-pod/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: ceph-admin-pod -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - ceph - - admin -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ceph-admin-pod/requirements.yaml b/helm/microservice/ceph-admin-pod/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ceph-admin-pod/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ceph-admin-pod/templates/ceph-admin-pod.yaml b/helm/microservice/ceph-admin-pod/templates/ceph-admin-pod.yaml deleted file mode 100644 index 4dfa88b16..000000000 --- a/helm/microservice/ceph-admin-pod/templates/ceph-admin-pod.yaml +++ /dev/null @@ -1,64 +0,0 @@ -{{- $resourceName := "ceph-admin" }} -{{- $searchPath := ":global.kolla.ceph.admin.pod:global.kolla.ceph.admin.all:global.kolla.ceph.all:global.kolla.all" }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "ceph-mon" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" $searchPath "Values" .Values ) }} -{{- $netHostTrue := true }} -{{- $podTypeBootstrap := false }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" false "resourceName" $resourceName "Values" .Values "Release" .Release "searchPath" $searchPath }} -apiVersion: v1 -kind: Pod -metadata: - name: ceph-admin -spec: - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: True - hostPID: True - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} - restartPolicy: Never - containers: - - image: "{{ $imageFull }}" - 
imagePullPolicy: {{ $imagePullPolicy | quote }} - name: main - command: - - /bin/bash - - -xec - - | - modprobe rbd; - while true; do sleep 1000; done - securityContext: - privileged: true - volumeMounts: -{{- include "common_volume_mounts" $env | indent 8 }} - - mountPath: /etc/ceph/ceph.conf - name: ceph-conf - readOnly: true - subPath: ceph.conf - - mountPath: /etc/ceph/ceph.client.admin.keyring - subPath: data - name: ceph-client-admin-keyring - - mountPath: /dev - name: host-dev - - mountPath: /lib/modules - name: lib-modules - volumes: -{{- include "common_volumes" . | indent 4 }} - - name: ceph-conf - configMap: - name: ceph-conf - - name: host-dev - hostPath: - path: /dev - - name: ceph-client-admin-keyring - secret: - secretName: ceph-client-admin-keyring - - name: lib-modules - hostPath: - path: /lib/modules -{{- end }} diff --git a/helm/microservice/ceph-rbd-daemonset/Chart.yaml b/helm/microservice/ceph-rbd-daemonset/Chart.yaml deleted file mode 100644 index 790c1552c..000000000 --- a/helm/microservice/ceph-rbd-daemonset/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: ceph-rbd-daemonset -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - ceph - - rbd -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ceph-rbd-daemonset/requirements.yaml b/helm/microservice/ceph-rbd-daemonset/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ceph-rbd-daemonset/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ceph-rbd-daemonset/templates/ceph-rbd-daemonset.yaml b/helm/microservice/ceph-rbd-daemonset/templates/ceph-rbd-daemonset.yaml deleted file mode 100644 index 921702f3d..000000000 --- a/helm/microservice/ceph-rbd-daemonset/templates/ceph-rbd-daemonset.yaml +++ /dev/null @@ -1,92 +0,0 @@ -{{- $resourceName := "ceph-rbd" }} -{{- $searchPath := ":global.kolla.ceph.rbd.daemonset:global.kolla.ceph.rbd.all:global.kolla.ceph.all:global.kolla.all" }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "ceph-mon" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" $searchPath "Values" .Values ) }} -{{- $netHostTrue := true }} -{{- $podTypeBootstrap := false }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" false "resourceName" $resourceName "Values" .Values "Release" .Release "searchPath" $searchPath }} -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: ceph-rbd - labels: - component: ceph - system: rbd -spec: - template: - metadata: - labels: - component: ceph - system: rbd - spec: - dnsPolicy: ClusterFirstWithHostNet - 
hostNetwork: True - hostPID: True - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} - containers: - - image: "{{ $imageFull }}" - imagePullPolicy: {{ $imagePullPolicy | quote }} - name: main - securityContext: - privileged: true - command: - - /bin/bash - - -xec - - | - modprobe rbd; - if [ -x /host/rbd ]; then - grep label=io.kubernetes.pod.namespace /host/rbd > /dev/null && rm -f /host/rbd - fi - if [ ! -x /host/rbd ]; then - echo IyEvYmluL2Jhc2gKCg== | base64 -d > /host/rbd; - echo 'ID=$(docker ps -q -f label=io.kubernetes.pod.namespace='$POD_NAMESPACE' -f label=io.kubernetes.pod.name='$POD_NAME' -f label=io.kubernetes.container.name=main);' >> /host/rbd; - echo 'docker exec --privileged -u 0 -i $ID /usr/bin/rbd "$@"' >> /host/rbd; - chmod +x /host/rbd; - fi; - while true; do sleep 1000; done - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: -{{- include "common_volume_mounts" $env | indent 12 }} - - mountPath: /host/ - name: host-usr-bin - - mountPath: /dev - name: host-dev - - mountPath: /sys - name: host-sys - - mountPath: /etc - name: host-etc - - mountPath: /lib/modules - name: lib-modules - volumes: -{{- include "common_volumes" . 
| indent 8 }} - - name: host-usr-bin - hostPath: - path: /usr/bin - - name: host-dev - hostPath: - path: /dev - - name: host-sys - hostPath: - path: /sys - - name: host-etc - hostPath: - path: /etc - - name: lib-modules - hostPath: - path: /lib/modules -{{- end }} diff --git a/helm/microservice/cinder-api-deployment/Chart.yaml b/helm/microservice/cinder-api-deployment/Chart.yaml deleted file mode 100644 index cfdd486fa..000000000 --- a/helm/microservice/cinder-api-deployment/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-api-deployment -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for the cinder api pod -keywords: - - openstack - - cinder - - cinder-api -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-api-deployment/requirements.yaml b/helm/microservice/cinder-api-deployment/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-api-deployment/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-api-deployment/templates/cinder-api.yaml b/helm/microservice/cinder-api-deployment/templates/cinder-api.yaml deleted file mode 100644 index 60d38931f..000000000 --- a/helm/microservice/cinder-api-deployment/templates/cinder-api.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.api.deployment:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "cinder-api" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $resourceName := "cinder-api" }} -{{- $netHostTrue := false 
}} -{{- $podTypeBootstrap := false }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "api" }} -{{- $configFileName := "cinder.conf" }} -{{- $configSectionName := "DEFAULT" }} -{{- $configListenHostParameter := "osapi_volume_listen" }} -{{- $configListenPortParameter := "osapi_volume_listen_port" }} -{{- $portName := "cinder-api" }} -{{- $checkPath := "/" }} -{{- $privileged := false }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "configFileName" $configFileName "configSectionName" $configSectionName "configListenHostParameter" $configListenHostParameter "configListenPortParameter" $configListenPortParameter "portName" $portName "imageFull" $imageFull "checkPath" $checkPath "privileged" $privileged "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_api_python_deployment" $env }} -{{- end }} diff --git a/helm/microservice/cinder-api-svc/Chart.yaml b/helm/microservice/cinder-api-svc/Chart.yaml deleted file mode 100644 index 5b33c9d13..000000000 --- a/helm/microservice/cinder-api-svc/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-api-svc -version: 0.7.0-1 -description: Helm chart for the cinder-api service -keywords: - - openstack - - cinder - - api -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-api-svc/requirements.yaml b/helm/microservice/cinder-api-svc/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-api-svc/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-api-svc/templates/cinder-api-svc.yaml b/helm/microservice/cinder-api-svc/templates/cinder-api-svc.yaml deleted file mode 100644 index abb64ce29..000000000 --- a/helm/microservice/cinder-api-svc/templates/cinder-api-svc.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.api.svc:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $elementPort := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $rootElementName := .Values.element_name | default "cinder" }} -{{- $elementServiceType := .Values.element_service_type | default "api" }} -{{- $elementService := $rootElementName }} -{{- $elementName := printf "%s-%s" $elementService $elementServiceType }} -{{- with $env := dict "element_port" $elementPort "element_name" $elementName "element_service" $elementService "element_service_type" $elementServiceType "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_svc" $env }} -{{- end }} diff --git a/helm/microservice/cinder-backup-statefulset/Chart.yaml b/helm/microservice/cinder-backup-statefulset/Chart.yaml deleted file mode 100644 index 1d75b97ff..000000000 --- a/helm/microservice/cinder-backup-statefulset/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-backup-statefulset -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for the cinder backup pod -keywords: - - openstack - - cinder - - cinder-backup -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or 
PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-backup-statefulset/requirements.yaml b/helm/microservice/cinder-backup-statefulset/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-backup-statefulset/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-backup-statefulset/templates/cinder-backup.yaml b/helm/microservice/cinder-backup-statefulset/templates/cinder-backup.yaml deleted file mode 100644 index 0303b9861..000000000 --- a/helm/microservice/cinder-backup-statefulset/templates/cinder-backup.yaml +++ /dev/null @@ -1,81 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.backup.statefulset:global.kolla.cinder.backup.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" $searchPath "Values" .Values ) }} -{{- $containerConfigDirectory := include "kolla_val_get_str" (dict "key" "container_config_directory" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" $searchPath "Values" .Values ) }} -{{- $replicas := include "kolla_val_get_str" (dict "key" "replicas" "searchPath" $searchPath "Values" .Values ) }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "cinder-backup" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $resourceName := "cinder-backup" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := false }} -{{- $serviceName := "cinder" }} -{{- 
$serviceType := "backup" }} -{{- $privileged := false }} -{{- $elementName := .Values.element_name | default $resourceName }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "imageFull" $imageFull "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "privileged" $privileged "Values" .Values "Release" .Release "searchPath" $searchPath }} -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - name: {{ $elementName }} -spec: - serviceName: {{ $elementName }} - replicas: {{ $replicas }} - template: - metadata: - labels: - service: {{ $elementName }} - type: {{ $serviceType }} - spec: - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} - initContainers: -{{- include "common_dependency_container" $env | indent 8 }} - - name: initialize-cinder-volume - image: {{ include "kolla_toolbox_image_full" . | quote }} - command: - - sh - - -ce - - cp -a /srv/configmap/..data/* /srv/pod-main-config/; - volumeMounts: - - name: cinder-backup-configmap - mountPath: /srv/configmap - - name: pod-main-config - mountPath: /srv/pod-main-config - - name: initialize-cinder-logs - image: "{{ $imageFull }}" - command: - - sh - - -ce - - | - mkdir /var/log/kolla/cinder ; - chown -R cinder: /var/log/kolla/cinder - volumeMounts: - - name: kolla-logs - mountPath: /var/log/kolla - containers: - - name: main - image: "{{ $imageFull }}" - imagePullPolicy: {{ $imagePullPolicy | quote }} - volumeMounts: -{{- include "common_volume_mounts" $env | indent 12 }} - - mountPath: /var/lib/kolla-kubernetes/event - name: kolla-kubernetes-events - - mountPath: {{ $containerConfigDirectory }} - name: pod-main-config - env: -{{- include "common_env_vars" $env | indent 12 }} -{{- include "common_containers" $env | indent 8 }} - volumes: -{{- include "common_volumes" $env | indent 8 }} - - name: kolla-kubernetes-events - emptyDir: {} - - name: pod-main-config - emptyDir: {} - - name: cinder-backup-configmap - configMap: - name: {{ 
$elementName }} -{{- end }} diff --git a/helm/microservice/cinder-create-db-job/Chart.yaml b/helm/microservice/cinder-create-db-job/Chart.yaml deleted file mode 100644 index 0fb5a6174..000000000 --- a/helm/microservice/cinder-create-db-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-create-db-job -version: 0.7.0-1 -description: Helm chart to create the cinder database -keywords: - - openstack - - cinder - - cinder-create-db -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-db-job/requirements.yaml b/helm/microservice/cinder-create-db-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-db-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-db-job/templates/cinder-create-db.yaml b/helm/microservice/cinder-create-db-job/templates/cinder-create-db.yaml deleted file mode 100644 index 39c6ae798..000000000 --- a/helm/microservice/cinder-create-db-job/templates/cinder-create-db.yaml +++ /dev/null @@ -1,7 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_db.job:global.kolla.cinder.create_db.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder" }} -{{- $serviceName := "cinder" }} -{{- $podTypeBootstrap := true }} -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "podTypeBootstrap" $podTypeBootstrap "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_db_job" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-endpoint-admin-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-admin-job/Chart.yaml deleted file mode 100644 index 
20fd4f9a9..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-admin-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-create-keystone-endpoint-admin-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder admin keystone endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-admin -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-admin-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-admin-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-admin-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-admin-job/templates/cinder-create-keystone-endpoint-admin.yaml b/helm/microservice/cinder-create-keystone-endpoint-admin-job/templates/cinder-create-keystone-endpoint-admin.yaml deleted file mode 100644 index bc5cd47f8..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-admin-job/templates/cinder-create-keystone-endpoint-admin.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_endpoint.admin.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-admin" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "admin" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict 
"key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $cinderAdminEndpointBuilt := printf "http://%s:%s/v1/%%(tenant_id)s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderAdminEndpointBuilt }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/Chart.yaml deleted file mode 100644 index 5160374da..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-endpoint-adminv2-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder admin keystone v2 endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-admin - - v2 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/templates/cinder-create-keystone-endpoint-adminv2.yaml b/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/templates/cinder-create-keystone-endpoint-adminv2.yaml deleted file mode 100644 index 26ef9df7b..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-adminv2-job/templates/cinder-create-keystone-endpoint-adminv2.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_endpoint.adminv2.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-adminv2" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "admin" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $cinderAdminEndpointBuilt := printf "http://%s:%s/v2/%%(tenant_id)s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderAdminEndpointBuilt }} -{{- $postfix := "v2" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" 
$podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "postfix" $postfix "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} - diff --git a/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/Chart.yaml deleted file mode 100644 index 888fe3ff1..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-endpoint-adminv3-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder admin keystone v3 endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-admin - - v3 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/templates/cinder-create-keystone-endpoint-adminv3.yaml b/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/templates/cinder-create-keystone-endpoint-adminv3.yaml deleted file mode 100644 index 4582d4e3a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-adminv3-job/templates/cinder-create-keystone-endpoint-adminv3.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- $searchPath := 
":global.kolla.cinder.create_keystone_endpoint.adminv3.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-adminv3" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "admin" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $cinderAdminEndpointBuilt := printf "http://%s:%s/v3/%%(tenant_id)s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderAdminEndpointBuilt }} -{{- $postfix := "v3" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "postfix" $postfix "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} - diff --git a/helm/microservice/cinder-create-keystone-endpoint-internal-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-internal-job/Chart.yaml deleted file mode 100644 index c4d085606..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internal-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-create-keystone-endpoint-internal-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for the cinder internal keystone endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-internal -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used 
as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-internal-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-internal-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internal-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-internal-job/templates/cinder-create-keystone-endpoint-internal.yaml b/helm/microservice/cinder-create-keystone-endpoint-internal-job/templates/cinder-create-keystone-endpoint-internal.yaml deleted file mode 100644 index 06d1be9b7..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internal-job/templates/cinder-create-keystone-endpoint-internal.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_endpoint.internal.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-internal" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "internal" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $cinderInternalEndpointBuilt := printf "http://%s:%s/v1/%%(tenant_id)s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderInternalEndpointBuilt }} -{{- with $env := dict "netHostTrue" $netHostTrue 
"podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/Chart.yaml deleted file mode 100644 index 6d1d9178b..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-endpoint-internalv2-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder internal keystone v2 endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-internal - - v2 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/templates/cinder-create-keystone-endpoint-internalv2.yaml b/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/templates/cinder-create-keystone-endpoint-internalv2.yaml deleted file mode 100644 index ff955cbd0..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internalv2-job/templates/cinder-create-keystone-endpoint-internalv2.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_endpoint.internalv2.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-internalv2" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "internal" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $cinderInternalEndpointBuilt := printf "http://%s:%s/v2/%%(tenant_id)s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderInternalEndpointBuilt }} -{{- $postfix := "v2" }} -{{- with $env := dict 
"netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "postfix" $postfix "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/Chart.yaml deleted file mode 100644 index a1dd3d89c..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-endpoint-internalv3-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder internal keystone v3 endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-internal - - v3 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/templates/cinder-create-keystone-endpoint-internalv3.yaml b/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/templates/cinder-create-keystone-endpoint-internalv3.yaml deleted file mode 100644 index b07599c02..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-internalv3-job/templates/cinder-create-keystone-endpoint-internalv3.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_endpoint.internalv3.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-internalv3" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "internal" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $cinderInternalEndpointBuilt := printf "http://%s:%s/v3/%%(tenant_id)s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderInternalEndpointBuilt }} -{{- $postfix := "v3" }} -{{- with $env := dict 
"netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "postfix" $postfix "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-endpoint-public-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-public-job/Chart.yaml deleted file mode 100644 index bda5e3505..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-public-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-create-keystone-endpoint-public-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder public keystone endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-public -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-public-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-public-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-public-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-public-job/templates/cinder-create-keystone-endpoint-public.yaml b/helm/microservice/cinder-create-keystone-endpoint-public-job/templates/cinder-create-keystone-endpoint-public.yaml deleted file mode 100644 index a40902642..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-public-job/templates/cinder-create-keystone-endpoint-public.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_endpoint.public.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-public" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "public" }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $externalVip := include "kolla_val_get_str" (dict "key" "external_vip" "searchPath" $searchPath "Values" .Values) }} -{{- $cinderPublicEndpointBuilt := printf "http://%s:%s/v1/%%(tenant_id)s" $externalVip $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderPublicEndpointBuilt }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap 
"resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/Chart.yaml deleted file mode 100644 index afa7b5e65..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-endpoint-publicv2-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder public keystone v2 endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-public - - v2 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/templates/cinder-create-keystone-endpoint-publicv2.yaml b/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/templates/cinder-create-keystone-endpoint-publicv2.yaml deleted file mode 100644 index 2e6bd5a0a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-publicv2-job/templates/cinder-create-keystone-endpoint-publicv2.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- $searchPath := 
":global.kolla.cinder.create_keystone_endpoint.publicv2.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-publicv2" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "public" }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values ) }} -{{- $externalVip := include "kolla_val_get_str" (dict "key" "external_vip" "searchPath" $searchPath "Values" .Values ) }} -{{- $cinderPublicEndpointBuilt := printf "http://%s:%s/v2/%%(tenant_id)s" $externalVip $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values ) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderPublicEndpointBuilt }} -{{- $postfix := "v2" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "postfix" $postfix "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/Chart.yaml b/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/Chart.yaml deleted file mode 100644 index a28d8b660..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-endpoint-publicv3-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart for creating cinder public keystone v3 endpoint -keywords: - - openstack - - cinder - - keystone-endpoint-public - - v3 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an 
SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/requirements.yaml b/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/templates/cinder-create-keystone-endpoint-publicv3.yaml b/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/templates/cinder-create-keystone-endpoint-publicv3.yaml deleted file mode 100644 index 36c9db115..000000000 --- a/helm/microservice/cinder-create-keystone-endpoint-publicv3-job/templates/cinder-create-keystone-endpoint-publicv3.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_endpoint.publicv3.job:global.kolla.cinder.create_keystone_endpoint.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-endpoint-publicv3" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $interface := "public" }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values ) }} -{{- $externalVip := include "kolla_val_get_str" (dict "key" "external_vip" "searchPath" $searchPath "Values" .Values ) }} -{{- $cinderPublicEndpointBuilt := printf "http://%s:%s/v3/%%(tenant_id)s" $externalVip $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values ) }} -{{- $endpointURLFull := $endpointURLFullUser | default $cinderPublicEndpointBuilt }} -{{- $postfix := 
"v3" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "postfix" $postfix "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-service-job/Chart.yaml b/helm/microservice/cinder-create-keystone-service-job/Chart.yaml deleted file mode 100644 index ed8338680..000000000 --- a/helm/microservice/cinder-create-keystone-service-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-create-keystone-service-job -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - cinder - - keystone-service -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-service-job/requirements.yaml b/helm/microservice/cinder-create-keystone-service-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-service-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-service-job/templates/cinder-create-keystone-service.yaml b/helm/microservice/cinder-create-keystone-service-job/templates/cinder-create-keystone-service.yaml deleted file mode 100644 index 6a762e899..000000000 --- a/helm/microservice/cinder-create-keystone-service-job/templates/cinder-create-keystone-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_service.job:global.kolla.cinder.create_keystone_service.all:global.kolla.cinder.all:global.kolla.all" 
}} -{{- $resourceName := "cinder-create-keystone-service" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $description := "Openstack Block Storage" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_service" $env }} -{{- end }} diff --git a/helm/microservice/cinder-create-keystone-servicev2-job/Chart.yaml b/helm/microservice/cinder-create-keystone-servicev2-job/Chart.yaml deleted file mode 100644 index 87d30037f..000000000 --- a/helm/microservice/cinder-create-keystone-servicev2-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-servicev2-job -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - cinder - - keystone-service - - v2 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-servicev2-job/requirements.yaml b/helm/microservice/cinder-create-keystone-servicev2-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-servicev2-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-servicev2-job/templates/cinder-create-keystone-servicev2.yaml b/helm/microservice/cinder-create-keystone-servicev2-job/templates/cinder-create-keystone-servicev2.yaml deleted file mode 100644 index 77fee8207..000000000 --- a/helm/microservice/cinder-create-keystone-servicev2-job/templates/cinder-create-keystone-servicev2.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_servicev2.job:global.kolla.cinder.create_keystone_servicev2.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-servicev2" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $description := "Openstack Block Storage" }} -{{- $postfix := "v2" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "postfix" $postfix "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_service" $env }} -{{- end }} - diff --git a/helm/microservice/cinder-create-keystone-servicev3-job/Chart.yaml b/helm/microservice/cinder-create-keystone-servicev3-job/Chart.yaml deleted file mode 100644 index c15169aee..000000000 --- a/helm/microservice/cinder-create-keystone-servicev3-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: 
cinder-create-keystone-servicev3-job -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - cinder - - keystone-service - - v3 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-servicev3-job/requirements.yaml b/helm/microservice/cinder-create-keystone-servicev3-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-servicev3-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-servicev3-job/templates/cinder-create-keystone-servicev3.yaml b/helm/microservice/cinder-create-keystone-servicev3-job/templates/cinder-create-keystone-servicev3.yaml deleted file mode 100644 index 794250d73..000000000 --- a/helm/microservice/cinder-create-keystone-servicev3-job/templates/cinder-create-keystone-servicev3.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_servicev3.job:global.kolla.cinder.create_keystone_servicev3.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-servicev3" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $description := "Openstack Block Storage" }} -{{- $postfix := "v3" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "postfix" $postfix "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_service" $env }} -{{- end }} - diff --git 
a/helm/microservice/cinder-create-keystone-user-job/Chart.yaml b/helm/microservice/cinder-create-keystone-user-job/Chart.yaml deleted file mode 100644 index 6c1dec60f..000000000 --- a/helm/microservice/cinder-create-keystone-user-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-create-keystone-user-job -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - cinder - - user - - create -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-create-keystone-user-job/requirements.yaml b/helm/microservice/cinder-create-keystone-user-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-create-keystone-user-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-create-keystone-user-job/templates/cinder-create-keystone-user.yaml b/helm/microservice/cinder-create-keystone-user-job/templates/cinder-create-keystone-user.yaml deleted file mode 100644 index 4b24f92ba..000000000 --- a/helm/microservice/cinder-create-keystone-user-job/templates/cinder-create-keystone-user.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.create_keystone_user.job:global.kolla.cinder.create_keystone_user.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-create-keystone-user" }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "create-keystone-user" }} -{{- $elementName := .Values.element_name | default $serviceName }} -{{- $elementType := .Values.element_type | $serviceType }} - -{{- $userName := include "kolla_val_get_str" (dict "key" "keystone_user_name" "searchPath" $searchPath "Values" .Values ) }} -{{- $userDomain := include "kolla_val_get_str" 
(dict "key" "keystone_user_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProject := include "kolla_val_get_str" (dict "key" "keystone_user_project" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProjectDomain := include "kolla_val_get_str" (dict "key" "keystone_user_project_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- $userRole := include "kolla_val_get_str" (dict "key" "keystone_user_role" "searchPath" $searchPath "Values" .Values ) }} - -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} - -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "elementName" $elementName "elementType" $elementType "podTypeBootstrap" $podTypeBootstrap "userName" $userName "userDomain" $userDomain "userProject" $userProject "userProjectDomain" $userProjectDomain "userRole" $userRole "Values" .Values "Release" .Release "searchPath" $searchPath }} - -{{- include "common_create_keystone_user" $env }} -{{- end }} diff --git a/helm/microservice/cinder-delete-db-job/Chart.yaml b/helm/microservice/cinder-delete-db-job/Chart.yaml deleted file mode 100644 index cf9b79cc1..000000000 --- a/helm/microservice/cinder-delete-db-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-delete-db-job -version: 0.7.0-1 -description: Helm chart to delete the cinder database -keywords: - - openstack - - cinder - - delete - - database -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-delete-db-job/requirements.yaml b/helm/microservice/cinder-delete-db-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-delete-db-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-delete-db-job/templates/cinder-delete-db.yaml b/helm/microservice/cinder-delete-db-job/templates/cinder-delete-db.yaml deleted file mode 100644 index 97176c68a..000000000 --- a/helm/microservice/cinder-delete-db-job/templates/cinder-delete-db.yaml +++ /dev/null @@ -1,7 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.delete_db.job:global.kolla.cinder.delete_db.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder" }} -{{- $serviceName := "cinder" }} -{{- $podTypeBootstrap := true }} -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "podTypeBootstrap" $podTypeBootstrap "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_delete_db_job" $env }} -{{- end }} diff --git a/helm/microservice/cinder-delete-keystone-service-job/Chart.yaml b/helm/microservice/cinder-delete-keystone-service-job/Chart.yaml deleted file mode 100644 index ee41d70cb..000000000 --- a/helm/microservice/cinder-delete-keystone-service-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-delete-keystone-service-job -version: 0.7.0-1 -description: Helm chart to delete the cinder keystone service -keywords: - - openstack - - cinder - - delete - - service -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-delete-keystone-service-job/requirements.yaml b/helm/microservice/cinder-delete-keystone-service-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-delete-keystone-service-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-delete-keystone-service-job/templates/cinder-delete-keystone-service.yaml b/helm/microservice/cinder-delete-keystone-service-job/templates/cinder-delete-keystone-service.yaml deleted file mode 100644 index bbed6ddcd..000000000 --- a/helm/microservice/cinder-delete-keystone-service-job/templates/cinder-delete-keystone-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.delete_keystone_service.job:global.kolla.cinder.delete_keystone_service.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-delete-keystone-service" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $description := "Openstack Block Storage" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_delete_keystone_service" $env }} -{{- end }} diff --git a/helm/microservice/cinder-delete-keystone-servicev2-job/Chart.yaml b/helm/microservice/cinder-delete-keystone-servicev2-job/Chart.yaml deleted file mode 100644 index 5f382ed81..000000000 --- a/helm/microservice/cinder-delete-keystone-servicev2-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-delete-keystone-servicev2-job -version: 0.7.0-1 -description: Helm chart to delete the 
cinder keystone servicev2 -keywords: - - openstack - - cinder - - delete - - servicev2 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-delete-keystone-servicev2-job/requirements.yaml b/helm/microservice/cinder-delete-keystone-servicev2-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-delete-keystone-servicev2-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-delete-keystone-servicev2-job/templates/cinder-delete-keystone-servicev2.yaml b/helm/microservice/cinder-delete-keystone-servicev2-job/templates/cinder-delete-keystone-servicev2.yaml deleted file mode 100644 index 9b1000b42..000000000 --- a/helm/microservice/cinder-delete-keystone-servicev2-job/templates/cinder-delete-keystone-servicev2.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.delete_keystone_servicev2.job:global.kolla.cinder.delete_keystone_servicev2.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-delete-keystone-servicev2" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $description := "Openstack Block Storage" }} -{{- $postfix := "v2" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "postfix" $postfix "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_delete_keystone_service" $env }} -{{- end }} - diff --git a/helm/microservice/cinder-delete-keystone-servicev3-job/Chart.yaml 
b/helm/microservice/cinder-delete-keystone-servicev3-job/Chart.yaml deleted file mode 100644 index e7d64f178..000000000 --- a/helm/microservice/cinder-delete-keystone-servicev3-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-delete-keystone-servicev3-job -version: 0.7.0-1 -description: Helm chart to delete the cinder keystone servicev3 -keywords: - - openstack - - cinder - - delete - - servicev3 -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-delete-keystone-servicev3-job/requirements.yaml b/helm/microservice/cinder-delete-keystone-servicev3-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-delete-keystone-servicev3-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-delete-keystone-servicev3-job/templates/cinder-delete-keystone-servicev3.yaml b/helm/microservice/cinder-delete-keystone-servicev3-job/templates/cinder-delete-keystone-servicev3.yaml deleted file mode 100644 index 07f203bff..000000000 --- a/helm/microservice/cinder-delete-keystone-servicev3-job/templates/cinder-delete-keystone-servicev3.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.delete_keystone_servicev3.job:global.kolla.cinder.delete_keystone_servicev3.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-delete-keystone-servicev3" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $description := "Openstack Block Storage" }} -{{- $postfix := "v3" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName 
"serviceType" $serviceType "description" $description "postfix" $postfix "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_delete_keystone_service" $env }} -{{- end }} - diff --git a/helm/microservice/cinder-delete-keystone-user-job/Chart.yaml b/helm/microservice/cinder-delete-keystone-user-job/Chart.yaml deleted file mode 100644 index c7c3f13dd..000000000 --- a/helm/microservice/cinder-delete-keystone-user-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: cinder-delete-keystone-user-job -version: 0.7.0-1 -description: Helm chart to delete the cinder keystone user -keywords: - - openstack - - cinder - - delete - - user -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-delete-keystone-user-job/requirements.yaml b/helm/microservice/cinder-delete-keystone-user-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-delete-keystone-user-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-delete-keystone-user-job/templates/cinder-delete-keystone-user.yaml b/helm/microservice/cinder-delete-keystone-user-job/templates/cinder-delete-keystone-user.yaml deleted file mode 100644 index 65240f736..000000000 --- a/helm/microservice/cinder-delete-keystone-user-job/templates/cinder-delete-keystone-user.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.delete_keystone_user.job:global.kolla.cinder.delete_keystone_user.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $resourceName := "cinder-delete-keystone-user" }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "delete-keystone-user" }} -{{- $elementName := .Values.element_name | default $serviceName }} -{{- 
$elementType := .Values.element_type | $serviceType }} - -{{- $userName := include "kolla_val_get_str" (dict "key" "keystone_user_name" "searchPath" $searchPath "Values" .Values ) }} -{{- $userDomain := include "kolla_val_get_str" (dict "key" "keystone_user_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProject := include "kolla_val_get_str" (dict "key" "keystone_user_project" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProjectDomain := include "kolla_val_get_str" (dict "key" "keystone_user_project_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- $userRole := include "kolla_val_get_str" (dict "key" "keystone_user_role" "searchPath" $searchPath "Values" .Values ) }} - -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} - -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "elementName" $elementName "elementType" $elementType "podTypeBootstrap" $podTypeBootstrap "userName" $userName "userDomain" $userDomain "userProject" $userProject "userProjectDomain" $userProjectDomain "userRole" $userRole "Values" .Values "Release" .Release "searchPath" $searchPath }} - -{{- include "common_delete_keystone_user" $env }} -{{- end }} diff --git a/helm/microservice/cinder-manage-db-job/Chart.yaml b/helm/microservice/cinder-manage-db-job/Chart.yaml deleted file mode 100644 index 5d566ef62..000000000 --- a/helm/microservice/cinder-manage-db-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-manage-db-job -version: 0.7.0-1 #FIXME make this changable -description: Helm chart to manage the cinder database -keywords: - - openstack - - cinder - - cinder-manage-db -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/cinder-manage-db-job/requirements.yaml b/helm/microservice/cinder-manage-db-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-manage-db-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-manage-db-job/templates/cinder-manage-db.yaml b/helm/microservice/cinder-manage-db-job/templates/cinder-manage-db.yaml deleted file mode 100644 index 02ace00d4..000000000 --- a/helm/microservice/cinder-manage-db-job/templates/cinder-manage-db.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.manage_db.job:global.kolla.cinder.manage_db.all:global.kolla.cinder.api.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "cinder-api" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $resourceName := "cinder" }} -{{- $serviceName := "cinder-api" }} -{{- $podTypeBootstrap := true }} -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "podTypeBootstrap" $podTypeBootstrap "imageFull" $imageFull "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_manage_db_job" $env }} -{{- end }} diff --git a/helm/microservice/cinder-scheduler-statefulset/Chart.yaml b/helm/microservice/cinder-scheduler-statefulset/Chart.yaml deleted file mode 100644 index 0da671f6f..000000000 --- a/helm/microservice/cinder-scheduler-statefulset/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-scheduler-statefulset -version: 0.7.0-1 -description: Helm chart for cinder scheduler pod -keywords: - - openstack - - cinder - - cinder-scheduler -sources: - - 
http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-scheduler-statefulset/requirements.yaml b/helm/microservice/cinder-scheduler-statefulset/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-scheduler-statefulset/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-scheduler-statefulset/templates/cinder-scheduler.yaml b/helm/microservice/cinder-scheduler-statefulset/templates/cinder-scheduler.yaml deleted file mode 100644 index b22a073fc..000000000 --- a/helm/microservice/cinder-scheduler-statefulset/templates/cinder-scheduler.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.scheduler.statefulset:global.kolla.cinder.scheduler.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "cinder-scheduler" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $resourceName := "cinder-scheduler" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := false }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "scheduler" }} -{{- $privileged := false }} -{{- with $env := dict "imageFull" $imageFull "resourceName" $resourceName "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "serviceName" $serviceName "serviceType" $serviceType "privileged" $privileged "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_statefulset" $env }} -{{- end }} diff --git a/helm/microservice/cinder-volume-ceph-statefulset/Chart.yaml b/helm/microservice/cinder-volume-ceph-statefulset/Chart.yaml deleted file mode 
100644 index 4fb72b87b..000000000 --- a/helm/microservice/cinder-volume-ceph-statefulset/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-volume-ceph-statefulset -version: 0.7.0-1 #FIXME make this changable -description: FIXME -keywords: - - openstack - - cinder - - cinder-volume-ceph -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-volume-ceph-statefulset/requirements.yaml b/helm/microservice/cinder-volume-ceph-statefulset/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-volume-ceph-statefulset/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-volume-ceph-statefulset/templates/cinder-volume-ceph.yaml b/helm/microservice/cinder-volume-ceph-statefulset/templates/cinder-volume-ceph.yaml deleted file mode 100644 index 2892f47a7..000000000 --- a/helm/microservice/cinder-volume-ceph-statefulset/templates/cinder-volume-ceph.yaml +++ /dev/null @@ -1,97 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.volume_ceph.statefulset:global.kolla.cinder.volume_ceph.all:global.kolla.cinder.volume.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" $searchPath "Values" .Values ) }} -{{- $containerConfigDirectory := include "kolla_val_get_str" (dict "key" "container_config_directory" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" $searchPath "Values" .Values ) }} -{{- $replicas := include "kolla_val_get_str" (dict "key" 
"replicas" "searchPath" $searchPath "Values" .Values ) }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "cinder-volume" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $resourceName := "cinder-volume-ceph" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := false }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- $privileged := false }} -{{- $elementName := .Values.element_name | default $resourceName }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "imageFull" $imageFull "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "privileged" $privileged "Values" .Values "Release" .Release "searchPath" $searchPath }} -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - name: {{ $elementName }} - labels: - component: {{ $resourceName }} - system: {{ $elementName }} -spec: - serviceName: {{ $serviceName }} - replicas: {{ $replicas }} - template: - metadata: - labels: - component: {{ $resourceName }} - system: {{ $elementName }} - spec: - hostPID: True - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} - initContainers: -{{- include "common_dependency_container" $env | indent 8 }} - - name: initialize-config - image: {{ include "kolla_toolbox_image_full" . 
| quote }} - command: - - bash - - -ec - - | - cp -a /srv/configmap/..data/* /srv/pod-main-config/; - cp -a /srv/ceph.conf /srv/pod-main-config/; - cp -a /srv/ceph.client.cinder.keyring /srv/pod-main-config/; - volumeMounts: - - name: ceph-conf - mountPath: /srv/ceph.conf - subPath: ceph.conf - - name: ceph-client-cinder-keyring - mountPath: /srv/ceph.client.cinder.keyring - subPath: ceph.client.cinder.keyring - - name: cinder-volume-configmap - mountPath: /srv/configmap - - name: pod-main-config - mountPath: /srv/pod-main-config - - name: initialize-cinder-logs - image: {{ $imageFull | quote }} - command: - - sh - - -ce - - | - mkdir /var/log/kolla/cinder; - chown -R cinder: /var/log/kolla/cinder - volumeMounts: - - name: kolla-logs - mountPath: /var/log/kolla - containers: - - image: "{{ $imageFull }}" - imagePullPolicy: {{ $imagePullPolicy | quote }} - name: main - env: -{{- include "common_env_vars" $env | indent 12 }} - volumeMounts: -{{- include "common_volume_mounts" $env | indent 12 }} - - mountPath: {{ $containerConfigDirectory }} - name: pod-main-config - readOnly: true -{{- include "common_containers" $env | indent 8 }} - volumes: -{{- include "common_volumes" $env | indent 8 }} - - name: pod-main-config - emptyDir: {} - - name: cinder-volume-configmap - configMap: - name: cinder-volume - - name: ceph-conf - configMap: - name: ceph-conf - - name: ceph-client-cinder-keyring - secret: - secretName: ceph-client-cinder-keyring -{{- end }} diff --git a/helm/microservice/cinder-volume-lvm-daemonset/Chart.yaml b/helm/microservice/cinder-volume-lvm-daemonset/Chart.yaml deleted file mode 100644 index ced4d4dcb..000000000 --- a/helm/microservice/cinder-volume-lvm-daemonset/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: cinder-volume-lvm-daemonset -version: 0.7.0-1 -description: cinder volume lvm microservice layer -keywords: - - openstack - - cinder - - cinder-volume-lvm -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG 
image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/cinder-volume-lvm-daemonset/requirements.yaml b/helm/microservice/cinder-volume-lvm-daemonset/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/cinder-volume-lvm-daemonset/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/cinder-volume-lvm-daemonset/templates/cinder-volume-lvm-daemonset.yaml b/helm/microservice/cinder-volume-lvm-daemonset/templates/cinder-volume-lvm-daemonset.yaml deleted file mode 100644 index 0e86744b5..000000000 --- a/helm/microservice/cinder-volume-lvm-daemonset/templates/cinder-volume-lvm-daemonset.yaml +++ /dev/null @@ -1,159 +0,0 @@ -{{- $searchPath := ":global.kolla.cinder.volume_lvm.daemonset:global.kolla.cinder.volume_lvm.all:global.kolla.cinder.volume.all:global.kolla.cinder.all:global.kolla.all" }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" $searchPath "Values" .Values ) }} -{{- $containerConfigDirectory := include "kolla_val_get_str" (dict "key" "container_config_directory" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" $searchPath "Values" .Values ) }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "cinder-volume" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $resourceName := "cinder-volume-lvm" }} -{{- $netHostTrue := true }} -{{- $podTypeBootstrap := false }} -{{- $serviceName := "cinder" }} -{{- $serviceType := "volume" }} -{{- 
$privileged := true }} -{{- $localVals := dict }} -{{- $c1 := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c1 "key" "lvm_backends" }} -{{- $_ := set $c1 "retDict" $localVals }} -{{- $_ := set $c1 "retKey" "lvm_backends" }} -{{- $_ := include "kolla_val_get_raw" $c1 }} -{{- $iscsiHelper := include "kolla_val_get_str" (dict "key" "iscsi_helper" "searchPath" $searchPath "Values" .Values ) | default "tgtadm"}} -{{- $elementName := include "kolla_val_get_str" (dict "key" "element_name" "searchPath" $searchPath "Values" .Values ) | default $resourceName }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "imageFull" $imageFull "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "privileged" $privileged "Values" .Values "Release" .Release "searchPath" $searchPath }} -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: {{ $elementName }} - labels: - component: {{ $resourceName }} - system: {{ $elementName }} -spec: - template: - metadata: - labels: - component: {{ $resourceName }} - system: {{ $elementName }} - spec: - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: True - hostIPC: True - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} - initContainers: - - name: initialize-cinder-volume-lvm - image: {{ include "kolla_toolbox_image_full" . 
| quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - command: - - bash - - -ce - - | - cp -a /srv/configmap/..data/* /srv/pod-main-config/; - {{- range $key, $val := $localVals.lvm_backends }} - {{- range $key2, $val2 := $val }} - {{- $vg_ip := $key2 }} - {{- $vg_name := $val2 }} - {{- $lvm_enabled_backend := printf "%s_%s" $vg_ip $vg_name }} - crudini --set /srv/pod-main-config/cinder.conf DEFAULT enabled_backends {{ $lvm_enabled_backend }}; - crudini --set /srv/pod-main-config/cinder.conf {{ $lvm_enabled_backend }} volume_group {{ $vg_name }}; - crudini --set /srv/pod-main-config/cinder.conf {{ $lvm_enabled_backend }} volume_backend_name {{ $lvm_enabled_backend }}; - crudini --set /srv/pod-main-config/cinder.conf {{ $lvm_enabled_backend }} volume_driver 'cinder.volume.drivers.lvm.LVMVolumeDriver'; - crudini --set /srv/pod-main-config/cinder.conf {{ $lvm_enabled_backend }} iscsi_helper {{ $iscsiHelper }}; - crudini --set /srv/pod-main-config/cinder.conf {{ $lvm_enabled_backend }} iscsi_protocol 'iscsi'; - crudini --set /srv/pod-main-config/cinder.conf {{ $lvm_enabled_backend }} iscsi_ip_address {{ $vg_ip }}; - {{- end }} - {{- end }} - volumeMounts: - - name: cinder-volume-configmap - mountPath: /srv/configmap - - name: pod-main-config - mountPath: /srv/pod-main-config - - name: initialize-cinder-logs - image: {{ $imageFull | quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - command: - - sh - - -ce - - | - mkdir /var/log/kolla/cinder; - chown -R cinder: /var/log/kolla/cinder - volumeMounts: - - name: kolla-logs - mountPath: /var/log/kolla - - name: initialize-iscsi-iqn - image: {{ $imageFull | quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - securityContext: - runAsUser: 0 - privileged: true - command: - - bash - - -ce - - | - if [ ! 
-f /etc/iscsi/initiatorname.iscsi ]; then - iqn=$( cat /dev/urandom | tr -dc 'a-f0-9' | head -c 12 ); - echo InitiatorName=iqn.2010-10.org.openstack:$iqn > /etc/iscsi/initiatorname.iscsi; - chmod 640 /etc/iscsi/initiatorname.iscsi; - fi; - volumeMounts: - - name: iscsi-info - mountPath: /etc/iscsi - containers: - - image: "{{ $imageFull }}" - imagePullPolicy: {{ $imagePullPolicy | quote }} - name: main - securityContext: - privileged: true - env: -{{- include "common_env_vars" $env | indent 12 }} - volumeMounts: -{{- include "common_volume_mounts" $env | indent 12 }} - - mountPath: {{ $containerConfigDirectory }} - name: pod-main-config - readOnly: true - - mountPath: /run - name: run - - mountPath: /dev - name: dev - - mountPath: /sys/fs/cgroup - name: sys-fs-cgroup - - mountPath: /lib/modules - name: lib-modules - - mountPath: /var/lib/cinder - name: cinder-volumes - - mountPath: /etc/iscsi - name: iscsi-info - - mountPath: /etc/target - name: iscsi-target -{{- include "common_containers" . | indent 8 }} - volumes: -{{- include "common_volumes" . 
| indent 8 }} - - name: pod-main-config - emptyDir: {} - - name: cinder-volume-configmap - configMap: - name: {{ $elementName }} - - name: cinder-volumes - hostPath: - path: /var/lib/cinder - - name: lib-modules - hostPath: - path: /lib/modules - - name: run - hostPath: - path: /run - - name: dev - hostPath: - path: /dev - - name: sys-fs-cgroup - hostPath: - path: /sys/fs/cgroup - - name: iscsi-info - hostPath: - path: /var/lib/kolla/iscsi - - name: iscsi-target - hostPath: - path: /var/lib/kolla/target -{{- end }} diff --git a/helm/microservice/glance-api-deployment/Chart.yaml b/helm/microservice/glance-api-deployment/Chart.yaml deleted file mode 100644 index 5480153e4..000000000 --- a/helm/microservice/glance-api-deployment/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: glance-api-deployment -version: 0.7.0-1 -description: Helm chart for the glance api pod -keywords: - - openstack - - glance - - api -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/glance-api-deployment/requirements.yaml b/helm/microservice/glance-api-deployment/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/glance-api-deployment/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/glance-api-deployment/templates/glance-api.yaml b/helm/microservice/glance-api-deployment/templates/glance-api.yaml deleted file mode 100644 index 1807102b0..000000000 --- a/helm/microservice/glance-api-deployment/templates/glance-api.yaml +++ /dev/null @@ -1,193 +0,0 @@ -{{- $searchPath := ":global.kolla.glance.api.deployment:global.kolla.glance.api.all:global.kolla.glance.all:global.kolla.all" }} -{{- $imagePullPolicy := include "kolla_val_get_str" (dict "key" "image_pull_policy" "searchPath" $searchPath "Values" .Values ) }} -{{- $containerConfigDirectory := include "kolla_val_get_str" (dict "key" "container_config_directory" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorKey := include "kolla_val_get_str" (dict "key" "selector_key" "searchPath" $searchPath "Values" .Values ) }} -{{- $selectorValue := include "kolla_val_get_str" (dict "key" "selector_value" "searchPath" $searchPath "Values" .Values ) }} -{{- $replicas := include "kolla_val_get_str" (dict "key" "replicas" "searchPath" $searchPath "Values" .Values ) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values ) }} -{{- $pvcName := include "kolla_val_get_str" (dict "key" "pvc_name" "searchPath" $searchPath "Values" .Values ) }} -{{- $c := dict "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c "contName" "glance-api" }} -{{- $_ := set $c "imageName" "image_full" }} -{{- $_ := set $c "tagName" "image_tag" }} -{{- $imageFull := include "kolla_build_image_full" $c }} -{{- $localVals 
:= dict }} -{{- $c1 := dict "key" "ceph_backend" "searchPath" $searchPath "Values" .Values }} -{{- $_ := set $c1 "retDict" $localVals }} -{{- $_ := set $c1 "retKey" "ceph_backend" }} -{{- $_ := include "kolla_val_get_raw" $c1 }} -{{- $c := dict "searchPath" .searchPath "key" "haproxy_python_termination" "retDict" $localVals "retKey" "haproxy" "Values" .Values }} -{{- include "kolla_val_get_raw" $c }} -{{- $resourceName := "glance-api" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := false }} -{{- $serviceName := "glance" }} -{{- $serviceType := "api" }} -{{- $configListenHostParameter := "bind_host" }} -{{- $configListenPortParameter := "bind_port" }} -{{- $portName := "glance-api" }} -{{- $checkPath := "/healthcheck" }} -{{- $privileged := false }} -{{- $gracePeriodSeconds := include "kolla_val_get_str" (dict "key" "grace_period_seconds" "searchPath" $searchPath "Values" .Values ) }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "Values" .Values "Release" .Release "searchPath" $searchPath }} -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: {{ $resourceName }} -spec: - replicas: {{ $replicas }} - strategy: -{{- if $localVals.ceph_backend }} - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate -{{- else }} - rollingUpdate: - maxSurge: 0 - maxUnavailable: 100% - type: RollingUpdate -{{- end }} - template: - metadata: - labels: - service: {{ $serviceName }} - type: {{ $serviceType }} - annotations: -{{- if $localVals.ceph_backend }} - kolla_upgrade: minor_rolling_safe -{{- end }} - spec: -{{- if $localVals.ceph_backend }} - hostPID: True -{{- end }} - nodeSelector: - {{ $selectorKey }}: {{ $selectorValue | quote }} -#You've got 2 days to drain or figure out why it won't. 
-{{- if $localVals.haproxy }} - terminationGracePeriodSeconds: {{ $gracePeriodSeconds }} -{{- end }} - initContainers: -{{- include "common_dependency_container" $env | indent 8 }} - - name: update-config - image: {{ include "kolla_toolbox_image_full" . | quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - command: - - /bin/sh - - -c - - | - cp -a /srv/configmap/..data/* /srv/pod-main-config/; -{{- if $localVals.ceph_backend }} - cp -a /srv/ceph.conf /srv/pod-main-config/; - cp -a /srv/ceph.client.glance.keyring /srv/pod-main-config/; -{{- end }} -{{- if $localVals.haproxy }} - CONF=/srv/pod-main-config/glance-api.conf; - crudini --set $CONF DEFAULT bind_host 127.0.0.1; - crudini --set $CONF DEFAULT bind_port 8080; -{{- end }} - volumeMounts: -{{- if $localVals.ceph_backend }} - - name: ceph-conf - mountPath: /srv/ceph.conf - subPath: ceph.conf - - name: ceph-client-glance-keyring - mountPath: /srv/ceph.client.glance.keyring - subPath: ceph.client.glance.keyring -{{- end }} - - name: service-configmap - mountPath: /srv/configmap - - name: pod-main-config - mountPath: /srv/pod-main-config - containers: -{{- if $localVals.haproxy }} - - name: haproxy - image: {{ include "haproxy_image_full" . | quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - command: - - /bin/bash - - -c - - | - kolla_start; - touch /var/lib/kolla-kubernetes/event/shutdown; - readinessProbe: - httpGet: - path: {{ $checkPath }} - port: {{ $port }} - initialDelaySeconds: 5 - timeoutSeconds: 5 - lifecycle: - preStop: - exec: -#FIXME move script into haproxy container -#NOTE this only works if you arn't doing a haproxy reconfigure too. -#But shouldn't ever have to do that in a setup like this. 
- command: - - /bin/bash - - -c - - | - kill -USR1 $( /pxe/config.json; - crudini --set /ironic/ironic.conf conductor api_url http://{{ $ironicAPI }}:6385; - crudini --set /ironic/ironic.conf pxe tftp_server $IP; - volumeMounts: - - name: kolla-logs - mountPath: /var/log/kolla/ - - name: conductor-config - mountPath: /config/ironic - - name: pxe-config - mountPath: /config/pxe - - name: ironic-conductor-config - mountPath: /ironic/ - - name: ironic-pxe-config - mountPath: /pxe/ - - name: pxelinux - image: {{ $imageFullPXE | quote }} - imagePullPolicy: {{ $imagePullPolicy | quote }} - command: - - sh - - -xec - - | - if [ -f /var/lib/tftpboot/pxelinux.0 ]; then - cp /var/lib/tftpboot/pxelinux.0 /tftpboot; - fi; - if [ -f /usr/share/syslinux/chain.c32 ]; then - cp /usr/share/syslinux/chain.c32 /tftpboot; - fi; - if [ -f /usr/lib/PXELINUX/pxelinux.0 ]; then - cp /usr/lib/PXELINUX/pxelinux.0 /tftpboot; - fi; - if [ -f /usr/lib/syslinux/modules/bios/chain.c32 ]; then - cp /usr/lib/syslinux/modules/bios/chain.c32 /tftpboot; - fi; - mkdir -p /tftpboot/pxelinux.cfg; - volumeMounts: - - name: tftpboot - mountPath: /tftpboot/ - containers: - - name: pxe - imagePullPolicy: {{ $imagePullPolicy | quote }} - image: "{{ $imageFullPXE }}" - volumeMounts: -{{- include "common_volume_mounts" $env | indent 12 }} - - mountPath: {{ $containerConfigDirectory }} - name: ironic-pxe-config - - mountPath: /tftpboot - name: tftpboot - env: -{{- include "common_env_vars" $env | indent 12 }} - - name: main - securityContext: - privileged: true - imagePullPolicy: {{ $imagePullPolicy | quote }} - image: "{{ $imageFull }}" - volumeMounts: -{{- include "common_volume_mounts" $env | indent 12 }} - - mountPath: {{ $containerConfigDirectory }} - name: ironic-conductor-config - - mountPath: /var/lib/ironic - name: ironic-persistent-storage - - mountPath: /var/run - name: run - - mountPath: /dev - name: dev - - mountPath: /sys - name: sys - - mountPath: /tftpboot - name: tftpboot - env: -{{- include 
"common_env_vars" $env | indent 12 }} -{{- include "common_containers" $env | indent 8 }} - volumes: -{{- include "common_volumes" $env | indent 8 }} - - name: conductor-config - configMap: - name: {{ $elementName }} - - name: pxe-config - configMap: - name: ironic-conductor-tftp - - name: ironic-pxe-config - emptyDir: {} - - name: ironic-conductor-config - emptyDir: {} - - name: ironic-persistent-storage - hostPath: - path: /var/lib/ironic - - name: run - hostPath: - path: /var/run - - name: dev - hostPath: - path: /dev - - name: sys - hostPath: - path: /sys - - name: tftpboot - emptyDir: {} -{{- end }} diff --git a/helm/microservice/ironic-create-keystone-service-job/Chart.yaml b/helm/microservice/ironic-create-keystone-service-job/Chart.yaml deleted file mode 100644 index 536bc38a3..000000000 --- a/helm/microservice/ironic-create-keystone-service-job/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: ironic-create-keystone-service-job -version: 0.7.0-1 -description: Helm chart for the ironic keystone service -keywords: - - openstack - - ironic - - keystone-service -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ironic-create-keystone-service-job/requirements.yaml b/helm/microservice/ironic-create-keystone-service-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-create-keystone-service-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-create-keystone-service-job/templates/ironic-create-keystone-service.yaml b/helm/microservice/ironic-create-keystone-service-job/templates/ironic-create-keystone-service.yaml deleted file mode 100644 index c081635a4..000000000 --- a/helm/microservice/ironic-create-keystone-service-job/templates/ironic-create-keystone-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.create_keystone_service.job:global.kolla.ironic.create_keystone_service.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-create-keystone-service" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "ironic" }} -{{- $serviceType := "baremetal" }} -{{- $description := "Ironic bare metal provisioning service" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_service" $env }} -{{- end }} diff --git a/helm/microservice/ironic-create-keystone-user-job/Chart.yaml b/helm/microservice/ironic-create-keystone-user-job/Chart.yaml deleted file mode 100644 index 0ac1d670d..000000000 --- a/helm/microservice/ironic-create-keystone-user-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: ironic-create-keystone-user-job -version: 0.7.0-1 -description: Helm chart for the ironic 
keystone user -keywords: - - openstack - - ironic - - user - - create -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/ironic-create-keystone-user-job/requirements.yaml b/helm/microservice/ironic-create-keystone-user-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-create-keystone-user-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-create-keystone-user-job/templates/ironic-create-keystone-user.yaml b/helm/microservice/ironic-create-keystone-user-job/templates/ironic-create-keystone-user.yaml deleted file mode 100644 index a230b762b..000000000 --- a/helm/microservice/ironic-create-keystone-user-job/templates/ironic-create-keystone-user.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.create_keystone_user.job:global.kolla.ironic.create_keystone_user.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-create-keystone-user" }} -{{- $serviceName := "ironic" }} -{{- $serviceType := "create-keystone-user" }} -{{- $elementName := .Values.element_name | default $serviceName }} -{{- $elementType := .Values.element_type | $serviceType }} - -{{- $userName := include "kolla_val_get_str" (dict "key" "keystone_user_name" "searchPath" $searchPath "Values" .Values ) }} -{{- $userDomain := include "kolla_val_get_str" (dict "key" "keystone_user_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProject := include "kolla_val_get_str" (dict "key" "keystone_user_project" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProjectDomain := include "kolla_val_get_str" (dict "key" "keystone_user_project_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- 
$userRole := include "kolla_val_get_str" (dict "key" "keystone_user_role" "searchPath" $searchPath "Values" .Values ) }} - -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "elementName" $elementName "elementType" $elementType "podTypeBootstrap" $podTypeBootstrap "userName" $userName "userDomain" $userDomain "userProject" $userProject "userProjectDomain" $userProjectDomain "userRole" $userRole "Values" .Values "Release" .Release "searchPath" $searchPath }} - -{{- include "common_create_keystone_user" $env }} -{{- end }} diff --git a/helm/microservice/ironic-delete-keystone-service-job/Chart.yaml b/helm/microservice/ironic-delete-keystone-service-job/Chart.yaml deleted file mode 100644 index e1b1ddc63..000000000 --- a/helm/microservice/ironic-delete-keystone-service-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: ironic-delete-keystone-service-job -version: 0.7.0-1 -description: Helm chart to delete the ironic keystone service -keywords: - - openstack - - ironic - - delete - - service -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ironic-delete-keystone-service-job/requirements.yaml b/helm/microservice/ironic-delete-keystone-service-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-delete-keystone-service-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-delete-keystone-service-job/templates/ironic-delete-keystone-service.yaml b/helm/microservice/ironic-delete-keystone-service-job/templates/ironic-delete-keystone-service.yaml deleted file mode 100644 index 51b7a06c0..000000000 --- a/helm/microservice/ironic-delete-keystone-service-job/templates/ironic-delete-keystone-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.delete_keystone_service.job:global.kolla.ironic.delete_keystone_service.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-delete-keystone-service" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "ironic" }} -{{- $serviceType := "baremetal" }} -{{- $description := "Openstack Baremetal" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_delete_keystone_service" $env }} -{{- end }} diff --git a/helm/microservice/ironic-delete-keystone-user-job/Chart.yaml b/helm/microservice/ironic-delete-keystone-user-job/Chart.yaml deleted file mode 100644 index 3c69dff8f..000000000 --- a/helm/microservice/ironic-delete-keystone-user-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: ironic-delete-keystone-user-job -version: 0.7.0-1 -description: Helm chart to delete the ironic keystone user 
-keywords: - - openstack - - ironic - - delete - - user -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/ironic-delete-keystone-user-job/requirements.yaml b/helm/microservice/ironic-delete-keystone-user-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-delete-keystone-user-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-delete-keystone-user-job/templates/ironic-delete-keystone-user.yaml b/helm/microservice/ironic-delete-keystone-user-job/templates/ironic-delete-keystone-user.yaml deleted file mode 100644 index 0f1536090..000000000 --- a/helm/microservice/ironic-delete-keystone-user-job/templates/ironic-delete-keystone-user.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.delete_keystone_user.job:global.kolla.ironic.delete_keystone_user.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-delete-keystone-user" }} -{{- $serviceName := "ironic" }} -{{- $serviceType := "delete-keystone-user" }} -{{- $elementName := .Values.element_name | default $serviceName }} -{{- $elementType := .Values.element_type | $serviceType }} - -{{- $userName := include "kolla_val_get_str" (dict "key" "keystone_user_name" "searchPath" $searchPath "Values" .Values ) }} -{{- $userDomain := include "kolla_val_get_str" (dict "key" "keystone_user_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProject := include "kolla_val_get_str" (dict "key" "keystone_user_project" "searchPath" $searchPath "Values" .Values ) }} -{{- $userProjectDomain := include "kolla_val_get_str" (dict "key" "keystone_user_project_domain" "searchPath" $searchPath "Values" .Values ) }} -{{- $userRole := 
include "kolla_val_get_str" (dict "key" "keystone_user_role" "searchPath" $searchPath "Values" .Values ) }} - -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} - -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "elementName" $elementName "elementType" $elementType "podTypeBootstrap" $podTypeBootstrap "userName" $userName "userDomain" $userDomain "userProject" $userProject "userProjectDomain" $userProjectDomain "userRole" $userRole "Values" .Values "Release" .Release "searchPath" $searchPath }} - -{{- include "common_delete_keystone_user" $env }} -{{- end }} diff --git a/helm/microservice/ironic-inspector-create-db-job/Chart.yaml b/helm/microservice/ironic-inspector-create-db-job/Chart.yaml deleted file mode 100644 index 42a2ee228..000000000 --- a/helm/microservice/ironic-inspector-create-db-job/Chart.yaml +++ /dev/null @@ -1,13 +0,0 @@ -name: ironic-inspector-create-db-job -version: 0.7.0-1 -description: ironic inspector create db microservice package -keywords: - - openstack - - ironic - - inspector - - db - - create -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ironic-inspector-create-db-job/requirements.yaml b/helm/microservice/ironic-inspector-create-db-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-inspector-create-db-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-inspector-create-db-job/templates/ironic-inspector-create-db.yaml b/helm/microservice/ironic-inspector-create-db-job/templates/ironic-inspector-create-db.yaml deleted file mode 100644 index 83838ec0e..000000000 --- a/helm/microservice/ironic-inspector-create-db-job/templates/ironic-inspector-create-db.yaml +++ /dev/null @@ -1,7 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.inspector.create_db.job:global.kolla.ironic.inspector.create_db.all:global.kolla.ironic.inspector.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-inspector" }} -{{- $serviceName := "ironic-inspector" }} -{{- $podTypeBootstrap := true }} -{{- with $env := dict "resourceName" $resourceName "serviceName" $serviceName "podTypeBootstrap" $podTypeBootstrap "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_db_job" $env }} -{{- end }} diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/Chart.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/Chart.yaml deleted file mode 100644 index 41792b919..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: ironic-inspector-create-keystone-endpoint-admin-job -version: 0.7.0-1 -description: Helm chart for creating ironic keystone admin endpoint -keywords: - - openstack - - ironic - - inspector - - keystone-endpoint-admin -sources: - - http://github.com/openstack -engine: 
gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). make this point to the new project icons when ready diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/requirements.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/templates/ironic-inspector-create-keystone-endpoint-admin.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/templates/ironic-inspector-create-keystone-endpoint-admin.yaml deleted file mode 100644 index 1d3b9e80a..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-admin-job/templates/ironic-inspector-create-keystone-endpoint-admin.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.inspector.create_keystone_endpoint.admin.job:global.kolla.ironic.inspector.create_keystone_endpoint.all:global.kolla.ironic.inspector.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-inspector-create-keystone-endpoint-admin" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "ironic-inspector" }} -{{- $serviceType := "baremetal-introspection" }} -{{- $interface := "admin" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $ironicAdminEndpointBuilt := printf "http://%s:%s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) 
}} -{{- $endpointURLFull := $endpointURLFullUser | default $ironicAdminEndpointBuilt }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/Chart.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/Chart.yaml deleted file mode 100644 index f1f230d37..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: ironic-inspector-create-keystone-endpoint-internal-job -version: 0.7.0-1 -description: Helm chart for the ironic inspector internal keystone endpoint -keywords: - - openstack - - ironic - - inspector - - keystone-endpoint-internal -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/requirements.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/templates/ironic-inspector-create-keystone-endpoint-internal.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/templates/ironic-inspector-create-keystone-endpoint-internal.yaml deleted file mode 100644 index 2b7bd90ed..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-internal-job/templates/ironic-inspector-create-keystone-endpoint-internal.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.inspector.create_keystone_endpoint.internal.job:global.kolla.ironic.inspector.create_keystone_endpoint.all:global.kolla.ironic.inspector.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-inspector-create-keystone-endpoint-internal" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "ironic-inspector" }} -{{- $serviceType := "baremetal-introspection" }} -{{- $interface := "internal" }} -{{- $service := include "kolla_val_get_str" (dict "key" "service" "searchPath" $searchPath "Values" .Values) }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $ironicInternalEndpointBuilt := printf "http://%s:%s" $service $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := 
$endpointURLFullUser | default $ironicInternalEndpointBuilt }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/Chart.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/Chart.yaml deleted file mode 100644 index 21cb12c3d..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: ironic-inspector-create-keystone-endpoint-public-job -version: 0.7.0-1 -description: Helm chart for creating ironic inspector public keystone endpoint -keywords: - - openstack - - ironic - - inspector - - keystone-endpoint-admin -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/requirements.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/templates/ironic-inspector-create-keystone-endpoint-public.yaml b/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/templates/ironic-inspector-create-keystone-endpoint-public.yaml deleted file mode 100644 index 9843d1b63..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-endpoint-public-job/templates/ironic-inspector-create-keystone-endpoint-public.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.inspector.create_keystone_endpoint.public.job:global.kolla.ironic.inspector.create_keystone_endpoint.all:global.kolla.ironic.inspector.all:global.kolla.ironic.all:global.kolla.all" }} -{{- $resourceName := "ironic-inspector-create-keystone-endpoint-public" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "ironic-inspector" }} -{{- $serviceType := "baremetal-introspection" }} -{{- $interface := "public" }} -{{- $port := include "kolla_val_get_str" (dict "key" "port" "searchPath" $searchPath "Values" .Values) }} -{{- $externalVip := include "kolla_val_get_str" (dict "key" "external_vip" "searchPath" $searchPath "Values" .Values) }} -{{- $ironicPublicEndpointBuilt := printf "http://%s:%s" $externalVip $port }} -{{- $endpointURLFullUser := include "kolla_val_get_str" (dict "key" "endpoint" "searchPath" $searchPath "Values" .Values) }} -{{- $endpointURLFull := $endpointURLFullUser | 
default $ironicPublicEndpointBuilt }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "interface" $interface "endpointURLFull" $endpointURLFull "Release" .Release "Values" .Values "searchPath" $searchPath }} -{{- include "common_create_keystone_endpoint" $env }} -{{- end }} diff --git a/helm/microservice/ironic-inspector-create-keystone-service-job/Chart.yaml b/helm/microservice/ironic-inspector-create-keystone-service-job/Chart.yaml deleted file mode 100644 index 41aee0bca..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-service-job/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: ironic-inspector-create-keystone-service-job -version: 0.7.0-1 -description: Helm chart for the ironic inspector keystone service -keywords: - - openstack - - ironic - - inspector - - keystone-service -sources: - - http://github.com/openstack -engine: gotpl -#icon: A URL to an SVG or PNG image to be used as an icon (optional). 
make this point to the new project icons when ready diff --git a/helm/microservice/ironic-inspector-create-keystone-service-job/requirements.yaml b/helm/microservice/ironic-inspector-create-keystone-service-job/requirements.yaml deleted file mode 100644 index 038deff5a..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-service-job/requirements.yaml +++ /dev/null @@ -1,4 +0,0 @@ -dependencies: - - name: kolla-common - repository: file://../../kolla-common - version: 0.7.0-1 diff --git a/helm/microservice/ironic-inspector-create-keystone-service-job/templates/ironic-inspector-create-keystone-service.yaml b/helm/microservice/ironic-inspector-create-keystone-service-job/templates/ironic-inspector-create-keystone-service.yaml deleted file mode 100644 index bfff64c9d..000000000 --- a/helm/microservice/ironic-inspector-create-keystone-service-job/templates/ironic-inspector-create-keystone-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- $searchPath := ":global.kolla.ironic.inspector.create_keystone_service.job:global.kolla.ironic.inspector.create_keystone_service.all:global.kolla.ironic.inspector.all:global.kolla.all" }} -{{- $resourceName := "ironic-inspector-create-keystone-service" }} -{{- $netHostTrue := false }} -{{- $podTypeBootstrap := true }} -{{- $serviceName := "ironic-inspector" }} -{{- $serviceType := "baremetal-introspection" }} -{{- $description := "Ironic Inspector baremetal introspection service" }} -{{- with $env := dict "netHostTrue" $netHostTrue "podTypeBootstrap" $podTypeBootstrap "resourceName" $resourceName "serviceName" $serviceName "serviceType" $serviceType "description" $description "Values" .Values "Release" .Release "searchPath" $searchPath }} -{{- include "common_create_keystone_service" $env }} -{{- end }} diff --git a/helm/microservice/ironic-inspector-create-keystone-user-job/Chart.yaml b/helm/microservice/ironic-inspector-create-keystone-user-job/Chart.yaml deleted file mode 100644 index 4af403cc9..000000000 --- 
a/helm/microservice/ironic-inspector-create-keystone-user-job/Chart.yaml +++ /dev/null @@ -1,13 +0,0 @@ -name: