diff --git a/ansible/action_plugins/merge_configs.py b/ansible/action_plugins/merge_configs.py
deleted file mode 100644
index 7218a2ac99..0000000000
--- a/ansible/action_plugins/merge_configs.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2015 Sam Yaple
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import ConfigParser
-import inspect
-import os
-from six import StringIO
-
-from ansible.plugins import action
-
-
-class ActionModule(action.ActionBase):
-
- TRANSFERS_FILES = True
-
- def read_config(self, source, config):
- # Only use config if present
- if os.access(source, os.R_OK):
- with open(source, 'r') as f:
- template_data = f.read()
- result = self._templar.template(template_data)
- fakefile = StringIO(result)
- config.readfp(fakefile)
- fakefile.close()
-
- def run(self, tmp=None, task_vars=None):
-
- if task_vars is None:
- task_vars = dict()
- result = super(ActionModule, self).run(tmp, task_vars)
-
- # NOTE(jeffrey4l): Ansible 2.1 add a remote_user param to the
- # _make_tmp_path function. inspect the number of the args here. In
- # this way, ansible 2.0 and ansible 2.1 are both supported
- make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0]
- if not tmp and len(make_tmp_path_args) == 1:
- tmp = self._make_tmp_path()
- if not tmp and len(make_tmp_path_args) == 2:
- remote_user = (task_vars.get('ansible_user')
- or self._play_context.remote_user)
- tmp = self._make_tmp_path(remote_user)
-
- sources = self._task.args.get('sources', None)
- extra_vars = self._task.args.get('vars', list())
-
- if not isinstance(sources, list):
- sources = [sources]
-
- temp_vars = task_vars.copy()
- temp_vars.update(extra_vars)
-
- config = ConfigParser.ConfigParser()
- old_vars = self._templar._available_variables
- self._templar.set_available_variables(temp_vars)
-
- for source in sources:
- self.read_config(source, config)
-
- self._templar.set_available_variables(old_vars)
- # Dump configparser to string via an emulated file
-
- fakefile = StringIO()
- config.write(fakefile)
-
- remote_path = self._connection._shell.join_path(tmp, 'src')
- xfered = self._transfer_data(remote_path, fakefile.getvalue())
- fakefile.close()
-
- new_module_args = self._task.args.copy()
- new_module_args.pop('vars', None)
- new_module_args.pop('sources', None)
-
- new_module_args.update(
- dict(
- src=xfered
- )
- )
-
- result.update(self._execute_module(module_name='copy',
- module_args=new_module_args,
- task_vars=task_vars,
- tmp=tmp))
- return result
diff --git a/ansible/action_plugins/merge_yaml.py b/ansible/action_plugins/merge_yaml.py
deleted file mode 100755
index 34ba7fb8db..0000000000
--- a/ansible/action_plugins/merge_yaml.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2015 Sam Yaple
-# Copyright 2016 intel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-import os
-
-from yaml import dump
-from yaml import load
-try:
- from yaml import CDumper as Dumper # noqa: F401
- from yaml import CLoader as Loader # noqa: F401
-except ImportError:
- from yaml import Dumper # noqa: F401
- from yaml import Loader # noqa: F401
-
-
-from ansible.plugins import action
-
-
-class ActionModule(action.ActionBase):
-
- TRANSFERS_FILES = True
-
- def read_config(self, source):
- result = None
- # Only use config if present
- if os.access(source, os.R_OK):
- with open(source, 'r') as f:
- template_data = f.read()
- template_data = self._templar.template(template_data)
- result = load(template_data)
- return result or {}
-
- def run(self, tmp=None, task_vars=None):
- if task_vars is None:
- task_vars = dict()
- result = super(ActionModule, self).run(tmp, task_vars)
-
- # NOTE(jeffrey4l): Ansible 2.1 add a remote_user param to the
- # _make_tmp_path function. inspect the number of the args here. In
- # this way, ansible 2.0 and ansible 2.1 are both supported
- make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0]
- if not tmp and len(make_tmp_path_args) == 1:
- tmp = self._make_tmp_path()
- if not tmp and len(make_tmp_path_args) == 2:
- remote_user = (task_vars.get('ansible_user')
- or self._play_context.remote_user)
- tmp = self._make_tmp_path(remote_user)
- # save template args.
- extra_vars = self._task.args.get('vars', list())
- old_vars = self._templar._available_variables
-
- temp_vars = task_vars.copy()
- temp_vars.update(extra_vars)
- self._templar.set_available_variables(temp_vars)
-
- output = {}
- sources = self._task.args.get('sources', None)
- if not isinstance(sources, list):
- sources = [sources]
- for source in sources:
- output.update(self.read_config(source))
-
- # restore original vars
- self._templar.set_available_variables(old_vars)
-
- remote_path = self._connection._shell.join_path(tmp, 'src')
- xfered = self._transfer_data(remote_path,
- dump(output,
- default_flow_style=False))
- new_module_args = self._task.args.copy()
- new_module_args.update(
- dict(
- src=xfered
- )
- )
- del new_module_args['sources']
- result.update(self._execute_module(module_name='copy',
- module_args=new_module_args,
- task_vars=task_vars,
- tmp=tmp))
- return result
diff --git a/ansible/bifrost.yml b/ansible/bifrost.yml
deleted file mode 100644
index bf29352b68..0000000000
--- a/ansible/bifrost.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- roles:
- - { role: bifrost,
- tags: bifrost}
diff --git a/ansible/certificates.yml b/ansible/certificates.yml
deleted file mode 100644
index 410c698e99..0000000000
--- a/ansible/certificates.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- hosts: all
- roles:
- - certificates
diff --git a/ansible/destroy.yml b/ansible/destroy.yml
deleted file mode 100644
index 73b0870727..0000000000
--- a/ansible/destroy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- hosts: all
- roles:
- - destroy
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
deleted file mode 100644
index 91b1b50c77..0000000000
--- a/ansible/group_vars/all.yml
+++ /dev/null
@@ -1,427 +0,0 @@
----
-# The options in this file can be overridden in 'globals.yml'
-
-# The "temp" files that are created before merge need to stay persistent due
-# to the fact that ansible will register a "change" if it has to create them
-# again. Persistent files allow for idempotency
-container_config_directory: "/var/lib/kolla/config_files"
-
-# The directory to merge custom config files the kolla's config files
-node_custom_config: "/etc/kolla/config"
-
-# The project to generate configuration files for
-project: ""
-
-# The directory to store the config files on the destination node
-node_config_directory: "/etc/kolla/{{ project }}"
-
-
-###################
-# Kolla options
-###################
-
-# Which orchestration engine to use. Valid options are [ ANSIBLE, KUBERNETES ]
-orchestration_engine: "ANSIBLE"
-
-# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
-config_strategy: "COPY_ALWAYS"
-
-# Valid options are [ centos, oraclelinux, ubuntu ]
-kolla_base_distro: "centos"
-# Valid options are [ binary, source ]
-kolla_install_type: "binary"
-
-kolla_internal_vip_address: "{{ kolla_internal_address }}"
-kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
-kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
-kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_external_vip_address == kolla_internal_vip_address else kolla_external_vip_address }}"
-
-kolla_enable_sanity_checks: "no"
-
-kolla_enable_sanity_keystone: "{{ kolla_enable_sanity_checks }}"
-kolla_enable_sanity_glance: "{{ kolla_enable_sanity_checks }}"
-kolla_enable_sanity_cinder: "{{ kolla_enable_sanity_checks }}"
-kolla_enable_sanity_swift: "{{ kolla_enable_sanity_checks }}"
-
-
-####################
-# kolla-kubernetes
-####################
-# By default, Kolla API services bind to the network address assigned
-# to the api_interface. Allow the bind address to be an override. In
-# some cases (Kubernetes), the api_interface address is not known
-# until container runtime, and thus it is necessary to bind to all
-# interfaces "0.0.0.0". When used outside of Kubernetes, binding to
-# all interfaces may present a security issue, and thus is not
-# recommended.
-api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}"
-
-####################
-# Database options
-####################
-database_address: "{{ kolla_internal_fqdn }}"
-database_user: "root"
-database_port: "3306"
-
-
-####################
-# Docker options
-####################
-docker_registry_email:
-docker_registry:
-docker_namespace: "kolla"
-docker_registry_username:
-
-# Valid options are [ never, on-failure, always, unless-stopped ]
-docker_restart_policy: "unless-stopped"
-
-# '0' means unlimited retries
-docker_restart_policy_retry: "10"
-
-# Common options used throughout docker
-docker_common_options:
- auth_email: "{{ docker_registry_email }}"
- auth_password: "{{ docker_registry_password }}"
- auth_registry: "{{ docker_registry }}"
- auth_username: "{{ docker_registry_username }}"
- environment:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- restart_policy: "{{ docker_restart_policy }}"
- restart_retries: "{{ docker_restart_policy_retry }}"
-
-
-####################
-# keepalived options
-####################
-# Arbitrary unique number from 0..255
-keepalived_virtual_router_id: "51"
-
-
-####################
-# Networking options
-####################
-network_interface: "eth0"
-neutron_external_interface: "eth1"
-kolla_external_vip_interface: "{{ network_interface }}"
-api_interface: "{{ network_interface }}"
-storage_interface: "{{ network_interface }}"
-cluster_interface: "{{ network_interface }}"
-tunnel_interface: "{{ network_interface }}"
-bifrost_network_interface: "{{ network_interface }}"
-tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + tunnel_interface]['ipv4']['address'] }}"
-
-# Valid options are [ openvswitch, linuxbridge, sfc ]
-neutron_plugin_agent: "openvswitch"
-
-# The default ports used by each service.
-aodh_api_port: "8042"
-
-barbican_api_port: "9311"
-
-ceilometer_api_port: "8777"
-
-congress_api_port: "1789"
-
-cloudkitty_api_port: "8889"
-
-designate_api_port: "9001"
-
-iscsi_port: "3260"
-
-gnocchi_api_port: "8041"
-
-mariadb_port: "{{ database_port }}"
-mariadb_wsrep_port: "4567"
-mariadb_ist_port: "4568"
-mariadb_sst_port: "4444"
-
-rabbitmq_port: "5672"
-rabbitmq_management_port: "15672"
-rabbitmq_cluster_port: "25672"
-rabbitmq_epmd_port: "4369"
-
-mongodb_port: "27017"
-mongodb_web_port: "28017"
-
-haproxy_stats_port: "1984"
-
-keystone_public_port: "5000"
-keystone_admin_port: "35357"
-keystone_ssh_port: "8023"
-
-glance_api_port: "9292"
-glance_registry_port: "9191"
-
-nova_api_port: "8774"
-nova_metadata_port: "8775"
-nova_novncproxy_port: "6080"
-nova_spicehtml5proxy_port: "6082"
-
-neutron_server_port: "9696"
-
-cinder_api_port: "8776"
-
-memcached_port: "11211"
-
-swift_proxy_server_port: "8080"
-swift_object_server_port: "6000"
-swift_account_server_port: "6001"
-swift_container_server_port: "6002"
-swift_rsync_port: "10873"
-
-sahara_api_port: "8386"
-
-heat_api_port: "8004"
-heat_api_cfn_port: "8000"
-
-murano_api_port: "8082"
-
-ironic_api_port: "6385"
-
-magnum_api_port: "9511"
-
-rgw_port: "6780"
-
-mistral_api_port: "8989"
-
-kibana_server_port: "5601"
-
-elasticsearch_port: "9200"
-
-manila_api_port: "8786"
-
-watcher_api_port: "9322"
-
-influxdb_admin_port: "8083"
-influxdb_http_port: "8086"
-
-senlin_api_port: "8778"
-
-etcd_client_port: "2379"
-etcd_peer_port: "2380"
-
-kuryr_port: "23750"
-
-searchlight_api_port: "9393"
-
-grafana_server_port: "3000"
-
-public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
-internal_protocol: "http"
-admin_protocol: "http"
-
-####################
-# OpenStack options
-####################
-openstack_release: "3.0.0"
-openstack_logging_debug: "False"
-
-openstack_region_name: "RegionOne"
-
-openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min if orchestration_engine == 'ANSIBLE' else '1'}}"
-
-# Optionally allow Kolla to set sysctl values
-set_sysctl: "yes"
-
-# Valid options are [ novnc, spice ]
-nova_console: "novnc"
-
-# OpenStack authentication string. You should only need to override these if you
-# are changing the admin tenant/project or user.
-openstack_auth:
- auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
- username: "admin"
- password: "{{ keystone_admin_password }}"
- project_name: "admin"
-
-# These roles are required for Kolla to be operation, however a savvy deployer
-# could disable some of these required roles and run their own services.
-enable_glance: "yes"
-enable_haproxy: "yes"
-enable_keystone: "yes"
-enable_mariadb: "yes"
-enable_memcached: "yes"
-enable_neutron: "yes"
-enable_nova: "yes"
-enable_rabbitmq: "yes"
-
-# Additional optional OpenStack features and services are specified here
-enable_aodh: "no"
-enable_barbican: "no"
-enable_cadf_notifications: "no"
-enable_ceilometer: "no"
-enable_central_logging: "no"
-enable_ceph: "no"
-enable_ceph_rgw: "no"
-enable_cinder: "no"
-enable_cinder_backend_iscsi: "no"
-enable_cinder_backend_lvm: "no"
-enable_cinder_backend_nfs: "no"
-enable_cloudkitty: "no"
-enable_congress: "no"
-enable_etcd: "no"
-enable_designate: "no"
-enable_gnocchi: "no"
-enable_grafana: "no"
-enable_heat: "yes"
-enable_horizon: "yes"
-enable_influxdb: "no"
-enable_ironic: "no"
-enable_iscsid: "{{ enable_cinder_backend_iscsi | bool or enable_cinder_backend_lvm | bool or enable_ironic | bool }}"
-enable_kuryr: "no"
-enable_magnum: "no"
-enable_manila: "no"
-enable_manila_backend_generic: "no"
-enable_manila_backend_hnas: "no"
-enable_mistral: "no"
-enable_mongodb: "no"
-enable_multipathd: "no"
-enable_murano: "no"
-enable_neutron_vpnaas: "no"
-enable_neutron_dvr: "no"
-enable_neutron_lbaas: "no"
-enable_neutron_qos: "no"
-enable_neutron_agent_ha: "no"
-enable_rally: "no"
-enable_sahara: "no"
-enable_searchlight: "no"
-enable_senlin: "no"
-enable_swift: "no"
-enable_telegraf: "no"
-enable_tempest: "no"
-enable_vmtp: "no"
-enable_watcher: "no"
-
-ironic_keystone_user: "ironic"
-neutron_keystone_user: "neutron"
-nova_keystone_user: "nova"
-
-# Nova fake driver and the number of fake driver per compute node
-enable_nova_fake: "no"
-num_nova_fake_per_node: 5
-
-# Monitoring options are specified here
-enable_collectd: "no"
-
-####################
-# Logging options
-####################
-
-elasticsearch_address: "{{ kolla_internal_vip_address }}"
-elasticsearch_protocol: "{{ internal_protocol }}"
-
-enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool else 'no' }}"
-enable_kibana: "{{ 'yes' if enable_central_logging | bool else 'no' }}"
-
-####################
-# RabbitMQ options
-####################
-rabbitmq_user: "openstack"
-rabbitmq_version: "rabbitmq_server-3.6/plugins/rabbitmq_clusterer-3.6.x.ez/rabbitmq_clusterer-3.6.x-667f92b0/ebin"
-
-####################
-# HAProxy options
-####################
-haproxy_user: "openstack"
-haproxy_enable_external_vip: "{{ 'no' if kolla_external_vip_address == kolla_internal_vip_address else 'yes' }}"
-kolla_enable_tls_external: "no"
-kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
-kolla_external_fqdn_cacert: "{{ node_config_directory }}/certificates/haproxy-ca.crt"
-
-
-####################
-# Kibana options
-####################
-kibana_user: "kibana"
-
-
-####################
-# Keystone options
-####################
-keystone_admin_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3"
-keystone_internal_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3"
-keystone_public_url: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ keystone_public_port }}/v3"
-
-# Valid options are [ uuid, fernet ]
-keystone_token_provider: "uuid"
-fernet_token_expiry: 86400
-
-
-#######################
-# Glance options
-#######################
-glance_backend_file: "{{ not enable_ceph | bool }}"
-glance_backend_ceph: "{{ enable_ceph }}"
-
-
-#######################
-# Ceilometer options
-#######################
-# Valid options are [ mongodb, mysql, gnocchi ]
-ceilometer_database_type: "mongodb"
-
-
-#################
-# Gnocchi options
-#################
-# Vaid options are [file, ceph]
-gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}"
-
-
-#################################
-# Cinder options
-#################################
-cinder_backend_ceph: "{{ enable_ceph }}"
-cinder_volume_group: "cinder-volumes"
-
-#######################
-# Cloudkitty options
-#######################
-# Valid options are [ ceilometer, gnocchi ]
-cloudkitty_collector_backend: "ceilometer"
-
-
-#######################
-# Nova options
-#######################
-nova_backend_ceph: "{{ enable_ceph }}"
-nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
-
-
-#######################
-# Horizon options
-#######################
-horizon_backend_database: "no"
-
-###################
-# Ceph options
-###################
-# Ceph can be setup with a caching to improve performance. To use the cache you
-# must provide separate disks than those for the OSDs
-ceph_enable_cache: "no"
-# Valid options are [ forward, none, writeback ]
-ceph_cache_mode: "writeback"
-
-# Valid options are [ ext4, btrfs, xfs ]
-ceph_osd_filesystem: "xfs"
-
-# Set to 'yes-i-really-really-mean-it' to force wipe disks with existing partitions for OSDs. Only
-# set if you understand the consequences!
-ceph_osd_wipe_disk: ""
-
-# These are /etc/fstab options. Comma separated, no spaces (see fstab(8))
-ceph_osd_mount_options: "defaults,noatime"
-
-# A requirement for using the erasure-coded pools is you must setup a cache tier
-# Valid options are [ erasure, replicated ]
-ceph_pool_type: "replicated"
-
-ceph_cinder_pool_name: "volumes"
-ceph_cinder_backup_pool_name: "backups"
-ceph_glance_pool_name: "images"
-ceph_gnocchi_pool_name: "gnocchi"
-ceph_nova_pool_name: "vms"
-
-ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
-ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}"
-ceph_cache_rule: "cache host firstn"
diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one
deleted file mode 100644
index fbe77903d3..0000000000
--- a/ansible/inventory/all-in-one
+++ /dev/null
@@ -1,423 +0,0 @@
-# These initial groups are the only groups required to be modified. The
-# additional groups are for more control of the environment.
-[control]
-localhost ansible_connection=local
-
-[network]
-localhost ansible_connection=local
-
-[compute]
-localhost ansible_connection=local
-
-[storage]
-localhost ansible_connection=local
-
-[monitoring]
-localhost ansible_connection=local
-
-# You can explicitly specify which hosts run each project by updating the
-# groups in the sections below. Common services are grouped together.
-[collectd:children]
-compute
-
-[baremetal:children]
-control
-
-[grafana:children]
-monitoring
-
-[etcd:children]
-control
-
-[kibana:children]
-control
-
-[telegraf:children]
-compute
-control
-monitoring
-network
-storage
-
-[elasticsearch:children]
-control
-
-[haproxy:children]
-network
-
-[mariadb:children]
-control
-
-[rabbitmq:children]
-control
-
-[mongodb:children]
-control
-
-[keystone:children]
-control
-
-[glance:children]
-control
-
-[nova:children]
-control
-
-[neutron:children]
-network
-
-[cinder:children]
-control
-
-[cloudkitty:children]
-control
-
-[memcached:children]
-control
-
-[horizon:children]
-control
-
-[swift:children]
-control
-
-[barbican:children]
-control
-
-[heat:children]
-control
-
-[murano:children]
-control
-
-[ceph:children]
-control
-
-[ironic:children]
-control
-
-[influxdb:children]
-monitoring
-
-[magnum:children]
-control
-
-[sahara:children]
-control
-
-[mistral:children]
-control
-
-[manila:children]
-control
-
-[gnocchi:children]
-control
-
-[ceilometer:children]
-control
-
-[aodh:children]
-control
-
-[congress:children]
-control
-
-# Tempest
-[tempest:children]
-control
-
-[senlin:children]
-control
-
-[vmtp:children]
-control
-
-[watcher:children]
-control
-
-[rally:children]
-control
-
-[searchlight:children]
-control
-
-[designate:children]
-control
-
-# Additional control implemented here. These groups allow you to control which
-# services run on which hosts at a per-service level.
-#
-# Word of caution: Some services are required to run on the same host to
-# function appropriately. For example, neutron-metadata-agent must run on the
-# same host as the l3-agent and (depending on configuration) the dhcp-agent.
-
-# Glance
-[glance-api:children]
-glance
-
-[glance-registry:children]
-glance
-
-# Nova
-[nova-api:children]
-nova
-
-[nova-conductor:children]
-nova
-
-[nova-consoleauth:children]
-nova
-
-[nova-novncproxy:children]
-nova
-
-[nova-scheduler:children]
-nova
-
-[nova-spicehtml5proxy:children]
-nova
-
-[nova-compute-ironic:children]
-nova
-
-# Neutron
-[neutron-server:children]
-control
-
-[neutron-dhcp-agent:children]
-neutron
-
-[neutron-l3-agent:children]
-neutron
-
-[neutron-lbaas-agent:children]
-neutron
-
-[neutron-metadata-agent:children]
-neutron
-
-[neutron-vpnaas-agent:children]
-neutron
-
-# Ceph
-[ceph-mon:children]
-ceph
-
-[ceph-rgw:children]
-ceph
-
-[ceph-osd:children]
-storage
-
-# Cinder
-[cinder-api:children]
-cinder
-
-[cinder-backup:children]
-storage
-
-[cinder-scheduler:children]
-cinder
-
-[cinder-volume:children]
-storage
-
-# Cloudkitty
-[cloudkitty-api:children]
-cloudkitty
-
-[cloudkitty-processor:children]
-cloudkitty
-
-# iSCSI
-[iscsid:children]
-compute
-storage
-ironic-conductor
-
-[tgtd:children]
-storage
-
-# Manila
-[manila-api:children]
-manila
-
-[manila-scheduler:children]
-manila
-
-[manila-share:children]
-network
-
-[manila-data:children]
-manila
-
-# Swift
-[swift-proxy-server:children]
-swift
-
-[swift-account-server:children]
-storage
-
-[swift-container-server:children]
-storage
-
-[swift-object-server:children]
-storage
-
-# Barbican
-[barbican-api:children]
-barbican
-
-[barbican-keystone-listener:children]
-barbican
-
-[barbican-worker:children]
-barbican
-
-# Heat
-[heat-api:children]
-heat
-
-[heat-api-cfn:children]
-heat
-
-[heat-engine:children]
-heat
-
-# Murano
-[murano-api:children]
-murano
-
-[murano-engine:children]
-murano
-
-# Ironic
-[ironic-api:children]
-ironic
-
-[ironic-conductor:children]
-ironic
-
-[ironic-inspector:children]
-ironic
-
-[ironic-pxe:children]
-ironic
-
-# Magnum
-[magnum-api:children]
-magnum
-
-[magnum-conductor:children]
-magnum
-
-# Mistral
-[mistral-api:children]
-mistral
-
-[mistral-executor:children]
-mistral
-
-[mistral-engine:children]
-mistral
-
-# Aodh
-[aodh-api:children]
-aodh
-
-[aodh-evaluator:children]
-aodh
-
-[aodh-listener:children]
-aodh
-
-[aodh-notifier:children]
-aodh
-
-# Gnocchi
-[gnocchi-api:children]
-gnocchi
-
-[gnocchi-statsd:children]
-gnocchi
-
-[gnocchi-metricd:children]
-gnocchi
-
-# Sahara
-[sahara-api:children]
-sahara
-
-[sahara-engine:children]
-sahara
-
-# Ceilometer
-[ceilometer-api:children]
-ceilometer
-
-[ceilometer-central:children]
-ceilometer
-
-[ceilometer-notification:children]
-ceilometer
-
-[ceilometer-collector:children]
-ceilometer
-
-[ceilometer-compute:children]
-compute
-
-# Congress
-[congress-api:children]
-congress
-
-[congress-datasource:children]
-congress
-
-[congress-policy-engine:children]
-congress
-
-# Multipathd
-[multipathd:children]
-compute
-
-# Watcher
-[watcher-api:children]
-watcher
-
-[watcher-engine:children]
-watcher
-
-[watcher-applier:children]
-watcher
-
-# Senlin
-[senlin-api:children]
-senlin
-
-[senlin-engine:children]
-senlin
-
-# Searchlight
-[searchlight-api:children]
-searchlight
-
-[searchlight-listener:children]
-searchlight
-
-# Designate
-[designate-api:children]
-designate
-
-[designate-central:children]
-designate
-
-[designate-mdns:children]
-designate
-
-[designate-pool-manager:children]
-designate
-
-[designate-sink:children]
-designate
diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode
deleted file mode 100644
index 1cd3179049..0000000000
--- a/ansible/inventory/multinode
+++ /dev/null
@@ -1,438 +0,0 @@
-# These initial groups are the only groups required to be modified. The
-# additional groups are for more control of the environment.
-[control]
-# These hostname must be resolvable from your deployment host
-control01
-control02
-control03
-
-# The above can also be specified as follows:
-#control[01:03] ansible_user=kolla
-
-# The network nodes are where your l3-agent and loadbalancers will run
-# This can be the same as a host in the control group
-[network]
-network01
-
-[compute]
-compute01
-
-[monitoring]
-monitoring01
-
-# When compute nodes and control nodes use different interfaces,
-# you can specify "api_interface" and another interfaces like below:
-#compute01 neutron_external_interface=eth0 api_interface=em1 storage_interface=em1 tunnel_interface=em1
-
-[storage]
-storage01
-
-[baremetal:children]
-control
-network
-compute
-storage
-
-# You can explicitly specify which hosts run each project by updating the
-# groups in the sections below. Common services are grouped together.
-[collectd:children]
-compute
-
-[grafana:children]
-monitoring
-
-[etcd:children]
-control
-
-[influxdb:children]
-monitoring
-
-[kibana:children]
-control
-
-[telegraf:children]
-compute
-control
-monitoring
-network
-storage
-
-[elasticsearch:children]
-control
-
-[haproxy:children]
-network
-
-[mariadb:children]
-control
-
-[rabbitmq:children]
-control
-
-[mongodb:children]
-control
-
-[keystone:children]
-control
-
-[glance:children]
-control
-
-[nova:children]
-control
-
-[neutron:children]
-network
-
-[cinder:children]
-control
-
-[cloudkitty:children]
-control
-
-[memcached:children]
-control
-
-[horizon:children]
-control
-
-[swift:children]
-control
-
-[barbican:children]
-control
-
-[heat:children]
-control
-
-[murano:children]
-control
-
-[ironic:children]
-control
-
-[ceph:children]
-control
-
-[magnum:children]
-control
-
-[sahara:children]
-control
-
-[mistral:children]
-control
-
-[manila:children]
-control
-
-[ceilometer:children]
-control
-
-[aodh:children]
-control
-
-[congress:children]
-control
-
-[gnocchi:children]
-control
-
-# Tempest
-[tempest:children]
-control
-
-[senlin:children]
-control
-
-[vmtp:children]
-control
-
-[watcher:children]
-control
-
-[rally:children]
-control
-
-[searchlight:children]
-control
-
-[designate:children]
-control
-
-# Additional control implemented here. These groups allow you to control which
-# services run on which hosts at a per-service level.
-#
-# Word of caution: Some services are required to run on the same host to
-# function appropriately. For example, neutron-metadata-agent must run on the
-# same host as the l3-agent and (depending on configuration) the dhcp-agent.
-
-# Glance
-[glance-api:children]
-glance
-
-[glance-registry:children]
-glance
-
-# Nova
-[nova-api:children]
-nova
-
-[nova-conductor:children]
-nova
-
-[nova-consoleauth:children]
-nova
-
-[nova-novncproxy:children]
-nova
-
-[nova-scheduler:children]
-nova
-
-[nova-spicehtml5proxy:children]
-nova
-
-[nova-compute-ironic:children]
-nova
-
-# Neutron
-[neutron-server:children]
-control
-
-[neutron-dhcp-agent:children]
-neutron
-
-[neutron-l3-agent:children]
-neutron
-
-[neutron-lbaas-agent:children]
-neutron
-
-[neutron-metadata-agent:children]
-neutron
-
-[neutron-vpnaas-agent:children]
-neutron
-
-# Ceph
-[ceph-mon:children]
-ceph
-
-[ceph-rgw:children]
-ceph
-
-[ceph-osd:children]
-storage
-
-# Cinder
-[cinder-api:children]
-cinder
-
-[cinder-backup:children]
-storage
-
-[cinder-scheduler:children]
-cinder
-
-[cinder-volume:children]
-storage
-
-# Cloudkitty
-[cloudkitty-api:children]
-cloudkitty
-
-[cloudkitty-processor:children]
-cloudkitty
-
-# iSCSI
-[iscsid:children]
-compute
-storage
-ironic-conductor
-
-[tgtd:children]
-storage
-
-# Manila
-[manila-api:children]
-manila
-
-[manila-scheduler:children]
-manila
-
-[manila-share:children]
-network
-
-[manila-data:children]
-manila
-
-# Swift
-[swift-proxy-server:children]
-swift
-
-[swift-account-server:children]
-storage
-
-[swift-container-server:children]
-storage
-
-[swift-object-server:children]
-storage
-
-# Barbican
-[barbican-api:children]
-barbican
-
-[barbican-keystone-listener:children]
-barbican
-
-[barbican-worker:children]
-barbican
-
-# Heat
-[heat-api:children]
-heat
-
-[heat-api-cfn:children]
-heat
-
-[heat-engine:children]
-heat
-
-# Murano
-[murano-api:children]
-murano
-
-[murano-engine:children]
-murano
-
-# Ironic
-[ironic-api:children]
-ironic
-
-[ironic-conductor:children]
-ironic
-
-[ironic-inspector:children]
-ironic
-
-[ironic-pxe:children]
-ironic
-
-# Magnum
-[magnum-api:children]
-magnum
-
-[magnum-conductor:children]
-magnum
-
-# Sahara
-[sahara-api:children]
-sahara
-
-[sahara-engine:children]
-sahara
-
-# Mistral
-[mistral-api:children]
-mistral
-
-[mistral-executor:children]
-mistral
-
-[mistral-engine:children]
-mistral
-
-# Ceilometer
-[ceilometer-api:children]
-ceilometer
-
-[ceilometer-central:children]
-ceilometer
-
-[ceilometer-notification:children]
-ceilometer
-
-[ceilometer-collector:children]
-ceilometer
-
-[ceilometer-compute:children]
-compute
-
-# Aodh
-[aodh-api:children]
-aodh
-
-[aodh-evaluator:children]
-aodh
-
-[aodh-listener:children]
-aodh
-
-[aodh-notifier:children]
-aodh
-
-# Congress
-[congress-api:children]
-congress
-
-[congress-datasource:children]
-congress
-
-[congress-policy-engine:children]
-congress
-
-# Gnocchi
-[gnocchi-api:children]
-gnocchi
-
-[gnocchi-statsd:children]
-gnocchi
-
-[gnocchi-metricd:children]
-gnocchi
-
-# Multipathd
-[multipathd:children]
-compute
-
-# Watcher
-[watcher-api:children]
-watcher
-
-[watcher-engine:children]
-watcher
-
-[watcher-applier:children]
-watcher
-
-# Senlin
-[senlin-api:children]
-senlin
-
-[senlin-engine:children]
-senlin
-
-# Searchlight
-[searchlight-api:children]
-searchlight
-
-[searchlight-listener:children]
-searchlight
-
-# Designate
-[designate-api:children]
-designate
-
-[designate-central:children]
-designate
-
-[designate-mdns:children]
-designate
-
-[designate-pool-manager:children]
-designate
-
-[designate-sink:children]
-designate
diff --git a/ansible/kolla-host.yml b/ansible/kolla-host.yml
deleted file mode 100644
index db50419ece..0000000000
--- a/ansible/kolla-host.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- hosts: baremetal
- gather_facts: no
- roles:
- - { role: baremetal,
- tags: baremetal }
-
diff --git a/ansible/library/bslurp.py b/ansible/library/bslurp.py
deleted file mode 100644
index b117123cd9..0000000000
--- a/ansible/library/bslurp.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2015 Sam Yaple
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This module has been relicensed from the source below:
-# https://github.com/SamYaple/yaodu/blob/master/ansible/library/bslurp
-
-DOCUMENTATION = '''
----
-module: bslurp
-short_description: Slurps a file from a remote node
-description:
- - Used for fetching a binary blob containing the file, then push that file
- to other hosts.
-options:
- src:
- description:
- - File to fetch. When dest is used, src is expected to be a str with data
- required: True
- type: str
- compress:
- description:
- - Compress file with zlib
- default: True
- type: bool
- dest:
- description:
- - Where to write out binary blob
- required: False
- type: str
- mode:
- description:
- - Destination file permissions
- default: '0644'
- type: str
- sha1:
- description:
- - sha1 hash of the underlying data
- default: None
- type: bool
-author: Sam Yaple
-'''
-
-EXAMPLES = '''
-Distribute a file from single to many hosts:
-
-- hosts: web_servers
- tasks:
- - name: Pull in web config
- bslurp: src="/path/to/file"
- register: file_data
- run_once: True
- - name: Push if changed
- bslurp:
- src: "{{ file_data.content }}"
- dest: "{{ file_data.source }}"
- mode: "{{ file_data.mode }}"
- sha1: "{{ file_data.sha1 }}"
-
-Distribute multiple files from single to many hosts:
-
-- hosts: web_servers
- tasks:
- - name: Pull in web config
- bslurp: src="{{ item }}"
- with_items:
- - "/path/to/file1"
- - "/path/to/file2"
- - "/path/to/file3"
- register: file_data
- run_once: True
- - name: Push if changed
- bslurp:
- src: "{{ item.content }}"
- dest: "{{ item.source }}"
- mode: "{{ item.mode }}"
- sha1: "{{ item.sha1 }}"
- with_items: file_data.results
-
-Distribute a file to many hosts without compression; Change
-permissions on dest:
-
-- hosts: web_servers
- tasks:
- - name: Pull in web config
- bslurp: src="/path/to/file"
- register: file_data
- run_once: True
- - name: Push if changed
- bslurp:
- src: "{{ file_data.content }}"
- dest: "/new/path/to/file"
- mode: "0777"
- compress: False
- sha1: "{{ file_data.sha1 }}"
-'''
-
-import base64
-import hashlib
-import os
-import traceback
-import zlib
-
-
-def copy_from_host(module):
- compress = module.params.get('compress')
- src = module.params.get('src')
-
- if not os.path.exists(src):
- module.fail_json(msg="file not found: {}".format(src))
- if not os.access(src, os.R_OK):
- module.fail_json(msg="file is not readable: {}".format(src))
-
- mode = oct(os.stat(src).st_mode & 0o777)
-
- with open(src, 'rb') as f:
- raw_data = f.read()
-
- sha1 = hashlib.sha1(raw_data).hexdigest()
- data = zlib.compress(raw_data) if compress else raw_data
-
- module.exit_json(content=base64.b64encode(data), sha1=sha1, mode=mode,
- source=src)
-
-
-def copy_to_host(module):
- compress = module.params.get('compress')
- dest = module.params.get('dest')
- mode = int(module.params.get('mode'), 0)
- sha1 = module.params.get('sha1')
- src = module.params.get('src')
-
- data = base64.b64decode(src)
- raw_data = zlib.decompress(data) if compress else data
-
- if sha1:
- if os.path.exists(dest):
- if os.access(dest, os.R_OK):
- with open(dest, 'rb') as f:
- if hashlib.sha1(f.read()).hexdigest() == sha1:
- module.exit_json(changed=False)
- else:
- module.exit_json(failed=True, changed=False,
- msg='file is not accessible: {}'.format(dest))
-
- if sha1 != hashlib.sha1(raw_data).hexdigest():
- module.exit_json(failed=True, changed=False,
- msg='sha1 sum does not match data')
-
- with os.fdopen(os.open(dest, os.O_WRONLY | os.O_CREAT, mode), 'wb') as f:
- f.write(raw_data)
-
- module.exit_json(changed=True)
-
-
-def main():
- argument_spec = dict(
- compress=dict(default=True, type='bool'),
- dest=dict(type='str'),
- mode=dict(default='0644', type='str'),
- sha1=dict(default=None, type='str'),
- src=dict(required=True, type='str')
- )
- module = AnsibleModule(argument_spec)
-
- dest = module.params.get('dest')
-
- try:
- if dest:
- copy_to_host(module)
- else:
- copy_from_host(module)
- except Exception:
- module.exit_json(failed=True, changed=True,
- msg=repr(traceback.format_exc()))
-
-
-# import module snippets
-from ansible.module_utils.basic import * # noqa
-if __name__ == '__main__':
- main()
diff --git a/ansible/library/kolla_docker.py b/ansible/library/kolla_docker.py
deleted file mode 100644
index 28f752a2e7..0000000000
--- a/ansible/library/kolla_docker.py
+++ /dev/null
@@ -1,751 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2015 Sam Yaple
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DOCUMENTATION = '''
----
-module: kolla_docker
-short_description: Module for controlling Docker
-description:
- - A module targeting at controlling Docker as used by Kolla.
-options:
- common_options:
- description:
- - A dict containing common params such as login info
- required: False
- type: dict
- default: dict()
- action:
- description:
- - The action the module should take
- required: True
- type: str
- choices:
- - compare_image
- - create_volume
- - get_container_env
- - get_container_state
- - pull_image
- - remove_container
- - remove_volume
- - restart_container
- - start_container
- - stop_container
- api_version:
- description:
- - The version of the api for docker-py to use when contacting docker
- required: False
- type: str
- default: auto
- auth_email:
- description:
- - The email address used to authenticate
- required: False
- type: str
- auth_password:
- description:
- - The password used to authenticate
- required: False
- type: str
- auth_registry:
- description:
- - The registry to authenticate
- required: False
- type: str
- auth_username:
- description:
- - The username used to authenticate
- required: False
- type: str
- detach:
- description:
- - Detach from the container after it is created
- required: False
- default: True
- type: bool
- name:
- description:
- - Name of the container or volume to manage
- required: False
- type: str
- environment:
- description:
- - The environment to set for the container
- required: False
- type: dict
- image:
- description:
- - Name of the docker image
- required: False
- type: str
- ipc_mode:
- description:
- - Set docker ipc namespace
- required: False
- type: str
- default: None
- choices:
- - host
- cap_add:
- description:
- - Add capabilities to docker container
- required: False
- type: list
- default: list()
- security_opt:
- description:
- - Set container security profile
- required: False
- type: list
- default: list()
- labels:
- description:
- - List of labels to apply to container
- required: False
- type: dict
- default: dict()
- pid_mode:
- description:
- - Set docker pid namespace
- required: False
- type: str
- default: None
- choices:
- - host
- privileged:
- description:
- - Set the container to privileged
- required: False
- default: False
- type: bool
- remove_on_exit:
- description:
- - When not detaching from container, remove on successful exit
- required: False
- default: True
- type: bool
- restart_policy:
- description:
- - Determine what docker does when the container exits
- required: False
- type: str
- choices:
- - never
- - on-failure
- - always
- - unless-stopped
- restart_retries:
- description:
- - How many times to attempt a restart if restart_policy is set
- type: int
- default: 10
- volumes:
- description:
- - Set volumes for docker to use
- required: False
- type: list
- volumes_from:
- description:
- - Name or id of container(s) to use volumes from
- required: True
- type: list
-author: Sam Yaple
-'''
-
-EXAMPLES = '''
-- hosts: kolla_docker
- tasks:
- - name: Start container
- kolla_docker:
- image: ubuntu
- name: test_container
- action: start_container
- - name: Remove container
- kolla_docker:
- name: test_container
- action: remove_container
- - name: Pull image without starting container
- kolla_docker:
- action: pull_container
- image: private-registry.example.com:5000/ubuntu
- - name: Create named volume
- action: create_volume
- name: name_of_volume
- - name: Remove named volume
- action: remove_volume
- name: name_of_volume
-'''
-
-import json
-import os
-import traceback
-
-import docker
-
-
-class DockerWorker(object):
-
- def __init__(self, module):
- self.module = module
- self.params = self.module.params
- self.changed = False
-
- # TLS not fully implemented
- # tls_config = self.generate_tls()
-
- options = {
- 'version': self.params.get('api_version')
- }
-
- self.dc = docker.Client(**options)
-
- def generate_tls(self):
- tls = {'verify': self.params.get('tls_verify')}
- tls_cert = self.params.get('tls_cert'),
- tls_key = self.params.get('tls_key'),
- tls_cacert = self.params.get('tls_cacert')
-
- if tls['verify']:
- if tls_cert:
- self.check_file(tls_cert)
- self.check_file(tls_key)
- tls['client_cert'] = (tls_cert, tls_key)
- if tls_cacert:
- self.check_file(tls_cacert)
- tls['verify'] = tls_cacert
-
- return docker.tls.TLSConfig(**tls)
-
- def check_file(self, path):
- if not os.path.isfile(path):
- self.module.fail_json(
- failed=True,
- msg='There is no file at "{}"'.format(path)
- )
- if not os.access(path, os.R_OK):
- self.module.fail_json(
- failed=True,
- msg='Permission denied for file at "{}"'.format(path)
- )
-
- def check_image(self):
- find_image = ':'.join(self.parse_image())
- for image in self.dc.images():
- repo_tags = image.get('RepoTags')
- if not repo_tags:
- continue
- for image_name in repo_tags:
- if image_name == find_image:
- return image
-
- def check_volume(self):
- for vol in self.dc.volumes()['Volumes'] or list():
- if vol['Name'] == self.params.get('name'):
- return vol
-
- def check_container(self):
- find_name = '/{}'.format(self.params.get('name'))
- for cont in self.dc.containers(all=True):
- if find_name in cont['Names']:
- return cont
-
- def get_container_info(self):
- container = self.check_container()
- if not container:
- return None
- return self.dc.inspect_container(self.params.get('name'))
-
- def check_container_differs(self):
- container_info = self.get_container_info()
- return (
- self.compare_cap_add(container_info) or
- self.compare_security_opt(container_info) or
- self.compare_image(container_info) or
- self.compare_ipc_mode(container_info) or
- self.compare_labels(container_info) or
- self.compare_privileged(container_info) or
- self.compare_pid_mode(container_info) or
- self.compare_volumes(container_info) or
- self.compare_volumes_from(container_info) or
- self.compare_environment(container_info)
- )
-
- def compare_ipc_mode(self, container_info):
- new_ipc_mode = self.params.get('ipc_mode')
- current_ipc_mode = container_info['HostConfig'].get('IpcMode')
- if not current_ipc_mode:
- current_ipc_mode = None
-
- if new_ipc_mode != current_ipc_mode:
- return True
-
- def compare_cap_add(self, container_info):
- new_cap_add = self.params.get('cap_add', list())
- current_cap_add = container_info['HostConfig'].get('CapAdd',
- list())
- if not current_cap_add:
- current_cap_add = list()
- if set(new_cap_add).symmetric_difference(set(current_cap_add)):
- return True
-
- def compare_security_opt(self, container_info):
- new_sec_opt = self.params.get('security_opt', list())
- current_sec_opt = container_info['HostConfig'].get('SecurityOpt',
- list())
- if not current_sec_opt:
- current_sec_opt = list()
- if set(new_sec_opt).symmetric_difference(set(current_sec_opt)):
- return True
-
- def compare_pid_mode(self, container_info):
- new_pid_mode = self.params.get('pid_mode')
- current_pid_mode = container_info['HostConfig'].get('PidMode')
- if not current_pid_mode:
- current_pid_mode = None
-
- if new_pid_mode != current_pid_mode:
- return True
-
- def compare_privileged(self, container_info):
- new_privileged = self.params.get('privileged')
- current_privileged = container_info['HostConfig']['Privileged']
- if new_privileged != current_privileged:
- return True
-
- def compare_image(self, container_info=None):
- container_info = container_info or self.get_container_info()
- parse_repository_tag = docker.utils.parse_repository_tag
- if not container_info:
- return True
- new_image = self.check_image()
- current_image = container_info['Image']
- if not new_image:
- return True
- if new_image['Id'] != current_image:
- return True
- # NOTE(Jeffrey4l) when new image and the current image have
- # the same id, but the tag name different.
- elif (parse_repository_tag(container_info['Config']['Image']) !=
- parse_repository_tag(self.params.get('image'))):
- return True
-
- def compare_labels(self, container_info):
- new_labels = self.params.get('labels')
- current_labels = container_info['Config'].get('Labels', dict())
- image_labels = self.check_image().get('Labels', dict())
- for k, v in image_labels.items():
- if k in new_labels:
- if v != new_labels[k]:
- return True
- else:
- del current_labels[k]
-
- if new_labels != current_labels:
- return True
-
- def compare_volumes_from(self, container_info):
- new_vols_from = self.params.get('volumes_from')
- current_vols_from = container_info['HostConfig'].get('VolumesFrom')
- if not new_vols_from:
- new_vols_from = list()
- if not current_vols_from:
- current_vols_from = list()
-
- if set(current_vols_from).symmetric_difference(set(new_vols_from)):
- return True
-
- def compare_volumes(self, container_info):
- volumes, binds = self.generate_volumes()
- current_vols = container_info['Config'].get('Volumes')
- current_binds = container_info['HostConfig'].get('Binds')
- if not volumes:
- volumes = list()
- if not current_vols:
- current_vols = list()
- if not current_binds:
- current_binds = list()
-
- if set(volumes).symmetric_difference(set(current_vols)):
- return True
-
- new_binds = list()
- if binds:
- for k, v in binds.items():
- new_binds.append("{}:{}:{}".format(k, v['bind'], v['mode']))
-
- if set(new_binds).symmetric_difference(set(current_binds)):
- return True
-
- def compare_environment(self, container_info):
- if self.params.get('environment'):
- current_env = dict()
- for kv in container_info['Config'].get('Env', list()):
- k, v = kv.split('=', 1)
- current_env.update({k: v})
-
- for k, v in self.params.get('environment').items():
- if k not in current_env:
- return True
- if current_env[k] != v:
- return True
-
- def parse_image(self):
- full_image = self.params.get('image')
-
- if '/' in full_image:
- registry, image = full_image.split('/', 1)
- else:
- image = full_image
-
- if ':' in image:
- return full_image.rsplit(':', 1)
- else:
- return full_image, 'latest'
-
- def pull_image(self):
- if self.params.get('auth_username'):
- self.dc.login(
- username=self.params.get('auth_username'),
- password=self.params.get('auth_password'),
- registry=self.params.get('auth_registry'),
- email=self.params.get('auth_email')
- )
-
- image, tag = self.parse_image()
-
- statuses = [
- json.loads(line.strip()) for line in self.dc.pull(
- repository=image, tag=tag, stream=True
- )
- ]
-
- for status in reversed(statuses):
- if 'error' in status:
- if status['error'].endswith('not found'):
- self.module.fail_json(
- msg="The requested image does not exist: {}:{}".format(
- image, tag),
- failed=True
- )
- else:
- self.module.fail_json(
- msg="Unknown error message: {}".format(
- status['error']),
- failed=True
- )
-
- if status and status.get('status'):
- # NOTE(SamYaple): This allows us to use v1 and v2 docker
- # registries. Eventually docker will stop supporting v1
- # registries and when that happens we can remove this.
- if 'legacy registry' in status['status']:
- continue
- elif 'Downloaded newer image for' in status['status']:
- self.changed = True
- return
- elif 'Image is up to date for' in status['status']:
- return
- else:
- self.module.fail_json(
- msg="Unknown status message: {}".format(
- status['status']),
- failed=True
- )
-
- def remove_container(self):
- if self.check_container():
- self.changed = True
- self.dc.remove_container(
- container=self.params.get('name'),
- force=True
- )
-
- def generate_volumes(self):
- volumes = self.params.get('volumes')
- if not volumes:
- return None, None
-
- vol_list = list()
- vol_dict = dict()
-
- for vol in volumes:
- if ':' not in vol:
- vol_list.append(vol)
- continue
-
- split_vol = vol.split(':')
-
- if (len(split_vol) == 2
- and ('/' not in split_vol[0] or '/' in split_vol[1])):
- split_vol.append('rw')
-
- vol_list.append(split_vol[1])
- vol_dict.update({
- split_vol[0]: {
- 'bind': split_vol[1],
- 'mode': split_vol[2]
- }
- })
-
- return vol_list, vol_dict
-
- def build_host_config(self, binds):
- options = {
- 'network_mode': 'host',
- 'ipc_mode': self.params.get('ipc_mode'),
- 'cap_add': self.params.get('cap_add'),
- 'security_opt': self.params.get('security_opt'),
- 'pid_mode': self.params.get('pid_mode'),
- 'privileged': self.params.get('privileged'),
- 'volumes_from': self.params.get('volumes_from')
- }
-
- if self.params.get('restart_policy') in ['on-failure',
- 'always',
- 'unless-stopped']:
- options['restart_policy'] = {
- 'Name': self.params.get('restart_policy'),
- 'MaximumRetryCount': self.params.get('restart_retries')
- }
-
- if binds:
- options['binds'] = binds
-
- return self.dc.create_host_config(**options)
-
- def _inject_env_var(self, environment_info):
- newenv = {
- 'KOLLA_SERVICE_NAME': self.params.get('name').replace('_', '-')
- }
- environment_info.update(newenv)
- return environment_info
-
- def _format_env_vars(self):
- env = self._inject_env_var(self.params.get('environment'))
- return {k: "" if env[k] is None else env[k] for k in env}
-
- def build_container_options(self):
- volumes, binds = self.generate_volumes()
- return {
- 'detach': self.params.get('detach'),
- 'environment': self._format_env_vars(),
- 'host_config': self.build_host_config(binds),
- 'labels': self.params.get('labels'),
- 'image': self.params.get('image'),
- 'name': self.params.get('name'),
- 'volumes': volumes,
- 'tty': True
- }
-
- def create_container(self):
- self.changed = True
- options = self.build_container_options()
- self.dc.create_container(**options)
-
- def start_container(self):
- if not self.check_image():
- self.pull_image()
-
- container = self.check_container()
- if container and self.check_container_differs():
- self.remove_container()
- container = self.check_container()
-
- if not container:
- self.create_container()
- container = self.check_container()
-
- if not container['Status'].startswith('Up '):
- self.changed = True
- self.dc.start(container=self.params.get('name'))
-
- # We do not want to detach so we wait around for container to exit
- if not self.params.get('detach'):
- rc = self.dc.wait(self.params.get('name'))
- if rc != 0:
- self.module.fail_json(
- failed=True,
- changed=True,
- msg="Container exited with non-zero return code"
- )
- if self.params.get('remove_on_exit'):
- self.remove_container()
-
- def get_container_env(self):
- name = self.params.get('name')
- info = self.get_container_info()
- if not info:
- self.module.fail_json(msg="No such container: {}".format(name))
- else:
- envs = dict()
- for env in info['Config']['Env']:
- if '=' in env:
- key, value = env.split('=', 1)
- else:
- key, value = env, ''
- envs[key] = value
-
- self.module.exit_json(**envs)
-
- def get_container_state(self):
- name = self.params.get('name')
- info = self.get_container_info()
- if not info:
- self.module.fail_json(msg="No such container: {}".format(name))
- else:
- self.module.exit_json(**info['State'])
-
- def stop_container(self):
- name = self.params.get('name')
- container = self.check_container()
- if not container:
- self.module.fail_json(
- msg="No such container: {} to stop".format(name))
- elif not container['Status'].startswith('Exited '):
- self.changed = True
- self.dc.stop(name)
-
- def restart_container(self):
- name = self.params.get('name')
- info = self.get_container_info()
- if not info:
- self.module.fail_json(
- msg="No such container: {}".format(name))
- else:
- self.changed = True
- self.dc.restart(name)
-
- def create_volume(self):
- if not self.check_volume():
- self.changed = True
- self.dc.create_volume(name=self.params.get('name'), driver='local')
-
- def remove_volume(self):
- if self.check_volume():
- self.changed = True
- try:
- self.dc.remove_volume(name=self.params.get('name'))
- except docker.errors.APIError as e:
- if e.response.status_code == 409:
- self.module.fail_json(
- failed=True,
- msg="Volume named '{}' is currently in-use".format(
- self.params.get('name')
- )
- )
- raise
-
-
-def generate_module():
- argument_spec = dict(
- common_options=dict(required=False, type='dict', default=dict()),
- action=dict(required=True, type='str', choices=['compare_image',
- 'create_volume',
- 'get_container_env',
- 'get_container_state',
- 'pull_image',
- 'remove_container',
- 'remove_volume',
- 'restart_container',
- 'start_container',
- 'stop_container']),
- api_version=dict(required=False, type='str', default='auto'),
- auth_email=dict(required=False, type='str'),
- auth_password=dict(required=False, type='str'),
- auth_registry=dict(required=False, type='str'),
- auth_username=dict(required=False, type='str'),
- detach=dict(required=False, type='bool', default=True),
- labels=dict(required=False, type='dict', default=dict()),
- name=dict(required=False, type='str'),
- environment=dict(required=False, type='dict'),
- image=dict(required=False, type='str'),
- ipc_mode=dict(required=False, type='str', choices=['host']),
- cap_add=dict(required=False, type='list', default=list()),
- security_opt=dict(required=False, type='list', default=list()),
- pid_mode=dict(required=False, type='str', choices=['host']),
- privileged=dict(required=False, type='bool', default=False),
- remove_on_exit=dict(required=False, type='bool', default=True),
- restart_policy=dict(required=False, type='str', choices=[
- 'no',
- 'never',
- 'on-failure',
- 'always',
- 'unless-stopped']),
- restart_retries=dict(required=False, type='int', default=10),
- tls_verify=dict(required=False, type='bool', default=False),
- tls_cert=dict(required=False, type='str'),
- tls_key=dict(required=False, type='str'),
- tls_cacert=dict(required=False, type='str'),
- volumes=dict(required=False, type='list'),
- volumes_from=dict(required=False, type='list')
- )
- required_together = [
- ['tls_cert', 'tls_key']
- ]
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=required_together,
- bypass_checks=True
- )
-
- new_args = module.params.pop('common_options', dict())
-
- # NOTE(jeffrey4l): merge the environment
- env = module.params.pop('environment', dict())
- if env:
- new_args['environment'].update(env)
-
- for key, value in module.params.items():
- if key in new_args and value is None:
- continue
- new_args[key] = value
-
- module.params = new_args
- return module
-
-
-def main():
- module = generate_module()
-
- # TODO(SamYaple): Replace with required_if when Ansible 2.0 lands
- if (module.params.get('action') in ['pull_image', 'start_container']
- and not module.params.get('image')):
- module.fail_json(
- msg="missing required arguments: image",
- failed=True
- )
- # TODO(SamYaple): Replace with required_if when Ansible 2.0 lands
- if (module.params.get('action') != 'pull_image'
- and not module.params.get('name')):
- module.fail_json(
- msg="missing required arguments: name",
- failed=True
- )
-
- try:
- dw = DockerWorker(module)
- # TODO(inc0): We keep it bool to have ansible deal with consistent
- # types. If we ever add method that will have to return some
- # meaningful data, we need to refactor all methods to return dicts.
- result = bool(getattr(dw, module.params.get('action'))())
- module.exit_json(changed=dw.changed, result=result)
- except Exception:
- module.exit_json(failed=True, changed=True,
- msg=repr(traceback.format_exc()))
-
-# import module snippets
-from ansible.module_utils.basic import * # noqa
-if __name__ == '__main__':
- main()
diff --git a/ansible/library/merge_configs.py b/ansible/library/merge_configs.py
deleted file mode 100644
index 5da09eed19..0000000000
--- a/ansible/library/merge_configs.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2015 Sam Yaple
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DOCUMENTATION = '''
----
-module: merge_configs
-short_description: Merge ini-style configs
-description:
- - ConfigParser is used to merge several ini-style configs into one
-options:
- dest:
- description:
- - The destination file name
- required: True
- type: str
- sources:
- description:
- - A list of files on the destination node to merge together
- default: None
- required: True
- type: str
-author: Sam Yaple
-'''
-
-EXAMPLES = '''
-Merge multiple configs:
-
-- hosts: database
- tasks:
- - name: Merge configs
- merge_configs:
- sources:
- - "/tmp/config_1.cnf"
- - "/tmp/config_2.cnf"
- - "/tmp/config_3.cnf"
- dest:
- - "/etc/mysql/my.cnf"
-'''
diff --git a/ansible/library/merge_yaml.py b/ansible/library/merge_yaml.py
deleted file mode 100644
index 66d316fa51..0000000000
--- a/ansible/library/merge_yaml.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2015 Sam Yaple
-# Copyright 2016 intel
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DOCUMENTATION = '''
----
-module: merge_yaml
-short_description: Merge yaml-style configs
-description:
- - PyYAML is used to merge several yaml files into one
-options:
- dest:
- description:
- - The destination file name
- required: True
- type: str
- sources:
- description:
- - A list of files on the destination node to merge together
- default: None
- required: True
- type: str
-author: Sean Mooney
-'''
-
-EXAMPLES = '''
-Merge multiple yaml files:
-
-- hosts: localhost
- tasks:
- - name: Merge yaml files
- merge_yaml:
- sources:
- - "/tmp/default.yml"
- - "/tmp/override.yml"
- dest:
- - "/tmp/out.yml"
-'''
diff --git a/ansible/mariadb_recovery.yml b/ansible/mariadb_recovery.yml
deleted file mode 100644
index 53cf73b42b..0000000000
--- a/ansible/mariadb_recovery.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- hosts: mariadb
- roles:
- - { role: mariadb,
- tags: mariadb,
- when: enable_mariadb | bool }
- vars:
- mariadb_recover: true
diff --git a/ansible/post-deploy.yml b/ansible/post-deploy.yml
deleted file mode 100644
index 76913dc546..0000000000
--- a/ansible/post-deploy.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Creating admin openrc file on the deploy node
- hosts: all
- tasks:
- - template:
- src: "roles/common/templates/admin-openrc.sh.j2"
- dest: "{{ node_config_directory }}/admin-openrc.sh"
- run_once: True
- connection: local
diff --git a/ansible/roles/aodh/defaults/main.yml b/ansible/roles/aodh/defaults/main.yml
deleted file mode 100644
index 66e2b94590..0000000000
--- a/ansible/roles/aodh/defaults/main.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-project_name: "aodh"
-
-
-####################
-# Database
-####################
-aodh_database_name: "aodh"
-aodh_database_user: "aodh"
-aodh_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-####################
-# Docker
-####################
-aodh_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-api"
-aodh_api_tag: "{{ openstack_release }}"
-aodh_api_image_full: "{{ aodh_api_image }}:{{ aodh_api_tag }}"
-
-aodh_evaluator_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-evaluator"
-aodh_evaluator_tag: "{{ openstack_release }}"
-aodh_evaluator_image_full: "{{ aodh_evaluator_image }}:{{ aodh_evaluator_tag }}"
-
-aodh_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-listener"
-aodh_listener_tag: "{{ openstack_release }}"
-aodh_listener_image_full: "{{ aodh_listener_image }}:{{ aodh_listener_tag }}"
-
-aodh_notifier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-notifier"
-aodh_notifier_tag: "{{ openstack_release }}"
-aodh_notifier_image_full: "{{ aodh_notifier_image }}:{{ aodh_notifier_tag }}"
-
-
-####################
-# OpenStack
-####################
-aodh_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ aodh_api_port }}"
-aodh_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ aodh_api_port }}"
-aodh_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ aodh_api_port }}"
-
-aodh_logging_debug: "{{ openstack_logging_debug }}"
-
-aodh_keystone_user: "aodh"
-
-openstack_aodh_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/aodh/meta/main.yml b/ansible/roles/aodh/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/aodh/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/aodh/tasks/bootstrap.yml b/ansible/roles/aodh/tasks/bootstrap.yml
deleted file mode 100644
index e532e899d9..0000000000
--- a/ansible/roles/aodh/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating aodh database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ aodh_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['aodh-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating aodh database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ aodh_database_name }}'
- password='{{ aodh_database_password }}'
- host='%'
- priv='{{ aodh_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['aodh-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/aodh/tasks/bootstrap_service.yml b/ansible/roles/aodh/tasks/bootstrap_service.yml
deleted file mode 100644
index 5b3f0585c9..0000000000
--- a/ansible/roles/aodh/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Running aodh bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ aodh_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_aodh"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/aodh-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "aodh:/var/lib/aodh/"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['aodh-api'][0] }}"
diff --git a/ansible/roles/aodh/tasks/config.yml b/ansible/roles/aodh/tasks/config.yml
deleted file mode 100644
index 971477aec0..0000000000
--- a/ansible/roles/aodh/tasks/config.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "aodh-api"
- - "aodh-evaluator"
- - "aodh-listener"
- - "aodh-notifier"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "aodh-api"
- - "aodh-listener"
- - "aodh-evaluator"
- - "aodh-notifier"
-
-- name: Copying over aodh.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/aodh.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/aodh.conf"
- - "{{ node_custom_config }}/aodh/{{ item }}.conf"
- - "{{ node_custom_config }}/aodh/{{ inventory_hostname }}/{{ item }}.conf"
- dest: "{{ node_config_directory }}/{{ item }}/aodh.conf"
- with_items:
- - "aodh-api"
- - "aodh-evaluator"
- - "aodh-listener"
- - "aodh-notifier"
-
-- name: Copying over wsgi-aodh files for services
- template:
- src: "wsgi-aodh.conf.j2"
- dest: "{{ node_config_directory }}/{{ item }}/wsgi-aodh.conf"
- with_items:
- - "aodh-api"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/aodh/policy.json"
- register: aodh_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/aodh/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "aodh-api"
- - "aodh-evaluator"
- - "aodh-listener"
- - "aodh-notifier"
- when:
- aodh_policy.stat.exists
diff --git a/ansible/roles/aodh/tasks/deploy.yml b/ansible/roles/aodh/tasks/deploy.yml
deleted file mode 100644
index bb4533fa0d..0000000000
--- a/ansible/roles/aodh/tasks/deploy.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['aodh-api']
-
-- include: config.yml
- when: inventory_hostname in groups['aodh-api'] or
- inventory_hostname in groups['aodh-evaluator'] or
- inventory_hostname in groups['aodh-listener'] or
- inventory_hostname in groups['aodh-notifier']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['aodh-api']
-
-- include: start.yml
- when: inventory_hostname in groups['aodh-api'] or
- inventory_hostname in groups['aodh-evaluator'] or
- inventory_hostname in groups['aodh-listener'] or
- inventory_hostname in groups['aodh-notifier']
diff --git a/ansible/roles/aodh/tasks/main.yml b/ansible/roles/aodh/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/aodh/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/aodh/tasks/precheck.yml b/ansible/roles/aodh/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/aodh/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/aodh/tasks/pull.yml b/ansible/roles/aodh/tasks/pull.yml
deleted file mode 100644
index dae4426a5e..0000000000
--- a/ansible/roles/aodh/tasks/pull.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Pulling aodh-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_api_image_full }}"
- when: inventory_hostname in groups['aodh-api']
-
-- name: Pulling aodh-listener image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_listener_image_full }}"
- when: inventory_hostname in groups['aodh-listener']
-
-- name: Pulling aodh-evaluator image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_evaluator_image_full }}"
- when: inventory_hostname in groups['aodh-evaluator']
-
-- name: Pulling aodh-notifier image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_notifier_image_full }}"
- when: inventory_hostname in groups['aodh-notifier']
diff --git a/ansible/roles/aodh/tasks/reconfigure.yml b/ansible/roles/aodh/tasks/reconfigure.yml
deleted file mode 100644
index c915b500d1..0000000000
--- a/ansible/roles/aodh/tasks/reconfigure.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: aodh_api, group: aodh-api }
- - { name: aodh_listener, group: aodh-listener }
- - { name: aodh_evaluator, group: aodh-evaluator }
- - { name: aodh_notifier, group: aodh-notifier }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: aodh_api, group: aodh-api }
- - { name: aodh_listener, group: aodh-listener }
- - { name: aodh_evaluator, group: aodh-evaluator }
- - { name: aodh_notifier, group: aodh-notifier }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: aodh_api, group: aodh-api }
- - { name: aodh_listener, group: aodh-listener }
- - { name: aodh_evaluator, group: aodh-evaluator }
- - { name: aodh_notifier, group: aodh-notifier }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: aodh_api, group: aodh-api },
- { name: aodh_listener, group: aodh-listener },
- { name: aodh_evaluator, group: aodh-evaluator },
- { name: aodh_notifier, group: aodh-notifier }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: aodh_api, group: aodh-api },
- { name: aodh_listener, group: aodh-listener },
- { name: aodh_evaluator, group: aodh-evaluator },
- { name: aodh_notifier, group: aodh-notifier }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/aodh/tasks/register.yml b/ansible/roles/aodh/tasks/register.yml
deleted file mode 100644
index 202a11fffc..0000000000
--- a/ansible/roles/aodh/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the aodh service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=aodh
- service_type=alarming
- description='OpenStack Alarming Service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_aodh_auth }}' }}"
- -e "{'openstack_aodh_auth':{{ openstack_aodh_auth }}}"
- register: aodh_endpoint
- changed_when: "{{ aodh_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (aodh_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: aodh_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ aodh_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ aodh_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ aodh_public_endpoint }}'}
-
-- name: Creating the aodh project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=aodh
- password={{ aodh_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_aodh_auth }}' }}"
- -e "{'openstack_aodh_auth':{{ openstack_aodh_auth }}}"
- register: aodh_user
- changed_when: "{{ aodh_user.stdout.find('localhost | SUCCESS => ') != -1 and (aodh_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: aodh_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/aodh/tasks/start.yml b/ansible/roles/aodh/tasks/start.yml
deleted file mode 100644
index 4084799193..0000000000
--- a/ansible/roles/aodh/tasks/start.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Starting aodh-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_api_image_full }}"
- name: "aodh_api"
- volumes:
- - "{{ node_config_directory }}/aodh-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "aodh:/var/lib/aodh/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['aodh-api']
-
-- name: Starting aodh-evaluator container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_evaluator_image_full }}"
- name: "aodh_evaluator"
- volumes:
- - "{{ node_config_directory }}/aodh-evaluator/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['aodh-evaluator']
-
-- name: Starting aodh-listener container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_listener_image_full }}"
- name: "aodh_listener"
- volumes:
- - "{{ node_config_directory }}/aodh-listener/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['aodh-listener']
-
-- name: Starting aodh-notifier container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ aodh_notifier_image_full }}"
- name: "aodh_notifier"
- volumes:
- - "{{ node_config_directory }}/aodh-notifier/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['aodh-notifier']
diff --git a/ansible/roles/aodh/tasks/upgrade.yml b/ansible/roles/aodh/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/aodh/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/aodh/templates/aodh-api.json.j2 b/ansible/roles/aodh/templates/aodh-api.json.j2
deleted file mode 100644
index fc6b0b0815..0000000000
--- a/ansible/roles/aodh/templates/aodh-api.json.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-{% set aodh_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set aodh_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{
- "command": "{{ aodh_cmd }} -DFOREGROUND",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/aodh.conf",
- "dest": "/etc/aodh/aodh.conf",
- "owner": "aodh",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/wsgi-aodh.conf",
- "dest": "/etc/{{ aodh_dir }}/wsgi-aodh.conf",
- "owner": "root",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/aodh/templates/aodh-evaluator.json.j2 b/ansible/roles/aodh/templates/aodh-evaluator.json.j2
deleted file mode 100644
index 220be5a1e0..0000000000
--- a/ansible/roles/aodh/templates/aodh-evaluator.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "aodh-evaluator",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/aodh.conf",
- "dest": "/etc/aodh/aodh.conf",
- "owner": "aodh",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/aodh/templates/aodh-listener.json.j2 b/ansible/roles/aodh/templates/aodh-listener.json.j2
deleted file mode 100644
index 3b75e64c80..0000000000
--- a/ansible/roles/aodh/templates/aodh-listener.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "aodh-listener",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/aodh.conf",
- "dest": "/etc/aodh/aodh.conf",
- "owner": "aodh",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/aodh/templates/aodh-notifier.json.j2 b/ansible/roles/aodh/templates/aodh-notifier.json.j2
deleted file mode 100644
index da910cd2ba..0000000000
--- a/ansible/roles/aodh/templates/aodh-notifier.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "aodh-notifier",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/aodh.conf",
- "dest": "/etc/aodh/aodh.conf",
- "owner": "aodh",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/aodh/templates/aodh.conf.j2 b/ansible/roles/aodh/templates/aodh.conf.j2
deleted file mode 100644
index 0295a44afb..0000000000
--- a/ansible/roles/aodh/templates/aodh.conf.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-#jinja2: trim_blocks: False
-[DEFAULT]
-auth_strategy = keystone
-log_dir = /var/log/kolla/aodh
-debug = {{ aodh_logging_debug }}
-notification_topics = notifications
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[api]
-port = {{ aodh_api_port }}
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-[database]
-connection = mysql+pymysql://{{ aodh_database_user }}:{{ aodh_database_password }}@{{ aodh_database_address }}/{{ aodh_database_name }}
-
-
-[keystone_authtoken]
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-project_domain_name = default
-project_name = service
-user_domain_name = default
-username = {{ aodh_keystone_user }}
-password = {{ aodh_keystone_password }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-
-
-[service_credentials]
-auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
-region_name = {{ openstack_region_name }}
-password = {{ aodh_keystone_password }}
-username = {{ aodh_keystone_user }}
-project_name = service
-project_domain_id = default
-user_domain_id = default
-auth_type = password
diff --git a/ansible/roles/aodh/templates/wsgi-aodh.conf.j2 b/ansible/roles/aodh/templates/wsgi-aodh.conf.j2
deleted file mode 100644
index e09266198f..0000000000
--- a/ansible/roles/aodh/templates/wsgi-aodh.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
-Listen {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ aodh_api_port }}
-
-
-
- ## Vhost docroot
- DocumentRoot "/var/www/cgi-bin/aodh"
-
- ## Directories, there should at least be a declaration for /var/www/cgi-bin/aodh
-
-
- Options Indexes FollowSymLinks MultiViews
- AllowOverride None
- Require all granted
-
-
- ## Logging
- ErrorLog "/var/log/kolla/aodh/aodh_wsgi_error.log"
- ServerSignature Off
- CustomLog "/var/log/kolla/aodh/aodh_wsgi_access.log" combined
- WSGIApplicationGroup %{GLOBAL}
- WSGIDaemonProcess aodh group=aodh processes={{ openstack_service_workers }} threads=1 user=aodh python-path={{ python_path }}
- WSGIProcessGroup aodh
- WSGIScriptAlias / "/var/www/cgi-bin/aodh/app.wsgi"
-
diff --git a/ansible/roles/barbican/defaults/main.yml b/ansible/roles/barbican/defaults/main.yml
deleted file mode 100644
index 7fb8d628f9..0000000000
--- a/ansible/roles/barbican/defaults/main.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-project_name: "barbican"
-
-
-####################
-# Database
-####################
-barbican_database_name: "barbican"
-barbican_database_user: "barbican"
-barbican_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-barbican_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-barbican-api"
-barbican_api_tag: "{{ openstack_release }}"
-barbican_api_image_full: "{{ barbican_api_image }}:{{ barbican_api_tag }}"
-
-barbican_keystone_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-barbican-keystone-listener"
-barbican_keystone_listener_tag: "{{ openstack_release }}"
-barbican_keystone_listener_image_full: "{{ barbican_keystone_listener_image }}:{{ barbican_keystone_listener_tag }}"
-
-barbican_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-barbican-worker"
-barbican_worker_tag: "{{ openstack_release }}"
-barbican_worker_image_full: "{{ barbican_worker_image }}:{{ barbican_worker_tag }}"
-
-
-####################
-# OpenStack
-####################
-barbican_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ barbican_api_port }}"
-barbican_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ barbican_api_port }}"
-barbican_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ barbican_api_port }}"
-
-barbican_logging_debug: "{{ openstack_logging_debug }}"
-
-barbican_keystone_user: "barbican"
-
-openstack_barbican_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/barbican/meta/main.yml b/ansible/roles/barbican/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/barbican/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/barbican/tasks/bootstrap.yml b/ansible/roles/barbican/tasks/bootstrap.yml
deleted file mode 100644
index b37ae86428..0000000000
--- a/ansible/roles/barbican/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating barbican database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ barbican_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['barbican-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating barbican database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ barbican_database_name }}'
- password='{{ barbican_database_password }}'
- host='%'
- priv='{{ barbican_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['barbican-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/barbican/tasks/bootstrap_service.yml b/ansible/roles/barbican/tasks/bootstrap_service.yml
deleted file mode 100644
index 04ece90fdb..0000000000
--- a/ansible/roles/barbican/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Running barbican bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ barbican_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_barbican"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/barbican-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "barbican:/var/lib/barbican/"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['barbican-api'][0] }}"
diff --git a/ansible/roles/barbican/tasks/config.yml b/ansible/roles/barbican/tasks/config.yml
deleted file mode 100644
index 0b71c1ce5f..0000000000
--- a/ansible/roles/barbican/tasks/config.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "barbican-api/vassals"
- - "barbican-keystone-listener"
- - "barbican-worker"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "barbican-api"
- - "barbican-worker"
- - "barbican-keystone-listener"
-
-- name: Copying over barbican-api.ini
- merge_configs:
- sources:
- - "{{ role_path }}/templates/barbican-api.ini.j2"
- - "{{ node_custom_config }}/barbican-api/barbican-api.ini"
- - "{{ node_custom_config }}/barbican-api/{{ inventory_hostname }}/barbican-api.ini"
- dest: "{{ node_config_directory }}/barbican-api/vassals/barbican-api.ini"
-
-- name: Copying over barbican-api-paste.ini
- merge_configs:
- sources:
- - "{{ role_path }}/templates/barbican-api-paste.ini.j2"
- - "{{ node_custom_config }}/barbican-api/barbican-api-paste.ini"
- - "{{ node_custom_config }}/barbican-api/{{ inventory_hostname }}/barbican-api-paste.ini"
- dest: "{{ node_config_directory }}/barbican-api/barbican-api-paste.ini"
-
-- name: Copying over barbican.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/barbican.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/barbican.conf"
- - "{{ node_custom_config }}/barbican/{{ item }}.conf"
- - "{{ node_custom_config }}/barbican/{{ inventory_hostname }}/barbican.conf"
- dest: "{{ node_config_directory }}/{{ item }}/barbican.conf"
- with_items:
- - "barbican-api"
- - "barbican-keystone-listener"
- - "barbican-worker"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/barbican/policy.json"
- register: barbican_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/barbican/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "barbican-api"
- - "barbican-keystone-listener"
- - "barbican-worker"
- when:
- barbican_policy.stat.exists
diff --git a/ansible/roles/barbican/tasks/deploy.yml b/ansible/roles/barbican/tasks/deploy.yml
deleted file mode 100644
index 4b9f3ebcc3..0000000000
--- a/ansible/roles/barbican/tasks/deploy.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['barbican-api']
-
-- include: config.yml
- when: inventory_hostname in groups['barbican-api'] or
- inventory_hostname in groups['barbican-worker'] or
- inventory_hostname in groups['barbican-keystone-listener']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['barbican-api']
-
-- include: start.yml
- when: inventory_hostname in groups['barbican-api'] or
- inventory_hostname in groups['barbican-worker'] or
- inventory_hostname in groups['barbican-keystone-listener']
diff --git a/ansible/roles/barbican/tasks/main.yml b/ansible/roles/barbican/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/barbican/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/barbican/tasks/precheck.yml b/ansible/roles/barbican/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/barbican/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/barbican/tasks/pull.yml b/ansible/roles/barbican/tasks/pull.yml
deleted file mode 100644
index 284f8970da..0000000000
--- a/ansible/roles/barbican/tasks/pull.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Pulling barbican-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ barbican_api_image_full }}"
- when: inventory_hostname in groups['barbican-api']
-
-- name: Pulling barbican-keystone-listener image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ barbican_keystone_listener_image_full }}"
- when: inventory_hostname in groups['barbican-keystone-listener']
-
-- name: Pulling barbican-worker image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ barbican_worker_image_full }}"
- when: inventory_hostname in groups['barbican-worker']
diff --git a/ansible/roles/barbican/tasks/reconfigure.yml b/ansible/roles/barbican/tasks/reconfigure.yml
deleted file mode 100644
index 84f378665f..0000000000
--- a/ansible/roles/barbican/tasks/reconfigure.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: barbican_api, group: barbican-api }
- - { name: barbican_keystone_listener, group: barbican-keystone-listener }
- - { name: barbican_worker, group: barbican-worker }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: barbican_api, group: barbican-api }
- - { name: barbican_keystone_listener, group: barbican-keystone-listener }
- - { name: barbican_worker, group: barbican-worker }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: barbican_api, group: barbican-api }
- - { name: barbican_keystone_listener, group: barbican-keystone-listener }
- - { name: barbican_worker, group: barbican-worker }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: barbican_api, group: barbican-api },
- { name: barbican_keystone_listener, group: barbican-keystone-listener },
- { name: barbican_worker, group: barbican-worker }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: barbican_api, group: barbican-api },
- { name: barbican_keystone_listener, group: barbican-keystone-listener },
- { name: barbican_worker, group: barbican-worker }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/barbican/tasks/register.yml b/ansible/roles/barbican/tasks/register.yml
deleted file mode 100644
index 5bc6719c94..0000000000
--- a/ansible/roles/barbican/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the barbican service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=barbican
- service_type=key-manager
- description='Barbican Key Management Service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_barbican_auth }}' }}"
- -e "{'openstack_barbican_auth':{{ openstack_barbican_auth }}}"
- register: barbican_endpoint
- changed_when: "{{ barbican_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (barbican_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: barbican_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ barbican_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ barbican_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ barbican_public_endpoint }}'}
-
-- name: Creating the barbican project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=barbican
- password={{ barbican_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_barbican_auth }}' }}"
- -e "{'openstack_barbican_auth':{{ openstack_barbican_auth }}}"
- register: barbican_user
- changed_when: "{{ barbican_user.stdout.find('localhost | SUCCESS => ') != -1 and (barbican_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: barbican_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/barbican/tasks/start.yml b/ansible/roles/barbican/tasks/start.yml
deleted file mode 100644
index c862beaaf9..0000000000
--- a/ansible/roles/barbican/tasks/start.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Starting barbican-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ barbican_api_image_full }}"
- name: "barbican_api"
- volumes:
- - "{{ node_config_directory }}/barbican-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "barbican:/var/lib/barbican/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['barbican-api']
-
-- name: Starting barbican-keystone-listener container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ barbican_keystone_listener_image_full }}"
- name: "barbican_keystone_listener"
- volumes:
- - "{{ node_config_directory }}/barbican-keystone-listener/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['barbican-keystone-listener']
-
-- name: Starting barbican-worker container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ barbican_worker_image_full }}"
- name: "barbican_worker"
- volumes:
- - "{{ node_config_directory }}/barbican-worker/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['barbican-worker']
diff --git a/ansible/roles/barbican/tasks/upgrade.yml b/ansible/roles/barbican/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/barbican/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/barbican/templates/barbican-api-paste.ini.j2 b/ansible/roles/barbican/templates/barbican-api-paste.ini.j2
deleted file mode 100644
index a1030a9f65..0000000000
--- a/ansible/roles/barbican/templates/barbican-api-paste.ini.j2
+++ /dev/null
@@ -1,60 +0,0 @@
-[composite:main]
-use = egg:Paste#urlmap
-/: barbican_version
-/v1: barbican-api-keystone
-
-# Use this pipeline for Barbican API - versions no authentication
-[pipeline:barbican_version]
-pipeline = cors versionapp
-
-# Use this pipeline for Barbican API - DEFAULT no authentication
-[pipeline:barbican_api]
-pipeline = cors unauthenticated-context apiapp
-
-#Use this pipeline to activate a repoze.profile middleware and HTTP port,
-# to provide profiling information for the REST API processing.
-[pipeline:barbican-profile]
-pipeline = cors unauthenticated-context egg:Paste#cgitb egg:Paste#httpexceptions profile apiapp
-
-#Use this pipeline for keystone auth
-[pipeline:barbican-api-keystone]
-pipeline = cors authtoken context apiapp
-
-#Use this pipeline for keystone auth with audit feature
-[pipeline:barbican-api-keystone-audit]
-pipeline = authtoken context audit apiapp
-
-[app:apiapp]
-paste.app_factory = barbican.api.app:create_main_app
-
-[app:versionapp]
-paste.app_factory = barbican.api.app:create_version_app
-
-[filter:simple]
-paste.filter_factory = barbican.api.middleware.simple:SimpleFilter.factory
-
-[filter:unauthenticated-context]
-paste.filter_factory = barbican.api.middleware.context:UnauthenticatedContextMiddleware.factory
-
-[filter:context]
-paste.filter_factory = barbican.api.middleware.context:ContextMiddleware.factory
-
-[filter:audit]
-paste.filter_factory = keystonemiddleware.audit:filter_factory
-audit_map_file = /etc/barbican/api_audit_map.conf
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:profile]
-use = egg:repoze.profile
-log_filename = myapp.profile
-cachegrind_filename = cachegrind.out.myapp
-discard_first_request = true
-path = /__profile__
-flush_at_shutdown = true
-unwind = false
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = barbican
diff --git a/ansible/roles/barbican/templates/barbican-api.ini.j2 b/ansible/roles/barbican/templates/barbican-api.ini.j2
deleted file mode 100644
index 3e1fc65bb6..0000000000
--- a/ansible/roles/barbican/templates/barbican-api.ini.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[uwsgi]
-socket = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ barbican_api_port }}
-protocol = http
-processes = {{ openstack_service_workers }}
-lazy = true
-vacuum = true
-no-default-app = true
-memory-report = true
-plugins = python
-paste = config:/etc/barbican/barbican-api-paste.ini
-add-header = Connection: close
diff --git a/ansible/roles/barbican/templates/barbican-api.json.j2 b/ansible/roles/barbican/templates/barbican-api.json.j2
deleted file mode 100644
index fe8ba1b30e..0000000000
--- a/ansible/roles/barbican/templates/barbican-api.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "uwsgi --master --emperor /etc/barbican/vassals --logto /var/log/kolla/barbican/barbican-api.log",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/barbican.conf",
- "dest": "/etc/barbican/barbican.conf",
- "owner": "barbican",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/vassals/barbican-api.ini",
- "dest": "/etc/barbican/vassals/barbican-api.ini",
- "owner": "barbican",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/barbican-api-paste.ini",
- "dest": "/etc/barbican/barbican-api-paste.ini",
- "owner": "barbican",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/barbican/templates/barbican-keystone-listener.json.j2 b/ansible/roles/barbican/templates/barbican-keystone-listener.json.j2
deleted file mode 100644
index cb7cb3e210..0000000000
--- a/ansible/roles/barbican/templates/barbican-keystone-listener.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "barbican-keystone-listener",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/barbican.conf",
- "dest": "/etc/barbican/barbican.conf",
- "owner": "barbican",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/barbican/templates/barbican-worker.json.j2 b/ansible/roles/barbican/templates/barbican-worker.json.j2
deleted file mode 100644
index 861cd6c7f0..0000000000
--- a/ansible/roles/barbican/templates/barbican-worker.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "barbican-worker",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/barbican.conf",
- "dest": "/etc/barbican/barbican.conf",
- "owner": "barbican",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/barbican/templates/barbican.conf.j2 b/ansible/roles/barbican/templates/barbican.conf.j2
deleted file mode 100644
index 55c6b22101..0000000000
--- a/ansible/roles/barbican/templates/barbican.conf.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-[DEFAULT]
-debug = {{ barbican_logging_debug }}
-log_dir = /var/log/kolla/barbican
-
-
-bind_port = {{ barbican_api_port }}
-bind_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-host_href = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ barbican_api_port }}
-backlog = 4096
-max_allowed_secret_in_bytes = 10000
-max_allowed_request_size_in_bytes = 1000000
-
-db_auto_create = False
-sql_connection = mysql://{{ barbican_database_user }}:{{ barbican_database_password }}@{{ barbican_database_address }}/{{ barbican_database_name }}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[keystone_notifications]
-enable = True
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-project_domain_id = default
-project_name = service
-user_domain_id = default
-username = {{ barbican_keystone_user }}
-password = {{ barbican_keystone_password }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
-
-[service_credentials]
-auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-region_name = {{ openstack_region_name }}
-password = {{ barbican_keystone_password }}
-username = {{ barbican_keystone_user }}
-project_name = service
-project_domain_id = default
-user_domain_id = default
-auth_type = password
diff --git a/ansible/roles/baremetal/defaults/main.yml b/ansible/roles/baremetal/defaults/main.yml
deleted file mode 100644
index 87cae0e4db..0000000000
--- a/ansible/roles/baremetal/defaults/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-docker_apt_url: "https://apt.dockerproject.org"
-docker_apt_key_id: F76221572C52609D
-
-docker_yum_url: "https://yum.dockerproject.org"
-docker_gpg_fingerprint: "58118E89F3A912897C070ADBF76221572C52609D"
-
-customize_etc_hosts: True
-
-create_kolla_user: True
-
-docker_storage_driver: ""
-
-debian_pkg_install:
- - docker-engine=1.11.*
- - git
- - python-setuptools
- - ntp
-
-redhat_pkg_install:
- - epel-release
- - docker-engine-1.11.0
- - git
- - python-setuptools
- - ntp
-
-ubuntu_pkg_removals:
- - lxd
- - lxc
- - libvirt
-
-redhat_pkg_removals:
- - libvirt
diff --git a/ansible/roles/baremetal/tasks/bootstrap-servers.yml b/ansible/roles/baremetal/tasks/bootstrap-servers.yml
deleted file mode 100644
index 3888ed5cb9..0000000000
--- a/ansible/roles/baremetal/tasks/bootstrap-servers.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: pre-install.yml
-- include: install.yml
-- include: post-install.yml
diff --git a/ansible/roles/baremetal/tasks/install.yml b/ansible/roles/baremetal/tasks/install.yml
deleted file mode 100644
index 7538ee1002..0000000000
--- a/ansible/roles/baremetal/tasks/install.yml
+++ /dev/null
@@ -1,81 +0,0 @@
----
-- name: Update apt cache
- command: apt-get update
- become: True
- when: ansible_os_family == 'Debian'
-
-- name: Set firewall default policy
- ufw: state=disabled policy=allow
- when: ansible_os_family == 'Debian'
-
-- name: Check if firewalld is installed
- command: rpm -q firewalld
- register: firewalld_check
- failed_when: firewalld_check.rc > 1
- when: ansible_os_family == 'RedHat'
-
-- name: Disable firewalld
- become: True
- service:
- name: "{{ item }}"
- enabled: false
- state: stopped
- with_items:
- - firewalld
- when:
- - ansible_os_family == 'RedHat'
- - firewalld_check.rc == 0
-
-- name: Install apt packages
- package: name={{item}} state=present
- become: True
- with_items: "{{ debian_pkg_install }}"
- when: ansible_os_family == 'Debian'
-
-- name: Install wily kernel
- package: name=linux-generic-lts-wily state=latest
- register: kernel_updated
- become: True
- when:
- - ansible_distribution|lower == "ubuntu" | bool
- - ansible_distribution_release|lower == "trusty" | bool
-
-- name: Set reboot required
- set_fact:
- reboot_required: True
- when:
- - kernel_updated is defined
- - kernel_updated.changed
-
-- name: Install deltarpm packages
- package: name={{item}} state=installed
- become: True
- with_items:
- - deltarpm
- when: ansible_os_family == 'RedHat'
-
-- name: Install yum packages
- package: name={{item}} state=present
- become: True
- with_items: "{{ redhat_pkg_install }}"
- when: ansible_os_family == 'RedHat'
-
-- name: Install pip
- easy_install: name=pip
- become: True
-
-- name: Install docker-py
- pip: name=docker-py state=latest
- become: True
-
-- name: Remove packages
- package: name={{item}} state=absent
- with_items: "{{ ubuntu_pkg_removals }}"
- become: True
- when: ansible_distribution|lower == "ubuntu" | bool
-
-- name: Remove packages
- package: name={{item}} state=absent
- with_items: "{{ redhat_pkg_removals }}"
- become: True
- when: ansible_os_family == 'RedHat'
diff --git a/ansible/roles/baremetal/tasks/main.yml b/ansible/roles/baremetal/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/baremetal/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/baremetal/tasks/post-install.yml b/ansible/roles/baremetal/tasks/post-install.yml
deleted file mode 100644
index f745776626..0000000000
--- a/ansible/roles/baremetal/tasks/post-install.yml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-- name: Ensure docker service directory exists
- file:
- path=/etc/systemd/system/docker.service.d
- state=directory
- recurse=yes
- become: True
- when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version > "14") or
- (ansible_os_family == "RedHat") or (ansible_distribution == "Debian")
-
-- name: Configure docker service
- become: True
- template:
- src=docker_systemd_service.j2
- dest=/etc/systemd/system/docker.service.d/kolla.conf
- when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version > "14") or
- (ansible_os_family == "RedHat") or (ansible_distribution == "Debian")
-
-- name: Reload docker service file
- become: True
- command: systemctl daemon-reload
- when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version > "14") or
- (ansible_os_family == "RedHat") or (ansible_distribution == "Debian")
-
-- name: Configure docker service
- become: True
- template:
- src=docker_defaults.j2
- dest=/etc/default/docker
- when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version < "15")
-
-- name: Docker mount shared
- command: mount --make-shared /run
- become: True
- when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version < "15")
-
-- name: Get stat of libvirtd apparmor profile
- stat: path=/etc/apparmor.d/usr.sbin.libvirtd
- register: apparmor_libvirtd_profile
- when: ansible_distribution == "Ubuntu"
-
-- name: Remove apparmor profile for libvirt
- command: apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd
- become: True
- when:
- - ansible_distribution == "Ubuntu"
- - apparmor_libvirtd_profile.stat.exists == True
-
-- name: Create docker group
- group:
- name: docker
- become: True
-
-- name: Add kolla user to docker group
- user:
- name: kolla
- append: yes
- groups: docker
- become: True
- when: create_kolla_user | bool == True
-
-- name: Start docker
- service:
- name: docker
- state: started
- become: yes
-
-- name: Restart docker
- service:
- name: docker
- state: restarted
- become: yes
-
-- name: Enable docker
- service:
- name: docker
- enabled: yes
- become: yes
-
-- name: Reboot
- command: reboot -f
- become: yes
- when:
- - reboot_required is defined
- - reboot_required | bool == true
diff --git a/ansible/roles/baremetal/tasks/pre-install.yml b/ansible/roles/baremetal/tasks/pre-install.yml
deleted file mode 100644
index 0ea5e5a18d..0000000000
--- a/ansible/roles/baremetal/tasks/pre-install.yml
+++ /dev/null
@@ -1,137 +0,0 @@
----
-# NOTE: raw install is required to support cloud images which do not have python installed
-- name: "Install python2 and python-simplejson"
- become: true
- raw: "yum install -y python python-simplejson || (apt-get update && apt-get install -y python2.7 python-simplejson)"
-
-- name: Gather facts
- setup:
-
-- name: Ensure localhost in /etc/hosts
- lineinfile:
- dest: /etc/hosts
- regexp: "^127.0.0.1.*"
- line: "127.0.0.1 localhost"
- state: present
- become: True
- when: customize_etc_hosts | bool == True
-
-- name: Generate /etc/hosts for all of the nodes
- blockinfile:
- dest: /etc/hosts
- marker: "# {mark} ANSIBLE GENERATED HOSTS"
- block: |
- {% for host in groups['all'] %}
- {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }} {{ hostvars[host]['ansible_hostname'] }}
- {% endfor %}
- become: True
- when: customize_etc_hosts | bool == True
-
-- name: Ensure sudo group is present
- group: name=sudo state=present
- become: True
-
-- name: Ensure kolla group is present
- group: name=kolla state=present
- become: True
- when: create_kolla_user | bool == True
-
-- name: Create kolla user
- user:
- name: kolla
- state: present
- group: kolla
- groups: "sudo"
- become: True
- when: create_kolla_user | bool == True
-
-- name: Grant kolla user passwordless sudo
- lineinfile:
- dest: /etc/sudoers
- state: present
- regexp: '^kolla'
- line: 'kolla ALL=(ALL) NOPASSWD: ALL'
- become: True
- when: create_kolla_user | bool == True
-
-- name: Add public key to kolla user authorized keys
- authorized_key:
- user: kolla
- key: "{{ kolla_ssh_key.public_key }}"
- become: True
- when: create_kolla_user | bool == True
-
-- name: Install apt packages
- command: apt-get update
- become: True
- when: ansible_os_family == 'Debian'
-
-- name: Install ubuntu ca certs
- package: name={{item}} state=latest
- become: True
- with_items:
- - ca-certificates
- - apt-transport-https
- when:
- - ansible_os_family == 'Debian'
-
-- name: Ensure apt sources list directory exists
- file: path=/etc/apt/sources.list.d state=directory recurse=yes
- become: True
- when: ansible_os_family == 'Debian'
-
-- name: Enable docker repo apt
- template:
- src: docker_apt_repo.j2
- dest: /etc/apt/sources.list.d/docker.list
- become: True
- when: ansible_os_family == 'Debian'
-
-- name: Install docker apt gpg key
- apt_key:
- url: "{{ docker_apt_url }}/gpg"
- id: "{{ docker_apt_key_id }}"
- state: present
- become: True
- when:
- - ansible_os_family == 'Debian'
- - ansible_distribution == 'Ubuntu'
-
-- name: Ensure yum repos directory exists
- file: path=/etc/yum.repos.d/ state=directory recurse=yes
- become: True
- when: ansible_os_family == 'RedHat'
-
-- name: Enable docker repo yum
- become: True
- template:
- src: docker_yum_repo.j2
- dest: /etc/yum.repos.d/docker.repo
- when: ansible_os_family == 'RedHat'
-
-- name: Install docker rpm gpg key
- rpm_key:
- state: present
- key: "{{ docker_yum_url }}/gpg"
- become: True
- when: ansible_os_family == 'RedHat'
-
-- name: Ensure /etc/kolla directory exists
- file:
- path: /etc/kolla
- state: directory
- recurse: yes
- owner: kolla
- group: kolla
- mode: 755
- become: True
- when: create_kolla_user | bool == True
-
-- name: Ensure /etc/kolla directory exists
- file:
- path: /etc/kolla
- state: directory
- recurse: yes
- mode: 666
- become: True
- when: create_kolla_user | bool == False
diff --git a/ansible/roles/baremetal/tasks/precheck.yml b/ansible/roles/baremetal/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/baremetal/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/baremetal/templates/docker_apt_repo.j2 b/ansible/roles/baremetal/templates/docker_apt_repo.j2
deleted file mode 100644
index a317c5b56f..0000000000
--- a/ansible/roles/baremetal/templates/docker_apt_repo.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-# main docker repo
-deb {{docker_apt_url}}/repo {{ansible_distribution|lower}}-{{ansible_distribution_release|lower}} main
diff --git a/ansible/roles/baremetal/templates/docker_defaults.j2 b/ansible/roles/baremetal/templates/docker_defaults.j2
deleted file mode 100644
index 0b6b375bb5..0000000000
--- a/ansible/roles/baremetal/templates/docker_defaults.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-# Docker Upstart and SysVinit configuration file
-
-#
-# THIS FILE DOES NOT APPLY TO SYSTEMD
-#
-# Please see the documentation for "systemd drop-ins":
-# https://docs.docker.com/engine/articles/systemd/
-#
-
-# Customize location of Docker binary (especially for development testing).
-#DOCKERD="/usr/local/bin/dockerd"
-
-# Use DOCKER_OPTS to modify the daemon startup options.
-DOCKER_OPTS=""
-{% if docker_storage_driver %}
-DOCKER_OPTS="$DOCKER_OPTS --storage-driver={{ docker_storage_driver }}"
-{% endif %}
-{% if docker_registry %}
-DOCKER_OPTS="$DOCKER_OPTS --insecure-registry {{ docker_registry }}"
-{% endif %}
-
-# If you need Docker to use an HTTP proxy, it can also be specified here.
-#export http_proxy="http://127.0.0.1:3128/"
-
-# This is also a handy place to tweak where Docker's temporary files go.
-#export TMPDIR="/mnt/bigdrive/docker-tmp"
diff --git a/ansible/roles/baremetal/templates/docker_systemd_service.j2 b/ansible/roles/baremetal/templates/docker_systemd_service.j2
deleted file mode 100644
index fb021a677f..0000000000
--- a/ansible/roles/baremetal/templates/docker_systemd_service.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[Service]
-MountFlags=shared
-ExecStart=
-ExecStart=/usr/bin/docker daemon \
-{% if docker_registry %}
---insecure-registry {{ docker_registry }} \
-{% endif %}
-{% if docker_storage_driver %}
---storage-driver={{ docker_storage_driver }}
-{% endif %}
- -H fd://
diff --git a/ansible/roles/baremetal/templates/docker_yum_repo.j2 b/ansible/roles/baremetal/templates/docker_yum_repo.j2
deleted file mode 100644
index a165efab20..0000000000
--- a/ansible/roles/baremetal/templates/docker_yum_repo.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-[docker-repo]
-name=Docker main Repository
-baseurl={{docker_yum_url}}/repo/main/{{ansible_distribution|lower}}/{{ansible_distribution_major_version|lower}}
-enabled=1
-gpgcheck=1
-gpgkey={{docker_yum_url}}/gpg
diff --git a/ansible/roles/bifrost/defaults/main.yml b/ansible/roles/bifrost/defaults/main.yml
deleted file mode 100644
index 53c51361a3..0000000000
--- a/ansible/roles/bifrost/defaults/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-####################
-# Docker
-####################
-bifrost_deploy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-bifrost-deploy"
-bifrost_deploy_tag: "{{ openstack_release }}"
-bifrost_deploy_image_full: "{{ bifrost_deploy_image }}:{{ bifrost_deploy_tag }}"
diff --git a/ansible/roles/bifrost/tasks/bootstrap.yml b/ansible/roles/bifrost/tasks/bootstrap.yml
deleted file mode 100644
index 3d505b3c82..0000000000
--- a/ansible/roles/bifrost/tasks/bootstrap.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Bootstrap bifrost (this may take several minutes)
- command: >
- docker exec bifrost_deploy
- bash -c '/bifrost/scripts/env-setup.sh && source /bifrost/env-vars
- && source /opt/stack/ansible/hacking/env-setup &&
- ansible-playbook -vvvv -i /bifrost/playbooks/inventory/localhost
- /bifrost/playbooks/install.yaml -e @/etc/bifrost/bifrost.yml
- -e @/etc/bifrost/dib.yml -e skip_package_install=true'
-- name: Installing ssh keys
- command: >
- docker exec bifrost_deploy
- bash -c 'mkdir /root/.ssh ; mkdir /home/ironic/.ssh;
- cp -f /etc/bifrost/id_rsa /root/.ssh/id_rsa &&
- cp -f /etc/bifrost/id_rsa.pub /root/.ssh/id_rsa.pub &&
- cp -f /etc/bifrost/ssh_config /root/.ssh/config &&
- cp -f /etc/bifrost/id_rsa /home/ironic/.ssh/id_rsa &&
- cp -f /etc/bifrost/id_rsa.pub /home/ironic/.ssh/id_rsa.pub &&
- cp -f /etc/bifrost/ssh_config /home/ironic/.ssh/config &&
- chmod 600 /root/.ssh/id_rsa &&
- chmod 600 /root/.ssh/id_rsa.pub &&
- chmod 600 /root/.ssh/config &&
- chmod 600 /home/ironic/.ssh/id_rsa &&
- chmod 600 /home/ironic/.ssh/id_rsa.pub &&
- chmod 600 /home/ironic/.ssh/config &&
- chown ironic:ironic /home/ironic/.ssh/id_rsa &&
- chown ironic:ironic /home/ironic/.ssh/id_rsa.pub &&
- chown ironic:ironic /home/ironic/.ssh/config'
diff --git a/ansible/roles/bifrost/tasks/config.yml b/ansible/roles/bifrost/tasks/config.yml
deleted file mode 100644
index 717c4dead2..0000000000
--- a/ansible/roles/bifrost/tasks/config.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "bifrost"
-
-- name: Generate bifrost configs
- merge_yaml:
- sources:
- - "{{ role_path }}/templates/{{ item }}.yml.j2"
- - "{{ node_custom_config }}/{{ item }}.yml"
- - "{{ node_custom_config }}/bifrost/{{ item }}.yml"
- dest: "{{ node_config_directory }}/bifrost/{{ item }}.yml"
- with_items:
- - "bifrost"
- - "dib"
- - "servers"
-
-- name: Template ssh keys
- template:
- src: "{{ item.src }}"
- dest: "{{ node_config_directory }}/bifrost/{{ item.dest }}"
- with_items:
- - { src: "id_rsa", dest: "id_rsa" }
- - { src: "id_rsa.pub", dest: "id_rsa.pub" }
- - { src: "ssh_config", dest: "ssh_config" }
diff --git a/ansible/roles/bifrost/tasks/deploy-servers.yml b/ansible/roles/bifrost/tasks/deploy-servers.yml
deleted file mode 100644
index e8d98abda8..0000000000
--- a/ansible/roles/bifrost/tasks/deploy-servers.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Enrolling physical servers with ironic
- command: >
- docker exec bifrost_deploy
- bash -c 'source /bifrost/env-vars
- && source /opt/stack/ansible/hacking/env-setup &&
- export BIFROST_INVENTORY_SOURCE=/etc/bifrost/servers.yml &&
- ansible-playbook -vvvv -i /bifrost/playbooks/inventory/bifrost_inventory.py
- /bifrost/playbooks/enroll-dynamic.yaml -e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"
- -e @/etc/bifrost/bifrost.yml'
-
-- name: Deploy physical servers with ironic
- command: >
- docker exec bifrost_deploy
- bash -c 'source /bifrost/env-vars
- && source /opt/stack/ansible/hacking/env-setup &&
- export BIFROST_INVENTORY_SOURCE=/etc/bifrost/servers.yml &&
- ansible-playbook -vvvv -i /bifrost/playbooks/inventory/bifrost_inventory.py
- /bifrost/playbooks/deploy-dynamic.yaml -e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"
- -e @/etc/bifrost/bifrost.yml'
diff --git a/ansible/roles/bifrost/tasks/deploy.yml b/ansible/roles/bifrost/tasks/deploy.yml
deleted file mode 100644
index fe58cc3f76..0000000000
--- a/ansible/roles/bifrost/tasks/deploy.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
-
-- include: bootstrap.yml
diff --git a/ansible/roles/bifrost/tasks/main.yml b/ansible/roles/bifrost/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/bifrost/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/bifrost/tasks/precheck.yml b/ansible/roles/bifrost/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/bifrost/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/bifrost/tasks/reconfigure.yml b/ansible/roles/bifrost/tasks/reconfigure.yml
deleted file mode 100644
index fdd316fcc1..0000000000
--- a/ansible/roles/bifrost/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: bifrost-deploy, group: bifrost-deploy }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: bifrost-deploy, group: bifrost-deploy }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: bifrost-deploy, group: bifrost-deploy }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: bifrost-deploy, group: bifrost-deploy }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: bifrost-deploy, group: bifrost-deploy }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/bifrost/tasks/start.yml b/ansible/roles/bifrost/tasks/start.yml
deleted file mode 100644
index 58b247b665..0000000000
--- a/ansible/roles/bifrost/tasks/start.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Starting bifrost deploy container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ bifrost_deploy_image_full }}"
- name: "bifrost_deploy"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/bifrost/:/etc/bifrost:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev:/dev"
diff --git a/ansible/roles/bifrost/tasks/upgrade.yml b/ansible/roles/bifrost/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/bifrost/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/bifrost/templates/bifrost.yml.j2 b/ansible/roles/bifrost/templates/bifrost.yml.j2
deleted file mode 100644
index 224d11a08a..0000000000
--- a/ansible/roles/bifrost/templates/bifrost.yml.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-mysql_service_name: mysql
-ansible_python_interpreter: /var/lib/kolla/venv/bin/python
-network_interface: "{{ bifrost_network_interface }}"
diff --git a/ansible/roles/bifrost/templates/dib.yml.j2 b/ansible/roles/bifrost/templates/dib.yml.j2
deleted file mode 100644
index 9e54fba23b..0000000000
--- a/ansible/roles/bifrost/templates/dib.yml.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-create_image_via_dib: "true"
-dib_os_element: "debian"
diff --git a/ansible/roles/bifrost/templates/id_rsa b/ansible/roles/bifrost/templates/id_rsa
deleted file mode 100644
index d1d3a9ed49..0000000000
--- a/ansible/roles/bifrost/templates/id_rsa
+++ /dev/null
@@ -1 +0,0 @@
-{{ bifrost_ssh_key.private_key }}
diff --git a/ansible/roles/bifrost/templates/id_rsa.pub b/ansible/roles/bifrost/templates/id_rsa.pub
deleted file mode 100644
index 173bbb204b..0000000000
--- a/ansible/roles/bifrost/templates/id_rsa.pub
+++ /dev/null
@@ -1 +0,0 @@
-{{ bifrost_ssh_key.public_key }}
diff --git a/ansible/roles/bifrost/templates/servers.yml.j2 b/ansible/roles/bifrost/templates/servers.yml.j2
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/bifrost/templates/servers.yml.j2
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/bifrost/templates/ssh_config b/ansible/roles/bifrost/templates/ssh_config
deleted file mode 100644
index 41dff51a6b..0000000000
--- a/ansible/roles/bifrost/templates/ssh_config
+++ /dev/null
@@ -1,3 +0,0 @@
-Host *
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/ansible/roles/ceilometer/defaults/main.yml b/ansible/roles/ceilometer/defaults/main.yml
deleted file mode 100644
index 60205fe3eb..0000000000
--- a/ansible/roles/ceilometer/defaults/main.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-project_name: "ceilometer"
-
-
-####################
-# Database
-####################
-ceilometer_database_name: "ceilometer"
-ceilometer_database_user: "ceilometer"
-ceilometer_database_port: "{{ mongodb_port if ceilometer_database_type == 'mongodb' else database_port }}"
-
-ceilometer_database_mongodb_address: "{% for host in groups['mongodb'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ ceilometer_database_port }}{% if not loop.last %},{% endif %}{% endfor %}"
-ceilometer_database_mysql_address: "{{ kolla_internal_fqdn }}"
-
-
-####################
-# Docker
-####################
-ceilometer_notification_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-notification"
-ceilometer_notification_tag: "{{ openstack_release }}"
-ceilometer_notification_image_full: "{{ ceilometer_notification_image }}:{{ ceilometer_notification_tag }}"
-
-ceilometer_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-api"
-ceilometer_api_tag: "{{ openstack_release }}"
-ceilometer_api_image_full: "{{ ceilometer_api_image }}:{{ ceilometer_api_tag }}"
-
-ceilometer_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-central"
-ceilometer_central_tag: "{{ openstack_release }}"
-ceilometer_central_image_full: "{{ ceilometer_central_image }}:{{ ceilometer_central_tag }}"
-
-ceilometer_collector_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-collector"
-ceilometer_collector_tag: "{{ openstack_release }}"
-ceilometer_collector_image_full: "{{ ceilometer_collector_image }}:{{ ceilometer_collector_tag }}"
-
-ceilometer_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-compute"
-ceilometer_compute_tag: "{{ openstack_release }}"
-ceilometer_compute_image_full: "{{ ceilometer_compute_image }}:{{ ceilometer_compute_tag }}"
-
-
-####################
-# OpenStack
-####################
-ceilometer_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ceilometer_api_port }}"
-ceilometer_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ceilometer_api_port }}"
-ceilometer_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ ceilometer_api_port }}"
-
-ceilometer_logging_debug: "{{ openstack_logging_debug }}"
-
-ceilometer_keystone_user: "ceilometer"
-
-openstack_ceilometer_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/ceilometer/meta/main.yml b/ansible/roles/ceilometer/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/ceilometer/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/ceilometer/tasks/bootstrap.yml b/ansible/roles/ceilometer/tasks/bootstrap.yml
deleted file mode 100644
index 7078709ed6..0000000000
--- a/ansible/roles/ceilometer/tasks/bootstrap.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-- name: Creating Ceilometer MongoDB database
- command: docker exec -t mongodb mongo --host {{ ceilometer_database_mongodb_address }} --eval 'db = db.getSiblingDB("{{ ceilometer_database_name }}"); db.createUser({user':' "{{ ceilometer_database_user }}", pwd':' "{{ ceilometer_database_password}}", roles':' [ "readWrite", "dbAdmin" ]})'
- register: mongodb_ceilometer_database
- run_once: true
- failed_when:
- - "'already' not in mongodb_ceilometer_database.stdout"
- - mongodb_ceilometer_database.stdout.split()[4] != 'connecting'
- delegate_to: "{{ groups['mongodb'][0] }}"
- when:
- - ceilometer_database_type == "mongodb"
-
-- name: Checking Ceilometer mysql database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ ceilometer_database_mysql_address }}'
- login_port='{{ ceilometer_database_port }}'
- login_user='{{ ceilometer_database_user }}'
- login_password='{{ ceilometer_database_password }}'
- name='{{ ceilometer_database_name }}'"
- register: mysql_access
- failed_when: False
- changed_when: False
- run_once: True
- when:
- - ceilometer_database_type == "mysql"
-
-- name: Creating Ceilometer mysql database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ ceilometer_database_mysql_address }}'
- login_port='{{ ceilometer_database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ ceilometer_database_name }}'"
- register: mysql_ceilometer_database
- changed_when: "{{ mysql_ceilometer_database.stdout.find('localhost | SUCCESS => ') != -1 and
- (mysql_ceilometer_database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: mysql_ceilometer_database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['ceilometer-api'][0] }}"
- when:
- - ceilometer_database_type == "mysql"
- - "'FAILED' in mysql_access.stdout"
-
-- name: Creating Ceilometer database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ ceilometer_database_mysql_address }}'
- login_port='{{ ceilometer_database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ ceilometer_database_name }}'
- password='{{ ceilometer_database_password }}'
- host='%'
- priv='{{ ceilometer_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['ceilometer-api'][0] }}"
- when:
- - ceilometer_database_type == "mysql"
- - "'FAILED' in mysql_access.stdout"
-
-# TODO(Jeffrey4l): fix idempotent when ceilomter_database_type == "gnocchi"
-# NOTE(vbel): bootstrapping of mysql db for ceilometer is idempotent
-- include: bootstrap_service.yml
- when: ((ceilometer_database_type == "mongodb" and mongodb_ceilometer_database.changed)
- or ceilometer_database_type == "mysql"
- or ceilometer_database_type == "gnocchi")
diff --git a/ansible/roles/ceilometer/tasks/bootstrap_service.yml b/ansible/roles/ceilometer/tasks/bootstrap_service.yml
deleted file mode 100644
index 487b7999fd..0000000000
--- a/ansible/roles/ceilometer/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Running Ceilometer bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- CEILOMETER_DATABASE_TYPE: "{{ ceilometer_database_type }}"
- image: "{{ ceilometer_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_ceilometer"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/ceilometer-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ceilometer:/var/lib/ceilometer/"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['ceilometer-api'][0] }}"
diff --git a/ansible/roles/ceilometer/tasks/config.yml b/ansible/roles/ceilometer/tasks/config.yml
deleted file mode 100644
index e4c6cdf7b3..0000000000
--- a/ansible/roles/ceilometer/tasks/config.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "ceilometer-notification"
- - "ceilometer-collector"
- - "ceilometer-api"
- - "ceilometer-central"
- - "ceilometer-compute"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "ceilometer-notification"
- - "ceilometer-collector"
- - "ceilometer-api"
- - "ceilometer-central"
- - "ceilometer-compute"
-
-- name: Copying over ceilometer-api.conf
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/ceilometer-api/wsgi-ceilometer-api.conf"
- with_first_found:
- - "{{ node_custom_config }}/ceilometer/{{ inventory_hostname }}/wsgi-ceilometer-api.conf"
- - "{{ node_custom_config }}/ceilometer/wsgi-ceilometer-api.conf"
- - "wsgi-ceilometer-api.conf.j2"
-
-- name: Copying over ceilometer.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/ceilometer.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/ceilometer.conf"
- - "{{ node_custom_config }}/ceilometer/{{ item }}.conf"
- - "{{ node_custom_config }}/ceilometer/{{ inventory_hostname }}/ceilometer.conf"
- dest: "{{ node_config_directory }}/{{ item }}/ceilometer.conf"
- with_items:
- - "ceilometer-notification"
- - "ceilometer-collector"
- - "ceilometer-api"
- - "ceilometer-central"
- - "ceilometer-compute"
-
-- name: Copying over event and pipeline yaml for notification service
- template:
- src: "{{ item }}.j2"
- dest: "{{ node_config_directory }}/ceilometer-notification/{{ item }}"
- with_items:
- - "event_definitions.yaml"
- - "event_pipeline.yaml"
- - "pipeline.yaml"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/ceilometer/policy.json"
- register: ceilometer_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/ceilometer/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "ceilometer-notification"
- - "ceilometer-collector"
- - "ceilometer-api"
- - "ceilometer-central"
- - "ceilometer-compute"
- when:
- ceilometer_policy.stat.exists
diff --git a/ansible/roles/ceilometer/tasks/deploy.yml b/ansible/roles/ceilometer/tasks/deploy.yml
deleted file mode 100644
index 7f219ae9a3..0000000000
--- a/ansible/roles/ceilometer/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['ceilometer']
-
-- include: config.yml
- when: inventory_hostname in groups['ceilometer'] or
- inventory_hostname in groups['compute']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['ceilometer']
-
-- include: start.yml
- when: inventory_hostname in groups['ceilometer'] or
- inventory_hostname in groups['compute']
diff --git a/ansible/roles/ceilometer/tasks/main.yml b/ansible/roles/ceilometer/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/ceilometer/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/ceilometer/tasks/precheck.yml b/ansible/roles/ceilometer/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/ceilometer/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/ceilometer/tasks/pull.yml b/ansible/roles/ceilometer/tasks/pull.yml
deleted file mode 100644
index 0105e02d6e..0000000000
--- a/ansible/roles/ceilometer/tasks/pull.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Pulling ceilometer-notification image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_notification_image_full }}"
- when: inventory_hostname in groups['ceilometer-notification']
-
-- name: Pulling ceilometer-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_api_image_full }}"
- when: inventory_hostname in groups['ceilometer-api']
-
-- name: Pulling ceilometer-central image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_central_image_full }}"
- when: inventory_hostname in groups['ceilometer-central']
-
-- name: Pulling ceilometer-collector image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_collector_image_full }}"
- when: inventory_hostname in groups['ceilometer-collector']
-
-- name: Pulling ceilometer-compute image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_compute_image_full }}"
- when: inventory_hostname in groups['ceilometer-compute']
diff --git a/ansible/roles/ceilometer/tasks/reconfigure.yml b/ansible/roles/ceilometer/tasks/reconfigure.yml
deleted file mode 100644
index e0e1df5102..0000000000
--- a/ansible/roles/ceilometer/tasks/reconfigure.yml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ceilometer_api, group: ceilometer-api }
- - { name: ceilometer_central, group: ceilometer-central }
- - { name: ceilometer_notification, group: ceilometer-notification }
- - { name: ceilometer_collector, group: ceilometer-collector }
- - { name: ceilometer_compute, group: ceilometer-compute }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ceilometer_api, group: ceilometer-api }
- - { name: ceilometer_central, group: ceilometer-central }
- - { name: ceilometer_notification, group: ceilometer-notification }
- - { name: ceilometer_collector, group: ceilometer-collector }
- - { name: ceilometer_compute, group: ceilometer-compute }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ceilometer_api, group: ceilometer-api }
- - { name: ceilometer_central, group: ceilometer-central }
- - { name: ceilometer_notification, group: ceilometer-notification }
- - { name: ceilometer_collector, group: ceilometer-collector }
- - { name: ceilometer_compute, group: ceilometer-compute }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: ceilometer_api, group: ceilometer-api },
- { name: ceilometer_central, group: ceilometer-central },
- { name: ceilometer_notification, group: ceilometer-notification },
- { name: ceilometer_collector, group: ceilometer-collector },
- { name: ceilometer_compute, group: ceilometer-compute }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: ceilometer_api, group: ceilometer-api },
- { name: ceilometer_central, group: ceilometer-central },
- { name: ceilometer_notification, group: ceilometer-notification },
- { name: ceilometer_collector, group: ceilometer-collector },
- { name: ceilometer_compute, group: ceilometer-compute }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/ceilometer/tasks/register.yml b/ansible/roles/ceilometer/tasks/register.yml
deleted file mode 100644
index c3cdd1a15b..0000000000
--- a/ansible/roles/ceilometer/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Ceilometer service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=ceilometer
- service_type=metering
- description='Openstack Telemetry'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_ceilometer_auth }}' }}"
- -e "{'openstack_ceilometer_auth':{{ openstack_ceilometer_auth }}}"
- register: ceilometer_endpoint
- changed_when: "{{ ceilometer_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (ceilometer_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: ceilometer_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ ceilometer_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ ceilometer_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ ceilometer_public_endpoint }}'}
-
-- name: Creating the Ceilometer project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=ceilometer
- password={{ ceilometer_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_ceilometer_auth }}' }}"
- -e "{'openstack_ceilometer_auth':{{ openstack_ceilometer_auth }}}"
- register: ceilometer_user
- changed_when: "{{ ceilometer_user.stdout.find('localhost | SUCCESS => ') != -1 and (ceilometer_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: ceilometer_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/ceilometer/tasks/start.yml b/ansible/roles/ceilometer/tasks/start.yml
deleted file mode 100644
index 4cd760e5b8..0000000000
--- a/ansible/roles/ceilometer/tasks/start.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- name: Starting ceilometer-notification container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_notification_image_full }}"
- name: "ceilometer_notification"
- volumes:
- - "{{ node_config_directory }}/ceilometer-notification/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['ceilometer-notification']
-
-- name: Starting ceilometer-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_api_image_full }}"
- name: "ceilometer_api"
- volumes:
- - "{{ node_config_directory }}/ceilometer-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ceilometer:/var/lib/ceilometer/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['ceilometer-api']
-
-- name: Starting ceilometer-central container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_central_image_full }}"
- name: "ceilometer_central"
- volumes:
- - "{{ node_config_directory }}/ceilometer-central/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ceilometer:/var/lib/ceilometer/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['ceilometer-central']
-
-- name: Starting ceilometer-collector container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_collector_image_full }}"
- name: "ceilometer_collector"
- volumes:
- - "{{ node_config_directory }}/ceilometer-collector/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ceilometer:/var/lib/ceilometer/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['ceilometer-collector']
-
-- name: Starting ceilometer-compute container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceilometer_compute_image_full }}"
- name: "ceilometer_compute"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/ceilometer-compute/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run/:/run/:shared"
- - "ceilometer:/var/lib/ceilometer/"
- - "kolla_logs:/var/log/kolla/"
- - "nova_libvirt:/var/lib/libvirt"
- when: inventory_hostname in groups['ceilometer-compute']
diff --git a/ansible/roles/ceilometer/tasks/upgrade.yml b/ansible/roles/ceilometer/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/ceilometer/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/ceilometer/templates/ceilometer-api.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-api.json.j2
deleted file mode 100644
index ed8ae5ebe1..0000000000
--- a/ansible/roles/ceilometer/templates/ceilometer-api.json.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'ceilometer-api.conf' %}
-{
- "command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceilometer.conf",
- "dest": "/etc/ceilometer/ceilometer.conf",
- "owner": "ceilometer",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/wsgi-ceilometer-api.conf",
- "dest": "/etc/{{ apache_dir }}/{{ apache_file }}",
- "owner": "ceilometer",
- "perm": "0644"
- }
- ]
-}
diff --git a/ansible/roles/ceilometer/templates/ceilometer-central.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-central.json.j2
deleted file mode 100644
index e879afd594..0000000000
--- a/ansible/roles/ceilometer/templates/ceilometer-central.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "ceilometer-polling --polling-namespaces central",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceilometer.conf",
- "dest": "/etc/ceilometer/ceilometer.conf",
- "owner": "ceilometer",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/ceilometer/templates/ceilometer-collector.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-collector.json.j2
deleted file mode 100644
index 32970e1271..0000000000
--- a/ansible/roles/ceilometer/templates/ceilometer-collector.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "ceilometer-collector",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceilometer.conf",
- "dest": "/etc/ceilometer/ceilometer.conf",
- "owner": "ceilometer",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/ceilometer/templates/ceilometer-compute.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-compute.json.j2
deleted file mode 100644
index b8ed69455b..0000000000
--- a/ansible/roles/ceilometer/templates/ceilometer-compute.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "ceilometer-polling --polling-namespaces compute",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceilometer.conf",
- "dest": "/etc/ceilometer/ceilometer.conf",
- "owner": "ceilometer",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/ceilometer/templates/ceilometer-notification.json.j2 b/ansible/roles/ceilometer/templates/ceilometer-notification.json.j2
deleted file mode 100644
index 769a8c8ce1..0000000000
--- a/ansible/roles/ceilometer/templates/ceilometer-notification.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "ceilometer-agent-notification",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceilometer.conf",
- "dest": "/etc/ceilometer/ceilometer.conf",
- "owner": "ceilometer",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/event_definitions.yaml",
- "dest": "/etc/ceilometer/event_definitions.yaml",
- "owner": "ceilometer",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/event_pipeline.yaml",
- "dest": "/etc/ceilometer/event_pipeline.yaml",
- "owner": "ceilometer",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/pipeline.yaml",
- "dest": "/etc/ceilometer/pipeline.yaml",
- "owner": "ceilometer",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/ceilometer/templates/ceilometer.conf.j2 b/ansible/roles/ceilometer/templates/ceilometer.conf.j2
deleted file mode 100644
index 91f5205416..0000000000
--- a/ansible/roles/ceilometer/templates/ceilometer.conf.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-[DEFAULT]
-debug = {{ ceilometer_logging_debug }}
-
-log_dir = /var/log/kolla/ceilometer
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if ceilometer_database_type == 'gnocchi' %}
-meter_dispatchers = gnocchi
-event_dispatchers = gnocchi
-{% else %}
-meter_dispatchers = database
-event_dispatchers = database
-{% endif %}
-
-[api]
-port = {{ ceilometer_api_port }}
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-[collector]
-
-[database]
-{% if ceilometer_database_type == "mongodb" %}
-event_connection = mongodb://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mongodb_address }}/{{ ceilometer_database_name }}
-metering_connection = mongodb://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mongodb_address }}/{{ ceilometer_database_name }}
-{% elif ceilometer_database_type == "mysql" %}
-event_connection = mysql+pymysql://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mysql_address }}:{{ ceilometer_database_port }}/{{ ceilometer_database_name }}
-metering_connection = mysql+pymysql://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mysql_address }}:{{ ceilometer_database_port }}/{{ ceilometer_database_name }}
-{% endif %}
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-project_domain_name = Default
-project_name = service
-user_domain_name = Default
-username = {{ ceilometer_keystone_user }}
-password = {{ ceilometer_keystone_password }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[notification]
-store_events = true
-
-[service_credentials]
-auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
-region_name = {{ openstack_region_name }}
-password = {{ ceilometer_keystone_password }}
-username = {{ ceilometer_keystone_user }}
-project_name = service
-project_domain_id = default
-user_domain_id = default
-auth_type = password
-
-{% if ceilometer_database_type == 'gnocchi' %}
-[dispatcher_gnocchi]
-filter_service_activity = False
-url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ gnocchi_api_port }}
-archive_policy = low
-{% endif %}
diff --git a/ansible/roles/ceilometer/templates/event_definitions.yaml.j2 b/ansible/roles/ceilometer/templates/event_definitions.yaml.j2
deleted file mode 100644
index 5f0d82ae5e..0000000000
--- a/ansible/roles/ceilometer/templates/event_definitions.yaml.j2
+++ /dev/null
@@ -1,553 +0,0 @@
----
-- event_type: compute.instance.*
- traits: &instance_traits
- tenant_id:
- fields: payload.tenant_id
- user_id:
- fields: payload.user_id
- instance_id:
- fields: payload.instance_id
- host:
- fields: publisher_id.`split(., 1, 1)`
- service:
- fields: publisher_id.`split(., 0, -1)`
- memory_mb:
- type: int
- fields: payload.memory_mb
- disk_gb:
- type: int
- fields: payload.disk_gb
- root_gb:
- type: int
- fields: payload.root_gb
- ephemeral_gb:
- type: int
- fields: payload.ephemeral_gb
- vcpus:
- type: int
- fields: payload.vcpus
- instance_type_id:
- type: int
- fields: payload.instance_type_id
- instance_type:
- fields: payload.instance_type
- state:
- fields: payload.state
- os_architecture:
- fields: payload.image_meta.'org.openstack__1__architecture'
- os_version:
- fields: payload.image_meta.'org.openstack__1__os_version'
- os_distro:
- fields: payload.image_meta.'org.openstack__1__os_distro'
- launched_at:
- type: datetime
- fields: payload.launched_at
- deleted_at:
- type: datetime
- fields: payload.deleted_at
-- event_type: compute.instance.exists
- traits:
- <<: *instance_traits
- audit_period_beginning:
- type: datetime
- fields: payload.audit_period_beginning
- audit_period_ending:
- type: datetime
- fields: payload.audit_period_ending
-- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
- traits: &cinder_traits
- user_id:
- fields: payload.user_id
- project_id:
- fields: payload.tenant_id
- availability_zone:
- fields: payload.availability_zone
- display_name:
- fields: payload.display_name
- replication_status:
- fields: payload.replication_status
- status:
- fields: payload.status
- created_at:
- fields: payload.created_at
-- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*']
- traits:
- <<: *cinder_traits
- resource_id:
- fields: payload.volume_id
- host:
- fields: payload.host
- size:
- fields: payload.size
- type:
- fields: payload.volume_type
- replication_status:
- fields: payload.replication_status
-- event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
- traits:
- <<: *cinder_traits
- resource_id:
- fields: payload.snapshot_id
- volume_id:
- fields: payload.volume_id
-- event_type: ['image_volume_cache.*']
- traits:
- image_id:
- fields: payload.image_id
- host:
- fields: payload.host
-- event_type: ['image.update', 'image.upload', 'image.delete']
- traits: &glance_crud
- project_id:
- fields: payload.owner
- resource_id:
- fields: payload.id
- name:
- fields: payload.name
- status:
- fields: payload.status
- created_at:
- fields: payload.created_at
- user_id:
- fields: payload.owner
- deleted_at:
- fields: payload.deleted_at
- size:
- fields: payload.size
-- event_type: image.send
- traits: &glance_send
- receiver_project:
- fields: payload.receiver_tenant_id
- receiver_user:
- fields: payload.receiver_user_id
- user_id:
- fields: payload.owner_id
- image_id:
- fields: payload.image_id
- destination_ip:
- fields: payload.destination_ip
- bytes_sent:
- fields: payload.bytes_sent
-- event_type: orchestration.stack.*
- traits: &orchestration_crud
- project_id:
- fields: payload.tenant_id
- user_id:
- fields: ['_context_trustor_user_id', '_context_user_id']
- resource_id:
- fields: payload.stack_identity
-- event_type: sahara.cluster.*
- traits: &sahara_crud
- project_id:
- fields: payload.project_id
- user_id:
- fields: _context_user_id
- resource_id:
- fields: payload.cluster_id
-- event_type: sahara.cluster.health
- traits: &sahara_health
- <<: *sahara_crud
- verification_id:
- fields: payload.verification_id
- health_check_status:
- fields: payload.health_check_status
- health_check_name:
- fields: payload.health_check_name
- health_check_description:
- fields: payload.health_check_description
- created_at:
- type: datetime
- fields: payload.created_at
- updated_at:
- type: datetime
- fields: payload.updated_at
-- event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*',
- 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*']
- traits: &identity_crud
- resource_id:
- fields: payload.resource_info
- initiator_id:
- fields: payload.initiator.id
- project_id:
- fields: payload.initiator.project_id
- domain_id:
- fields: payload.initiator.domain_id
-- event_type: identity.role_assignment.*
- traits: &identity_role_assignment
- role:
- fields: payload.role
- group:
- fields: payload.group
- domain:
- fields: payload.domain
- user:
- fields: payload.user
- project:
- fields: payload.project
-- event_type: identity.authenticate
- traits: &identity_authenticate
- typeURI:
- fields: payload.typeURI
- id:
- fields: payload.id
- action:
- fields: payload.action
- eventType:
- fields: payload.eventType
- eventTime:
- fields: payload.eventTime
- outcome:
- fields: payload.outcome
- initiator_typeURI:
- fields: payload.initiator.typeURI
- initiator_id:
- fields: payload.initiator.id
- initiator_name:
- fields: payload.initiator.name
- initiator_host_agent:
- fields: payload.initiator.host.agent
- initiator_host_addr:
- fields: payload.initiator.host.address
- target_typeURI:
- fields: payload.target.typeURI
- target_id:
- fields: payload.target.id
- observer_typeURI:
- fields: payload.observer.typeURI
- observer_id:
- fields: payload.observer.id
-- event_type: objectstore.http.request
- traits: &objectstore_request
- typeURI:
- fields: payload.typeURI
- id:
- fields: payload.id
- action:
- fields: payload.action
- eventType:
- fields: payload.eventType
- eventTime:
- fields: payload.eventTime
- outcome:
- fields: payload.outcome
- initiator_typeURI:
- fields: payload.initiator.typeURI
- initiator_id:
- fields: payload.initiator.id
- initiator_project_id:
- fields: payload.initiator.project_id
- target_typeURI:
- fields: payload.target.typeURI
- target_id:
- fields: payload.target.id
- target_action:
- fields: payload.target.action
- target_metadata_path:
- fields: payload.target.metadata.path
- target_metadata_version:
- fields: payload.target.metadata.version
- target_metadata_container:
- fields: payload.target.metadata.container
- target_metadata_object:
- fields: payload.target.metadata.object
- observer_id:
- fields: payload.observer.id
-- event_type: magnetodb.table.*
- traits: &kv_store
- resource_id:
- fields: payload.table_uuid
- user_id:
- fields: _context_user_id
- project_id:
- fields: _context_tenant
-- event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*']
- traits: &network_traits
- user_id:
- fields: _context_user_id
- project_id:
- fields: _context_tenant_id
-- event_type: network.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.network.id', 'payload.id']
-- event_type: subnet.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.subnet.id', 'payload.id']
-- event_type: port.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.port.id', 'payload.id']
-- event_type: router.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.router.id', 'payload.id']
-- event_type: floatingip.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.floatingip.id', 'payload.id']
-- event_type: pool.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.pool.id', 'payload.id']
-- event_type: vip.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.vip.id', 'payload.id']
-- event_type: member.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.member.id', 'payload.id']
-- event_type: health_monitor.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.health_monitor.id', 'payload.id']
-- event_type: healthmonitor.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.healthmonitor.id', 'payload.id']
-- event_type: listener.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.listener.id', 'payload.id']
-- event_type: loadbalancer.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.loadbalancer.id', 'payload.id']
-- event_type: firewall.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.firewall.id', 'payload.id']
-- event_type: firewall_policy.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.firewall_policy.id', 'payload.id']
-- event_type: firewall_rule.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.firewall_rule.id', 'payload.id']
-- event_type: vpnservice.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.vpnservice.id', 'payload.id']
-- event_type: ipsecpolicy.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.ipsecpolicy.id', 'payload.id']
-- event_type: ikepolicy.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.ikepolicy.id', 'payload.id']
-- event_type: ipsec_site_connection.*
- traits:
- <<: *network_traits
- resource_id:
- fields: ['payload.ipsec_site_connection.id', 'payload.id']
-- event_type: '*http.*'
- traits: &http_audit
- project_id:
- fields: payload.initiator.project_id
- user_id:
- fields: payload.initiator.id
- typeURI:
- fields: payload.typeURI
- eventType:
- fields: payload.eventType
- action:
- fields: payload.action
- outcome:
- fields: payload.outcome
- id:
- fields: payload.id
- eventTime:
- fields: payload.eventTime
- requestPath:
- fields: payload.requestPath
- observer_id:
- fields: payload.observer.id
- target_id:
- fields: payload.target.id
- target_typeURI:
- fields: payload.target.typeURI
- target_name:
- fields: payload.target.name
- initiator_typeURI:
- fields: payload.initiator.typeURI
- initiator_id:
- fields: payload.initiator.id
- initiator_name:
- fields: payload.initiator.name
- initiator_host_address:
- fields: payload.initiator.host.address
-- event_type: '*http.response'
- traits:
- <<: *http_audit
- reason_code:
- fields: payload.reason.reasonCode
-- event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete']
- traits: &dns_domain_traits
- status:
- fields: payload.status
- retry:
- fields: payload.retry
- description:
- fields: payload.description
- expire:
- fields: payload.expire
- email:
- fields: payload.email
- ttl:
- fields: payload.ttl
- action:
- fields: payload.action
- name:
- fields: payload.name
- resource_id:
- fields: payload.id
- created_at:
- fields: payload.created_at
- updated_at:
- fields: payload.updated_at
- version:
- fields: payload.version
- parent_domain_id:
- fields: parent_domain_id
- serial:
- fields: payload.serial
-- event_type: dns.domain.exists
- traits:
- <<: *dns_domain_traits
- audit_period_beginning:
- type: datetime
- fields: payload.audit_period_beginning
- audit_period_ending:
- type: datetime
- fields: payload.audit_period_ending
-- event_type: trove.*
- traits: &trove_base_traits
- state:
- fields: payload.state_description
- instance_type:
- fields: payload.instance_type
- user_id:
- fields: payload.user_id
- resource_id:
- fields: payload.instance_id
- instance_type_id:
- fields: payload.instance_type_id
- launched_at:
- type: datetime
- fields: payload.launched_at
- instance_name:
- fields: payload.instance_name
- state:
- fields: payload.state
- nova_instance_id:
- fields: payload.nova_instance_id
- service_id:
- fields: payload.service_id
- created_at:
- type: datetime
- fields: payload.created_at
- region:
- fields: payload.region
-- event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete']
- traits: &trove_common_traits
- name:
- fields: payload.name
- availability_zone:
- fields: payload.availability_zone
- instance_size:
- type: int
- fields: payload.instance_size
- volume_size:
- type: int
- fields: payload.volume_size
- nova_volume_id:
- fields: payload.nova_volume_id
-- event_type: trove.instance.create
- traits:
- <<: [*trove_base_traits, *trove_common_traits]
-- event_type: trove.instance.modify_volume
- traits:
- <<: [*trove_base_traits, *trove_common_traits]
- old_volume_size:
- type: int
- fields: payload.old_volume_size
- modify_at:
- type: datetime
- fields: payload.modify_at
-- event_type: trove.instance.modify_flavor
- traits:
- <<: [*trove_base_traits, *trove_common_traits]
- old_instance_size:
- type: int
- fields: payload.old_instance_size
- modify_at:
- type: datetime
- fields: payload.modify_at
-- event_type: trove.instance.delete
- traits:
- <<: [*trove_base_traits, *trove_common_traits]
- deleted_at:
- type: datetime
- fields: payload.deleted_at
-- event_type: trove.instance.exists
- traits:
- <<: *trove_base_traits
- display_name:
- fields: payload.display_name
- audit_period_beginning:
- type: datetime
- fields: payload.audit_period_beginning
- audit_period_ending:
- type: datetime
- fields: payload.audit_period_ending
-- event_type: profiler.*
- traits:
- project:
- fields: payload.project
- service:
- fields: payload.service
- name:
- fields: payload.name
- base_id:
- fields: payload.base_id
- trace_id:
- fields: payload.trace_id
- parent_id:
- fields: payload.parent_id
- timestamp:
- fields: payload.timestamp
- host:
- fields: payload.info.host
- path:
- fields: payload.info.request.path
- query:
- fields: payload.info.request.query
- method:
- fields: payload.info.request.method
- scheme:
- fields: payload.info.request.scheme
- db.statement:
- fields: payload.info.db.statement
- db.params:
- fields: payload.info.db.params
diff --git a/ansible/roles/ceilometer/templates/event_pipeline.yaml.j2 b/ansible/roles/ceilometer/templates/event_pipeline.yaml.j2
deleted file mode 100644
index 10275f7492..0000000000
--- a/ansible/roles/ceilometer/templates/event_pipeline.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
----
-sources:
- - name: event_source
- events:
- - "*"
- sinks:
- - event_sink
-sinks:
- - name: event_sink
- transformers:
- triggers:
- publishers:
- - notifier://
diff --git a/ansible/roles/ceilometer/templates/pipeline.yaml.j2 b/ansible/roles/ceilometer/templates/pipeline.yaml.j2
deleted file mode 100644
index a5bd5148e7..0000000000
--- a/ansible/roles/ceilometer/templates/pipeline.yaml.j2
+++ /dev/null
@@ -1,92 +0,0 @@
----
-sources:
- - name: meter_source
- interval: 600
- meters:
- - "*"
- sinks:
- - meter_sink
- - name: cpu_source
- interval: 600
- meters:
- - "cpu"
- sinks:
- - cpu_sink
- - cpu_delta_sink
- - name: disk_source
- interval: 600
- meters:
- - "disk.read.bytes"
- - "disk.read.requests"
- - "disk.write.bytes"
- - "disk.write.requests"
- - "disk.device.read.bytes"
- - "disk.device.read.requests"
- - "disk.device.write.bytes"
- - "disk.device.write.requests"
- sinks:
- - disk_sink
- - name: network_source
- interval: 600
- meters:
- - "network.incoming.bytes"
- - "network.incoming.packets"
- - "network.outgoing.bytes"
- - "network.outgoing.packets"
- sinks:
- - network_sink
-sinks:
- - name: meter_sink
- transformers:
- publishers:
- - notifier://
- - name: cpu_sink
- transformers:
- - name: "rate_of_change"
- parameters:
- target:
- name: "cpu_util"
- unit: "%"
- type: "gauge"
- scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
- publishers:
- - notifier://
- - name: cpu_delta_sink
- transformers:
- - name: "delta"
- parameters:
- target:
- name: "cpu.delta"
- growth_only: True
- publishers:
- - notifier://
- - name: disk_sink
- transformers:
- - name: "rate_of_change"
- parameters:
- source:
- map_from:
- name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
- unit: "(B|request)"
- target:
- map_to:
- name: "\\1.\\2.\\3.rate"
- unit: "\\1/s"
- type: "gauge"
- publishers:
- - notifier://
- - name: network_sink
- transformers:
- - name: "rate_of_change"
- parameters:
- source:
- map_from:
- name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
- unit: "(B|packet)"
- target:
- map_to:
- name: "network.\\1.\\2.rate"
- unit: "\\1/s"
- type: "gauge"
- publishers:
- - notifier://
diff --git a/ansible/roles/ceilometer/templates/wsgi-ceilometer-api.conf.j2 b/ansible/roles/ceilometer/templates/wsgi-ceilometer-api.conf.j2
deleted file mode 100644
index 4adb84bf29..0000000000
--- a/ansible/roles/ceilometer/templates/wsgi-ceilometer-api.conf.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
-Listen {{ api_interface_address }}:{{ ceilometer_api_port }}
-
-
- LogLevel info
- ErrorLog /var/log/kolla/ceilometer/ceilometer-api.log
- CustomLog /var/log/kolla/ceilometer/ceilometer-api-access.log combined
-
- WSGIScriptReloading On
- WSGIDaemonProcess ceilometer-api processes={{ openstack_service_workers }} threads=1 user=ceilometer group=ceilometer display-name=%{GROUP} python-path={{ python_path }}
- WSGIProcessGroup ceilometer-api
- WSGIScriptAlias / {{ python_path }}/ceilometer/api/app.wsgi
-
-
- = 2.4>
- Require all granted
-
-
- Order allow,deny
- Allow from all
-
-
-
-
diff --git a/ansible/roles/ceph/defaults/main.yml b/ansible/roles/ceph/defaults/main.yml
deleted file mode 100644
index bed9624561..0000000000
--- a/ansible/roles/ceph/defaults/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-project_name: "ceph"
-
-
-####################
-# Docker
-####################
-ceph_mon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-mon"
-ceph_mon_tag: "{{ openstack_release }}"
-ceph_mon_image_full: "{{ ceph_mon_image }}:{{ ceph_mon_tag }}"
-
-ceph_osd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-osd"
-ceph_osd_tag: "{{ openstack_release }}"
-ceph_osd_image_full: "{{ ceph_osd_image }}:{{ ceph_osd_tag }}"
-
-ceph_rgw_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-rgw"
-ceph_rgw_tag: "{{ openstack_release }}"
-ceph_rgw_image_full: "{{ ceph_rgw_image }}:{{ ceph_rgw_tag }}"
-
-####################
-# Ceph
-####################
-osd_initial_weight: "1"
diff --git a/ansible/roles/ceph/meta/main.yml b/ansible/roles/ceph/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/ceph/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/ceph/tasks/bootstrap_mons.yml b/ansible/roles/ceph/tasks/bootstrap_mons.yml
deleted file mode 100644
index a5ec39d4c1..0000000000
--- a/ansible/roles/ceph/tasks/bootstrap_mons.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- name: Cleaning up temp file on localhost
- local_action: file path=/tmp/kolla_ceph_cluster state=absent
- changed_when: False
- check_mode: no
- run_once: True
-
-- name: Creating temp file on localhost
- local_action: copy content=None dest=/tmp/kolla_ceph_cluster mode=0644
- changed_when: False
- check_mode: no
- run_once: True
-
-- name: Creating ceph_mon_config volume
- kolla_docker:
- action: "create_volume"
- common_options: "{{ docker_common_options }}"
- name: "ceph_mon_config"
- register: ceph_mon_config_volume
-
-- name: Writing hostname of host with existing cluster files to temp file
- local_action: copy content="{{ inventory_hostname }}" dest=/tmp/kolla_ceph_cluster mode=0644
- changed_when: False
- check_mode: no
- when: not ceph_mon_config_volume.changed
-
-- name: Registering host from temp file
- set_fact:
- delegate_host: "{{ lookup('file', '/tmp/kolla_ceph_cluster') }}"
-
-- name: Cleaning up temp file on localhost
- local_action: file path=/tmp/kolla_ceph_cluster state=absent
- changed_when: False
- check_mode: no
- run_once: True
-
-- include: generate_cluster.yml
- when: delegate_host == 'None' and inventory_hostname == groups['ceph-mon'][0]
diff --git a/ansible/roles/ceph/tasks/bootstrap_osds.yml b/ansible/roles/ceph/tasks/bootstrap_osds.yml
deleted file mode 100644
index e5d0ab8b08..0000000000
--- a/ansible/roles/ceph/tasks/bootstrap_osds.yml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-- name: Looking up disks to bootstrap for Ceph OSDs
- command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
- -m find_disks
- -a "partition_name='KOLLA_CEPH_OSD_BOOTSTRAP' match_mode='prefix'"
- register: osd_lookup
- changed_when: "{{ osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
-
-- name: Parsing disk info for Ceph OSDs
- set_fact:
- osds_bootstrap: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
-
-- name: Looking up disks to bootstrap for Ceph Cache OSDs
- command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
- -m find_disks
- -a "partition_name='KOLLA_CEPH_OSD_CACHE_BOOTSTRAP' match_mode='prefix'"
- register: osd_cache_lookup
- changed_when: "{{ osd_cache_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: osd_cache_lookup.stdout.split()[2] != 'SUCCESS'
-
-- name: Parsing disk info for Ceph Cache OSDs
- set_fact:
- osds_cache_bootstrap: "{{ (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
-
-- pause:
- prompt: |
- WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
- a co-located journal, but appears to contain other existing partitions (>1).
-
- If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
-
- Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
- ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
- with_items: "{{ osds_bootstrap|default([]) }}"
- when:
- - item.external_journal | bool == False
- - ansible_devices[item.device.split('/')[2]].partitions|count > 1
- - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
-
-- name: Bootstrapping Ceph OSDs
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- OSD_DEV: "{{ item.1.device }}"
- OSD_PARTITION: "{{ item.1.partition }}"
- OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
- JOURNAL_DEV: "{{ item.1.journal_device }}"
- JOURNAL_PARTITION: "{{ item.1.journal }}"
- JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
- USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
- OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
- OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
- HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
- image: "{{ ceph_osd_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_osd_{{ item.0 }}"
- privileged: True
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "kolla_logs:/var/log/kolla/"
- with_indexed_items: "{{ osds_bootstrap|default([]) }}"
-
-- pause:
- prompt: |
- WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
- a co-located journal, but appears to contain other existing partitions (>1).
-
- If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
-
- Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
- ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
- with_items: "{{ osds_cache_bootstrap|default([]) }}"
- when:
- - item.external_journal | bool == False
- - ansible_devices[item.device.split('/')[2]].partitions|count > 1
- - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
-
-- name: Bootstrapping Ceph Cache OSDs
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- CEPH_CACHE:
- OSD_DEV: "{{ item.1.device }}"
- OSD_PARTITION: "{{ item.1.partition }}"
- OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
- JOURNAL_DEV: "{{ item.1.journal_device }}"
- JOURNAL_PARTITION: "{{ item.1.journal }}"
- JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
- USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
- OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
- OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
- HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
- image: "{{ ceph_osd_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_osd_cache_{{ item.0 }}"
- privileged: True
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "kolla_logs:/var/log/kolla/"
- with_indexed_items: "{{ osds_cache_bootstrap|default([]) }}"
diff --git a/ansible/roles/ceph/tasks/config.yml b/ansible/roles/ceph/tasks/config.yml
deleted file mode 100644
index d88e9645de..0000000000
--- a/ansible/roles/ceph/tasks/config.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "ceph-mon"
- - "ceph-osd"
- - "ceph-rgw"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "ceph-mon"
- - "ceph-osd"
- - "ceph-rgw"
-
-- name: Copying over ceph.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/ceph.conf.j2"
- - "{{ node_custom_config }}/ceph.conf"
- - "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
- dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
- with_items:
- - "ceph-mon"
- - "ceph-osd"
- - "ceph-rgw"
diff --git a/ansible/roles/ceph/tasks/deploy.yml b/ansible/roles/ceph/tasks/deploy.yml
deleted file mode 100644
index 403265af74..0000000000
--- a/ansible/roles/ceph/tasks/deploy.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_mons.yml
- when: inventory_hostname in groups['ceph-mon']
-
-- include: distribute_keyrings.yml
-
-- include: start_mons.yml
- when: inventory_hostname in groups['ceph-mon']
-
-- include: bootstrap_osds.yml
- when: inventory_hostname in groups['ceph-osd']
-
-- include: start_osds.yml
- when: inventory_hostname in groups['ceph-osd']
-
-- include: start_rgws.yml
- when:
- - inventory_hostname in groups['ceph-rgw']
- - enable_ceph_rgw | bool
diff --git a/ansible/roles/ceph/tasks/distribute_keyrings.yml b/ansible/roles/ceph/tasks/distribute_keyrings.yml
deleted file mode 100644
index ea4952a42a..0000000000
--- a/ansible/roles/ceph/tasks/distribute_keyrings.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Fetching Ceph keyrings
- command: docker exec ceph_mon fetch_ceph_keys.py
- register: ceph_files_json
- changed_when: "{{ (ceph_files_json.stdout | from_json).changed }}"
- failed_when: "{{ (ceph_files_json.stdout | from_json).failed }}"
- delegate_to: "{{ delegate_host }}"
- run_once: True
-
-- name: Reading json from variable
- set_fact:
- ceph_files: "{{ (ceph_files_json.stdout | from_json) }}"
-
-- name: Pushing Ceph keyring for OSDs
- bslurp:
- src: "{{ item.content }}"
- dest: "{{ node_config_directory }}/ceph-osd/{{ item.filename }}"
- mode: 0600
- sha1: "{{ item.sha1 }}"
- with_items:
- - "{{ ceph_files['ceph.client.admin.keyring'] }}"
- when: inventory_hostname in groups['ceph-osd']
-
-- name: Pushing Ceph keyrings for Mons
- bslurp:
- src: "{{ item.content }}"
- dest: "{{ node_config_directory }}/ceph-mon/{{ item.filename }}"
- mode: 0600
- sha1: "{{ item.sha1 }}"
- with_items:
- - "{{ ceph_files['ceph.client.admin.keyring'] }}"
- - "{{ ceph_files['ceph.client.mon.keyring'] }}"
- - "{{ ceph_files['ceph.client.radosgw.keyring'] }}"
- - "{{ ceph_files['ceph.monmap'] }}"
- when: inventory_hostname in groups['ceph-mon']
-
-- name: Pushing Ceph keyrings for RGWs
- bslurp:
- src: "{{ item.content }}"
- dest: "{{ node_config_directory }}/ceph-rgw/{{ item.filename }}"
- mode: 0600
- sha1: "{{ item.sha1 }}"
- with_items:
- - "{{ ceph_files['ceph.client.admin.keyring'] }}"
- - "{{ ceph_files['ceph.client.radosgw.keyring'] }}"
- when: inventory_hostname in groups['ceph-rgw']
diff --git a/ansible/roles/ceph/tasks/generate_cluster.yml b/ansible/roles/ceph/tasks/generate_cluster.yml
deleted file mode 100644
index beb78e5876..0000000000
--- a/ansible/roles/ceph/tasks/generate_cluster.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Generating initial Ceph keyrings and monmap
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- MON_IP: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
- HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
- image: "{{ ceph_mon_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_ceph_mon"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/ceph-mon/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ceph_mon:/var/lib/ceph"
- - "ceph_mon_config:/etc/ceph"
- - "kolla_logs:/var/log/kolla/"
-
-- include: start_mons.yml
-
-- name: Setting host for cluster files
- set_fact:
- delegate_host: "{{ inventory_hostname }}"
diff --git a/ansible/roles/ceph/tasks/main.yml b/ansible/roles/ceph/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/ceph/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/ceph/tasks/precheck.yml b/ansible/roles/ceph/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/ceph/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/ceph/tasks/pull.yml b/ansible/roles/ceph/tasks/pull.yml
deleted file mode 100644
index fde080f967..0000000000
--- a/ansible/roles/ceph/tasks/pull.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Pulling ceph-mon image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceph_mon_image_full }}"
- when: inventory_hostname in groups['ceph-mon']
-
-- name: Pulling ceph-osd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceph_osd_image_full }}"
- when: inventory_hostname in groups['ceph-osd']
-
-- name: Pulling ceph-rgw image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceph_rgw_image_full }}"
- when: inventory_hostname in groups['ceph-rgw']
diff --git a/ansible/roles/ceph/tasks/reconfigure.yml b/ansible/roles/ceph/tasks/reconfigure.yml
deleted file mode 100644
index ee7a78779f..0000000000
--- a/ansible/roles/ceph/tasks/reconfigure.yml
+++ /dev/null
@@ -1,214 +0,0 @@
----
-- name: Ensuring the ceph_mon container is up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: ceph_mon_container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ceph_mon, group: ceph-mon }
-
-- name: Looking up OSDs for Ceph
- command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
- -m find_disks
- -a "partition_name='KOLLA_CEPH_DATA' match_mode='prefix'"
- register: osd_lookup
- changed_when: "{{ osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
- when: inventory_hostname in groups['ceph-osd']
-
-- name: Reading data from variable
- set_fact:
- osds: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
- when: inventory_hostname in groups['ceph-osd']
-
-- name: Gathering OSD IDs
- command: "cat /var/lib/ceph/osd/{{ item['fs_uuid'] }}/whoami"
- with_items: "{{ osds }}"
- register: osd_ids
- changed_when: False
- failed_when: osd_ids.rc != 0
- when: inventory_hostname in groups['ceph-osd']
-
-- name: Ensuring the ceph_osd container is up
- kolla_docker:
- name: "ceph_osd_{{ item.stdout }}"
- action: "get_container_state"
- register: ceph_osd_container_state
- failed_when: ceph_osd_container_state.Running == false
- when: inventory_hostname in groups['ceph-osd']
- with_items: "{{ osd_ids.results }}"
-
-- name: Ensuring the ceph_rgw container is up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: ceph_rgw_container_state
- failed_when: container_state.Running == false
- when:
- - enable_ceph_rgw | bool
- - inventory_hostname in groups[item.group]
- with_items:
- - { name: ceph_rgw, group: ceph-rgw }
-
-- include: config.yml
-
-- name: Check the configs in ceph_mon container
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: ceph_mon_check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ceph_mon, group: ceph-mon }
-
-- name: Check the configs in the ceph_osd container
- command: docker exec ceph_osd_{{ item.stdout }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: ceph_osd_check_results
- with_items: "{{ osd_ids.results }}"
- when: inventory_hostname in groups['ceph-osd']
-
-- name: Check the configs in ceph_rgw container
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: ceph_rgw_check_results
- when:
- - inventory_hostname in groups[item.group]
- - enable_ceph_rgw | bool
- with_items:
- - { name: ceph_rgw, group: ceph-rgw}
-
-- name: Containers config strategy for ceph_mon container
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: ceph_mon_container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ceph_mon, group: ceph-mon }
-
-- name: Containers config strategy for the ceph_osd containers
- kolla_docker:
- name: "ceph_osd_{{ item.stdout }}"
- action: "get_container_env"
- register: ceph_osd_container_envs
- with_items: "{{ osd_ids.results }}"
- when:
- - inventory_hostname in groups['ceph-osd']
- - osds
-
-- name: Containers config strategy for ceph_rgw container
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: ceph_rgw_container_envs
- when:
- - inventory_hostname in groups[item.group]
- - enable_ceph_rgw | bool
- with_items:
- - { name: ceph_rgw, group: ceph-rgw }
-
-- name: Remove the ceph_mon container
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_ceph_mon_container
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: ceph_mon, group: ceph-mon }]
- - "{{ ceph_mon_container_envs.results }}"
- - "{{ ceph_mon_check_results.results }}"
-
-- name: Remove the ceph_osd containers
- kolla_docker:
- name: "ceph_osd_{{ item.0.stdout }}"
- action: "remove_container"
- register: remove_ceph_osd_containers
- when:
- - inventory_hostname in groups['ceph-osd']
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - osds
- with_together:
- - "{{ osd_ids.results }}"
- - "{{ ceph_osd_container_envs.results }}"
- - "{{ ceph_osd_check_results.results }}"
-
-- name: Remove the ceph_rgw container
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_ceph_rgw_container
- when:
- - enable_ceph_rgw | bool
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: ceph_rgw, group: ceph-rgw }]
- - "{{ ceph_rgw_container_envs.results }}"
- - "{{ ceph_rgw_check_results.results }}"
-
-- include: start_mons.yml
- when:
- - inventory_hostname in groups['ceph-mon']
- - remove_ceph_mon_container.changed
-
-- include: start_osds.yml
- when:
- - inventory_hostname in groups['ceph-osd']
- - remove_ceph_osd_containers.changed
-
-- include: start_rgws.yml
- when:
- - inventory_hostname in groups['ceph-rgw']
- - remove_ceph_rgw_container.changed
-
-- name: Restart the ceph_mon container
- kolla_docker:
- name: "ceph_mon"
- action: "restart_container"
- when:
- - inventory_hostname in groups['ceph-mon']
- - config_strategy == 'COPY_ALWAYS'
- - item[0]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[1]['rc'] == 1
- with_together:
- - "{{ ceph_mon_container_envs.results }}"
- - "{{ ceph_mon_check_results.results }}"
-
-- name: Restart the ceph_osd container
- kolla_docker:
- name: "ceph_osd_{{ item.0.stdout }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups['ceph-osd']
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - osds
- with_together:
- - "{{ osd_ids.results }}"
- - "{{ ceph_osd_container_envs.results }}"
- - "{{ ceph_osd_check_results.results }}"
-
-- name: Restart the ceph_rgw container
- kolla_docker:
- name: "ceph_rgw"
- action: "restart_container"
- when:
- - enable_ceph_rgw | bool
- - inventory_hostname in groups['ceph-rgw']
- - config_strategy == 'COPY_ALWAYS'
- - item[0]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[1]['rc'] == 1
- with_together:
- - "{{ ceph_rgw_container_envs.results }}"
- - "{{ ceph_rgw_check_results.results }}"
diff --git a/ansible/roles/ceph/tasks/start_mons.yml b/ansible/roles/ceph/tasks/start_mons.yml
deleted file mode 100644
index ccc2467bf6..0000000000
--- a/ansible/roles/ceph/tasks/start_mons.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Starting ceph-mon container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
- image: "{{ ceph_mon_image_full }}"
- name: "ceph_mon"
- volumes:
- - "{{ node_config_directory }}/ceph-mon/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ceph_mon:/var/lib/ceph"
- - "ceph_mon_config:/etc/ceph"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/ceph/tasks/start_osds.yml b/ansible/roles/ceph/tasks/start_osds.yml
deleted file mode 100644
index b5d6429874..0000000000
--- a/ansible/roles/ceph/tasks/start_osds.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-- name: Looking up OSDs for Ceph
- command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
- -m find_disks
- -a "partition_name='KOLLA_CEPH_DATA' match_mode='prefix'"
- register: osd_lookup
- changed_when: "{{ osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
-
-- name: Parsing disk info for Ceph OSDs
- set_fact:
- osds: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
-
-- name: Mounting Ceph OSD volumes
- mount:
- src: "UUID={{ item.fs_uuid }}"
- fstype: "{{ ceph_osd_filesystem }}"
- state: mounted
- name: "/var/lib/ceph/osd/{{ item['fs_uuid'] }}"
- opts: "{{ ceph_osd_mount_options }}"
- with_items: "{{ osds }}"
-
-- name: Gathering OSD IDs
- command: "cat /var/lib/ceph/osd/{{ item['fs_uuid'] }}/whoami"
- with_items: "{{ osds }}"
- register: id
- changed_when: False
- failed_when: id.rc != 0
-
-- name: Starting ceph-osd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- OSD_ID: "{{ item.0.stdout }}"
- JOURNAL_PARTITION: "{{ item.1.journal }}"
- image: "{{ ceph_osd_image_full }}"
- name: "ceph_osd_{{ item.0.stdout }}"
- pid_mode: "host"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "/var/lib/ceph/osd/{{ item.1['fs_uuid'] }}:/var/lib/ceph/osd/ceph-{{ item.0.stdout }}"
- - "kolla_logs:/var/log/kolla/"
- with_together:
- - "{{ id.results }}"
- - "{{ osds }}"
- when: osds
diff --git a/ansible/roles/ceph/tasks/start_rgws.yml b/ansible/roles/ceph/tasks/start_rgws.yml
deleted file mode 100644
index 22ad1d2133..0000000000
--- a/ansible/roles/ceph/tasks/start_rgws.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Starting ceph-rgw container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ceph_rgw_image_full }}"
- name: "ceph_rgw"
- volumes:
- - "{{ node_config_directory }}/ceph-rgw/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/ceph/tasks/upgrade.yml b/ansible/roles/ceph/tasks/upgrade.yml
deleted file mode 100644
index bf1c4ffde6..0000000000
--- a/ansible/roles/ceph/tasks/upgrade.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- include: config.yml
-
-- include: start_mons.yml
- when: inventory_hostname in groups['ceph-mon']
-
-- include: start_osds.yml
- when: inventory_hostname in groups['ceph-osd']
-
-- include: start_rgws.yml
- when:
- - inventory_hostname in groups['ceph-rgw']
- - enable_ceph_rgw | bool
diff --git a/ansible/roles/ceph/templates/ceph-mon.json.j2 b/ansible/roles/ceph/templates/ceph-mon.json.j2
deleted file mode 100644
index c1cc964547..0000000000
--- a/ansible/roles/ceph/templates/ceph-mon.json.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{
-{%- if orchestration_engine == 'KUBERNETES' %}
- "command": "/usr/bin/ceph-mon -d -i @MONID@ --public-addr @MONADDR@:6789",
-{%- else %}
- "command": "/usr/bin/ceph-mon -d -i {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }} --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}:6789",
-{%- endif %}
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "ceph",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.admin.keyring",
- "dest": "/etc/ceph/ceph.client.admin.keyring",
- "owner": "ceph",
- "perm": "0600",
- "optional": true
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.mon.keyring",
- "dest": "/etc/ceph/ceph.client.mon.keyring",
- "owner": "ceph",
- "perm": "0600",
- "optional": true
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.radosgw.keyring",
- "dest": "/etc/ceph/ceph.client.radosgw.keyring",
- "owner": "ceph",
- "perm": "0600",
- "optional": true
- },
- {
- "source": "{{ container_config_directory }}/ceph.monmap",
- "dest": "/etc/ceph/ceph.monmap",
- "owner": "ceph",
- "perm": "0600",
- "optional": true
- }
- ]
-}
diff --git a/ansible/roles/ceph/templates/ceph-osd.json.j2 b/ansible/roles/ceph/templates/ceph-osd.json.j2
deleted file mode 100644
index f6d62e8923..0000000000
--- a/ansible/roles/ceph/templates/ceph-osd.json.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-{%- if orchestration_engine == 'KUBERNETES' %}
- "command": "/usr/bin/ceph-osd -f -d --public-addr @HOSTADDR@ --cluster-addr @CLUSTERADDR@",
-{%- else %}
- "command": "/usr/bin/ceph-osd -f -d --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }} --cluster-addr {{ hostvars[inventory_hostname]['ansible_' + cluster_interface]['ipv4']['address'] }}",
-{%- endif %}
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "ceph",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.admin.keyring",
- "dest": "/etc/ceph/ceph.client.admin.keyring",
- "owner": "ceph",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/ceph/templates/ceph-rgw.json.j2 b/ansible/roles/ceph/templates/ceph-rgw.json.j2
deleted file mode 100644
index 7cb0ccdd44..0000000000
--- a/ansible/roles/ceph/templates/ceph-rgw.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway -d",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "ceph",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.admin.keyring",
- "dest": "/etc/ceph/ceph.client.admin.keyring",
- "owner": "ceph",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.radosgw.keyring",
- "dest": "/etc/ceph/ceph.client.radosgw.keyring",
- "owner": "ceph",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/ceph/templates/ceph.conf.j2 b/ansible/roles/ceph/templates/ceph.conf.j2
deleted file mode 100644
index 2d69c8d6de..0000000000
--- a/ansible/roles/ceph/templates/ceph.conf.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-[global]
-log file = /var/log/kolla/ceph/$cluster-$name.log
-log to syslog = true
-err to syslog = true
-
-fsid = {{ ceph_cluster_fsid }}
-{% if orchestration_engine != 'KUBERNETES' %}
-mon initial members = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_' + hostvars[host]['storage_interface']]['ipv4']['address'] }}{% if not loop.last %}, {% endif %}{% endfor %}
-
-mon host = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_' + hostvars[host]['storage_interface']]['ipv4']['address'] }}{% if not loop.last %}, {% endif %}{% endfor %}
-
-mon addr = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_' + hostvars[host]['storage_interface']]['ipv4']['address'] }}:6789{% if not loop.last %}, {% endif %}{% endfor %}
-{% endif %}
-
-auth cluster required = cephx
-auth service required = cephx
-auth client required = cephx
-
-# NOTE(inc0): This line will mean that if ceph was upgraded, it will run as root
-# until contents of /var/lib/ceph are chowned to ceph user.
-# This change was introduced in Jewel version and we should include
-# chown operation in upgrade procedure. https://bugs.launchpad.net/kolla/+bug/1620702
-setuser match path = /var/lib/ceph/$type/$cluster-$id
-
-[mon]
-# NOTE(SamYaple): The monitor files have been known to grow very large. The
-# only fix for that is to compact the files.
-mon compact on start = true
-
-{% if service_name is defined and service_name == 'ceph-rgw' %}
-[client.radosgw.gateway]
-{% if orchestration_engine != 'KUBERNETES' %}
-host = {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}
-rgw frontends = civetweb port={{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ rgw_port }}
-{% endif %}
-keyring = /etc/ceph/ceph.client.radosgw.keyring
-log file = /var/log/kolla/ceph/client.radosgw.gateway.log
-{% endif %}
diff --git a/ansible/roles/ceph_pools.yml b/ansible/roles/ceph_pools.yml
deleted file mode 100644
index ae496e9569..0000000000
--- a/ansible/roles/ceph_pools.yml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-# TODO(SamYaple): Improve failed_when and changed_when tests if possible
-- name: Creating ceph erasure profile
- command: docker exec ceph_mon ceph osd erasure-code-profile set erasure-profile {{ ceph_erasure_profile }}
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: pool_type == "erasure"
-
-- name: Creating ceph ruleset
- command: docker exec ceph_mon ceph osd crush rule create-erasure disks erasure-profile
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: pool_type == "erasure"
-
-- name: Creating ceph ruleset
- command: docker exec ceph_mon ceph osd crush rule create-simple disks {{ ceph_rule }}
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: pool_type == "replicated"
-
-- name: Creating ceph pool
- command: docker exec ceph_mon ceph osd pool create {{ pool_name }} 128 128 {{ pool_type }} {{ 'erasure-profile' if pool_type == 'erasure' else '' }} disks
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
-
-- name: Creating ceph ruleset for cache
- command: docker exec ceph_mon ceph osd crush rule create-simple cache {{ ceph_cache_rule }}
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: "{{ ceph_enable_cache | bool }}"
-
-- name: Creating ceph pool for cache
- command: docker exec ceph_mon ceph osd pool create {{ pool_name }}-cache 128 128 replicated cache
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: "{{ ceph_enable_cache | bool }}"
-
-- name: Adding cache to pool
- command: docker exec ceph_mon ceph osd tier add {{ pool_name }} {{ pool_name }}-cache
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: "{{ ceph_enable_cache | bool }}"
-
-- name: Setting cache-mode
- command: docker exec ceph_mon ceph osd tier cache-mode {{ pool_name }}-cache {{ cache_mode }}
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: "{{ ceph_enable_cache | bool }}"
-
-- name: Setting cache overlay for pool
- command: docker exec ceph_mon ceph osd tier set-overlay {{ pool_name }} {{ pool_name }}-cache
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: "{{ ceph_enable_cache | bool }}"
-
-- name: Setting cache hit_set_type
- command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache hit_set_type bloom
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- failed_when: False
- run_once: True
- when: "{{ ceph_enable_cache | bool }}"
diff --git a/ansible/roles/certificates/tasks/generate.yml b/ansible/roles/certificates/tasks/generate.yml
deleted file mode 100644
index b0014e13aa..0000000000
--- a/ansible/roles/certificates/tasks/generate.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "certificates/private"
-
-- name: Creating SSL configuration file
- template:
- src: "{{ item }}.j2"
- dest: "{{ node_config_directory }}/certificates/{{ item }}"
- with_items:
- - "openssl-kolla.cnf"
-
-- name: Creating Key
- command: creates="{{ item }}" openssl genrsa -out {{ item }}
- with_items:
- - "{{ node_config_directory }}/certificates/private/haproxy.key"
-
-- name: Creating Server Certificate
- command: creates="{{ item }}" openssl req -new -nodes -sha256 -x509 \
- -subj "/C=US/ST=NC/L=RTP/O=kolla/CN={{ kolla_external_fqdn }}" \
- -config {{ node_config_directory }}/certificates/openssl-kolla.cnf \
- -days 3650 \
- -extensions v3_req \
- -key {{ node_config_directory }}/certificates/private/haproxy.key \
- -out {{ item }}
- with_items:
- - "{{ node_config_directory }}/certificates/private/haproxy.crt"
-
-- name: Creating CA Certificate File
- copy:
- src: "{{ node_config_directory }}/certificates/private/haproxy.crt"
- dest: "{{ node_config_directory }}/certificates/haproxy-ca.crt"
-
-- name: Creating Server PEM File
- assemble:
- src: "{{ node_config_directory }}/certificates/private"
- dest: "{{ node_config_directory }}/certificates/haproxy.pem"
diff --git a/ansible/roles/certificates/tasks/main.yml b/ansible/roles/certificates/tasks/main.yml
deleted file mode 100644
index 2403646bcf..0000000000
--- a/ansible/roles/certificates/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: generate.yml
diff --git a/ansible/roles/certificates/tasks/precheck.yml b/ansible/roles/certificates/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/certificates/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/certificates/templates/openssl-kolla.cnf.j2 b/ansible/roles/certificates/templates/openssl-kolla.cnf.j2
deleted file mode 100644
index c9bbce5321..0000000000
--- a/ansible/roles/certificates/templates/openssl-kolla.cnf.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-[req]
-distinguished_name = req_distinguished_name
-req_extensions = v3_req
-
-[req_distinguished_name]
-countryName = US
-stateOrProvinceName = NC
-localityName = RTP
-organizationalUnitName = kolla
-commonName = {{ kolla_external_fqdn }}
-
-[v3_req]
-subjectAltName = @alt_names
-
-[alt_names]
-IP.1 = {{ kolla_external_vip_address }}
diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml
deleted file mode 100644
index 757022f02c..0000000000
--- a/ansible/roles/cinder/defaults/main.yml
+++ /dev/null
@@ -1,81 +0,0 @@
----
-project_name: "cinder"
-
-####################
-# Ceph
-####################
-ceph_cinder_pool_type: "{{ ceph_pool_type }}"
-ceph_cinder_cache_mode: "{{ ceph_cache_mode }}"
-ceph_cinder_backup_pool_type: "{{ ceph_pool_type }}"
-ceph_cinder_backup_cache_mode: "{{ ceph_cache_mode }}"
-
-# Due to Ansible issues on include, you cannot override these variables. Please
-# override the variables they reference instead.
-cinder_pool_name: "{{ ceph_cinder_pool_name }}"
-cinder_pool_type: "{{ ceph_cinder_pool_type }}"
-cinder_cache_mode: "{{ ceph_cinder_cache_mode }}"
-cinder_backup_pool_name: "{{ ceph_cinder_backup_pool_name }}"
-cinder_backup_pool_type: "{{ ceph_cinder_backup_pool_type }}"
-cinder_backup_cache_mode: "{{ ceph_cinder_backup_cache_mode }}"
-
-
-####################
-# Database
-####################
-cinder_database_name: "cinder"
-cinder_database_user: "cinder"
-cinder_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-cinder_volume_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-volume"
-cinder_volume_tag: "{{ openstack_release }}"
-cinder_volume_image_full: "{{ cinder_volume_image }}:{{ cinder_volume_tag }}"
-
-cinder_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-scheduler"
-cinder_scheduler_tag: "{{ openstack_release }}"
-cinder_scheduler_image_full: "{{ cinder_scheduler_image }}:{{ cinder_scheduler_tag }}"
-
-cinder_backup_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-backup"
-cinder_backup_tag: "{{ openstack_release }}"
-cinder_backup_image_full: "{{ cinder_backup_image }}:{{ cinder_backup_tag }}"
-
-cinder_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cinder-api"
-cinder_api_tag: "{{ openstack_release }}"
-cinder_api_image_full: "{{ cinder_api_image }}:{{ cinder_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-cinder_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s"
-cinder_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s"
-cinder_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v1/%(tenant_id)s"
-cinder_v2_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s"
-cinder_v2_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s"
-cinder_v2_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cinder_api_port }}/v2/%(tenant_id)s"
-
-cinder_logging_debug: "{{ openstack_logging_debug }}"
-
-cinder_keystone_user: "cinder"
-
-openstack_cinder_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
-
-
-####################
-# Cinder
-####################
-cinder_backends:
- - name: "rbd-1"
- driver: "ceph"
- enabled: "{{ enable_ceph | bool and cinder_backend_ceph | bool }}"
- - name: "lvm-1"
- driver: "lvm"
- enabled: "{{ enable_cinder_backend_lvm | bool }}"
- - name: "nfs-1"
- driver: "nfs"
- enabled: "{{ enable_cinder_backend_nfs | bool }}"
-
-cinder_enabled_backends: "{{ cinder_backends|selectattr('enabled', 'equalto', true)|list }}"
diff --git a/ansible/roles/cinder/meta/main.yml b/ansible/roles/cinder/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/cinder/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/cinder/tasks/bootstrap.yml b/ansible/roles/cinder/tasks/bootstrap.yml
deleted file mode 100644
index f2a62dc141..0000000000
--- a/ansible/roles/cinder/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Cinder database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ cinder_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['cinder-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Cinder database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ cinder_database_name }}'
- password='{{ cinder_database_password }}'
- host='%'
- priv='{{ cinder_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['cinder-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/cinder/tasks/bootstrap_service.yml b/ansible/roles/cinder/tasks/bootstrap_service.yml
deleted file mode 100644
index 7968f466c9..0000000000
--- a/ansible/roles/cinder/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Cinder bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ cinder_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_cinder"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/cinder-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['cinder-api'][0] }}"
diff --git a/ansible/roles/cinder/tasks/ceph.yml b/ansible/roles/cinder/tasks/ceph.yml
deleted file mode 100644
index abf4606d9b..0000000000
--- a/ansible/roles/cinder/tasks/ceph.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- with_items:
- - "cinder-volume"
- - "cinder-backup"
- when: inventory_hostname in groups['cinder-volume']
-
-- name: Copying over config(s)
- template:
- src: roles/ceph/templates/ceph.conf.j2
- dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
- with_items:
- - "cinder-volume"
- - "cinder-backup"
- when: inventory_hostname in groups['cinder-volume']
-
-- include: ../../ceph_pools.yml
- vars:
- pool_name: "{{ cinder_pool_name }}"
- pool_type: "{{ cinder_pool_type }}"
- cache_mode: "{{ cinder_cache_mode }}"
-
-- include: ../../ceph_pools.yml
- vars:
- pool_name: "{{ cinder_backup_pool_name }}"
- pool_type: "{{ cinder_backup_pool_type }}"
- cache_mode: "{{ cinder_backup_cache_mode }}"
-
-# TODO(SamYaple): Improve changed_when tests
-- name: Pulling cephx keyring for cinder
- command: docker exec ceph_mon ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_cinder_pool_name }}, allow rwx pool={{ ceph_cinder_pool_name }}-cache, allow rwx pool={{ ceph_nova_pool_name }}, allow rwx pool={{ ceph_nova_pool_name }}-cache, allow rx pool={{ ceph_glance_pool_name }}, allow rx pool={{ ceph_glance_pool_name }}-cache'
- register: cephx_key_cinder
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- run_once: True
-
-# TODO(SamYaple): Improve changed_when tests
-- name: Pulling cephx keyring for cinder-backup
- command: docker exec ceph_mon ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_cinder_backup_pool_name }}, allow rwx pool={{ ceph_cinder_backup_pool_name }}-cache'
- register: cephx_key_cinder_backup
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- run_once: True
-
-- name: Pushing cephx keyring
- copy:
- content: "{{ item.content }}\n\r"
- dest: "{{ node_config_directory }}/{{ item.service_name }}/ceph.client.{{ item.key_name }}.keyring"
- mode: "0600"
- with_items:
- - { service_name: "cinder-volume", key_name: "cinder", content: "{{ cephx_key_cinder.stdout }}" }
- - { service_name: "cinder-backup", key_name: "cinder", content: "{{ cephx_key_cinder.stdout }}" }
- - { service_name: "cinder-backup", key_name: "cinder-backup", content: "{{ cephx_key_cinder_backup.stdout }}" }
- when: inventory_hostname in groups['cinder-volume']
diff --git a/ansible/roles/cinder/tasks/check.yml b/ansible/roles/cinder/tasks/check.yml
deleted file mode 100644
index be12310760..0000000000
--- a/ansible/roles/cinder/tasks/check.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Cinder sanity checks
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_sanity
- -a "service=cinder
- project=service
- user=admin
- password={{ cinder_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_cinder_auth }}' }}"
- -e "{'openstack_cinder_auth':{{ openstack_cinder_auth }}}"
- register: cinder_sanity
- changed_when: "{{ cinder_sanity.stdout.find('localhost | SUCCESS => ') != -1 and (cinder_sanity.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: cinder_sanity.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- when: kolla_enable_sanity_cinder | bool
diff --git a/ansible/roles/cinder/tasks/config.yml b/ansible/roles/cinder/tasks/config.yml
deleted file mode 100644
index 49f1879464..0000000000
--- a/ansible/roles/cinder/tasks/config.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "cinder-api"
- - "cinder-backup"
- - "cinder-scheduler"
- - "cinder-volume"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "cinder-api"
- - "cinder-backup"
- - "cinder-scheduler"
- - "cinder-volume"
-
-- name: Copying over cinder.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/cinder.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/cinder.conf"
- - "{{ node_custom_config }}/cinder/{{ item }}.conf"
- - "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/cinder.conf"
- dest: "{{ node_config_directory }}/{{ item }}/cinder.conf"
- with_items:
- - "cinder-api"
- - "cinder-backup"
- - "cinder-scheduler"
- - "cinder-volume"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/cinder/policy.json"
- register: cinder_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/cinder/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "cinder-api"
- - "cinder-backup"
- - "cinder-scheduler"
- - "cinder-volume"
- when:
- cinder_policy.stat.exists
-
-- name: Copying over nfs_shares files for cinder_volume
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/cinder-volume/nfs_shares"
- with_first_found:
- - files:
- - "{{ node_custom_config }}/nfs_shares.j2"
- - "{{ node_custom_config }}/cinder/nfs_shares.j2"
- - "{{ node_custom_config }}/cinder/cinder-volume/nfs_shares.j2"
- - "{{ node_custom_config }}/cinder/{{ inventory_hostname }}/nfs_shares.j2"
- skip: "{{ not enable_cinder_backend_nfs | bool }}"
diff --git a/ansible/roles/cinder/tasks/deploy.yml b/ansible/roles/cinder/tasks/deploy.yml
deleted file mode 100644
index b01e2efae8..0000000000
--- a/ansible/roles/cinder/tasks/deploy.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- include: ceph.yml
- when:
- - (enable_ceph | bool) and (cinder_backend_ceph | bool)
- - inventory_hostname in groups['ceph-mon'] or
- inventory_hostname in groups['cinder-api'] or
- inventory_hostname in groups['cinder-volume'] or
- inventory_hostname in groups['cinder-scheduler'] or
- inventory_hostname in groups['cinder-backup']
-
-- include: external_ceph.yml
- when:
- - (enable_ceph | bool == False) and (cinder_backend_ceph | bool)
- - inventory_hostname in groups['cinder-volume'] or
- inventory_hostname in groups['cinder-backup']
-
-- include: register.yml
- when: inventory_hostname in groups['cinder-api']
-
-- include: config.yml
- when: inventory_hostname in groups['cinder-api'] or
- inventory_hostname in groups['cinder-volume'] or
- inventory_hostname in groups['cinder-scheduler'] or
- inventory_hostname in groups['cinder-backup']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['cinder-api']
-
-- include: start.yml
- when: inventory_hostname in groups['cinder-api'] or
- inventory_hostname in groups['cinder-volume'] or
- inventory_hostname in groups['cinder-scheduler'] or
- inventory_hostname in groups['cinder-backup']
-
-- include: check.yml
- when: inventory_hostname in groups['cinder-api'] or
- inventory_hostname in groups['cinder-volume'] or
- inventory_hostname in groups['cinder-scheduler'] or
- inventory_hostname in groups['cinder-backup']
diff --git a/ansible/roles/cinder/tasks/external_ceph.yml b/ansible/roles/cinder/tasks/external_ceph.yml
deleted file mode 100644
index 45fe480ee5..0000000000
--- a/ansible/roles/cinder/tasks/external_ceph.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/cinder-{{ item }}"
- state: "directory"
- when: inventory_hostname in groups['cinder-volume'] or
- inventory_hostname in groups['cinder-backup']
- with_items:
- - volume
- - backup
-
-- name: Copying over ceph.conf for Cinder
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ node_custom_config }}/cinder/ceph.conf"
- - "{{ node_custom_config }}/cinder/{{ item }}/ceph.conf"
- dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
- with_items:
- - "cinder-backup"
- - "cinder-volume"
-
-- name: Copy over Ceph keyring files for cinder-volume
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/cinder-volume/"
- with_fileglob:
- - "{{ node_custom_config }}/cinder/cinder-volume/ceph.client*"
-
-- name: Copy over Ceph keyring files for cinder-backup
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/cinder-backup/"
- with_fileglob:
- - "{{ node_custom_config }}/cinder/cinder-volume/ceph.client*"
diff --git a/ansible/roles/cinder/tasks/main.yml b/ansible/roles/cinder/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/cinder/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/cinder/tasks/precheck.yml b/ansible/roles/cinder/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/cinder/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/cinder/tasks/pull.yml b/ansible/roles/cinder/tasks/pull.yml
deleted file mode 100644
index 6e2249f2df..0000000000
--- a/ansible/roles/cinder/tasks/pull.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Pulling cinder-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ cinder_api_image_full }}"
- when: inventory_hostname in groups['cinder-api']
-
-- name: Pulling cinder-backup image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ cinder_backup_image_full }}"
- when: inventory_hostname in groups['cinder-backup']
-
-- name: Pulling cinder-scheduler image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ cinder_scheduler_image_full }}"
- when: inventory_hostname in groups['cinder-scheduler']
-
-- name: Pulling cinder-volume image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ cinder_volume_image_full }}"
- when: inventory_hostname in groups['cinder-volume']
diff --git a/ansible/roles/cinder/tasks/reconfigure.yml b/ansible/roles/cinder/tasks/reconfigure.yml
deleted file mode 100644
index af453c8875..0000000000
--- a/ansible/roles/cinder/tasks/reconfigure.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: cinder_api, group: cinder-api }
- - { name: cinder_scheduler, group: cinder-scheduler }
- - { name: cinder_volume, group: cinder-volume }
- - { name: cinder_backup, group: cinder-backup }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: cinder_api, group: cinder-api }
- - { name: cinder_scheduler, group: cinder-scheduler }
- - { name: cinder_volume, group: cinder-volume }
- - { name: cinder_backup, group: cinder-backup }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: cinder_api, group: cinder-api }
- - { name: cinder_scheduler, group: cinder-scheduler }
- - { name: cinder_volume, group: cinder-volume }
- - { name: cinder_backup, group: cinder-backup }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: cinder_api, group: cinder-api },
- { name: cinder_scheduler, group: cinder-scheduler },
- { name: cinder_volume, group: cinder-volume },
- { name: cinder_backup, group: cinder-backup }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: cinder_api, group: cinder-api },
- { name: cinder_scheduler, group: cinder-scheduler },
- { name: cinder_volume, group: cinder-volume },
- { name: cinder_backup, group: cinder-backup }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/cinder/tasks/register.yml b/ansible/roles/cinder/tasks/register.yml
deleted file mode 100644
index 550077034c..0000000000
--- a/ansible/roles/cinder/tasks/register.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Creating the Cinder service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name={{ item.service_name }}
- service_type={{ item.service_type }}
- description='Openstack Block Storage'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_cinder_auth }}' }}"
- -e "{'openstack_cinder_auth':{{ openstack_cinder_auth }}}"
- register: cinder_endpoint
- changed_when: "{{ cinder_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (cinder_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: cinder_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ cinder_admin_endpoint }}', 'service_name': 'cinder', 'service_type': 'volume'}
- - {'interface': 'internal', 'url': '{{ cinder_internal_endpoint }}', 'service_name': 'cinder', 'service_type': 'volume'}
- - {'interface': 'public', 'url': '{{ cinder_public_endpoint }}', 'service_name': 'cinder', 'service_type': 'volume'}
- - {'interface': 'admin', 'url': '{{ cinder_v2_admin_endpoint }}', 'service_name': 'cinderv2', 'service_type': 'volumev2'}
- - {'interface': 'internal', 'url': '{{ cinder_v2_internal_endpoint }}', 'service_name': 'cinderv2', 'service_type': 'volumev2'}
- - {'interface': 'public', 'url': '{{ cinder_v2_public_endpoint }}', 'service_name': 'cinderv2', 'service_type': 'volumev2'}
-
-- name: Creating the Cinder project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=cinder
- password={{ cinder_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_cinder_auth }}' }}"
- -e "{'openstack_cinder_auth':{{ openstack_cinder_auth }}}"
- register: cinder_user
- changed_when: "{{ cinder_user.stdout.find('localhost | SUCCESS => ') != -1 and (cinder_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: cinder_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/cinder/tasks/start.yml b/ansible/roles/cinder/tasks/start.yml
deleted file mode 100644
index db2e19bed5..0000000000
--- a/ansible/roles/cinder/tasks/start.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- name: Starting cinder-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "cinder_api"
- image: "{{ cinder_api_image_full }}"
- volumes:
- - "{{ node_config_directory }}/cinder-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['cinder-api']
-
-- name: Starting cinder-scheduler container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "cinder_scheduler"
- image: "{{ cinder_scheduler_image_full }}"
- volumes:
- - "{{ node_config_directory }}/cinder-scheduler/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['cinder-scheduler']
-
-- name: Prepare volumes list for cinder-volume
- set_fact:
- cinder_volume_mounts:
- - "{{ node_config_directory }}/cinder-volume/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "/run/:/run/:shared"
- - "{% if enable_iscsid | bool %}cinder:/var/lib/cinder{% endif %}"
- - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- - "kolla_logs:/var/log/kolla/"
-
-- name: Starting cinder-volume container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "cinder_volume"
- image: "{{ cinder_volume_image_full }}"
- privileged: True
- ipc_mode: "host"
- volumes: '{{ cinder_volume_mounts | reject("equalto", "") | list}}'
- when: inventory_hostname in groups['cinder-volume']
-
-- name: Prepare volumes list for cinder-backup
- set_fact:
- cinder_backup_mounts:
- - "{{ node_config_directory }}/cinder-backup/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "/run/:/run/:shared"
- - "{% if enable_cinder_backend_lvm | bool %}cinder:/var/lib/cinder{% endif %}"
- - "{% if enable_cinder_backend_lvm | bool %}iscsi_info:/etc/iscsi{% endif %}"
- - "kolla_logs:/var/log/kolla/"
-
-- name: Starting cinder-backup container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "cinder_backup"
- image: "{{ cinder_backup_image_full }}"
- privileged: True
- volumes: '{{ cinder_backup_mounts | reject("equalto", "") | list}}'
- when: inventory_hostname in groups['cinder-backup']
diff --git a/ansible/roles/cinder/tasks/upgrade.yml b/ansible/roles/cinder/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/cinder/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/cinder/templates/cinder-api.json.j2 b/ansible/roles/cinder/templates/cinder-api.json.j2
deleted file mode 100644
index 27825ed506..0000000000
--- a/ansible/roles/cinder/templates/cinder-api.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "cinder-api --config-file /etc/cinder/cinder.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/cinder.conf",
- "dest": "/etc/cinder/cinder.conf",
- "owner": "cinder",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/cinder/templates/cinder-backup.json.j2 b/ansible/roles/cinder/templates/cinder-backup.json.j2
deleted file mode 100644
index d42428ac16..0000000000
--- a/ansible/roles/cinder/templates/cinder-backup.json.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "command": "cinder-backup --config-file /etc/cinder/cinder.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/cinder.conf",
- "dest": "/etc/cinder/cinder.conf",
- "owner": "cinder",
- "perm": "0600"
- }{% if cinder_backend_ceph | bool %},
- {
- "source": "{{ container_config_directory }}/ceph.*",
- "dest": "/etc/ceph/",
- "owner": "cinder",
- "perm": "0700",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/lib/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/cinder/templates/cinder-scheduler.json.j2 b/ansible/roles/cinder/templates/cinder-scheduler.json.j2
deleted file mode 100644
index b5ef7b5481..0000000000
--- a/ansible/roles/cinder/templates/cinder-scheduler.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "cinder-scheduler --config-file /etc/cinder/cinder.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/cinder.conf",
- "dest": "/etc/cinder/cinder.conf",
- "owner": "cinder",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/cinder/templates/cinder-volume.json.j2 b/ansible/roles/cinder/templates/cinder-volume.json.j2
deleted file mode 100644
index 5dc729b145..0000000000
--- a/ansible/roles/cinder/templates/cinder-volume.json.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "command": "cinder-volume --config-file /etc/cinder/cinder.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/cinder.conf",
- "dest": "/etc/cinder/cinder.conf",
- "owner": "cinder",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.*",
- "dest": "/etc/ceph/",
- "owner": "cinder",
- "perm": "0700",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "cinder",
- "perm": "0600",
- "optional": {{ (not cinder_backend_ceph | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/nfs_shares",
- "dest": "/etc/cinder/nfs_shares",
- "owner": "cinder",
- "perm": "0600",
- "optional": {{ (not enable_cinder_backend_nfs | bool) | string | lower }}
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/cinder",
- "owner": "cinder:cinder",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/cinder/templates/cinder.conf.j2 b/ansible/roles/cinder/templates/cinder.conf.j2
deleted file mode 100644
index 16f07b976c..0000000000
--- a/ansible/roles/cinder/templates/cinder.conf.j2
+++ /dev/null
@@ -1,110 +0,0 @@
-[DEFAULT]
-debug = {{ cinder_logging_debug }}
-
-log_dir = /var/log/kolla/cinder
-use_forwarded_for = true
-
-# Set use_stderr to False or the logs will also be sent to stderr
-# and collected by Docker
-use_stderr = False
-
-enable_v1_api=false
-osapi_volume_workers = {{ openstack_service_workers }}
-volume_name_template = volume-%s
-
-glance_api_servers = {% for host in groups['glance-api'] %}{{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}glance-api{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ glance_api_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-glance_num_retries = {{ groups['glance-api'] | length }}
-glance_api_version = 2
-
-os_region_name = {{ openstack_region_name }}
-
-{% if cinder_enabled_backends %}
-enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
-{% endif %}
-
-{% if service_name == "cinder-backup" and enable_ceph | bool and cinder_backend_ceph | bool %}
-backup_driver = cinder.backup.drivers.ceph
-backup_ceph_conf = /etc/ceph/ceph.conf
-backup_ceph_user = cinder-backup
-backup_ceph_chunk_size = 134217728
-backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
-backup_ceph_stripe_unit = 0
-backup_ceph_stripe_count = 0
-restore_discard_excess_bytes = true
-{% endif %}
-
-osapi_volume_listen = {{ api_interface_address }}
-osapi_volume_listen_port = {{ cinder_api_port }}
-
-api_paste_config = /etc/cinder/api-paste.ini
-nova_catalog_info = compute:nova:internalURL
-
-auth_strategy = keystone
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_messaging_notifications]
-{% if enable_ceilometer | bool %}
-driver = messagingv2
-{% endif %}
-
-[database]
-connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{% if orchestration_engine == 'KUBERNETES' %}{{ cinder_database_address }}{% else %}{{ cinder_database_address }}{% endif %}/{{ cinder_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ cinder_keystone_user }}
-password = {{ cinder_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[oslo_concurrency]
-lock_path = /var/lib/cinder/tmp
-
-
-{% if enable_cinder_backend_lvm | bool %}
-[lvm-1]
-volume_group = {{ cinder_volume_group }}
-volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-volume_backend_name = lvm-1
-iscsi_helper = tgtadm
-iscsi_protocol = iscsi
-
-{% elif enable_ceph | bool and cinder_backend_ceph | bool %}
-[rbd-1]
-volume_driver = cinder.volume.drivers.rbd.RBDDriver
-rbd_pool = {{ ceph_cinder_pool_name }}
-rbd_ceph_conf = /etc/ceph/ceph.conf
-rbd_flatten_volume_from_snapshot = false
-rbd_max_clone_depth = 5
-rbd_store_chunk_size = 4
-rados_connect_timeout = -1
-rbd_user = cinder
-rbd_secret_uuid = {{ rbd_secret_uuid }}
-report_discard_supported = True
-{% endif %}
-
-{% if enable_cinder_backend_nfs | bool %}
-[nfs-1]
-volume_driver = cinder.volume.drivers.nfs.NfsDriver
-volume_backend_name = nfs-1
-nfs_shares_config = /etc/cinder/nfs_shares
-{% endif %}
-
-[privsep_entrypoint]
-helper_command=sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf
diff --git a/ansible/roles/cloudkitty/defaults/main.yml b/ansible/roles/cloudkitty/defaults/main.yml
deleted file mode 100644
index a344866f20..0000000000
--- a/ansible/roles/cloudkitty/defaults/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-project_name: "cloudkitty"
-
-####################
-# Database
-####################
-cloudkitty_database_name: "cloudkitty"
-cloudkitty_database_user: "cloudkitty"
-cloudkitty_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-cloudkitty_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cloudkitty-api"
-cloudkitty_api_tag: "{{ openstack_release }}"
-cloudkitty_api_image_full: "{{ cloudkitty_api_image }}:{{ cloudkitty_api_tag }}"
-
-cloudkitty_processor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cloudkitty-processor"
-cloudkitty_processor_tag: "{{ openstack_release }}"
-cloudkitty_processor_image_full: "{{ cloudkitty_processor_image }}:{{ cloudkitty_processor_tag }}"
-
-####################
-# OpenStack
-####################
-cloudkitty_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ cloudkitty_api_port }}"
-cloudkitty_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ cloudkitty_api_port }}"
-cloudkitty_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ cloudkitty_api_port }}"
-
-cloudkitty_logging_debug: "{{ openstack_logging_debug }}"
-
-cloudkitty_keystone_user: "cloudkitty"
-
-openstack_cloudkitty_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
-
-####################
-# Cloudkitty
-####################
-cloudkitty_openstack_keystone_default_role: "rating"
diff --git a/ansible/roles/cloudkitty/meta/main.yml b/ansible/roles/cloudkitty/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/cloudkitty/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/cloudkitty/tasks/bootstrap.yml b/ansible/roles/cloudkitty/tasks/bootstrap.yml
deleted file mode 100644
index 4e9de7a173..0000000000
--- a/ansible/roles/cloudkitty/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Cloudkitty database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ cloudkitty_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['cloudkitty-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Cloudkitty database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ cloudkitty_database_name }}'
- password='{{ cloudkitty_database_password }}'
- host='%'
- priv='{{ cloudkitty_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['cloudkitty-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/cloudkitty/tasks/bootstrap_service.yml b/ansible/roles/cloudkitty/tasks/bootstrap_service.yml
deleted file mode 100644
index b6cdfac0a9..0000000000
--- a/ansible/roles/cloudkitty/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Cloudkitty bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ cloudkitty_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_cloudkitty"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/cloudkitty-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['cloudkitty-api'][0] }}"
diff --git a/ansible/roles/cloudkitty/tasks/config.yml b/ansible/roles/cloudkitty/tasks/config.yml
deleted file mode 100644
index d614334ca4..0000000000
--- a/ansible/roles/cloudkitty/tasks/config.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "cloudkitty-api"
- - "cloudkitty-processor"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "cloudkitty-api"
- - "cloudkitty-processor"
-
-- name: Copying over cloudkitty.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/cloudkitty.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/cloudkitty.conf"
- - "{{ node_custom_config }}/cloudkitty/{{ item }}.conf"
- - "{{ node_custom_config }}/cloudkitty/{{ inventory_hostname }}/cloudkitty.conf"
- dest: "{{ node_config_directory }}/{{ item }}/cloudkitty.conf"
- with_items:
- - "cloudkitty-api"
- - "cloudkitty-processor"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/cloudkitty/policy.json"
- register: cloudkitty_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/cloudkitty/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "cloudkitty-api"
- - "cloudkitty-processor"
- when:
- cloudkitty_policy.stat.exists
diff --git a/ansible/roles/cloudkitty/tasks/deploy.yml b/ansible/roles/cloudkitty/tasks/deploy.yml
deleted file mode 100644
index 5cff1aee2a..0000000000
--- a/ansible/roles/cloudkitty/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['cloudkitty-api']
-
-- include: config.yml
- when: inventory_hostname in groups['cloudkitty-api'] or
- inventory_hostname in groups['cloudkitty-processor']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['cloudkitty-api']
-
-- include: start.yml
- when: inventory_hostname in groups['cloudkitty-api'] or
- inventory_hostname in groups['cloudkitty-processor']
diff --git a/ansible/roles/cloudkitty/tasks/main.yml b/ansible/roles/cloudkitty/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/cloudkitty/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/cloudkitty/tasks/precheck.yml b/ansible/roles/cloudkitty/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/cloudkitty/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/cloudkitty/tasks/pull.yml b/ansible/roles/cloudkitty/tasks/pull.yml
deleted file mode 100644
index efd7bd7df5..0000000000
--- a/ansible/roles/cloudkitty/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling cloudkitty-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ cloudkitty_api_image_full }}"
- when: inventory_hostname in groups['cloudkitty-api']
-
-- name: Pulling cloudkitty-processor image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ cloudkitty_processor_image_full }}"
- when: inventory_hostname in groups['cloudkitty-processor']
diff --git a/ansible/roles/cloudkitty/tasks/reconfigure.yml b/ansible/roles/cloudkitty/tasks/reconfigure.yml
deleted file mode 100644
index 6cd265dfc1..0000000000
--- a/ansible/roles/cloudkitty/tasks/reconfigure.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: cloudkitty_api, group: cloudkitty-api }
- - { name: cloudkitty_processor, group: cloudkitty-processor }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: cloudkitty_api, group: cloudkitty-api }
- - { name: cloudkitty_processor, group: cloudkitty-processor }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: cloudkitty_api, group: cloudkitty-api }
- - { name: cloudkitty_processor, group: cloudkitty-processor }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: cloudkitty_api, group: cloudkitty-api },
- { name: cloudkitty_processor, group: cloudkitty-processor }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: cloudkitty_api, group: cloudkitty-api },
- { name: cloudkitty_processor, group: cloudkitty-processor }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/cloudkitty/tasks/register.yml b/ansible/roles/cloudkitty/tasks/register.yml
deleted file mode 100644
index aa608c9282..0000000000
--- a/ansible/roles/cloudkitty/tasks/register.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Creating the Cloudkitty service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=cloudkitty
- service_type=rating
- description='OpenStack Rating'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_cloudkitty_auth }}' }}"
- -e "{'openstack_cloudkitty_auth':{{ openstack_cloudkitty_auth }}}"
- register: cloudkitty_endpoint
- changed_when: "{{ cloudkitty_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (cloudkitty_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: cloudkitty_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ cloudkitty_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ cloudkitty_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ cloudkitty_public_endpoint }}'}
-
-- name: Creating the Cloudkitty project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=cloudkitty
- password={{ cloudkitty_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_cloudkitty_auth }}' }}"
- -e "{'openstack_cloudkitty_auth':{{ openstack_cloudkitty_auth }}}"
- register: cloudkitty_user
- changed_when: "{{ cloudkitty_user.stdout.find('localhost | SUCCESS => ') != -1 and (cloudkitty_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: cloudkitty_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
-
-- name: Creating the rating role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m os_keystone_role
- -a "name={{ cloudkitty_openstack_keystone_default_role }}
- auth={{ '{{ openstack_cloudkitty_auth }}' }}"
- -e "{'openstack_cloudkitty_auth':{{ openstack_cloudkitty_auth }}}"
- register: cloudkitty_role
- changed_when: "{{ cloudkitty_role.stdout.find('localhost | SUCCESS => ') != -1 and (cloudkitty_role.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: cloudkitty_role.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/cloudkitty/tasks/start.yml b/ansible/roles/cloudkitty/tasks/start.yml
deleted file mode 100644
index 5b4741d73b..0000000000
--- a/ansible/roles/cloudkitty/tasks/start.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Starting cloudkitty-processor container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ cloudkitty_processor_image_full }}"
- name: "cloudkitty_processor"
- volumes:
- - "{{ node_config_directory }}/cloudkitty-processor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['cloudkitty-processor']
-
-- name: Starting cloudkitty-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ cloudkitty_api_image_full }}"
- name: "cloudkitty_api"
- volumes:
- - "{{ node_config_directory }}/cloudkitty-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['cloudkitty-api']
diff --git a/ansible/roles/cloudkitty/tasks/upgrade.yml b/ansible/roles/cloudkitty/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/cloudkitty/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/cloudkitty/templates/cloudkitty-api.json.j2 b/ansible/roles/cloudkitty/templates/cloudkitty-api.json.j2
deleted file mode 100644
index 2a57a51af5..0000000000
--- a/ansible/roles/cloudkitty/templates/cloudkitty-api.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "cloudkitty-api --config-file /etc/cloudkitty/cloudkitty.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/cloudkitty.conf",
- "dest": "/etc/cloudkitty/cloudkitty.conf",
- "owner": "cloudkitty",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/cloudkitty/templates/cloudkitty-processor.json.j2 b/ansible/roles/cloudkitty/templates/cloudkitty-processor.json.j2
deleted file mode 100644
index 3dd845c33b..0000000000
--- a/ansible/roles/cloudkitty/templates/cloudkitty-processor.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "cloudkitty-processor --config-file /etc/cloudkitty/cloudkitty.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/cloudkitty.conf",
- "dest": "/etc/cloudkitty/cloudkitty.conf",
- "owner": "cloudkitty",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2 b/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
deleted file mode 100644
index cd347b2697..0000000000
--- a/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-[DEFAULT]
-debug = {{ cloudkitty_logging_debug }}
-
-log_dir = /var/log/kolla/cloudkitty
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[database]
-connection = mysql+pymysql://{{ cloudkitty_database_user }}:{{ cloudkitty_database_password }}@{{ cloudkitty_database_address}}/{{ cloudkitty_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ cloudkitty_keystone_user }}
-password = {{ cloudkitty_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_concurrency]
-lock_path = /var/lib/cloudkitty/tmp
-
-[collect]
-collector = {{ cloudkitty_collector_backend }}
-services = compute,image{% if enable_cinder | bool %},volume{% endif %}, network_bw_out, network_bw_in, network_floating
-
-[oslo_messaging_notifications]
-driver = messagingv2
-topics = notifications
-
-[keystone_fetcher]
-keystone_version = 3
-auth_section = keystone_authtoken
-
-{% if cloudkitty_collector_backend == "gnocchi" %}
-[gnocchi_collector]
-auth_section = keystone_authtoken
-{% elif cloudkitty_collector_backend == "ceilometer" %}
-[ceilometer_collector]
-auth_section = keystone_authtoken
-{% endif %}
-
-[api]
-host_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-port = {{ cloudkitty_api_port }}
diff --git a/ansible/roles/collectd/defaults/main.yml b/ansible/roles/collectd/defaults/main.yml
deleted file mode 100644
index 93cfd4cf3c..0000000000
--- a/ansible/roles/collectd/defaults/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-####################
-# Docker
-####################
-collectd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-collectd"
-collectd_tag: "{{ openstack_release }}"
-collectd_image_full: "{{ collectd_image }}:{{ collectd_tag }}"
-
-####################
-# OpenStack
-####################
-collectd_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/collectd/meta/main.yml b/ansible/roles/collectd/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/collectd/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/collectd/tasks/config.yml b/ansible/roles/collectd/tasks/config.yml
deleted file mode 100644
index af9417b7e0..0000000000
--- a/ansible/roles/collectd/tasks/config.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Ensuring collectd config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "collectd"
-
-- name: Ensuring Plugin directory exist
- file:
- path: "{{ node_config_directory }}/{{ item }}/collectd.conf.d"
- state: "directory"
- recurse: yes
- with_items:
- - "collectd"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "collectd"
diff --git a/ansible/roles/collectd/tasks/deploy.yml b/ansible/roles/collectd/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/collectd/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/collectd/tasks/main.yml b/ansible/roles/collectd/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/collectd/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/collectd/tasks/precheck.yml b/ansible/roles/collectd/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/collectd/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/collectd/tasks/pull.yml b/ansible/roles/collectd/tasks/pull.yml
deleted file mode 100644
index d182b27b67..0000000000
--- a/ansible/roles/collectd/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling collectd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ collectd_image_full }}"
diff --git a/ansible/roles/collectd/tasks/reconfigure.yml b/ansible/roles/collectd/tasks/reconfigure.yml
deleted file mode 100644
index 602850983e..0000000000
--- a/ansible/roles/collectd/tasks/reconfigure.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: collectd, group: collectd }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: collectd, group: collectd }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: collectd, group: collectd }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: collectd, group: collectd }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: collectd, group: collectd }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/collectd/tasks/start.yml b/ansible/roles/collectd/tasks/start.yml
deleted file mode 100644
index 8675c00eb8..0000000000
--- a/ansible/roles/collectd/tasks/start.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Starting collectd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ collectd_image_full }}"
- name: "collectd"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/collectd/:{{ container_config_directory }}/:ro"
- - "{{node_config_directory }}/collectd/collectd.conf.d/:/etc/collectd/collectd.conf.d/"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "/sys/:/sys/:ro"
- - "/dev/:/dev/:ro"
diff --git a/ansible/roles/collectd/tasks/upgrade.yml b/ansible/roles/collectd/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/collectd/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/collectd/templates/collectd.json.j2 b/ansible/roles/collectd/templates/collectd.json.j2
deleted file mode 100644
index 7d70179270..0000000000
--- a/ansible/roles/collectd/templates/collectd.json.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "command": "/usr/sbin/collectd -f"
-}
diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml
deleted file mode 100644
index 8e93160512..0000000000
--- a/ansible/roles/common/defaults/main.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-# Due to the way we do our inventory, ansible does not pick up on the fact that
-# this role has already run. We can track what has run with host facts.
-common_run: False
-
-####################
-# Docker
-####################
-kolla_toolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kolla-toolbox"
-kolla_toolbox_tag: "{{ openstack_release }}"
-kolla_toolbox_image_full: "{{ kolla_toolbox_image }}:{{ kolla_toolbox_tag }}"
-
-heka_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heka"
-heka_tag: "{{ openstack_release }}"
-heka_image_full: "{{ heka_image }}:{{ heka_tag }}"
-
-cron_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-cron"
-cron_tag: "{{ openstack_release }}"
-cron_image_full: "{{ cron_image }}:{{ cron_tag }}"
-
-fluentd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-fluentd"
-fluentd_tag: "{{ openstack_release }}"
-fluentd_image_full: "{{ fluentd_image }}:{{ fluentd_tag }}"
-
-kubetoolbox_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kubetoolbox"
-kubetoolbox_tag: "{{ openstack_release }}"
-kubetoolbox_image_full: "{{ kubetoolbox_image }}:{{ kubetoolbox_tag }}"
diff --git a/ansible/roles/common/tasks/bootstrap.yml b/ansible/roles/common/tasks/bootstrap.yml
deleted file mode 100644
index 6c889e0870..0000000000
--- a/ansible/roles/common/tasks/bootstrap.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Creating log volume
- kolla_docker:
- action: "create_volume"
- common_options: "{{ docker_common_options }}"
- name: "kolla_logs"
- register: kolla_logs_volume
diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml
deleted file mode 100644
index 6124aa2b3d..0000000000
--- a/ansible/roles/common/tasks/config.yml
+++ /dev/null
@@ -1,101 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "heka"
- - "kolla-toolbox"
- - "cron"
- - "cron/logrotate"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "heka"
- - "kolla-toolbox"
- - "cron"
-
-- name: Copying over heka config files
- template:
- src: "heka-{{ item.src|default(item.name) }}.toml.j2"
- dest: "{{ node_config_directory }}/heka/heka-{{ item.name }}.toml"
- when: item.enabled | bool
- with_items:
- - { name: "aodh", enabled: "{{ enable_aodh }}" }
- - { name: "barbican", enabled: "{{ enable_barbican }}"}
- - { name: "elasticsearch", enabled: "{{ enable_elasticsearch }}" }
- - { name: "global", enabled: "yes" }
- - { name: "gnocchi", enabled: "{{ enable_gnocchi }}" }
- - { name: "grafana", enabled: "{{ enable_grafana }}" }
- - { name: "haproxy", enabled: "{{ enable_haproxy }}" }
- - { name: "horizon", enabled: "{{ enable_horizon }}" }
- - { name: "keepalived", enabled: "{{ enable_haproxy }}" }
- - { name: "keystone", enabled: "{{ enable_keystone }}" }
- - { name: "mariadb", enabled: "{{ enable_mariadb }}" }
- - { name: "openstack", enabled: "yes" }
- - { name: "rabbitmq", enabled: "{{ enable_rabbitmq }}" }
- - { name: "swift-account-auditor", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-account-reaper", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-account-replicator", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-account-server", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-container-auditor", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-container-replicator", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-container-server", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-container-updater", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-object-auditor", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-object-expirer", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-object-replicator", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-object-server", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-object-updater", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-proxy-server", src: "swift", enabled: "{{ enable_swift }}" }
- - { name: "swift-rsyncd", src: "swift", enabled: "{{ enable_swift }}" }
-
-- name: Heka custom config file exists
- stat: "path={{ node_custom_config }}/heka/heka-custom.toml"
- register: heka_custom_stat_result
-
-- name: Copying over heka custom config file
- template:
- src: "{{ node_custom_config }}/heka/heka-custom.toml"
- dest: "{{ node_config_directory }}/heka/heka-custom.toml"
- when: heka_custom_stat_result.stat.exists == true
-
-- name: Copying over cron logrotate config files
- template:
- src: "cron-logrotate-{{ item.name }}.conf.j2"
- dest: "{{ node_config_directory }}/cron/logrotate/{{ item.name }}.conf"
- when: item.enabled | bool
- with_items:
- - { name: "ansible", enabled: "yes" }
- - { name: "aodh", enabled: "{{ enable_aodh }}" }
- - { name: "barbican", enabled: "{{ enable_barbican }}" }
- - { name: "ceilometer", enabled: "{{ enable_ceilometer }}" }
- - { name: "cinder", enabled: "{{ enable_cinder }}" }
- - { name: "cloudkitty", enabled: "{{ enable_cloudkitty }}" }
- - { name: "elasticsearch", enabled: "{{ enable_elasticsearch }}" }
- - { name: "glance", enabled: "{{ enable_glance }}" }
- - { name: "global", enabled: "yes" }
- - { name: "gnocchi", enabled: "{{ enable_gnocchi }}" }
- - { name: "grafana", enabled: "{{ enable_grafana }}" }
- - { name: "haproxy", enabled: "{{ enable_haproxy }}" }
- - { name: "heat", enabled: "{{ enable_heat }}" }
- - { name: "iscsid", enabled: "{{ enable_iscsid }}" }
- - { name: "keepalived", enabled: "{{ enable_haproxy }}" }
- - { name: "keystone", enabled: "{{ enable_keystone }}" }
- - { name: "magnum", enabled: "{{ enable_magnum }}" }
- - { name: "manila", enabled: "{{ enable_manila }}" }
- - { name: "mariadb", enabled: "{{ enable_mariadb }}" }
- - { name: "mistral", enabled: "{{ enable_mistral }}" }
- - { name: "murano", enabled: "{{ enable_murano }}" }
- - { name: "neutron", enabled: "{{ enable_neutron }}" }
- - { name: "nova", enabled: "{{ enable_nova }}" }
- - { name: "rabbitmq", enabled: "{{ enable_rabbitmq }}" }
- - { name: "sahara", enabled: "{{ enable_sahara }}" }
- - { name: "searchlight", enabled: "{{ enable_searchlight }}" }
- - { name: "senlin", enabled: "{{ enable_senlin }}" }
- - { name: "swift", enabled: "{{ enable_swift }}" }
- - { name: "watcher", enabled: "{{ enable_watcher }}" }
diff --git a/ansible/roles/common/tasks/deploy.yml b/ansible/roles/common/tasks/deploy.yml
deleted file mode 100644
index 98daa4021c..0000000000
--- a/ansible/roles/common/tasks/deploy.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap.yml
-
-- include: start.yml
diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml
deleted file mode 100644
index c65c9e8588..0000000000
--- a/ansible/roles/common/tasks/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: "{{ action }}.yml"
- tags: common
- when: not common_run
-
-- name: Registering common role has run
- set_fact:
- common_run: True
- tags: common
- when: not common_run
diff --git a/ansible/roles/common/tasks/precheck.yml b/ansible/roles/common/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/common/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/common/tasks/pull.yml b/ansible/roles/common/tasks/pull.yml
deleted file mode 100644
index e19971ce68..0000000000
--- a/ansible/roles/common/tasks/pull.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Pulling kolla-toolbox image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ kolla_toolbox_image_full }}"
-
-- name: Pulling heka image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ heka_image_full }}"
-
-- name: Pulling cron image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ cron_image_full }}"
diff --git a/ansible/roles/common/tasks/reconfigure.yml b/ansible/roles/common/tasks/reconfigure.yml
deleted file mode 100644
index 961c656fa3..0000000000
--- a/ansible/roles/common/tasks/reconfigure.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- name: Ensuring the heka container is up
- kolla_docker:
- name: "heka"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
-
-- include: config.yml
-
-- name: Checking the heka config
- command: docker exec heka /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_result
-
-- name: Getting the heka container config strategy
- kolla_docker:
- name: "heka"
- action: "get_container_env"
- register: container_env
-
-- name: Removing the heka container
- kolla_docker:
- name: "heka"
- action: "remove_container"
- register: remove_container
- when:
- - config_strategy == "COPY_ONCE" or container_env["KOLLA_CONFIG_STRATEGY"] == "COPY_ONCE"
- - check_result.rc == 1
-
-- include: start.yml
- when: remove_container.changed
-
-- name: Restarting the heka container
- kolla_docker:
- name: "heka"
- action: "restart_container"
- when:
- - config_strategy == "COPY_ALWAYS"
- - container_env["KOLLA_CONFIG_STRATEGY"] == "COPY_ALWAYS"
- - check_result.rc == 1
diff --git a/ansible/roles/common/tasks/start.yml b/ansible/roles/common/tasks/start.yml
deleted file mode 100644
index 963cd5de1c..0000000000
--- a/ansible/roles/common/tasks/start.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Starting heka container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- SKIP_LOG_SETUP: "true"
- image: "{{ heka_image_full }}"
- name: "heka"
- volumes:
- - "{{ node_config_directory }}/heka/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "heka:/var/cache/hekad"
- - "heka_socket:/var/lib/kolla/heka/"
-
-- name: Starting kolla-toolbox container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- ANSIBLE_NOCOLOR: "1"
- ANSIBLE_LIBRARY: "/usr/share/ansible"
- image: "{{ kolla_toolbox_image_full }}"
- name: "kolla_toolbox"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/kolla-toolbox/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "/run/:/run/:shared"
- - "kolla_logs:/var/log/kolla/"
-
-- name: Initializing toolbox container using normal user
- command: docker exec -t kolla_toolbox /usr/bin/ansible --version
- changed_when: false
-
-- name: Starting cron container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ cron_image_full }}"
- name: "cron"
- volumes:
- - "{{ node_config_directory }}/cron/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/common/tasks/upgrade.yml b/ansible/roles/common/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/common/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/common/templates/admin-openrc.sh.j2 b/ansible/roles/common/templates/admin-openrc.sh.j2
deleted file mode 100644
index ef3890043e..0000000000
--- a/ansible/roles/common/templates/admin-openrc.sh.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-export OS_PROJECT_DOMAIN_NAME=default
-export OS_USER_DOMAIN_NAME=default
-export OS_PROJECT_NAME=admin
-export OS_TENANT_NAME=admin
-export OS_USERNAME=admin
-export OS_PASSWORD={{ keystone_admin_password }}
-export OS_AUTH_URL={{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3
-export OS_IDENTITY_API_VERSION=3
-{% if kolla_enable_tls_external | bool and kolla_external_fqdn_cacert %}
-export OS_CACERT={{ kolla_external_fqdn_cacert }}
-{% endif %}
diff --git a/ansible/roles/common/templates/cron-logrotate-ansible.conf.j2 b/ansible/roles/common/templates/cron-logrotate-ansible.conf.j2
deleted file mode 100644
index 3bc0f435b0..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-ansible.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/ansible.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-aodh.conf.j2 b/ansible/roles/common/templates/cron-logrotate-aodh.conf.j2
deleted file mode 100644
index fd333f6e41..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-aodh.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/aodh/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-barbican.conf.j2 b/ansible/roles/common/templates/cron-logrotate-barbican.conf.j2
deleted file mode 100644
index eaa5f0f494..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-barbican.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/barbican/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-ceilometer.conf.j2 b/ansible/roles/common/templates/cron-logrotate-ceilometer.conf.j2
deleted file mode 100644
index 5993500a5b..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-ceilometer.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/ceilometer/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-cinder.conf.j2 b/ansible/roles/common/templates/cron-logrotate-cinder.conf.j2
deleted file mode 100644
index b5e59abd26..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-cinder.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/cinder/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-cloudkitty.conf.j2 b/ansible/roles/common/templates/cron-logrotate-cloudkitty.conf.j2
deleted file mode 100644
index 2dada642d8..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-cloudkitty.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/cloudkitty/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-elasticsearch.conf.j2 b/ansible/roles/common/templates/cron-logrotate-elasticsearch.conf.j2
deleted file mode 100644
index cbdd2c780a..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-elasticsearch.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/elasticsearch/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-glance.conf.j2 b/ansible/roles/common/templates/cron-logrotate-glance.conf.j2
deleted file mode 100644
index 5b20985bd0..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-glance.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/glance/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-global.conf.j2 b/ansible/roles/common/templates/cron-logrotate-global.conf.j2
deleted file mode 100644
index c21e3d1c07..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-global.conf.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-weekly
-
-rotate 6
-
-copytruncate
-
-compress
-
-delaycompress
-
-notifempty
-
-missingok
-
-minsize 30M
-
-maxsize 100M
-
-su root kolla
-
-include /etc/logrotate.d
diff --git a/ansible/roles/common/templates/cron-logrotate-gnocchi.conf.j2 b/ansible/roles/common/templates/cron-logrotate-gnocchi.conf.j2
deleted file mode 100644
index d6d5d6f449..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-gnocchi.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/gnocchi/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-grafana.conf.j2 b/ansible/roles/common/templates/cron-logrotate-grafana.conf.j2
deleted file mode 100644
index 2a3a0889a4..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-grafana.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/grafana/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-haproxy.conf.j2 b/ansible/roles/common/templates/cron-logrotate-haproxy.conf.j2
deleted file mode 100644
index 7af26dd38b..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-haproxy.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/haproxy/haproxy.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-heat.conf.j2 b/ansible/roles/common/templates/cron-logrotate-heat.conf.j2
deleted file mode 100644
index 7f5e89a77e..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-heat.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/heat/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-iscsid.conf.j2 b/ansible/roles/common/templates/cron-logrotate-iscsid.conf.j2
deleted file mode 100644
index 47fd93d1a0..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-iscsid.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/iscsi/iscsi.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-keepalived.conf.j2 b/ansible/roles/common/templates/cron-logrotate-keepalived.conf.j2
deleted file mode 100644
index 6808746347..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-keepalived.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/keepalived/keepalived.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-keystone.conf.j2 b/ansible/roles/common/templates/cron-logrotate-keystone.conf.j2
deleted file mode 100644
index 79cf10b347..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-keystone.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/keystone/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-magnum.conf.j2 b/ansible/roles/common/templates/cron-logrotate-magnum.conf.j2
deleted file mode 100644
index 4b2c2af4e6..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-magnum.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/magnum/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-manila.conf.j2 b/ansible/roles/common/templates/cron-logrotate-manila.conf.j2
deleted file mode 100644
index 9b9719514f..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-manila.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/manila/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-mariadb.conf.j2 b/ansible/roles/common/templates/cron-logrotate-mariadb.conf.j2
deleted file mode 100644
index 8543983e04..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-mariadb.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/mariadb/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-mistral.conf.j2 b/ansible/roles/common/templates/cron-logrotate-mistral.conf.j2
deleted file mode 100644
index e8e6da8e39..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-mistral.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/mistral/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-murano.conf.j2 b/ansible/roles/common/templates/cron-logrotate-murano.conf.j2
deleted file mode 100644
index ab33090192..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-murano.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/murano/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-neutron.conf.j2 b/ansible/roles/common/templates/cron-logrotate-neutron.conf.j2
deleted file mode 100644
index f4c2268292..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-neutron.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/neutron/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-nova.conf.j2 b/ansible/roles/common/templates/cron-logrotate-nova.conf.j2
deleted file mode 100644
index 657a994da1..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-nova.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/nova/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-rabbitmq.conf.j2 b/ansible/roles/common/templates/cron-logrotate-rabbitmq.conf.j2
deleted file mode 100644
index cf2dc3f367..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-rabbitmq.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/rabbitmq/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-sahara.conf.j2 b/ansible/roles/common/templates/cron-logrotate-sahara.conf.j2
deleted file mode 100644
index 57a98d315c..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-sahara.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/sahara/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-searchlight.conf.j2 b/ansible/roles/common/templates/cron-logrotate-searchlight.conf.j2
deleted file mode 100644
index da916664ae..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-searchlight.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/searchlight/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-senlin.conf.j2 b/ansible/roles/common/templates/cron-logrotate-senlin.conf.j2
deleted file mode 100644
index b4a61be8c0..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-senlin.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/senlin/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-swift.conf.j2 b/ansible/roles/common/templates/cron-logrotate-swift.conf.j2
deleted file mode 100644
index 0ad2badf1b..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-swift.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/swift/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron-logrotate-watcher.conf.j2 b/ansible/roles/common/templates/cron-logrotate-watcher.conf.j2
deleted file mode 100644
index e7edaf909d..0000000000
--- a/ansible/roles/common/templates/cron-logrotate-watcher.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-"/var/log/kolla/watcher/*.log"
-{
-}
diff --git a/ansible/roles/common/templates/cron.json.j2 b/ansible/roles/common/templates/cron.json.j2
deleted file mode 100644
index 8a338195e0..0000000000
--- a/ansible/roles/common/templates/cron.json.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
-{% set services = [
- ( 'ansible', 'yes' ),
- ( 'aodh', enable_aodh ),
- ( 'barbican', enable_barbican ),
- ( 'ceilometer', enable_ceilometer ),
- ( 'cinder', enable_cinder ),
- ( 'cloudkitty', enable_cloudkitty ),
- ( 'elasticsearch', enable_elasticsearch ),
- ( 'glance', enable_glance ),
- ( 'gnocchi', enable_gnocchi ),
- ( 'grafana', enable_grafana ),
- ( 'haproxy', enable_haproxy ),
- ( 'heat', enable_heat ),
- ( 'iscsid', enable_iscsid ),
- ( 'keepalived', enable_haproxy ),
- ( 'keystone', enable_keystone ),
- ( 'magnum', enable_magnum ),
- ( 'manila', enable_manila ),
- ( 'mariadb', enable_mariadb ),
- ( 'mistral', enable_mistral ),
- ( 'murano', enable_murano ),
- ( 'neutron', enable_neutron ),
- ( 'nova', enable_nova ),
- ( 'rabbitmq', enable_rabbitmq ),
- ( 'sahara', enable_sahara ),
- ( 'searchlight', enable_searchlight ),
- ( 'senlin', enable_senlin ),
- ( 'swift', enable_swift )
-] %}
-{
- "command": "{{ cron_cmd }}",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/logrotate/global.conf",
- "dest": "/etc/logrotate.conf",
- "owner": "root",
- "perm": "0644"
- },
-{% for service, enabled in services if enabled | bool %}
- {
- "source": "{{ container_config_directory }}/logrotate/{{ service }}.conf",
- "dest": "/etc/logrotate.d/{{ service }}.conf",
- "owner": "root",
- "perm": "0644"
- }{{ ',' if not loop.last else '' }}
-{% endfor %}
-
- ]
-}
diff --git a/ansible/roles/common/templates/heka-aodh.toml.j2 b/ansible/roles/common/templates/heka-aodh.toml.j2
deleted file mode 100644
index 419d923e98..0000000000
--- a/ansible/roles/common/templates/heka-aodh.toml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[aodh_apache_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_aodh_apache_log.lua"
- [aodh_apache_log_decoder.config]
- apache_log_pattern = '%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"'
-
-[aodh_apache_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "aodh_apache_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = 'aodh/aodh-apache-(?P.+)-access\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["aodh-apache-", "Service"]
diff --git a/ansible/roles/common/templates/heka-barbican.toml.j2 b/ansible/roles/common/templates/heka-barbican.toml.j2
deleted file mode 100644
index 9ec0e8c6b6..0000000000
--- a/ansible/roles/common/templates/heka-barbican.toml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[barbican_apache_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_barbican_apache_log.lua"
- [barbican_apache_log_decoder.config]
- apache_log_pattern = '%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"'
-
-[barbican_apache_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "barbican_apache_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = 'barbican/barbican-apache-(?P.+)-access\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["barbican-apache-", "Service"]
diff --git a/ansible/roles/common/templates/heka-ceilometer.toml.j2 b/ansible/roles/common/templates/heka-ceilometer.toml.j2
deleted file mode 100644
index cb2cb538e9..0000000000
--- a/ansible/roles/common/templates/heka-ceilometer.toml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[ceilometer_apache_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_ceilometer_apache_log.lua"
- [ceilometer_apache_log_decoder.config]
- apache_log_pattern = '%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"'
-
-[ceilometer_apache_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "ceilometer_apache_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = 'ceilometer/(?Pceilometer-api.*)\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["Service"]
diff --git a/ansible/roles/common/templates/heka-elasticsearch.toml.j2 b/ansible/roles/common/templates/heka-elasticsearch.toml.j2
deleted file mode 100644
index f3d4b13465..0000000000
--- a/ansible/roles/common/templates/heka-elasticsearch.toml.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-[elasticsearch_json_encoder]
-type = "ESJsonEncoder"
-index = {{'"%{Type}-%{%Y.%m.%d}"'}}
-es_index_from_timestamp = true
-fields = ["Timestamp", "Type", "Logger", "Severity", "Payload", "Pid", "Hostname", "DynamicFields"]
-
-[elasticsearch_output]
-type = "ElasticSearchOutput"
-server = "{{ elasticsearch_protocol }}://{{ elasticsearch_address }}:{{ elasticsearch_port }}"
-message_matcher = "Type == 'log'"
-encoder = "elasticsearch_json_encoder"
-use_buffering = true
- [elasticsearch_output.buffering]
- max_buffer_size = 1073741824 # 1024 * 1024 * 1024
- max_file_size = 134217728 #Â 128 * 1024 * 1024
- full_action = "drop"
diff --git a/ansible/roles/common/templates/heka-global.toml.j2 b/ansible/roles/common/templates/heka-global.toml.j2
deleted file mode 100644
index 5e325026ae..0000000000
--- a/ansible/roles/common/templates/heka-global.toml.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-[hekad]
-maxprocs = {{ ansible_processor_count * ansible_processor_cores }}
-
-[syslog_encoder]
-type = "SandboxEncoder"
-filename = "lua_encoders/os_syslog.lua"
-
-[syslog_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_syslog.lua"
- [syslog_log_decoder.config]
- hostname = "{{ ansible_hostname }}"
-
-[swift_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_swift_log.lua"
- [swift_log_decoder.config]
- hostname = "{{ ansible_hostname }}"
-
-[multi_log_decoder]
-type = "MultiDecoder"
-subs = ["syslog_log_decoder", "swift_log_decoder"]
-cascade_strategy = "first-wins"
-
-[syslog_input]
-type = "UdpInput"
-net = "unixgram"
-address = "/var/lib/kolla/heka/log"
-decoder = "multi_log_decoder"
-splitter = "NullSplitter"
diff --git a/ansible/roles/common/templates/heka-gnocchi.toml.j2 b/ansible/roles/common/templates/heka-gnocchi.toml.j2
deleted file mode 100644
index e2596762d8..0000000000
--- a/ansible/roles/common/templates/heka-gnocchi.toml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[gnocchi_apache_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_gnocchi_apache_log.lua"
- [gnocchi_apache_log_decoder.config]
- apache_log_pattern = '%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"'
-
-[gnocchi_apache_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "gnocchi_apache_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = 'gnocchi/gnocchi-apache-(?P.+)-access\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["gnocchi-apache-", "Service"]
diff --git a/ansible/roles/common/templates/heka-grafana.toml.j2 b/ansible/roles/common/templates/heka-grafana.toml.j2
deleted file mode 100644
index 7565c083e1..0000000000
--- a/ansible/roles/common/templates/heka-grafana.toml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-[grafana_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_grafana_log.lua"
-
-[grafana_log_splitter]
-type = "RegexSplitter"
-delimiter = '\n\n(=[^=]+====)'
-delimiter_eol = false
-deliver_incomplete_final = true
-
-[grafana_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "grafana_log_decoder"
-splitter = "grafana_log_splitter"
-log_directory = "/var/log/kolla"
-file_match = 'grafana/(?Pgrafana.*)\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["Service"]
diff --git a/ansible/roles/common/templates/heka-haproxy.toml.j2 b/ansible/roles/common/templates/heka-haproxy.toml.j2
deleted file mode 100644
index 49338fd0a3..0000000000
--- a/ansible/roles/common/templates/heka-haproxy.toml.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-# HAProxy writes its logs to Syslog, so the generic
-# Syslog input set in heka-global.toml.j2 is used.
-
-[haproxy_file_output]
-type = "FileOutput"
-message_matcher = "Fields[programname] =~ /(?i:haproxy)/"
-path = "/var/log/kolla/haproxy/haproxy.log"
-encoder = "syslog_encoder"
-folder_perm = "755"
diff --git a/ansible/roles/common/templates/heka-horizon.toml.j2 b/ansible/roles/common/templates/heka-horizon.toml.j2
deleted file mode 100644
index b447e9938a..0000000000
--- a/ansible/roles/common/templates/heka-horizon.toml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[horizon_apache_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_horizon_apache_log.lua"
- [horizon_apache_log_decoder.config]
- apache_log_pattern = '%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"'
-
-[horizon_apache_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "horizon_apache_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = 'horizon/(?Phorizon.*)\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["Service"]
diff --git a/ansible/roles/common/templates/heka-keepalived.toml.j2 b/ansible/roles/common/templates/heka-keepalived.toml.j2
deleted file mode 100644
index 2159982343..0000000000
--- a/ansible/roles/common/templates/heka-keepalived.toml.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-# Keepalived writes its logs to Syslog, so the generic
-# Syslog input set in heka-global.toml.j2 is used.
-
-[keepalived_file_output]
-type = "FileOutput"
-message_matcher = "Fields[programname] =~ /(?i:keepalived)/"
-path = "/var/log/kolla/keepalived/keepalived.log"
-encoder = "syslog_encoder"
-folder_perm = "755"
diff --git a/ansible/roles/common/templates/heka-keystone.toml.j2 b/ansible/roles/common/templates/heka-keystone.toml.j2
deleted file mode 100644
index 19cc69c6d9..0000000000
--- a/ansible/roles/common/templates/heka-keystone.toml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[keystone_apache_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_keystone_apache_log.lua"
- [keystone_apache_log_decoder.config]
- apache_log_pattern = '%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"'
-
-[keystone_apache_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "keystone_apache_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = 'keystone/keystone-apache-(?P.+)-access\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["keystone-apache-", "Service"]
diff --git a/ansible/roles/common/templates/heka-mariadb.toml.j2 b/ansible/roles/common/templates/heka-mariadb.toml.j2
deleted file mode 100644
index 07d7105410..0000000000
--- a/ansible/roles/common/templates/heka-mariadb.toml.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[mariadb_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_mysql_log.lua"
-
-[mariadb_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "mariadb_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = 'mariadb/mariadb\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ['mariadb']
diff --git a/ansible/roles/common/templates/heka-openstack.toml.j2 b/ansible/roles/common/templates/heka-openstack.toml.j2
deleted file mode 100644
index ef6334dc11..0000000000
--- a/ansible/roles/common/templates/heka-openstack.toml.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[openstack_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_openstack_log.lua"
-
-[openstack_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "openstack_log_decoder"
-log_directory = "/var/log/kolla"
-file_match = '(?Pcloudkitty|nova|glance|keystone|neutron|ceilometer|ceph|cinder|heat|murano|magnum|mistral|manila|searchlight|senlin|sahara)/(?P.*)\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["Service", "_", "Program"]
diff --git a/ansible/roles/common/templates/heka-rabbitmq.toml.j2 b/ansible/roles/common/templates/heka-rabbitmq.toml.j2
deleted file mode 100644
index d2c49aa1e7..0000000000
--- a/ansible/roles/common/templates/heka-rabbitmq.toml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-[rabbitmq_log_decoder]
-type = "SandboxDecoder"
-filename = "lua_decoders/os_rabbitmq_log.lua"
-
-[rabbitmq_log_splitter]
-type = "RegexSplitter"
-delimiter = '\n\n(=[^=]+====)'
-delimiter_eol = false
-deliver_incomplete_final = true
-
-[rabbitmq_logstreamer_input]
-type = "LogstreamerInput"
-decoder = "rabbitmq_log_decoder"
-splitter = "rabbitmq_log_splitter"
-log_directory = "/var/log/kolla"
-file_match = 'rabbitmq/(?Prabbit.*)\.log\.?(?P\d*)$'
-priority = ["^Seq"]
-differentiator = ["Service"]
diff --git a/ansible/roles/common/templates/heka-swift.toml.j2 b/ansible/roles/common/templates/heka-swift.toml.j2
deleted file mode 100644
index c70b1631db..0000000000
--- a/ansible/roles/common/templates/heka-swift.toml.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-# Swift writes its logs to Syslog, so the generic Syslog input set in
-# heka-global.toml.j2 is used.
-
-[{{ item.name }}_file_output]
-type = "FileOutput"
-message_matcher = "Fields[programname] == '{{ item.name }}'"
-path = "/var/log/kolla/swift/{{ item.name }}.log"
-encoder = "syslog_encoder"
-folder_perm = "755"
diff --git a/ansible/roles/common/templates/heka.json.j2 b/ansible/roles/common/templates/heka.json.j2
deleted file mode 100644
index a49258e4af..0000000000
--- a/ansible/roles/common/templates/heka.json.j2
+++ /dev/null
@@ -1,139 +0,0 @@
-{% set swift_services = ["swift-account-auditor", "swift-account-reaper", "swift-account-replicator", "swift-account-server", "swift-container-auditor", "swift-container-replicator", "swift-container-server", "swift-container-updater", "swift-object-auditor", "swift-object-expirer", "swift-object-replicator", "swift-object-server", "swift-object-updater", "swift-proxy-server", "swift-rsyncd"] %}
-{
- "command": "/usr/bin/hekad -config=/etc/heka/",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/heka-elasticsearch.toml",
- "dest": "/etc/heka/heka-elasticsearch.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_central_logging | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-global.toml",
- "dest": "/etc/heka/heka-global.toml",
- "owner": "heka",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/heka-aodh.toml",
- "dest": "/etc/heka/heka-aodh.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_aodh | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-gnocchi.toml",
- "dest": "/etc/heka/heka-gnocchi.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_gnocchi | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-grafana.toml",
- "dest": "/etc/heka/heka-grafana.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_grafana | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-barbican.toml",
- "dest": "/etc/heka/heka-barbican.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_barbican | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-custom.toml",
- "dest": "/etc/heka/heka-custom.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": true
- },
- {
- "source": "{{ container_config_directory }}/heka-haproxy.toml",
- "dest": "/etc/heka/heka-haproxy.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_haproxy | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-keepalived.toml",
- "dest": "/etc/heka/heka-keepalived.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_haproxy | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-rabbitmq.toml",
- "dest": "/etc/heka/heka-rabbitmq.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_rabbitmq | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-openstack.toml",
- "dest": "/etc/heka/heka-openstack.toml",
- "owner": "heka",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/heka-mariadb.toml",
- "dest": "/etc/heka/heka-mariadb.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_mariadb | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-keystone.toml",
- "dest": "/etc/heka/heka-keystone.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_keystone | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/heka-horizon.toml",
- "dest": "/etc/heka/heka-horizon.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_horizon | bool) | string | lower }}
- },
-{% for swift_service in swift_services %}
- {
- "source": "{{ container_config_directory }}/heka-{{ swift_service }}.toml",
- "dest": "/etc/heka/heka-{{ swift_service }}.toml",
- "owner": "heka",
- "perm": "0600",
- "optional": {{ (not enable_swift | bool) | string | lower }}
- }{% if not loop.last %},{% endif %}
-{% endfor %}
-
- ],
- "permissions": [
- {
- "path": "/var/cache/hekad",
- "owner": "heka:heka",
- "recurse": true
- },
- {
- "path": "/var/lib/kolla/heka",
- "owner": "heka:heka",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/haproxy",
- "owner": "heka:kolla",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/keepalived",
- "owner": "heka:kolla",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/swift",
- "owner": "heka:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/common/templates/kolla-toolbox.json.j2 b/ansible/roles/common/templates/kolla-toolbox.json.j2
deleted file mode 100644
index 7f0dc05193..0000000000
--- a/ansible/roles/common/templates/kolla-toolbox.json.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "command": "sleep infinity",
- "config_files": [],
- "permissions": [
- {
- "path": "/var/log/kolla/ansible.log",
- "owner": "ansible:ansible"
- }
- ]
-}
diff --git a/ansible/roles/congress/defaults/main.yml b/ansible/roles/congress/defaults/main.yml
deleted file mode 100644
index f54059c43b..0000000000
--- a/ansible/roles/congress/defaults/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-project_name: "congress"
-
-####################
-# Database
-####################
-congress_database_name: "congress"
-congress_database_user: "congress"
-congress_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-congress_policy_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-congress-policy-engine"
-congress_policy_engine_tag: "{{ openstack_release }}"
-congress_policy_engine_image_full: "{{ congress_policy_engine_image }}:{{ congress_policy_engine_tag }}"
-
-congress_datasource_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-congress-datasource"
-congress_datasource_tag: "{{ openstack_release }}"
-congress_datasource_image_full: "{{ congress_datasource_image }}:{{ congress_datasource_tag }}"
-
-congress_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-congress-api"
-congress_api_tag: "{{ openstack_release }}"
-congress_api_image_full: "{{ congress_api_image }}:{{ congress_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-congress_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ congress_api_port }}"
-congress_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ congress_api_port }}"
-congress_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ congress_api_port }}"
-
-congress_logging_debug: "{{ openstack_logging_debug }}"
-
-congress_keystone_user: "congress"
-
-openstack_congress_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/congress/meta/main.yml b/ansible/roles/congress/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/congress/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/congress/tasks/bootstrap.yml b/ansible/roles/congress/tasks/bootstrap.yml
deleted file mode 100644
index 3049594160..0000000000
--- a/ansible/roles/congress/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating congress database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ congress_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['congress-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating congress database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ congress_database_name }}'
- password='{{ congress_database_password }}'
- host='%'
- priv='{{ congress_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['congress-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/congress/tasks/bootstrap_service.yml b/ansible/roles/congress/tasks/bootstrap_service.yml
deleted file mode 100644
index 03959d6680..0000000000
--- a/ansible/roles/congress/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running congress bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{docker_common_options}}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ congress_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_congress"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/congress-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['congress-api'][0] }}"
diff --git a/ansible/roles/congress/tasks/config.yml b/ansible/roles/congress/tasks/config.yml
deleted file mode 100644
index f447726597..0000000000
--- a/ansible/roles/congress/tasks/config.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "congress-api"
- - "congress-policy-engine"
- - "congress-datasource"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "congress-api"
- - "congress-policy-engine"
- - "congress-datasource"
-
-- name: Copying over congress.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/congress.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/congress.conf"
- - "{{ node_custom_config }}/congress/{{ item }}.conf"
- - "{{ node_custom_config }}/congress/{{ inventory_hostname }}/congress.conf"
- dest: "{{ node_config_directory }}/{{ item }}/congress.conf"
- with_items:
- - "congress-api"
- - "congress-policy-engine"
- - "congress-datasource"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/congress/policy.json"
- register: congress_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/congress/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "congress-api"
- - "congress-policy-engine"
- - "congress-datasource"
- when:
- congress_policy.stat.exists
diff --git a/ansible/roles/congress/tasks/deploy.yml b/ansible/roles/congress/tasks/deploy.yml
deleted file mode 100644
index 4672d2dfda..0000000000
--- a/ansible/roles/congress/tasks/deploy.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['congress-api']
-
-- include: config.yml
- when: inventory_hostname in groups['congress-api'] or
- inventory_hostname in groups['congress-policy-engine'] or
- inventory_hostname in groups['congress-datasource']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['congress-api']
-
-- include: start.yml
- when: inventory_hostname in groups['congress-api'] or
- inventory_hostname in groups['congress-policy-engine'] or
- inventory_hostname in groups['congress-datasource']
diff --git a/ansible/roles/congress/tasks/main.yml b/ansible/roles/congress/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/congress/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/congress/tasks/precheck.yml b/ansible/roles/congress/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/congress/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/congress/tasks/pull.yml b/ansible/roles/congress/tasks/pull.yml
deleted file mode 100644
index f48e9971f0..0000000000
--- a/ansible/roles/congress/tasks/pull.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Pulling congress-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ congress_api_image_full }}"
- when: inventory_hostname in groups['congress-api']
-
-- name: Pulling congress-policy-engine image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ congress_policy_engine_image_full }}"
- when: inventory_hostname in groups['congress-policy-engine']
-
-- name: Pulling congress-datasource image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ congress_datasource_image_full }}"
- when: inventory_hostname in groups['congress-datasource']
diff --git a/ansible/roles/congress/tasks/reconfigure.yml b/ansible/roles/congress/tasks/reconfigure.yml
deleted file mode 100644
index a9611bcabc..0000000000
--- a/ansible/roles/congress/tasks/reconfigure.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: congress_api, group: congress-api }
- - { name: congress_datasource, group: congress-datasource }
- - { name: congress_policy_engine, group: congress-policy-engine }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: congress_api, group: congress-api }
- - { name: congress_datasource, group: congress-datasource }
- - { name: congress_policy_engine, group: congress-policy-engine }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: congress_api, group: congress-api }
- - { name: congress_datasource, group: congress-datasource }
- - { name: congress_policy_engine, group: congress-policy-engine }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: congress_api, group: congress-api },
- { name: congress_datasource, group: congress-datasource },
- { name: congress_policy_engine, group: congress-policy-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: congress_api, group: congress-api },
- { name: congress_datasource, group: congress-datasource },
- { name: congress_policy_engine, group: congress-policy-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/congress/tasks/register.yml b/ansible/roles/congress/tasks/register.yml
deleted file mode 100644
index e0a387acb3..0000000000
--- a/ansible/roles/congress/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the congress service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=congress
- service_type=application_catalog
- description='Openstack Application Catalog'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_congress_auth }}' }}"
- -e "{'openstack_congress_auth':{{ openstack_congress_auth }}}"
- register: congress_endpoint
- changed_when: "{{ congress_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (congress_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: congress_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ congress_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ congress_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ congress_public_endpoint }}'}
-
-- name: Creating the congress project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=congress
- password={{ congress_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_congress_auth }}' }}"
- -e "{'openstack_congress_auth':{{ openstack_congress_auth }}}"
- register: congress_user
- changed_when: "{{ congress_user.stdout.find('localhost | SUCCESS => ') != -1 and (congress_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: congress_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/congress/tasks/start.yml b/ansible/roles/congress/tasks/start.yml
deleted file mode 100644
index 724d85e711..0000000000
--- a/ansible/roles/congress/tasks/start.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Starting congress-policy-engine container
- kolla_docker:
- action: "start_container"
- name: "congress_policy_engine"
- common_options: "{{docker_common_options}}"
- image: "{{ congress_policy_engine_image_full }}"
- volumes:
- - "{{ node_config_directory }}/congress-policy-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['congress-policy-engine']
-
-- name: Starting congress-datasource container
- kolla_docker:
- action: "start_container"
- name: "congress_datasource"
- common_options: "{{docker_common_options}}"
- image: "{{ congress_datasource_image_full }}"
- volumes:
- - "{{ node_config_directory }}/congress-datasource/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['congress-datasource']
-
-- name: Starting congress-api container
- kolla_docker:
- action: "start_container"
- name: "congress_api"
- common_options: "{{docker_common_options}}"
- image: "{{ congress_api_image_full }}"
- volumes:
- - "{{ node_config_directory }}/congress-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['congress-api']
diff --git a/ansible/roles/congress/tasks/upgrade.yml b/ansible/roles/congress/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/congress/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/congress/templates/congress-api.json.j2 b/ansible/roles/congress/templates/congress-api.json.j2
deleted file mode 100644
index a7b2cae577..0000000000
--- a/ansible/roles/congress/templates/congress-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "congress-server --api --node_id=api-node --config-file /etc/congress/congress.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/congress.conf",
- "dest": "/etc/congress/congress.conf",
- "owner": "congress",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/congress",
- "owner": "congress:congress",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/congress/templates/congress-datasource.json.j2 b/ansible/roles/congress/templates/congress-datasource.json.j2
deleted file mode 100644
index 9b4f9865e6..0000000000
--- a/ansible/roles/congress/templates/congress-datasource.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "congress-server --datasources --node_id=data-node --config-file /etc/congress/congress.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/congress.conf",
- "dest": "/etc/congress/congress.conf",
- "owner": "congress",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/congress",
- "owner": "congress:congress",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/congress/templates/congress-policy-engine.json.j2 b/ansible/roles/congress/templates/congress-policy-engine.json.j2
deleted file mode 100644
index f9b2e69e51..0000000000
--- a/ansible/roles/congress/templates/congress-policy-engine.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "congress-server --policy_engine --node_id=policy-node --config-file /etc/congress/congress.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/congress.conf",
- "dest": "/etc/congress/congress.conf",
- "owner": "congress",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/congress",
- "owner": "congress:congress",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/congress/templates/congress.conf.j2 b/ansible/roles/congress/templates/congress.conf.j2
deleted file mode 100644
index b1ebb1346b..0000000000
--- a/ansible/roles/congress/templates/congress.conf.j2
+++ /dev/null
@@ -1,49 +0,0 @@
-[DEFAULT]
-debug = {{ congress_logging_debug }}
-
-log_dir = /var/log/kolla/congress
-
-drivers = congress.datasources.neutronv2_driver.NeutronV2Driver,congress.datasources.glancev2_driver.GlanceV2Driver,congress.datasources.nova_driver.NovaDriver,congress.datasources.keystone_driver.KeystoneDriver,congress.datasources.ceilometer_driver.CeilometerDriver,congress.datasources.cinder_driver.CinderDriver,congress.datasources.swift_driver.SwiftDriver,congress.datasources.plexxi_driver.PlexxiDriver,congress.datasources.vCenter_driver.VCenterDriver,congress.datasources.murano_driver.MuranoDriver,congress.datasources.ironic_driver.IronicDriver
-
-auth_strategy = keystone
-
-os_region_name = {{ openstack_region_name }}
-
-# NOTE: set use_stderr to False or the logs will also be sent to
-# stderr and collected by Docker
-use_stderr = False
-
-my_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-api_paste_config = /etc/congress/api-paste.ini
-
-{% if service_name == 'congress-api' %}
-bind_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-bind_port = {{ congress_api_port }}
-{% endif %}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[database]
-connection = mysql+pymysql://{{ congress_database_user }}:{{ congress_database_password }}@{{ congress_database_address }}/{{ congress_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ congress_keystone_user }}
-password = {{ congress_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[congress]
-url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ congress_api_port }}
-
-[oslo_messaging_notifications]
-driver = noop
diff --git a/ansible/roles/designate/defaults/main.yml b/ansible/roles/designate/defaults/main.yml
deleted file mode 100644
index 5a47ee0f68..0000000000
--- a/ansible/roles/designate/defaults/main.yml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-project_name: "designate"
-
-####################
-# Database
-####################
-designate_database_name: "designate"
-designate_database_user: "designate"
-designate_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-designate_pool_manager_database_name: "designate_pool_manager"
-designate_pool_manager_database_user: "designate_pool_manager"
-designate_pool_manager_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-
-designate_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-designate-central"
-designate_central_tag: "{{ openstack_release }}"
-designate_central_image_full: "{{ designate_central_image }}:{{ designate_central_tag }}"
-
-designate_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-designate-api"
-designate_api_tag: "{{ openstack_release }}"
-designate_api_image_full: "{{ designate_api_image }}:{{ designate_api_tag }}"
-
-designate_backend_bind9_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-designate-backend-bind9"
-designate_backend_bind9_tag: "{{ openstack_release }}"
-designate_backend_bind9_image_full: "{{ designate_backend_bind9_image }}:{{ designate_backend_bind9_tag }}"
-
-designate_mdns_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-designate-mdns"
-designate_mdns_tag: "{{ openstack_release }}"
-designate_mdns_image_full: "{{ designate_mdns_image }}:{{ designate_mdns_tag }}"
-
-designate_pool_manager_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-designate-pool-manager"
-designate_pool_manager_tag: "{{ openstack_release }}"
-designate_pool_manager_image_full: "{{ designate_pool_manager_image }}:{{ designate_pool_manager_tag }}"
-
-designate_sink_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-designate-sink"
-designate_sink_tag: "{{ openstack_release }}"
-designate_sink_image_full: "{{ designate_sink_image }}:{{ designate_sink_tag }}"
-
-
-####################
-# OpenStack
-####################
-designate_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ designate_api_port }}"
-designate_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ designate_api_port }}"
-designate_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ designate_api_port }}"
-
-designate_logging_debug: "{{ openstack_logging_debug }}"
-
-designate_keystone_user: "designate"
-
-openstack_designate_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/designate/meta/main.yml b/ansible/roles/designate/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/designate/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/designate/tasks/bootstrap.yml b/ansible/roles/designate/tasks/bootstrap.yml
deleted file mode 100644
index 427e9db1ae..0000000000
--- a/ansible/roles/designate/tasks/bootstrap.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- name: Creating Designate database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ designate_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['designate-central'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Designate Pool Manager database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ designate_pool_manager_database_name }}'"
- register: database_pool_manager
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['designate-central'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_pool_manager_created: "{{ (database_pool_manager.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Designate database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ designate_database_name }}'
- password='{{ designate_database_password }}'
- host='%'
- priv='{{ designate_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['designate-central'][0] }}"
-
-- name: Creating Designate Pool Manager database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ designate_pool_manager_database_name }}'
- password='{{ designate_pool_manager_database_password }}'
- host='%'
- priv='{{ designate_pool_manager_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_pool_manager_user_create
- changed_when: "{{ database_pool_manager_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_pool_manager_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_pool_manager_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/designate/tasks/bootstrap_service.yml b/ansible/roles/designate/tasks/bootstrap_service.yml
deleted file mode 100644
index ab530e8b73..0000000000
--- a/ansible/roles/designate/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Designate bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ designate_central_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_designate"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/designate-central/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['designate-central'][0] }}"
diff --git a/ansible/roles/designate/tasks/config.yml b/ansible/roles/designate/tasks/config.yml
deleted file mode 100644
index f42f2415cd..0000000000
--- a/ansible/roles/designate/tasks/config.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "designate-api"
- - "designate-central"
- - "designate-mdns"
- - "designate-pool-manager"
- - "designate-sink"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "designate-api"
- - "designate-central"
- - "designate-mdns"
- - "designate-pool-manager"
- - "designate-sink"
-
-- name: Copying over designate.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/designate.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/designate.conf"
- - "{{ node_custom_config }}/designate/{{ item }}.conf"
- - "{{ node_custom_config }}/designate/{{ inventory_hostname }}/designate.conf"
- dest: "{{ node_config_directory }}/{{ item }}/designate.conf"
- with_items:
- - "designate-api"
- - "designate-central"
- - "designate-mdns"
- - "designate-pool-manager"
- - "designate-sink"
diff --git a/ansible/roles/designate/tasks/deploy.yml b/ansible/roles/designate/tasks/deploy.yml
deleted file mode 100644
index 71d43606a6..0000000000
--- a/ansible/roles/designate/tasks/deploy.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['designate-api']
-
-- include: config.yml
- when: inventory_hostname in groups['designate-api'] or
- inventory_hostname in groups['designate-central'] or
- inventory_hostname in groups['designate-mdns'] or
- inventory_hostname in groups['designate-pool-manager'] or
- inventory_hostname in groups['designate-sink']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['designate-central']
-
-- include: start.yml
- when: inventory_hostname in groups['designate-api'] or
- inventory_hostname in groups['designate-central'] or
- inventory_hostname in groups['designate-mdns'] or
- inventory_hostname in groups['designate-pool-manager'] or
- inventory_hostname in groups['designate-sink']
diff --git a/ansible/roles/designate/tasks/main.yml b/ansible/roles/designate/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/designate/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/designate/tasks/pull.yml b/ansible/roles/designate/tasks/pull.yml
deleted file mode 100644
index b3896c1037..0000000000
--- a/ansible/roles/designate/tasks/pull.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Pulling designate-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_api_image_full }}"
- when: inventory_hostname in groups['designate-api']
-
-- name: Pulling designate-central image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_central_image_full }}"
- when: inventory_hostname in groups['designate-central']
-
-- name: Pulling designate-mdns image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_mdns_image_full }}"
- when: inventory_hostname in groups['designate-mdns']
-
-- name: Pulling designate-pool-manager image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_pool_manager_image_full }}"
- when: inventory_hostname in groups['designate-pool-manager']
-
-- name: Pulling designate-sink image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_sink_image_full }}"
- when: inventory_hostname in groups['designate-sink']
diff --git a/ansible/roles/designate/tasks/reconfigure.yml b/ansible/roles/designate/tasks/reconfigure.yml
deleted file mode 100644
index ace5333de3..0000000000
--- a/ansible/roles/designate/tasks/reconfigure.yml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_states
- failed_when: container_states.Running == false
- when:
- - "{{ item.enabled|default(True) }}"
- - inventory_hostname in groups[item.group]
- with_items:
- - { name: designate_central, group: designate-central }
- - { name: designate_api, group: designate-api }
- - { name: designate_mdns, group: designate-mdns }
- - { name: designate_pool_manager, group: designate-pool-manager }
- - { name: designate_sink, group: designate-sink }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: designate_central, group: designate-central }
- - { name: designate_api, group: designate-api }
- - { name: designate_mdns, group: designate-mdns }
- - { name: designate_pool_manager, group: designate-pool-manager }
- - { name: designate_sink, group: designate-sink }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: designate_central, group: designate-central }
- - { name: designate_api, group: designate-api }
- - { name: designate_mdns, group: designate-mdns }
- - { name: designate_pool_manager, group: designate-pool-manager }
- - { name: designate_sink, group: designate-sink }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: designate_central, group: designate-central },
- { name: designate_api, group: designate-api },
- { name: designate_mdns, group: designate-mdns },
- { name: designate_pool_manager, group: designate-pool-manager },
- { name: designate_sink, group: designate-sink }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: designate_central, group: designate-central },
- { name: designate_api, group: designate-api },
- { name: designate_mdns, group: designate-mdns },
- { name: designate_pool_manager, group: designate-pool-manager },
- { name: designate_sink, group: designate-sink }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/designate/tasks/register.yml b/ansible/roles/designate/tasks/register.yml
deleted file mode 100644
index e17db9bc27..0000000000
--- a/ansible/roles/designate/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Designate service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=designate
- service_type=dns
- description='Designate DNS Service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_designate_auth }}' }}"
- -e "{'openstack_designate_auth':{{ openstack_designate_auth }}}"
- register: designate_endpoint
- changed_when: "{{ designate_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (designate_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: designate_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ designate_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ designate_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ designate_public_endpoint }}'}
-
-- name: Creating the Designate project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=designate
- password={{ designate_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_designate_auth }}' }}"
- -e "{'openstack_designate_auth':{{ openstack_designate_auth }}}"
- register: designate_user
- changed_when: "{{ designate_user.stdout.find('localhost | SUCCESS => ') != -1 and (designate_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: designate_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/designate/tasks/start.yml b/ansible/roles/designate/tasks/start.yml
deleted file mode 100644
index e4c80990fb..0000000000
--- a/ansible/roles/designate/tasks/start.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Starting designate-central container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_central_image_full }}"
- name: "designate_central"
- volumes:
- - "{{ node_config_directory }}/designate-central/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['designate-central']
-
-- name: Starting designate-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_api_image_full }}"
- name: "designate_api"
- volumes:
- - "{{ node_config_directory }}/designate-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['designate-api']
-
-- name: Starting designate-mdns container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_mdns_image_full }}"
- name: "designate_mdns"
- volumes:
- - "{{ node_config_directory }}/designate-mdns/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['designate-mdns']
-
-- name: Starting designate-pool-manager container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_pool_manager_image_full }}"
- name: "designate_pool_manager"
- volumes:
- - "{{ node_config_directory }}/designate-pool-manager/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['designate-pool-manager']
-
-- name: Starting designate-sink container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ designate_sink_image_full }}"
- name: "designate_sink"
- volumes:
- - "{{ node_config_directory }}/designate-sink/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['designate-sink']
diff --git a/ansible/roles/designate/tasks/upgrade.yml b/ansible/roles/designate/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/designate/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/designate/templates/designate-api.json.j2 b/ansible/roles/designate/templates/designate-api.json.j2
deleted file mode 100644
index 92f6e0c6a4..0000000000
--- a/ansible/roles/designate/templates/designate-api.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "designate-api --config-file /etc/designate/designate.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/designate.conf",
- "dest": "/etc/designate/designate.conf",
- "owner": "designate",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/designate/templates/designate-central.json.j2 b/ansible/roles/designate/templates/designate-central.json.j2
deleted file mode 100644
index bd4bf299aa..0000000000
--- a/ansible/roles/designate/templates/designate-central.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "designate-central --config-file /etc/designate/designate.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/designate.conf",
- "dest": "/etc/designate/designate.conf",
- "owner": "designate",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/designate/templates/designate-mdns.json.j2 b/ansible/roles/designate/templates/designate-mdns.json.j2
deleted file mode 100644
index dbb098e47e..0000000000
--- a/ansible/roles/designate/templates/designate-mdns.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "designate-mdns --config-file /etc/designate/designate.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/designate.conf",
- "dest": "/etc/designate/designate.conf",
- "owner": "designate",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/designate/templates/designate-pool-manager.json.j2 b/ansible/roles/designate/templates/designate-pool-manager.json.j2
deleted file mode 100644
index 59d09171fd..0000000000
--- a/ansible/roles/designate/templates/designate-pool-manager.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "designate-pool-manager --config-file /etc/designate/designate.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/designate.conf",
- "dest": "/etc/designate/designate.conf",
- "owner": "designate",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/designate/templates/designate-sink.json.j2 b/ansible/roles/designate/templates/designate-sink.json.j2
deleted file mode 100644
index 41b11dce3e..0000000000
--- a/ansible/roles/designate/templates/designate-sink.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "designate-sink --config-file /etc/designate/designate.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/designate.conf",
- "dest": "/etc/designate/designate.conf",
- "owner": "designate",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/designate/templates/designate.conf.j2 b/ansible/roles/designate/templates/designate.conf.j2
deleted file mode 100644
index 17e0e59740..0000000000
--- a/ansible/roles/designate/templates/designate.conf.j2
+++ /dev/null
@@ -1,92 +0,0 @@
-[DEFAULT]
-
-debug = {{ designate_logging_debug }}
-
-log-dir = /var/log/kolla/designate
-
-notification_driver = messaging
-notification_topics = notifications_designate
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[service:api]
-api_base_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ designate_api_port }}
-api_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-api_port = {{ designate_api_port }}
-
-enable_api_v1 = True
-enabled_extensions_v1 = 'diagnostics, quotas, reports, sync, touch'
-enable_api_v2 = True
-enabled_extensions_v2 = 'quotas, reports'
-
-api_paste_config = /usr/share/designate/api-paste.ini
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ designate_keystone_user }}
-password = {{ designate_keystone_password }}
-http_connect_timeout = 60
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[cors]
-
-[cors.subdomain]
-
-[service:sink]
-enabled_notification_handlers = nova_fixed, neutron_floatingip
-
-[service:mdns]
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-[service:agent]
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-[service:zone_manager]
-
-[zone_manager_task:domain_purge]
-
-[zone_manager_task:delayed_notify]
-
-[service:pool_manager]
-cache_driver = memcache
-
-[pool_manager_cache:sqlalchemy]
-connection = mysql+pymysql://{{ designate_pool_manager_database_user }}:{{ designate_pool_manager_database_password }}@{{ designate_pool_manager_database_address }}/{{ designate_pool_manager_database_name }}
-max_retries = 10
-idle_timeout = 3600
-
-[pool_manager_cache:memcache]
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[network_api:neutron]
-
-[storage:sqlalchemy]
-connection = mysql+pymysql://{{ designate_database_user }}:{{ designate_database_password }}@{{ designate_database_address }}/{{ designate_database_name }}
-max_retries = 10
-idle_timeout = 3600
-
-[handler:nova_fixed]
-notification_topics = notifications_designate
-control_exchange = nova
-format = '(display_name)s.%(domain)s'
-
-[handler:neutron_floatingip]
-notification_topics = notifications_designate
-control_exchange = neutron
-format = '%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(domain)s'
-
-[backend:agent:bind9]
-
-[backend:agent:denominator]
-
-[oslo_concurrency]
-
-[coordination]
diff --git a/ansible/roles/destroy/defaults/main.yml b/ansible/roles/destroy/defaults/main.yml
deleted file mode 100644
index 0d75e357de..0000000000
--- a/ansible/roles/destroy/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-destroy_include_images: no
diff --git a/ansible/roles/destroy/tasks/cleanup_containers.yml b/ansible/roles/destroy/tasks/cleanup_containers.yml
deleted file mode 100644
index c6aee8ef5e..0000000000
--- a/ansible/roles/destroy/tasks/cleanup_containers.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: Destroying all Kolla containers and volumes on compute node
- environment:
- COMPUTE: true
- command: /tmp/kolla-cleanup/tools/cleanup-containers
- when: inventory_hostname in groups['compute']
-
-- name: Destroying all Kolla containers and volumes on non compute node
- command: /tmp/kolla-cleanup/tools/cleanup-containers
- when: inventory_hostname not in groups['compute']
diff --git a/ansible/roles/destroy/tasks/cleanup_host.yml b/ansible/roles/destroy/tasks/cleanup_host.yml
deleted file mode 100644
index 228fb70bbd..0000000000
--- a/ansible/roles/destroy/tasks/cleanup_host.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Destroying Kolla host configuration
- command: >
- env enable_haproxy={{ enable_haproxy }}
- kolla_internal_vip_address={{ kolla_internal_vip_address }}
- kolla_external_vip_address={{ kolla_external_vip_address }}
- /tmp/kolla-cleanup/tools/cleanup-host
-
-- name: Destroying kolla-cleanup folder
- file:
- path: /tmp/kolla-cleanup
- state: absent
diff --git a/ansible/roles/destroy/tasks/cleanup_images.yml b/ansible/roles/destroy/tasks/cleanup_images.yml
deleted file mode 100644
index e6b8d6073a..0000000000
--- a/ansible/roles/destroy/tasks/cleanup_images.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: Destroying Kolla images
- command: /tmp/kolla-cleanup/tools/cleanup-images
- when:
- - destroy_include_images | bool
diff --git a/ansible/roles/destroy/tasks/copy_tools.yml b/ansible/roles/destroy/tasks/copy_tools.yml
deleted file mode 100644
index 6a990e197c..0000000000
--- a/ansible/roles/destroy/tasks/copy_tools.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Creating /kolla-cleanup/tools directory on node
- file:
- state: directory
- path: /tmp/kolla-cleanup/tools
-
-- name: Copying validate-docker-execute.sh file
- copy:
- src: ../tools/validate-docker-execute.sh
- dest: /tmp/kolla-cleanup/tools
- mode: 0755
-
-- name: Copying cleanup-containers file
- copy:
- src: ../tools/cleanup-containers
- dest: /tmp/kolla-cleanup/tools
- mode: 0755
-
-- name: Copying cleanup-host file
- copy:
- src: ../tools/cleanup-host
- dest: /tmp/kolla-cleanup/tools
- mode: 0755
-
-- name: Copying cleanup-images file
- copy:
- src: ../tools/cleanup-images
- dest: /tmp/kolla-cleanup/tools
- mode: 0755
- when:
- - destroy_include_images | bool
diff --git a/ansible/roles/destroy/tasks/main.yml b/ansible/roles/destroy/tasks/main.yml
deleted file mode 100644
index 13b59b7fbf..0000000000
--- a/ansible/roles/destroy/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- include: copy_tools.yml
-
-- include: cleanup_containers.yml
-
-- include: cleanup_images.yml
-
-- include: cleanup_host.yml
diff --git a/ansible/roles/destroy/tasks/precheck.yml b/ansible/roles/destroy/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/destroy/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/elasticsearch/defaults/main.yml b/ansible/roles/elasticsearch/defaults/main.yml
deleted file mode 100644
index 947a1cb9f1..0000000000
--- a/ansible/roles/elasticsearch/defaults/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-####################
-# Elasticsearch
-####################
-elasticsearch_cluster_name: "kolla_logging"
-
-####################
-# Docker
-####################
-elasticsearch_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-elasticsearch"
-elasticsearch_tag: "{{ openstack_release }}"
-elasticsearch_image_full: "{{ elasticsearch_image }}:{{ elasticsearch_tag }}"
diff --git a/ansible/roles/elasticsearch/meta/main.yml b/ansible/roles/elasticsearch/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/elasticsearch/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/elasticsearch/tasks/config.yml b/ansible/roles/elasticsearch/tasks/config.yml
deleted file mode 100644
index 97b4dc5f0d..0000000000
--- a/ansible/roles/elasticsearch/tasks/config.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "elasticsearch"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "elasticsearch"
-
-- name: Copying over elasticsearch.yml
- template:
- src: "elasticsearch.yml.j2"
- dest: "{{ node_config_directory }}/{{ item }}/{{ item }}.yml"
- with_items:
- - "elasticsearch"
diff --git a/ansible/roles/elasticsearch/tasks/deploy.yml b/ansible/roles/elasticsearch/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/elasticsearch/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/elasticsearch/tasks/main.yml b/ansible/roles/elasticsearch/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/elasticsearch/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/elasticsearch/tasks/precheck.yml b/ansible/roles/elasticsearch/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/elasticsearch/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/elasticsearch/tasks/pull.yml b/ansible/roles/elasticsearch/tasks/pull.yml
deleted file mode 100644
index 6dae5004a8..0000000000
--- a/ansible/roles/elasticsearch/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling elasticsearch image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ elasticsearch_image_full }}"
diff --git a/ansible/roles/elasticsearch/tasks/reconfigure.yml b/ansible/roles/elasticsearch/tasks/reconfigure.yml
deleted file mode 100644
index 480746ef4b..0000000000
--- a/ansible/roles/elasticsearch/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: elasticsearch, group: elasticsearch }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: elasticsearch, group: elasticsearch }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: elasticsearch, group: elasticsearch }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: elasticsearch, group: elasticsearch }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: elasticsearch, group: elasticsearch }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/elasticsearch/tasks/start.yml b/ansible/roles/elasticsearch/tasks/start.yml
deleted file mode 100644
index 12b167eda0..0000000000
--- a/ansible/roles/elasticsearch/tasks/start.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Starting Elasticsearch container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ elasticsearch_image_full }}"
- name: "elasticsearch"
- volumes:
- - "{{ node_config_directory }}/elasticsearch/:{{ container_config_directory }}/"
- - "/etc/localtime:/etc/localtime:ro"
- - "elasticsearch:/var/lib/elasticsearch/data"
- when: inventory_hostname in groups ['elasticsearch']
diff --git a/ansible/roles/elasticsearch/tasks/upgrade.yml b/ansible/roles/elasticsearch/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/elasticsearch/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.json.j2 b/ansible/roles/elasticsearch/templates/elasticsearch.json.j2
deleted file mode 100644
index f34d2ea3bb..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/usr/share/elasticsearch/bin/elasticsearch",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/elasticsearch.yml",
- "dest": "/usr/share/elasticsearch/config/elasticsearch.yml",
- "owner": "elasticsearch",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/elasticsearch",
- "owner": "elasticsearch:elasticsearch",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/elasticsearch",
- "owner": "elasticsearch:elasticsearch",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2 b/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2
deleted file mode 100644
index 14f1bc4711..0000000000
--- a/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-{% set num_nodes = groups['elasticsearch'] | length %}
-{% set minimum_master_nodes = (num_nodes / 2 + 1) | round(0, 'floor') | int if num_nodes > 2 else 1 %}
-{% set recover_after_nodes = (num_nodes * 2 / 3) | round(0, 'floor') | int if num_nodes > 1 else 1 %}
-node.name: "{% if orchestration_engine == 'KUBERNETES' %}0.0.0.0{% else %}{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}{% endif %}"
-network.host: {% if orchestration_engine == 'KUBERNETES' %}_eth0:ipv4_{% else %}"{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"{% endif %}
-
-cluster.name: "{% if orchestration_engine == 'KUBERNETES' %}0.0.0.0{% else %}{{ elasticsearch_cluster_name }}{% endif %}"
-node.master: true
-node.data: true
-discovery.zen.ping.unicast.hosts: [{% if orchestration_engine == 'KUBERNETES' %}"0.0.0.0"{% else %}{% for host in groups['elasticsearch'] %}"{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}"{% if not loop.last %},{% endif %}{% endfor %}{% endif %}]
-
-discovery.zen.minimum_master_nodes: {{ minimum_master_nodes }}
-gateway.expected_nodes: {{ num_nodes }}
-gateway.recover_after_time: "5m"
-gateway.recover_after_nodes: {{ recover_after_nodes }}
-path.conf: "/etc/elasticsearch"
-path.data: "/var/lib/elasticsearch/data"
-path.logs: "/var/log/kolla/elasticsearch"
-path.scripts: "/etc/elasticsearch/scripts"
diff --git a/ansible/roles/etcd/defaults/main.yml b/ansible/roles/etcd/defaults/main.yml
deleted file mode 100644
index c8f73263a7..0000000000
--- a/ansible/roles/etcd/defaults/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-project_name: "etcd"
-
-
-####################
-# Docker
-####################
-etcd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-etcd"
-etcd_tag: "{{ openstack_release }}"
-etcd_image_full: "{{ etcd_image }}:{{ etcd_tag }}"
diff --git a/ansible/roles/etcd/meta/main.yml b/ansible/roles/etcd/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/etcd/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/etcd/tasks/bootstrap.yml b/ansible/roles/etcd/tasks/bootstrap.yml
deleted file mode 100644
index d098ff99bd..0000000000
--- a/ansible/roles/etcd/tasks/bootstrap.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Running etcd bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ etcd_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_etcd"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/etcd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_etcd:/var/lib/etcd/"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/etcd/tasks/config.yml b/ansible/roles/etcd/tasks/config.yml
deleted file mode 100644
index 2ff3ac1f0e..0000000000
--- a/ansible/roles/etcd/tasks/config.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "etcd"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "etcd"
diff --git a/ansible/roles/etcd/tasks/deploy.yml b/ansible/roles/etcd/tasks/deploy.yml
deleted file mode 100644
index 98daa4021c..0000000000
--- a/ansible/roles/etcd/tasks/deploy.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap.yml
-
-- include: start.yml
diff --git a/ansible/roles/etcd/tasks/main.yml b/ansible/roles/etcd/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/etcd/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/etcd/tasks/precheck.yml b/ansible/roles/etcd/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/etcd/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/etcd/tasks/pull.yml b/ansible/roles/etcd/tasks/pull.yml
deleted file mode 100644
index 57d5bdae51..0000000000
--- a/ansible/roles/etcd/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling etcd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ etcd_image_full }}"
diff --git a/ansible/roles/etcd/tasks/reconfigure.yml b/ansible/roles/etcd/tasks/reconfigure.yml
deleted file mode 100644
index 6eece782d3..0000000000
--- a/ansible/roles/etcd/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: etcd, group: etcd }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: etcd, group: etcd }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: etcd, group: etcd }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: etcd, group: etcd }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: etcd, group: etcd }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/etcd/tasks/start.yml b/ansible/roles/etcd/tasks/start.yml
deleted file mode 100644
index 7546f8841d..0000000000
--- a/ansible/roles/etcd/tasks/start.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Starting etcd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- ETCD_DATA_DIR: "/var/lib/etcd"
- ETCD_NAME: "{{ ansible_hostname }}"
- ETCD_ADVERTISE_CLIENT_URLS: "{{ internal_protocol }}://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
- ETCD_LISTEN_CLIENT_URLS: "{{ internal_protocol }}://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
- ETCD_INITIAL_ADVERTISE_PEER_URLS: "{{ internal_protocol }}://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
- ETCD_LISTEN_PEER_URLS: "{{ internal_protocol }}://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
- ETCD_INITIAL_CLUSTER_TOKEN: "{{ etcd_cluster_token }}"
- ETCD_INITIAL_CLUSTER: "{% for host in groups['etcd'] %}{{ hostvars[host]['ansible_hostname'] }}={{ internal_protocol }}://{{ hostvars[host]['ansible_' + api_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}"
- ETCD_INITIAL_CLUSTER_STATE: "new"
- ETCD_OUT_FILE: "/var/log/kolla/etcd/etcd.log"
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ etcd_image_full }}"
- name: "etcd"
- volumes:
- - "{{ node_config_directory }}/etcd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_etcd:/var/lib/etcd/"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/etcd/tasks/upgrade.yml b/ansible/roles/etcd/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/etcd/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/etcd/templates/etcd.json.j2 b/ansible/roles/etcd/templates/etcd.json.j2
deleted file mode 100644
index 3ea11fd909..0000000000
--- a/ansible/roles/etcd/templates/etcd.json.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "command": "etcd"
-}
diff --git a/ansible/roles/glance/defaults/main.yml b/ansible/roles/glance/defaults/main.yml
deleted file mode 100644
index 6a7a5985da..0000000000
--- a/ansible/roles/glance/defaults/main.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-project_name: "glance"
-glance_service_groups:
- - { name: glance_api, service: glance-api, group: glance-api }
- - { name: glance_registry, service: glance-registry, group: glance-registry }
-
-
-####################
-# Ceph
-####################
-ceph_glance_pool_type: "{{ ceph_pool_type }}"
-ceph_glance_cache_mode: "{{ ceph_cache_mode }}"
-
-# Due to Ansible issues on include, you cannot override these variables. Please
-# override the variables they reference instead.
-glance_pool_name: "{{ ceph_glance_pool_name }}"
-glance_pool_type: "{{ ceph_glance_pool_type }}"
-glance_cache_mode: "{{ ceph_glance_cache_mode }}"
-
-
-####################
-# Database
-####################
-glance_database_name: "glance"
-glance_database_user: "glance"
-glance_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-glance_registry_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-glance-registry"
-glance_registry_tag: "{{ openstack_release }}"
-glance_registry_image_full: "{{ glance_registry_image }}:{{ glance_registry_tag }}"
-
-glance_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-glance-api"
-glance_api_tag: "{{ openstack_release }}"
-glance_api_image_full: "{{ glance_api_image }}:{{ glance_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-glance_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ glance_api_port }}"
-glance_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ glance_api_port }}"
-glance_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ glance_api_port }}"
-
-glance_logging_debug: "{{ openstack_logging_debug }}"
-
-glance_keystone_user: "glance"
-
-openstack_glance_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/glance/meta/main.yml b/ansible/roles/glance/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/glance/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/glance/tasks/bootstrap.yml b/ansible/roles/glance/tasks/bootstrap.yml
deleted file mode 100644
index 10d650d926..0000000000
--- a/ansible/roles/glance/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Glance database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ glance_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['glance-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Glance database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ glance_database_name }}'
- password='{{ glance_database_password }}'
- host='%'
- priv='{{ glance_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['glance-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/glance/tasks/bootstrap_service.yml b/ansible/roles/glance/tasks/bootstrap_service.yml
deleted file mode 100644
index decc23177d..0000000000
--- a/ansible/roles/glance/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Running Glance bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ glance_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_glance"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/glance-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "glance:/var/lib/glance/"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['glance-api'][0] }}"
diff --git a/ansible/roles/glance/tasks/ceph.yml b/ansible/roles/glance/tasks/ceph.yml
deleted file mode 100644
index cb031fa09e..0000000000
--- a/ansible/roles/glance/tasks/ceph.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/glance-api"
- state: "directory"
- when: inventory_hostname in groups['glance-api']
-
-- name: Copying over config(s)
- template:
- src: roles/ceph/templates/ceph.conf.j2
- dest: "{{ node_config_directory }}/glance-api/ceph.conf"
- when: inventory_hostname in groups['glance-api']
-
-- include: ../../ceph_pools.yml
- vars:
- pool_name: "{{ glance_pool_name }}"
- pool_type: "{{ glance_pool_type }}"
- cache_mode: "{{ glance_cache_mode }}"
-
-# TODO(SamYaple): Improve changed_when tests
-- name: Pulling cephx keyring
- command: docker exec ceph_mon ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_glance_pool_name }}, allow rwx pool={{ ceph_glance_pool_name }}-cache'
- register: cephx_key
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- run_once: True
-
-- name: Pushing cephx keyring
- copy:
- content: "{{ cephx_key.stdout }}\n\r"
- dest: "{{ node_config_directory }}/glance-api/ceph.client.glance.keyring"
- mode: "0600"
- when: inventory_hostname in groups['glance-api']
diff --git a/ansible/roles/glance/tasks/check.yml b/ansible/roles/glance/tasks/check.yml
deleted file mode 100644
index 6dab34da6a..0000000000
--- a/ansible/roles/glance/tasks/check.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Glance sanity checks
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_sanity
- -a "service=glance
- project=service
- user=admin
- password={{ glance_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_glance_auth }}' }}"
- -e "{'openstack_glance_auth':{{ openstack_glance_auth }}}"
- register: glance_sanity
- changed_when: "{{ glance_sanity.stdout.find('localhost | SUCCESS => ') != -1 and (glance_sanity.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: glance_sanity.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- when: kolla_enable_sanity_glance | bool
diff --git a/ansible/roles/glance/tasks/config.yml b/ansible/roles/glance/tasks/config.yml
deleted file mode 100644
index d88993353c..0000000000
--- a/ansible/roles/glance/tasks/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item.service }}"
- state: "directory"
- recurse: yes
- when: inventory_hostname in groups[item.group]
- with_items: "{{ glance_service_groups }}"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item.service }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.service }}/config.json"
- when: inventory_hostname in groups[item.group]
- with_items: "{{ glance_service_groups }}"
-
-- name: Copying over glance-*.conf
- merge_configs:
- vars:
- service_name: "{{ item.service }}"
- sources:
- - "{{ role_path }}/templates/{{ item.service }}.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/glance.conf"
- - "{{ node_custom_config }}/glance/{{ item.service }}.conf"
- - "{{ node_custom_config }}/glance/{{ inventory_hostname }}/{{ item.service }}.conf"
- dest: "{{ node_config_directory }}/{{ item.service }}/{{ item.service }}.conf"
- when: inventory_hostname in groups[item.group]
- with_items: "{{ glance_service_groups }}"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/glance/policy.json"
- register: glance_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/glance/policy.json"
- dest: "{{ node_config_directory }}/{{ item.service }}/policy.json"
- when:
- - inventory_hostname in groups[item.group]
- - glance_policy.stat.exists
- with_items: "{{ glance_service_groups }}"
diff --git a/ansible/roles/glance/tasks/deploy.yml b/ansible/roles/glance/tasks/deploy.yml
deleted file mode 100644
index 0704c65bbd..0000000000
--- a/ansible/roles/glance/tasks/deploy.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- include: ceph.yml
- when:
- - (enable_ceph | bool) and (glance_backend_ceph | bool)
- - inventory_hostname in groups['ceph-mon'] or
- inventory_hostname in groups['glance-api'] or
- inventory_hostname in groups['glance-registry']
-
-- include: external_ceph.yml
- when:
- - (enable_ceph | bool == False) and (glance_backend_ceph | bool)
- - inventory_hostname in groups['glance-api'] or
- inventory_hostname in groups['glance-registry']
-
-- include: register.yml
- when: inventory_hostname in groups['glance-api']
-
-- include: config.yml
- when: inventory_hostname in groups['glance-api'] or
- inventory_hostname in groups['glance-registry']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['glance-api']
-
-- include: start.yml
- when: inventory_hostname in groups['glance-api'] or
- inventory_hostname in groups['glance-registry']
-
-- include: check.yml
- when: inventory_hostname in groups['glance-api'] or
- inventory_hostname in groups['glance-registry']
diff --git a/ansible/roles/glance/tasks/external_ceph.yml b/ansible/roles/glance/tasks/external_ceph.yml
deleted file mode 100644
index 43e9cf7205..0000000000
--- a/ansible/roles/glance/tasks/external_ceph.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/glance-api"
- state: "directory"
- when: inventory_hostname in groups['glance-api']
-
-- name: Copy over ceph files
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/glance-api/"
- with_fileglob:
- - "{{ node_custom_config }}/glance/ceph*"
diff --git a/ansible/roles/glance/tasks/main.yml b/ansible/roles/glance/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/glance/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/glance/tasks/precheck.yml b/ansible/roles/glance/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/glance/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/glance/tasks/pull.yml b/ansible/roles/glance/tasks/pull.yml
deleted file mode 100644
index 0a3ebeb581..0000000000
--- a/ansible/roles/glance/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling glance-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ glance_api_image_full }}"
- when: inventory_hostname in groups['glance-api']
-
-- name: Pulling glance-registry image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ glance_registry_image_full }}"
- when: inventory_hostname in groups['glance-registry']
diff --git a/ansible/roles/glance/tasks/reconfigure.yml b/ansible/roles/glance/tasks/reconfigure.yml
deleted file mode 100644
index 22f0eb79b5..0000000000
--- a/ansible/roles/glance/tasks/reconfigure.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items: "{{ glance_service_groups }}"
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items: "{{ glance_service_groups }}"
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items: "{{ glance_service_groups }}"
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - "{{ glance_service_groups }}"
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: bootstrap_service.yml
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - "{{ glance_service_groups }}"
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/glance/tasks/register.yml b/ansible/roles/glance/tasks/register.yml
deleted file mode 100644
index d0cfaa7d98..0000000000
--- a/ansible/roles/glance/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Glance service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=glance
- service_type=image
- description='Openstack Image'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_glance_auth }}' }}"
- -e "{'openstack_glance_auth':{{ openstack_glance_auth }}}"
- register: glance_endpoint
- changed_when: "{{ glance_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (glance_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: glance_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ glance_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ glance_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ glance_public_endpoint }}'}
-
-- name: Creating the Glance project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=glance
- password={{ glance_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_glance_auth }}' }}"
- -e "{'openstack_glance_auth':{{ openstack_glance_auth }}}"
- register: glance_user
- changed_when: "{{ glance_user.stdout.find('localhost | SUCCESS => ') != -1 and (glance_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: glance_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/glance/tasks/start.yml b/ansible/roles/glance/tasks/start.yml
deleted file mode 100644
index bec16bbee3..0000000000
--- a/ansible/roles/glance/tasks/start.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Starting glance-registry container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ glance_registry_image_full }}"
- name: "glance_registry"
- volumes:
- - "{{ node_config_directory }}/glance-registry/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['glance-registry']
-
-- name: Starting glance-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ glance_api_image_full }}"
- name: "glance_api"
- volumes:
- - "{{ node_config_directory }}/glance-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "glance:/var/lib/glance/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['glance-api']
diff --git a/ansible/roles/glance/tasks/upgrade.yml b/ansible/roles/glance/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/glance/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/glance/templates/glance-api.conf.j2 b/ansible/roles/glance/templates/glance-api.conf.j2
deleted file mode 100644
index 9d53735948..0000000000
--- a/ansible/roles/glance/templates/glance-api.conf.j2
+++ /dev/null
@@ -1,78 +0,0 @@
-[DEFAULT]
-debug = {{ glance_logging_debug }}
-
-# NOTE(elemoine) log_dir alone does not work for Glance
-log_file = /var/log/kolla/glance/api.log
-use_forwarded_for = true
-
-bind_host = {{ api_interface_address }}
-bind_port = {{ glance_api_port }}
-workers = {{ openstack_service_workers }}
-
-{% if orchestration_engine == 'KUBERNETES' %}
-registry_host = {{ glance_registry_host }}
-{% else %}
-registry_host = {{ kolla_internal_fqdn }}
-{% endif %}
-
-{% if enable_ceph | bool %}
-show_image_direct_url= True
-show_multiple_locations = True
-{% endif %}
-
-cinder_catalog_info = volume:cinder:internalURL
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[database]
-connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ glance_keystone_user }}
-password = {{ glance_keystone_password }}
-
-{# For Kolla-Ansible, generate the memcache servers based on the list of
-memcached servers in the inventory and memcached_servers should be un-set.
-For Kolla-Kubernetes, it is necessary to define the memcached_servers
-variable in globals.yml to set it to the Kubernetes service for memcached. #}
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
-
-[paste_deploy]
-flavor = keystone
-
-[glance_store]
-{% if enable_ceph | bool and glance_backend_ceph | bool %}
-default_store = rbd
-stores = rbd,http
-rbd_store_user = glance
-rbd_store_pool = {{ ceph_glance_pool_name }}
-rbd_store_chunk_size = 8
-{% else %}
-default_store = file
-filesystem_store_datadir = /var/lib/glance/images/
-{% endif %}
-
-[oslo_messaging_notifications]
-{% if enable_ceilometer | bool %}
-driver = messagingv2
-{% else %}
-driver = noop
-{% endif %}
diff --git a/ansible/roles/glance/templates/glance-api.json.j2 b/ansible/roles/glance/templates/glance-api.json.j2
deleted file mode 100644
index 2b6caebbdb..0000000000
--- a/ansible/roles/glance/templates/glance-api.json.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "command": "glance-api",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/glance-api.conf",
- "dest": "/etc/glance/glance-api.conf",
- "owner": "glance",
- "perm": "0600"
- }{% if glance_backend_ceph | bool %},
- {
- "source": "{{ container_config_directory }}/ceph.*",
- "dest": "/etc/ceph/",
- "owner": "glance",
- "perm": "0700"
- }
- {% endif %}
- ],
- "permissions": [
- {
- "path": "/var/lib/glance",
- "owner": "glance:glance",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/glance",
- "owner": "glance:glance",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/glance/templates/glance-registry.conf.j2 b/ansible/roles/glance/templates/glance-registry.conf.j2
deleted file mode 100644
index b982e8788c..0000000000
--- a/ansible/roles/glance/templates/glance-registry.conf.j2
+++ /dev/null
@@ -1,52 +0,0 @@
-[DEFAULT]
-debug = {{ glance_logging_debug }}
-
-# NOTE(elemoine) log_dir alone does not work for Glance
-log_file = /var/log/kolla/glance/registry.log
-
-bind_host = {{ api_interface_address }}
-bind_port = {{ glance_registry_port }}
-workers = {{ openstack_service_workers }}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[database]
-connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ glance_keystone_user }}
-password = {{ glance_keystone_password }}
-
-{# For Kolla-Ansible, generate the memcache servers based on the list of
-memcached servers in the inventory and memcached_servers should be un-set.
-For Kolla-Kubernetes, it is necessary to define the memcached_servers
-variable in globals.yml to set it to the Kubernetes service for memcached. #}
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
-
-[paste_deploy]
-flavor = keystone
-
-[oslo_messaging_notifications]
-{% if enable_ceilometer | bool %}
-driver = messagingv2
-{% else %}
-driver = noop
-{% endif %}
diff --git a/ansible/roles/glance/templates/glance-registry.json.j2 b/ansible/roles/glance/templates/glance-registry.json.j2
deleted file mode 100644
index bfd60c507a..0000000000
--- a/ansible/roles/glance/templates/glance-registry.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "glance-registry",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/glance-registry.conf",
- "dest": "/etc/glance/glance-registry.conf",
- "owner": "glance",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/glance",
- "owner": "glance:glance",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/gnocchi/defaults/main.yml b/ansible/roles/gnocchi/defaults/main.yml
deleted file mode 100644
index cd80d1695d..0000000000
--- a/ansible/roles/gnocchi/defaults/main.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-project_name: "gnocchi"
-
-
-####################
-# Ceph
-####################
-ceph_gnocchi_pool_type: "{{ ceph_pool_type }}"
-ceph_gnocchi_cache_mode: "{{ ceph_cache_mode }}"
-
-# Due to Ansible issues on include, you cannot override these variables. Please
-# override the variables they reference instead.
-gnocchi_pool_name: "{{ ceph_gnocchi_pool_name }}"
-gnocchi_pool_type: "{{ ceph_gnocchi_pool_type }}"
-gnocchi_cache_mode: "{{ ceph_gnocchi_cache_mode }}"
-
-
-####################
-# Database
-####################
-gnocchi_database_name: "gnocchi"
-gnocchi_database_user: "gnocchi"
-gnocchi_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-gnocchi_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-gnocchi-api"
-gnocchi_api_tag: "{{ openstack_release }}"
-gnocchi_api_image_full: "{{ gnocchi_api_image }}:{{ gnocchi_api_tag }}"
-
-gnocchi_statsd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-gnocchi-statsd"
-gnocchi_statsd_tag: "{{ openstack_release }}"
-gnocchi_statsd_image_full: "{{ gnocchi_statsd_image }}:{{ gnocchi_statsd_tag }}"
-
-gnocchi_metricd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-gnocchi-metricd"
-gnocchi_metricd_tag: "{{ openstack_release }}"
-gnocchi_metricd_image_full: "{{ gnocchi_metricd_image }}:{{ gnocchi_metricd_tag }}"
-
-####################
-# OpenStack
-####################
-gnocchi_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ gnocchi_api_port }}"
-gnocchi_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ gnocchi_api_port }}"
-gnocchi_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ gnocchi_api_port }}"
-
-gnocchi_logging_debug: "{{ openstack_logging_debug }}"
-
-gnocchi_keystone_user: "gnocchi"
-
-openstack_gnocchi_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/gnocchi/meta/main.yml b/ansible/roles/gnocchi/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/gnocchi/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/gnocchi/tasks/bootstrap.yml b/ansible/roles/gnocchi/tasks/bootstrap.yml
deleted file mode 100644
index 70611b85d5..0000000000
--- a/ansible/roles/gnocchi/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating gnocchi database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ gnocchi_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['gnocchi-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating gnocchi database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ gnocchi_database_name }}'
- password='{{ gnocchi_database_password }}'
- host='%'
- priv='{{ gnocchi_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['gnocchi-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/gnocchi/tasks/bootstrap_service.yml b/ansible/roles/gnocchi/tasks/bootstrap_service.yml
deleted file mode 100644
index 91e140cf7c..0000000000
--- a/ansible/roles/gnocchi/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Running gnocchi bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ gnocchi_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_gnocchi"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/gnocchi-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "gnocchi:/var/lib/gnocchi/"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['gnocchi-api'][0] }}"
diff --git a/ansible/roles/gnocchi/tasks/ceph.yml b/ansible/roles/gnocchi/tasks/ceph.yml
deleted file mode 100644
index e5c6d9a169..0000000000
--- a/ansible/roles/gnocchi/tasks/ceph.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- when: inventory_hostname in groups[item]
- with_items:
- - "gnocchi-api"
- - "gnocchi-metricd"
- - "gnocchi-statsd"
-
-- name: Copying over config(s)
- template:
- src: roles/ceph/templates/ceph.conf.j2
- dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
- when: inventory_hostname in groups[item]
- with_items:
- - "gnocchi-api"
- - "gnocchi-metricd"
- - "gnocchi-statsd"
-
-- include: ../../ceph_pools.yml
- vars:
- pool_name: "{{ gnocchi_pool_name }}"
- pool_type: "{{ gnocchi_pool_type }}"
- cache_mode: "{{ gnocchi_cache_mode }}"
-
-# TODO(SamYaple): Improve changed_when tests
-- name: Pulling cephx keyring
- command: docker exec ceph_mon ceph auth get-or-create client.gnocchi mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_gnocchi_pool_name }}, allow rwx pool={{ ceph_gnocchi_pool_name }}-cache'
- register: cephx_key
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- run_once: True
-
-- name: Pushing cephx keyring
- copy:
- content: "{{ cephx_key.stdout }}\n\r"
- dest: "{{ node_config_directory }}/{{ item }}/ceph.client.gnocchi.keyring"
- mode: "0600"
- when: inventory_hostname in groups[item]
- with_items:
- - "gnocchi-api"
- - "gnocchi-metricd"
- - "gnocchi-statsd"
diff --git a/ansible/roles/gnocchi/tasks/config.yml b/ansible/roles/gnocchi/tasks/config.yml
deleted file mode 100644
index 5e74dca266..0000000000
--- a/ansible/roles/gnocchi/tasks/config.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "gnocchi-api"
- - "gnocchi-statsd"
- - "gnocchi-metricd"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "gnocchi-api"
- - "gnocchi-metricd"
- - "gnocchi-statsd"
-
-- name: Copying over api-paste.ini
- merge_configs:
- sources:
- - "{{ role_path }}/templates/api-paste.ini.j2"
- - "{{ node_custom_config }}/gnocchi/api-paste.ini"
- - "{{ node_custom_config }}/gnocchi/{{ inventory_hostname }}/api-paste.ini"
- dest: "{{ node_config_directory }}/gnocchi-api/api-paste.ini"
-
-- name: Copying over gnocchi.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/gnocchi.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/gnocchi.conf"
- - "{{ node_custom_config }}/gnocchi/{{ item }}.conf"
- - "{{ node_custom_config }}/gnocchi/{{ inventory_hostname }}/gnocchi.conf"
- dest: "{{ node_config_directory }}/{{ item }}/gnocchi.conf"
- with_items:
- - "gnocchi-api"
- - "gnocchi-statsd"
- - "gnocchi-metricd"
-
-- name: Copying over wsgi-gnocchi.conf
- template:
- src: "wsgi-gnocchi.conf.j2"
- dest: "{{ node_config_directory }}/{{ item }}/wsgi-gnocchi.conf"
- with_items:
- - "gnocchi-api"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/gnocchi/policy.json"
- register: gnocchi_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/gnocchi/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "gnocchi-api"
- - "gnocchi-statsd"
- - "gnocchi-metricd"
- when:
- gnocchi_policy.stat.exists
diff --git a/ansible/roles/gnocchi/tasks/deploy.yml b/ansible/roles/gnocchi/tasks/deploy.yml
deleted file mode 100644
index 0fb27eca91..0000000000
--- a/ansible/roles/gnocchi/tasks/deploy.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- include: ceph.yml
- when:
- - enable_ceph|bool
- - gnocchi_backend_storage == 'ceph'
-
-- include: external_ceph.yml
- when:
- - enable_ceph|bool == False
- - gnocchi_backend_storage == 'ceph'
-
-- include: register.yml
- when: inventory_hostname in groups['gnocchi-api']
-
-- include: config.yml
- when: inventory_hostname in groups['gnocchi-statsd'] or
- inventory_hostname in groups['gnocchi-api'] or
- inventory_hostname in groups['gnocchi-metricd']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['gnocchi-api']
-
-- include: start.yml
- when: inventory_hostname in groups['gnocchi-statsd'] or
- inventory_hostname in groups['gnocchi-api'] or
- inventory_hostname in groups['gnocchi-metricd']
diff --git a/ansible/roles/gnocchi/tasks/external_ceph.yml b/ansible/roles/gnocchi/tasks/external_ceph.yml
deleted file mode 100644
index 623ed80079..0000000000
--- a/ansible/roles/gnocchi/tasks/external_ceph.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- when: inventory_hostname in groups[item]
- with_items:
- - "gnocchi-api"
- - "gnocchi-metricd"
- - "gnocchi-statsd"
-
-- name: Copy over ceph.conf file
- copy:
- src: "{{ node_custom_config }}/{{ item }}/ceph.conf"
- dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
- when: inventory_hostname in groups[item]
- with_items:
- - "gnocchi-api"
- - "gnocchi-metricd"
- - "gnocchi-statsd"
-
-- name: Copy over ceph gnocchi keyring
- copy:
- src: "{{ node_custom_config }}/{{ item }}/ceph.client.gnocchi.keyring"
- dest: "{{ node_config_directory }}/{{ item }}/ceph.client.gnocchi.keryring"
- when: inventory_hostname in groups[item]
- with_items:
- - "gnocchi-api"
- - "gnocchi-metricd"
- - "gnocchi-statsd"
diff --git a/ansible/roles/gnocchi/tasks/main.yml b/ansible/roles/gnocchi/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/gnocchi/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/gnocchi/tasks/precheck.yml b/ansible/roles/gnocchi/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/gnocchi/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/gnocchi/tasks/pull.yml b/ansible/roles/gnocchi/tasks/pull.yml
deleted file mode 100644
index caefd8aaf6..0000000000
--- a/ansible/roles/gnocchi/tasks/pull.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Pulling gnocchi-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ gnocchi_api_image_full }}"
- when: inventory_hostname in groups['gnocchi-api']
-
-- name: Pulling gnocchi-metricd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ gnocchi_metricd_image_full }}"
- when: inventory_hostname in groups['gnocchi-metricd']
-
-- name: Pulling gnocchi-statsd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ gnocchi_statsd_image_full }}"
- when: inventory_hostname in groups['gnocchi-statsd']
diff --git a/ansible/roles/gnocchi/tasks/reconfigure.yml b/ansible/roles/gnocchi/tasks/reconfigure.yml
deleted file mode 100644
index 39798455c8..0000000000
--- a/ansible/roles/gnocchi/tasks/reconfigure.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: gnocchi_api, group: gnocchi-api }
- - { name: gnocchi_metricd, group: gnocchi-metricd }
- - { name: gnocchi_statsd, group: gnocchi-statsd }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: gnocchi_api, group: gnocchi-api }
- - { name: gnocchi_metricd, group: gnocchi-metricd }
- - { name: gnocchi_statsd, group: gnocchi-statsd }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: gnocchi_api, group: gnocchi-api }
- - { name: gnocchi_metricd, group: gnocchi-metricd }
- - { name: gnocchi_statsd, group: gnocchi-statsd }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: gnocchi_api, group: gnocchi-api },
- { name: gnocchi_metricd, group: gnocchi-metricd },
- { name: gnocchi_statsd, group: gnocchi-statsd }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: gnocchi_api, group: gnocchi-api },
- { name: gnocchi_metricd, group: gnocchi-metricd },
- { name: gnocchi_statsd, group: gnocchi-statsd }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/gnocchi/tasks/register.yml b/ansible/roles/gnocchi/tasks/register.yml
deleted file mode 100644
index 5b60ff22f4..0000000000
--- a/ansible/roles/gnocchi/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the gnocchi service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=gnocchi
- service_type=metric
- description='OpenStack Metric Service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_gnocchi_auth }}' }}"
- -e "{'openstack_gnocchi_auth':{{ openstack_gnocchi_auth }}}"
- register: gnocchi_endpoint
- changed_when: "{{ gnocchi_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (gnocchi_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: gnocchi_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ gnocchi_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ gnocchi_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ gnocchi_public_endpoint }}'}
-
-- name: Creating the gnocchi project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=gnocchi
- password={{ gnocchi_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_gnocchi_auth }}' }}"
- -e "{'openstack_gnocchi_auth':{{ openstack_gnocchi_auth }}}"
- register: gnocchi_user
- changed_when: "{{ gnocchi_user.stdout.find('localhost | SUCCESS => ') != -1 and (gnocchi_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: gnocchi_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/gnocchi/tasks/start.yml b/ansible/roles/gnocchi/tasks/start.yml
deleted file mode 100644
index 5d0231fe47..0000000000
--- a/ansible/roles/gnocchi/tasks/start.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Starting gnocchi-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ gnocchi_api_image_full }}"
- name: "gnocchi_api"
- volumes:
- - "{{ node_config_directory }}/gnocchi-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "gnocchi:/var/lib/gnocchi/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['gnocchi-api']
-
-- name: Starting gnocchi-metricd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ gnocchi_metricd_image_full }}"
- name: "gnocchi_metricd"
- volumes:
- - "{{ node_config_directory }}/gnocchi-metricd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "gnocchi:/var/lib/gnocchi/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['gnocchi-metricd']
-
-- name: Starting gnocchi-statsd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ gnocchi_statsd_image_full }}"
- name: "gnocchi_statsd"
- volumes:
- - "{{ node_config_directory }}/gnocchi-statsd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "gnocchi:/var/lib/gnocchi/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['gnocchi-statsd']
diff --git a/ansible/roles/gnocchi/tasks/upgrade.yml b/ansible/roles/gnocchi/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/gnocchi/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/gnocchi/templates/api-paste.ini.j2 b/ansible/roles/gnocchi/templates/api-paste.ini.j2
deleted file mode 100644
index 22b8c44b0c..0000000000
--- a/ansible/roles/gnocchi/templates/api-paste.ini.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-# Use gnocchi+noauth in the pipeline if you don't want keystone authentication
-[pipeline:main]
-pipeline = gnocchi+auth
-
-[composite:gnocchi+noauth]
-use = egg:Paste#urlmap
-/ = gnocchiversions
-/v1 = gnocchiv1
-
-[composite:gnocchi+auth]
-use = egg:Paste#urlmap
-/ = gnocchiversions
-/v1 = gnocchiv1+auth
-
-[pipeline:gnocchiv1+auth]
-pipeline = healthcheck keystone_authtoken gnocchiv1
-
-[app:gnocchiversions]
-paste.app_factory = gnocchi.rest.app:app_factory
-root = gnocchi.rest.VersionsController
-
-[app:gnocchiv1]
-paste.app_factory = gnocchi.rest.app:app_factory
-root = gnocchi.rest.V1Controller
-
-[filter:healthcheck]
-paste.filter_factory = oslo_middleware:Healthcheck.factory
-path = /status
-backends = disable_by_file
-
-
-[filter:keystone_authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-oslo_config_project = gnocchi
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = gnocchi
diff --git a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
deleted file mode 100644
index 583e6e9a29..0000000000
--- a/ansible/roles/gnocchi/templates/gnocchi-api.json.j2
+++ /dev/null
@@ -1,49 +0,0 @@
-{% set gnocchi_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set gnocchi_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{
- "command": "{{ gnocchi_cmd }} -DFOREGROUND",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/gnocchi.conf",
- "dest": "/etc/gnocchi/gnocchi.conf",
- "owner": "gnocchi",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/api-paste.ini",
- "dest": "/etc/gnocchi/api-paste.ini",
- "owner": "gnocchi",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/wsgi-gnocchi.conf",
- "dest": "/etc/{{ gnocchi_dir }}/wsgi-gnocchi.conf",
- "owner": "gnocchi",
- "perm": "0644"
- }{% if gnocchi_backend_storage == 'ceph' %},
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "gnocchi",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.gnocchi.keyring",
- "dest": "/etc/ceph/ceph.client.gnocchi.keyring",
- "owner": "gnocchi",
- "perm": "0600"
- }{% endif %}
-
- ],
- "permissions": [
- {
- "path": "/var/lib/gnocchi",
- "owner": "gnocchi:gnocchi",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/gnocchi",
- "owner": "gnocchi:kolla"
- }
- ]
-}
diff --git a/ansible/roles/gnocchi/templates/gnocchi-metricd.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-metricd.json.j2
deleted file mode 100644
index 9188a1c8e8..0000000000
--- a/ansible/roles/gnocchi/templates/gnocchi-metricd.json.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-{
- "command": "gnocchi-metricd --logfile /var/log/kolla/gnocchi/gnocchi-metricd.log",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/gnocchi.conf",
- "dest": "/etc/gnocchi/gnocchi.conf",
- "owner": "gnocchi",
- "perm": "0600"
- }{% if gnocchi_backend_storage == 'ceph' %},
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "gnocchi",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.gnocchi.keyring",
- "dest": "/etc/ceph/ceph.client.gnocchi.keyring",
- "owner": "gnocchi",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/lib/gnocchi",
- "owner": "gnocchi:gnocchi",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/gnocchi",
- "owner": "gnocchi:kolla"
- },
- {
- "path": "/var/log/kolla/gnocchi/gnocchi-metricd.*",
- "owner": "gnocchi:gnocchi"
- }
- ]
-}
diff --git a/ansible/roles/gnocchi/templates/gnocchi-statsd.json.j2 b/ansible/roles/gnocchi/templates/gnocchi-statsd.json.j2
deleted file mode 100644
index 83073147b3..0000000000
--- a/ansible/roles/gnocchi/templates/gnocchi-statsd.json.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-{
- "command": "gnocchi-statsd --logfile /var/log/kolla/gnocchi/gnocchi-statsd.log",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/gnocchi.conf",
- "dest": "/etc/gnocchi/gnocchi.conf",
- "owner": "gnocchi",
- "perm": "0600"
- }{% if gnocchi_backend_storage == 'ceph' %},
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "gnocchi",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.client.gnocchi.keyring",
- "dest": "/etc/ceph/ceph.client.gnocchi.keyring",
- "owner": "gnocchi",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/lib/gnocchi",
- "owner": "gnocchi:gnocchi",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/gnocchi",
- "owner": "gnocchi:kolla"
- },
- {
- "path": "/var/log/kolla/gnocchi/gnocchi-statsd.*",
- "owner": "gnocchi:gnocchi"
- }
- ]
-}
diff --git a/ansible/roles/gnocchi/templates/gnocchi.conf.j2 b/ansible/roles/gnocchi/templates/gnocchi.conf.j2
deleted file mode 100644
index 30d8e33c1c..0000000000
--- a/ansible/roles/gnocchi/templates/gnocchi.conf.j2
+++ /dev/null
@@ -1,56 +0,0 @@
-[DEFAULT]
-debug = {{ gnocchi_logging_debug }}
-
-log_dir = /var/log/kolla/gnocchi
-
-
-[api]
-port = {{ gnocchi_api_port }}
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-middlewares = keystonemiddleware.auth_token.AuthProtocol
-
-
-[database]
-connection = mysql+pymysql://{{ gnocchi_database_user }}:{{ gnocchi_database_password }}@{{ gnocchi_database_address }}/{{ gnocchi_database_name }}
-max_pool_size = 50
-max_overflow = 1000
-max_retries = -1
-
-[statsd]
-resource_id = {{ gnocchi_resource_id }}
-user_id = {{ gnocchi_user_id }}
-project_id = {{ gnocchi_project_id }}
-archive_policy_name = low
-
-flush_delay=10
-
-
-[indexer]
-url = mysql+pymysql://{{ gnocchi_database_user }}:{{ gnocchi_database_password }}@{{ gnocchi_database_address }}/{{ gnocchi_database_name }}
-
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
-project_domain_id = default
-project_name = service
-user_domain_id = default
-username = {{ gnocchi_keystone_user }}
-password = {{ gnocchi_keystone_password }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[storage]
-{% if gnocchi_backend_storage == 'file' %}
-driver = file
-file_basepath = /var/lib/gnocchi
-{% elif gnocchi_backend_storage == 'ceph' %}
-driver = ceph
-ceph_pool = {{ gnocchi_pool_name }}
-ceph_username = gnocchi
-ceph_keyring = /etc/ceph/ceph.client.gnocchi.keyring
-ceph_conffile = /etc/ceph/ceph.conf
-{% endif %}
diff --git a/ansible/roles/gnocchi/templates/wsgi-gnocchi.conf.j2 b/ansible/roles/gnocchi/templates/wsgi-gnocchi.conf.j2
deleted file mode 100644
index 3257eb237e..0000000000
--- a/ansible/roles/gnocchi/templates/wsgi-gnocchi.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
-Listen {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ gnocchi_api_port }}
-
-
-
- ErrorLog "/var/log/kolla/gnocchi/gnocchi-api-error.log"
- CustomLog "/var/log/kolla/gnocchi/gnocchi-api-access.log" combined
- WSGIApplicationGroup %{GLOBAL}
- WSGIDaemonProcess gnocchi group=gnocchi processes={{ openstack_service_workers }} threads=1 user=gnocchi python-path={{ python_path }}
- WSGIProcessGroup gnocchi
- WSGIScriptAlias / "{{ python_path }}/gnocchi/rest/app.wsgi"
-
-
- Require all granted
-
-
-
diff --git a/ansible/roles/grafana/defaults/main.yml b/ansible/roles/grafana/defaults/main.yml
deleted file mode 100644
index e101161c52..0000000000
--- a/ansible/roles/grafana/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-project_name: "grafana"
-
-####################
-# Database
-####################
-grafana_database_name: "grafana"
-grafana_database_user: "grafana"
-grafana_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-##########
-# Grafana
-##########
-grafana_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-grafana"
-grafana_tag: "{{ openstack_release }}"
-grafana_image_full: "{{ grafana_image }}:{{ grafana_tag }}"
diff --git a/ansible/roles/grafana/meta/main.yml b/ansible/roles/grafana/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/grafana/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/grafana/tasks/bootstrap.yml b/ansible/roles/grafana/tasks/bootstrap.yml
deleted file mode 100644
index 492b516e20..0000000000
--- a/ansible/roles/grafana/tasks/bootstrap.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- name: Creating grafana database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ grafana_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['grafana'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating grafana database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ grafana_database_name }}'
- password='{{ grafana_database_password }}'
- host='%'
- priv='{{ grafana_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['grafana'][0] }}"
diff --git a/ansible/roles/grafana/tasks/config.yml b/ansible/roles/grafana/tasks/config.yml
deleted file mode 100644
index ca3ddabc23..0000000000
--- a/ansible/roles/grafana/tasks/config.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "grafana"
-
-- name: Copying over config.json files
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "grafana"
-
-- name: Copying over grafana.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/grafana.ini.j2"
- - "{{ node_custom_config }}/{{ item }}.ini"
- - "{{ node_custom_config }}/grafana/{{ inventory_hostname }}/{{ item }}.ini"
- dest: "{{ node_config_directory }}/grafana/grafana.ini"
- with_items:
- - "grafana"
diff --git a/ansible/roles/grafana/tasks/deploy.yml b/ansible/roles/grafana/tasks/deploy.yml
deleted file mode 100644
index 98b45ee90c..0000000000
--- a/ansible/roles/grafana/tasks/deploy.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- include: config.yml
- when: inventory_hostname in groups['grafana']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['grafana']
-
-- include: start.yml
- when: inventory_hostname in groups['grafana']
diff --git a/ansible/roles/grafana/tasks/main.yml b/ansible/roles/grafana/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/grafana/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/grafana/tasks/precheck.yml b/ansible/roles/grafana/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/grafana/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/grafana/tasks/pull.yml b/ansible/roles/grafana/tasks/pull.yml
deleted file mode 100644
index 376892ab61..0000000000
--- a/ansible/roles/grafana/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling grafana image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ grafana_image_full }}"
- when: inventory_hostname in groups['grafana']
diff --git a/ansible/roles/grafana/tasks/reconfigure.yml b/ansible/roles/grafana/tasks/reconfigure.yml
deleted file mode 100644
index b8ca4ed270..0000000000
--- a/ansible/roles/grafana/tasks/reconfigure.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "grafana"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups['grafana']
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec grafana /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups['grafana']
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "grafana"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups['grafana']
-
-- name: Remove the containers
- kolla_docker:
- name: "grafana"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE"
- - inventory_hostname in groups['grafana']
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "grafana"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups['grafana']
diff --git a/ansible/roles/grafana/tasks/start.yml b/ansible/roles/grafana/tasks/start.yml
deleted file mode 100644
index 0d3114dab7..0000000000
--- a/ansible/roles/grafana/tasks/start.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Starting grafana container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ grafana_image_full }}"
- name: "grafana"
- volumes:
- - "{{ node_config_directory }}/grafana/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "grafana:/var/lib/grafana/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['grafana']
diff --git a/ansible/roles/grafana/tasks/upgrade.yml b/ansible/roles/grafana/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/grafana/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/grafana/templates/grafana.ini.j2 b/ansible/roles/grafana/templates/grafana.ini.j2
deleted file mode 100644
index 6128a48d19..0000000000
--- a/ansible/roles/grafana/templates/grafana.ini.j2
+++ /dev/null
@@ -1,39 +0,0 @@
-[paths]
-data = /var/lib/grafana
-logs = /var/log/kolla/grafana
-plugins = /var/lib/grafana/plugins
-
-[server]
-protocol = http
-http_addr = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-http_port = {{ grafana_server_port }}
-
-router_logging = true
-
-static_root_path = public
-
-enable_gzip = false
-
-[database]
-type = mysql
-host = {{ grafana_database_address }}
-name = {{ grafana_database_name }}
-user = {{ grafana_database_user }}
-password = {{ grafana_database_password }}
-ssl_mode = disable
-
-[session]
-provider = mysql
-provider_config = grafana:{{ grafana_database_password }}@tcp({{ grafana_database_address }})/grafana
-
-cookie_name = grafana_sess
-cookie_secure = false
-session_life_time = 86400
-
-[analytics]
-reporting_enabled = false
-check_for_updates = false
-
-[security]
-admin_user = admin
-admin_password = {{ grafana_admin_password }}
diff --git a/ansible/roles/grafana/templates/grafana.json.j2 b/ansible/roles/grafana/templates/grafana.json.j2
deleted file mode 100644
index d9a75b8bc0..0000000000
--- a/ansible/roles/grafana/templates/grafana.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/usr/sbin/grafana-server --config=/etc/grafana/grafana.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/grafana.ini",
- "dest": "/etc/grafana/grafana.ini",
- "owner": "grafana",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/grafana",
- "owner": "grafana:grafana",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/grafana",
- "owner": "grafana:grafana",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/haproxy/defaults/main.yml b/ansible/roles/haproxy/defaults/main.yml
deleted file mode 100644
index 1b8c327ada..0000000000
--- a/ansible/roles/haproxy/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-project_name: "haproxy"
-
-
-####################
-# Docker
-####################
-keepalived_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-keepalived"
-keepalived_tag: "{{ openstack_release }}"
-keepalived_image_full: "{{ keepalived_image }}:{{ keepalived_tag }}"
-
-haproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-haproxy"
-haproxy_tag: "{{ openstack_release }}"
-haproxy_image_full: "{{ haproxy_image }}:{{ haproxy_tag }}"
diff --git a/ansible/roles/haproxy/meta/main.yml b/ansible/roles/haproxy/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/haproxy/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/haproxy/tasks/config.yml b/ansible/roles/haproxy/tasks/config.yml
deleted file mode 100644
index b23f19f3f0..0000000000
--- a/ansible/roles/haproxy/tasks/config.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: Setting sysctl values
- sysctl: name={{ item.name }} value={{ item.value }} sysctl_set=yes
- with_items:
- - { name: "net.ipv4.ip_nonlocal_bind", value: 1}
- - { name: "net.unix.max_dgram_qlen", value: 128}
- when: set_sysctl | bool
-
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "keepalived"
- - "haproxy"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "keepalived"
- - "haproxy"
-
-- name: Copying over haproxy.cfg
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/haproxy/haproxy.cfg"
- with_first_found:
- - "{{ node_custom_config }}/haproxy/{{ inventory_hostname }}/haproxy.cfg"
- - "{{ node_custom_config }}/haproxy/haproxy.cfg"
- - "haproxy.cfg.j2"
-
-- name: Copying over keepalived.conf
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/keepalived/keepalived.conf"
- with_first_found:
- - "{{ node_custom_config }}/keepalived/{{ inventory_hostname }}/keepalived.conf"
- - "{{ node_custom_config }}/keepalived/keepalived.conf"
- - "keepalived.conf.j2"
-
-- name: Copying over haproxy.pem
- copy:
- src: "{{ kolla_external_fqdn_cert }}"
- dest: "{{ node_config_directory }}/haproxy/{{ item }}"
- with_items:
- - "haproxy.pem"
- when: kolla_enable_tls_external | bool
diff --git a/ansible/roles/haproxy/tasks/deploy.yml b/ansible/roles/haproxy/tasks/deploy.yml
deleted file mode 100644
index ab9215b168..0000000000
--- a/ansible/roles/haproxy/tasks/deploy.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
- when: inventory_hostname in groups['haproxy']
-
-- include: start.yml
- when: inventory_hostname in groups['haproxy']
diff --git a/ansible/roles/haproxy/tasks/main.yml b/ansible/roles/haproxy/tasks/main.yml
deleted file mode 100644
index 0e3f30f464..0000000000
--- a/ansible/roles/haproxy/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- include: "{{ action }}.yml"
- when: inventory_hostname in groups['haproxy']
diff --git a/ansible/roles/haproxy/tasks/precheck.yml b/ansible/roles/haproxy/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/haproxy/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/haproxy/tasks/pull.yml b/ansible/roles/haproxy/tasks/pull.yml
deleted file mode 100644
index f005168064..0000000000
--- a/ansible/roles/haproxy/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling keepalived image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ keepalived_image_full }}"
- when: inventory_hostname in groups['haproxy']
-
-- name: Pulling haproxy image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ haproxy_image_full }}"
- when: inventory_hostname in groups['haproxy']
diff --git a/ansible/roles/haproxy/tasks/reconfigure.yml b/ansible/roles/haproxy/tasks/reconfigure.yml
deleted file mode 100644
index ea2b3dfc02..0000000000
--- a/ansible/roles/haproxy/tasks/reconfigure.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: haproxy, group: haproxy }
- - { name: keepalived, group: haproxy }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: haproxy, group: haproxy }
- - { name: keepalived, group: haproxy }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: haproxy, group: haproxy }
- - { name: keepalived, group: haproxy }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: haproxy, group: haproxy },
- { name: keepalived, group: haproxy }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-# container_envs.results is a list of two elements, first corresponds to
-# haproxy container result and second to keepalived container result and the
-# same applicable for check_results.results
-- name: Ensuring latest haproxy config is used
- command: docker exec haproxy /usr/local/bin/kolla_ensure_haproxy_latest_config
- register: status
- changed_when: status.stdout.find('changed') != -1
- when:
- - config_strategy == 'COPY_ALWAYS'
- - container_envs.results[0]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - check_results.results[0]['rc'] == 1
- - inventory_hostname in groups['haproxy']
-
-- name: Restart keepalived container
- kolla_docker:
- name: "keepalived"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - container_envs.results[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - check_results.results[1]['rc'] == 1
- - inventory_hostname in groups['haproxy']
diff --git a/ansible/roles/haproxy/tasks/start.yml b/ansible/roles/haproxy/tasks/start.yml
deleted file mode 100644
index 692de8591c..0000000000
--- a/ansible/roles/haproxy/tasks/start.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Starting haproxy container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ haproxy_image_full }}"
- name: "haproxy"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/haproxy/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "haproxy_socket:/var/lib/kolla/haproxy/"
- - "heka_socket:/var/lib/kolla/heka/"
-
-- name: Starting keepalived container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ keepalived_image_full }}"
- name: "keepalived"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/keepalived/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "haproxy_socket:/var/lib/kolla/haproxy/"
- - "heka_socket:/var/lib/kolla/heka/"
-
-- name: Ensuring latest haproxy config is used
- command: docker exec haproxy /usr/local/bin/kolla_ensure_haproxy_latest_config
- register: status
- changed_when: status.stdout.find('changed') != -1
-
-- name: Waiting for virtual IP to appear
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ database_port }}"
diff --git a/ansible/roles/haproxy/tasks/upgrade.yml b/ansible/roles/haproxy/tasks/upgrade.yml
deleted file mode 100644
index ce1adcfa36..0000000000
--- a/ansible/roles/haproxy/tasks/upgrade.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- include: config.yml
-
-- set_fact: secondary_addresses={{ hostvars[inventory_hostname]['ansible_' + api_interface].get('ipv4_secondaries', []) | map(attribute='address') | list }}
-
-- name: Stopping all slave keepalived containers
- kolla_docker:
- action: "stop_container"
- common_options: "{{ docker_common_options }}"
- name: "keepalived"
- when: kolla_internal_vip_address not in secondary_addresses
-
-# Upgrading master keepalived and haproxy
-- include: start.yml
- when: kolla_internal_vip_address in secondary_addresses
-
-# Upgrading slave keepalived and haproxy
-- include: start.yml
- when: kolla_internal_vip_address not in secondary_addresses
diff --git a/ansible/roles/haproxy/templates/haproxy.cfg.j2 b/ansible/roles/haproxy/templates/haproxy.cfg.j2
deleted file mode 100644
index 23244582dd..0000000000
--- a/ansible/roles/haproxy/templates/haproxy.cfg.j2
+++ /dev/null
@@ -1,649 +0,0 @@
-{% set tls_bind_info = 'ssl crt /etc/haproxy/haproxy.pem' if kolla_enable_tls_external | bool else '' %}
-global
- chroot /var/lib/haproxy
- user haproxy
- group haproxy
- daemon
- log /var/lib/kolla/heka/log local0
- maxconn 4000
- stats socket /var/lib/kolla/haproxy/haproxy.sock
-{% if kolla_enable_tls_external | bool %}
- ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES
- ssl-default-bind-options no-sslv3 no-tlsv10
- tune.ssl.default-dh-param 4096
-{% endif %}
-
-defaults
- log global
- mode http
- option redispatch
- option httplog
- option forwardfor
- retries 3
- timeout http-request 10s
- timeout queue 1m
- timeout connect 10s
- timeout client 1m
- timeout server 1m
- timeout check 10s
-
-listen stats
- bind {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ haproxy_stats_port }}
- mode http
- stats enable
- stats uri /
- stats refresh 15s
- stats realm Haproxy\ Stats
- stats auth {{ haproxy_user }}:{{ haproxy_password }}
-
-{% if enable_rabbitmq | bool %}
-listen rabbitmq_management
- bind {{ kolla_internal_vip_address }}:{{ rabbitmq_management_port }}
-{% for host in groups['rabbitmq'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_management_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-
-{% if enable_mongodb | bool %}
-listen mongodb
- bind {{ kolla_internal_vip_address }}:{{ mongodb_port }}
-{% for host in groups['mongodb'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ mongodb_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-
-{% if enable_keystone | bool %}
-listen keystone_internal
- bind {{ kolla_internal_vip_address }}:{{ keystone_public_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['keystone'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ keystone_public_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen keystone_external
- bind {{ kolla_external_vip_address }}:{{ keystone_public_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['keystone'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ keystone_public_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-
-listen keystone_admin
- bind {{ kolla_internal_vip_address }}:{{ keystone_admin_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['keystone'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ keystone_admin_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-
-{% if enable_glance | bool %}
-listen glance_registry
- bind {{ kolla_internal_vip_address }}:{{ glance_registry_port }}
-{% for host in groups['glance-registry'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ glance_registry_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen glance_api
- bind {{ kolla_internal_vip_address }}:{{ glance_api_port }}
-{% for host in groups['glance-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ glance_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen glance_api_external
- bind {{ kolla_external_vip_address }}:{{ glance_api_port }} {{ tls_bind_info }}
-{% for host in groups['glance-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ glance_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_nova | bool %}
-listen nova_api
- bind {{ kolla_internal_vip_address }}:{{ nova_api_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['nova-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen nova_metadata
- bind {{ kolla_internal_vip_address }}:{{ nova_metadata_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['nova-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_metadata_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-
-{% if nova_console == 'novnc' %}
-listen nova_novncproxy
- bind {{ kolla_internal_vip_address }}:{{ nova_novncproxy_port }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['nova-novncproxy'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_novncproxy_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% elif nova_console == 'spice' %}
-listen nova_spicehtml5proxy
- bind {{ kolla_internal_vip_address }}:{{ nova_spicehtml5proxy_port }}
-{% for host in groups['nova-spicehtml5proxy'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_spicehtml5proxy_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen nova_api_external
- bind {{ kolla_external_vip_address }}:{{ nova_api_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['nova-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen nova_metadata_external
- bind {{ kolla_external_vip_address }}:{{ nova_metadata_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['nova-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_metadata_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-
-{% if nova_console == 'novnc' %}
-listen nova_novncproxy_external
- bind {{ kolla_external_vip_address }}:{{ nova_novncproxy_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['nova-novncproxy'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_novncproxy_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% elif nova_console == 'spice' %}
-listen nova_spicehtml5proxy_external
- bind {{ kolla_external_vip_address }}:{{ nova_spicehtml5proxy_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['nova-spicehtml5proxy'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ nova_spicehtml5proxy_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endif %}
-
-{% if enable_neutron | bool %}
-listen neutron_server
- bind {{ kolla_internal_vip_address }}:{{ neutron_server_port }}
-{% for host in groups['neutron-server'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ neutron_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen neutron_server_external
- bind {{ kolla_external_vip_address }}:{{ neutron_server_port }} {{ tls_bind_info }}
-{% for host in groups['neutron-server'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ neutron_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_horizon | bool %}
-listen horizon
- bind {{ kolla_internal_vip_address }}:80
- balance source
- http-request del-header X-Forwarded-Proto
-{% for host in groups['horizon'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:80 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-{% if haproxy_enable_external_vip | bool %}
-{% if kolla_enable_tls_external | bool %}
-listen horizon_external
- bind {{ kolla_external_vip_address }}:443 {{ tls_bind_info }}
- balance source
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['horizon'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:80 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-frontend horizon_external_redirect
- bind {{ kolla_external_vip_address }}:80
- redirect scheme https code 301 if !{ ssl_fc }
-{% else %}
-listen horizon_external
- bind {{ kolla_external_vip_address }}:80
-{% for host in groups['horizon'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:80 check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endif %}
-
-{% if enable_cinder | bool %}
-listen cinder_api
- bind {{ kolla_internal_vip_address }}:{{ cinder_api_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['cinder-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ cinder_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen cinder_api_external
- bind {{ kolla_external_vip_address }}:{{ cinder_api_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['cinder-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ cinder_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_cloudkitty | bool %}
-listen cloudkitty_api
- bind {{ kolla_internal_vip_address }}:{{ cloudkitty_api_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['cloudkitty-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ cloudkitty_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen cloudkitty_api_external
- bind {{ kolla_external_vip_address }}:{{ cloudkitty_api_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['cloudkitty-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ cloudkitty_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_heat | bool %}
-listen heat_api
- bind {{ kolla_internal_vip_address }}:{{ heat_api_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['heat-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ heat_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen heat_api_cfn
- bind {{ kolla_internal_vip_address }}:{{ heat_api_cfn_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['heat-api-cfn'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ heat_api_cfn_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen heat_api_external
- bind {{ kolla_external_vip_address }}:{{ heat_api_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['heat-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ heat_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen heat_api_cfn_external
- bind {{ kolla_external_vip_address }}:{{ heat_api_cfn_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['heat-api-cfn'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ heat_api_cfn_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_grafana | bool %}
-listen grafana_server
- bind {{ kolla_internal_vip_address }}:{{ grafana_server_port }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['grafana'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ grafana_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen grafana_server_external
- bind {{ kolla_external_vip_address }}:{{ grafana_server_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['grafana'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ grafana_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_ironic | bool %}
-listen ironic_api
- bind {{ kolla_internal_vip_address }}:{{ ironic_api_port }}
-{% for host in groups['ironic-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ ironic_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen ironic_api_external
- bind {{ kolla_external_vip_address }}:{{ ironic_api_port }} {{ tls_bind_info }}
-{% for host in groups['ironic-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ ironic_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_senlin | bool %}
-listen senlin_api
- bind {{ kolla_internal_vip_address }}:{{ senlin_api_port }}
-{% for host in groups['senlin-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ senlin_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen senlin_api_external
- bind {{ kolla_external_vip_address }}:{{ senlin_api_port }} {{ tls_bind_info }}
-{% for host in groups['senlin-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ senlin_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_swift | bool %}
-listen swift_api
- bind {{ kolla_internal_vip_address }}:{{ swift_proxy_server_port }}
-{% for host in groups['swift-proxy-server'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ swift_proxy_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen swift_api_external
- bind {{ kolla_external_vip_address }}:{{ swift_proxy_server_port }} {{ tls_bind_info }}
-{% for host in groups['swift-proxy-server'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ swift_proxy_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_murano | bool %}
-listen murano_api
- bind {{ kolla_internal_vip_address }}:{{ murano_api_port }}
-{% for host in groups['murano-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ murano_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen murano_api_external
- bind {{ kolla_external_vip_address }}:{{ murano_api_port }} {{ tls_bind_info }}
-{% for host in groups['murano-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ murano_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_manila | bool %}
-listen manila_api
- bind {{ kolla_internal_vip_address }}:{{ manila_api_port }}
-{% for host in groups['manila-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ manila_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen manila_api_external
- bind {{ kolla_external_vip_address }}:{{ manila_api_port }} {{ tls_bind_info }}
-{% for host in groups['manila-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ manila_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_magnum | bool %}
-listen magnum_api
- bind {{ kolla_internal_vip_address }}:{{ magnum_api_port }}
-{% for host in groups['magnum-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ magnum_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen magnum_api_external
- bind {{ kolla_external_vip_address }}:{{ magnum_api_port }} {{ tls_bind_info }}
-{% for host in groups['magnum-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ magnum_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_watcher | bool and enable_ceilometer | bool %}
-listen watcher_api
- bind {{ kolla_internal_vip_address }}:{{ watcher_api_port }}
-{% for host in groups['watcher-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ watcher_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen watcher_api_external
- bind {{ kolla_external_vip_address }}:{{ watcher_api_port }} {{ tls_bind_info }}
-{% for host in groups['watcher-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ watcher_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_sahara | bool %}
-listen sahara_api
- bind {{ kolla_internal_vip_address }}:{{ sahara_api_port }}
-{% for host in groups['sahara-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ sahara_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen sahara_api_external
- bind {{ kolla_external_vip_address }}:{{ sahara_api_port }} {{ tls_bind_info }}
-{% for host in groups['sahara-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ sahara_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_searchlight | bool %}
-listen searchlight_api
- bind {{ kolla_internal_vip_address }}:{{ searchlight_api_port }}
-{% for host in groups['searchlight-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ searchlight_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen searchlight_api_external
- bind {{ kolla_external_vip_address }}:{{ searchlight_api_port }} {{ tls_bind_info }}
-{% for host in groups['searchlight-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ searchlight_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_ceph | bool and enable_ceph_rgw | bool %}
-listen radosgw
- bind {{ kolla_internal_vip_address }}:{{ rgw_port }}
-{% for host in groups['ceph-rgw'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rgw_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen radosgw_external
- bind {{ kolla_external_vip_address }}:{{ rgw_port }} {{ tls_bind_info }}
-{% for host in groups['ceph-rgw'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rgw_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_kibana | bool %}
-
-userlist kibanauser
- user {{ kibana_user }} insecure-password {{ kibana_password }}
-
-listen kibana
- bind {{ kolla_internal_vip_address }}:{{ kibana_server_port }}
- acl auth_acl http_auth(kibanauser)
- http-request auth realm basicauth unless auth_acl
-{% for host in groups['kibana'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ kibana_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen kibana_external
- bind {{ kolla_external_vip_address }}:{{ kibana_server_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
- acl auth_acl http_auth(kibanauser)
- http-request auth realm basicauth unless auth_acl
-{% for host in groups['kibana'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ kibana_server_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_gnocchi | bool %}
-listen gnocchi_api
- bind {{ kolla_internal_vip_address }}:{{ gnocchi_api_port }}
-{% for host in groups['gnocchi-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ gnocchi_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen gnocchi_api_external
- bind {{ kolla_external_vip_address }}:{{ gnocchi_api_port }} {{ tls_bind_info }}
-{% for host in groups['gnocchi-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ gnocchi_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_elasticsearch | bool %}
-listen elasticsearch
- option dontlog-normal
- bind {{ kolla_internal_vip_address }}:{{ elasticsearch_port }}
-{% for host in groups['elasticsearch'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ elasticsearch_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-
-{% if enable_barbican | bool %}
-listen barbican_api
- bind {{ kolla_internal_vip_address }}:{{ barbican_api_port }}
-{% for host in groups['barbican-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ barbican_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen barbican_api_external
- bind {{ kolla_external_vip_address }}:{{ barbican_api_port }} {{ tls_bind_info }}
-{% for host in groups['barbican-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ barbican_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_ceilometer | bool %}
-listen ceilometer_api
- bind {{ kolla_internal_vip_address }}:{{ ceilometer_api_port }}
-{% for host in groups['ceilometer-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ ceilometer_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen ceilometer_api_external
- bind {{ kolla_external_vip_address }}:{{ ceilometer_api_port }} {{ tls_bind_info }}
-{% for host in groups['ceilometer-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ ceilometer_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_aodh | bool %}
-listen aodh_api
- bind {{ kolla_internal_vip_address }}:{{ aodh_api_port }}
-{% for host in groups['aodh-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ aodh_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen aodh_api_external
- bind {{ kolla_external_vip_address }}:{{ aodh_api_port }} {{ tls_bind_info }}
-{% for host in groups['aodh-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ aodh_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_congress | bool %}
-listen congress_api
- bind {{ kolla_internal_vip_address }}:{{ congress_api_port }}
- http-request del-header X-Forwarded-Proto
-{% for host in groups['congress-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ congress_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen congress_api_external
- bind {{ kolla_external_vip_address }}:{{ congress_api_port }} {{ tls_bind_info }}
- http-request del-header X-Forwarded-Proto
- http-request set-header X-Forwarded-Proto https if { ssl_fc }
-{% for host in groups['congress-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ congress_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_designate | bool %}
-listen designate_api
- bind {{ kolla_internal_vip_address }}:{{ designate_api_port }}
-{% for host in groups['designate-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ designate_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen designate_api_external
- bind {{ kolla_external_vip_address }}:{{ designate_api_port }} {{ tls_bind_info }}
-{% for host in groups['designate-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ designate_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if enable_mistral | bool %}
-listen mistral_api
- bind {{ kolla_internal_vip_address }}:{{ mistral_api_port }}
-{% for host in groups['mistral-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ mistral_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% if haproxy_enable_external_vip | bool %}
-
-listen mistral_api_external
- bind {{ kolla_external_vip_address }}:{{ mistral_api_port }} {{ tls_bind_info }}
-{% for host in groups['mistral-api'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ mistral_api_port }} check inter 2000 rise 2 fall 5
-{% endfor %}
-{% endif %}
-{% endif %}
-
-# (NOTE): This defaults section deletes forwardfor as recommended by:
-# https://marc.info/?l=haproxy&m=141684110710132&w=1
-
-defaults
- log global
- mode http
- option redispatch
- option httplog
- retries 3
- timeout http-request 10s
- timeout queue 1m
- timeout connect 10s
- timeout client 1m
- timeout server 1m
- timeout check 10s
-
-{% if enable_mariadb | bool %}
-listen mariadb
- mode tcp
- option tcplog
- option tcpka
- option mysql-check user haproxy post-41
- bind {{ kolla_internal_vip_address }}:{{ mariadb_port }}
-{% for host in groups['mariadb'] %}
- server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ mariadb_port }} check inter 2000 rise 2 fall 5 {% if not loop.first %}backup{% endif %}
-
-{% endfor %}
-{% endif %}
diff --git a/ansible/roles/haproxy/templates/haproxy.json.j2 b/ansible/roles/haproxy/templates/haproxy.json.j2
deleted file mode 100644
index c95dd77031..0000000000
--- a/ansible/roles/haproxy/templates/haproxy.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/haproxy.cfg",
- "dest": "/etc/haproxy/haproxy.cfg",
- "owner": "root",
- "perm": "0644"
- },
- {
- "source": "{{ container_config_directory }}/haproxy.pem",
- "dest": "/etc/haproxy/haproxy.pem",
- "owner": "root",
- "perm": "0600",
- "optional": {{ (not kolla_enable_tls_external | bool) | string | lower }}
- }
- ]
-}
diff --git a/ansible/roles/haproxy/templates/keepalived.conf.j2 b/ansible/roles/haproxy/templates/keepalived.conf.j2
deleted file mode 100644
index 8307a2f503..0000000000
--- a/ansible/roles/haproxy/templates/keepalived.conf.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-vrrp_script check_alive {
- script "/check_alive.sh"
- interval 2
- fall 2
- rise 10
-}
-
-vrrp_instance kolla_internal_vip_{{ keepalived_virtual_router_id }} {
- state BACKUP
- nopreempt
- interface {{ api_interface }}
- virtual_router_id {{ keepalived_virtual_router_id }}
- priority {{ groups['haproxy'].index(inventory_hostname) + 1 }}
- advert_int 1
- virtual_ipaddress {
- {{ kolla_internal_vip_address }} dev {{ api_interface }}
-{% if haproxy_enable_external_vip | bool %}
- {{ kolla_external_vip_address }} dev {{ kolla_external_vip_interface }}
-{% endif %}
- }
-{% if haproxy_enable_external_vip | bool and api_interface != kolla_external_vip_interface %}
- track_interface {
- {{ kolla_external_vip_interface }}
- }
-{% endif %}
- authentication {
- auth_type PASS
- auth_pass {{ keepalived_password }}
- }
- track_script {
- check_alive
- }
-}
diff --git a/ansible/roles/haproxy/templates/keepalived.json.j2 b/ansible/roles/haproxy/templates/keepalived.json.j2
deleted file mode 100644
index 63a760c675..0000000000
--- a/ansible/roles/haproxy/templates/keepalived.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "/usr/sbin/keepalived -nld -p /run/keepalived.pid",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/keepalived.conf",
- "dest": "/etc/keepalived/keepalived.conf",
- "owner": "root",
- "perm": "0644"
- }
- ]
-}
diff --git a/ansible/roles/heat/defaults/main.yml b/ansible/roles/heat/defaults/main.yml
deleted file mode 100644
index 5b6999cdec..0000000000
--- a/ansible/roles/heat/defaults/main.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-project_name: "heat"
-
-####################
-# Database
-####################
-heat_database_name: "heat"
-heat_database_user: "heat"
-heat_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-heat_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-api"
-heat_api_tag: "{{ openstack_release }}"
-heat_api_image_full: "{{ heat_api_image }}:{{ heat_api_tag }}"
-
-heat_api_cfn_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-api-cfn"
-heat_api_cfn_tag: "{{ openstack_release }}"
-heat_api_cfn_image_full: "{{ heat_api_cfn_image }}:{{ heat_api_cfn_tag }}"
-
-heat_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-heat-engine"
-heat_engine_tag: "{{ openstack_release }}"
-heat_engine_image_full: "{{ heat_engine_image }}:{{ heat_engine_tag }}"
-
-####################
-# OpenStack
-####################
-heat_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s"
-heat_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s"
-heat_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_port }}/v1/%(tenant_id)s"
-heat_cfn_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_cfn_port }}/v1"
-heat_cfn_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ heat_api_cfn_port }}/v1"
-heat_cfn_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}/v1"
-
-heat_logging_debug: "{{ openstack_logging_debug }}"
-
-heat_keystone_user: "heat"
-heat_stack_user_role: "heat_stack_user"
-heat_stack_owner_role: "heat_stack_owner"
-
-openstack_heat_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/heat/meta/main.yml b/ansible/roles/heat/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/heat/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/heat/tasks/bootstrap.yml b/ansible/roles/heat/tasks/bootstrap.yml
deleted file mode 100644
index 50c74920b2..0000000000
--- a/ansible/roles/heat/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Heat database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ heat_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['heat-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Heat database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ heat_database_name }}'
- password='{{ heat_database_password }}'
- host='%'
- priv='{{ heat_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['heat-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/heat/tasks/bootstrap_service.yml b/ansible/roles/heat/tasks/bootstrap_service.yml
deleted file mode 100644
index f3d874c1a5..0000000000
--- a/ansible/roles/heat/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running Heat bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- OS_AUTH_URL: "{{ openstack_auth.auth_url }}"
- OS_IDENTITY_API_VERSION: "3"
- OS_USERNAME: "{{ openstack_auth.username }}"
- OS_PASSWORD: "{{ openstack_auth.password }}"
- OS_PROJECT_NAME: "{{ openstack_auth.project_name }}"
- HEAT_DOMAIN_ADMIN_PASSWORD: "{{ heat_domain_admin_password }}"
- image: "{{ heat_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_heat"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/heat-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['heat-api'][0] }}"
diff --git a/ansible/roles/heat/tasks/config.yml b/ansible/roles/heat/tasks/config.yml
deleted file mode 100644
index 125062f905..0000000000
--- a/ansible/roles/heat/tasks/config.yml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "heat-api"
- - "heat-api-cfn"
- - "heat-engine"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "heat-api"
- - "heat-api-cfn"
- - "heat-engine"
-
-- name: Copying over the heat-engine environment file
- template:
- src: "_deprecated.yaml"
- dest: "{{ node_config_directory }}/{{ item }}/_deprecated.yaml"
- with_items:
- - "heat-engine"
-
-- name: Copying over heat.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/heat.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/heat.conf"
- - "{{ node_custom_config }}/heat/{{ item }}.conf"
- - "{{ node_custom_config }}/heat/{{ inventory_hostname }}/heat.conf"
- dest: "{{ node_config_directory }}/{{ item }}/heat.conf"
- with_items:
- - "heat-api"
- - "heat-api-cfn"
- - "heat-engine"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/heat/policy.json"
- register: heat_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/heat/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "heat-api"
- - "heat-api-cfn"
- - "heat-engine"
- when:
- heat_policy.stat.exists
diff --git a/ansible/roles/heat/tasks/deploy.yml b/ansible/roles/heat/tasks/deploy.yml
deleted file mode 100644
index a27fbf2ccc..0000000000
--- a/ansible/roles/heat/tasks/deploy.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['heat-api']
-
-- include: config.yml
- when: inventory_hostname in groups['heat-api'] or
- inventory_hostname in groups['heat-api-cfn'] or
- inventory_hostname in groups['heat-engine']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['heat-api']
-
-- include: start.yml
- when: inventory_hostname in groups['heat-api'] or
- inventory_hostname in groups['heat-api-cfn'] or
- inventory_hostname in groups['heat-engine']
diff --git a/ansible/roles/heat/tasks/main.yml b/ansible/roles/heat/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/heat/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/heat/tasks/precheck.yml b/ansible/roles/heat/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/heat/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/heat/tasks/pull.yml b/ansible/roles/heat/tasks/pull.yml
deleted file mode 100644
index b671b7b253..0000000000
--- a/ansible/roles/heat/tasks/pull.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Pulling heat-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ heat_api_image_full }}"
- when: inventory_hostname in groups['heat-api']
-
-- name: Pulling heat-api-cfn image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ heat_api_cfn_image_full }}"
- when: inventory_hostname in groups['heat-api-cfn']
-
-- name: Pulling heat-engine image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ heat_engine_image_full }}"
- when: inventory_hostname in groups['heat-engine']
diff --git a/ansible/roles/heat/tasks/reconfigure.yml b/ansible/roles/heat/tasks/reconfigure.yml
deleted file mode 100644
index 1e4bfeacd5..0000000000
--- a/ansible/roles/heat/tasks/reconfigure.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: heat_api, group: heat-api }
- - { name: heat_api_cfn, group: heat-api-cfn }
- - { name: heat_engine, group: heat-engine }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: heat_api, group: heat-api }
- - { name: heat_api_cfn, group: heat-api-cfn }
- - { name: heat_engine, group: heat-engine }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: heat_api, group: heat-api }
- - { name: heat_api_cfn, group: heat-api-cfn }
- - { name: heat_engine, group: heat-engine }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: heat_api, group: heat-api },
- { name: heat_api_cfn, group: heat-api-cfn },
- { name: heat_engine, group: heat-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: heat_api, group: heat-api },
- { name: heat_api_cfn, group: heat-api-cfn },
- { name: heat_engine, group: heat-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/heat/tasks/register.yml b/ansible/roles/heat/tasks/register.yml
deleted file mode 100644
index 70ee29f981..0000000000
--- a/ansible/roles/heat/tasks/register.yml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-- name: Creating the Heat service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name={{ item.service_name }}
- service_type={{ item.service_type }}
- description='Openstack Orchestration'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_heat_auth }}' }}"
- -e "{'openstack_heat_auth':{{ openstack_heat_auth }}}"
- register: heat_endpoint
- changed_when: "{{ heat_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (heat_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: heat_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ heat_admin_endpoint }}', 'service_name': 'heat', 'service_type': 'orchestration'}
- - {'interface': 'internal', 'url': '{{ heat_internal_endpoint }}', 'service_name': 'heat', 'service_type': 'orchestration'}
- - {'interface': 'public', 'url': '{{ heat_public_endpoint }}', 'service_name': 'heat', 'service_type': 'orchestration'}
- - {'interface': 'admin', 'url': '{{ heat_cfn_admin_endpoint }}', 'service_name': 'heat-cfn', 'service_type': 'cloudformation'}
- - {'interface': 'internal', 'url': '{{ heat_cfn_internal_endpoint }}', 'service_name': 'heat-cfn', 'service_type': 'cloudformation'}
- - {'interface': 'public', 'url': '{{ heat_cfn_public_endpoint }}', 'service_name': 'heat-cfn', 'service_type': 'cloudformation'}
-
-- name: Creating the Heat project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=heat
- password={{ heat_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_heat_auth }}' }}"
- -e "{'openstack_heat_auth':{{ openstack_heat_auth }}}"
- register: heat_user
- changed_when: "{{ heat_user.stdout.find('localhost | SUCCESS => ') != -1 and (heat_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: heat_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
-
-- name: Creating the heat_stack_user role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m os_keystone_role
- -a "name={{ heat_stack_user_role }}
- auth={{ '{{ openstack_heat_auth }}' }}"
- -e "{'openstack_heat_auth':{{ openstack_heat_auth }}}"
- register: heat_stack_user_role_result
- changed_when: "{{ heat_stack_user_role_result.stdout.find('localhost | SUCCESS => ') != -1 and (heat_stack_user_role_result.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: heat_stack_user_role_result.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
-
-- name: Creating the heat_stack_owner role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m os_keystone_role
- -a "name={{ heat_stack_owner_role }}
- auth={{ '{{ openstack_heat_auth }}' }}"
- -e "{'openstack_heat_auth':{{ openstack_heat_auth }}}"
- register: heat_stack_owner_role_result
- changed_when: "{{ heat_stack_owner_role_result.stdout.find('localhost | SUCCESS => ') != -1 and (heat_stack_owner_role_result.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: heat_stack_owner_role_result.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/heat/tasks/start.yml b/ansible/roles/heat/tasks/start.yml
deleted file mode 100644
index c1238e74e3..0000000000
--- a/ansible/roles/heat/tasks/start.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Starting heat-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ heat_api_image_full }}"
- name: "heat_api"
- volumes:
- - "{{ node_config_directory }}/heat-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['heat-api']
-
-- name: Starting heat-api-cfn container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ heat_api_cfn_image_full }}"
- name: "heat_api_cfn"
- volumes:
- - "{{ node_config_directory }}/heat-api-cfn/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['heat-api-cfn']
-
-- name: Starting heat-engine container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ heat_engine_image_full }}"
- name: "heat_engine"
- volumes:
- - "{{ node_config_directory }}/heat-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['heat-engine']
diff --git a/ansible/roles/heat/tasks/upgrade.yml b/ansible/roles/heat/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/heat/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/heat/templates/_deprecated.yaml b/ansible/roles/heat/templates/_deprecated.yaml
deleted file mode 100644
index 76cc8e387c..0000000000
--- a/ansible/roles/heat/templates/_deprecated.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-resource_registry:
- "OS::Heat::HARestarter":
- "OS::Heat::SoftwareDeployments":
- "OS::Heat::StructuredDeployments":
diff --git a/ansible/roles/heat/templates/heat-api-cfn.json.j2 b/ansible/roles/heat/templates/heat-api-cfn.json.j2
deleted file mode 100644
index 30f266582d..0000000000
--- a/ansible/roles/heat/templates/heat-api-cfn.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "heat-api-cfn",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/heat.conf",
- "dest": "/etc/heat/heat.conf",
- "owner": "heat",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/heat",
- "owner": "heat:heat",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/heat/templates/heat-api.json.j2 b/ansible/roles/heat/templates/heat-api.json.j2
deleted file mode 100644
index c198cbf4be..0000000000
--- a/ansible/roles/heat/templates/heat-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "heat-api",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/heat.conf",
- "dest": "/etc/heat/heat.conf",
- "owner": "heat",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/heat",
- "owner": "heat:heat",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/heat/templates/heat-engine.json.j2 b/ansible/roles/heat/templates/heat-engine.json.j2
deleted file mode 100644
index 40d76a08f0..0000000000
--- a/ansible/roles/heat/templates/heat-engine.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "heat-engine",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/heat.conf",
- "dest": "/etc/heat/heat.conf",
- "owner": "heat",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/_deprecated.yaml",
- "dest": "/etc/heat/environment.d/_deprecated.yaml",
- "owner": "heat",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/heat",
- "owner": "heat:heat",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/heat/templates/heat.conf.j2 b/ansible/roles/heat/templates/heat.conf.j2
deleted file mode 100644
index c68e4453f1..0000000000
--- a/ansible/roles/heat/templates/heat.conf.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-[DEFAULT]
-debug = {{ heat_logging_debug }}
-
-log_dir = /var/log/kolla/heat
-
-heat_watch_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}
-heat_metadata_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}
-heat_waitcondition_server_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ heat_api_cfn_port }}/v1/waitcondition
-
-heat_stack_user_role = {{ heat_stack_user_role }}
-
-stack_domain_admin = heat_domain_admin
-stack_domain_admin_password = {{ heat_domain_admin_password }}
-stack_user_domain_name = heat_user_domain
-
-deferred_auth_method = trusts
-trusts_delegated_role = heat_stack_owner
-num_engine_workers = {{ openstack_service_workers }}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if service_name == 'heat-api' %}
-[heat_api]
-bind_host = {{ api_interface_address }}
-bind_port = {{ heat_api_port }}
-works = {{ openstack_service_workers }}
-{% endif %}
-
-{% if service_name == 'heat-api-cfn' %}
-[heat_api_cfn]
-bind_host = {{ api_interface_address }}
-bind_port = {{ heat_api_cfn_port }}
-{% endif %}
-
-[database]
-connection = mysql+pymysql://{{ heat_database_user }}:{{ heat_database_password }}@{{ heat_database_address }}/{{ heat_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ heat_keystone_user }}
-password = {{ heat_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[cache]
-backend = oslo_cache.memcache_pool
-enabled = True
-memcache_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[trustee]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
-auth_type = password
-user_domain_id = default
-username = {{ heat_keystone_user }}
-password = {{ heat_keystone_password }}
-
-[ec2authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-{% else %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-{% endif %}
-
-[clients_keystone]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-{% else %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-{% endif %}
-
-[oslo_messaging_notifications]
-driver = noop
-
-[clients]
-endpoint_type = internalURL
diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml
deleted file mode 100644
index 2658e2fd3b..0000000000
--- a/ansible/roles/horizon/defaults/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-project_name: "horizon"
-
-####################
-# Database
-####################
-horizon_database_name: "horizon"
-horizon_database_user: "horizon"
-horizon_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-####################
-# Docker
-####################
-horizon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-horizon"
-horizon_tag: "{{ openstack_release }}"
-horizon_image_full: "{{ horizon_image }}:{{ horizon_tag }}"
-
-
-####################
-# OpenStack
-####################
-openstack_horizon_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
-
-horizon_logging_debug: "{{ openstack_logging_debug }}"
-
-
-####################
-# Horizon
-####################
-horizon_openstack_keystone_default_role: "_member_"
diff --git a/ansible/roles/horizon/meta/main.yml b/ansible/roles/horizon/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/horizon/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/horizon/tasks/bootstrap.yml b/ansible/roles/horizon/tasks/bootstrap.yml
deleted file mode 100644
index 6e6127495a..0000000000
--- a/ansible/roles/horizon/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Horizon database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ horizon_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['horizon'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Horizon database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ horizon_database_name }}'
- password='{{ horizon_database_password }}'
- host='%'
- priv='{{ horizon_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['horizon'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/horizon/tasks/bootstrap_service.yml b/ansible/roles/horizon/tasks/bootstrap_service.yml
deleted file mode 100644
index c20280f6bd..0000000000
--- a/ansible/roles/horizon/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Running Horizon bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ horizon_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_horizon"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/horizon/:{{ container_config_directory }}/:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['horizon'][0] }}"
diff --git a/ansible/roles/horizon/tasks/config.yml b/ansible/roles/horizon/tasks/config.yml
deleted file mode 100644
index c361c83521..0000000000
--- a/ansible/roles/horizon/tasks/config.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "horizon"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "horizon"
-
-- name: Copying over horizon.conf
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/horizon/horizon.conf"
- with_first_found:
- - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/horizon.conf"
- - "{{ node_custom_config }}/horizon/horizon.conf"
- - "horizon.conf.j2"
-
-- name: Copying over local_settings
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/horizon/local_settings"
- with_first_found:
- - "{{ node_custom_config }}/horizon/{{ inventory_hostname }}/local_settings"
- - "{{ node_custom_config }}/horizon/local_settings"
- - "local_settings.j2"
diff --git a/ansible/roles/horizon/tasks/deploy.yml b/ansible/roles/horizon/tasks/deploy.yml
deleted file mode 100644
index 3c04e136d2..0000000000
--- a/ansible/roles/horizon/tasks/deploy.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- include: config.yml
-
-- include: register.yml
-
-- include: bootstrap.yml
- when: horizon_backend_database | bool
-
-- include: start.yml
diff --git a/ansible/roles/horizon/tasks/main.yml b/ansible/roles/horizon/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/horizon/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/horizon/tasks/precheck.yml b/ansible/roles/horizon/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/horizon/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/horizon/tasks/pull.yml b/ansible/roles/horizon/tasks/pull.yml
deleted file mode 100644
index 99eab899de..0000000000
--- a/ansible/roles/horizon/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling horizon image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ horizon_image_full }}"
- when: inventory_hostname in groups['horizon']
diff --git a/ansible/roles/horizon/tasks/reconfigure.yml b/ansible/roles/horizon/tasks/reconfigure.yml
deleted file mode 100644
index e4ae77c07e..0000000000
--- a/ansible/roles/horizon/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: horizon, group: horizon }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: horizon, group: horizon }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: horizon, group: horizon }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: horizon, group: horizon }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: horizon, group: horizon }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/horizon/tasks/register.yml b/ansible/roles/horizon/tasks/register.yml
deleted file mode 100644
index 0c78e9821f..0000000000
--- a/ansible/roles/horizon/tasks/register.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Creating the _member_ role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m os_keystone_role
- -a "name={{ horizon_openstack_keystone_default_role }}
- auth={{ '{{ openstack_horizon_auth }}' }}"
- -e "{'openstack_horizon_auth':{{ openstack_horizon_auth }}}"
- register: horizon_role
- changed_when: "{{ horizon_role.stdout.find('localhost | SUCCESS => ') != -1 and (horizon_role.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: horizon_role.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/horizon/tasks/start.yml b/ansible/roles/horizon/tasks/start.yml
deleted file mode 100644
index 746cae5683..0000000000
--- a/ansible/roles/horizon/tasks/start.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Starting horizon container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ horizon_image_full }}"
- name: "horizon"
- environment:
- ENABLE_NEUTRON_LBAAS: "{{ 'yes' if enable_neutron_lbaas | bool else 'no' }}"
- volumes:
- - "{{ node_config_directory }}/horizon/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['horizon']
diff --git a/ansible/roles/horizon/tasks/upgrade.yml b/ansible/roles/horizon/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/horizon/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/horizon/templates/horizon.conf.j2 b/ansible/roles/horizon/templates/horizon.conf.j2
deleted file mode 100644
index b69b390103..0000000000
--- a/ansible/roles/horizon/templates/horizon.conf.j2
+++ /dev/null
@@ -1,71 +0,0 @@
-{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
-Listen {{ api_interface_address }}:80
-
-
- LogLevel warn
- ErrorLog /var/log/kolla/horizon/horizon.log
- CustomLog /var/log/kolla/horizon/horizon-access.log combined
-
- WSGIScriptReloading On
- WSGIDaemonProcess horizon-http processes=5 threads=1 user=horizon group=horizon display-name=%{GROUP} python-path={{ python_path }}
- WSGIProcessGroup horizon-http
- WSGIScriptAlias / {{ python_path }}/openstack_dashboard/wsgi/django.wsgi
- WSGIPassAuthorization On
-
-
- Require all granted
-
-
- Alias /static {{ python_path }}/static
-
- SetHandler None
-
-
-
-{% if kolla_enable_tls_external | bool %}
-Header edit Location ^http://(.*)$ https://$1
-{% else %}
-# NOTE(Jeffrey4l): Only enable deflate when tls is disabled until the
-# OSSN-0037 is fixed.
-# see https://wiki.openstack.org/wiki/OSSN/OSSN-0037 for more information.
-
- # Compress HTML, CSS, JavaScript, Text, XML and fonts
- AddOutputFilterByType DEFLATE application/javascript
- AddOutputFilterByType DEFLATE application/rss+xml
- AddOutputFilterByType DEFLATE application/vnd.ms-fontobject
- AddOutputFilterByType DEFLATE application/x-font
- AddOutputFilterByType DEFLATE application/x-font-opentype
- AddOutputFilterByType DEFLATE application/x-font-otf
- AddOutputFilterByType DEFLATE application/x-font-truetype
- AddOutputFilterByType DEFLATE application/x-font-ttf
- AddOutputFilterByType DEFLATE application/x-javascript
- AddOutputFilterByType DEFLATE application/xhtml+xml
- AddOutputFilterByType DEFLATE application/xml
- AddOutputFilterByType DEFLATE font/opentype
- AddOutputFilterByType DEFLATE font/otf
- AddOutputFilterByType DEFLATE font/ttf
- AddOutputFilterByType DEFLATE image/svg+xml
- AddOutputFilterByType DEFLATE image/x-icon
- AddOutputFilterByType DEFLATE text/css
- AddOutputFilterByType DEFLATE text/html
- AddOutputFilterByType DEFLATE text/javascript
- AddOutputFilterByType DEFLATE text/plain
- AddOutputFilterByType DEFLATE text/xml
-
-{% endif %}
-
-
-
- ExpiresActive on
- ExpiresDefault "access plus 1 month"
- ExpiresByType application/javascript "access plus 1 year"
- ExpiresByType text/css "access plus 1 year"
- ExpiresByType image/x-ico "access plus 1 year"
- ExpiresByType image/jpg "access plus 1 year"
- ExpiresByType image/jpeg "access plus 1 year"
- ExpiresByType image/gif "access plus 1 year"
- ExpiresByType image/png "access plus 1 year"
- Header merge Cache-Control public
- Header unset ETag
-
-
diff --git a/ansible/roles/horizon/templates/horizon.json.j2 b/ansible/roles/horizon/templates/horizon.json.j2
deleted file mode 100644
index b52554d48a..0000000000
--- a/ansible/roles/horizon/templates/horizon.json.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'horizon.conf' %}
-{
- "command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/horizon.conf",
- "dest": "/etc/{{ apache_dir }}/{{ apache_file }}",
- "owner": "horizon",
- "perm": "0644"
- },
- {
- "source": "{{ container_config_directory }}/local_settings",
- "dest": "/etc/openstack-dashboard/local_settings",
- "owner": "horizon",
- "perm": "0644"
- }
- ]
-}
diff --git a/ansible/roles/horizon/templates/local_settings.j2 b/ansible/roles/horizon/templates/local_settings.j2
deleted file mode 100644
index 733e2ab426..0000000000
--- a/ansible/roles/horizon/templates/local_settings.j2
+++ /dev/null
@@ -1,818 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-from openstack_dashboard import exceptions
-from openstack_dashboard.settings import HORIZON_CONFIG
-
-DEBUG = {{ horizon_logging_debug }}
-TEMPLATE_DEBUG = DEBUG
-
-COMPRESS_OFFLINE = True
-
-# WEBROOT is the location relative to Webserver root
-# should end with a slash.
-WEBROOT = '/'
-#LOGIN_URL = WEBROOT + 'auth/login/'
-#LOGOUT_URL = WEBROOT + 'auth/logout/'
-#
-# LOGIN_REDIRECT_URL can be used as an alternative for
-# HORIZON_CONFIG.user_home, if user_home is not set.
-# Do not set it to '/home/', as this will cause circular redirect loop
-#LOGIN_REDIRECT_URL = WEBROOT
-
-# If horizon is running in production (DEBUG is False), set this
-# with the list of host/domain names that the application can serve.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
-ALLOWED_HOSTS = ['*']
-
-{% if horizon_backend_database | bool %}
-SESSION_ENGINE = 'django.contrib.sessions.backends.db'
-DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.mysql',
- 'NAME': '{{ horizon_database_name }}',
- 'USER': '{{ horizon_database_user }}',
- 'PASSWORD': '{{ horizon_database_password }}',
- 'HOST': '{{ database_address }}',
- 'PORT': '{{ database_port }}'
- }
-}
-{% endif %}
-
-# Set SSL proxy settings:
-# Pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
-#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
-
-# If Horizon is being served through SSL, then uncomment the following two
-# settings to better secure the cookies from security exploits
-#CSRF_COOKIE_SECURE = True
-#SESSION_COOKIE_SECURE = True
-
-{% if kolla_enable_tls_external | bool %}
-SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
-CSRF_COOKIE_SECURE = True
-SESSION_COOKIE_SECURE = True
-{% endif %}
-
-# The absolute path to the directory where message files are collected.
-# The message file must have a .json file extension. When the user logins to
-# horizon, the message files collected are processed and displayed to the user.
-#MESSAGES_PATH=None
-
-# Overrides for OpenStack API versions. Use this setting to force the
-# OpenStack dashboard to use a specific API version for a given service API.
-# Versions specified here should be integers or floats, not strings.
-# NOTE: The version should be formatted as it appears in the URL for the
-# service API. For example, The identity service APIs have inconsistent
-# use of the decimal point, so valid options would be 2.0 or 3.
-# Minimum compute version to get the instance locked status is 2.9.
-#OPENSTACK_API_VERSIONS = {
-# "data-processing": 1.1,
-# "identity": 3,
-# "volume": 2,
-# "compute": 2,
-#}
-
-OPENSTACK_API_VERSIONS = {
- "identity": 3,
-}
-
-# Set this to True if running on a multi-domain model. When this is enabled, it
-# will require the user to enter the Domain name in addition to the username
-# for login.
-#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
-
-# Overrides the default domain used when running on single-domain model
-# with Keystone V3. All entities will be created in the default domain.
-# NOTE: This value must be the ID of the default domain, NOT the name.
-# Also, you will most likely have a value in the keystone policy file like this
-# "cloud_admin": "rule:admin_required and domain_id:"
-# This value must match the domain id specified there.
-#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'
-
-# Set this to True to enable panels that provide the ability for users to
-# manage Identity Providers (IdPs) and establish a set of rules to map
-# federation protocol attributes to Identity API attributes.
-# This extension requires v3.0+ of the Identity API.
-#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False
-
-# Set Console type:
-# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
-# Set to None explicitly if you want to deactivate the console.
-#CONSOLE_TYPE = "AUTO"
-
-# If provided, a "Report Bug" link will be displayed in the site header
-# which links to the value of this setting (ideally a URL containing
-# information on how to report issues).
-#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com"
-
-# Show backdrop element outside the modal, do not close the modal
-# after clicking on backdrop.
-#HORIZON_CONFIG["modal_backdrop"] = "static"
-
-# Specify a regular expression to validate user passwords.
-#HORIZON_CONFIG["password_validator"] = {
-# "regex": '.*',
-# "help_text": _("Your password does not meet the requirements."),
-#}
-
-# Disable simplified floating IP address management for deployments with
-# multiple floating IP pools or complex network requirements.
-#HORIZON_CONFIG["simple_ip_management"] = False
-
-# Turn off browser autocompletion for forms including the login form and
-# the database creation workflow if so desired.
-#HORIZON_CONFIG["password_autocomplete"] = "off"
-
-# Setting this to True will disable the reveal button for password fields,
-# including on the login form.
-#HORIZON_CONFIG["disable_password_reveal"] = False
-
-LOCAL_PATH = '/tmp'
-
-# Set custom secret key:
-# You can either set it to a specific value or you can let horizon generate a
-# default secret key that is unique on this machine, e.i. regardless of the
-# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However,
-# there may be situations where you would want to set this explicitly, e.g.
-# when multiple dashboard instances are distributed on different machines
-# (usually behind a load-balancer). Either you have to make sure that a session
-# gets all requests routed to the same dashboard instance or you set the same
-# SECRET_KEY for all of them.
-SECRET_KEY='{{ horizon_secret_key }}'
-
-# We recommend you use memcached for development; otherwise after every reload
-# of the django development server, you will have to login again. To use
-# memcached set CACHES to something like
-#CACHES = {
-# 'default': {
-# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
-# 'LOCATION': '127.0.0.1:11211',
-# },
-#}
-
-{% if groups['memcached'] | length > 0 and horizon_backend_database | bool == False %}
-SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
-CACHES = {
- 'default': {
- 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
-
-{% if orchestration_engine == 'KUBERNETES' %}
- 'LOCATION': '{{ memcached_servers }}'
-{% else %}
- 'LOCATION': [{% for host in groups['memcached'] %}'{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}'{% if not loop.last %},{% endif %}{% endfor %}]
-{%- endif %}
- }
-}
-{% endif %}
-
-# Send email to the console by default
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-# Or send them to /dev/null
-#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
-
-# Configure these for your outgoing email host
-#EMAIL_HOST = 'smtp.my-company.com'
-#EMAIL_PORT = 25
-#EMAIL_HOST_USER = 'djangomail'
-#EMAIL_HOST_PASSWORD = 'top-secret!'
-
-# For multiple regions uncomment this configuration, and add (endpoint, title).
-#AVAILABLE_REGIONS = [
-# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
-# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
-#]
-
-OPENSTACK_HOST = "{% if orchestration_engine == 'KUBERNETES' %}{{ api_interface_address }}{% else %}{{ kolla_internal_fqdn }}{% endif %}"
-
-OPENSTACK_KEYSTONE_URL = "{{ keystone_internal_url }}"
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ horizon_openstack_keystone_default_role }}"
-
-# Enables keystone web single-sign-on if set to True.
-#WEBSSO_ENABLED = False
-
-# Determines which authentication choice to show as default.
-#WEBSSO_INITIAL_CHOICE = "credentials"
-
-# The list of authentication mechanisms which include keystone
-# federation protocols and identity provider/federation protocol
-# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol
-# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID
-# Connect respectively.
-# Do not remove the mandatory credentials mechanism.
-# Note: The last two tuples are sample mapping keys to a identity provider
-# and federation protocol combination (WEBSSO_IDP_MAPPING).
-#WEBSSO_CHOICES = (
-# ("credentials", _("Keystone Credentials")),
-# ("oidc", _("OpenID Connect")),
-# ("saml2", _("Security Assertion Markup Language")),
-# ("acme_oidc", "ACME - OpenID Connect"),
-# ("acme_saml2", "ACME - SAML2"),
-#)
-
-# A dictionary of specific identity provider and federation protocol
-# combinations. From the selected authentication mechanism, the value
-# will be looked up as keys in the dictionary. If a match is found,
-# it will redirect the user to a identity provider and federation protocol
-# specific WebSSO endpoint in keystone, otherwise it will use the value
-# as the protocol_id when redirecting to the WebSSO by protocol endpoint.
-# NOTE: The value is expected to be a tuple formatted as: (, ).
-#WEBSSO_IDP_MAPPING = {
-# "acme_oidc": ("acme", "oidc"),
-# "acme_saml2": ("acme", "saml2"),
-#}
-
-# Disable SSL certificate checks (useful for self-signed certificates):
-#OPENSTACK_SSL_NO_VERIFY = True
-
-# The CA certificate to use to verify SSL connections
-#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
-
-# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
-# capabilities of the auth backend for Keystone.
-# If Keystone has been configured to use LDAP as the auth backend then set
-# can_edit_user to False and name to 'ldap'.
-#
-# TODO(tres): Remove these once Keystone has an API to identify auth backend.
-OPENSTACK_KEYSTONE_BACKEND = {
- 'name': 'native',
- 'can_edit_user': True,
- 'can_edit_group': True,
- 'can_edit_project': True,
- 'can_edit_domain': True,
- 'can_edit_role': True,
-}
-
-# Setting this to True, will add a new "Retrieve Password" action on instance,
-# allowing Admin session password retrieval/decryption.
-#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
-
-# The Launch Instance user experience has been significantly enhanced.
-# You can choose whether to enable the new launch instance experience,
-# the legacy experience, or both. The legacy experience will be removed
-# in a future release, but is available as a temporary backup setting to ensure
-# compatibility with existing deployments. Further development will not be
-# done on the legacy experience. Please report any problems with the new
-# experience via the Launchpad tracking system.
-#
-# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
-# determine the experience to enable. Set them both to true to enable
-# both.
-#LAUNCH_INSTANCE_LEGACY_ENABLED = True
-#LAUNCH_INSTANCE_NG_ENABLED = False
-
-# A dictionary of settings which can be used to provide the default values for
-# properties found in the Launch Instance modal.
-#LAUNCH_INSTANCE_DEFAULTS = {
-# 'config_drive': False,
-# 'enable_scheduler_hints': True
-#}
-
-# The Xen Hypervisor has the ability to set the mount point for volumes
-# attached to instances (other Hypervisors currently do not). Setting
-# can_set_mount_point to True will add the option to set the mount point
-# from the UI.
-OPENSTACK_HYPERVISOR_FEATURES = {
- 'can_set_mount_point': False,
- 'can_set_password': False,
- 'requires_keypair': False,
- 'enable_quotas': True
-}
-
-# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
-# services provided by cinder that is not exposed by its extension API.
-OPENSTACK_CINDER_FEATURES = {
- 'enable_backup': False,
-}
-
-# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
-# services provided by neutron. Options currently available are load
-# balancer service, security groups, quotas, VPN service.
-OPENSTACK_NEUTRON_NETWORK = {
- 'enable_router': True,
- 'enable_quotas': True,
- 'enable_ipv6': True,
- 'enable_distributed_router': False,
- 'enable_ha_router': False,
- 'enable_lb': True,
- 'enable_firewall': True,
- 'enable_vpn': True,
- 'enable_fip_topology_check': True,
-
- # Default dns servers you would like to use when a subnet is
- # created. This is only a default, users can still choose a different
- # list of dns servers when creating a new subnet.
- # The entries below are examples only, and are not appropriate for
- # real deployments
- # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"],
-
- # The profile_support option is used to detect if an external router can be
- # configured via the dashboard. When using specific plugins the
- # profile_support can be turned on if needed.
- 'profile_support': None,
- #'profile_support': 'cisco',
-
- # Set which provider network types are supported. Only the network types
- # in this list will be available to choose from when creating a network.
- # Network types include local, flat, vlan, gre, vxlan and geneve.
- # 'supported_provider_types': ['*'],
-
- # You can configure available segmentation ID range per network type
- # in your deployment.
- # 'segmentation_id_range': {
- # 'vlan': [1024, 2048],
- # 'vxlan': [4094, 65536],
- # },
-
- # You can define additional provider network types here.
- # 'extra_provider_types': {
- # 'awesome_type': {
- # 'display_name': 'Awesome New Type',
- # 'require_physical_network': False,
- # 'require_segmentation_id': True,
- # }
- # },
-
- # Set which VNIC types are supported for port binding. Only the VNIC
- # types in this list will be available to choose from when creating a
- # port.
- # VNIC types include 'normal', 'macvtap' and 'direct'.
- # Set to empty list or None to disable VNIC type selection.
- 'supported_vnic_types': ['*'],
-}
-
-# The OPENSTACK_HEAT_STACK settings can be used to disable password
-# field required while launching the stack.
-OPENSTACK_HEAT_STACK = {
- 'enable_user_pass': True,
-}
-
-# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
-# in the OpenStack Dashboard related to the Image service, such as the list
-# of supported image formats.
-#OPENSTACK_IMAGE_BACKEND = {
-# 'image_formats': [
-# ('', _('Select format')),
-# ('aki', _('AKI - Amazon Kernel Image')),
-# ('ami', _('AMI - Amazon Machine Image')),
-# ('ari', _('ARI - Amazon Ramdisk Image')),
-# ('docker', _('Docker')),
-# ('iso', _('ISO - Optical Disk Image')),
-# ('ova', _('OVA - Open Virtual Appliance')),
-# ('qcow2', _('QCOW2 - QEMU Emulator')),
-# ('raw', _('Raw')),
-# ('vdi', _('VDI - Virtual Disk Image')),
-# ('vhd', _('VHD - Virtual Hard Disk')),
-# ('vmdk', _('VMDK - Virtual Machine Disk')),
-# ],
-#}
-
-# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
-# image custom property attributes that appear on image detail pages.
-IMAGE_CUSTOM_PROPERTY_TITLES = {
- "architecture": _("Architecture"),
- "kernel_id": _("Kernel ID"),
- "ramdisk_id": _("Ramdisk ID"),
- "image_state": _("Euca2ools state"),
- "project_id": _("Project ID"),
- "image_type": _("Image Type"),
-}
-
-# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
-# custom properties should not be displayed in the Image Custom Properties
-# table.
-IMAGE_RESERVED_CUSTOM_PROPERTIES = []
-
-# Set to 'legacy' or 'direct' to allow users to upload images to glance via
-# Horizon server. When enabled, a file form field will appear on the create
-# image form. If set to 'off', there will be no file form field on the create
-# image form. See documentation for deployment considerations.
-#HORIZON_IMAGES_UPLOAD_MODE = 'legacy'
-
-# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'publicURL'.
-OPENSTACK_ENDPOINT_TYPE = "internalURL"
-
-# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
-# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is None. This
-# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
-#SECONDARY_ENDPOINT_TYPE = None
-
-# The number of objects (Swift containers/objects or images) to display
-# on a single page before providing a paging element (a "more" link)
-# to paginate results.
-API_RESULT_LIMIT = 1000
-API_RESULT_PAGE_SIZE = 20
-
-# The size of chunk in bytes for downloading objects from Swift
-SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
-
-# Specify a maximum number of items to display in a dropdown.
-DROPDOWN_MAX_ITEMS = 30
-
-# The timezone of the server. This should correspond with the timezone
-# of your entire OpenStack installation, and hopefully be in UTC.
-TIME_ZONE = "UTC"
-
-# When launching an instance, the menu of available flavors is
-# sorted by RAM usage, ascending. If you would like a different sort order,
-# you can provide another flavor attribute as sorting key. Alternatively, you
-# can provide a custom callback method to use for sorting. You can also provide
-# a flag for reverse sort. For more info, see
-# http://docs.python.org/2/library/functions.html#sorted
-#CREATE_INSTANCE_FLAVOR_SORT = {
-# 'key': 'name',
-# # or
-# 'key': my_awesome_callback_method,
-# 'reverse': False,
-#}
-
-# Set this to True to display an 'Admin Password' field on the Change Password
-# form to verify that it is indeed the admin logged-in who wants to change
-# the password.
-#ENFORCE_PASSWORD_CHECK = False
-
-# Modules that provide /auth routes that can be used to handle different types
-# of user authentication. Add auth plugins that require extra route handling to
-# this list.
-#AUTHENTICATION_URLS = [
-# 'openstack_auth.urls',
-#]
-
-# The Horizon Policy Enforcement engine uses these values to load per service
-# policy rule files. The content of these files should match the files the
-# OpenStack services are using to determine role based access control in the
-# target installation.
-
-# Path to directory containing policy.json files
-POLICY_FILES_PATH = '/etc/openstack-dashboard'
-
-# Map of local copy of service policy files.
-# Please insure that your identity policy file matches the one being used on
-# your keystone servers. There is an alternate policy file that may be used
-# in the Keystone v3 multi-domain case, policy.v3cloudsample.json.
-# This file is not included in the Horizon repository by default but can be
-# found at
-# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \
-# policy.v3cloudsample.json
-# Having matching policy files on the Horizon and Keystone servers is essential
-# for normal operation. This holds true for all services and their policy files.
-#POLICY_FILES = {
-# 'identity': 'keystone_policy.json',
-# 'compute': 'nova_policy.json',
-# 'volume': 'cinder_policy.json',
-# 'image': 'glance_policy.json',
-# 'orchestration': 'heat_policy.json',
-# 'network': 'neutron_policy.json',
-# 'telemetry': 'ceilometer_policy.json',
-#}
-
-# TODO: (david-lyle) remove when plugins support adding settings.
-# Note: Only used when trove-dashboard plugin is configured to be used by
-# Horizon.
-# Trove user and database extension support. By default support for
-# creating users and databases on database instances is turned on.
-# To disable these extensions set the permission here to something
-# unusable such as ["!"].
-#TROVE_ADD_USER_PERMS = []
-#TROVE_ADD_DATABASE_PERMS = []
-
-# Change this patch to the appropriate list of tuples containing
-# a key, label and static directory containing two files:
-# _variables.scss and _styles.scss
-#AVAILABLE_THEMES = [
-# ('default', 'Default', 'themes/default'),
-# ('material', 'Material', 'themes/material'),
-#]
-
-LOGGING = {
- 'version': 1,
- # When set to True this will disable all logging except
- # for loggers specified in this configuration dictionary. Note that
- # if nothing is specified here and disable_existing_loggers is True,
- # django.db.backends will still log unless it is disabled explicitly.
- 'disable_existing_loggers': False,
- 'formatters': {
- 'operation': {
- # The format of "%(message)s" is defined by
- # OPERATION_LOG_OPTIONS['format']
- 'format': '%(asctime)s %(message)s'
- },
- },
- 'handlers': {
- 'null': {
- 'level': 'DEBUG',
- 'class': 'logging.NullHandler',
- },
- 'console': {
- # Set the level to "DEBUG" for verbose output logging.
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- },
- 'operation': {
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- 'formatter': 'operation',
- },
- },
- 'loggers': {
- # Logging from django.db.backends is VERY verbose, send to null
- # by default.
- 'django.db.backends': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'requests': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'horizon': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'horizon.operation_log': {
- 'handlers': ['operation'],
- 'level': 'INFO',
- 'propagate': False,
- },
- 'openstack_dashboard': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'novaclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'cinderclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'keystoneclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'glanceclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'neutronclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'heatclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'ceilometerclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'swiftclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_auth': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'nose.plugins.manager': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'django': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'iso8601': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'scss': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- },
-}
-
-# 'direction' should not be specified for all_tcp/udp/icmp.
-# It is specified in the form.
-SECURITY_GROUP_RULES = {
- 'all_tcp': {
- 'name': _('All TCP'),
- 'ip_protocol': 'tcp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_udp': {
- 'name': _('All UDP'),
- 'ip_protocol': 'udp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_icmp': {
- 'name': _('All ICMP'),
- 'ip_protocol': 'icmp',
- 'from_port': '-1',
- 'to_port': '-1',
- },
- 'ssh': {
- 'name': 'SSH',
- 'ip_protocol': 'tcp',
- 'from_port': '22',
- 'to_port': '22',
- },
- 'smtp': {
- 'name': 'SMTP',
- 'ip_protocol': 'tcp',
- 'from_port': '25',
- 'to_port': '25',
- },
- 'dns': {
- 'name': 'DNS',
- 'ip_protocol': 'tcp',
- 'from_port': '53',
- 'to_port': '53',
- },
- 'http': {
- 'name': 'HTTP',
- 'ip_protocol': 'tcp',
- 'from_port': '80',
- 'to_port': '80',
- },
- 'pop3': {
- 'name': 'POP3',
- 'ip_protocol': 'tcp',
- 'from_port': '110',
- 'to_port': '110',
- },
- 'imap': {
- 'name': 'IMAP',
- 'ip_protocol': 'tcp',
- 'from_port': '143',
- 'to_port': '143',
- },
- 'ldap': {
- 'name': 'LDAP',
- 'ip_protocol': 'tcp',
- 'from_port': '389',
- 'to_port': '389',
- },
- 'https': {
- 'name': 'HTTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '443',
- 'to_port': '443',
- },
- 'smtps': {
- 'name': 'SMTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '465',
- 'to_port': '465',
- },
- 'imaps': {
- 'name': 'IMAPS',
- 'ip_protocol': 'tcp',
- 'from_port': '993',
- 'to_port': '993',
- },
- 'pop3s': {
- 'name': 'POP3S',
- 'ip_protocol': 'tcp',
- 'from_port': '995',
- 'to_port': '995',
- },
- 'ms_sql': {
- 'name': 'MS SQL',
- 'ip_protocol': 'tcp',
- 'from_port': '1433',
- 'to_port': '1433',
- },
- 'mysql': {
- 'name': 'MYSQL',
- 'ip_protocol': 'tcp',
- 'from_port': '3306',
- 'to_port': '3306',
- },
- 'rdp': {
- 'name': 'RDP',
- 'ip_protocol': 'tcp',
- 'from_port': '3389',
- 'to_port': '3389',
- },
-}
-
-# Deprecation Notice:
-#
-# The setting FLAVOR_EXTRA_KEYS has been deprecated.
-# Please load extra spec metadata into the Glance Metadata Definition Catalog.
-#
-# The sample quota definitions can be found in:
-# /etc/metadefs/compute-quota.json
-#
-# The metadata definition catalog supports CLI and API:
-# $glance --os-image-api-version 2 help md-namespace-import
-# $glance-manage db_load_metadefs
-#
-# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
-
-# TODO: (david-lyle) remove when plugins support settings natively
-# Note: This is only used when the Sahara plugin is configured and enabled
-# for use in Horizon.
-# Indicate to the Sahara data processing service whether or not
-# automatic floating IP allocation is in effect. If it is not
-# in effect, the user will be prompted to choose a floating IP
-# pool for use in their cluster. False by default. You would want
-# to set this to True if you were running Nova Networking with
-# auto_assign_floating_ip = True.
-#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
-
-# The hash algorithm to use for authentication tokens. This must
-# match the hash algorithm that the identity server and the
-# auth_token middleware are using. Allowed values are the
-# algorithms supported by Python's hashlib library.
-#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
-
-# AngularJS requires some settings to be made available to
-# the client side. Some settings are required by in-tree / built-in horizon
-# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
-# form of ['SETTING_1','SETTING_2'], etc.
-#
-# You may remove settings from this list for security purposes, but do so at
-# the risk of breaking a built-in horizon feature. These settings are required
-# for horizon to function properly. Only remove them if you know what you
-# are doing. These settings may in the future be moved to be defined within
-# the enabled panel configuration.
-# You should not add settings to this list for out of tree extensions.
-# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
-REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
- 'LAUNCH_INSTANCE_DEFAULTS',
- 'OPENSTACK_IMAGE_FORMATS']
-
-# Additional settings can be made available to the client side for
-# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
-# !! Please use extreme caution as the settings are transferred via HTTP/S
-# and are not encrypted on the browser. This is an experimental API and
-# may be deprecated in the future without notice.
-#REST_API_ADDITIONAL_SETTINGS = []
-
-# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
-# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
-# Scripting (XFS) vulnerability, so this option allows extra security hardening
-# where iframes are not used in deployment. Default setting is True.
-# For more information see:
-# http://tinyurl.com/anticlickjack
-#DISALLOW_IFRAME_EMBED = True
-
-# Help URL can be made available for the client. To provide a help URL, edit the
-# following attribute to the URL of your choice.
-#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org"
-
-# Settings for OperationLogMiddleware
-# OPERATION_LOG_ENABLED is flag to use the function to log an operation on
-# Horizon.
-# mask_targets is arrangement for appointing a target to mask.
-# method_targets is arrangement of HTTP method to output log.
-# format is the log contents.
-#OPERATION_LOG_ENABLED = False
-#OPERATION_LOG_OPTIONS = {
-# 'mask_fields': ['password'],
-# 'target_methods': ['POST'],
-# 'format': ("[%(domain_name)s] [%(domain_id)s] [%(project_name)s]"
-# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]"
-# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]"
-# " [%(http_status)s] [%(param)s]"),
-#}
-
-# The default date range in the Overview panel meters - either minus N
-# days (if the value is integer N), or from the beginning of the current month
-# until today (if set to None). This setting should be used to limit the amount
-# of data fetched by default when rendering the Overview panel.
-#OVERVIEW_DAYS_RANGE = 1
-
-# To allow operators to require admin users provide a search criteria first
-# before loading any data into the admin views, set the following attribute to
-# True
-#ADMIN_FILTER_DATA_FIRST=False
diff --git a/ansible/roles/influxdb/defaults/main.yml b/ansible/roles/influxdb/defaults/main.yml
deleted file mode 100644
index 4633af3e2b..0000000000
--- a/ansible/roles/influxdb/defaults/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-project_name: "influxdb"
-
-####################
-# Docker
-####################
-influxdb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-influxdb"
-influxdb_tag: "{{ openstack_release }}"
-influxdb_image_full: "{{ influxdb_image }}:{{ influxdb_tag }}"
diff --git a/ansible/roles/influxdb/meta/main.yml b/ansible/roles/influxdb/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/influxdb/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/influxdb/tasks/config.yml b/ansible/roles/influxdb/tasks/config.yml
deleted file mode 100644
index f9b7544f89..0000000000
--- a/ansible/roles/influxdb/tasks/config.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/influxdb"
- state: "directory"
- recurse: yes
- when: inventory_hostname in groups['influxdb']
-
-- name: Copying over config.json files
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/influxdb/config.json"
- when: inventory_hostname in groups['influxdb']
- with_items:
- - influxdb
-
-- name: Copying over influxdb config file
- template:
- src: "{{ role_path }}/templates/{{ item }}.conf.j2"
- dest: "{{ node_config_directory }}/influxdb/influxdb.conf"
- when: inventory_hostname in groups['influxdb']
- with_items:
- - influxdb
diff --git a/ansible/roles/influxdb/tasks/deploy.yml b/ansible/roles/influxdb/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/influxdb/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/influxdb/tasks/main.yml b/ansible/roles/influxdb/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/influxdb/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/influxdb/tasks/precheck.yml b/ansible/roles/influxdb/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/influxdb/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/influxdb/tasks/pull.yml b/ansible/roles/influxdb/tasks/pull.yml
deleted file mode 100644
index c611eebd4e..0000000000
--- a/ansible/roles/influxdb/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling influxdb image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ influxdb_image_full }}"
- when: inventory_hostname in groups['influxdb']
diff --git a/ansible/roles/influxdb/tasks/reconfigure.yml b/ansible/roles/influxdb/tasks/reconfigure.yml
deleted file mode 100644
index 6049df4746..0000000000
--- a/ansible/roles/influxdb/tasks/reconfigure.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "influxdb"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups['influxdb']
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec influxdb /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups['influxdb']
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "influxdb"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups['influxdb']
-
-- name: Remove the containers
- kolla_docker:
- name: "influxdb"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE"
- - inventory_hostname in groups['influxdb']
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "influxdb"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups['influxdb']
diff --git a/ansible/roles/influxdb/tasks/start.yml b/ansible/roles/influxdb/tasks/start.yml
deleted file mode 100644
index 1a98e71234..0000000000
--- a/ansible/roles/influxdb/tasks/start.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Starting influxdb container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ influxdb_image_full }}"
- name: "influxdb"
- volumes:
- - "{{ node_config_directory }}/influxdb/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "influxdb:/var/lib/influxdb"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['influxdb']
diff --git a/ansible/roles/influxdb/tasks/upgrade.yml b/ansible/roles/influxdb/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/influxdb/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/influxdb/templates/influxdb.conf.j2 b/ansible/roles/influxdb/templates/influxdb.conf.j2
deleted file mode 100644
index cded6fbe68..0000000000
--- a/ansible/roles/influxdb/templates/influxdb.conf.j2
+++ /dev/null
@@ -1,57 +0,0 @@
-reporting-disabled = false
-[logging]
-level = "info"
-file = "/var/log/kolla/influxdb/influxdb.log"
-[meta]
- dir = "/var/lib/influxdb/meta"
- retention-autocreate = true
- logging-enabled = true
- pprof-enabled = false
- lease-duration = "1m0s"
-[data]
- enabled = true
- dir = "/var/lib/influxdb/data"
- wal-dir = "/var/lib/influxdb/wal"
- wal-logging-enabled = true
- data-logging-enabled = true
-[cluster]
- shard-writer-timeout = "5s"
- write-timeout = "10s"
- max-concurrent-queries = 0
- query-timeout = "0s"
- max-select-point = 0
- max-select-series = 0
- max-select-buckets = 0
-[retention]
- enabled = true
- check-interval = "30m"
-[shard-precreation]
- enabled = true
- check-interval = "10m"
- advance-period = "30m"
-[monitor]
- store-enabled = true
- store-database = "_internal"
- store-interval = "10s"
-[admin]
- enabled = true
- bind-address = "{{ api_interface_address }}:{{ influxdb_admin_port }}"
- https-enabled = false
-[http]
- enabled = true
- bind-address = "{{ api_interface_address }}:{{ influxdb_http_port }}"
- auth-enabled = false
- log-enabled = true
- write-tracing = false
- pprof-enabled = false
- https-enabled = false
- max-row-limit = 10000
-[[graphite]]
- enabled = false
-[[opentsdb]]
- enabled = false
-[[udp]]
- enabled = false
-[continuous_queries]
- log-enabled = true
- enabled = true
diff --git a/ansible/roles/influxdb/templates/influxdb.json.j2 b/ansible/roles/influxdb/templates/influxdb.json.j2
deleted file mode 100644
index 3ffa2fe3a8..0000000000
--- a/ansible/roles/influxdb/templates/influxdb.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/usr/bin/influxd -config /etc/influxdb/influxdb.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/influxdb.conf",
- "dest": "/etc/influxdb/influxdb.conf",
- "owner": "influxdb",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/influxdb",
- "owner": "influxdb:influxdb",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/influxdb",
- "owner": "influxdb:influxdb",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml
deleted file mode 100644
index 1b052ebe10..0000000000
--- a/ansible/roles/ironic/defaults/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-project_name: "ironic"
-
-####################
-# Database
-####################
-ironic_database_name: "ironic"
-ironic_database_user: "ironic"
-ironic_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-ironic_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-api"
-ironic_api_tag: "{{ openstack_release }}"
-ironic_api_image_full: "{{ ironic_api_image }}:{{ ironic_api_tag }}"
-
-ironic_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-conductor"
-ironic_conductor_tag: "{{ openstack_release }}"
-ironic_conductor_image_full: "{{ ironic_conductor_image }}:{{ ironic_conductor_tag }}"
-
-ironic_inspector_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-inspector"
-ironic_inspector_tag: "{{ openstack_release }}"
-ironic_inspector_image_full: "{{ ironic_inspector_image }}:{{ ironic_inspector_tag }}"
-
-ironic_pxe_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ironic-pxe"
-ironic_pxe_tag: "{{ openstack_release }}"
-ironic_pxe_image_full: "{{ ironic_pxe_image }}:{{ ironic_pxe_tag }}"
-
-
-####################
-# OpenStack
-####################
-ironic_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}"
-ironic_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}"
-ironic_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ ironic_api_port }}"
-
-ironic_logging_debug: "{{ openstack_logging_debug }}"
-
-openstack_ironic_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/ironic/meta/main.yml b/ansible/roles/ironic/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/ironic/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/ironic/tasks/bootstrap.yml b/ansible/roles/ironic/tasks/bootstrap.yml
deleted file mode 100644
index 54eecc0f0e..0000000000
--- a/ansible/roles/ironic/tasks/bootstrap.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-- name: Creating Ironic database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ ironic_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['ironic-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Ironic database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ ironic_database_name }}'
- password='{{ ironic_database_password }}'
- host='%'
- priv='{{ ironic_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['ironic-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
-
-- name: Running Ironic-PXE bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ ironic_pxe_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_ironic_pxe"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/ironic-pxe/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ironic_pxe:/tftpboot/"
- run_once: True
- delegate_to: "{{ groups['ironic-pxe'][0] }}"
diff --git a/ansible/roles/ironic/tasks/bootstrap_service.yml b/ansible/roles/ironic/tasks/bootstrap_service.yml
deleted file mode 100644
index 986d1a6572..0000000000
--- a/ansible/roles/ironic/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Running Ironic bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ ironic_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_ironic"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/ironic-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- run_once: True
- delegate_to: "{{ groups['ironic-api'][0] }}"
diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml
deleted file mode 100644
index d082ecf088..0000000000
--- a/ansible/roles/ironic/tasks/config.yml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "ironic-api"
- - "ironic-conductor"
- - "ironic-inspector"
- - "ironic-pxe"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "ironic-api"
- - "ironic-conductor"
- - "ironic-inspector"
- - "ironic-pxe"
-
-- name: Copying over ironic.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/ironic.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/ironic.conf"
- - "{{ node_custom_config }}/ironic/{{ item }}.conf"
- - "{{ node_custom_config }}/ironic/{{ inventory_hostname }}/ironic.conf"
- dest: "{{ node_config_directory }}/{{ item }}/ironic.conf"
- with_items:
- - "ironic-api"
- - "ironic-conductor"
- - "ironic-inspector"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/ironic/policy.json"
- register: ironic_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/ironic/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "ironic-api"
- - "ironic-conductor"
- - "ironic-inspector"
- - "ironic-pxe"
- when:
- ironic_policy.stat.exists
diff --git a/ansible/roles/ironic/tasks/deploy.yml b/ansible/roles/ironic/tasks/deploy.yml
deleted file mode 100644
index 09fa82b225..0000000000
--- a/ansible/roles/ironic/tasks/deploy.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['ironic-api']
-
-- include: config.yml
- when: inventory_hostname in groups['ironic-api'] or
- inventory_hostname in groups['ironic-conductor'] or
- inventory_hostname in groups['ironic-inspector'] or
- inventory_hostname in groups['ironic-pxe']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['ironic-api']
-
-- include: start.yml
- when: inventory_hostname in groups['ironic-api'] or
- inventory_hostname in groups['ironic-conductor'] or
- inventory_hostname in groups['ironic-inspector'] or
- inventory_hostname in groups['ironic-pxe']
diff --git a/ansible/roles/ironic/tasks/main.yml b/ansible/roles/ironic/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/ironic/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/ironic/tasks/precheck.yml b/ansible/roles/ironic/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/ironic/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/ironic/tasks/pull.yml b/ansible/roles/ironic/tasks/pull.yml
deleted file mode 100644
index 630f34eb17..0000000000
--- a/ansible/roles/ironic/tasks/pull.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Pulling ironic-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ironic_api_image_full }}"
- when: inventory_hostname in groups['ironic-api']
-
-- name: Pulling ironic-conductor image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ironic_conductor_image_full }}"
- when: inventory_hostname in groups['ironic-conductor']
-
-- name: Pulling ironic-inspector image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ironic_inspector_image_full }}"
- when: inventory_hostname in groups['ironic-inspector']
-
-- name: Pulling ironic-pxe image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ ironic_pxe_image_full }}"
- when: inventory_hostname in groups['ironic-pxe']
diff --git a/ansible/roles/ironic/tasks/reconfigure.yml b/ansible/roles/ironic/tasks/reconfigure.yml
deleted file mode 100644
index 392c8219c9..0000000000
--- a/ansible/roles/ironic/tasks/reconfigure.yml
+++ /dev/null
@@ -1,76 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ironic_pxe, group: ironic-pxe }
- - { name: ironic_api, group: ironic-api }
- - { name: ironic_conductor, group: ironic-conductor }
- - { name: ironic_inspector, group: ironic-inspector }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ironic_pxe, group: ironic-pxe }
- - { name: ironic_api, group: ironic-api }
- - { name: ironic_conductor, group: ironic-conductor }
- - { name: ironic_inspector, group: ironic-inspector }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: ironic_pxe, group: ironic-pxe }
- - { name: ironic_api, group: ironic-api }
- - { name: ironic_conductor, group: ironic-conductor }
- - { name: ironic_inspector, group: ironic-inspector }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: ironic_pxe, group: ironic-pxe },
- { name: ironic_api, group: ironic-api },
- { name: ironic_conductor, group: ironic-conductor },
- { name: ironic_inspector, group: ironic-inspector }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: ironic_pxe, group: ironic-pxe },
- { name: ironic_api, group: ironic-api },
- { name: ironic_conductor, group: ironic-conductor },
- { name: ironic_inspector, group: ironic-inspector }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/ironic/tasks/register.yml b/ansible/roles/ironic/tasks/register.yml
deleted file mode 100644
index 0a6d52a1e7..0000000000
--- a/ansible/roles/ironic/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Ironic service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=ironic
- service_type=baremetal
- description='Ironic bare metal provisioning service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_ironic_auth }}' }}"
- -e "{'openstack_ironic_auth':{{ openstack_ironic_auth }}}"
- register: ironic_endpoint
- changed_when: "{{ ironic_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (ironic_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: ironic_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ ironic_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ ironic_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ ironic_public_endpoint }}'}
-
-- name: Creating the Ironic project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user={{ ironic_keystone_user }}
- password={{ ironic_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_ironic_auth }}' }}"
- -e "{'openstack_ironic_auth':{{ openstack_ironic_auth }}}"
- register: ironic_user
- changed_when: "{{ ironic_user.stdout.find('localhost | SUCCESS => ') != -1 and (ironic_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: ironic_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/ironic/tasks/start.yml b/ansible/roles/ironic/tasks/start.yml
deleted file mode 100644
index 874f3e176b..0000000000
--- a/ansible/roles/ironic/tasks/start.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- name: Starting ironic-pxe container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "ironic_pxe"
- image: "{{ ironic_pxe_image_full }}"
- volumes:
- - "{{ node_config_directory }}/ironic-pxe/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "ironic_pxe:/tftpboot/"
- when: inventory_hostname in groups['ironic-pxe']
-
-- name: Starting ironic-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ironic_api_image_full }}"
- name: "ironic_api"
- volumes:
- - "{{ node_config_directory }}/ironic-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla"
- when: inventory_hostname in groups['ironic-api']
-
-- name: Starting ironic-conductor container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ironic_conductor_image_full }}"
- name: "ironic_conductor"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/ironic-conductor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/sys:/sys"
- - "/dev:/dev"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla"
- - "ironic:/var/lib/ironic"
- - "ironic_pxe:/tftpboot/"
- when: inventory_hostname in groups['ironic-conductor']
-
-- name: Starting ironic-inspector container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ ironic_inspector_image_full }}"
- name: "ironic_inspector"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/ironic-inspector/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- when: inventory_hostname in groups['ironic-inspector']
diff --git a/ansible/roles/ironic/tasks/upgrade.yml b/ansible/roles/ironic/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/ironic/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/ironic/templates/ironic-api.json.j2 b/ansible/roles/ironic/templates/ironic-api.json.j2
deleted file mode 100644
index bf10f31058..0000000000
--- a/ansible/roles/ironic/templates/ironic-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "ironic-api",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ironic.conf",
- "dest": "/etc/ironic/ironic.conf",
- "owner": "ironic",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/ironic",
- "owner": "ironic:ironic",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/ironic/templates/ironic-conductor.json.j2 b/ansible/roles/ironic/templates/ironic-conductor.json.j2
deleted file mode 100644
index 46aa5ed6b8..0000000000
--- a/ansible/roles/ironic/templates/ironic-conductor.json.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-{
- "command": "ironic-conductor",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ironic.conf",
- "dest": "/etc/ironic/ironic.conf",
- "owner": "ironic",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/ironic",
- "owner": "ironic:ironic",
- "recurse": true
- },
- {
- "path": "/var/lib/ironic",
- "owner": "ironic:ironic",
- "recurse": true
- },
- {
- "path": "/tftpboot",
- "owner": "ironic:ironic",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/ironic/templates/ironic-inspector.json.j2 b/ansible/roles/ironic/templates/ironic-inspector.json.j2
deleted file mode 100644
index fee13e3e45..0000000000
--- a/ansible/roles/ironic/templates/ironic-inspector.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "ironic-inspector --config-file /etc/ironic-inspector/ironic.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/ironic.conf",
- "dest": "/etc/ironic-inspector/ironic.conf",
- "owner": "ironic",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/ironic/templates/ironic-pxe.json.j2 b/ansible/roles/ironic/templates/ironic-pxe.json.j2
deleted file mode 100644
index 3fdf9d88bd..0000000000
--- a/ansible/roles/ironic/templates/ironic-pxe.json.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "command": "/usr/sbin/in.tftpd --verbose --foreground --user root --address 0.0.0.0:69 --map-file /map-file /tftpboot",
- "config_files": []
-}
diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2
deleted file mode 100644
index c1f84ae554..0000000000
--- a/ansible/roles/ironic/templates/ironic.conf.j2
+++ /dev/null
@@ -1,59 +0,0 @@
-[DEFAULT]
-debug = {{ ironic_logging_debug }}
-
-log_dir = /var/log/kolla/ironic
-
-admin_user = {{ openstack_auth.username }}
-admin_password = {{ keystone_admin_password }}
-
-enabled_drivers = pxe_ipmitool
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if service_name == 'ironic-api' %}
-[api]
-host_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-{% endif %}
-
-{% if service_name == 'ironic-conductor' %}
-[conductor]
-api_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}
-automated_clean=false
-{% endif %}
-
-{% if service_name == 'ironic-inspector' %}
-[ironic]
-os_auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v2.0
-os_username = {{ openstack_auth.username }}
-os_password = {{ openstack_auth.password }}
-os_tenant_name = {{ openstack_auth.project_name }}
-identity_uri = {{ openstack_auth.auth_url }}
-
-[firewall]
-dnsmasq_interface = {{ api_interface }}
-{% endif %}
-
-[database]
-connection = mysql+pymysql://{{ ironic_database_user }}:{{ ironic_database_password }}@{{ ironic_database_address }}/{{ ironic_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ ironic_keystone_user }}
-password = {{ ironic_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[glance]
-glance_host = {{ kolla_internal_fqdn }}
-
-[neutron]
-url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}
diff --git a/ansible/roles/iscsi/defaults/main.yml b/ansible/roles/iscsi/defaults/main.yml
deleted file mode 100644
index 9af4cbbd31..0000000000
--- a/ansible/roles/iscsi/defaults/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-project_name: "iscsi"
-
-####################
-# Docker
-####################
-iscsid_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-iscsid"
-iscsid_tag: "{{ openstack_release }}"
-iscsid_image_full: "{{ iscsid_image }}:{{ iscsid_tag }}"
-
-tgtd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-tgtd"
-tgtd_tag: "{{ openstack_release }}"
-tgtd_image_full: "{{ tgtd_image }}:{{ tgtd_tag }}"
diff --git a/ansible/roles/iscsi/meta/main.yml b/ansible/roles/iscsi/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/iscsi/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/iscsi/tasks/config.yml b/ansible/roles/iscsi/tasks/config.yml
deleted file mode 100644
index 0ebe3dd745..0000000000
--- a/ansible/roles/iscsi/tasks/config.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- when: inventory_hostname in groups['compute'] or
- inventory_hostname in groups['storage']
- with_items:
- - "iscsid"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- when: inventory_hostname in groups['compute'] or
- inventory_hostname in groups['storage']
- with_items:
- - "iscsid"
-
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- when:
- - inventory_hostname in groups['tgtd']
- - enable_cinder_backend_lvm | bool
- with_items:
- - "tgtd"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- when:
- - inventory_hostname in groups['tgtd']
- - enable_cinder_backend_lvm | bool
- with_items:
- - "tgtd"
diff --git a/ansible/roles/iscsi/tasks/deploy.yml b/ansible/roles/iscsi/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/iscsi/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/iscsi/tasks/main.yml b/ansible/roles/iscsi/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/iscsi/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/iscsi/tasks/precheck.yml b/ansible/roles/iscsi/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/iscsi/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/iscsi/tasks/pull.yml b/ansible/roles/iscsi/tasks/pull.yml
deleted file mode 100644
index df84c86cc7..0000000000
--- a/ansible/roles/iscsi/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling iscsid image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ iscsid_image_full }}"
- when: inventory_hostname in groups['iscsid']
-
-- name: Pulling tgtd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ tgtd_image_full }}"
- when: inventory_hostname in groups['tgtd']
diff --git a/ansible/roles/iscsi/tasks/reconfigure.yml b/ansible/roles/iscsi/tasks/reconfigure.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/iscsi/tasks/reconfigure.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/iscsi/tasks/start.yml b/ansible/roles/iscsi/tasks/start.yml
deleted file mode 100644
index f1135188a8..0000000000
--- a/ansible/roles/iscsi/tasks/start.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- name: Starting iscsid container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ iscsid_image_full }}"
- name: "iscsid"
- ipc_mode: "host"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/iscsid/:{{ container_config_directory }}/:ro"
- - "kolla_logs:/var/log/kolla/"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "/run/:/run/:shared"
- - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/sys/kernel/config:/configfs"
- - "cinder:/var/lib/cinder"
- - "iscsi_info:/etc/iscsi"
- when: inventory_hostname in groups['compute'] or
- inventory_hostname in groups['storage']
-
-- name: Starting tgtd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ tgtd_image_full }}"
- name: "tgtd"
- ipc_mode: "host"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/tgtd/:{{ container_config_directory }}/:ro"
- - "kolla_logs:/var/log/kolla/"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "/run/:/run/:shared"
- - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/sys/kernel/config:/configfs"
- - "cinder:/var/lib/cinder"
- - "iscsi_info:/etc/iscsi"
- when:
- - inventory_hostname in groups['tgtd']
- - enable_cinder_backend_lvm | bool
diff --git a/ansible/roles/iscsi/tasks/upgrade.yml b/ansible/roles/iscsi/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/iscsi/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/iscsi/templates/iscsid.json.j2 b/ansible/roles/iscsi/templates/iscsid.json.j2
deleted file mode 100644
index f44cf16c97..0000000000
--- a/ansible/roles/iscsi/templates/iscsid.json.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "command": "iscsid -d 8 -f --pid=/run/iscsid.pid",
- "config_files": []
-}
diff --git a/ansible/roles/iscsi/templates/tgtd.json.j2 b/ansible/roles/iscsi/templates/tgtd.json.j2
deleted file mode 100644
index 3f38ef996f..0000000000
--- a/ansible/roles/iscsi/templates/tgtd.json.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "command": "tgtd -d 1 -f --iscsi portal={{ api_interface_address }}:{{ iscsi_port }}",
- "config_files": []
-}
diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml
deleted file mode 100644
index 0fda77a9ae..0000000000
--- a/ansible/roles/keystone/defaults/main.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-project_name: "keystone"
-
-####################
-# Database
-####################
-keystone_database_name: "keystone"
-keystone_database_user: "keystone"
-keystone_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Fernet
-####################
-keystone_username: "keystone"
-keystone_groupname: "keystone"
-
-
-####################
-# Docker
-####################
-keystone_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-keystone"
-keystone_tag: "{{ openstack_release }}"
-keystone_image_full: "{{ keystone_image }}:{{ keystone_tag }}"
-
-keystone_fernet_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-keystone-fernet"
-keystone_fernet_tag: "{{ openstack_release }}"
-keystone_fernet_image_full: "{{ keystone_fernet_image }}:{{ keystone_fernet_tag }}"
-
-keystone_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-keystone-ssh"
-keystone_ssh_tag: "{{ openstack_release }}"
-keystone_ssh_image_full: "{{ keystone_ssh_image }}:{{ keystone_ssh_tag }}"
-
-
-####################
-# OpenStack
-####################
-keystone_logging_debug: "{{ openstack_logging_debug }}"
-
-openstack_keystone_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/keystone/files/fernet_rotate_cron_generator.py b/ansible/roles/keystone/files/fernet_rotate_cron_generator.py
deleted file mode 100644
index da468a8515..0000000000
--- a/ansible/roles/keystone/files/fernet_rotate_cron_generator.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/python
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This module creates a list of cron intervals for a node in a group of nodes
-# to ensure each node runs a cron in round robbin style.
-
-from __future__ import print_function
-import argparse
-import json
-import sys
-
-MINUTE_SPAN = 1
-HOUR_SPAN = 60
-DAY_SPAN = 24 * HOUR_SPAN
-WEEK_SPAN = 7 * DAY_SPAN
-
-
-def json_exit(msg=None, failed=False, changed=False):
- if type(msg) is not dict:
- msg = {'msg': str(msg)}
- msg.update({'failed': failed, 'changed': changed})
- print(json.dumps(msg))
- sys.exit()
-
-
-def generate(host_index, total_hosts, total_rotation_mins):
- min = '*'
- hour = '*'
- day = '*'
- crons = []
-
- if host_index >= total_hosts:
- return crons
-
- rotation_frequency = total_rotation_mins // total_hosts
- cron_min = rotation_frequency * host_index
-
- # Build crons for a week period
- if total_rotation_mins == WEEK_SPAN:
- day = cron_min // DAY_SPAN
- hour = (cron_min % DAY_SPAN) // HOUR_SPAN
- min = cron_min % HOUR_SPAN
- crons.append({'min': min, 'hour': hour, 'day': day})
-
- # Build crons for a day period
- elif total_rotation_mins == DAY_SPAN:
- hour = cron_min // HOUR_SPAN
- min = cron_min % HOUR_SPAN
- crons.append({'min': min, 'hour': hour, 'day': day})
-
- # Build crons for multiple of an hour
- elif total_rotation_mins % HOUR_SPAN == 0:
- for multiple in range(1, DAY_SPAN // total_rotation_mins + 1):
- time = cron_min
- if multiple > 1:
- time += total_rotation_mins * (multiple - 1)
-
- hour = time // HOUR_SPAN
- min = time % HOUR_SPAN
- crons.append({'min': min, 'hour': hour, 'day': day})
-
- # Build crons for multiple of a minute
- elif total_rotation_mins % MINUTE_SPAN == 0:
- for multiple in range(1, HOUR_SPAN // total_rotation_mins + 1):
- time = cron_min
- if multiple > 1:
- time += total_rotation_mins * (multiple - 1)
-
- min = time // MINUTE_SPAN
- crons.append({'min': min, 'hour': hour, 'day': day})
-
- return crons
-
-
-def main():
- parser = argparse.ArgumentParser(description='''Creates a list of cron
- intervals for a node in a group of nodes to ensure each node runs
- a cron in round robbin style.''')
- parser.add_argument('-t', '--time',
- help='Time in seconds for a token rotation cycle',
- required=True,
- type=int)
- parser.add_argument('-i', '--index',
- help='Index of host starting from 0',
- required=True,
- type=int)
- parser.add_argument('-n', '--number',
- help='Number of tokens that should exist',
- required=True,
- type=int)
- args = parser.parse_args()
- json_exit({'cron_jobs': generate(args.index, args.number, args.time)})
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible/roles/keystone/meta/main.yml b/ansible/roles/keystone/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/keystone/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/keystone/tasks/bootstrap.yml b/ansible/roles/keystone/tasks/bootstrap.yml
deleted file mode 100644
index 301933e47c..0000000000
--- a/ansible/roles/keystone/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Keystone database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ keystone_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['keystone'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Keystone database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ keystone_database_name }}'
- password='{{ keystone_database_password }}'
- host='%'
- priv='{{ keystone_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['keystone'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/keystone/tasks/bootstrap_service.yml b/ansible/roles/keystone/tasks/bootstrap_service.yml
deleted file mode 100644
index cce78e7ced..0000000000
--- a/ansible/roles/keystone/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Keystone bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ keystone_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_keystone"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/keystone/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['keystone'][0] }}"
diff --git a/ansible/roles/keystone/tasks/check.yml b/ansible/roles/keystone/tasks/check.yml
deleted file mode 100644
index 56d000aebc..0000000000
--- a/ansible/roles/keystone/tasks/check.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Keystone sanity checks
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_sanity
- -a "service=keystone
- project=service
- user=admin
- password={{ keystone_admin_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_keystone_auth }}' }}"
- -e "{'openstack_keystone_auth':{{ openstack_keystone_auth }}}"
- register: keystone_sanity
- changed_when: "{{ keystone_sanity.stdout.find('localhost | SUCCESS => ') != -1 and (keystone_sanity.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: keystone_sanity.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- when: kolla_enable_sanity_keystone | bool
diff --git a/ansible/roles/keystone/tasks/config.yml b/ansible/roles/keystone/tasks/config.yml
deleted file mode 100644
index 6951545716..0000000000
--- a/ansible/roles/keystone/tasks/config.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/keystone/policy.json"
- register: keystone_policy
-
-- name: Check if Keystone Domain specific settings enabled
- local_action: stat path="{{ node_custom_config }}/keystone/domains"
- register: keystone_domain_cfg
-
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "keystone"
- - "keystone-fernet"
- - "keystone-ssh"
-
-- name: Creating Keystone Domain directory
- file:
- dest: "{{ node_config_directory }}/{{ item }}/domains/"
- state: "directory"
- when:
- keystone_domain_cfg.stat.exists
- with_items:
- - "keystone"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "keystone"
- - "keystone-fernet"
- - "keystone-ssh"
-
-- name: Copying over keystone.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/keystone.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/keystone.conf"
- - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/keystone.conf"
- dest: "{{ node_config_directory }}/{{ item }}/keystone.conf"
- with_items:
- - "keystone"
- - "keystone-fernet"
- - "keystone-ssh"
-
-- name: Copying Keystone Domain specific settings
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/keystone/domains/"
- with_fileglob:
- - "{{ node_custom_config }}/keystone/domains/*"
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/keystone/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "keystone"
- - "keystone-fernet"
- when:
- keystone_policy.stat.exists
-
-- name: Copying over wsgi-keystone.conf
- template:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/keystone/wsgi-keystone.conf"
- with_first_found:
- - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/wsgi-keystone.conf"
- - "{{ node_custom_config }}/keystone/wsgi-keystone.conf"
- - "wsgi-keystone.conf.j2"
-
-- name: Copying over keystone-paste.ini
- merge_configs:
- sources:
- - "{{ role_path }}/templates/keystone-paste.ini.j2"
- - "{{ node_custom_config }}/keystone/keystone-paste.ini"
- - "{{ node_custom_config }}/keystone/{{ inventory_hostname }}/keystone-paste.ini"
- dest: "{{ node_config_directory }}/keystone/keystone-paste.ini"
-
-- name: Generate the required cron jobs for the node
- local_action: "command python {{ role_path }}/files/fernet_rotate_cron_generator.py -t {{ (fernet_token_expiry | int) // 60 }} -i {{ groups['keystone'].index(inventory_hostname) }} -n {{ (groups['keystone'] | length) }}"
- register: cron_jobs_json
- when: keystone_token_provider == 'fernet'
-
-- name: Save the returned from cron jobs for building the crontab
- set_fact:
- cron_jobs: "{{ (cron_jobs_json.stdout | from_json).cron_jobs }}"
- when: keystone_token_provider == 'fernet'
-
-- name: Copying files for keystone-fernet
- template:
- src: "{{ item.src }}"
- dest: "{{ node_config_directory }}/keystone-fernet/{{ item.dest }}"
- with_items:
- - { src: "crontab.j2", dest: "crontab" }
- - { src: "fernet-rotate.sh.j2", dest: "fernet-rotate.sh" }
- - { src: "fernet-node-sync.sh.j2", dest: "fernet-node-sync.sh" }
- - { src: "id_rsa", dest: "id_rsa" }
- - { src: "ssh_config.j2", dest: "ssh_config" }
- when: keystone_token_provider == 'fernet'
-
-- name: Copying files for keystone-ssh
- template:
- src: "{{ item.src }}"
- dest: "{{ node_config_directory }}/keystone-ssh/{{ item.dest }}"
- with_items:
- - { src: "sshd_config.j2", dest: "sshd_config" }
- - { src: "id_rsa.pub", dest: "id_rsa.pub" }
- when: keystone_token_provider == 'fernet'
diff --git a/ansible/roles/keystone/tasks/deploy.yml b/ansible/roles/keystone/tasks/deploy.yml
deleted file mode 100644
index 9ccf17b9a7..0000000000
--- a/ansible/roles/keystone/tasks/deploy.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- include: config.yml
- when: inventory_hostname in groups['keystone']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['keystone']
-
-- include: start.yml
- when: inventory_hostname in groups['keystone']
-
-- include: init_fernet.yml
- when:
- - inventory_hostname in groups['keystone']
- - keystone_token_provider == 'fernet'
-
-- include: register.yml
- when: inventory_hostname in groups['keystone']
-
-- include: check.yml
diff --git a/ansible/roles/keystone/tasks/init_fernet.yml b/ansible/roles/keystone/tasks/init_fernet.yml
deleted file mode 100644
index c608202ab7..0000000000
--- a/ansible/roles/keystone/tasks/init_fernet.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Initialise fernet key authentication
- command: "docker exec -t keystone_fernet kolla_keystone_bootstrap {{ keystone_username }} {{ keystone_groupname }}"
- register: fernet_create
- changed_when: "{{ fernet_create.stdout.find('localhost | SUCCESS => ') != -1 and (fernet_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: "(fernet_create.stdout.split()[2] == 'SUCCESS') or (fernet_create.stdout.find('Key repository is already initialized') != -1)"
- retries: 10
- delay: 5
- run_once: True
- delegate_to: "{{ groups['keystone'][0] }}"
-
-- name: Run key distribution
- command: docker exec -t keystone_fernet /usr/bin/fernet-rotate.sh
- run_once: True
- delegate_to: "{{ groups['keystone'][0] }}"
diff --git a/ansible/roles/keystone/tasks/main.yml b/ansible/roles/keystone/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/keystone/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/keystone/tasks/precheck.yml b/ansible/roles/keystone/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/keystone/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/keystone/tasks/pull.yml b/ansible/roles/keystone/tasks/pull.yml
deleted file mode 100644
index c9152b1486..0000000000
--- a/ansible/roles/keystone/tasks/pull.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Pulling keystone image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ keystone_image_full }}"
- when: inventory_hostname in groups['keystone']
-
-- name: Pulling keystone_fernet image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ keystone_fernet_image_full }}"
- when:
- - inventory_hostname in groups['keystone']
- - keystone_token_provider == 'fernet'
-
-- name: Pulling keystone_ssh image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ keystone_ssh_image_full }}"
- when:
- - inventory_hostname in groups['keystone']
- - keystone_token_provider == 'fernet'
diff --git a/ansible/roles/keystone/tasks/reconfigure.yml b/ansible/roles/keystone/tasks/reconfigure.yml
deleted file mode 100644
index 6ef22ad9ff..0000000000
--- a/ansible/roles/keystone/tasks/reconfigure.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- name: Set variable for keystone components used in reconfigure
- set_fact:
- keystone_items:
- - { name: keystone, group: keystone }
-
-- name: Create fernet related components for variable if fernet is enabled
- set_fact:
- keystone_fernet_items:
- - { name: keystone_fernet, group: keystone }
- - { name: keystone_ssh, group: keystone }
- when: keystone_token_provider == 'fernet'
-
-- name: Merge fernet related components to variable if fernet is enabled
- set_fact:
- keystone_items: "{{ keystone_items + keystone_fernet_items }}"
- when: keystone_token_provider == 'fernet'
-
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items: "{{ keystone_items }}"
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items: "{{ keystone_items }}"
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items: "{{ keystone_items }}"
-
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - "{{ keystone_items }}"
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - "{{ keystone_items }}"
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/keystone/tasks/register.yml b/ansible/roles/keystone/tasks/register.yml
deleted file mode 100644
index 08875907d2..0000000000
--- a/ansible/roles/keystone/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Creating admin project, user, role, service, and endpoint
- command: docker exec keystone kolla_keystone_bootstrap {{ openstack_auth.username }} {{ openstack_auth.password }} {{ openstack_auth.project_name }} admin {{ keystone_admin_url }} {{ keystone_internal_url }} {{ keystone_public_url }} {{ openstack_region_name }}
- register: keystone_bootstrap
- changed_when: "{{ (keystone_bootstrap.stdout | from_json).changed }}"
- failed_when: "{{ (keystone_bootstrap.stdout | from_json).failed }}"
- run_once: True
diff --git a/ansible/roles/keystone/tasks/start.yml b/ansible/roles/keystone/tasks/start.yml
deleted file mode 100644
index 382a0d1e69..0000000000
--- a/ansible/roles/keystone/tasks/start.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Set variable for initial keystone volumes
- set_fact:
- keystone_volumes:
- - "{{ node_config_directory }}/keystone/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
-
-- name: Add fernet volume to keystone volumes variable if fernet enabled
- set_fact:
- keystone_volumes: "{{ keystone_volumes + [\"keystone_fernet_tokens:/etc/keystone/fernet-keys\"] }}"
- when: keystone_token_provider == 'fernet'
-
-- name: Starting keystone container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ keystone_image_full }}"
- name: "keystone"
- volumes: "{{ keystone_volumes }}"
-
-- name: Wait for keystone startup
- wait_for: host={{ kolla_internal_fqdn }} port={{ keystone_admin_port }}
-
-- name: Starting keystone-ssh container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ keystone_ssh_image_full }}"
- name: "keystone_ssh"
- volumes:
- - "{{ node_config_directory }}/keystone-ssh/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "keystone_fernet_tokens:/etc/keystone/fernet-keys"
- when: keystone_token_provider == 'fernet'
-
-- name: Starting keystone-fernet container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ keystone_fernet_image_full }}"
- name: "keystone_fernet"
- volumes:
- - "{{ node_config_directory }}/keystone-fernet/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "keystone_fernet_tokens:/etc/keystone/fernet-keys"
- when: keystone_token_provider == 'fernet'
diff --git a/ansible/roles/keystone/tasks/upgrade.yml b/ansible/roles/keystone/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/keystone/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/keystone/templates/crontab.j2 b/ansible/roles/keystone/templates/crontab.j2
deleted file mode 100644
index af16e114fd..0000000000
--- a/ansible/roles/keystone/templates/crontab.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for cron_job in cron_jobs %}
-{{ cron_job['min'] }} {{ cron_job['hour'] }} * * {{ cron_job['day'] }} /usr/bin/fernet-rotate.sh
-{% endfor %}
diff --git a/ansible/roles/keystone/templates/fernet-node-sync.sh.j2 b/ansible/roles/keystone/templates/fernet-node-sync.sh.j2
deleted file mode 100644
index a100f23771..0000000000
--- a/ansible/roles/keystone/templates/fernet-node-sync.sh.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-# Get data on the fernet tokens
-TOKEN_CHECK=$(/usr/bin/fetch_fernet_tokens.py -t {{ fernet_token_expiry }} -n {{ (groups['keystone'] | length) + 1 }})
-
-# Ensure the primary token exists and is not stale
-if $(echo "$TOKEN_CHECK" | grep -q '"update_required":"false"'); then
- exit 0;
-fi
-
-# For each host node sync tokens
-{% for host in groups['keystone'] %}
-{% if inventory_hostname != host %}
-/usr/bin/rsync -azu --delete -e 'ssh -i /var/lib/keystone/.ssh/id_rsa -p {{ keystone_ssh_port }} -F /var/lib/keystone/.ssh/config' keystone@{{ host }}:/etc/keystone/fernet-keys/ /etc/keystone/fernet-keys
-{% endif %}
-{% endfor %}
diff --git a/ansible/roles/keystone/templates/fernet-rotate.sh.j2 b/ansible/roles/keystone/templates/fernet-rotate.sh.j2
deleted file mode 100644
index 28c5b6f670..0000000000
--- a/ansible/roles/keystone/templates/fernet-rotate.sh.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-keystone-manage --config-file /etc/keystone/keystone.conf fernet_rotate --keystone-user {{ keystone_username }} --keystone-group {{ keystone_groupname }}
-
-{% for host in groups['keystone'] %}
-{% if inventory_hostname != host %}
-/usr/bin/rsync -az -e 'ssh -i /var/lib/keystone/.ssh/id_rsa -p {{ keystone_ssh_port }} -F /var/lib/keystone/.ssh/config' --delete /etc/keystone/fernet-keys/ keystone@{{ host }}:/etc/keystone/fernet-keys
-{% endif %}
-{% endfor %}
diff --git a/ansible/roles/keystone/templates/id_rsa b/ansible/roles/keystone/templates/id_rsa
deleted file mode 100644
index 3e27166162..0000000000
--- a/ansible/roles/keystone/templates/id_rsa
+++ /dev/null
@@ -1 +0,0 @@
-{{ keystone_ssh_key.private_key }}
diff --git a/ansible/roles/keystone/templates/id_rsa.pub b/ansible/roles/keystone/templates/id_rsa.pub
deleted file mode 100644
index 529f98ab89..0000000000
--- a/ansible/roles/keystone/templates/id_rsa.pub
+++ /dev/null
@@ -1 +0,0 @@
-{{ keystone_ssh_key.public_key }}
diff --git a/ansible/roles/keystone/templates/keystone-fernet.json.j2 b/ansible/roles/keystone/templates/keystone-fernet.json.j2
deleted file mode 100644
index 9078977b5e..0000000000
--- a/ansible/roles/keystone/templates/keystone-fernet.json.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
-{
- "command": "{{ cron_cmd }}",
- "config_files": [{
- "source": "{{ container_config_directory }}/keystone.conf",
- "dest": "/etc/keystone/keystone.conf",
- "owner": "keystone",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/crontab",
- "dest": "/var/spool/cron/crontabs/root/fernet-cron",
- "owner": "root",
- "perm": "0644"
- },
- {
- "source": "{{ container_config_directory }}/fernet-rotate.sh",
- "dest": "/usr/bin/fernet-rotate.sh",
- "owner": "root",
- "perm": "0755"
- },
- {
- "source": "{{ container_config_directory }}/fernet-node-sync.sh",
- "dest": "/usr/bin/fernet-node-sync.sh",
- "owner": "root",
- "perm": "0755"
- },
- {
- "source": "{{ container_config_directory }}/ssh_config",
- "dest": "/var/lib/keystone/.ssh/config",
- "owner": "keystone",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/id_rsa",
- "dest": "/var/lib/keystone/.ssh/id_rsa",
- "owner": "keystone",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/keystone/templates/keystone-paste.ini.j2 b/ansible/roles/keystone/templates/keystone-paste.ini.j2
deleted file mode 100644
index 0e2ee368e2..0000000000
--- a/ansible/roles/keystone/templates/keystone-paste.ini.j2
+++ /dev/null
@@ -1,83 +0,0 @@
-# Keystone PasteDeploy configuration file.
-
-[filter:debug]
-use = egg:oslo.middleware#debug
-
-[filter:request_id]
-use = egg:oslo.middleware#request_id
-
-[filter:build_auth_context]
-use = egg:keystone#build_auth_context
-
-[filter:token_auth]
-use = egg:keystone#token_auth
-
-[filter:json_body]
-use = egg:keystone#json_body
-
-[filter:cors]
-use = egg:oslo.middleware#cors
-oslo_config_project = keystone
-
-[filter:ec2_extension]
-use = egg:keystone#ec2_extension
-
-[filter:ec2_extension_v3]
-use = egg:keystone#ec2_extension_v3
-
-[filter:s3_extension]
-use = egg:keystone#s3_extension
-
-[filter:url_normalize]
-use = egg:keystone#url_normalize
-
-[filter:sizelimit]
-use = egg:oslo.middleware#sizelimit
-
-[app:public_service]
-use = egg:keystone#public_service
-
-[app:service_v3]
-use = egg:keystone#service_v3
-
-[app:admin_service]
-use = egg:keystone#admin_service
-
-[pipeline:public_api]
-# The last item in this pipeline must be public_service or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension public_service
-
-[pipeline:admin_api]
-# The last item in this pipeline must be admin_service or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension s3_extension admin_service
-
-[pipeline:api_v3]
-# The last item in this pipeline must be service_v3 or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
-
-[app:public_version_service]
-use = egg:keystone#public_version_service
-
-[app:admin_version_service]
-use = egg:keystone#admin_version_service
-
-[pipeline:public_version_api]
-pipeline = cors sizelimit url_normalize public_version_service
-
-[pipeline:admin_version_api]
-pipeline = cors sizelimit url_normalize admin_version_service
-
-[composite:main]
-use = egg:Paste#urlmap
-/v2.0 = public_api
-/v3 = api_v3
-/ = public_version_api
-
-[composite:admin]
-use = egg:Paste#urlmap
-/v2.0 = admin_api
-/v3 = api_v3
-/ = admin_version_api
diff --git a/ansible/roles/keystone/templates/keystone-ssh.json.j2 b/ansible/roles/keystone/templates/keystone-ssh.json.j2
deleted file mode 100644
index c13e0eda60..0000000000
--- a/ansible/roles/keystone/templates/keystone-ssh.json.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "command": "/usr/sbin/sshd -D",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/sshd_config",
- "dest": "/etc/ssh/sshd_config",
- "owner": "root",
- "perm": "0644"
- },
- {
- "source": "{{ container_config_directory }}/id_rsa.pub",
- "dest": "/var/lib/keystone/.ssh/authorized_keys",
- "owner": "keystone",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/keystone/templates/keystone.conf.j2 b/ansible/roles/keystone/templates/keystone.conf.j2
deleted file mode 100644
index ff33a241ff..0000000000
--- a/ansible/roles/keystone/templates/keystone.conf.j2
+++ /dev/null
@@ -1,52 +0,0 @@
-[DEFAULT]
-debug = {{ keystone_logging_debug }}
-{% if enable_cadf_notifications | bool %}
-notification_format = cadf
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
-
-# NOTE(elemoine) log_dir alone does not work for Keystone
-log_file = /var/log/kolla/keystone/keystone.log
-
-secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
-
-[database]
-connection = mysql+pymysql://{{ keystone_database_user }}:{{ keystone_database_password }}@{{ keystone_database_address }}/{{ keystone_database_name }}
-max_retries = -1
-
-{% if keystone_domain_cfg.stat.exists %}
-[identity]
-domain_specific_drivers_enabled = true
-domain_config_dir = /etc/keystone/domains
-{% endif %}
-
-[token]
-{% if keystone_token_provider == 'uuid' %}
-provider = uuid
-{% elif keystone_token_provider == 'fernet' %}
-provider = {{ keystone_token_provider }}
-expiration = {{ fernet_token_expiry }}
-
-[fernet_tokens]
-max_active_keys = {{ (groups['keystone'] | length) + 1 }}
-{% endif %}
-
-[cache]
-backend = oslo_cache.memcache_pool
-enabled = True
-
-{# For Kolla-Ansible, generate the memcache servers based on the list of
-memcached servers in the inventory and memcached_servers should be un-set.
-For Kolla-Kubernetes, it is necessary to define the memcached_servers
-variable in globals.yml to set it to the Kubernetes service for memcached. #}
-
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
-memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
-
-{% if enable_cadf_notifications | bool %}
-[oslo_messaging_notifications]
-driver = messagingv2
-{% endif %}
diff --git a/ansible/roles/keystone/templates/keystone.json.j2 b/ansible/roles/keystone/templates/keystone.json.j2
deleted file mode 100644
index 3cd7921116..0000000000
--- a/ansible/roles/keystone/templates/keystone.json.j2
+++ /dev/null
@@ -1,49 +0,0 @@
-{% set keystone_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
-{% set keystone_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
-{
- "command": "/usr/sbin/{{ keystone_cmd }}",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/keystone.conf",
- "dest": "/etc/keystone/keystone.conf",
- "owner": "keystone",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/keystone-paste.ini",
- "dest": "/etc/keystone/keystone-paste.ini",
- "owner": "keystone",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/domains",
- "dest": "/etc/keystone/domains",
- "owner": "keystone",
- "perm": "0700",
- "optional": true
- },
- {
- "source": "{{ container_config_directory }}/policy.json",
- "dest": "/etc/keystone/policy.json",
- "owner": "keystone",
- "perm": "0600",
- "optional": true
- },
- {
- "source": "{{ container_config_directory }}/wsgi-keystone.conf",
- "dest": "/etc/{{ keystone_dir }}/wsgi-keystone.conf",
- "owner": "keystone",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla",
- "owner": "keystone:kolla"
- },
- {
- "path": "/var/log/kolla/keystone/keystone.log",
- "owner": "keystone:keystone"
- }
- ]
-}
diff --git a/ansible/roles/keystone/templates/ssh_config.j2 b/ansible/roles/keystone/templates/ssh_config.j2
deleted file mode 100644
index 4a177f6552..0000000000
--- a/ansible/roles/keystone/templates/ssh_config.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-Host *
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
- Port {{ keystone_ssh_port }}
diff --git a/ansible/roles/keystone/templates/sshd_config.j2 b/ansible/roles/keystone/templates/sshd_config.j2
deleted file mode 100644
index 8b66f42c7d..0000000000
--- a/ansible/roles/keystone/templates/sshd_config.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-Port {{ keystone_ssh_port }}
-ListenAddress {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-SyslogFacility AUTHPRIV
-UsePAM yes
diff --git a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
deleted file mode 100644
index a3f98173ab..0000000000
--- a/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-{% set keystone_log_dir = '/var/log/kolla/keystone' %}
-{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
-Listen {{ api_interface_address }}:{{ keystone_public_port }}
-Listen {{ api_interface_address }}:{{ keystone_admin_port }}
-
-
- WSGIDaemonProcess keystone-public processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=%{GROUP} python-path={{ python_path }}
- WSGIProcessGroup keystone-public
- WSGIScriptAlias / /var/www/cgi-bin/keystone/main
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- = 2.4>
- ErrorLogFormat "%{cu}t %M"
-
- ErrorLog "{{ keystone_log_dir }}/keystone-apache-public-error.log"
- LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
- CustomLog "{{ keystone_log_dir }}/keystone-apache-public-access.log" logformat
-
-
-
- WSGIDaemonProcess keystone-admin processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=%{GROUP} python-path={{ python_path }}
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / /var/www/cgi-bin/keystone/admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- = 2.4>
- ErrorLogFormat "%{cu}t %M"
-
- ErrorLog "{{ keystone_log_dir }}/keystone-apache-admin-error.log"
- LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
- CustomLog "{{ keystone_log_dir }}/keystone-apache-admin-access.log" logformat
-
diff --git a/ansible/roles/kibana/defaults/main.yml b/ansible/roles/kibana/defaults/main.yml
deleted file mode 100644
index e35e2f1e71..0000000000
--- a/ansible/roles/kibana/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-####################
-# Kibana
-####################
-kibana_default_app_id: "discover"
-kibana_elasticsearch_request_timeout: 300000
-kibana_elasticsearch_shard_timeout: 0
-kibana_elasticsearch_ssl_verify: false
-
-
-####################
-# Docker
-####################
-kibana_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kibana"
-kibana_tag: "{{ openstack_release }}"
-kibana_image_full: "{{ kibana_image }}:{{ kibana_tag }}"
diff --git a/ansible/roles/kibana/meta/main.yml b/ansible/roles/kibana/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/kibana/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/kibana/tasks/config.yml b/ansible/roles/kibana/tasks/config.yml
deleted file mode 100644
index edccb229c5..0000000000
--- a/ansible/roles/kibana/tasks/config.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Ensuring kibana config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "kibana"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "kibana"
-
-- name: Copying over Kibana configuration file
- template:
- src: "{{ item }}.yml.j2"
- dest: "{{ node_config_directory }}/{{ item }}/{{ item }}.yml"
- with_items:
- - "kibana"
diff --git a/ansible/roles/kibana/tasks/deploy.yml b/ansible/roles/kibana/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/kibana/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/kibana/tasks/main.yml b/ansible/roles/kibana/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/kibana/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/kibana/tasks/precheck.yml b/ansible/roles/kibana/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/kibana/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/kibana/tasks/pull.yml b/ansible/roles/kibana/tasks/pull.yml
deleted file mode 100644
index afe39cd3e6..0000000000
--- a/ansible/roles/kibana/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling Kibana image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ kibana_image_full }}"
diff --git a/ansible/roles/kibana/tasks/reconfigure.yml b/ansible/roles/kibana/tasks/reconfigure.yml
deleted file mode 100644
index 54cfea05d5..0000000000
--- a/ansible/roles/kibana/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: kibana, group: kibana }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: kibana, group: kibana }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: kibana, group: kibana }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: kibana, group: kibana }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: kibana, group: kibana }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/kibana/tasks/start.yml b/ansible/roles/kibana/tasks/start.yml
deleted file mode 100644
index 239e892c2f..0000000000
--- a/ansible/roles/kibana/tasks/start.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Starting Kibana container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ kibana_image_full }}"
- name: "kibana"
- volumes:
- - "{{ node_config_directory }}/kibana/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/kibana/tasks/upgrade.yml b/ansible/roles/kibana/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/kibana/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/kibana/templates/kibana.json.j2 b/ansible/roles/kibana/templates/kibana.json.j2
deleted file mode 100644
index 9410a07d96..0000000000
--- a/ansible/roles/kibana/templates/kibana.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "/opt/kibana/bin/kibana",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/kibana.yml",
- "dest": "/opt/kibana/config/kibana.yml",
- "owner": "kibana",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/kibana/templates/kibana.yml.j2 b/ansible/roles/kibana/templates/kibana.yml.j2
deleted file mode 100644
index 9d161b0fbe..0000000000
--- a/ansible/roles/kibana/templates/kibana.yml.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-kibana.defaultAppId: "{{ kibana_default_app_id }}"
-logging.dest: /var/log/kolla/kibana/kibana.log
-server.port: {{ kibana_server_port }}
-server.host: "{% if orchestration_engine == 'KUBERNETES' %}0.0.0.0{% else %}{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}{% endif %}"
-elasticsearch.url: "{{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}elasticsearch{% else %}{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}{% endif %}:{{ elasticsearch_port }}"
-elasticsearch.requestTimeout: {{ kibana_elasticsearch_request_timeout }}
-elasticsearch.shardTimeout: {{ kibana_elasticsearch_shard_timeout }}
-elasticsearch.ssl.verify: {{ kibana_elasticsearch_ssl_verify }}
-bundled_plugin_ids:
- - plugins/dashboard/index
- - plugins/discover/index
- - plugins/doc/index
- - plugins/kibana/index
- - plugins/markdown_vis/index
- - plugins/metric_vis/index
- - plugins/settings/index
- - plugins/table_vis/index
- - plugins/vis_types/index
- - plugins/visualize/index
diff --git a/ansible/roles/kuryr/defaults/main.yml b/ansible/roles/kuryr/defaults/main.yml
deleted file mode 100644
index 1c755b1c44..0000000000
--- a/ansible/roles/kuryr/defaults/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-project_name: "kuryr"
-
-
-####################
-# Docker
-####################
-kuryr_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-kuryr-libnetwork"
-kuryr_tag: "{{ openstack_release }}"
-kuryr_image_full: "{{ kuryr_image }}:{{ kuryr_tag }}"
-
-
-####################
-# OpenStack
-####################
-kuryr_logging_debug: "{{ openstack_logging_debug }}"
-
-kuryr_keystone_user: "kuryr"
-
-openstack_kuryr_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/kuryr/tasks/bootstrap.yml b/ansible/roles/kuryr/tasks/bootstrap.yml
deleted file mode 100644
index 0962484c24..0000000000
--- a/ansible/roles/kuryr/tasks/bootstrap.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Running Kuryr bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ kuryr_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_kuryr"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/kuryr/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run:/run:shared"
- - "/usr/lib/docker:/usr/lib/docker"
diff --git a/ansible/roles/kuryr/tasks/config.yml b/ansible/roles/kuryr/tasks/config.yml
deleted file mode 100644
index 001793780c..0000000000
--- a/ansible/roles/kuryr/tasks/config.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "kuryr"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "kuryr"
-
-- name: Copying over kuryr.conf
- template:
- src: "kuryr.conf.j2"
- dest: "{{ node_config_directory }}/kuryr/kuryr.conf"
-
-- name: Copying over kuryr.spec
- template:
- src: "kuryr.spec.j2"
- dest: "{{ node_config_directory }}/kuryr/kuryr.spec"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/kuryr/policy.json"
- register: kuryr_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/kuryr/policy.json"
- dest: "{{ node_config_directory }}/kuryr/policy.json"
- when:
- kuryr_policy.stat.exists
diff --git a/ansible/roles/kuryr/tasks/deploy.yml b/ansible/roles/kuryr/tasks/deploy.yml
deleted file mode 100644
index 5c48120b7c..0000000000
--- a/ansible/roles/kuryr/tasks/deploy.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- include: register.yml
-
-- include: config.yml
-
-- include: bootstrap.yml
-
-- include: start.yml
diff --git a/ansible/roles/kuryr/tasks/main.yml b/ansible/roles/kuryr/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/kuryr/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/kuryr/tasks/precheck.yml b/ansible/roles/kuryr/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/kuryr/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/kuryr/tasks/pull.yml b/ansible/roles/kuryr/tasks/pull.yml
deleted file mode 100644
index 44b24470e6..0000000000
--- a/ansible/roles/kuryr/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling kuryr image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ kuryr_image_full }}"
diff --git a/ansible/roles/kuryr/tasks/register.yml b/ansible/roles/kuryr/tasks/register.yml
deleted file mode 100644
index b8b232354d..0000000000
--- a/ansible/roles/kuryr/tasks/register.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Creating the Kuryr project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user={{ kuryr_keystone_user }}
- password={{ kuryr_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_kuryr_auth }}' }}"
- -e "{'openstack_kuryr_auth':{{ openstack_kuryr_auth }}}"
- register: kuryr_user
- changed_when: "{{ kuryr_user.stdout.find('localhost | SUCCESS => ') != -1 and (kuryr_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: kuryr_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/kuryr/tasks/start.yml b/ansible/roles/kuryr/tasks/start.yml
deleted file mode 100644
index fe93383bf3..0000000000
--- a/ansible/roles/kuryr/tasks/start.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# NOTE(huikang, apuimedo): when you request a driver in a docker operation, such
-# as docker network create, docker searches /usr/lib/docker or /etc/docker
-# subdirs for network/storage plugin specs or json definitions. so it's either
-# have ansible place the file there, or volume mount it and let the container
-# place the file there
-- name: Starting kuryr container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ kuryr_image_full }}"
- name: "kuryr"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/kuryr/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run:/run:shared"
- - "/usr/lib/docker:/usr/lib/docker"
- when: inventory_hostname in groups['compute']
diff --git a/ansible/roles/kuryr/tasks/upgrade.yml b/ansible/roles/kuryr/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/kuryr/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/kuryr/templates/kuryr.conf.j2 b/ansible/roles/kuryr/templates/kuryr.conf.j2
deleted file mode 100644
index 22b6199077..0000000000
--- a/ansible/roles/kuryr/templates/kuryr.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-[DEFAULT]
-kuryr_uri = http://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ kuryr_port }}
-debug = {{ kuryr_logging_debug }}
-
-[binding]
-
-[neutron]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_name = Default
-project_name = service
-user_domain_name = Default
-project_domain_id = default
-user_domain_id = default
-password = {{ kuryr_keystone_password }}
-username = {{ kuryr_keystone_user }}
diff --git a/ansible/roles/kuryr/templates/kuryr.json.j2 b/ansible/roles/kuryr/templates/kuryr.json.j2
deleted file mode 100644
index 373c25a767..0000000000
--- a/ansible/roles/kuryr/templates/kuryr.json.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "command": "kuryr-server --config-file /etc/kuryr/kuryr.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/kuryr.conf",
- "dest": "/etc/kuryr/kuryr.conf",
- "owner": "root",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/kuryr.spec",
- "dest": "/usr/lib/docker/plugins/kuryr/kuryr.spec",
- "owner": "root",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/kuryr/templates/kuryr.spec.j2 b/ansible/roles/kuryr/templates/kuryr.spec.j2
deleted file mode 100644
index 8d513bfb7c..0000000000
--- a/ansible/roles/kuryr/templates/kuryr.spec.j2
+++ /dev/null
@@ -1 +0,0 @@
-http://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ kuryr_port }}
diff --git a/ansible/roles/magnum/defaults/main.yml b/ansible/roles/magnum/defaults/main.yml
deleted file mode 100644
index c44c254eb8..0000000000
--- a/ansible/roles/magnum/defaults/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-project_name: "magnum"
-
-####################
-# Database
-####################
-magnum_database_name: "magnum"
-magnum_database_user: "magnum"
-magnum_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-magnum_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-magnum-api"
-magnum_api_tag: "{{ openstack_release }}"
-magnum_api_image_full: "{{ magnum_api_image }}:{{ magnum_api_tag }}"
-magnum_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-magnum-conductor"
-magnum_conductor_tag: "{{ openstack_release }}"
-magnum_conductor_image_full: "{{ magnum_conductor_image }}:{{ magnum_conductor_tag }}"
-
-
-####################
-# OpenStack
-####################
-magnum_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ magnum_api_port }}/v1"
-magnum_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ magnum_api_port }}/v1"
-magnum_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ magnum_api_port }}/v1"
-
-magnum_logging_debug: "{{ openstack_logging_debug }}"
-
-magnum_keystone_user: "magnum"
-magnum_trustee_domain_admin: "magnum_trustee_domain_admin"
-magnum_trustee_domain: "magnum"
-
-openstack_magnum_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/magnum/meta/main.yml b/ansible/roles/magnum/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/magnum/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/magnum/tasks/bootstrap.yml b/ansible/roles/magnum/tasks/bootstrap.yml
deleted file mode 100644
index af9d640fbe..0000000000
--- a/ansible/roles/magnum/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Magnum database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ magnum_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['magnum-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Magnum database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ magnum_database_name }}'
- password='{{ magnum_database_password }}'
- host='%'
- priv='{{ magnum_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['magnum-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/magnum/tasks/bootstrap_service.yml b/ansible/roles/magnum/tasks/bootstrap_service.yml
deleted file mode 100644
index 2f5e2b5031..0000000000
--- a/ansible/roles/magnum/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Magnum bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ magnum_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_magnum"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/magnum-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla"
- run_once: True
- delegate_to: "{{ groups['magnum-api'][0] }}"
diff --git a/ansible/roles/magnum/tasks/config.yml b/ansible/roles/magnum/tasks/config.yml
deleted file mode 100644
index 2fa679bc61..0000000000
--- a/ansible/roles/magnum/tasks/config.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "magnum-api"
- - "magnum-conductor"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "magnum-api"
- - "magnum-conductor"
-
-- name: Copying over magnum.conf
- merge_configs:
- vars:
- service_name: "magnum-api"
- sources:
- - "{{ role_path }}/templates/magnum.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/magnum.conf"
- - "{{ node_custom_config }}/magnum/{{ item }}.conf"
- - "{{ node_custom_config }}/magnum/{{ inventory_hostname }}/magnum.conf"
- dest: "{{ node_config_directory }}/{{ item }}/magnum.conf"
- with_items:
- - "magnum-api"
- - "magnum-conductor"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/magnum/policy.json"
- register: magnum_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/magnum/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "magnum-api"
- - "magnum-conductor"
- when:
- magnum_policy.stat.exists
diff --git a/ansible/roles/magnum/tasks/deploy.yml b/ansible/roles/magnum/tasks/deploy.yml
deleted file mode 100644
index 50e56814ac..0000000000
--- a/ansible/roles/magnum/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['magnum-api']
-
-- include: config.yml
- when: inventory_hostname in groups['magnum-api'] or
- inventory_hostname in groups['magnum-conductor']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['magnum-api']
-
-- include: start.yml
- when: inventory_hostname in groups['magnum-api'] or
- inventory_hostname in groups['magnum-conductor']
diff --git a/ansible/roles/magnum/tasks/main.yml b/ansible/roles/magnum/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/magnum/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/magnum/tasks/precheck.yml b/ansible/roles/magnum/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/magnum/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/magnum/tasks/pull.yml b/ansible/roles/magnum/tasks/pull.yml
deleted file mode 100644
index 3825c7520a..0000000000
--- a/ansible/roles/magnum/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling magnum-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ magnum_api_image_full }}"
- when: inventory_hostname in groups['magnum-api']
-
-- name: Pulling magnum-conductor image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ magnum_conductor_image_full }}"
- when: inventory_hostname in groups['magnum-conductor']
diff --git a/ansible/roles/magnum/tasks/reconfigure.yml b/ansible/roles/magnum/tasks/reconfigure.yml
deleted file mode 100644
index 9598ffb89d..0000000000
--- a/ansible/roles/magnum/tasks/reconfigure.yml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: magnum_api, group: magnum-api }
- - { name: magnum_conductor, group: magnum-conductor }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: magnum_api, group: magnum-api }
- - { name: magnum_conductor, group: magnum-conductor }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: magnum_api, group: magnum-api }
- - { name: magnum_conductor, group: magnum-conductor }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: magnum_api, group: magnum-api },
- { name: magnum_conductor, group: magnum-conductor }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: magnum_api, group: magnum-api },
- { name: magnum_conductor, group: magnum-conductor }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/magnum/tasks/register.yml b/ansible/roles/magnum/tasks/register.yml
deleted file mode 100644
index 31ac4a0851..0000000000
--- a/ansible/roles/magnum/tasks/register.yml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-- name: Creating the Magnum service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=magnum
- service_type=container-infra
- description='Openstack Container Service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_magnum_auth }}' }}"
- -e "{'openstack_magnum_auth':{{ openstack_magnum_auth }}}"
- register: magnum_endpoint
- changed_when: "{{ magnum_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (magnum_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: magnum_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ magnum_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ magnum_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ magnum_public_endpoint }}'}
-
-- name: Creating the Magnum project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=magnum
- password={{ magnum_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_magnum_auth }}' }}"
- -e "{'openstack_magnum_auth':{{ openstack_magnum_auth }}}"
- register: magnum_user
- changed_when: "{{ magnum_user.stdout.find('localhost | SUCCESS => ') != -1 and (magnum_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: magnum_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
-
-- name: Creating Magnum trustee domain
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m os_keystone_domain
- -a "name={{ magnum_trustee_domain }}
- description='Owns users and projects created by magnum'
- auth={{ '{{ openstack_magnum_auth }}' }}"
- -e "{'openstack_magnum_auth':{{ openstack_magnum_auth }}}"
- register: trustee_domain
- changed_when: "{{ trustee_domain.stdout.find('localhost | SUCCESS => ') != -1 and (trustee_domain.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: trustee_domain.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
-
-- name: Setting Magnum trustee domain value
- set_fact:
- magnum_trustee_domain_id: "{{ (trustee_domain.stdout.split('localhost | SUCCESS => ')[1]|from_json).id }}"
-
-- name: Creating Magnum trustee user
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m os_user
- -a "name={{ magnum_trustee_domain_admin }}
- domain={{ magnum_trustee_domain }}
- password={{ magnum_keystone_password }}
- auth={{ '{{ openstack_magnum_auth }}' }}"
- -e "{'openstack_magnum_auth':{{ openstack_magnum_auth }}}"
- register: trustee_user
- changed_when: "{{ trustee_user.stdout.find('localhost | SUCCESS => ') != -1 and (trustee_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: trustee_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
-
-- name: Creating Magnum trustee user role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m os_user_role
- -a "domain={{ magnum_trustee_domain_id }}
- user={{ magnum_trustee_domain_admin }}
- role=admin
- auth={{ '{{ openstack_magnum_auth }}' }}"
- -e "{'openstack_magnum_auth':{{ openstack_magnum_auth }}}"
- register: magnum_user_role_result
- changed_when: "{{ magnum_user_role_result.stdout.find('localhost | SUCCESS => ') != -1 and (magnum_user_role_result.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: magnum_user_role_result.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/magnum/tasks/start.yml b/ansible/roles/magnum/tasks/start.yml
deleted file mode 100644
index b43839484d..0000000000
--- a/ansible/roles/magnum/tasks/start.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Starting magnum-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ magnum_api_image_full }}"
- name: "magnum_api"
- volumes:
- - "{{ node_config_directory }}/magnum-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['magnum-api']
-
-- name: Starting magnum-conductor container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ magnum_conductor_image_full }}"
- name: "magnum_conductor"
- volumes:
- - "{{ node_config_directory }}/magnum-conductor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "magnum:/var/lib/magnum/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['magnum-conductor']
diff --git a/ansible/roles/magnum/tasks/upgrade.yml b/ansible/roles/magnum/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/magnum/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/magnum/templates/magnum-api.json.j2 b/ansible/roles/magnum/templates/magnum-api.json.j2
deleted file mode 100644
index b79de70053..0000000000
--- a/ansible/roles/magnum/templates/magnum-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "magnum-api --config-file /etc/magnum/magnum.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/magnum.conf",
- "dest": "/etc/magnum/magnum.conf",
- "owner": "magnum",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/magnum",
- "owner": "magnum:magnum",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/magnum/templates/magnum-conductor.json.j2 b/ansible/roles/magnum/templates/magnum-conductor.json.j2
deleted file mode 100644
index 40a6fa2657..0000000000
--- a/ansible/roles/magnum/templates/magnum-conductor.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "magnum-conductor --config-file /etc/magnum/magnum.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/magnum.conf",
- "dest": "/etc/magnum/magnum.conf",
- "owner": "magnum",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/magnum",
- "owner": "magnum:magnum",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/magnum",
- "owner": "magnum:magnum",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/magnum/templates/magnum.conf.j2 b/ansible/roles/magnum/templates/magnum.conf.j2
deleted file mode 100644
index e567d32396..0000000000
--- a/ansible/roles/magnum/templates/magnum.conf.j2
+++ /dev/null
@@ -1,67 +0,0 @@
-[DEFAULT]
-debug = {{ magnum_logging_debug }}
-state_path = /var/lib/magnum
-log_dir = /var/log/kolla/magnum
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if service_name == 'magnum-api' %}
-[api]
-port = {{ magnum_api_port }}
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-{% endif %}
-
-[oslo_policy]
-policy_file = /etc/magnum/policy.json
-
-[database]
-connection = mysql+pymysql://{{ magnum_database_user }}:{{ magnum_database_password }}@{{ magnum_database_address}}/{{ magnum_database_name }}
-max_retries = -1
-
-[heat_client]
-region_name = {{ openstack_region_name }}
-
-[cinder_client]
-region_name = {{ openstack_region_name }}
-
-[barbican_client]
-region_name = {{ openstack_region_name }}
-
-[keystone_auth]
-auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
-user_domain_name = Default
-project_domain_name = Default
-project_name = service
-password = {{ magnum_keystone_password }}
-username = {{ magnum_keystone_user }}
-auth_type = password
-
-[keystone_authtoken]
-auth_version = v3
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_name = Default
-user_domain_name = Default
-project_name = service
-username = {{ magnum_keystone_user }}
-password = {{ magnum_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[trust]
-trustee_domain_admin_password = {{ magnum_keystone_password }}
-trustee_domain_admin_name = {{ magnum_trustee_domain_admin }}
-trustee_domain_name = {{ magnum_trustee_domain }}
-
-[oslo_concurrency]
-lock_path = /var/lib/magnum/tmp
-
-[certificates]
-{% if enable_barbican | bool %}
-cert_manager_type = barbican
-{% else %}
-cert_manager_type = x509keypair
-{% endif %}
diff --git a/ansible/roles/manila/defaults/main.yml b/ansible/roles/manila/defaults/main.yml
deleted file mode 100644
index 2ba9665953..0000000000
--- a/ansible/roles/manila/defaults/main.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-project_name: "manila"
-
-####################
-## Database
-#####################
-manila_database_name: "manila"
-manila_database_user: "manila"
-manila_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-#####################
-## Docker
-#####################
-manila_share_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-manila-share"
-manila_share_tag: "{{ openstack_release }}"
-manila_share_image_full: "{{ manila_share_image }}:{{ manila_share_tag }}"
-
-manila_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-manila-scheduler"
-manila_scheduler_tag: "{{ openstack_release }}"
-manila_scheduler_image_full: "{{ manila_scheduler_image }}:{{ manila_scheduler_tag }}"
-
-manila_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-manila-api"
-manila_api_tag: "{{ openstack_release }}"
-manila_api_image_full: "{{ manila_api_image }}:{{ manila_api_tag }}"
-
-manila_data_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-manila-data"
-manila_data_tag: "{{ openstack_release }}"
-manila_data_image_full: "{{ manila_data_image }}:{{ manila_data_tag }}"
-
-#####################
-## OpenStack
-#####################
-manila_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ manila_api_port }}/v1/%(tenant_id)s"
-manila_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ manila_api_port }}/v1/%(tenant_id)s"
-manila_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ manila_api_port }}/v1/%(tenant_id)s"
-manila_v2_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ manila_api_port }}/v2/%(tenant_id)s"
-manila_v2_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ manila_api_port }}/v2/%(tenant_id)s"
-manila_v2_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ manila_api_port }}/v2/%(tenant_id)s"
-
-manila_logging_debug: "{{ openstack_logging_debug }}"
-
-manila_keystone_user: "manila"
-manila_service_instance_user: "manila"
-manila_service_instance_password: "manila"
-
-openstack_manila_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
-
-####################
-# Manila
-####################
-manila_backends:
- - name: "generic"
- driver: "generic"
- enabled: "{{ enable_manila_backend_generic | bool }}"
- - name: "hnas1"
- driver: "hnas"
- enabled: "{{ enable_manila_backend_hnas | bool }}"
-
-manila_enabled_backends: "{{ manila_backends|selectattr('enabled', 'equalto', true)|list }}"
diff --git a/ansible/roles/manila/meta/main.yml b/ansible/roles/manila/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/manila/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/manila/tasks/bootstrap.yml b/ansible/roles/manila/tasks/bootstrap.yml
deleted file mode 100644
index 0d51963b0e..0000000000
--- a/ansible/roles/manila/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Manila database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ manila_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['manila-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Manila database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ manila_database_name }}'
- password='{{ manila_database_password }}'
- host='%'
- priv='{{ manila_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['manila-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/manila/tasks/bootstrap_service.yml b/ansible/roles/manila/tasks/bootstrap_service.yml
deleted file mode 100644
index 501e6062b4..0000000000
--- a/ansible/roles/manila/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Manila bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ manila_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_manila"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/manila-api/:{{ container_config_directory }}/:ro"
- - "kolla_logs:/var/log/kolla/"
- - "/etc/localtime:/etc/localtime:ro"
- run_once: True
- delegate_to: "{{ groups['manila-api'][0] }}"
diff --git a/ansible/roles/manila/tasks/config.yml b/ansible/roles/manila/tasks/config.yml
deleted file mode 100644
index c975cdd75e..0000000000
--- a/ansible/roles/manila/tasks/config.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "manila-api"
- - "manila-data"
- - "manila-scheduler"
- - "manila-share"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "manila-api"
- - "manila-data"
- - "manila-scheduler"
- - "manila-share"
-
-- name: Copying over manila.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/manila.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/manila.conf"
- - "{{ node_custom_config }}/manila/{{ item }}.conf"
- - "{{ node_custom_config }}/manila/{{ inventory_hostname }}/manila.conf"
- dest: "{{ node_config_directory }}/{{ item }}/manila.conf"
- with_items:
- - "manila-api"
- - "manila-data"
- - "manila-scheduler"
-
-- name: Copying over manila-share.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/manila.conf.j2"
- - "{{ role_path }}/templates/manila-share.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/manila.conf"
- - "{{ node_custom_config }}/{{ item }}.conf"
- - "{{ node_custom_config }}/manila/{{ item }}.conf"
- - "{{ node_custom_config }}/manila/{{ inventory_hostname }}/manila.conf"
- dest: "{{ node_config_directory }}/{{ item }}/manila.conf"
- with_items:
- - "manila-share"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/manila/policy.json"
- register: manila_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/manila/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "manila-api"
- - "manila-data"
- - "manila-scheduler"
- - "manila-share"
- when:
- manila_policy.stat.exists
diff --git a/ansible/roles/manila/tasks/deploy.yml b/ansible/roles/manila/tasks/deploy.yml
deleted file mode 100644
index 37135a4093..0000000000
--- a/ansible/roles/manila/tasks/deploy.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['manila-api']
-
-- include: config.yml
- when: inventory_hostname in groups['manila-api'] or
- inventory_hostname in groups['manila-data'] or
- inventory_hostname in groups['manila-share'] or
- inventory_hostname in groups['manila-scheduler']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['manila-api']
-
-- include: start.yml
- when: inventory_hostname in groups['manila-api'] or
- inventory_hostname in groups['manila-data'] or
- inventory_hostname in groups['manila-share'] or
- inventory_hostname in groups['manila-scheduler']
diff --git a/ansible/roles/manila/tasks/main.yml b/ansible/roles/manila/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/manila/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/manila/tasks/precheck.yml b/ansible/roles/manila/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/manila/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/manila/tasks/pull.yml b/ansible/roles/manila/tasks/pull.yml
deleted file mode 100644
index 520e29c088..0000000000
--- a/ansible/roles/manila/tasks/pull.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Pulling manila-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ manila_api_image_full }}"
- when: inventory_hostname in groups['manila-api']
-
-- name: Pulling manila-scheduler image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ manila_scheduler_image_full }}"
- when: inventory_hostname in groups['manila-scheduler']
-
-- name: Pulling manila-share image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ manila_share_image_full }}"
- when: inventory_hostname in groups['manila-share']
-
-- name: Pulling manila-data image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ manila_data_image_full }}"
- when: inventory_hostname in groups['manila-data']
diff --git a/ansible/roles/manila/tasks/reconfigure.yml b/ansible/roles/manila/tasks/reconfigure.yml
deleted file mode 100644
index 90ab181044..0000000000
--- a/ansible/roles/manila/tasks/reconfigure.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: manila_api, group: manila-api }
- - { name: manila_data, group: manila-data }
- - { name: manila_scheduler, group: manila-scheduler }
- - { name: manila_share, group: manila-share }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: manila_api, group: manila-api }
- - { name: manila_data, group: manila-data }
- - { name: manila_scheduler, group: manila-scheduler }
- - { name: manila_share, group: manila-share }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: manila_api, group: manila-api }
- - { name: manila_data, group: manila-data }
- - { name: manila_scheduler, group: manila-scheduler }
- - { name: manila_share, group: manila-share }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: manila_api, group: manila-api },
- { name: manila_data, group: manila-data },
- { name: manila_scheduler, group: manila-scheduler },
- { name: manila_share, group: manila-share }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: manila_api, group: manila-api },
- { name: manila_data, group: manila-data },
- { name: manila_scheduler, group: manila-scheduler },
- { name: manila_share, group: manila-share }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/manila/tasks/register.yml b/ansible/roles/manila/tasks/register.yml
deleted file mode 100644
index a804b32158..0000000000
--- a/ansible/roles/manila/tasks/register.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Creating the Manila service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name={{ item.service_name }}
- service_type={{ item.service_type }}
- description='Openstack Shared Filesystems'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_manila_auth }}' }}"
- -e "{'openstack_manila_auth':{{ openstack_manila_auth }}}"
- register: manila_endpoint
- changed_when: "{{ manila_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (manila_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: manila_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ manila_admin_endpoint }}', 'service_name': 'manila', 'service_type': 'share'}
- - {'interface': 'internal', 'url': '{{ manila_internal_endpoint }}', 'service_name': 'manila', 'service_type': 'share'}
- - {'interface': 'public', 'url': '{{ manila_public_endpoint }}', 'service_name': 'manila', 'service_type': 'share'}
- - {'interface': 'admin', 'url': '{{ manila_v2_admin_endpoint }}', 'service_name': 'manilav2', 'service_type': 'sharev2'}
- - {'interface': 'internal', 'url': '{{ manila_v2_internal_endpoint }}', 'service_name': 'manilav2', 'service_type': 'sharev2'}
- - {'interface': 'public', 'url': '{{ manila_v2_public_endpoint }}', 'service_name': 'manilav2', 'service_type': 'sharev2'}
-
-- name: Creating the Manila project, user and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=manila
- password={{ manila_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_manila_auth }}' }}"
- -e "{'openstack_manila_auth':{{ openstack_manila_auth }}}"
- register: manila_user
- changed_when: "{{ manila_user.stdout.find('localhost | SUCCESS => ') != -1 and (manila_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: manila_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/manila/tasks/start.yml b/ansible/roles/manila/tasks/start.yml
deleted file mode 100644
index 103b9f506a..0000000000
--- a/ansible/roles/manila/tasks/start.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- name: Starting manila-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "manila_api"
- image: "{{ manila_api_image_full }}"
- volumes:
- - "{{ node_config_directory }}/manila-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['manila-api']
-
-- name: Starting manila-scheduler container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "manila_scheduler"
- image: "{{ manila_scheduler_image_full }}"
- volumes:
- - "{{ node_config_directory }}/manila-scheduler/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['manila-scheduler']
-
-- name: Starting manila-share container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "manila_share"
- image: "{{ manila_share_image_full }}"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/manila-share/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run/:/run/:shared"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['manila-share']
-
-- name: Starting manila-data container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "manila_data"
- image: "{{ manila_data_image_full }}"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/manila-data/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run/:/run/:shared"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['manila-data']
diff --git a/ansible/roles/manila/tasks/upgrade.yml b/ansible/roles/manila/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/manila/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/manila/templates/manila-api.json.j2 b/ansible/roles/manila/templates/manila-api.json.j2
deleted file mode 100644
index 2d6196459d..0000000000
--- a/ansible/roles/manila/templates/manila-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "manila-api --config-file /etc/manila/manila.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/manila.conf",
- "dest": "/etc/manila/manila.conf",
- "owner": "manila",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/manila",
- "owner": "manila:manila",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/manila/templates/manila-data.json.j2 b/ansible/roles/manila/templates/manila-data.json.j2
deleted file mode 100644
index b5a8ce2bba..0000000000
--- a/ansible/roles/manila/templates/manila-data.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "manila-data --config-file /etc/manila/manila.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/manila.conf",
- "dest": "/etc/manila/manila.conf",
- "owner": "manila",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/manila",
- "owner": "manila:manila",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/manila/templates/manila-scheduler.json.j2 b/ansible/roles/manila/templates/manila-scheduler.json.j2
deleted file mode 100644
index e59e85b5bd..0000000000
--- a/ansible/roles/manila/templates/manila-scheduler.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "manila-scheduler --config-file /etc/manila/manila.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/manila.conf",
- "dest": "/etc/manila/manila.conf",
- "owner": "manila",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/manila",
- "owner": "manila:manila",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/manila/templates/manila-share.conf.j2 b/ansible/roles/manila/templates/manila-share.conf.j2
deleted file mode 100644
index cd15fd10ed..0000000000
--- a/ansible/roles/manila/templates/manila-share.conf.j2
+++ /dev/null
@@ -1,83 +0,0 @@
-[DEFAULT]
-{% if manila_enabled_backends %}
-enabled_share_backends = {{ manila_enabled_backends|map(attribute='name')|join(',') }}
-{% endif %}
-
-default_share_type = default_share_type
-
-[cinder]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-region_name = {{ openstack_region_name }}
-project_name = service
-username = cinder
-password = {{ cinder_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[nova]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-region_name = {{ openstack_region_name }}
-project_name = service
-username = {{ nova_keystone_user }}
-password = {{ nova_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[neutron]
-url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}
-uth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-region_name = {{ openstack_region_name }}
-project_name = service
-username = {{ neutron_keystone_user }}
-password = {{ neutron_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if enable_manila_backend_generic | bool %}
-[generic]
-share_driver = manila.share.drivers.generic.GenericShareDriver
-{% if neutron_plugin_agent == "openvswitch" %}
-interface_driver = manila.network.linux.interface.OVSInterfaceDriver
-{% elif neutron_plugin_agent == "linuxbridge" %}
-interface_driver = manila.network.linux.interface.BridgeInterfaceDriver
-{% endif %}
-
-driver_handles_share_servers = True
-
-service_instance_password = {{ manila_service_instance_password }}
-service_instance_user = {{ manila_service_instance_user }}
-service_image_name = manila-service-image
-
-share_backend_name = GENERIC
-{% endif %}
-
-{% if enable_manila_backend_hnas | bool %}
-[hnas1]
-share_backend_name = HNAS1
-share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver
-driver_handles_share_servers = False
-hitachi_hnas_ip = {{ hnas_ip }}
-hitachi_hnas_user = {{ hnas_user }}
-hitachi_hnas_password = {{ hnas_password }}
-hitachi_hnas_evs_id = {{ hnas_evs_id }}
-hitachi_hnas_evs_ip = {{ hnas_evs_ip }}
-hitachi_hnas_file_system_name = {{ hnas_file_system_name }}
-{% endif %}
diff --git a/ansible/roles/manila/templates/manila-share.json.j2 b/ansible/roles/manila/templates/manila-share.json.j2
deleted file mode 100644
index 3108156554..0000000000
--- a/ansible/roles/manila/templates/manila-share.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "manila-share --config-file /etc/manila/manila.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/manila.conf",
- "dest": "/etc/manila/manila.conf",
- "owner": "manila",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/manila",
- "owner": "manila:manila",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/manila/templates/manila.conf.j2 b/ansible/roles/manila/templates/manila.conf.j2
deleted file mode 100644
index 08f518e098..0000000000
--- a/ansible/roles/manila/templates/manila.conf.j2
+++ /dev/null
@@ -1,42 +0,0 @@
-[DEFAULT]
-debug = {{ manila_logging_debug }}
-
-log_dir = /var/log/kolla/manila
-use_forwarded_for = true
-
-my_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-osapi_share_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-osapi_share_listen_port = {{ manila_api_port }}
-osapi_share_workers = {{ openstack_service_workers }}
-
-rootwrap_config = /etc/manila/rootwrap.conf
-api_paste_config = /etc/manila/api-paste.ini
-
-auth_strategy = keystone
-
-os_region_name = {{ openstack_region_name }}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_concurrency]
-lock_path = /var/lib/manila/tmp
-
-[database]
-connection = mysql+pymysql://{{ manila_database_user }}:{{ manila_database_password }}@{{ manila_database_address }}/{{ manila_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-signing_dir = /var/cache/manila
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ manila_keystone_user }}
-password = {{ manila_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
diff --git a/ansible/roles/mariadb/defaults/main.yml b/ansible/roles/mariadb/defaults/main.yml
deleted file mode 100644
index 410e86d3e9..0000000000
--- a/ansible/roles/mariadb/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-project_name: "mariadb"
-
-####################
-# Database
-####################
-database_cluster_name: "openstack"
-database_max_timeout: 60
-
-####################
-# Docker
-####################
-mariadb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-mariadb"
-mariadb_tag: "{{ openstack_release }}"
-mariadb_image_full: "{{ mariadb_image }}:{{ mariadb_tag }}"
diff --git a/ansible/roles/mariadb/meta/main.yml b/ansible/roles/mariadb/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/mariadb/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/mariadb/tasks/bootstrap.yml b/ansible/roles/mariadb/tasks/bootstrap.yml
deleted file mode 100644
index 6647cb598c..0000000000
--- a/ansible/roles/mariadb/tasks/bootstrap.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- include: lookup_cluster.yml
-
-- include: bootstrap_cluster.yml
- when:
- - delegate_host == 'None'
- - inventory_hostname == groups['mariadb'][0]
-
-- include: recover_cluster.yml
- when: mariadb_recover | default(False)
diff --git a/ansible/roles/mariadb/tasks/bootstrap_cluster.yml b/ansible/roles/mariadb/tasks/bootstrap_cluster.yml
deleted file mode 100644
index 51915f4e32..0000000000
--- a/ansible/roles/mariadb/tasks/bootstrap_cluster.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Running MariaDB bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- DB_ROOT_PASSWORD: "{{ database_password }}"
- DB_MAX_TIMEOUT: "{{ database_max_timeout }}"
- image: "{{ mariadb_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_mariadb"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "mariadb:/var/lib/mysql"
-
-- name: Starting first MariaDB container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- BOOTSTRAP_ARGS: "--wsrep-new-cluster"
- image: "{{ mariadb_image_full }}"
- labels:
- BOOTSTRAP:
- name: "mariadb"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "mariadb:/var/lib/mysql"
diff --git a/ansible/roles/mariadb/tasks/check.yml b/ansible/roles/mariadb/tasks/check.yml
deleted file mode 100644
index 5389e78aef..0000000000
--- a/ansible/roles/mariadb/tasks/check.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Waiting for MariaDB service to be ready through VIP
- command: "docker exec mariadb mysql -h {{ kolla_internal_fqdn }} -P {{ mariadb_port }} -u haproxy -e 'show databases;'"
- register: result
- until: result | success
- changed_when: False
- retries: 6
- delay: 10
diff --git a/ansible/roles/mariadb/tasks/config.yml b/ansible/roles/mariadb/tasks/config.yml
deleted file mode 100644
index a748c1adf0..0000000000
--- a/ansible/roles/mariadb/tasks/config.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "mariadb"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "mariadb"
-
-- name: Copying over galera.cnf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/galera.cnf.j2"
- - "{{ node_custom_config }}/galera.cnf"
- - "{{ node_custom_config }}/mariadb/{{ inventory_hostname }}/galera.cnf"
- dest: "{{ node_config_directory }}/{{ item }}/galera.cnf"
- with_items:
- - "mariadb"
-
-- name: Copying over wsrep-notify.sh
- template:
- src: "{{ role_path }}/templates/wsrep-notify.sh.j2"
- dest: "{{ node_config_directory }}/{{ item }}/wsrep-notify.sh"
- with_items:
- - "mariadb"
diff --git a/ansible/roles/mariadb/tasks/deploy.yml b/ansible/roles/mariadb/tasks/deploy.yml
deleted file mode 100644
index 93f19edce4..0000000000
--- a/ansible/roles/mariadb/tasks/deploy.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap.yml
-
-- include: start.yml
-
-# We use register as a test to see whether the database is active and ready to
-# communicate. This run on all hosts that have a database and attempts to talk
-# to the local database rather than the vip,
-- include: register.yml
-
-# This will restart the container we initially used to bootstrap the cluster to
-# make it match the other containers environment-wise. This also prevents a
-# change from showing up when rerunning the playbooks
-- include: start.yml
-
-# Since the last start.yml may have recreated some containers we must wait and
-# check the health again to ensure the hosts are active.
-- include: register.yml
-
-# Test haproxy user through VIP
-- include: check.yml
diff --git a/ansible/roles/mariadb/tasks/lookup_cluster.yml b/ansible/roles/mariadb/tasks/lookup_cluster.yml
deleted file mode 100644
index efca14ee08..0000000000
--- a/ansible/roles/mariadb/tasks/lookup_cluster.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Cleaning up temp file on localhost
- local_action: file path=/tmp/kolla_mariadb_cluster state=absent
- changed_when: False
- check_mode: no
- run_once: True
-
-- name: Creating temp file on localhost
- local_action: copy content=None dest=/tmp/kolla_mariadb_cluster mode=0644
- changed_when: False
- check_mode: no
- run_once: True
-
-- name: Creating mariadb volume
- kolla_docker:
- action: "create_volume"
- common_options: "{{ docker_common_options }}"
- name: "mariadb"
- register: mariadb_volume
-
-- name: Writing hostname of host with existing cluster files to temp file
- local_action: copy content={{ ansible_hostname }} dest=/tmp/kolla_mariadb_cluster mode=0644
- changed_when: False
- check_mode: no
- when: not mariadb_volume | changed
-
-- name: Registering host from temp file
- set_fact:
- delegate_host: "{{ lookup('file', '/tmp/kolla_mariadb_cluster') }}"
-
-- name: Cleaning up temp file on localhost
- local_action: file path=/tmp/kolla_mariadb_cluster state=absent
- changed_when: False
- check_mode: no
- run_once: True
diff --git a/ansible/roles/mariadb/tasks/main.yml b/ansible/roles/mariadb/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/mariadb/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/mariadb/tasks/precheck.yml b/ansible/roles/mariadb/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/mariadb/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/mariadb/tasks/pull.yml b/ansible/roles/mariadb/tasks/pull.yml
deleted file mode 100644
index 13d27693b3..0000000000
--- a/ansible/roles/mariadb/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling mariadb image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ mariadb_image_full }}"
- when: inventory_hostname in groups['mariadb']
diff --git a/ansible/roles/mariadb/tasks/reconfigure.yml b/ansible/roles/mariadb/tasks/reconfigure.yml
deleted file mode 100644
index 92bf538c47..0000000000
--- a/ansible/roles/mariadb/tasks/reconfigure.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mariadb, group: mariadb }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mariadb, group: mariadb }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mariadb, group: mariadb }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: mariadb, group: mariadb }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: mariadb, group: mariadb }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: check.yml
diff --git a/ansible/roles/mariadb/tasks/recover_cluster.yml b/ansible/roles/mariadb/tasks/recover_cluster.yml
deleted file mode 100644
index bf62a85f4c..0000000000
--- a/ansible/roles/mariadb/tasks/recover_cluster.yml
+++ /dev/null
@@ -1,94 +0,0 @@
----
-- fail:
- msg: "MariaDB cluster was not found. Is your inventory correct?"
- when: delegate_host == 'None'
-
-- name: Checking if and mariadb containers are running
- kolla_docker:
- name: "mariadb"
- action: "get_container_state"
- register: container_state
-
-- fail:
- msg: "There are running MariaDB nodes, please stop them first."
- when: container_state.Running | bool
- any_errors_fatal: True
-
-- name: Cleaning up temp file on mariadb hosts
- file: path=/tmp/kolla_mariadb_grastate.dat state=absent
- changed_when: false
- check_mode: no
-
-- name: Cleaning up temp file on localhost
- local_action: file path=/tmp/kolla_mariadb_recover_inventory_name state=absent
- changed_when: false
- check_mode: no
- run_once: true
-
-- block:
- - name: Copying grastate.dat file from mariadb container
- command: docker cp mariadb:/var/lib/mysql/grastate.dat /tmp/kolla_mariadb_grastate.dat
- changed_when: false
-
- - name: Print the content of grastate.dat file
- command: cat /tmp/kolla_mariadb_grastate.dat
- register: cat_grastate
- changed_when: false
-
- - name: Registering mariadb seqno variable
- set_fact:
- seqno: "{{ (cat_grastate.stdout|from_yaml).seqno }}"
- changed_when: false
-
- - name: Comparing seqno value on all mariadb hosts
- shell: "if [[ {{ hostvars[inventory_hostname]['seqno'] }} -lt {{ hostvars[item]['seqno'] }} ]]; then echo {{ hostvars[item]['seqno'] }}; fi"
- with_items: "{{ groups['mariadb'] }}"
- changed_when: false
- register: seqno_compare
-
- - name: Writing hostname of host with the largest seqno to temp file
- local_action: copy content={{ inventory_hostname }} dest=/tmp/kolla_mariadb_recover_inventory_name mode=0644
- changed_when: false
- when: seqno_compare.results | map(attribute='stdout') | join('') == ""
-
- - name: Registering mariadb_recover_inventory_name from temp file
- set_fact:
- mariadb_recover_inventory_name: "{{ lookup('file', '/tmp/kolla_mariadb_recover_inventory_name') }}"
- when:
- - mariadb_recover_inventory_name is not defined
-
-- name: Cleaning up temp file on mariadb hosts
- file: path=/tmp/kolla_mariadb_grastate.dat state=absent
- changed_when: false
- check_mode: no
-
-- name: Cleaning up temp file on localhost
- local_action: file path=/tmp/kolla_mariadb_recover_inventory_name state=absent
- changed_when: false
- check_mode: no
- run_once: true
-
-- name: Starting first MariaDB container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- BOOTSTRAP_ARGS: "--wsrep-new-cluster"
- image: "{{ mariadb_image_full }}"
- labels:
- BOOTSTRAP:
- name: "mariadb"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "mariadb:/var/lib/mysql"
- when:
- - (mariadb_recover_inventory_name is not defined and inventory_hostname == groups['mariadb'][0]) or
- (mariadb_recover_inventory_name is defined and inventory_hostname == mariadb_recover_inventory_name)
-
-- name: Reset bootstrap fact
- set_fact:
- delegate_host: "None"
diff --git a/ansible/roles/mariadb/tasks/register.yml b/ansible/roles/mariadb/tasks/register.yml
deleted file mode 100644
index bd9a3e7c88..0000000000
--- a/ansible/roles/mariadb/tasks/register.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Creating haproxy mysql user
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}'
- login_port='{{ mariadb_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='haproxy'
- password=''
- host='%'
- priv=*.*:USAGE"
- register: haproxy_user
- changed_when: "{{ haproxy_user.stdout.find('localhost | SUCCESS => ') != -1 and (haproxy_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: haproxy_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
-
-- name: Cleaning up facts
- set_fact:
- delegate_host: "bootstraped"
diff --git a/ansible/roles/mariadb/tasks/start.yml b/ansible/roles/mariadb/tasks/start.yml
deleted file mode 100644
index f71ad37a52..0000000000
--- a/ansible/roles/mariadb/tasks/start.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Starting mariadb container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ mariadb_image_full }}"
- name: "mariadb"
- volumes:
- - "{{ node_config_directory }}/mariadb/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "mariadb:/var/lib/mysql"
- - "kolla_logs:/var/log/kolla/"
- when: delegate_host != 'None' or
- ( groups['mariadb'] | length ) == 1 or
- ( delegate_host == 'None' and mariadb_recover_inventory_name is not defined and inventory_hostname != groups['mariadb'][0] ) or
- ( delegate_host == 'None' and mariadb_recover_inventory_name is defined and inventory_hostname != mariadb_recover_inventory_name )
-
-
-# TODO(jeffrey4l), remove the task check when the wair_for bug is fixed
-# https://github.com/ansible/ansible-modules-core/issues/2788
-- name: Waiting for MariaDB service to be ready
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ mariadb_port }}"
- connect_timeout: 1
- timeout: 60
- search_regex: "MariaDB"
- register: check_mariadb_port
- until: check_mariadb_port | success
- retries: 10
- delay: 6
diff --git a/ansible/roles/mariadb/tasks/upgrade.yml b/ansible/roles/mariadb/tasks/upgrade.yml
deleted file mode 100644
index fafc44ff38..0000000000
--- a/ansible/roles/mariadb/tasks/upgrade.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- include: config.yml
-
-- include: lookup_cluster.yml
-
-- include: start.yml
-
-- include: check.yml
diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2
deleted file mode 100644
index d74c74f29e..0000000000
--- a/ansible/roles/mariadb/templates/galera.cnf.j2
+++ /dev/null
@@ -1,57 +0,0 @@
-{%- set wsrep_driver = '/usr/lib/galera/libgalera_smm.so' if kolla_base_distro == 'ubuntu' else '/usr/lib64/galera/libgalera_smm.so' %}
-
-{#- Disable Galera in the case of of Kubernetes as its not supported yet. Otherwise, #}
-{#- mariadb will fail to start #}
-{%- set wsrep_driver = 'none' if orchestration_engine == 'KUBERNETES' else wsrep_driver %}
-
-[client]
-default-character-set=utf8
-
-[mysql]
-default-character-set=utf8
-
-[mysqld]
-bind-address={{ api_interface_address }}
-port={{ mariadb_port }}
-
-log-error=/var/log/kolla/mariadb/mariadb.log
-
-binlog_format=ROW
-default-storage-engine=innodb
-innodb_autoinc_lock_mode=2
-
-collation-server = utf8_unicode_ci
-init-connect='SET NAMES utf8'
-character-set-server = utf8
-
-datadir=/var/lib/mysql/
-
-wsrep_cluster_address=gcomm://{% if (groups['mariadb'] | length) > 1 %}{% for host in groups['mariadb'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ mariadb_wsrep_port }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}
-
-wsrep_provider_options=gmcast.listen_addr=tcp://{{ api_interface_address }}:{{ mariadb_wsrep_port }};ist.recv_addr={{ api_interface_address }}:{{ mariadb_ist_port }}
-
-wsrep_node_address={{ api_interface_address }}:{{ mariadb_wsrep_port }}
-wsrep_sst_receive_address={{ api_interface_address }}:{{ mariadb_sst_port }}
-
-wsrep_provider={{ wsrep_driver }}
-wsrep_cluster_name="{{ database_cluster_name }}"
-wsrep_node_name={{ ansible_hostname }}
-wsrep_sst_method=xtrabackup-v2
-wsrep_sst_auth={{ database_user }}:{{ database_password }}
-wsrep_slave_threads=4
-wsrep_notify_cmd=/usr/local/bin/wsrep-notify.sh
-
-max_connections=10000
-
-key_buffer_size = '64M'
-max_heap_table_size = '64M'
-tmp_table_size = '64M'
-{% set dynamic_pool_size_mb = (hostvars[inventory_hostname]['ansible_memtotal_mb'] * 0.4) | round | int %}
-{% if dynamic_pool_size_mb < 8192 %}
-innodb_buffer_pool_size = '{{ dynamic_pool_size_mb }}M'
-{% else %}
-innodb_buffer_pool_size = '8192M'
-{% endif %}
-
-[server]
-pid-file=/var/lib/mysql/mariadb.pid
diff --git a/ansible/roles/mariadb/templates/mariadb.json.j2 b/ansible/roles/mariadb/templates/mariadb.json.j2
deleted file mode 100644
index 1b77f5e88f..0000000000
--- a/ansible/roles/mariadb/templates/mariadb.json.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-{% set mysql_dir = 'mysql' if kolla_base_distro in ['ubuntu', 'debian'] else '' %}
-{
- "command": "/usr/bin/mysqld_safe",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/galera.cnf",
- "dest": "/etc/{{ mysql_dir }}/my.cnf",
- "owner": "mysql",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/wsrep-notify.sh",
- "dest": "/usr/local/bin/wsrep-notify.sh",
- "owner": "mysql",
- "perm": "0700"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/mariadb",
- "owner": "mysql:mysql",
- "recurse": true
- },
- {
- "path": "/var/lib/mysql",
- "owner": "mysql:mysql",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/mariadb/templates/wsrep-notify.sh.j2 b/ansible/roles/mariadb/templates/wsrep-notify.sh.j2
deleted file mode 100644
index 73acde3fe6..0000000000
--- a/ansible/roles/mariadb/templates/wsrep-notify.sh.j2
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/bash -e
-
-# Edit parameters below to specify the address and login to server.
-USER={{ database_user }}
-PSWD={{ database_password }}
-HOST={{ api_interface_address }}
-PORT={{ mariadb_port }}
-LB_USER=haproxy
-
-ENABLE_LB="UPDATE mysql.user SET User='${LB_USER}' WHERE User='${LB_USER}_blocked';"
-DISABLE_LB="UPDATE mysql.user SET User='${LB_USER}_blocked' WHERE User='${LB_USER}';"
-MYSQL_CMD="`type -p mysql` -B -u$USER -p$PSWD -h$HOST -P$PORT"
-
-status_update()
-{
- echo "SET SESSION wsrep_on=off;"
- echo "$@"
- echo "FLUSH PRIVILEGES;"
-}
-
-get_sst_method()
-{
- $MYSQL_CMD -s -N -e "SHOW VARIABLES LIKE 'wsrep_sst_method';" | awk '{ print $2 }'
-}
-
-while [ $# -gt 0 ]
-do
- case $1 in
- --status)
- STATUS=$2
- shift
- ;;
- --uuid)
- CLUSTER_UUID=$2
- shift
- ;;
- --primary)
- [ "$2" = "yes" ] && PRIMARY="1" || PRIMARY="0"
- shift
- ;;
- --index)
- INDEX=$2
- shift
- ;;
- --members)
- MEMBERS=$2
- shift
- ;;
- esac
- shift
-done
-
-case $STATUS in
-Synced)
- CMD=$ENABLE_LB
- ;;
-Donor)
- # enabling donor only if xtrabackup configured
- SST_METHOD=`get_sst_method`
- [[ $SST_METHOD =~ 'xtrabackup' ]] && CMD=$ENABLE_LB || CMD=$DISABLE_LB
- ;;
-Undefined)
- # shutting down database: do nothing
- ;;
-*)
- CMD=$DISABLE_LB
- ;;
-esac
-
-if [ -n "$CMD" ]
-then
- status_update "$CMD" | $MYSQL_CMD
-fi
-
-exit 0
diff --git a/ansible/roles/memcached/defaults/main.yml b/ansible/roles/memcached/defaults/main.yml
deleted file mode 100644
index 8377b77d47..0000000000
--- a/ansible/roles/memcached/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-project_name: "memcached"
-
-####################
-# Docker
-####################
-memcached_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-memcached"
-memcached_tag: "{{ openstack_release }}"
-memcached_image_full: "{{ memcached_image }}:{{ memcached_tag }}"
-
-####################
-# Memcached options
-####################
-memcached_connection_limit: "5000"
diff --git a/ansible/roles/memcached/tasks/config.yml b/ansible/roles/memcached/tasks/config.yml
deleted file mode 100644
index f721633c7d..0000000000
--- a/ansible/roles/memcached/tasks/config.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "memcached"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "memcached"
diff --git a/ansible/roles/memcached/tasks/deploy.yml b/ansible/roles/memcached/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/memcached/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/memcached/tasks/main.yml b/ansible/roles/memcached/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/memcached/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/memcached/tasks/precheck.yml b/ansible/roles/memcached/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/memcached/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/memcached/tasks/pull.yml b/ansible/roles/memcached/tasks/pull.yml
deleted file mode 100644
index 1f8a639e83..0000000000
--- a/ansible/roles/memcached/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling memcached image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ memcached_image_full }}"
- when: inventory_hostname in groups['memcached']
diff --git a/ansible/roles/memcached/tasks/reconfigure.yml b/ansible/roles/memcached/tasks/reconfigure.yml
deleted file mode 100644
index b63684beb7..0000000000
--- a/ansible/roles/memcached/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: memcached, group: memcached }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: memcached, group: memcached }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: memcached, group: memcached }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: memcached, group: memcached }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: memcached, group: memcached }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/memcached/tasks/start.yml b/ansible/roles/memcached/tasks/start.yml
deleted file mode 100644
index c1a193f40f..0000000000
--- a/ansible/roles/memcached/tasks/start.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Starting memcached container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ memcached_image_full }}"
- name: "memcached"
- volumes:
- - "{{ node_config_directory }}/memcached/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- when: inventory_hostname in groups['memcached']
diff --git a/ansible/roles/memcached/tasks/upgrade.yml b/ansible/roles/memcached/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/memcached/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/memcached/templates/memcached.json.j2 b/ansible/roles/memcached/templates/memcached.json.j2
deleted file mode 100644
index dcad615a64..0000000000
--- a/ansible/roles/memcached/templates/memcached.json.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "command": "/usr/bin/memcached -vv -l {{ api_interface_address }} -p {{ memcached_port }} -c {{ memcached_connection_limit }}",
- "config_files": []
-}
diff --git a/ansible/roles/mistral/defaults/main.yml b/ansible/roles/mistral/defaults/main.yml
deleted file mode 100644
index cca068623d..0000000000
--- a/ansible/roles/mistral/defaults/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-project_name: "mistral"
-
-####################
-# Database
-####################
-mistral_database_name: "mistral"
-mistral_database_user: "mistral"
-mistral_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-mistral_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-mistral-engine"
-mistral_engine_tag: "{{ openstack_release }}"
-mistral_engine_image_full: "{{ mistral_engine_image }}:{{ mistral_engine_tag }}"
-
-mistral_executor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-mistral-executor"
-mistral_executor_tag: "{{ openstack_release }}"
-mistral_executor_image_full: "{{ mistral_executor_image }}:{{ mistral_executor_tag }}"
-
-mistral_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-mistral-api"
-mistral_api_tag: "{{ openstack_release }}"
-mistral_api_image_full: "{{ mistral_api_image }}:{{ mistral_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-mistral_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ mistral_api_port }}/v2"
-mistral_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ mistral_api_port }}/v2"
-mistral_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ mistral_api_port }}/v2"
-
-mistral_logging_debug: "{{ openstack_logging_debug }}"
-
-mistral_keystone_user: "mistral"
-
-openstack_mistral_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/mistral/meta/main.yml b/ansible/roles/mistral/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/mistral/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/mistral/tasks/bootstrap.yml b/ansible/roles/mistral/tasks/bootstrap.yml
deleted file mode 100644
index 10f8a7773f..0000000000
--- a/ansible/roles/mistral/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Mistral database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ mistral_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['mistral-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Mistral database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ mistral_database_name }}'
- password='{{ mistral_database_password }}'
- host='%'
- priv='{{ mistral_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['mistral-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/mistral/tasks/bootstrap_service.yml b/ansible/roles/mistral/tasks/bootstrap_service.yml
deleted file mode 100644
index 01fadc8d39..0000000000
--- a/ansible/roles/mistral/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Mistral bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{docker_common_options}}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ mistral_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_mistral"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/mistral-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['mistral-api'][0] }}"
diff --git a/ansible/roles/mistral/tasks/config.yml b/ansible/roles/mistral/tasks/config.yml
deleted file mode 100644
index 62f12d18a7..0000000000
--- a/ansible/roles/mistral/tasks/config.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "mistral-api"
- - "mistral-engine"
- - "mistral-executor"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "mistral-api"
- - "mistral-engine"
- - "mistral-executor"
-
-- name: Copying over mistral.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/mistral.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/mistral.conf"
- - "{{ node_custom_config }}/mistral/{{ item }}.conf"
- - "{{ node_custom_config }}/mistral/{{ inventory_hostname }}/mistral.conf"
- dest: "{{ node_config_directory }}/{{ item }}/mistral.conf"
- with_items:
- - "mistral-api"
- - "mistral-engine"
- - "mistral-executor"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/mistral/policy.json"
- register: mistral_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/mistral/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "mistral-api"
- - "mistral-engine"
- - "mistral-executor"
- when:
- mistral_policy.stat.exists
diff --git a/ansible/roles/mistral/tasks/deploy.yml b/ansible/roles/mistral/tasks/deploy.yml
deleted file mode 100644
index 5f062e5c51..0000000000
--- a/ansible/roles/mistral/tasks/deploy.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['mistral-api']
-
-- include: config.yml
- when: inventory_hostname in groups['mistral-api'] or
- inventory_hostname in groups['mistral-engine'] or
- inventory_hostname in groups['mistral-executor']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['mistral-api']
-
-- include: start.yml
- when: inventory_hostname in groups['mistral-api'] or
- inventory_hostname in groups['mistral-engine'] or
- inventory_hostname in groups['mistral-executor']
diff --git a/ansible/roles/mistral/tasks/main.yml b/ansible/roles/mistral/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/mistral/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/mistral/tasks/precheck.yml b/ansible/roles/mistral/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/mistral/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/mistral/tasks/pull.yml b/ansible/roles/mistral/tasks/pull.yml
deleted file mode 100644
index 6a17830b44..0000000000
--- a/ansible/roles/mistral/tasks/pull.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Pulling mistral-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ mistral_api_image_full }}"
- when: inventory_hostname in groups['mistral-api']
-
-- name: Pulling mistral-engine image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ mistral_engine_image_full }}"
- when: inventory_hostname in groups['mistral-engine']
-
-- name: Pulling mistral-executor image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ mistral_executor_image_full }}"
- when: inventory_hostname in groups['mistral-executor']
diff --git a/ansible/roles/mistral/tasks/reconfigure.yml b/ansible/roles/mistral/tasks/reconfigure.yml
deleted file mode 100644
index d0d9e7ae97..0000000000
--- a/ansible/roles/mistral/tasks/reconfigure.yml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mistral_api, group: mistral-api }
- - { name: mistral_engine, group: mistral-engine }
- - { name: mistral_executor, group: mistral-executor }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mistral_api, group: mistral-api }
- - { name: mistral_engine, group: mistral-engine }
- - { name: mistral_executor, group: mistral-executor }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mistral_api, group: mistral-api }
- - { name: mistral_engine, group: mistral-engine }
- - { name: mistral_executor, group: mistral-executor }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: mistral_api, group: mistral-api },
- { name: mistral_engine, group: mistral-engine },
- { name: mistral_executor, group: mistral-executor }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: mistral_api, group: mistral-api },
- { name: mistral_engine, group: mistral-engine },
- { name: mistral_executor, group: mistral-executor }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/mistral/tasks/register.yml b/ansible/roles/mistral/tasks/register.yml
deleted file mode 100644
index 0ac4906fe6..0000000000
--- a/ansible/roles/mistral/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Mistral service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=mistral
- service_type=workflowv2
- description='Openstack Workflow'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_mistral_auth }}' }}"
- -e "{'openstack_mistral_auth':{{ openstack_mistral_auth }}}"
- register: mistral_endpoint
- changed_when: "{{ mistral_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (mistral_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: mistral_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ mistral_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ mistral_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ mistral_public_endpoint }}'}
-
-- name: Creating the Mistral project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=mistral
- password={{ mistral_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_mistral_auth }}' }}"
- -e "{'openstack_mistral_auth':{{ openstack_mistral_auth }}}"
- register: mistral_user
- changed_when: "{{ mistral_user.stdout.find('localhost | SUCCESS => ') != -1 and (mistral_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: mistral_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/mistral/tasks/start.yml b/ansible/roles/mistral/tasks/start.yml
deleted file mode 100644
index 7aef02880e..0000000000
--- a/ansible/roles/mistral/tasks/start.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Starting mistral-engine container
- kolla_docker:
- action: "start_container"
- name: "mistral_engine"
- common_options: "{{docker_common_options}}"
- image: "{{ mistral_engine_image_full }}"
- volumes:
- - "{{ node_config_directory }}/mistral-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['mistral-engine']
-
-- name: Starting mistral-executor container
- kolla_docker:
- action: "start_container"
- name: "mistral_executor"
- common_options: "{{docker_common_options}}"
- image: "{{ mistral_executor_image_full }}"
- volumes:
- - "{{ node_config_directory }}/mistral-executor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['mistral-executor']
-
-- name: Starting mistral-api container
- kolla_docker:
- action: "start_container"
- name: "mistral_api"
- common_options: "{{docker_common_options}}"
- image: "{{ mistral_api_image_full }}"
- volumes:
- - "{{ node_config_directory }}/mistral-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['mistral-api']
diff --git a/ansible/roles/mistral/tasks/upgrade.yml b/ansible/roles/mistral/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/mistral/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/mistral/templates/mistral-api.json.j2 b/ansible/roles/mistral/templates/mistral-api.json.j2
deleted file mode 100644
index 3fdb470826..0000000000
--- a/ansible/roles/mistral/templates/mistral-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "mistral-server --server api --config-file /etc/mistral/mistral.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/mistral.conf",
- "dest": "/etc/mistral/mistral.conf",
- "owner": "mistral",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/mistral",
- "owner": "mistral:mistral",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/mistral/templates/mistral-engine.json.j2 b/ansible/roles/mistral/templates/mistral-engine.json.j2
deleted file mode 100644
index a37250facd..0000000000
--- a/ansible/roles/mistral/templates/mistral-engine.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "mistral-server --server engine --config-file /etc/mistral/mistral.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/mistral.conf",
- "dest": "/etc/mistral/mistral.conf",
- "owner": "mistral",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/mistral",
- "owner": "mistral:mistral",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/mistral/templates/mistral-executor.json.j2 b/ansible/roles/mistral/templates/mistral-executor.json.j2
deleted file mode 100644
index 405a20183d..0000000000
--- a/ansible/roles/mistral/templates/mistral-executor.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "mistral-server --server executor --config-file /etc/mistral/mistral.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/mistral.conf",
- "dest": "/etc/mistral/mistral.conf",
- "owner": "mistral",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/mistral",
- "owner": "mistral:mistral",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/mistral/templates/mistral.conf.j2 b/ansible/roles/mistral/templates/mistral.conf.j2
deleted file mode 100644
index c2a051b3ae..0000000000
--- a/ansible/roles/mistral/templates/mistral.conf.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-[DEFAULT]
-debug = {{ mistral_logging_debug }}
-
-log_dir = /var/log/kolla/mistral
-
-# NOTE(elemoine): set use_stderr to False or the logs will also be sent to
-# stderr and collected by Docker
-use_stderr = False
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if service_name == 'mistral-api' %}
-[api]
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-port = {{ mistral_api_port }}
-{% endif %}
-
-[database]
-connection = mysql+pymysql://{{ mistral_database_user }}:{{ mistral_database_password }}@{{ mistral_database_address }}/{{ mistral_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ mistral_keystone_user }}
-password = {{ mistral_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[mistral]
-url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ mistral_api_port }}
-
-[oslo_messaging_notifications]
-driver = noop
diff --git a/ansible/roles/mongodb/defaults/main.yml b/ansible/roles/mongodb/defaults/main.yml
deleted file mode 100644
index 9bf44a5831..0000000000
--- a/ansible/roles/mongodb/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-project_name: "mongodb"
-
-
-####################
-# Docker
-####################
-mongodb_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-mongodb"
-mongodb_tag: "{{ openstack_release }}"
-mongodb_image_full: "{{ mongodb_image }}:{{ mongodb_tag }}"
-
-
-####################
-# Mongodb
-####################
-mongodb_replication_set_name: "rs0"
diff --git a/ansible/roles/mongodb/meta/main.yml b/ansible/roles/mongodb/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/mongodb/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/mongodb/tasks/bootstrap.yml b/ansible/roles/mongodb/tasks/bootstrap.yml
deleted file mode 100644
index 6b2329cab7..0000000000
--- a/ansible/roles/mongodb/tasks/bootstrap.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Starting mongodb bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ mongodb_image_full }}"
- name: "bootstrap_mongodb"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/mongodb/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "mongodb:/var/lib/mongodb/"
diff --git a/ansible/roles/mongodb/tasks/bootstrap_cluster.yml b/ansible/roles/mongodb/tasks/bootstrap_cluster.yml
deleted file mode 100644
index 31068f8b97..0000000000
--- a/ansible/roles/mongodb/tasks/bootstrap_cluster.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Copying the mongodb replication set bootstrap script
- local_action: template src=bootstrap_cluster.js.j2 dest=/tmp/mongodb_bootstrap_replication_set.js
- run_once: True
-
-- name: Bootstraping the mongodb replication set
- command: "docker exec -t mongodb mongo {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }} --quiet --eval '{{ lookup('file','/tmp/mongodb_bootstrap_replication_set.js') }}'"
- register: bootstrap_mongodb_cluster
- failed_when: "{{ (bootstrap_mongodb_cluster.stdout|from_json).ok != 1 }}"
- delegate_to: "{{ groups['mongodb'][0] }}"
- run_once: True
-
-- name: Deleting the mongodb replication set bootstrap script
- local_action: file path=/tmp/mongodb_bootstrap_replication_set.js state=absent
- changed_when: false
- failed_when: false
- run_once: True
diff --git a/ansible/roles/mongodb/tasks/config.yml b/ansible/roles/mongodb/tasks/config.yml
deleted file mode 100644
index 9501e38986..0000000000
--- a/ansible/roles/mongodb/tasks/config.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "mongodb"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "mongodb"
-
-- name: Copying over mongodb.conf
- template:
- src: "{{ item }}.j2"
- dest: "{{ node_config_directory }}/mongodb/{{ item }}"
- with_items:
- - "mongodb.conf"
diff --git a/ansible/roles/mongodb/tasks/deploy.yml b/ansible/roles/mongodb/tasks/deploy.yml
deleted file mode 100644
index fd4ed25937..0000000000
--- a/ansible/roles/mongodb/tasks/deploy.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- include: config.yml
- when: inventory_hostname in groups['mongodb']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['mongodb']
-
-- include: start.yml
- when: inventory_hostname in groups['mongodb']
diff --git a/ansible/roles/mongodb/tasks/main.yml b/ansible/roles/mongodb/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/mongodb/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/mongodb/tasks/precheck.yml b/ansible/roles/mongodb/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/mongodb/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/mongodb/tasks/pull.yml b/ansible/roles/mongodb/tasks/pull.yml
deleted file mode 100644
index 642e132c03..0000000000
--- a/ansible/roles/mongodb/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling mongodb image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ mongodb_image_full }}"
- when: inventory_hostname in groups['mongodb']
diff --git a/ansible/roles/mongodb/tasks/reconfigure.yml b/ansible/roles/mongodb/tasks/reconfigure.yml
deleted file mode 100644
index 9bec471870..0000000000
--- a/ansible/roles/mongodb/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mongodb, group: mongodb }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mongodb, group: mongodb }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: mongodb, group: mongodb }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: mongodb, group: mongodb }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: mongodb, group: mongodb }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/mongodb/tasks/start.yml b/ansible/roles/mongodb/tasks/start.yml
deleted file mode 100644
index 3926a73168..0000000000
--- a/ansible/roles/mongodb/tasks/start.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Starting mongodb container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ mongodb_image_full }}"
- name: "mongodb"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/mongodb/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "mongodb:/var/lib/mongodb"
-
-- name: Waiting for the mongodb startup
- wait_for: host={{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }} port={{ mongodb_port }}
-
-- name: Checking current replication status
- command: "docker exec -t mongodb mongo {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }} --quiet --eval rs.status().ok"
- register: mongodb_replication_status
- changed_when: false
- delegate_to: "{{ groups['mongodb'][0] }}"
- run_once: True
-
-- include: "bootstrap_cluster.yml"
- when: mongodb_replication_status.stdout != "1"
diff --git a/ansible/roles/mongodb/tasks/upgrade.yml b/ansible/roles/mongodb/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/mongodb/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/mongodb/templates/bootstrap_cluster.js.j2 b/ansible/roles/mongodb/templates/bootstrap_cluster.js.j2
deleted file mode 100644
index 05ea1c655a..0000000000
--- a/ansible/roles/mongodb/templates/bootstrap_cluster.js.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-printjson(rs.initiate(
- {
- "_id" : "{{ mongodb_replication_set_name }}",
- "version" : 1,
- "members" : [
- {% for host in groups["mongodb"] %}
- {
- "_id" : {{ loop.index }},
- "host" : "{{ hostvars[host]['ansible_' + storage_interface]['ipv4']['address'] }}:{{ mongodb_port }}"
- }{% if not loop.last %},{% endif %}
- {% endfor %}
- ]
- }
-))
diff --git a/ansible/roles/mongodb/templates/mongodb.conf.j2 b/ansible/roles/mongodb/templates/mongodb.conf.j2
deleted file mode 100644
index c5cc8344db..0000000000
--- a/ansible/roles/mongodb/templates/mongodb.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-systemLog:
- destination: file
- logAppend: true
- path: /var/log/kolla/mongodb/mongodb.log
-
-storage:
- dbPath: /var/lib/mongodb
- # Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
- journal:
- enabled: true
-
-net:
- bindIp: {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
- port: {{ mongodb_port }}
-
-replication:
- replSetName: {{ mongodb_replication_set_name }}
diff --git a/ansible/roles/mongodb/templates/mongodb.json.j2 b/ansible/roles/mongodb/templates/mongodb.json.j2
deleted file mode 100644
index 5dccb78b97..0000000000
--- a/ansible/roles/mongodb/templates/mongodb.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "/usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongodb.conf run",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/mongodb.conf",
- "dest": "/etc/mongodb.conf",
- "owner": "mongodb",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/kolla/mongodb",
- "owner": "mongodb:mongodb",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/mongodb",
- "owner": "mongodb:mongodb",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/multipathd/defaults/main.yml b/ansible/roles/multipathd/defaults/main.yml
deleted file mode 100644
index 0a2a56dff0..0000000000
--- a/ansible/roles/multipathd/defaults/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-project_name: "multipathd"
-
-####################
-# Docker
-####################
-multipathd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-multipathd"
-multipathd_tag: "{{ openstack_release }}"
-multipathd_image_full: "{{ multipathd_image }}:{{ multipathd_tag }}"
diff --git a/ansible/roles/multipathd/tasks/config.yml b/ansible/roles/multipathd/tasks/config.yml
deleted file mode 100644
index a831b32a6c..0000000000
--- a/ansible/roles/multipathd/tasks/config.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- when: inventory_hostname in groups['compute']
- with_items:
- - "multipathd"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- when: inventory_hostname in groups['compute']
- with_items:
- - "multipathd"
-
-- name: Copying over multipath.conf
- template:
- src: "{{ role_path }}/templates/multipath.conf.j2"
- dest: "{{ node_config_directory }}/{{ item }}/multipath.conf"
- with_items:
- - "multipathd"
diff --git a/ansible/roles/multipathd/tasks/deploy.yml b/ansible/roles/multipathd/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/multipathd/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/multipathd/tasks/main.yml b/ansible/roles/multipathd/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/multipathd/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/multipathd/tasks/precheck.yml b/ansible/roles/multipathd/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/multipathd/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/multipathd/tasks/pull.yml b/ansible/roles/multipathd/tasks/pull.yml
deleted file mode 100644
index 41d70f65e1..0000000000
--- a/ansible/roles/multipathd/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling multipathd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ multipathd_image_full }}"
- when: inventory_hostname in groups['multipathd']
diff --git a/ansible/roles/multipathd/tasks/reconfigure.yml b/ansible/roles/multipathd/tasks/reconfigure.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/multipathd/tasks/reconfigure.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/multipathd/tasks/start.yml b/ansible/roles/multipathd/tasks/start.yml
deleted file mode 100644
index 2bf69eab2d..0000000000
--- a/ansible/roles/multipathd/tasks/start.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Starting multipathd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ multipathd_image_full }}"
- name: "multipathd"
- ipc_mode: "host"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/multipathd/:{{ container_config_directory }}/:ro"
- - "kolla_logs:/var/log/kolla/"
- - "/etc/localtime:/etc/localtime:ro"
- - "/dev/:/dev/"
- - "/run/:/run/:shared"
- - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/sys/kernel/config:/configfs"
- - "cinder:/var/lib/cinder"
- - "iscsi_info:/etc/iscsi"
- when: inventory_hostname in groups['compute']
diff --git a/ansible/roles/multipathd/tasks/upgrade.yml b/ansible/roles/multipathd/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/multipathd/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/multipathd/templates/multipath.conf.j2 b/ansible/roles/multipathd/templates/multipath.conf.j2
deleted file mode 100644
index d063f31a87..0000000000
--- a/ansible/roles/multipathd/templates/multipath.conf.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-defaults {
- user_friendly_names no
- polling_interval 30
-}
-
-blacklist {
- devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
- devnode "^sd[a]"
- devnode "^hd[a-z]"
- devnode "^cciss!c[0-9]d[0-9]*"
-}
diff --git a/ansible/roles/multipathd/templates/multipathd.json.j2 b/ansible/roles/multipathd/templates/multipathd.json.j2
deleted file mode 100644
index 2570a607e2..0000000000
--- a/ansible/roles/multipathd/templates/multipathd.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "/sbin/multipathd -d",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/multipath.conf",
- "dest": "/etc/multipath.conf",
- "owner": "root",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/murano/defaults/main.yml b/ansible/roles/murano/defaults/main.yml
deleted file mode 100644
index 50ccef01fa..0000000000
--- a/ansible/roles/murano/defaults/main.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-project_name: "murano"
-
-####################
-# Database
-####################
-murano_database_name: "murano"
-murano_database_user: "murano"
-murano_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-murano_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-murano-engine"
-murano_engine_tag: "{{ openstack_release }}"
-murano_engine_image_full: "{{ murano_engine_image }}:{{ murano_engine_tag }}"
-
-murano_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-murano-api"
-murano_api_tag: "{{ openstack_release }}"
-murano_api_image_full: "{{ murano_api_image }}:{{ murano_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-murano_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ murano_api_port }}"
-murano_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ murano_api_port }}"
-murano_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ murano_api_port }}"
-
-murano_logging_debug: "{{ openstack_logging_debug }}"
-
-murano_keystone_user: "murano"
-
-openstack_murano_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/murano/meta/main.yml b/ansible/roles/murano/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/murano/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/murano/tasks/bootstrap.yml b/ansible/roles/murano/tasks/bootstrap.yml
deleted file mode 100644
index 931636ee82..0000000000
--- a/ansible/roles/murano/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Murano database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ murano_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Murano database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ murano_database_name }}'
- password='{{ murano_database_password }}'
- host='%'
- priv='{{ murano_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/murano/tasks/bootstrap_service.yml b/ansible/roles/murano/tasks/bootstrap_service.yml
deleted file mode 100644
index 59a59a4c2f..0000000000
--- a/ansible/roles/murano/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Murano bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ murano_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_murano"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/murano-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
diff --git a/ansible/roles/murano/tasks/config.yml b/ansible/roles/murano/tasks/config.yml
deleted file mode 100644
index 725703f72a..0000000000
--- a/ansible/roles/murano/tasks/config.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "murano-api"
- - "murano-engine"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "murano-api"
- - "murano-engine"
-
-- name: Copying over murano.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/murano.conf.j2"
- - "{{ node_config_directory }}/config/global.conf"
- - "{{ node_config_directory }}/config/database.conf"
- - "{{ node_config_directory }}/config/messaging.conf"
- - "{{ node_config_directory }}/config/murano.conf"
- - "{{ node_config_directory }}/config/murano/{{ item }}.conf"
- - "{{ node_config_directory }}/config/murano/{{ inventory_hostname }}/murano.conf"
- dest: "{{ node_config_directory }}/{{ item }}/murano.conf"
- with_items:
- - "murano-api"
- - "murano-engine"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/murano/policy.json"
- register: murano_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/murano/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "murano-api"
- - "murano-engine"
- when:
- murano_policy.stat.exists
diff --git a/ansible/roles/murano/tasks/deploy.yml b/ansible/roles/murano/tasks/deploy.yml
deleted file mode 100644
index 13d6447329..0000000000
--- a/ansible/roles/murano/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['murano-api']
-
-- include: config.yml
- when: inventory_hostname in groups['murano-api'] or
- inventory_hostname in groups['murano-engine']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['murano-api']
-
-- include: start.yml
- when: inventory_hostname in groups['murano-api'] or
- inventory_hostname in groups['murano-engine']
diff --git a/ansible/roles/murano/tasks/main.yml b/ansible/roles/murano/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/murano/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/murano/tasks/precheck.yml b/ansible/roles/murano/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/murano/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/murano/tasks/pull.yml b/ansible/roles/murano/tasks/pull.yml
deleted file mode 100644
index 5f979e56aa..0000000000
--- a/ansible/roles/murano/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling murano-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ murano_api_image_full }}"
- when: inventory_hostname in groups['murano-api']
-
-- name: Pulling murano-engine image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ murano_engine_image_full }}"
- when: inventory_hostname in groups['murano-engine']
diff --git a/ansible/roles/murano/tasks/reconfigure.yml b/ansible/roles/murano/tasks/reconfigure.yml
deleted file mode 100644
index d5d784aa04..0000000000
--- a/ansible/roles/murano/tasks/reconfigure.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: murano_api, group: murano-api }
- - { name: murano_engine, group: murano-engine }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: murano_api, group: murano-api }
- - { name: murano_engine, group: murano-engine }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: murano_api, group: murano-api }
- - { name: murano_engine, group: murano-engine }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: murano_api, group: murano-api },
- { name: murano_engine, group: murano-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: murano_api, group: murano-api },
- { name: murano_engine, group: murano-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/murano/tasks/register.yml b/ansible/roles/murano/tasks/register.yml
deleted file mode 100644
index 308afc5b09..0000000000
--- a/ansible/roles/murano/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Murano service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=murano
- service_type=application-catalog
- description='Openstack Application Catalogue'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_murano_auth }}' }}"
- -e "{'openstack_murano_auth':{{ openstack_murano_auth }}}"
- register: murano_endpoint
- changed_when: "{{ murano_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (murano_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: murano_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ murano_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ murano_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ murano_public_endpoint }}'}
-
-- name: Creating the Murano project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=murano
- password={{ murano_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_murano_auth }}' }}"
- -e "{'openstack_murano_auth':{{ openstack_murano_auth }}}"
- register: murano_user
- changed_when: "{{ murano_user.stdout.find('localhost | SUCCESS => ') != -1 and (murano_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: murano_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/murano/tasks/start.yml b/ansible/roles/murano/tasks/start.yml
deleted file mode 100644
index a4311b1051..0000000000
--- a/ansible/roles/murano/tasks/start.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- name: Starting murano-engine container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ murano_engine_image_full }}"
- name: "murano_engine"
- volumes:
- - "{{ node_config_directory }}/murano-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['murano-engine']
-
-- name: Starting murano-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ murano_api_image_full }}"
- name: "murano_api"
- volumes:
- - "{{ node_config_directory }}/murano-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['murano-api']
-
-- name: Waiting for Murano API service to be ready on first node
- wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ murano_api_port }}"
- connect_timeout: 1
- timeout: 60
- run_once: True
- register: check_murano_port
- until: check_murano_port | success
- retries: 10
- delay: 6
- delegate_to: "{{ groups['murano-api'][0] }}"
-
-- name: Checking if Murano core library package exists
- command: "docker exec murano_api murano \
- --os-username admin \
- --os-password {{ keystone_admin_password }} \
- --os-project-name admin \
- --os-auth-url \
- {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3 \
- --murano-url \
- {{ admin_protocol }}://{{ api_interface_address }}:{{ murano_api_port }} \
- package-list"
- register: status
- changed_when: False
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
-
-- name: Importing Murano core library package
- command: "docker exec murano_api murano \
- --os-username admin \
- --os-password {{ keystone_admin_password }} \
- --os-project-name admin \
- --os-auth-url \
- {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3 \
- --murano-url \
- {{ admin_protocol }}://{{ api_interface_address }}:{{ murano_api_port }} \
- package-import --is-public /io.murano.zip"
- run_once: True
- delegate_to: "{{ groups['murano-api'][0] }}"
- when:
- - status.stdout.find("io.murano") == -1
diff --git a/ansible/roles/murano/tasks/upgrade.yml b/ansible/roles/murano/tasks/upgrade.yml
deleted file mode 100644
index 630211e23d..0000000000
--- a/ansible/roles/murano/tasks/upgrade.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- include: register.yml
-
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/murano/templates/murano-api.json.j2 b/ansible/roles/murano/templates/murano-api.json.j2
deleted file mode 100644
index 07a6b8693d..0000000000
--- a/ansible/roles/murano/templates/murano-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "murano-api --config-file /etc/murano/murano.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/murano.conf",
- "dest": "/etc/murano/murano.conf",
- "owner": "murano",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/murano",
- "owner": "murano:murano",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/murano/templates/murano-engine.json.j2 b/ansible/roles/murano/templates/murano-engine.json.j2
deleted file mode 100644
index 98a328e72b..0000000000
--- a/ansible/roles/murano/templates/murano-engine.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "murano-engine --config-file /etc/murano/murano.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/murano.conf",
- "dest": "/etc/murano/murano.conf",
- "owner": "murano",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/murano",
- "owner": "murano:murano",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/murano/templates/murano.conf.j2 b/ansible/roles/murano/templates/murano.conf.j2
deleted file mode 100644
index 5e95df8667..0000000000
--- a/ansible/roles/murano/templates/murano.conf.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-[DEFAULT]
-debug = {{ murano_logging_debug }}
-
-log_dir = /var/log/kolla/murano
-
-{% if service_name == 'murano-api' %}
-bind_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-bind_port = {{ murano_api_port }}
-{% endif %}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[engine]
-engine_workers = {{ openstack_service_workers }}
-
-[database]
-connection = mysql+pymysql://{{ murano_database_user }}:{{ murano_database_password }}@{{ murano_database_address }}/{{ murano_database_name }}
-max_retries = -1
-
-[keystone]
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_host = {{ kolla_internal_fqdn }}
-auth_port = {{ keystone_public_port }}
-auth_protocol = {{ internal_protocol }}
-admin_tenant_name = service
-admin_user = {{ murano_keystone_user }}
-admin_password = {{ murano_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[murano]
-url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ murano_api_port }}
-api_workers = {{ openstack_service_workers }}
-
-
-[oslo_messaging_notifications]
-driver = messagingv2
diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml
deleted file mode 100644
index 699c6b688c..0000000000
--- a/ansible/roles/neutron/defaults/main.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-project_name: "neutron"
-
-####################
-# Database
-####################
-neutron_database_name: "neutron"
-neutron_database_user: "neutron"
-neutron_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-neutron_dhcp_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-dhcp-agent"
-neutron_dhcp_agent_tag: "{{ openstack_release }}"
-neutron_dhcp_agent_image_full: "{{ neutron_dhcp_agent_image }}:{{ neutron_dhcp_agent_tag }}"
-
-neutron_l3_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-l3-agent"
-neutron_l3_agent_tag: "{{ openstack_release }}"
-neutron_l3_agent_image_full: "{{ neutron_l3_agent_image }}:{{ neutron_l3_agent_tag }}"
-
-neutron_lbaas_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-lbaas-agent"
-neutron_lbaas_agent_tag: "{{ openstack_release }}"
-neutron_lbaas_agent_image_full: "{{ neutron_lbaas_agent_image }}:{{ neutron_lbaas_agent_tag }}"
-
-neutron_linuxbridge_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-linuxbridge-agent"
-neutron_linuxbridge_agent_tag: "{{ openstack_release }}"
-neutron_linuxbridge_agent_image_full: "{{ neutron_linuxbridge_agent_image }}:{{ neutron_linuxbridge_agent_tag }}"
-
-neutron_metadata_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-metadata-agent"
-neutron_metadata_agent_tag: "{{ openstack_release }}"
-neutron_metadata_agent_image_full: "{{ neutron_metadata_agent_image }}:{{ neutron_metadata_agent_tag }}"
-
-neutron_openvswitch_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-openvswitch-agent"
-neutron_openvswitch_agent_tag: "{{ openstack_release }}"
-neutron_openvswitch_agent_image_full: "{{ neutron_openvswitch_agent_image }}:{{ neutron_openvswitch_agent_tag }}"
-
-neutron_sfc_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-sfc-agent"
-neutron_sfc_agent_tag: "{{ openstack_release }}"
-neutron_sfc_agent_image_full: "{{ neutron_sfc_agent_image }}:{{ neutron_sfc_agent_tag }}"
-
-neutron_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-server"
-neutron_server_tag: "{{ openstack_release }}"
-neutron_server_image_full: "{{ neutron_server_image }}:{{ neutron_server_tag }}"
-
-neutron_vpnaas_agent_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-neutron-vpnaas-agent"
-neutron_vpnaas_agent_tag: "{{ openstack_release }}"
-neutron_vpnaas_agent_image_full: "{{ neutron_vpnaas_agent_image }}:{{ neutron_vpnaas_agent_tag }}"
-
-openvswitch_db_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-openvswitch-db-server"
-openvswitch_db_tag: "{{ openstack_release }}"
-openvswitch_db_image_full: "{{ openvswitch_db_image }}:{{ openvswitch_db_tag }}"
-
-openvswitch_vswitchd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-openvswitch-vswitchd"
-openvswitch_vswitchd_tag: "{{ openstack_release }}"
-openvswitch_vswitchd_image_full: "{{ openvswitch_vswitchd_image }}:{{ openvswitch_vswitchd_tag }}"
-
-
-####################
-# OpenStack
-####################
-dhcp_agents_per_network: 2
-min_l3_agents_per_router: 2
-max_l3_agents_per_router: 3
-
-neutron_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}"
-neutron_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}"
-neutron_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ neutron_server_port }}"
-
-neutron_logging_debug: "{{ openstack_logging_debug }}"
-
-neutron_bridge_name: "br-ex"
-
-openstack_neutron_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/neutron/meta/main.yml b/ansible/roles/neutron/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/neutron/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/neutron/tasks/bootstrap.yml b/ansible/roles/neutron/tasks/bootstrap.yml
deleted file mode 100644
index 526743fb93..0000000000
--- a/ansible/roles/neutron/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Neutron database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ neutron_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['neutron-server'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Neutron database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ neutron_database_name }}'
- password='{{ neutron_database_password }}'
- host='%'
- priv='{{ neutron_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['neutron-server'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/neutron/tasks/bootstrap_service.yml b/ansible/roles/neutron/tasks/bootstrap_service.yml
deleted file mode 100644
index 50d114535f..0000000000
--- a/ansible/roles/neutron/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Running Neutron bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ neutron_server_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_neutron"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/neutron-server/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['neutron-server'][0] }}"
-
-- name: Running Neutron lbaas bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ neutron_lbaas_agent_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_neutron_lbaas_agent"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/neutron-lbaas-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when:
- - enable_neutron_lbaas | bool
- - inventory_hostname in groups['neutron-lbaas-agent']
- run_once: True
- delegate_to: "{{ groups['neutron-lbaas-agent'][0] }}"
-
-- name: Running Neutron vpnaas bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ neutron_vpnaas_agent_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_neutron_vpnaas_agent"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/neutron-vpnaas-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when:
- - enable_neutron_vpnaas | bool
- - inventory_hostname in groups['neutron-vpnaas-agent']
- run_once: True
- delegate_to: "{{ groups['neutron-vpnaas-agent'][0] }}"
diff --git a/ansible/roles/neutron/tasks/config-neutron-fake.yml b/ansible/roles/neutron/tasks/config-neutron-fake.yml
deleted file mode 100644
index 7051911f00..0000000000
--- a/ansible/roles/neutron/tasks/config-neutron-fake.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}"
- state: "directory"
- recurse: yes
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when: inventory_hostname in groups['compute']
-
-- name: Copying over config.json files for services
- template:
- src: "neutron-openvswitch-agent.json.j2"
- dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/config.json"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when:
- - inventory_hostname in groups['compute']
- - neutron_plugin_agent == "openvswitch"
-
-- name: Copying over neutron.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/neutron.conf.j2"
- - "{{ node_config_directory }}/config/global.conf"
- - "{{ node_config_directory }}/config/database.conf"
- - "{{ node_config_directory }}/config/messaging.conf"
- - "{{ node_config_directory }}/config/neutron.conf"
- - "{{ node_config_directory }}/config/neutron/{{ item }}.conf"
- - "{{ node_config_directory }}/config/neutron/{{ inventory_hostname }}/neutron.conf"
- dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/neutron.conf"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when:
- - inventory_hostname in groups['compute']
- - neutron_plugin_agent == "openvswitch"
-
-- name: Copying over ml2_conf.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/ml2_conf.ini.j2"
- - "{{ node_config_directory }}/config/neutron/ml2_conf.ini"
- - "{{ node_config_directory }}/config/neutron/{{ inventory_hostname }}/neutron.conf"
- dest: "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/ml2_conf.ini"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when:
- - inventory_hostname in groups['compute']
- - neutron_plugin_agent == "openvswitch"
diff --git a/ansible/roles/neutron/tasks/config.yml b/ansible/roles/neutron/tasks/config.yml
deleted file mode 100644
index bc18d03808..0000000000
--- a/ansible/roles/neutron/tasks/config.yml
+++ /dev/null
@@ -1,205 +0,0 @@
----
-- name: Setting sysctl values
- sysctl: name={{ item.name }} value={{ item.value }} sysctl_set=yes
- with_items:
- - { name: "net.ipv4.ip_forward", value: 1}
- - { name: "net.ipv4.conf.all.rp_filter", value: 0}
- - { name: "net.ipv4.conf.default.rp_filter", value: 0}
- when:
- - set_sysctl | bool
- - inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
-
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "neutron-dhcp-agent"
- - "neutron-l3-agent"
- - "neutron-linuxbridge-agent"
- - "neutron-metadata-agent"
- - "neutron-openvswitch-agent"
- - "neutron-server"
- - "openvswitch-db-server"
- - "openvswitch-vswitchd"
- - "neutron-lbaas-agent"
- - "neutron-vpnaas-agent"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "neutron-dhcp-agent"
- - "neutron-l3-agent"
- - "neutron-linuxbridge-agent"
- - "neutron-metadata-agent"
- - "neutron-openvswitch-agent"
- - "neutron-server"
- - "openvswitch-db-server"
- - "openvswitch-vswitchd"
- - "neutron-lbaas-agent"
- - "neutron-vpnaas-agent"
-
-- name: Copying over neutron.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/neutron.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/neutron.conf"
- - "{{ node_custom_config }}/neutron/{{ item }}.conf"
- - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron.conf"
- dest: "{{ node_config_directory }}/{{ item }}/neutron.conf"
- with_items:
- - "neutron-dhcp-agent"
- - "neutron-l3-agent"
- - "neutron-linuxbridge-agent"
- - "neutron-metadata-agent"
- - "neutron-openvswitch-agent"
- - "neutron-server"
- - "neutron-lbaas-agent"
- - "neutron-vpnaas-agent"
-
-- name: Copying over neutron_lbaas.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/neutron_lbaas.conf.j2"
- - "{{ node_custom_config }}/neutron/neutron_lbaas.conf"
- - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron_lbaas.conf"
- dest: "{{ node_config_directory }}/{{ item }}/neutron_lbaas.conf"
- with_items:
- - "neutron-server"
-
-- name: Copying over neutron_vpnaas.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/neutron_vpnaas.conf.j2"
- - "{{ node_custom_config }}/neutron/neutron_vpnaas.conf"
- - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/neutron_vpnaas.conf"
- dest: "{{ node_config_directory }}/{{ item }}/neutron_vpnaas.conf"
- with_items:
- - "neutron-server"
-
-- name: Copying over ml2_conf.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/ml2_conf.ini.j2"
- - "{{ node_custom_config }}/neutron/ml2_conf.ini"
- - "{{ node_custom_config }}/neutron/{{ inventory_hostname }}/ml2_conf.ini"
- dest: "{{ node_config_directory }}/{{ item }}/ml2_conf.ini"
- with_items:
- - "neutron-dhcp-agent"
- - "neutron-l3-agent"
- - "neutron-linuxbridge-agent"
- - "neutron-lbaas-agent"
- - "neutron-metadata-agent"
- - "neutron-openvswitch-agent"
- - "neutron-server"
- - "neutron-vpnaas-agent"
-
-- name: Copying over dhcp_agent.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/dhcp_agent.ini.j2"
- - "{{ node_custom_config }}/neutron/dhcp_agent.ini"
- dest: "{{ node_config_directory }}/{{ item }}/dhcp_agent.ini"
- with_items:
- - "neutron-dhcp-agent"
-
-- name: Copying over dnsmasq.conf
- template:
- src: "dnsmasq.conf.j2"
- dest: "{{ node_config_directory }}/{{ item }}/dnsmasq.conf"
- with_items:
- - "neutron-dhcp-agent"
-
-- name: Copying over l3_agent.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/l3_agent.ini.j2"
- - "{{ node_custom_config }}/neutron/l3_agent.ini"
- dest: "{{ node_config_directory }}/{{ item }}/l3_agent.ini"
- with_items:
- - "neutron-l3-agent"
- - "neutron-vpnaas-agent"
-
-- name: Copying over fwaas_driver.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/fwaas_driver.ini.j2"
- - "{{ node_custom_config }}/neutron/fwaas_driver.ini"
- dest: "{{ node_config_directory }}/{{ item }}/fwaas_driver.ini"
- with_items:
- - "neutron-l3-agent"
- - "neutron-vpnaas-agent"
-
-- name: Copying over metadata_agent.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/metadata_agent.ini.j2"
- - "{{ node_custom_config }}/neutron/metadata_agent.ini"
- dest: "{{ node_config_directory }}/{{ item }}/metadata_agent.ini"
- with_items:
- - "neutron-metadata-agent"
-
-- name: Copying over lbaas_agent.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/lbaas_agent.ini.j2"
- - "{{ node_custom_config }}/neutron/lbaas_agent.ini"
- dest: "{{ node_config_directory }}/{{ item }}/lbaas_agent.ini"
- with_items:
- - "neutron-lbaas-agent"
-
-- name: Copying over vpnaas_agent.ini
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/vpnaas_agent.ini.j2"
- - "/etc/kolla/config/neutron/vpnaas_agent.ini"
- dest: "{{ node_config_directory }}/{{ item }}/vpnaas_agent.ini"
- with_items:
- - "neutron-vpnaas-agent"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/neutron/policy.json"
- register: neutron_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/neutron/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "neutron-dhcp-agent"
- - "neutron-l3-agent"
- - "neutron-linuxbridge-agent"
- - "neutron-metadata-agent"
- - "neutron-openvswitch-agent"
- - "neutron-server"
- - "neutron-lbaas-agent"
- - "neutron-vpnaas-agent"
- when:
- neutron_policy.stat.exists
diff --git a/ansible/roles/neutron/tasks/deploy.yml b/ansible/roles/neutron/tasks/deploy.yml
deleted file mode 100644
index 7a75bdb204..0000000000
--- a/ansible/roles/neutron/tasks/deploy.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# enforce ironic usage only with openvswitch
-- include: ironic-check.yml
-
-- include: register.yml
- when: inventory_hostname in groups['neutron-server']
-
-- include: config.yml
- when: inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-server']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
-
-- include: config-neutron-fake.yml
- when:
- - inventory_hostname in groups['compute']
- - enable_nova_fake | bool
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['neutron-server']
-
-- include: start.yml
- when: inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-server']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
diff --git a/ansible/roles/neutron/tasks/ironic-check.yml b/ansible/roles/neutron/tasks/ironic-check.yml
deleted file mode 100644
index a578ce2c72..0000000000
--- a/ansible/roles/neutron/tasks/ironic-check.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# TODO(SamYaple): run verification checks at start of playbook
-- fail: msg="neutron_plugin_agent must use openvswitch with Ironic"
- when:
- - enable_ironic | bool
- - neutron_plugin_agent != "openvswitch"
diff --git a/ansible/roles/neutron/tasks/main.yml b/ansible/roles/neutron/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/neutron/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/neutron/tasks/precheck.yml b/ansible/roles/neutron/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/neutron/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/neutron/tasks/pull.yml b/ansible/roles/neutron/tasks/pull.yml
deleted file mode 100644
index fa6bf505e2..0000000000
--- a/ansible/roles/neutron/tasks/pull.yml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-- name: Pulling neutron-dhcp-agent image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_dhcp_agent_image_full }}"
- when: inventory_hostname in groups['neutron-dhcp-agent']
-
-- name: Pulling neutron-l3-agent image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_l3_agent_image_full }}"
- when: inventory_hostname in groups['neutron-l3-agent']
-
-- name: Pulling neutron-linuxbridge-agent image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_linuxbridge_agent_image_full }}"
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "linuxbridge"
-
-- name: Pulling neutron-metadata-agent image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_metadata_agent_image_full }}"
- when: inventory_hostname in groups['neutron-metadata-agent']
-
-- name: Pulling neutron-openvswitch-agent image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_openvswitch_agent_image_full }}"
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "openvswitch"
-
-- name: Pulling neutron-server image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_server_image_full }}"
- when: inventory_hostname in groups['neutron-server']
-
-- name: Pulling openvswitch-db image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ openvswitch_db_image_full }}"
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "openvswitch"
-
-- name: Pulling openvswitch-vswitchd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ openvswitch_vswitchd_image_full }}"
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "openvswitch"
diff --git a/ansible/roles/neutron/tasks/reconfigure.yml b/ansible/roles/neutron/tasks/reconfigure.yml
deleted file mode 100644
index ad8ef35aba..0000000000
--- a/ansible/roles/neutron/tasks/reconfigure.yml
+++ /dev/null
@@ -1,324 +0,0 @@
----
-- name: Ensuring the containers running neutron-server and neutron agents are up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: neutron_container_states
- failed_when: neutron_container_states.Running == false
- when:
- - "{{ item.enabled|default(True) }}"
- - inventory_hostname in groups[item.group]
- with_items:
- - { name: neutron_server, group: neutron-server }
- - { name: neutron_dhcp_agent, group: neutron-dhcp-agent }
- - { name: neutron_l3_agent, group: neutron-l3-agent }
- - { name: neutron_l3_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" }
- - { name: neutron_lbaas_agent, group: neutron-lbaas-agent, enabled: "{{ enable_neutron_lbaas | bool }}" }
- - { name: neutron_metadata_agent, group: neutron-metadata-agent }
- - { name: neutron_metadata_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" }
- - { name: neutron_vpnaas_agent, group: neutron-vpnaas-agent, enabled: "{{ enable_neutron_vpnaas | bool }}" }
-
-- name: Ensuring the neutron_openvswitch_agent container is up
- kolla_docker:
- name: neutron_openvswitch_agent
- action: "get_container_state"
- register: openvswitch_agent_container_states
- failed_when: openvswitch_agent_container_states.Running == false
- when:
- - neutron_plugin_agent == "openvswitch"
- - (
- ( inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and not enable_nova_fake | bool
- ) or
- ( inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and enable_nova_fake | bool
- )
- )
-
-- name: Ensuring the neutron_linuxbridge_agent container is up
- kolla_docker:
- name: neutron_linuxbridge_agent
- action: "get_container_state"
- register: linuxbridge_agent_container_states
- failed_when: linuxbridge_agent_container_states.Running == false
- when:
- - neutron_plugin_agent == "linuxbridge"
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
-
-- include: config.yml
-
-- name: Check the configs in containers running neutron-server and neutron agents
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: neutron_check_results
- when:
- - "{{ item.enabled|default(True) }}"
- - inventory_hostname in groups[item.group]
- with_items:
- - { name: neutron_server, group: neutron-server }
- - { name: neutron_dhcp_agent, group: neutron-dhcp-agent }
- - { name: neutron_l3_agent, group: neutron-l3-agent }
- - { name: neutron_l3_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" }
- - { name: neutron_lbaas_agent, group: neutron-lbaas-agent, enabled: "{{ enable_neutron_lbaas | bool }}" }
- - { name: neutron_metadata_agent, group: neutron-metadata-agent }
- - { name: neutron_metadata_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" }
- - { name: neutron_vpnaas_agent, group: neutron-vpnaas-agent, enabled: "{{ enable_neutron_vpnaas | bool }}" }
-
-- name: Check the configs in the neutron_openvswitch_agent container
- command: docker exec neutron_openvswitch_agent /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: openvswitch_agent_check_results
- when:
- - neutron_plugin_agent == "openvswitch"
- - (
- ( inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and not enable_nova_fake | bool
- ) or
- ( inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and enable_nova_fake | bool
- )
- )
-
-- name: Check the configs in the neutron_linuxbridge_agent container
- command: docker exec neutron_linuxbridge_agent /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: linuxbridge_agent_check_results
- when:
- - neutron_plugin_agent == "linuxbridge"
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy for containers running neutron-server and neutron agents
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: neutron_container_envs
- when:
- - "{{ item.enabled|default(True) }}"
- - inventory_hostname in groups[item.group]
- with_items:
- - { name: neutron_server, group: neutron-server }
- - { name: neutron_dhcp_agent, group: neutron-dhcp-agent }
- - { name: neutron_l3_agent, group: neutron-l3-agent }
- - { name: neutron_l3_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" }
- - { name: neutron_lbaas_agent, group: neutron-lbaas-agent, enabled: "{{ enable_neutron_lbaas | bool }}" }
- - { name: neutron_metadata_agent, group: neutron-metadata-agent }
- - { name: neutron_metadata_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" }
- - { name: neutron_vpnaas_agent, group: neutron-vpnaas-agent, enabled: "{{ enable_neutron_vpnaas | bool }}" }
-
-- name: Container config strategy for the neutron_openvswitch_agent container
- kolla_docker:
- name: "neutron_openvswitch_agent"
- action: "get_container_env"
- register: openvswitch_agent_envs
- when:
- - neutron_plugin_agent == "openvswitch"
- - (
- ( not enable_nova_fake | bool
- and inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- ) or
- ( enable_nova_fake | bool
- and inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- )
- )
-
-- name: Container config strategy for the neutron_linuxbridge_agent container
- kolla_docker:
- name: "neutron_linuxbridge_agent"
- action: "get_container_env"
- register: linuxbridge_agent_envs
- when:
- - neutron_plugin_agent == "linuxbridge"
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
-
-- name: Remove the containers running neutron-server and neutron agents
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: neutron_remove_containers
- when:
- - "{{ item[0].enabled | default(True) }}"
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: neutron_server, group: neutron-server },
- { name: neutron_dhcp_agent, group: neutron-dhcp-agent },
- { name: neutron_l3_agent, group: neutron-l3-agent },
- { name: neutron_l3_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" },
- { name: neutron_lbaas_agent, group: neutron-lbaas-agent, enabled: "{{ enable_neutron_lbaas | bool }}" },
- { name: neutron_metadata_agent, group: neutron-metadata-agent },
- { name: neutron_metadata_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" },
- { name: neutron_vpnaas_agent, group: neutron-vpnaas-agent, enabled: "{{ enable_neutron_vpnaas | bool }}" }]
- - "{{ neutron_container_envs.results }}"
- - "{{ neutron_check_results.results }}"
-
-- name: Remove the neutron_openvswitch_agent container
- kolla_docker:
- name: "neutron_openvswitch_agent"
- action: "remove_container"
- register: openvswitch_agent_remove_containers
- when:
- - neutron_plugin_agent == "openvswitch"
- - (
- ( not enable_nova_fake | bool
- and inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- ) or
- ( enable_nova_fake | bool
- and inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- )
- )
- - config_strategy == "COPY_ONCE" or openvswitch_agent_envs['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - openvswitch_agent_check_results['rc'] == 1
-
-- name: Remove the neutron_linuxbridge_agent container
- kolla_docker:
- name: "neutron_linuxbridge_agent"
- action: "remove_container"
- register: linuxbridge_agent_remove_containers
- when:
- - neutron_plugin_agent == "linuxbridge"
- - (inventory_hostname in groups['compute']
- or inventory_hostname in groups['manila-share']
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - config_strategy == "COPY_ONCE" or linuxbridge_agent_envs['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - linuxbridge_agent_check_results['rc'] == 1
-
-- include: start.yml
- when: neutron_remove_containers.changed
- or openvswitch_agent_remove_containers.changed
- or linuxbridge_agent_remove_containers.changed
-
-- name: Restart containers running neutron-server and neutron agents
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - "{{ item[0].enabled|default(True) }}"
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups[item[0]['group']]
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: neutron_server, group: neutron-server },
- { name: neutron_dhcp_agent, group: neutron-dhcp-agent },
- { name: neutron_l3_agent, group: neutron-l3-agent },
- { name: neutron_l3_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" },
- { name: neutron_lbaas_agent, group: neutron-lbaas-agent, enabled: "{{ enable_neutron_lbaas | bool }}" },
- { name: neutron_metadata_agent, group: neutron-metadata-agent },
- { name: neutron_metadata_agent, group: compute, enabled: "{{ enable_neutron_dvr | bool }}" },
- { name: neutron_vpnaas_agent, group: neutron-vpnaas-agent, enabled: "{{ enable_neutron_vpnaas | bool }}" }]
- - "{{ neutron_container_envs.results }}"
- - "{{ neutron_check_results.results }}"
-
-- name: Restart the neutron_openvswitch_agent container
- kolla_docker:
- name: "neutron_openvswitch_agent"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - neutron_plugin_agent == "openvswitch"
- - (
- ( inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and not enable_nova_fake | bool
- ) or
- ( inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and enable_nova_fake | bool
- )
- )
- - openvswitch_agent_envs['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - openvswitch_agent_check_results['rc'] == 1
-
-- name: Restart the neutron_linuxbridge_agent container
- kolla_docker:
- name: "neutron_linuxbridge_agent"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - neutron_plugin_agent == "linuxbridge"
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - linuxbridge_agent_envs['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - linuxbridge_agent_check_results['rc'] == 1
diff --git a/ansible/roles/neutron/tasks/register.yml b/ansible/roles/neutron/tasks/register.yml
deleted file mode 100644
index fee70230ec..0000000000
--- a/ansible/roles/neutron/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Neutron service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=neutron
- service_type=network
- description='Openstack Networking'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_neutron_auth }}' }}"
- -e "{'openstack_neutron_auth':{{ openstack_neutron_auth }}}"
- register: neutron_endpoint
- changed_when: "{{ neutron_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (neutron_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: neutron_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ neutron_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ neutron_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ neutron_public_endpoint }}'}
-
-- name: Creating the Neutron project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=neutron
- password={{ neutron_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_neutron_auth }}' }}"
- -e "{'openstack_neutron_auth':{{ openstack_neutron_auth }}}"
- register: neutron_user
- changed_when: "{{ neutron_user.stdout.find('localhost | SUCCESS => ') != -1 and (neutron_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: neutron_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/neutron/tasks/start.yml b/ansible/roles/neutron/tasks/start.yml
deleted file mode 100644
index 95f5e65d4c..0000000000
--- a/ansible/roles/neutron/tasks/start.yml
+++ /dev/null
@@ -1,302 +0,0 @@
----
-- name: Starting openvswitch-db container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ openvswitch_db_image_full }}"
- name: "openvswitch_db"
- volumes:
- - "{{ node_config_directory }}/openvswitch-db-server/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- - "openvswitch_db:/var/lib/openvswitch/"
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "openvswitch"
-
-- name: Waiting the openvswitch_db service to be ready
- command: docker exec openvswitch_db ovs-vsctl --no-wait show
- register: check_result
- until: check_result | success
- changed_when: False
- retries: 30
- delay: 2
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "openvswitch"
-
-- name: Ensuring OVS bridge is properly setup
- command: docker exec openvswitch_db /usr/local/bin/kolla_ensure_openvswitch_configured {{ item.0 }} {{ item.1 }}
- register: status
- changed_when: status.stdout.find('changed') != -1
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "openvswitch"
- with_together:
- - "{{ neutron_bridge_name.split(',') }}"
- - "{{ neutron_external_interface.split(',') }}"
-
-- name: Starting openvswitch-vswitchd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ openvswitch_vswitchd_image_full }}"
- name: "openvswitch_vswitchd"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/openvswitch-vswitchd/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "openvswitch"
-
-- name: Starting neutron-server container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_server_image_full }}"
- name: "neutron_server"
- volumes:
- - "{{ node_config_directory }}/neutron-server/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['neutron-server']
-
-- name: Starting neutron-openvswitch-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_openvswitch_agent_image_full }}"
- name: "neutron_openvswitch_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-openvswitch-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- when:
- - (
- ( inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and not enable_nova_fake | bool
- ) or
- ( inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent']
- and enable_nova_fake | bool
- )
- )
- - neutron_plugin_agent == "openvswitch"
-
-- name: Starting neutron-sfc-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_sfc_agent_image_full }}"
- name: "neutron_sfc_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-sfc-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- when:
- - (
- ( inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- and not enable_nova_fake | bool
- ) or
- ( inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- and enable_nova_fake | bool
- )
- )
- - neutron_plugin_agent == "sfc"
-
-- name: Starting neutron-openvswitch-agent container for fake nova compute
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_openvswitch_agent_image_full }}"
- name: "neutron_openvswitch_agent_fake_{{ item }}"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-openvswitch-agent-fake-{{ item }}/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when:
- - inventory_hostname in groups['compute']
- - neutron_plugin_agent == "openvswitch"
- - enable_nova_fake | bool
-
-- name: Starting neutron-sfc-agent container for fake nova compute
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_sfc_agent_image_full }}"
- name: "neutron_sfc_agent_fake_{{ item }}"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-sfc-agent-fake-{{ item }}/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when:
- - inventory_hostname in groups['compute']
- - neutron_plugin_agent == "sfc"
- - enable_nova_fake | bool
-
-- name: Starting neutron-linuxbridge-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- environment:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- NEUTRON_BRIDGE: "br-ex"
- NEUTRON_INTERFACE: "{{ neutron_external_interface }}"
- image: "{{ neutron_linuxbridge_agent_image_full }}"
- name: "neutron_linuxbridge_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-linuxbridge-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- when:
- - (inventory_hostname in groups['compute']
- or (enable_manila | bool and inventory_hostname in groups['manila-share'])
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-vpnaas-agent'])
- - neutron_plugin_agent == "linuxbridge"
-
-- name: Starting neutron-dhcp-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_dhcp_agent_image_full }}"
- name: "neutron_dhcp_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-dhcp-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run/:/run/:shared"
- - "/run/netns/:/run/netns/:shared"
- - "neutron_metadata_socket:/var/lib/neutron/kolla/"
- - "kolla_logs:/var/log/kolla/"
- when:
- - inventory_hostname in groups['neutron-dhcp-agent']
-
-- name: Starting neutron-l3-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_l3_agent_image_full }}"
- name: "neutron_l3_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-l3-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run:/run:shared"
- - "/run/netns/:/run/netns/:shared"
- - "neutron_metadata_socket:/var/lib/neutron/kolla/"
- - "kolla_logs:/var/log/kolla/"
- when:
- - (inventory_hostname in groups['neutron-l3-agent']
- or (inventory_hostname in groups['compute'] and enable_neutron_dvr | bool))
- - not enable_neutron_vpnaas | bool
-
-- name: Starting neutron-lbaas-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_lbaas_agent_image_full }}"
- name: "neutron_lbaas_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-lbaas-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run:/run:shared"
- - "/run/netns/:/run/netns/:shared"
- - "neutron_metadata_socket:/var/lib/neutron/kolla/"
- - "kolla_logs:/var/log/kolla/"
- when:
- - enable_neutron_lbaas | bool
- - inventory_hostname in groups['neutron-lbaas-agent']
-
-- name: Starting neutron-metadata-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_metadata_agent_image_full }}"
- name: "neutron_metadata_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-metadata-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run/netns/:/run/netns/:shared"
- - "neutron_metadata_socket:/var/lib/neutron/kolla/"
- - "kolla_logs:/var/log/kolla/"
- when:
- - (inventory_hostname in groups['neutron-metadata-agent']
- or (inventory_hostname in groups['compute'] and enable_neutron_dvr | bool))
-
-- name: Starting neutron-vpnaas-agent container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ neutron_vpnaas_agent_image_full }}"
- name: "neutron_vpnaas_agent"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/neutron-vpnaas-agent/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/run:/run:shared"
- - "/run/netns/:/run/netns/:shared"
- - "neutron_metadata_socket:/var/lib/neutron/kolla/"
- - "kolla_logs:/var/log/kolla/"
- when:
- - enable_neutron_vpnaas | bool
- - inventory_hostname in groups['neutron-vpnaas-agent']
diff --git a/ansible/roles/neutron/tasks/upgrade.yml b/ansible/roles/neutron/tasks/upgrade.yml
deleted file mode 100644
index 84b6f5b569..0000000000
--- a/ansible/roles/neutron/tasks/upgrade.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- include: config.yml
- when: inventory_hostname in groups['compute']
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-server']
- or inventory_hostname in groups['neutron-vpnaas-agent']
-
-- include: config-neutron-fake.yml
- when:
- - inventory_hostname in groups['compute']
- - enable_nova_fake | bool
-
-- include: bootstrap_service.yml
- when: inventory_hostname in groups['neutron-server']
-
-- include: start.yml
- when: inventory_hostname in groups['compute']
- or inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-lbaas-agent']
- or inventory_hostname in groups['neutron-metadata-agent']
- or inventory_hostname in groups['neutron-server']
- or inventory_hostname in groups['neutron-vpnaas-agent']
diff --git a/ansible/roles/neutron/templates/dhcp_agent.ini.j2 b/ansible/roles/neutron/templates/dhcp_agent.ini.j2
deleted file mode 100644
index 8480096609..0000000000
--- a/ansible/roles/neutron/templates/dhcp_agent.ini.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-# dhcp_agent.ini
-[DEFAULT]
-dnsmasq_config_file = /etc/neutron/dnsmasq.conf
-enable_isolated_metadata = true
-force_metadata = true
diff --git a/ansible/roles/neutron/templates/dnsmasq.conf.j2 b/ansible/roles/neutron/templates/dnsmasq.conf.j2
deleted file mode 100644
index 91566c4185..0000000000
--- a/ansible/roles/neutron/templates/dnsmasq.conf.j2
+++ /dev/null
@@ -1 +0,0 @@
-log-facility=/var/log/kolla/neutron/dnsmasq.log
diff --git a/ansible/roles/neutron/templates/fwaas_driver.ini.j2 b/ansible/roles/neutron/templates/fwaas_driver.ini.j2
deleted file mode 100644
index b020e6bbd8..0000000000
--- a/ansible/roles/neutron/templates/fwaas_driver.ini.j2
+++ /dev/null
@@ -1 +0,0 @@
-[fwaas]
diff --git a/ansible/roles/neutron/templates/l3_agent.ini.j2 b/ansible/roles/neutron/templates/l3_agent.ini.j2
deleted file mode 100644
index 7b513d4eb4..0000000000
--- a/ansible/roles/neutron/templates/l3_agent.ini.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-#jinja2: trim_blocks: False
-[DEFAULT]
-{% if enable_neutron_dvr | bool %}
-{% if inventory_hostname in groups['network'] %}
-agent_mode = dvr_snat
-{% elif inventory_hostname in groups['compute'] %}
-agent_mode = dvr
-{% endif %}
-{% else %}
-agent_mode = legacy
-{% endif %}
diff --git a/ansible/roles/neutron/templates/lbaas_agent.ini.j2 b/ansible/roles/neutron/templates/lbaas_agent.ini.j2
deleted file mode 100644
index 973bd61800..0000000000
--- a/ansible/roles/neutron/templates/lbaas_agent.ini.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-[DEFAULT]
-debug = {{ neutron_logging_debug }}
-device_driver = neutron_lbaas.drivers.haproxy.namespace_driver.HaproxyNSDriver
-
-[haproxy]
-user_group = haproxy
diff --git a/ansible/roles/neutron/templates/metadata_agent.ini.j2 b/ansible/roles/neutron/templates/metadata_agent.ini.j2
deleted file mode 100644
index 79f6aa2305..0000000000
--- a/ansible/roles/neutron/templates/metadata_agent.ini.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-# metadata_agent.ini
-[DEFAULT]
-nova_metadata_ip = {% if orchestration_engine == 'KUBERNETES' %}nova-metadata{% else %}{{ kolla_internal_fqdn }}{% endif %}
-
-nova_metadata_port = {{ nova_metadata_port }}
-metadata_proxy_shared_secret = {{ metadata_secret }}
diff --git a/ansible/roles/neutron/templates/ml2_conf.ini.j2 b/ansible/roles/neutron/templates/ml2_conf.ini.j2
deleted file mode 100644
index b689440e0d..0000000000
--- a/ansible/roles/neutron/templates/ml2_conf.ini.j2
+++ /dev/null
@@ -1,72 +0,0 @@
-# ml2_conf.ini
-[ml2]
-{% if enable_ironic | bool %}
-tenant_network_types = vxlan, flat
-mechanism_drivers = openvswitch
-{% else %}
-# Changing type_drivers after bootstrap can lead to database inconsistencies
-type_drivers = flat,vlan,vxlan
-tenant_network_types = vxlan
-{% endif %}
-
-{% if neutron_plugin_agent == "openvswitch" %}
-mechanism_drivers = openvswitch,l2population
-{% elif neutron_plugin_agent == "linuxbridge" %}
-mechanism_drivers = linuxbridge,l2population
-{% endif %}
-
-{% if enable_neutron_qos | bool %}
-extension_drivers = qos
-{% endif %}
-
-[ml2_type_vlan]
-{% if enable_ironic | bool %}
-network_vlan_ranges = physnet1
-{% else %}
-network_vlan_ranges =
-{% endif %}
-
-[ml2_type_flat]
-{% if enable_ironic | bool %}
-flat_networks = *
-{% else %}
-flat_networks = {% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
-
-[ml2_type_vxlan]
-vni_ranges = 1:1000
-vxlan_group = 239.1.1.1
-
-[securitygroup]
-{% if neutron_plugin_agent == "openvswitch" %}
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-{% elif neutron_plugin_agent == "linuxbridge" %}
-firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
-{% endif %}
-
-{% if neutron_plugin_agent == "openvswitch" %}
-[agent]
-tunnel_types = vxlan
-l2_population = true
-arp_responder = true
-
-{% if enable_neutron_dvr | bool %}
-enable_distributed_routing = True
-{% endif %}
-
-[ovs]
-bridge_mappings = {% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}
-
-ovsdb_connection = tcp:{{ api_interface_address }}:6640
-{% if enable_nova_fake | bool %}
-integration_bridge = br-int-{{ item }}
-{% endif %}
-{% elif neutron_plugin_agent == "linuxbridge" %}
-[linux_bridge]
-physical_interface_mappings = physnet1:{{ neutron_external_interface }}
-
-
-[vxlan]
-l2_population = true
-{% endif %}
-local_ip = {{ tunnel_interface_address }}
diff --git a/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2 b/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2
deleted file mode 100644
index f5eed2a526..0000000000
--- a/ansible/roles/neutron/templates/neutron-dhcp-agent.json.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "command": "neutron-dhcp-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/dhcp_agent.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/dhcp_agent.ini",
- "dest": "/etc/neutron/dhcp_agent.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/dnsmasq.conf",
- "dest": "/etc/neutron/dnsmasq.conf",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- },
- {
- "path": "/var/lib/neutron/kolla",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron-l3-agent.json.j2 b/ansible/roles/neutron/templates/neutron-l3-agent.json.j2
deleted file mode 100644
index d3ef7653fb..0000000000
--- a/ansible/roles/neutron/templates/neutron-l3-agent.json.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "command": "neutron-l3-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini --config-file /etc/neutron/fwaas_driver.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/fwaas_driver.ini",
- "dest": "/etc/neutron/fwaas_driver.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/l3_agent.ini",
- "dest": "/etc/neutron/l3_agent.ini",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- },
- {
- "path": "/var/lib/neutron/kolla",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron-lbaas-agent.json.j2 b/ansible/roles/neutron/templates/neutron-lbaas-agent.json.j2
deleted file mode 100644
index 0955c30514..0000000000
--- a/ansible/roles/neutron/templates/neutron-lbaas-agent.json.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "command": "neutron-lbaasv2-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/lbaas_agent.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/lbaas_agent.ini",
- "dest": "/etc/neutron/lbaas_agent.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- },
- {
- "path": "/var/lib/neutron/kolla",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2 b/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2
deleted file mode 100644
index 6055414af3..0000000000
--- a/ansible/roles/neutron/templates/neutron-linuxbridge-agent.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "neutron-linuxbridge-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2 b/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2
deleted file mode 100644
index 205ad3bd34..0000000000
--- a/ansible/roles/neutron/templates/neutron-metadata-agent.json.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "command": "neutron-metadata-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/metadata_agent.ini",
- "dest": "/etc/neutron/metadata_agent.ini",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- },
- {
- "path": "/var/lib/neutron/kolla",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2 b/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2
deleted file mode 100644
index 853db81085..0000000000
--- a/ansible/roles/neutron/templates/neutron-openvswitch-agent.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron-server.json.j2 b/ansible/roles/neutron/templates/neutron-server.json.j2
deleted file mode 100644
index 36402f0a5e..0000000000
--- a/ansible/roles/neutron/templates/neutron-server.json.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "command": "neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/neutron_vpnaas.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/neutron_lbaas.conf",
- "dest": "/etc/neutron/neutron_lbaas.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/neutron_vpnaas.conf",
- "dest": "/etc/neutron/neutron_vpnaas.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron-vpnaas-agent.json.j2 b/ansible/roles/neutron/templates/neutron-vpnaas-agent.json.j2
deleted file mode 100644
index 2c0853d488..0000000000
--- a/ansible/roles/neutron/templates/neutron-vpnaas-agent.json.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-{
- "command": "neutron-vpn-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/fwaas_driver.ini --config-file /etc/neutron/l3_agent.ini --config-file /etc/neutron/vpnaas_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/neutron.conf",
- "dest": "/etc/neutron/neutron.conf",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ml2_conf.ini",
- "dest": "/etc/neutron/plugins/ml2/ml2_conf.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/fwaas_driver.ini",
- "dest": "/etc/neutron/fwaas_driver.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/l3_agent.ini",
- "dest": "/etc/neutron/l3_agent.ini",
- "owner": "neutron",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/vpnaas_agent.ini",
- "dest": "/etc/neutron/vpnaas_agent.ini",
- "owner": "neutron",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/neutron",
- "owner": "neutron:neutron",
- "recurse": true
- },
- {
- "path": "/var/lib/neutron/kolla",
- "owner": "neutron:neutron",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/neutron/templates/neutron.conf.j2 b/ansible/roles/neutron/templates/neutron.conf.j2
deleted file mode 100644
index 472431e1b1..0000000000
--- a/ansible/roles/neutron/templates/neutron.conf.j2
+++ /dev/null
@@ -1,106 +0,0 @@
-# neutron.conf
-[DEFAULT]
-debug = {{ neutron_logging_debug }}
-
-log_dir = /var/log/kolla/neutron
-
-# NOTE(elemoine): set use_stderr to False or the logs will also be sent to
-# stderr and collected by Docker
-use_stderr = False
-
-bind_host = {{ api_interface_address }}
-bind_port = {{ neutron_server_port }}
-
-api_paste_config = /usr/share/neutron/api-paste.ini
-endpoint_type = internalURL
-
-api_workers = {{ openstack_service_workers }}
-metadata_workers = {{ openstack_service_workers }}
-
-# NOTE(SamYaple): We must specify this value here rather than the metadata conf
-# because it is used by the l3 and dhcp agents. The reason the path has 'kolla'
-# in it is because we are sharing this socket in a volume which is it's own dir
-metadata_proxy_socket = /var/lib/neutron/kolla/metadata_proxy
-
-{% if neutron_plugin_agent == "openvswitch" %}
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-{% elif neutron_plugin_agent == "linuxbridge" %}
-interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-{% endif %}
-
-{% if enable_nova_fake | bool %}
-ovs_integration_bridge = br-int-{{ item }}
-host = {{ ansible_hostname }}_{{ item }}
-{% endif %}
-
-allow_overlapping_ips = true
-core_plugin = ml2
-service_plugins = router{% if enable_neutron_lbaas | bool %},neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2{% endif %}{% if enable_neutron_qos | bool %},qos{% endif %}{% if enable_neutron_vpnaas | bool %},neutron_vpnaas.services.vpn.plugin.VPNDriverPlugin{% endif %}{% if neutron_plugin_agent == "sfc" %}flow_classifier,sfc{% endif %}
-
-{% if enable_neutron_agent_ha | bool %}
-dhcp_agents_per_network = {{ dhcp_agents_per_network }}
-l3_ha = true
-max_l3_agents_per_router = {{ max_l3_agents_per_router }}
-min_l3_agents_per_router = {{ min_l3_agents_per_router }}
-{% endif %}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if enable_neutron_dvr | bool %}
-router_distributed = True
-{% endif %}
-
-[nova]
-auth_url = {{ keystone_admin_url }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-region_name = {{ openstack_region_name }}
-project_name = service
-username = {{ nova_keystone_user }}
-password = {{ nova_keystone_password }}
-endpoint_type = internal
-
-[oslo_concurrency]
-lock_path = /var/lib/neutron/tmp
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-
-[database]
-connection = mysql+pymysql://{{ neutron_database_user }}:{{ neutron_database_password }}@{{ neutron_database_address }}/{{ neutron_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ neutron_keystone_user }}
-password = {{ neutron_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
-
-[oslo_messaging_notifications]
-{% if enable_ceilometer | bool or enable_designate | bool %}
-driver = messagingv2
-{% set topics=["notifications" if enable_ceilometer|bool else "", "notifications_designate" if enable_designate|bool else ""] %}
-topcis = {{ topics|reject("equalto", "")|list|join(",") }}
-{% else %}
-driver = noop
-{% endif %}
-
-{% if neutron_plugin_agent == "sfc" %}
-[sfc]
-drivers = ovs
-[flowclassifier]
-{% endif %}
diff --git a/ansible/roles/neutron/templates/neutron_lbaas.conf.j2 b/ansible/roles/neutron/templates/neutron_lbaas.conf.j2
deleted file mode 100644
index bfacaba544..0000000000
--- a/ansible/roles/neutron/templates/neutron_lbaas.conf.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{% if enable_neutron_lbaas | bool %}
-[service_providers]
-service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-{% endif %}
diff --git a/ansible/roles/neutron/templates/neutron_vpnaas.conf.j2 b/ansible/roles/neutron/templates/neutron_vpnaas.conf.j2
deleted file mode 100644
index 47eeefb4a7..0000000000
--- a/ansible/roles/neutron/templates/neutron_vpnaas.conf.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{% if enable_neutron_vpnaas | bool %}
-[service_providers]
-service_provider = VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-{% endif %}
diff --git a/ansible/roles/neutron/templates/openvswitch-db-server.json.j2 b/ansible/roles/neutron/templates/openvswitch-db-server.json.j2
deleted file mode 100644
index 72b9ad694a..0000000000
--- a/ansible/roles/neutron/templates/openvswitch-db-server.json.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "command": "start-ovsdb-server {{ api_interface_address }} {% if orchestration_engine == 'KUBERNETES' %} {{ neutron_bridge_name }} {{ neutron_external_interface }} {% endif %}",
- "config_files": []
-}
diff --git a/ansible/roles/neutron/templates/openvswitch-vswitchd.json.j2 b/ansible/roles/neutron/templates/openvswitch-vswitchd.json.j2
deleted file mode 100644
index 97308d886b..0000000000
--- a/ansible/roles/neutron/templates/openvswitch-vswitchd.json.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "command": "/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log",
- "config_files": []
-}
diff --git a/ansible/roles/neutron/templates/vpnaas_agent.ini.j2 b/ansible/roles/neutron/templates/vpnaas_agent.ini.j2
deleted file mode 100644
index 2f4d33d638..0000000000
--- a/ansible/roles/neutron/templates/vpnaas_agent.ini.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{% set vpn_device_driver = 'neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver' if kolla_base_distro in ['ubuntu', 'debian'] else 'neutron_vpnaas.services.vpn.device_drivers.ipsec.OpenSwanDriver'%}
-[DEFAULT]
-
-[ipsec]
-enable_detailed_logging = {{ neutron_logging_debug }}
-
-[service_providers]
-service_provider = VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-
-[vpnagent]
-vpn_device_driver = {{ vpn_device_driver }}
diff --git a/ansible/roles/nova/defaults/main.yml b/ansible/roles/nova/defaults/main.yml
deleted file mode 100644
index bf24c02f78..0000000000
--- a/ansible/roles/nova/defaults/main.yml
+++ /dev/null
@@ -1,90 +0,0 @@
----
-project_name: "nova"
-
-####################
-# Ceph
-####################
-ceph_nova_pool_type: "{{ ceph_pool_type }}"
-ceph_nova_cache_mode: "{{ ceph_cache_mode }}"
-
-# Due to Ansible issues on include, you cannot override these variables. Please
-# override the variables they reference instead.
-nova_pool_name: "{{ ceph_nova_pool_name }}"
-nova_pool_type: "{{ ceph_nova_pool_type }}"
-nova_cache_mode: "{{ ceph_nova_cache_mode }}"
-
-# Discard option for nova managed disks. Requires libvirt (1, 0, 6) or later and
-# qemu (1, 6, 0) or later. Set to "" to disable.
-nova_hw_disk_discard: "unmap"
-
-
-####################
-# Database
-####################
-nova_database_name: "nova"
-nova_database_user: "nova"
-nova_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-nova_api_database_name: "nova_api"
-nova_api_database_user: "nova_api"
-nova_api_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-####################
-# Docker
-####################
-nova_libvirt_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-libvirt"
-nova_libvirt_tag: "{{ openstack_release }}"
-nova_libvirt_image_full: "{{ nova_libvirt_image }}:{{ nova_libvirt_tag }}"
-
-nova_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-ssh"
-nova_ssh_tag: "{{ openstack_release }}"
-nova_ssh_image_full: "{{ nova_ssh_image }}:{{ nova_ssh_tag }}"
-
-nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-conductor"
-nova_conductor_tag: "{{ openstack_release }}"
-nova_conductor_image_full: "{{ nova_conductor_image }}:{{ nova_conductor_tag }}"
-
-nova_consoleauth_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-consoleauth"
-nova_consoleauth_tag: "{{ openstack_release }}"
-nova_consoleauth_image_full: "{{ nova_consoleauth_image }}:{{ nova_consoleauth_tag }}"
-
-nova_novncproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-novncproxy"
-nova_novncproxy_tag: "{{ openstack_release }}"
-nova_novncproxy_image_full: "{{ nova_novncproxy_image }}:{{ nova_novncproxy_tag }}"
-
-nova_spicehtml5proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-spicehtml5proxy"
-nova_spicehtml5proxy_tag: "{{ openstack_release }}"
-nova_spicehtml5proxy_image_full: "{{ nova_spicehtml5proxy_image }}:{{ nova_spicehtml5proxy_tag }}"
-
-nova_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-scheduler"
-nova_scheduler_tag: "{{ openstack_release }}"
-nova_scheduler_image_full: "{{ nova_scheduler_image }}:{{ nova_scheduler_tag }}"
-
-nova_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-compute"
-nova_compute_tag: "{{ openstack_release }}"
-nova_compute_image_full: "{{ nova_compute_image }}:{{ nova_compute_tag }}"
-
-nova_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-api"
-nova_api_tag: "{{ openstack_release }}"
-nova_api_image_full: "{{ nova_api_image }}:{{ nova_api_tag }}"
-
-nova_compute_ironic_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-nova-compute-ironic"
-nova_compute_ironic_tag: "{{ openstack_release }}"
-nova_compute_ironic_image_full: "{{ nova_compute_ironic_image }}:{{ nova_compute_ironic_tag }}"
-
-####################
-# OpenStack
-####################
-nova_legacy_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
-nova_legacy_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
-nova_legacy_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
-
-nova_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2.1/%(tenant_id)s"
-nova_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ nova_api_port }}/v2.1/%(tenant_id)s"
-nova_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_api_port }}/v2.1/%(tenant_id)s"
-
-nova_logging_debug: "{{ openstack_logging_debug }}"
-
-openstack_nova_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
-
-nova_ssh_port: "8022"
diff --git a/ansible/roles/nova/meta/main.yml b/ansible/roles/nova/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/nova/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/nova/tasks/bootstrap.yml b/ansible/roles/nova/tasks/bootstrap.yml
deleted file mode 100644
index dd2222591e..0000000000
--- a/ansible/roles/nova/tasks/bootstrap.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- name: Creating Nova database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ nova_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['nova-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Nova-api database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ nova_api_database_name }}'"
- register: database_api
- changed_when: "{{ database_api.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_api.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_api.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['nova-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_api_created: "{{ (database_api.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Nova database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ nova_database_name }}'
- password='{{ nova_database_password }}'
- host='%'
- priv='{{ nova_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['nova-api'][0] }}"
-
-- name: Creating Nova-api database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ nova_api_database_name }}'
- password='{{ nova_api_database_password }}'
- host='%'
- priv='{{ nova_api_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_api_user_create
- changed_when: "{{ database_api_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_api_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_api_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['nova-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/nova/tasks/bootstrap_service.yml b/ansible/roles/nova/tasks/bootstrap_service.yml
deleted file mode 100644
index 33ce56728e..0000000000
--- a/ansible/roles/nova/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Nova bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ nova_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_nova"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/nova-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['nova-api'][0] }}"
diff --git a/ansible/roles/nova/tasks/ceph.yml b/ansible/roles/nova/tasks/ceph.yml
deleted file mode 100644
index 36a2dfc2a4..0000000000
--- a/ansible/roles/nova/tasks/ceph.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- with_items:
- - "nova-compute"
- - "nova-libvirt/secrets"
- when: inventory_hostname in groups['compute']
-
-- name: Copying over config(s)
- template:
- src: roles/ceph/templates/ceph.conf.j2
- dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
- with_items:
- - "nova-compute"
- - "nova-libvirt"
- when: inventory_hostname in groups['compute']
-
-- include: ../../ceph_pools.yml
- vars:
- pool_name: "{{ nova_pool_name }}"
- pool_type: "{{ nova_pool_type }}"
- cache_mode: "{{ nova_cache_mode }}"
-
-# TODO(SamYaple): Improve changed_when tests
-- name: Pulling cephx keyring for nova
- command: docker exec ceph_mon ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ ceph_cinder_pool_name }}, allow rwx pool={{ ceph_cinder_pool_name }}-cache, allow rwx pool={{ ceph_nova_pool_name }}, allow rwx pool={{ ceph_nova_pool_name }}-cache, allow rwx pool={{ ceph_glance_pool_name }}, allow rwx pool={{ ceph_glance_pool_name }}-cache'
- register: cephx_key
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- run_once: True
-
-# TODO(SamYaple): Improve failed_when and changed_when tests
-- name: Pulling cephx keyring for libvirt
- command: docker exec ceph_mon ceph auth get-key client.nova
- register: cephx_raw_key
- delegate_to: "{{ groups['ceph-mon'][0] }}"
- changed_when: False
- run_once: True
-
-- name: Pushing cephx keyring for nova
- copy:
- content: "{{ cephx_key.stdout }}\n\r"
- dest: "{{ node_config_directory }}/nova-compute/ceph.client.nova.keyring"
- mode: "0600"
- when: inventory_hostname in groups['compute']
-
-- name: Pushing secrets xml for libvirt
- template:
- src: "secret.xml.j2"
- dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.xml"
- mode: "0600"
- when: inventory_hostname in groups['compute']
-
-- name: Pushing secrets key for libvirt
- copy:
- content: "{{ cephx_raw_key.stdout }}"
- dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.base64"
- mode: "0600"
- when: inventory_hostname in groups['compute']
diff --git a/ansible/roles/nova/tasks/config-nova-fake.yml b/ansible/roles/nova/tasks/config-nova-fake.yml
deleted file mode 100644
index 2bd74e1152..0000000000
--- a/ansible/roles/nova/tasks/config-nova-fake.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/nova-compute-fake-{{ item }}"
- state: "directory"
- recurse: yes
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
-
-- name: Copying over config.json files for services
- template:
- src: "nova-compute.json.j2"
- dest: "{{ node_config_directory }}/nova-compute-fake-{{ item }}/config.json"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
-
-- name: Copying over nova.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/nova.conf.j2"
- - "{{ node_config_directory }}/config/global.conf"
- - "{{ node_config_directory }}/config/database.conf"
- - "{{ node_config_directory }}/config/messaging.conf"
- - "{{ node_config_directory }}/config/nova.conf"
- - "{{ node_config_directory }}/config/nova/{{ item }}.conf"
- - "{{ node_config_directory }}/config/nova/{{ inventory_hostname }}/nova.conf"
- dest: "{{ node_config_directory }}/nova-compute-fake-{{ item }}/nova.conf"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
diff --git a/ansible/roles/nova/tasks/config.yml b/ansible/roles/nova/tasks/config.yml
deleted file mode 100644
index e7c7a06e3a..0000000000
--- a/ansible/roles/nova/tasks/config.yml
+++ /dev/null
@@ -1,105 +0,0 @@
----
-- name: Setting sysctl values
- sysctl: name={{ item.name }} value={{ item.value }} sysctl_set=yes
- with_items:
- - { name: "net.bridge.bridge-nf-call-iptables", value: 1}
- - { name: "net.bridge.bridge-nf-call-ip6tables", value: 1}
- - { name: "net.ipv4.conf.all.rp_filter", value: 0}
- - { name: "net.ipv4.conf.default.rp_filter", value: 0}
- when:
- - set_sysctl | bool
- - inventory_hostname in groups['compute']
-
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "nova-api"
- - "nova-compute"
- - "nova-compute-ironic"
- - "nova-conductor"
- - "nova-consoleauth"
- - "nova-libvirt"
- - "nova-novncproxy"
- - "nova-scheduler"
- - "nova-spicehtml5proxy"
- - "nova-ssh"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "nova-api"
- - "nova-compute"
- - "nova-compute-ironic"
- - "nova-conductor"
- - "nova-consoleauth"
- - "nova-libvirt"
- - "nova-novncproxy"
- - "nova-scheduler"
- - "nova-spicehtml5proxy"
- - "nova-ssh"
-
-- name: Copying over nova.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/nova.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/nova.conf"
- - "{{ node_custom_config }}/nova/{{ item }}.conf"
- - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
- dest: "{{ node_config_directory }}/{{ item }}/nova.conf"
- with_items:
- - "nova-api"
- - "nova-compute"
- - "nova-compute-ironic"
- - "nova-conductor"
- - "nova-consoleauth"
- - "nova-novncproxy"
- - "nova-scheduler"
- - "nova-spicehtml5proxy"
-
-- name: Copying over libvirt configuration
- template:
- src: "{{ item.src }}"
- dest: "{{ node_config_directory }}/nova-libvirt/{{ item.dest }}"
- with_items:
- - { src: "qemu.conf.j2", dest: "qemu.conf" }
- - { src: "libvirtd.conf.j2", dest: "libvirtd.conf" }
-
-- name: Copying files for nova-ssh
- template:
- src: "{{ item.src }}"
- dest: "{{ node_config_directory }}/nova-ssh/{{ item.dest }}"
- with_items:
- - { src: "sshd_config.j2", dest: "sshd_config" }
- - { src: "id_rsa", dest: "id_rsa" }
- - { src: "id_rsa.pub", dest: "id_rsa.pub" }
- - { src: "ssh_config.j2", dest: "ssh_config" }
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/nova/policy.json"
- register: nova_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/nova/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "nova-api"
- - "nova-compute"
- - "nova-compute-ironic"
- - "nova-conductor"
- - "nova-consoleauth"
- - "nova-novncproxy"
- - "nova-scheduler"
- - "nova-spicehtml5proxy"
- when:
- nova_policy.stat.exists
diff --git a/ansible/roles/nova/tasks/deploy.yml b/ansible/roles/nova/tasks/deploy.yml
deleted file mode 100644
index 21efc2b2e1..0000000000
--- a/ansible/roles/nova/tasks/deploy.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- include: ceph.yml
- when:
- - enable_ceph | bool and nova_backend == "rbd"
- - inventory_hostname in groups['ceph-mon'] or
- inventory_hostname in groups['compute'] or
- inventory_hostname in groups['nova-api'] or
- inventory_hostname in groups['nova-conductor'] or
- inventory_hostname in groups['nova-consoleauth'] or
- inventory_hostname in groups['nova-novncproxy'] or
- inventory_hostname in groups['nova-scheduler']
-
-- include: external-ceph.yml
- when:
- - enable_ceph | bool == False and nova_backend == "rbd"
- - inventory_hostname in groups['compute']
-
-- include: register.yml
- when: inventory_hostname in groups['nova-api']
-
-- include: config.yml
- when: inventory_hostname in groups['compute'] or
- inventory_hostname in groups['nova-api'] or
- inventory_hostname in groups['nova-conductor'] or
- inventory_hostname in groups['nova-consoleauth'] or
- inventory_hostname in groups['nova-novncproxy'] or
- inventory_hostname in groups['nova-scheduler']
-
-- include: config-nova-fake.yml
- when:
- - enable_nova_fake | bool
- - inventory_hostname in groups['compute']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['nova-api'] or
- inventory_hostname in groups['compute']
-
-- include: start.yml
- when: inventory_hostname in groups['compute'] or
- inventory_hostname in groups['nova-api'] or
- inventory_hostname in groups['nova-conductor'] or
- inventory_hostname in groups['nova-consoleauth'] or
- inventory_hostname in groups['nova-novncproxy'] or
- inventory_hostname in groups['nova-scheduler']
diff --git a/ansible/roles/nova/tasks/external-ceph.yml b/ansible/roles/nova/tasks/external-ceph.yml
deleted file mode 100644
index 8743e77020..0000000000
--- a/ansible/roles/nova/tasks/external-ceph.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Ensuring config directory exists
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- with_items:
- - "nova-compute"
- - "nova-libvirt/secrets"
- when: inventory_hostname in groups['compute']
-
-- name: Find keyring files
- local_action: find paths="{{ node_custom_config }}/nova/" patterns="^ceph\.client\..*?\.keyring$" use_regex=True
- register: cephx_keyring_files
-
-- name: Copy over ceph keyring file
- copy:
- src: "{{ cephx_keyring_files.files[0].path }}"
- dest: "{{ node_config_directory }}/{{item}}/"
- with_items:
- - nova-compute
- - nova-libvirt
- when: inventory_hostname in groups['compute']
-
-- name: Copy over ceph.conf
- copy:
- src: "{{ node_custom_config }}/nova/ceph.conf"
- dest: "{{ node_config_directory }}/{{ item }}/"
- with_items:
- - nova-compute
- - nova-libvirt
- when: inventory_hostname in groups['compute']
-
-- name: Pushing secrets xml for libvirt
- template:
- src: "secret.xml.j2"
- dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.xml"
- mode: "0600"
- when: inventory_hostname in groups['compute']
-
-- name: Extract key from file
- local_action: shell cat {{ cephx_keyring_files.files[0].path }} | grep -E 'key\s*=' | awk '{ print $3 }'
- register: cephx_raw_key
-
-- name: Pushing secrets key for libvirt
- copy:
- content: "{{ cephx_raw_key.stdout }}"
- dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ rbd_secret_uuid }}.base64"
- mode: "0600"
- when: inventory_hostname in groups['compute']
diff --git a/ansible/roles/nova/tasks/main.yml b/ansible/roles/nova/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/nova/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/nova/tasks/precheck.yml b/ansible/roles/nova/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/nova/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/nova/tasks/pull.yml b/ansible/roles/nova/tasks/pull.yml
deleted file mode 100644
index 6d067ab7e4..0000000000
--- a/ansible/roles/nova/tasks/pull.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- name: Pulling nova-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_api_image_full }}"
- when: inventory_hostname in groups['nova-api']
-
-- name: Pulling nova-compute image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_compute_image_full }}"
- when:
- - inventory_hostname in groups['compute']
- - not enable_nova_fake | bool
-
-- name: Pulling nova-compute-ironic image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_compute_ironic_image_full }}"
- when:
- - inventory_hostname in groups['nova-compute-ironic']
- - enable_ironic | bool
-
-- name: Pulling nova-conductor image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_conductor_image_full }}"
- when: inventory_hostname in groups['nova-conductor']
-
-- name: Pulling nova-consoleauth image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_consoleauth_image_full }}"
- when: inventory_hostname in groups['nova-consoleauth']
-
-- name: Pulling nova-libvirt image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_libvirt_image_full }}"
- when: inventory_hostname in groups['compute']
-
-- name: Pulling nova-ssh image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_ssh_image_full }}"
- when: inventory_hostname in groups['compute']
-
-- name: Pulling nova-novncproxy image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_novncproxy_image_full }}"
- when:
- - inventory_hostname in groups['nova-novncproxy']
- - nova_console == 'novnc'
-
-- name: Pulling nova-scheduler image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_scheduler_image_full }}"
- when: inventory_hostname in groups['nova-scheduler']
-
-- name: Pulling nova-spicehtml5proxy image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_spicehtml5proxy_image_full }}"
- when:
- - inventory_hostname in groups['nova-spicehtml5proxy']
- - nova_console == 'spice'
diff --git a/ansible/roles/nova/tasks/reconfigure.yml b/ansible/roles/nova/tasks/reconfigure.yml
deleted file mode 100644
index b551dd92ac..0000000000
--- a/ansible/roles/nova/tasks/reconfigure.yml
+++ /dev/null
@@ -1,318 +0,0 @@
----
-- name: Ensuring the nova libvirt, ssh, conductor, api, consoleauth and scheduler containers are up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: nova_libvirt, group: compute }
- - { name: nova_ssh, group: compute }
- - { name: nova_conductor, group: nova-conductor }
- - { name: nova_api, group: nova-api }
- - { name: nova_consoleauth, group: nova-consoleauth }
- - { name: nova_scheduler, group: nova-scheduler }
-
-- name: Ensuring the nova_compute container is up
- kolla_docker:
- name: "nova_compute"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when:
- - not enable_nova_fake | bool
- - inventory_hostname in groups['compute']
-
-- name: Ensuring the nova_compute_ironic container is up
- kolla_docker:
- name: "nova_compute_ironic"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when:
- - enable_ironic | bool
- - inventory_hostname in groups['nova-compute-ironic']
-
-- name: Ensuring the nova_novncproxy container is up
- kolla_docker:
- name: "nova_novncproxy"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when:
- - nova_console == 'novnc'
- - inventory_hostname in groups['nova-novncproxy']
-
-- name: Ensuring the nova_spicehtml5proxy container is up
- kolla_docker:
- name: "nova_spicehtml5proxy"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when:
- - nova_console == 'spice'
- - inventory_hostname in groups['nova-spicehtml5proxy']
-
-- include: config.yml
-
-- name: Check the configs for nova libvirt, ssh, conductor, api, consoleauth and scheduler containers
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: nova_libvirt, group: compute }
- - { name: nova_ssh, group: compute }
- - { name: nova_conductor, group: nova-conductor }
- - { name: nova_api, group: nova-api }
- - { name: nova_consoleauth, group: nova-consoleauth }
- - { name: nova_scheduler, group: nova-scheduler }
-
-- name: Check the configs in the nova_compute container
- command: docker exec nova_compute /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: nova_compute_check_result
- when:
- - not enable_nova_fake | bool
- - inventory_hostname in groups['compute']
-
-- name: Check the configs in the nova_compute_ironic container
- command: docker exec nova_compute_ironic /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: nova_compute_ironic_check_result
- when:
- - enable_ironic | bool
- - inventory_hostname in groups['nova-compute-ironic']
-
-- name: Check the configs in the nova_novncproxy container
- command: docker exec nova_novncproxy /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: nova_novncproxy_check_result
- when:
- - nova_console == 'novnc'
- - inventory_hostname in groups['nova-novncproxy']
-
-- name: Check the configs in the nova_spicehtml5proxy container
- command: docker exec nova_spicehtml5proxy /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: nova_spicehtml5proxy_check_result
- when:
- - nova_console == 'spice'
- - inventory_hostname in groups['nova-spicehtml5proxy']
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy for nova libvirt, ssh, conductor, api, consoleauth and scheduler containers
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: nova_libvirt, group: compute }
- - { name: nova_ssh, group: compute }
- - { name: nova_conductor, group: nova-conductor }
- - { name: nova_api, group: nova-api }
- - { name: nova_consoleauth, group: nova-consoleauth }
- - { name: nova_scheduler, group: nova-scheduler }
-
-- name: Container config strategy for nova_compute
- kolla_docker:
- name: nova_compute
- action: "get_container_env"
- register: nova_compute_container_env
- when:
- - not enable_nova_fake | bool
- - inventory_hostname in groups['compute']
-
-- name: Container config strategy for nova_compute_ironic
- kolla_docker:
- name: nova_compute_ironic
- action: "get_container_env"
- register: nova_compute_ironic_container_env
- when:
- - enable_ironic | bool
- - inventory_hostname in groups['nova-compute-ironic']
-
-- name: Container config strategy for nova_novncproxy
- kolla_docker:
- name: nova_novncproxy
- action: "get_container_env"
- register: nova_novncproxy_container_env
- when:
- - nova_console == 'novnc'
- - inventory_hostname in groups['nova-novncproxy']
-
-- name: Container config strategy for nova_spicehtml5proxy
- kolla_docker:
- name: nova_spicehtml5proxy
- action: "get_container_env"
- register: nova_spicehtml5proxy_container_env
- when:
- - nova_console == 'spice'
- - inventory_hostname in groups['nova-spicehtml5proxy']
-
-- name: Remove the nova libvirt, ssh, conductor, api, consoleauth and scheduler containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
-  until: remove_containers|success
- retries: "{{ item[0]['retries']|default(0) }}"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- # NOTE(Jeffrey4l): retry 1 to remove nova_libvirt container because when
- # guests running, nova_libvirt will raise error even though it is removed.
- - [{ name: nova_libvirt, group: compute, retries: 1 },
- { name: nova_ssh, group: compute },
- { name: nova_conductor, group: nova-conductor },
- { name: nova_api, group: nova-api },
- { name: nova_consoleauth, group: nova-consoleauth },
- { name: nova_scheduler, group: nova-scheduler }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- name: Remove nova_compute container
- kolla_docker:
- name: nova_compute
- action: "remove_container"
- register: remove_nova_compute_container
- when:
- - not enable_nova_fake | bool
- - inventory_hostname in groups['compute']
- - config_strategy == 'COPY_ONCE' or nova_compute_container_env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - nova_compute_check_result['rc'] == 1
-
-- name: Remove nova_compute_ironic container
- kolla_docker:
- name: nova_compute_ironic
- action: "remove_container"
- register: remove_nova_compute_ironic_container
- when:
- - enable_ironic | bool
- - inventory_hostname in groups['nova-compute-ironic']
- - config_strategy == 'COPY_ONCE' or nova_compute_ironic_container_env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - nova_compute_ironic_check_result['rc'] == 1
-
-- name: Remove nova_novncproxy container
- kolla_docker:
- name: nova_novncproxy
- action: "remove_container"
- register: remove_nova_novncproxy_container
- when:
- - nova_console == 'novnc'
- - inventory_hostname in groups['nova-novncproxy']
- - config_strategy == 'COPY_ONCE' or nova_novncproxy_container_env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - nova_novncproxy_check_result['rc'] == 1
-
-- name: Remove nova_spicehtml5proxy container
- kolla_docker:
- name: nova_spicehtml5proxy
- action: "remove_container"
- register: remove_nova_spicehtml5proxy_container
- when:
- - nova_console == 'spice'
- - inventory_hostname in groups['nova-spicehtml5proxy']
- - config_strategy == 'COPY_ONCE' or nova_spicehtml5proxy_container_env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - nova_spicehtml5proxy_check_result['rc'] == 1
-
-- include: start.yml
- when: remove_containers.changed
-
-- include: start.yml
- when:
- - not enable_nova_fake | bool
- - remove_nova_compute_container.changed
-
-- include: start.yml
- when:
- - enable_ironic | bool
- - remove_nova_compute_ironic_container.changed
-
-- include: start.yml
- when:
- - enable_ironic | bool
- - remove_nova_compute_ironic_container.changed
-
-- include: start.yml
- when:
- - nova_console == 'novnc'
- - remove_nova_novncproxy_container.changed
-
-- include: start.yml
- when:
- - nova_console == 'spice'
- - remove_nova_spicehtml5proxy_container.changed
-
-- name: Restart the nova libvirt, ssh, conductor, api, consoleauth and scheduler containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups[item[0]['group']]
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: nova_libvirt, group: compute },
- { name: nova_ssh, group: compute },
- { name: nova_conductor, group: nova-conductor },
- { name: nova_api, group: nova-api },
- { name: nova_consoleauth, group: nova-consoleauth },
- { name: nova_scheduler, group: nova-scheduler }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- name: Restart the nova_compute container
- kolla_docker:
- name: "nova_compute"
- action: "restart_container"
- when:
- - not enable_nova_fake | bool
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups['compute']
- - nova_compute_container_env['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - nova_compute_check_result['rc'] == 1
-
-- name: Restart the nova_compute_ironic container
- kolla_docker:
- name: "nova_compute_ironic"
- action: "restart_container"
- when:
- - enable_ironic | bool
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups['nova-compute-ironic']
- - nova_compute_ironic_container_env['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - nova_compute_ironic_check_result['rc'] == 1
-
-- name: Restart the nova_novncproxy container
- kolla_docker:
- name: "nova_novncproxy"
- action: "restart_container"
- when:
- - nova_console == 'novnc'
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups['nova-novncproxy']
- - nova_novncproxy_container_env['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - nova_novncproxy_check_result['rc'] == 1
-
-- name: Restart the nova_spicehtml5proxy container
- kolla_docker:
- name: "nova_spicehtml5proxy"
- action: "restart_container"
- when:
- - nova_console == 'spice'
- - config_strategy == 'COPY_ALWAYS'
- - inventory_hostname in groups['nova-spicehtml5proxy']
- - nova_spicehtml5proxy_container_env['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - nova_spicehtml5proxy_check_result['rc'] == 1
diff --git a/ansible/roles/nova/tasks/register.yml b/ansible/roles/nova/tasks/register.yml
deleted file mode 100644
index a1b87e5c1d..0000000000
--- a/ansible/roles/nova/tasks/register.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Creating the Nova service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name={{ item.name }}
- service_type={{ item.service_type }}
- description='{{ item.description }}'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_nova_auth }}' }}"
- -e "{'openstack_nova_auth':{{ openstack_nova_auth }}}"
- register: nova_endpoint
- changed_when: "{{ nova_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (nova_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: nova_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'name': 'nova_legacy', 'service_type': 'compute_legacy', 'interface': 'admin', 'url': '{{ nova_legacy_admin_endpoint }}', 'description': 'OpenStack Compute Service (Legacy 2.0)'}
- - {'name': 'nova_legacy', 'service_type': 'compute_legacy', 'interface': 'internal', 'url': '{{ nova_legacy_internal_endpoint }}', 'description': 'OpenStack Compute Service (Legacy 2.0)'}
- - {'name': 'nova_legacy', 'service_type': 'compute_legacy', 'interface': 'public', 'url': '{{ nova_legacy_public_endpoint }}', 'description': 'OpenStack Compute Service (Legacy 2.0)'}
- - {'name': 'nova', 'service_type': 'compute', 'interface': 'admin', 'url': '{{ nova_admin_endpoint }}', 'description': 'OpenStack Compute Service'}
- - {'name': 'nova', 'service_type': 'compute', 'interface': 'internal', 'url': '{{ nova_internal_endpoint }}', 'description': 'OpenStack Compute Service'}
- - {'name': 'nova', 'service_type': 'compute', 'interface': 'public', 'url': '{{ nova_public_endpoint }}', 'description': 'OpenStack Compute Service'}
-
-- name: Creating the Nova project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=nova
- password={{ nova_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_nova_auth }}' }}"
- -e "{'openstack_nova_auth':{{ openstack_nova_auth }}}"
- register: nova_user
- changed_when: "{{ nova_user.stdout.find('localhost | SUCCESS => ') != -1 and (nova_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: nova_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/nova/tasks/reload.yml b/ansible/roles/nova/tasks/reload.yml
deleted file mode 100644
index 6b37a9091d..0000000000
--- a/ansible/roles/nova/tasks/reload.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# This play calls sighup on every service to refresh upgrade levels
-- name: Sighup nova-api
- command: docker exec -t nova_api kill -1 1
- when: inventory_hostname in groups['nova-api']
-
-- name: Sighup nova-conductor
- command: docker exec -t nova_conductor kill -1 1
- when: inventory_hostname in groups['nova-conductor']
-
-- name: Sighup nova-consoleauth
- command: docker exec -t nova_consoleauth kill -1 1
- when: inventory_hostname in groups['nova-consoleauth']
-
-- name: Sighup nova-scheduler
- command: docker exec -t nova_scheduler kill -1 1
- when: inventory_hostname in groups['nova-scheduler']
-
-- name: Sighup nova-compute
- command: docker exec -t nova_compute kill -1 1
- when: inventory_hostname in groups['compute']
diff --git a/ansible/roles/nova/tasks/start.yml b/ansible/roles/nova/tasks/start.yml
deleted file mode 100644
index 659729b199..0000000000
--- a/ansible/roles/nova/tasks/start.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: start_controllers.yml
-
-- include: start_conductors.yml
-
-- include: start_compute.yml
diff --git a/ansible/roles/nova/tasks/start_compute.yml b/ansible/roles/nova/tasks/start_compute.yml
deleted file mode 100644
index d40e7a007c..0000000000
--- a/ansible/roles/nova/tasks/start_compute.yml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-- name: Starting nova-libvirt container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_libvirt_image_full }}"
- name: "nova_libvirt"
- pid_mode: "host"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run/:/run/:shared"
- - "/dev:/dev"
- - "/sys/fs/cgroup:/sys/fs/cgroup"
- - "kolla_logs:/var/log/kolla/"
- - "libvirtd:/var/lib/libvirt"
- - "nova_compute:/var/lib/nova/"
- - "/var/lib/nova/mnt:/var/lib/nova/mnt:shared"
- - "nova_libvirt_qemu:/etc/libvirt/qemu"
- register: start_nova_libvirt_container
- # NOTE(Jeffrey4l): retry 5 to remove nova_libvirt container because when
- # guests running, nova_libvirt will raise error even though it is removed.
- retries: 5
- until: start_nova_libvirt_container|success
- when: inventory_hostname in groups['compute']
-
-- name: Prepare volumes list
- set_fact:
- mounts:
- - "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "/dev:/dev"
- - "kolla_logs:/var/log/kolla/"
- - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- - "libvirtd:/var/lib/libvirt"
- - "nova_compute:/var/lib/nova/"
- - "/var/lib/nova/mnt:/var/lib/nova/mnt:shared"
-
-- name: Starting nova-compute container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_compute_image_full }}"
- name: "nova_compute"
- privileged: True
- volumes: '{{ mounts | reject("equalto", "") | list}}'
- when:
- - inventory_hostname in groups['compute']
- - not enable_nova_fake | bool
- - not enable_ironic | bool
-
-- name: Starting nova-compute-ironic container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_compute_ironic_image_full }}"
- name: "nova_compute_ironic"
- volumes:
- - "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when:
- - inventory_hostname in groups['nova-compute-ironic']
- - enable_ironic | bool
-
-- name: Starting nova-compute-fake containers
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_compute_image_full }}"
- name: "nova_compute_fake_{{ item }}"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run:shared"
- - "kolla_logs:/var/log/kolla/"
- with_sequence: start=1 end={{ num_nova_fake_per_node }}
- when:
- - inventory_hostname in groups['compute']
- - enable_nova_fake | bool
-
-- name: Starting nova-ssh container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_ssh_image_full }}"
- name: "nova_ssh"
- volumes:
- - "{{ node_config_directory }}/nova-ssh/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla"
- - "nova_compute:/var/lib/nova"
- - "/var/lib/nova/mnt:/var/lib/nova/mnt:shared"
- - "heka_socket:/var/lib/kolla/heka/"
- # TODO(jeffrey4l): how to handle the nova-compute-fake and
- # nova-compute-ironic
- when: inventory_hostname in groups['compute']
diff --git a/ansible/roles/nova/tasks/start_conductors.yml b/ansible/roles/nova/tasks/start_conductors.yml
deleted file mode 100644
index aa5fe4dd00..0000000000
--- a/ansible/roles/nova/tasks/start_conductors.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Starting nova-conductor container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "nova_conductor"
- image: "{{ nova_conductor_image_full }}"
- volumes:
- - "{{ node_config_directory }}/nova-conductor/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['nova-conductor']
diff --git a/ansible/roles/nova/tasks/start_controllers.yml b/ansible/roles/nova/tasks/start_controllers.yml
deleted file mode 100644
index 184927281a..0000000000
--- a/ansible/roles/nova/tasks/start_controllers.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Starting nova-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_api_image_full }}"
- name: "nova_api"
- privileged: True
- volumes:
- - "{{ node_config_directory }}/nova-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['nova-api']
-
-- name: Starting nova-consoleauth container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_consoleauth_image_full }}"
- name: "nova_consoleauth"
- volumes:
- - "{{ node_config_directory }}/nova-consoleauth/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['nova-consoleauth']
-
-- name: Starting nova-novncproxy container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_novncproxy_image_full }}"
- name: "nova_novncproxy"
- volumes:
- - "{{ node_config_directory }}/nova-novncproxy/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when:
- - inventory_hostname in groups['nova-novncproxy']
- - nova_console == 'novnc'
-
-- name: Starting nova-scheduler container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_scheduler_image_full }}"
- name: "nova_scheduler"
- volumes:
- - "{{ node_config_directory }}/nova-scheduler/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['nova-scheduler']
-
-- name: Starting nova-spicehtml5proxy container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ nova_spicehtml5proxy_image_full }}"
- name: "nova_spicehtml5proxy"
- volumes:
- - "{{ node_config_directory }}/nova-spicehtml5proxy/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when:
- - inventory_hostname in groups['nova-spicehtml5proxy']
- - nova_console == 'spice'
diff --git a/ansible/roles/nova/tasks/upgrade.yml b/ansible/roles/nova/tasks/upgrade.yml
deleted file mode 100644
index bc24e375e1..0000000000
--- a/ansible/roles/nova/tasks/upgrade.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# Create new set of configs on nodes
-- include: config.yml
-
-# TODO(inc0): since nova is creating new database in L->M, we need to call it.
-# It should be removed later
-- include: bootstrap.yml
-
-- include: bootstrap_service.yml
-
-- include: register.yml
-
-- name: Checking if conductor container needs upgrading
- kolla_docker:
- action: "compare_image"
- common_options: "{{ docker_common_options }}"
- name: "nova_conductor"
- image: "{{ nova_conductor_image_full }}"
- when: inventory_hostname in groups['nova-conductor']
- register: conductor_differs
-
-# Short downtime here, but from user perspective his call will just timeout or execute later
-- name: Stopping all nova_conductor containers
- kolla_docker:
- action: "stop_container"
- common_options: "{{ docker_common_options }}"
- name: "nova_conductor"
- when:
- - inventory_hostname in groups['nova-conductor']
- - conductor_differs['result']
-
-- include: start_conductors.yml
-
-- include: start_controllers.yml
-
-- include: start_compute.yml
-
-- include: reload.yml
diff --git a/ansible/roles/nova/templates/id_rsa b/ansible/roles/nova/templates/id_rsa
deleted file mode 100644
index 173a4b3e12..0000000000
--- a/ansible/roles/nova/templates/id_rsa
+++ /dev/null
@@ -1 +0,0 @@
-{{ nova_ssh_key.private_key }}
diff --git a/ansible/roles/nova/templates/id_rsa.pub b/ansible/roles/nova/templates/id_rsa.pub
deleted file mode 100644
index 16bd674f22..0000000000
--- a/ansible/roles/nova/templates/id_rsa.pub
+++ /dev/null
@@ -1 +0,0 @@
-{{ nova_ssh_key.public_key }}
diff --git a/ansible/roles/nova/templates/libvirtd.conf.j2 b/ansible/roles/nova/templates/libvirtd.conf.j2
deleted file mode 100644
index 70e958929c..0000000000
--- a/ansible/roles/nova/templates/libvirtd.conf.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-listen_tcp = 1
-auth_tcp = "none"
-ca_file = ""
-log_level = 3
-log_outputs = "3:file:/var/log/kolla/libvirt/libvirtd.log"
-listen_addr = "{{ api_interface_address }}"
diff --git a/ansible/roles/nova/templates/nova-api.json.j2 b/ansible/roles/nova/templates/nova-api.json.j2
deleted file mode 100644
index 28642bd593..0000000000
--- a/ansible/roles/nova/templates/nova-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "nova-api",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-compute-ironic.json.j2 b/ansible/roles/nova/templates/nova-compute-ironic.json.j2
deleted file mode 100644
index 94e2b5faef..0000000000
--- a/ansible/roles/nova/templates/nova-compute-ironic.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "nova-compute",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-compute.json.j2 b/ansible/roles/nova/templates/nova-compute.json.j2
deleted file mode 100644
index 018bf7a809..0000000000
--- a/ansible/roles/nova/templates/nova-compute.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "nova-compute",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }{% if nova_backend == "rbd" %},
- {
- "source": "{{ container_config_directory }}/ceph.*",
- "dest": "/etc/ceph/",
- "owner": "nova",
- "perm": "0700"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- },
- {
- "path": "/var/lib/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-conductor.json.j2 b/ansible/roles/nova/templates/nova-conductor.json.j2
deleted file mode 100644
index 6a7328713d..0000000000
--- a/ansible/roles/nova/templates/nova-conductor.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "nova-conductor",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-consoleauth.json.j2 b/ansible/roles/nova/templates/nova-consoleauth.json.j2
deleted file mode 100644
index 9cc3240d7d..0000000000
--- a/ansible/roles/nova/templates/nova-consoleauth.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "nova-consoleauth",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-libvirt.json.j2 b/ansible/roles/nova/templates/nova-libvirt.json.j2
deleted file mode 100644
index aa19f7a396..0000000000
--- a/ansible/roles/nova/templates/nova-libvirt.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "/usr/sbin/libvirtd --listen",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/libvirtd.conf",
- "dest": "/etc/libvirt/libvirtd.conf",
- "owner": "root",
- "perm": "0644"
- },
- {
- "source": "{{ container_config_directory }}/qemu.conf",
- "dest": "/etc/libvirt/qemu.conf",
- "owner": "root",
- "perm": "0644"
- }{% if nova_backend == "rbd" %},
- {
- "source": "{{ container_config_directory }}/secrets",
- "dest": "/etc/libvirt/secrets",
- "owner": "root",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/ceph.conf",
- "dest": "/etc/ceph/ceph.conf",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-network.json.j2 b/ansible/roles/nova/templates/nova-network.json.j2
deleted file mode 100644
index 53e93e692d..0000000000
--- a/ansible/roles/nova/templates/nova-network.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "nova-network",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-novncproxy.json.j2 b/ansible/roles/nova/templates/nova-novncproxy.json.j2
deleted file mode 100644
index d34efb3d69..0000000000
--- a/ansible/roles/nova/templates/nova-novncproxy.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "nova-novncproxy",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-scheduler.json.j2 b/ansible/roles/nova/templates/nova-scheduler.json.j2
deleted file mode 100644
index 36638987a0..0000000000
--- a/ansible/roles/nova/templates/nova-scheduler.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "nova-scheduler",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-spicehtml5proxy.json.j2 b/ansible/roles/nova/templates/nova-spicehtml5proxy.json.j2
deleted file mode 100644
index e12354bf43..0000000000
--- a/ansible/roles/nova/templates/nova-spicehtml5proxy.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "nova-spicehtml5proxy",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/nova.conf",
- "dest": "/etc/nova/nova.conf",
- "owner": "nova",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/nova",
- "owner": "nova:nova",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova-ssh.json.j2 b/ansible/roles/nova/templates/nova-ssh.json.j2
deleted file mode 100644
index 1fb041ecc9..0000000000
--- a/ansible/roles/nova/templates/nova-ssh.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "/usr/sbin/sshd -D",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/sshd_config",
- "dest": "/etc/ssh/sshd_config",
- "owner": "root",
- "perm": "0644"
- },
- {
- "source": "{{ container_config_directory }}/ssh_config",
- "dest": "/var/lib/nova/.ssh/config",
- "owner": "nova",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/id_rsa",
- "dest": "/var/lib/nova/.ssh/id_rsa",
- "owner": "nova",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/id_rsa.pub",
- "dest": "/var/lib/nova/.ssh/authorized_keys",
- "owner": "nova",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/nova/templates/nova.conf.j2 b/ansible/roles/nova/templates/nova.conf.j2
deleted file mode 100644
index a686e0dc33..0000000000
--- a/ansible/roles/nova/templates/nova.conf.j2
+++ /dev/null
@@ -1,209 +0,0 @@
-# nova.conf
-[DEFAULT]
-debug = {{ nova_logging_debug }}
-
-log_dir = /var/log/kolla/nova
-use_forwarded_for = true
-
-state_path = /var/lib/nova
-
-{% if kolla_enable_tls_external | bool %}
-secure_proxy_ssl_header = X-Forwarded-Proto
-{% endif %}
-
-osapi_compute_listen = {{ api_interface_address }}
-osapi_compute_listen_port = {{ nova_api_port }}
-osapi_compute_workers = {{ openstack_service_workers }}
-metadata_workers = {{ openstack_service_workers }}
-
-metadata_listen = {{ api_interface_address }}
-metadata_listen_port = {{ nova_metadata_port }}
-
-use_neutron = True
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-
-{% if neutron_plugin_agent == "openvswitch" %}
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-{% elif neutron_plugin_agent == "linuxbridge" %}
-linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
-{% endif %}
-
-allow_resize_to_same_host = true
-
-{% if enable_ironic | bool %}
-scheduler_host_manager = ironic_host_manager
-{% endif %}
-
-{% if service_name == "nova-compute-ironic" %}
-compute_driver = ironic.IronicDriver
-vnc_enabled = False
-ram_allocation_ratio = 1.0
-reserved_host_memory_mb = 0
-{% elif enable_nova_fake | bool %}
-scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
-host = {{ ansible_hostname }}_{{ item }}
-compute_driver = fake.FakeDriver
-{% else %}
-compute_driver = libvirt.LibvirtDriver
-{% endif %}
-
-# Though my_ip is not used directly, lots of other variables use $my_ip
-my_ip = {{ api_interface_address }}
-
-{% if enable_ceilometer | bool %}
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-{% if enable_watcher | bool %}
-compute_monitors=nova.compute.monitors.cpu.virt_driver
-{% endif %}
-{% endif %}
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[conductor]
-workers = {{ openstack_service_workers }}
-
-{% if nova_console == 'novnc' %}
-[vnc]
-novncproxy_host = {{ api_interface_address }}
-novncproxy_port = {{ nova_novncproxy_port }}
-vncserver_listen = {{ api_interface_address }}
-vncserver_proxyclient_address = {{ api_interface_address }}
-{% if inventory_hostname in groups['compute'] %}
-novncproxy_base_url = {{ public_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}kolla_kubernetes_external_vip{% else %}{{ kolla_external_fqdn }}{% endif %}:{{ nova_novncproxy_port }}/vnc_auto.html
-{% endif %}
-{% elif nova_console == 'spice' %}
-[vnc]
-# We have to turn off vnc to use spice
-enabled = false
-[spice]
-enabled = true
-server_listen = {{ api_interface_address }}
-server_proxyclient_address = {{ api_interface_address }}
-{% if inventory_hostname in groups['compute'] %}
-html5proxy_base_url = {{ public_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}kolla_kubernetes_external_vip{% else %}{{ kolla_external_fqdn }}{% endif %}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
-{% endif %}
-html5proxy_host = {{ api_interface_address }}
-html5proxy_port = {{ nova_spicehtml5proxy_port }}
-{% endif %}
-
-{% if service_name == "nova-compute-ironic" %}
-[ironic]
-username = {{ ironic_keystone_user }}
-password = {{ ironic_keystone_password }}
-auth_url = {{ openstack_auth.auth_url }}/v3
-auth_type = password
-project_name = service
-user_domain_name = default
-project_domain_name = default
-api_endpoint = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}/v1
-{% endif %}
-
-[oslo_concurrency]
-lock_path = /var/lib/nova/tmp
-
-[glance]
-api_servers = {% for host in groups['glance-api'] %}{{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}glance-api{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ glance_api_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-num_retries = {{ groups['glance-api'] | length }}
-
-[cinder]
-catalog_info = volumev2:cinderv2:internalURL
-
-[neutron]
-{% if orchestration_engine == 'KUBERNETES' %}
-url = {{ internal_protocol }}://neutron-server:{{ neutron_server_port }}
-{% else %}
-url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}
-{% endif %}
-metadata_proxy_shared_secret = {{ metadata_secret }}
-service_metadata_proxy = true
-
-auth_url = {{ keystone_admin_url }}
-auth_type = password
-project_domain_name = default
-user_domain_id = default
-project_name = service
-username = {{ neutron_keystone_user }}
-password = {{ neutron_keystone_password }}
-
-[database]
-connection = mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}
-max_pool_size = 50
-max_overflow = 1000
-max_retries = -1
-
-[api_database]
-connection = mysql+pymysql://{{ nova_api_database_user }}:{{ nova_api_database_password }}@{{ nova_api_database_address }}/{{ nova_api_database_name }}
-max_retries = -1
-
-[cache]
-backend = oslo_cache.memcache_pool
-enabled = True
-memcache_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ nova_keystone_user }}
-password = {{ nova_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif%}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[libvirt]
-connection_uri = "qemu+tcp://{{ api_interface_address }}/system"
-{% if enable_ceph | bool and nova_backend == "rbd" %}
-images_type = rbd
-images_rbd_pool = {{ ceph_nova_pool_name }}
-images_rbd_ceph_conf = /etc/ceph/ceph.conf
-rbd_user = nova
-disk_cachemodes="network=writeback"
-{% if nova_hw_disk_discard != '' %}
-hw_disk_discard = {{ nova_hw_disk_discard }}
-{% endif %}
-{% endif %}
-{% if nova_backend == "rbd" %}
-rbd_secret_uuid = {{ rbd_secret_uuid }}
-{% endif %}
-
-
-[upgrade_levels]
-compute = auto
-
-[oslo_messaging_notifications]
-{% if enable_ceilometer | bool or enable_designate | bool %}
-driver = messagingv2
-{% set topics=["notifications" if enable_ceilometer|bool else "", "notifications_designate" if enable_designate|bool else ""] %}
-topics = {{ topics|reject("equalto", "")|list|join(",") }}
-{% else %}
-driver = noop
-{% endif %}
-
-[privsep_entrypoint]
-helper_command=sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file /etc/nova/nova.conf
-
-[glance]
-debug = {{ nova_logging_debug }}
-
-[guestfs]
-debug = {{ nova_logging_debug }}
-
-[wsgi]
-api_paste_config = /etc/nova/api-paste.ini
-
-[scheduler]
-max_attempts = 10
diff --git a/ansible/roles/nova/templates/qemu.conf.j2 b/ansible/roles/nova/templates/qemu.conf.j2
deleted file mode 100644
index 16bd9d4d9a..0000000000
--- a/ansible/roles/nova/templates/qemu.conf.j2
+++ /dev/null
@@ -1 +0,0 @@
-stdio_handler = "file"
diff --git a/ansible/roles/nova/templates/secret.xml.j2 b/ansible/roles/nova/templates/secret.xml.j2
deleted file mode 100644
index eab903be4f..0000000000
--- a/ansible/roles/nova/templates/secret.xml.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-
- {{ rbd_secret_uuid }}
-
- client.nova secret
-
-
diff --git a/ansible/roles/nova/templates/ssh_config.j2 b/ansible/roles/nova/templates/ssh_config.j2
deleted file mode 100644
index 7c5c962f9d..0000000000
--- a/ansible/roles/nova/templates/ssh_config.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-Host *
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
- port {{ nova_ssh_port }}
diff --git a/ansible/roles/nova/templates/sshd_config.j2 b/ansible/roles/nova/templates/sshd_config.j2
deleted file mode 100644
index 29bea8dea1..0000000000
--- a/ansible/roles/nova/templates/sshd_config.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-Port {{ nova_ssh_port }}
-ListenAddress {{ api_interface_address }}
-
-SyslogFacility AUTHPRIV
-UsePAM yes
diff --git a/ansible/roles/prechecks/tasks/main.yml b/ansible/roles/prechecks/tasks/main.yml
deleted file mode 100644
index aa37e38485..0000000000
--- a/ansible/roles/prechecks/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: port_checks.yml
-
-- include: service_checks.yml
-
-- include: package_checks.yml
diff --git a/ansible/roles/prechecks/tasks/package_checks.yml b/ansible/roles/prechecks/tasks/package_checks.yml
deleted file mode 100644
index 49d2037c4e..0000000000
--- a/ansible/roles/prechecks/tasks/package_checks.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Checking docker-py version
- command: python -c "import docker; print docker.__version__"
- register: result
- changed_when: false
- failed_when: result | failed or
- result.stdout | version_compare(docker_py_version_min, '<')
-
-- name: Checking Ansible version
- local_action: fail msg="Current Ansible version {{ ansible_version.full }} is less than {{ ansible_version_min }}"
- run_once: true
- when: ansible_version.full | version_compare(ansible_version_min, '<')
diff --git a/ansible/roles/prechecks/tasks/port_checks.yml b/ansible/roles/prechecks/tasks/port_checks.yml
deleted file mode 100644
index faf7a76145..0000000000
--- a/ansible/roles/prechecks/tasks/port_checks.yml
+++ /dev/null
@@ -1,947 +0,0 @@
----
-- name: Checking the api_interface is present
- fail: "msg='Please check the api_interface property - interface {{ api_interface }} not found'"
- when: api_interface not in ansible_interfaces
-
-- name: Checking the api_interface is active
- fail: "msg='Please check the api_interface settings - interface {{ api_interface }} is not active'"
- when: hostvars[inventory_hostname]['ansible_' + api_interface]['active'] != True
-
-- name: Checking the api_interface configuration
- fail: "msg='Please check the api_interface settings - interface {{ api_interface }} configuration missing'"
- when: hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4'] is not defined
-
-- name: Checking the api_interface ip address configuration
- fail: "msg='Please check the api_interface settings - interface {{ api_interface }} ip address problem'"
- when: hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] is not defined
-
-- name: Checking if kolla_internal_vip_address and kolla_external_vip_address are not pingable from any node
- command: ping -c 3 {{ item }}
- register: ping_output
- changed_when: false
- with_items:
- - "{{ kolla_internal_vip_address }}"
- - "{{ kolla_external_vip_address }}"
- failed_when: ping_output.rc != 1
- when: enable_haproxy | bool
-
-- name: Checking if kolla_internal_vip_address is in the same network as api_interface on all nodes
- command: ip -4 -o addr show dev {{ api_interface }}
- register: ip_addr_output
- changed_when: false
- failed_when: "'169.254.' not in kolla_internal_vip_address and \
- kolla_internal_vip_address | ipaddr(ip_addr_output.stdout.split()[3]) is none"
- when: enable_haproxy | bool
-
-- name: Checking free port for Aodh API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ aodh_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['aodh-api']
- - enable_aodh | bool
-
-- name: Checking free port for Aodh API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ aodh_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_aodh | bool
-
-- name: Checking free port for Barbican API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ barbican_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['barbican-api']
- - enable_barbican | bool
-
-- name: Checking free port for Barbican API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ barbican_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_barbican | bool
-
-- name: Checking free port for Ceilometer API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ ceilometer_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['ceilometer-api']
- - enable_ceilometer | bool
-
-- name: Checking free port for Ceilometer API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ ceilometer_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_ceilometer | bool
-
-- name: Checking free port for Congress API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ congress_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['congress-api']
- - enable_congress | bool
-
-- name: Checking free port for Congress API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ congress_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_congress | bool
-
-- name: Checking free port for Cinder API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ cinder_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['cinder-api']
- - enable_cinder | bool
-
-- name: Checking free port for Cinder API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ cinder_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_cinder | bool
-
-- name: Checking free port for Cloudkitty API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ cloudkitty_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['cloudkitty-api']
- - enable_cloudkitty | bool
-
-- name: Checking free port for Cloudkitty API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ cloudkitty_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_cloudkitty | bool
-
-- name: Checking free port for Etcd Peer
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ etcd_peer_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['etcd']
- - enable_etcd | bool
-
-- name: Checking free port for Etcd Client
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ etcd_client_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['etcd']
- - enable_etcd | bool
-
-- name: Checking free port for Designate API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ designate_api_port }}"
- connect_timeout: 1
- state: stopped
- when: inventory_hostname in groups['designate-api']
-
-- name: Checking free port for Designate API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ designate_api_port }}"
- connect_timeout: 1
- state: stopped
- when: inventory_hostname in groups['haproxy']
-
-- name: Checking free port for Glance API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ glance_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['glance-api']
- - enable_glance | bool
-
-- name: Checking free port for Glance API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ glance_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_glance | bool
-
-- name: Checking free port for Glance Registry
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ glance_registry_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['glance-registry']
- - enable_glance | bool
-
-- name: Checking free port for Glance Registry HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ glance_registry_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_glance | bool
-
-- name: Checking free port for Gnocchi API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ gnocchi_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['gnocchi-api']
- - enable_gnocchi | bool
-
-- name: Checking free port for Gnocchi API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ gnocchi_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_gnocchi | bool
-
-- name: Checking free port for Kuryr
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ kuryr_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['compute']
- - enable_kuryr | bool
-
-- name: Checking free port for Sahara API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ sahara_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['sahara-api']
- - enable_sahara | bool
-
-- name: Checking free port for Sahara API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ sahara_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_sahara | bool
-
-- name: Checking free port for HAProxy stats
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ haproxy_stats_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_haproxy | bool
-
-- name: Checking free port for Heat API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ heat_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['heat-api']
- - enable_heat | bool
-
-- name: Checking free port for Heat API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ heat_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_heat | bool
-
-- name: Checking free port for Heat API CFN
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ heat_api_cfn_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['heat-api-cfn']
- - enable_heat | bool
-
-- name: Checking free port for Heat API CFN HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ heat_api_cfn_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_heat | bool
-
-- name: Checking free port for Horizon
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "80"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['horizon']
- - enable_horizon | bool
-
-- name: Checking free port for Horizon HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "80"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_horizon | bool
-
-- name: Checking free port for Ironic
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ ironic_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['ironic-api']
- - enable_ironic | bool
-
-- name: Checking free port for Ironic HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ ironic_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_ironic | bool
-
-- name: Checking free port for Influxdb Admin
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ influxdb_admin_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['influxdb']
- - enable_influxdb | bool
-
-- name: Checking free port for Influxdb Http
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ influxdb_http_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['influxdb']
- - enable_influxdb | bool
-
-- name: Checking free port for Keystone Admin
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ keystone_admin_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['keystone']
- - enable_keystone | bool
-
-- name: Checking free port for Keystone Admin HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ keystone_admin_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_keystone | bool
-
-- name: Checking free port for Keystone Public
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ keystone_public_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['keystone']
- - enable_keystone | bool
-
-- name: Checking free port for Keystone Public HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ keystone_public_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_keystone | bool
-
-- name: Checking free port for iscsi
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ iscsi_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['tgtd']
- - enable_iscsid | bool
-
-- name: Checking free port for Magnum API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ magnum_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['magnum-api']
- - enable_magnum | bool
-
-- name: Checking free port for Magnum API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ magnum_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_magnum | bool
-
-- name: Checking free port for MariaDB
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ database_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['mariadb']
- - enable_mariadb | bool
-
-- name: Checking free port for MariaDB HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ database_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_mariadb | bool
-
-- name: Checking free port for MariaDB WSREP
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ mariadb_wsrep_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['mariadb']
- - enable_mariadb | bool
-
-- name: Checking free port for MariaDB IST
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ mariadb_ist_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['mariadb']
- - enable_mariadb | bool
-
-- name: Checking free port for MariaDB SST
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ mariadb_sst_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['mariadb']
- - enable_mariadb | bool
-
-- name: Checking free port for Manila API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ manila_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['manila-api']
- - enable_manila | bool
-
-- name: Checking free port for Manila API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ manila_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_manila | bool
-
-- name: Checking free port for Memcached
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ memcached_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['memcached']
- - enable_memcached | bool
-
-- name: Checking free port for Murano API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ murano_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['murano-api']
- - enable_murano | bool
-
-- name: Checking free port for Murano API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ murano_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_murano | bool
-
-- name: Checking free port for Neutron Server
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ neutron_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['neutron-server']
- - enable_neutron | bool
-
-- name: Checking free port for Neutron Server HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ neutron_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_neutron | bool
-
-- name: Checking free port for Nova API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ nova_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['nova-api']
- - enable_nova | bool
-
-- name: Checking free port for Nova API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ nova_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_nova | bool
-
-- name: Checking free port for Nova Metadata
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ nova_metadata_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['nova-api']
- - enable_nova | bool
-
-- name: Checking free port for Nova Metadata HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ nova_metadata_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_nova | bool
-
-- name: Checking free port for Nova NoVNC Proxy
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ nova_novncproxy_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['nova-novncproxy']
- - enable_nova | bool
- - nova_console == 'novnc'
-
-- name: Checking free port for Nova NoVNC HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ nova_novncproxy_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_nova | bool
- - nova_console == 'novnc'
-
-- name: Checking free port for Nova Spice HTML5 Proxy
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ nova_spicehtml5proxy_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['nova-spicehtml5proxy']
- - enable_nova | bool
- - nova_console == 'spice'
-
-- name: Checking free port for Nova Spice HTML5 HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ nova_spicehtml5proxy_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_nova | bool
- - nova_console == 'spice'
-
-- name: Checking free port for RabbitMQ
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ rabbitmq_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['rabbitmq']
- - enable_rabbitmq | bool
-
-- name: Checking free port for RabbitMQ Management
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ rabbitmq_management_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['rabbitmq']
- - enable_rabbitmq | bool
-
-- name: Checking free port for RabbitMQ Management HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ rabbitmq_management_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_rabbitmq | bool
-
-- name: Checking free port for RabbitMQ Cluster
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ rabbitmq_cluster_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['rabbitmq']
- - enable_rabbitmq | bool
-
-- name: Checking free port for RabbitMQ EPMD
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ rabbitmq_epmd_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['rabbitmq']
- - enable_rabbitmq | bool
-
-- name: Check if all rabbit hostnames are resolvable
- command: "getent ahostsv4 {{ hostvars[item]['ansible_hostname'] }}"
- changed_when: false
- register: rabbitmq_hostnames
- with_items: "{{ groups['rabbitmq'] }}"
- when: enable_rabbitmq | bool
-
-- fail: msg="Hostname has to resolve to IP address of api_interface"
- with_items: "{{ rabbitmq_hostnames.results }}"
- when:
- - "'{{ hostvars[item['item']]['ansible_' + hostvars[item['item']]['api_interface']]['ipv4']['address'] }}' not in '{{ item.stdout }}'"
- - enable_rabbitmq | bool
-
-- name: Checking free port for Mongodb
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ mongodb_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['mongodb']
- - enable_mongodb | bool
-
-- name: Checking free port for Mongodb Web
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ mongodb_web_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['mongodb']
- - enable_mongodb | bool
-
-- name: Checking free port for Mongodb HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ mongodb_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_mongodb | bool
-
-- name: Checking free port for Kibana Server
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
- port: "{{ kibana_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['kibana']
- - enable_kibana | bool
-
-- name: Checking free port for Rsync
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
- port: "873"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['swift-object-server']
- - enable_swift | bool
-
-- name: Checking free port for Swift Object Server
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ swift_object_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['swift-object-server']
- - enable_swift | bool
-
-- name: Checking free port for Swift Account Server
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ swift_account_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['swift-account-server']
- - enable_swift | bool
-
-- name: Checking free port for Swift Container Server
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ swift_container_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['swift-container-server']
- - enable_swift | bool
-
-- name: Checking free port for Swift Proxy Server
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ swift_proxy_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['swift-proxy-server']
- - enable_swift | bool
-
-- name: Checking free port for Swift Proxy Server HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ swift_proxy_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_swift | bool
-
-- name: Checking free port for RadosGW
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ rgw_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['ceph-rgw']
- - enable_ceph_rgw | bool
-
-- name: Checking free port for RadosGW HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ rgw_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_ceph_rgw | bool
-
-- name: Checking free port for Grafana server
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ grafana_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['grafana']
- - enable_grafana | bool
-
-- name: Checking free port for Grafana server HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ grafana_server_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_grafana | bool
-
-- name: Checking free port for Senlin API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ senlin_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['senlin-api']
- - enable_senlin | bool
-
-- name: Checking free port for Senlin API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ senlin_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_senlin | bool
-
-- name: Checking free port for Mistral API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ mistral_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['mistral-api']
- - enable_mistral | bool
-
-- name: Checking free port for Mistral API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ mistral_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_mistral | bool
-
-- name: Checking free port for Watcher API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ watcher_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['watcher-api']
- - enable_watcher | bool
-
-- name: Checking free port for Watcher API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ watcher_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_watcher | bool
-
-- name: Checking free port for Searchlight API
- wait_for:
- host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
- port: "{{ searchlight_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['searchlight-api']
- - enable_searchlight | bool
-
-- name: Checking free port for Searchlight API HAProxy
- wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ searchlight_api_port }}"
- connect_timeout: 1
- state: stopped
- when:
- - inventory_hostname in groups['haproxy']
- - enable_searchlight | bool
diff --git a/ansible/roles/prechecks/tasks/precheck.yml b/ansible/roles/prechecks/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/prechecks/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/prechecks/tasks/service_checks.yml b/ansible/roles/prechecks/tasks/service_checks.yml
deleted file mode 100644
index add1cc3188..0000000000
--- a/ansible/roles/prechecks/tasks/service_checks.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- name: Checking that libvirt is not running
- stat: path=/var/run/libvirt/libvirt-sock
- register: result
- failed_when: result.stat.exists
- when: inventory_hostname in groups['compute']
-
-- name: Checking Docker version
- command: docker version
- register: result
- changed_when: false
- failed_when: result | failed
- or (result.stdout | from_yaml).Server.Version | regex_replace('(\\d+\\.\\d+\\.\\d+).*', '\\1') | version_compare(docker_version_min, '<')
-
-# When MountFlags is set to shared, a signal bit configured on 20th bit of a number
-# We need to check the 20th bit. 2^20 = 1048576. So we are validating against it.
-- name: Checking if 'MountFlags' for docker service is set to 'shared'
- command: systemctl show docker
- register: result
- changed_when: false
- failed_when: result.stdout.find('MountFlags=1048576') == -1
- when:
- - (inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent'])
- - ansible_os_family == 'RedHat'
- or (ansible_distribution == 'Ubuntu' and ansible_distribution_version > '14.04')
-
-- name: Checking if '/run' mount flag is set to 'shared'
- command: awk '$5 == "/run" {print $7}' /proc/self/mountinfo
- register: result
- changed_when: false
- failed_when: result.stdout.find('shared') == -1
- when:
- - (inventory_hostname in groups['neutron-dhcp-agent']
- or inventory_hostname in groups['neutron-l3-agent']
- or inventory_hostname in groups['neutron-metadata-agent'])
- - ansible_distribution == 'Ubuntu' and ansible_distribution_version == '14.04'
-
-- name: Checking empty passwords in passwords.yml. Run kolla-genpwd if this task fails
- local_action: command grep '^[^#].*:\s*$' "{{ CONFIG_DIR }}/passwords.yml"
- register: result
- changed_when: false
- failed_when: result.stdout | regex_replace('(.*ssh_key.*)', '') | search(":")
-
-- name: Checking fernet_token_expiry in globals.yml. Update fernet_token_expiry to allowed value if this task fails
- run_once: true
- local_action: command awk '/^fernet_token_expiry/ { print $2 }' "{{ CONFIG_DIR }}/globals.yml"
- register: result
- changed_when: false
- failed_when: result.stdout | regex_replace('(60|120|180|240|300|360|600|720|900|1200|1800|3600|7200|10800|14400|21600|28800|43200|86400|604800)', '') | search(".+")
-
-- name: Checking number of network agents
- local_action: fail msg="Number of network agents are less than two when enabling agent ha"
- changed_when: false
- when:
- - enable_neutron_agent_ha | bool
- - groups['neutron-dhcp-agent'] | length < 2
- or groups['neutron-l3-agent'] | length < 2
-
-- name: Checking mongodb backend for ceilometer
- run_once: True
- local_action: fail msg="ceilometer_database_type set to 'mongodb' but mongodb is not enabled"
- changed_when: false
- when:
- - enable_ceilometer | bool
- - not enable_mongodb | bool
- - ceilometer_database_type == "mongodb"
-
-- name: Checking LVM volume group exists for Cinder
- command: "vgs {{ cinder_volume_group }}"
- register: result
- changed_when: false
- failed_when: result | failed
- when:
- - inventory_hostname in groups['cinder-volume']
- - enable_cinder | bool
- - enable_cinder_backend_lvm | bool
diff --git a/ansible/roles/prechecks/vars/main.yml b/ansible/roles/prechecks/vars/main.yml
deleted file mode 100644
index fd2237209d..0000000000
--- a/ansible/roles/prechecks/vars/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-docker_version_min: '1.10.0'
-docker_py_version_min: '1.6.0'
-ansible_version_min: '2.0.0'
diff --git a/ansible/roles/rabbitmq/defaults/main.yml b/ansible/roles/rabbitmq/defaults/main.yml
deleted file mode 100644
index 4fb19feb48..0000000000
--- a/ansible/roles/rabbitmq/defaults/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-project_name: "rabbitmq"
-
-####################
-# Docker
-####################
-rabbitmq_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-rabbitmq"
-rabbitmq_tag: "{{ openstack_release }}"
-rabbitmq_image_full: "{{ rabbitmq_image }}:{{ rabbitmq_tag }}"
-
-
-####################
-# Message-Broker
-####################
-rabbitmq_user: "openstack"
-rabbitmq_cluster_name: "openstack"
-rabbitmq_hostname: "{{ ansible_hostname }}"
diff --git a/ansible/roles/rabbitmq/meta/main.yml b/ansible/roles/rabbitmq/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/rabbitmq/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/rabbitmq/tasks/bootstrap.yml b/ansible/roles/rabbitmq/tasks/bootstrap.yml
deleted file mode 100644
index e5a5eb97dd..0000000000
--- a/ansible/roles/rabbitmq/tasks/bootstrap.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Creating rabbitmq volume
- kolla_docker:
- action: "create_volume"
- common_options: "{{ docker_common_options }}"
- name: "rabbitmq"
- register: rabbitmq_volume
-
-- name: Running RabbitMQ bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- RABBITMQ_CLUSTER_COOKIE: "{{ rabbitmq_cluster_cookie }}"
- image: "{{ rabbitmq_image_full }}"
- labels:
- BOOTSTRAP:
- name: "rabbitmq_bootstrap"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/rabbitmq/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "rabbitmq:/var/lib/rabbitmq/"
- when: rabbitmq_volume | changed
diff --git a/ansible/roles/rabbitmq/tasks/config.yml b/ansible/roles/rabbitmq/tasks/config.yml
deleted file mode 100644
index e104fb8bc9..0000000000
--- a/ansible/roles/rabbitmq/tasks/config.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "rabbitmq"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "rabbitmq"
-
-- name: Copying over rabbitmq configs
- template:
- src: "{{ item }}.j2"
- dest: "{{ node_config_directory }}/rabbitmq/{{ item }}"
- with_items:
- - "rabbitmq-env.conf"
- - "rabbitmq.config"
- - "rabbitmq-clusterer.config"
- - "definitions.json"
diff --git a/ansible/roles/rabbitmq/tasks/deploy.yml b/ansible/roles/rabbitmq/tasks/deploy.yml
deleted file mode 100644
index 98daa4021c..0000000000
--- a/ansible/roles/rabbitmq/tasks/deploy.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap.yml
-
-- include: start.yml
diff --git a/ansible/roles/rabbitmq/tasks/main.yml b/ansible/roles/rabbitmq/tasks/main.yml
deleted file mode 100644
index f7e4afef42..0000000000
--- a/ansible/roles/rabbitmq/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- include: "{{ action }}.yml"
- when: inventory_hostname in groups['rabbitmq']
diff --git a/ansible/roles/rabbitmq/tasks/precheck.yml b/ansible/roles/rabbitmq/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/rabbitmq/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/rabbitmq/tasks/pull.yml b/ansible/roles/rabbitmq/tasks/pull.yml
deleted file mode 100644
index cb289fabf5..0000000000
--- a/ansible/roles/rabbitmq/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling rabbitmq image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ rabbitmq_image_full }}"
- when: inventory_hostname in groups['rabbitmq']
diff --git a/ansible/roles/rabbitmq/tasks/reconfigure.yml b/ansible/roles/rabbitmq/tasks/reconfigure.yml
deleted file mode 100644
index 7524002b77..0000000000
--- a/ansible/roles/rabbitmq/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: rabbitmq, group: rabbitmq }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: rabbitmq, group: rabbitmq }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: rabbitmq, group: rabbitmq }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: rabbitmq, group: rabbitmq }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: rabbitmq, group: rabbitmq }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/rabbitmq/tasks/start.yml b/ansible/roles/rabbitmq/tasks/start.yml
deleted file mode 100644
index 9501545ff9..0000000000
--- a/ansible/roles/rabbitmq/tasks/start.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Starting rabbitmq container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ rabbitmq_image_full }}"
- name: "rabbitmq"
- volumes:
- - "{{ node_config_directory }}/rabbitmq/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "rabbitmq:/var/lib/rabbitmq/"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/rabbitmq/tasks/upgrade.yml b/ansible/roles/rabbitmq/tasks/upgrade.yml
deleted file mode 100644
index e76d3b8727..0000000000
--- a/ansible/roles/rabbitmq/tasks/upgrade.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Checking if rabbitmq container needs upgrading
- kolla_docker:
- action: "compare_image"
- common_options: "{{ docker_common_options }}"
- name: "rabbitmq"
- image: "{{ rabbitmq_image_full }}"
- when: inventory_hostname in groups['rabbitmq']
- register: rabbitmq_differs
-
-- include: config.yml
-
-- name: Find gospel node
- command: docker exec -t rabbitmq /usr/local/bin/rabbitmq_get_gospel_node
- changed_when: "{{ (gospel_node.stdout | from_json).changed }}"
- failed_when: "{{ (gospel_node.stdout | from_json).failed }}"
- register: gospel_node
- run_once: True
-
-- name: Stopping non-gospel nodes
- kolla_docker:
- action: "stop_container"
- common_options: "{{ docker_common_options }}"
- name: "rabbitmq"
- when:
- - rabbitmq_hostname != (gospel_node.stdout | from_json).hostname
- - rabbitmq_differs['result']
-
-- include: start.yml
- when:
- - rabbitmq_hostname == (gospel_node.stdout | from_json).hostname
- - rabbitmq_differs['result']
-
-- include: start.yml
- when:
- - rabbitmq_hostname != (gospel_node.stdout | from_json).hostname
- - rabbitmq_differs['result']
diff --git a/ansible/roles/rabbitmq/templates/definitions.json.j2 b/ansible/roles/rabbitmq/templates/definitions.json.j2
deleted file mode 100644
index fdb7267b51..0000000000
--- a/ansible/roles/rabbitmq/templates/definitions.json.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "vhosts": [
- {"name": "/"}
- ],
- "users": [
- {"name": "{{ rabbitmq_user }}", "password": "{{ rabbitmq_password }}", "tags": "administrator"}
- ],
- "permissions": [
- {"user": "{{ rabbitmq_user }}", "vhost": "/", "configure": ".*", "write": ".*", "read": ".*"}
- ],
- "policies":[
- {"vhost": "/", "name": "ha-all", "pattern": ".*", "apply-to": "all", "definition": {"ha-mode":"all"}, "priority":0}
- ]
-}
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq-clusterer.config.j2 b/ansible/roles/rabbitmq/templates/rabbitmq-clusterer.config.j2
deleted file mode 100644
index 34bf9b0dd7..0000000000
--- a/ansible/roles/rabbitmq/templates/rabbitmq-clusterer.config.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[
- {version, 1},
- {nodes, [
- {% for host in groups['rabbitmq'] %}
- {'rabbit@{{ hostvars[host]['ansible_hostname'] }}', disc}
- {%- if not loop.last -%},{%- endif %}
- {% endfor %}
- ]},
- {gospel,
- {node, 'rabbit@{{ hostvars[groups['rabbitmq'][0]]['ansible_hostname'] }}'}}
-].
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2 b/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2
deleted file mode 100644
index 564967cb9c..0000000000
--- a/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-RABBITMQ_NODENAME=rabbit
-{% if orchestration_engine == 'ANSIBLE' %}
-RABBITMQ_BOOT_MODULE=rabbit_clusterer
-RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-pa /usr/lib/rabbitmq/lib/{{ rabbitmq_version }}"
-{%- endif %}
-
-RABBITMQ_LOG_BASE=/var/log/kolla/rabbitmq
-
-# TODO(sdake, vhosakot)
-# erlang by default binds to wildcard (all interfaces) and can potentially
-# interfere with the neutron external or tenant networks. We should in theory
-# bind epmd to the host's IPv4 address to address the issue however this also
-# has issues and can crash erlang when it is compiled with IPv6 support.
-# See bugs:
-# https://bugs.launchpad.net/ubuntu/+source/erlang/+bug/1374109
-# https://bugs.launchpad.net/kolla/+bug/1562701
-# https://bugzilla.redhat.com/show_bug.cgi?id=1324922
-#export ERL_EPMD_ADDRESS={{ api_interface_address }}
-export ERL_EPMD_PORT={{ rabbitmq_epmd_port }}
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.config.j2 b/ansible/roles/rabbitmq/templates/rabbitmq.config.j2
deleted file mode 100644
index f0d85b0f8e..0000000000
--- a/ansible/roles/rabbitmq/templates/rabbitmq.config.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-[
- {kernel, [
- {inet_dist_use_interface, {% raw %}{{% endraw %}{{ api_interface_address | regex_replace('\.', ',') }}}},
- {inet_dist_listen_min, {{ rabbitmq_cluster_port }}},
- {inet_dist_listen_max, {{ rabbitmq_cluster_port }}}
- ]},
- {rabbit, [
- {tcp_listeners, [
- {"{{ api_interface_address }}", {{ rabbitmq_port }}}
- ]}{% if orchestration_engine == 'ANSIBLE' %},
- {cluster_partition_handling, autoheal}
- {%- endif %}
- ]},
- {rabbitmq_management, [
- {listener, [
- {ip, "{{ api_interface_address }}"},
- {port, {{ rabbitmq_management_port }}}
- ]},
- {load_definitions, "/etc/rabbitmq/definitions.json"}
- ]}{% if orchestration_engine == 'ANSIBLE' %},
- {rabbitmq_clusterer, [{config, "/etc/rabbitmq/rabbitmq-clusterer.config"}]}
-{%- endif %}
-].
-% EOF
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.json.j2 b/ansible/roles/rabbitmq/templates/rabbitmq.json.j2
deleted file mode 100644
index 07987def37..0000000000
--- a/ansible/roles/rabbitmq/templates/rabbitmq.json.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "command": "/usr/sbin/rabbitmq-server",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/rabbitmq-env.conf",
- "dest": "/etc/rabbitmq/rabbitmq-env.conf",
- "owner": "rabbitmq",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/rabbitmq.config",
- "dest": "/etc/rabbitmq/rabbitmq.config",
- "owner": "rabbitmq",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/rabbitmq-clusterer.config",
- "dest": "/etc/rabbitmq/rabbitmq-clusterer.config",
- "owner": "rabbitmq",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/definitions.json",
- "dest": "/etc/rabbitmq/definitions.json",
- "owner": "rabbitmq",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/rabbitmq",
- "owner": "rabbitmq:rabbitmq",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/rabbitmq",
- "owner": "rabbitmq:rabbitmq",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/rally/defaults/main.yml b/ansible/roles/rally/defaults/main.yml
deleted file mode 100644
index ced0999cb0..0000000000
--- a/ansible/roles/rally/defaults/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-project_name: "rally"
-
-
-########
-# Docker
-########
-rally_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-rally"
-rally_tag: "{{ openstack_release }}"
-rally_image_full: "{{ rally_image }}:{{ rally_tag }}"
-
-
-####################
-# Database
-####################
-rally_database_name: "rally"
-rally_database_user: "rally"
-rally_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
diff --git a/ansible/roles/rally/meta/main.yml b/ansible/roles/rally/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/rally/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/rally/tasks/bootstrap.yml b/ansible/roles/rally/tasks/bootstrap.yml
deleted file mode 100644
index 9875c3e57b..0000000000
--- a/ansible/roles/rally/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating rally database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ rally_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['rally'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating rally database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ rally_database_name }}'
- password='{{ rally_database_password }}'
- host='%'
- priv='{{ rally_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['rally'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/rally/tasks/bootstrap_service.yml b/ansible/roles/rally/tasks/bootstrap_service.yml
deleted file mode 100644
index 47a7c8f7f8..0000000000
--- a/ansible/roles/rally/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running rally bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ rally_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_rally"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/rally/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['rally'][0] }}"
diff --git a/ansible/roles/rally/tasks/config.yml b/ansible/roles/rally/tasks/config.yml
deleted file mode 100644
index 3304915de3..0000000000
--- a/ansible/roles/rally/tasks/config.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "rally"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "rally"
-
-- name: Copying over rally.conf
- merge_configs:
- vars:
- project_name: "rally"
- sources:
- - "{{ role_path }}/templates/rally.conf.j2"
- - "{{ node_custom_config }}/rally.conf"
- dest: "{{ node_config_directory }}/{{ item }}/rally.conf"
- with_items:
- - "rally"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/rally/policy.json"
- register: rally_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/rally/policy.json"
- dest: "{{ node_config_directory }}/rally/policy.json"
- when:
- rally_policy.stat.exists
diff --git a/ansible/roles/rally/tasks/deploy.yml b/ansible/roles/rally/tasks/deploy.yml
deleted file mode 100644
index 1dc8c04e30..0000000000
--- a/ansible/roles/rally/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-- include: bootstrap.yml
-- include: start.yml
diff --git a/ansible/roles/rally/tasks/main.yml b/ansible/roles/rally/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/rally/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/rally/tasks/precheck.yml b/ansible/roles/rally/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/rally/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/rally/tasks/pull.yml b/ansible/roles/rally/tasks/pull.yml
deleted file mode 100644
index bcada160d8..0000000000
--- a/ansible/roles/rally/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling rally image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ rally_image_full }}"
diff --git a/ansible/roles/rally/tasks/reconfigure.yml b/ansible/roles/rally/tasks/reconfigure.yml
deleted file mode 100644
index 79c3dde5ac..0000000000
--- a/ansible/roles/rally/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: rally, group: rally }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: rally, group: rally }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: rally, group: rally }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: rally, group: rally }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: rally, group: rally }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/rally/tasks/start.yml b/ansible/roles/rally/tasks/start.yml
deleted file mode 100644
index f95000fa17..0000000000
--- a/ansible/roles/rally/tasks/start.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Starting rally container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ rally_image_full }}"
- name: "rally"
- volumes:
- - "{{ node_config_directory }}/rally/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/rally/tasks/upgrade.yml b/ansible/roles/rally/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/rally/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/rally/templates/rally.conf.j2 b/ansible/roles/rally/templates/rally.conf.j2
deleted file mode 100644
index 853751871b..0000000000
--- a/ansible/roles/rally/templates/rally.conf.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-[DEFAULT]
-debug = {{ openstack_logging_debug }}
-log_file = rally.log
-use_stderr = False
-log_dir = /var/log/kolla/rally/
-
-[database]
-connection = mysql+pymysql://{{ rally_database_user }}:{{ rally_database_password }}@{{ rally_database_address }}/{{ rally_database_name }}
-max_retries = -1
diff --git a/ansible/roles/rally/templates/rally.json.j2 b/ansible/roles/rally/templates/rally.json.j2
deleted file mode 100644
index 3db0d88109..0000000000
--- a/ansible/roles/rally/templates/rally.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "sleep infinity",
- "config_files":[
- {
- "source": "{{ container_config_directory }}/rally.conf",
- "dest": "/etc/rally/rally.conf",
- "owner": "rally",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/rally",
- "owner": "rally:rally",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/sahara/defaults/main.yml b/ansible/roles/sahara/defaults/main.yml
deleted file mode 100644
index d545f68346..0000000000
--- a/ansible/roles/sahara/defaults/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-project_name: "sahara"
-
-
-####################
-# Database
-####################
-sahara_database_name: "sahara"
-sahara_database_user: "sahara"
-sahara_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-sahara_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-sahara-engine"
-sahara_engine_tag: "{{ openstack_release }}"
-sahara_engine_image_full: "{{ sahara_engine_image }}:{{ sahara_engine_tag }}"
-
-sahara_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-sahara-api"
-sahara_api_tag: "{{ openstack_release }}"
-sahara_api_image_full: "{{ sahara_api_image }}:{{ sahara_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-sahara_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ sahara_api_port }}/v1.1/%(tenant_id)s"
-sahara_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ sahara_api_port }}/v1.1/%(tenant_id)s"
-sahara_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ sahara_api_port }}/v1.1/%(tenant_id)s"
-
-sahara_logging_debug: "{{ openstack_logging_debug }}"
-
-sahara_keystone_user: "sahara"
-
-openstack_sahara_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
diff --git a/ansible/roles/sahara/meta/main.yml b/ansible/roles/sahara/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/sahara/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/sahara/tasks/bootstrap.yml b/ansible/roles/sahara/tasks/bootstrap.yml
deleted file mode 100644
index 6e98f125b8..0000000000
--- a/ansible/roles/sahara/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating sahara database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ sahara_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['sahara-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating sahara database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ sahara_database_name }}'
- password='{{ sahara_database_password }}'
- host='%'
- priv='{{ sahara_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['sahara-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/sahara/tasks/bootstrap_service.yml b/ansible/roles/sahara/tasks/bootstrap_service.yml
deleted file mode 100644
index e4b88fa4c4..0000000000
--- a/ansible/roles/sahara/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Running Sahara bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ sahara_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_sahara"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/sahara-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "sahara:/var/lib/sahara/"
- run_once: True
- delegate_to: "{{ groups['sahara-api'][0] }}"
diff --git a/ansible/roles/sahara/tasks/config.yml b/ansible/roles/sahara/tasks/config.yml
deleted file mode 100644
index 59cda53143..0000000000
--- a/ansible/roles/sahara/tasks/config.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "sahara-api"
- - "sahara-engine"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "sahara-api"
- - "sahara-engine"
-
-- name: Copying over sahara.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/sahara.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/sahara.conf"
- - "{{ node_custom_config }}/sahara/{{ item }}.conf"
- - "{{ node_custom_config }}/sahara/{{ inventory_hostname }}/sahara.conf"
- dest: "{{ node_config_directory }}/{{ item }}/sahara.conf"
- with_items:
- - "sahara-api"
- - "sahara-engine"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/sahara/policy.json"
- register: sahara_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/sahara/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "sahara-api"
- - "sahara-engine"
- when:
- sahara_policy.stat.exists
diff --git a/ansible/roles/sahara/tasks/deploy.yml b/ansible/roles/sahara/tasks/deploy.yml
deleted file mode 100644
index f109cf7360..0000000000
--- a/ansible/roles/sahara/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['sahara-api']
-
-- include: config.yml
- when: inventory_hostname in groups['sahara-api'] or
- inventory_hostname in groups['sahara-engine']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['sahara-api']
-
-- include: start.yml
- when: inventory_hostname in groups['sahara-api'] or
- inventory_hostname in groups['sahara-engine']
diff --git a/ansible/roles/sahara/tasks/main.yml b/ansible/roles/sahara/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/sahara/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/sahara/tasks/precheck.yml b/ansible/roles/sahara/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/sahara/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/sahara/tasks/pull.yml b/ansible/roles/sahara/tasks/pull.yml
deleted file mode 100644
index 8d0037f485..0000000000
--- a/ansible/roles/sahara/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling sahara-engine image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ sahara_engine_image_full }}"
- when: inventory_hostname in groups['sahara-engine']
-
-- name: Pulling sahara-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ sahara_api_image_full }}"
- when: inventory_hostname in groups['sahara-api']
diff --git a/ansible/roles/sahara/tasks/reconfigure.yml b/ansible/roles/sahara/tasks/reconfigure.yml
deleted file mode 100644
index 890f10645b..0000000000
--- a/ansible/roles/sahara/tasks/reconfigure.yml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: sahara_api, group: sahara-api }
- - { name: sahara_engine, group: sahara-engine }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: sahara_api, group: sahara-api }
- - { name: sahara_engine, group: sahara-engine }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: sahara_api, group: sahara-api }
- - { name: sahara_engine, group: sahara-engine }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: sahara_api, group: sahara-api },
- { name: sahara_engine, group: sahara-engine },]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: sahara_api, group: sahara-api },
- { name: sahara_engine, group: sahara-engine },]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/sahara/tasks/register.yml b/ansible/roles/sahara/tasks/register.yml
deleted file mode 100644
index 7c031e0932..0000000000
--- a/ansible/roles/sahara/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Sahara service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=sahara
- service_type=data_processing
- description='Sahara Data Processing'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_sahara_auth }}' }}"
- -e "{'openstack_sahara_auth':{{ openstack_sahara_auth }}}"
- register: sahara_endpoint
- changed_when: "{{ sahara_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (sahara_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: sahara_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ sahara_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ sahara_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ sahara_public_endpoint }}'}
-
-- name: Creating the Sahara project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=sahara
- password={{ sahara_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_sahara_auth }}' }}"
- -e "{'openstack_sahara_auth':{{ openstack_sahara_auth }}}"
- register: sahara_user
- changed_when: "{{ sahara_user.stdout.find('localhost | SUCCESS => ') != -1 and (sahara_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: sahara_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/sahara/tasks/start.yml b/ansible/roles/sahara/tasks/start.yml
deleted file mode 100644
index bb7b22d6bf..0000000000
--- a/ansible/roles/sahara/tasks/start.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Starting sahara-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ sahara_api_image_full }}"
- name: "sahara_api"
- volumes:
- - "{{ node_config_directory }}/sahara-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "sahara:/var/lib/sahara/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['sahara-api']
-
-- name: Starting sahara-engine container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ sahara_engine_image_full }}"
- name: "sahara_engine"
- volumes:
- - "{{ node_config_directory }}/sahara-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "sahara:/var/lib/sahara/"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['sahara-engine']
diff --git a/ansible/roles/sahara/tasks/upgrade.yml b/ansible/roles/sahara/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/sahara/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/sahara/templates/sahara-api.json.j2 b/ansible/roles/sahara/templates/sahara-api.json.j2
deleted file mode 100644
index 33e45f49ec..0000000000
--- a/ansible/roles/sahara/templates/sahara-api.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "sahara-api --config-file /etc/sahara/sahara.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/sahara.conf",
- "dest": "/etc/sahara/sahara.conf",
- "owner": "sahara",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/sahara/templates/sahara-engine.json.j2 b/ansible/roles/sahara/templates/sahara-engine.json.j2
deleted file mode 100644
index f677b503d5..0000000000
--- a/ansible/roles/sahara/templates/sahara-engine.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "sahara-engine --config-file /etc/sahara/sahara.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/sahara.conf",
- "dest": "/etc/sahara/sahara.conf",
- "owner": "sahara",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/lib/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- },
- {
- "path": "/var/log/kolla/sahara",
- "owner": "sahara:sahara",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/sahara/templates/sahara.conf.j2 b/ansible/roles/sahara/templates/sahara.conf.j2
deleted file mode 100644
index 8b116b63a7..0000000000
--- a/ansible/roles/sahara/templates/sahara.conf.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-[DEFAULT]
-debug = {{ sahara_logging_debug }}
-log_dir = /var/log/kolla/sahara
-port = {{ sahara_api_port }}
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-use_neutron = True
-api_workers = {{ openstack_service_workers }}
-use_floating_ips = False
-use_namespaces = True
-use_rootwrap = True
-
-[database]
-connection = mysql+pymysql://{{ sahara_database_user }}:{{ sahara_database_password }}@{{ sahara_database_address }}/{{ sahara_database_name }}
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-project_domain_name = default
-project_name = service
-user_domain_name = default
-username = {{ sahara_keystone_user }}
-password = {{ sahara_keystone_password }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
-
-[service_credentials]
-auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-region_name = {{ openstack_region_name }}
-password = {{ sahara_keystone_password }}
-username = {{ sahara_keystone_user }}
-project_name = service
-project_domain_id = default
-user_domain_id = default
-auth_type = password
-
-
-[profiler]
-enabled = False
diff --git a/ansible/roles/searchlight/defaults/main.yml b/ansible/roles/searchlight/defaults/main.yml
deleted file mode 100644
index 3030943633..0000000000
--- a/ansible/roles/searchlight/defaults/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-project_name: "searchlight"
-
-####################
-# Elasticsearch
-####################
-searchlight_elasticsearch_url: "{{ kolla_internal_fqdn }}:{{ elasticsearch_port }}"
-
-####################
-# Docker
-####################
-searchlight_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-searchlight-listener"
-searchlight_listener_tag: "{{ openstack_release }}"
-searchlight_listener_image_full: "{{ searchlight_listener_image }}:{{ searchlight_listener_tag }}"
-
-searchlight_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-searchlight-api"
-searchlight_api_tag: "{{ openstack_release }}"
-searchlight_api_image_full: "{{ searchlight_api_image }}:{{ searchlight_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-searchlight_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ searchlight_api_port }}"
-searchlight_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ searchlight_api_port }}"
-searchlight_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ searchlight_api_port }}"
-
-searchlight_logging_debug: "{{ openstack_logging_debug }}"
-
-searchlight_keystone_user: "searchlight"
-
-openstack_searchlight_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/searchlight/meta/main.yml b/ansible/roles/searchlight/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/searchlight/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/searchlight/tasks/bootstrap.yml b/ansible/roles/searchlight/tasks/bootstrap.yml
deleted file mode 100644
index a537e12f78..0000000000
--- a/ansible/roles/searchlight/tasks/bootstrap.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: bootstrap_service.yml
diff --git a/ansible/roles/searchlight/tasks/bootstrap_service.yml b/ansible/roles/searchlight/tasks/bootstrap_service.yml
deleted file mode 100644
index 7f114924ba..0000000000
--- a/ansible/roles/searchlight/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Searchlight bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ searchlight_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_searchlight"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/searchlight-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['searchlight-api'][0] }}"
diff --git a/ansible/roles/searchlight/tasks/config.yml b/ansible/roles/searchlight/tasks/config.yml
deleted file mode 100644
index 1bec9a7ac5..0000000000
--- a/ansible/roles/searchlight/tasks/config.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "searchlight-api"
- - "searchlight-listener"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "searchlight-api"
- - "searchlight-listener"
-
-- name: Copying over searchlight.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/searchlight.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/searchlight.conf"
- - "{{ node_custom_config }}/searchlight/{{ inventory_hostname }}/searchlight.conf"
- dest: "{{ node_config_directory }}/{{ item }}/searchlight.conf"
- with_items:
- - "searchlight-api"
- - "searchlight-listener"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/searchlight/policy.json"
- register: searchlight_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/searchlight/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "searchlight-api"
- - "searchlight-listener"
- when:
- searchlight_policy.stat.exists
diff --git a/ansible/roles/searchlight/tasks/deploy.yml b/ansible/roles/searchlight/tasks/deploy.yml
deleted file mode 100644
index ea73ba9f38..0000000000
--- a/ansible/roles/searchlight/tasks/deploy.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['searchlight']
-
-- include: config.yml
- when: inventory_hostname in groups['searchlight']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['searchlight']
-
-- include: start.yml
- when: inventory_hostname in groups['searchlight']
diff --git a/ansible/roles/searchlight/tasks/main.yml b/ansible/roles/searchlight/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/searchlight/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/searchlight/tasks/precheck.yml b/ansible/roles/searchlight/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/searchlight/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/searchlight/tasks/pull.yml b/ansible/roles/searchlight/tasks/pull.yml
deleted file mode 100644
index 7389653810..0000000000
--- a/ansible/roles/searchlight/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling searchlight-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ searchlight_api_image_full }}"
- when: inventory_hostname in groups['searchlight-api']
-
-- name: Pulling searchlight-listener image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ searchlight_listener_image_full }}"
- when: inventory_hostname in groups['searchlight-listener']
diff --git a/ansible/roles/searchlight/tasks/reconfigure.yml b/ansible/roles/searchlight/tasks/reconfigure.yml
deleted file mode 100644
index c1a1403fc8..0000000000
--- a/ansible/roles/searchlight/tasks/reconfigure.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: searchlight_api, group: searchlight-api }
- - { name: searchlight_listener, group: searchlight-listener }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: searchlight_api, group: searchlight-api }
- - { name: searchlight_listener, group: searchlight-listener }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: searchlight_api, group: searchlight-api }
- - { name: searchlight_listener, group: searchlight-listener }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: searchlight_api, group: searchlight-api },
- { name: searchlight_listener, group: searchlight-listener }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: searchlight_api, group: searchlight-api },
- { name: searchlight_listener, group: searchlight-listener }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/searchlight/tasks/register.yml b/ansible/roles/searchlight/tasks/register.yml
deleted file mode 100644
index 4724e7b96d..0000000000
--- a/ansible/roles/searchlight/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Searchlight service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=searchlight
- service_type=search
- description='Openstack Index Service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_searchlight_auth }}' }}"
- -e "{'openstack_searchlight_auth':{{ openstack_searchlight_auth }}}"
- register: searchlight_endpoint
- changed_when: "{{ searchlight_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (searchlight_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: searchlight_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ searchlight_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ searchlight_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ searchlight_public_endpoint }}'}
-
-- name: Creating the Searchlight project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=searchlight
- password={{ searchlight_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_searchlight_auth }}' }}"
- -e "{'openstack_searchlight_auth':{{ openstack_searchlight_auth }}}"
- register: searchlight_user
- changed_when: "{{ searchlight_user.stdout.find('localhost | SUCCESS => ') != -1 and (searchlight_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: searchlight_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/searchlight/tasks/start.yml b/ansible/roles/searchlight/tasks/start.yml
deleted file mode 100644
index f97e789d0f..0000000000
--- a/ansible/roles/searchlight/tasks/start.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Starting searchlight-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ searchlight_api_image_full }}"
- name: "searchlight_api"
- volumes:
- - "{{ node_config_directory }}/searchlight-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['searchlight-api']
-
-- name: Starting searchlight-listener container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ searchlight_listener_image_full }}"
- name: "searchlight_listener"
- volumes:
- - "{{ node_config_directory }}/searchlight-listener/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['searchlight-listener']
diff --git a/ansible/roles/searchlight/tasks/upgrade.yml b/ansible/roles/searchlight/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/searchlight/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/searchlight/templates/searchlight-api.json.j2 b/ansible/roles/searchlight/templates/searchlight-api.json.j2
deleted file mode 100644
index a003291ba8..0000000000
--- a/ansible/roles/searchlight/templates/searchlight-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "searchlight-api",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/searchlight.conf",
- "dest": "/etc/searchlight/searchlight.conf",
- "owner": "searchlight",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/searchlight",
- "owner": "searchlight:searchlight",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/searchlight/templates/searchlight-listener.json.j2 b/ansible/roles/searchlight/templates/searchlight-listener.json.j2
deleted file mode 100644
index 5f06a04993..0000000000
--- a/ansible/roles/searchlight/templates/searchlight-listener.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "searchlight-listener",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/searchlight.conf",
- "dest": "/etc/searchlight/searchlight.conf",
- "owner": "searchlight",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/searchlight",
- "owner": "searchlight:searchlight",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/searchlight/templates/searchlight.conf.j2 b/ansible/roles/searchlight/templates/searchlight.conf.j2
deleted file mode 100644
index 3317fca1fe..0000000000
--- a/ansible/roles/searchlight/templates/searchlight.conf.j2
+++ /dev/null
@@ -1,90 +0,0 @@
-[DEFAULT]
-debug = {{ searchlight_logging_debug }}
-
-policy_file = /etc/searchlight/policy.json
-log_dir = /var/log/kolla/searchlight
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[api]
-port = {{ searchlight_api_port }}
-bind_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-public_endpoint = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ searchlight_api_port }}
-
-[elasticsearch]
-hosts = {{ searchlight_elasticsearch_url }}
-
-[listener]
-workers = {{ openstack_service_workers }}
-notifications_pool = searchlight-listener
-
-[paste_deploy]
-flavor = keystone
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-project_domain_name = default
-project_name = service
-user_domain_name = default
-username = {{ searchlight_keystone_user }}
-password = {{ searchlight_keystone_password }}
-auth_type = password
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[service_credentials]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-region_name = {{ openstack_region_name }}
-project_domain_name = default
-project_name = service
-user_domain_name = default
-username = {{ searchlight_keystone_user }}
-password = {{ searchlight_keystone_password }}
-auth_type = password
-auth_plugin = password
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[resource_plugin:os_designate_zone]
-enabled = False
-
-[resource_plugin:os_designate_recordset]
-enabled = False
-
-{% if not enable_swift | bool %}
-[resource_plugin:os_swift_account]
-enabled = False
-
-[resource_plugin:os_swift_container]
-enabled = False
-
-[resource_plugin:os_swift_object]
-enabled = False
-{% endif %}
-
-{% if not enable_nova | bool %}
-[resource_plugin:os_server_nova]
-enabled = False
-{% endif %}
-
-{% if not enable_glance | bool %}
-[resource_plugin:os_glance_image]
-enabled = False
-
-[resource_plugin:os_glance_metadef]
-enabled = False
-{% endif %}
-
-{% if not enable_neutron | bool %}
-[resource_plugin:os_neutron_net]
-enabled = False
-
-[resource_plugin:os_neutron_port]
-enabled = False
-{% endif %}
diff --git a/ansible/roles/senlin/defaults/main.yml b/ansible/roles/senlin/defaults/main.yml
deleted file mode 100644
index 811ad25c78..0000000000
--- a/ansible/roles/senlin/defaults/main.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-project_name: "senlin"
-
-####################
-# Database
-####################
-senlin_database_name: "senlin"
-senlin_database_user: "senlin"
-senlin_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-senlin_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-senlin-engine"
-senlin_engine_tag: "{{ openstack_release }}"
-senlin_engine_image_full: "{{ senlin_engine_image }}:{{ senlin_engine_tag }}"
-
-senlin_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-senlin-api"
-senlin_api_tag: "{{ openstack_release }}"
-senlin_api_image_full: "{{ senlin_api_image }}:{{ senlin_api_tag }}"
-
-
-####################
-# OpenStack
-####################
-senlin_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ senlin_api_port }}"
-senlin_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ senlin_api_port }}"
-senlin_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ senlin_api_port }}"
-
-senlin_logging_debug: "{{ openstack_logging_debug }}"
-
-senlin_keystone_user: "senlin"
-
-openstack_senlin_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/senlin/meta/main.yml b/ansible/roles/senlin/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/senlin/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/senlin/tasks/bootstrap.yml b/ansible/roles/senlin/tasks/bootstrap.yml
deleted file mode 100644
index 95a599b179..0000000000
--- a/ansible/roles/senlin/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Senlin database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ senlin_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['senlin-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Senlin database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ senlin_database_name }}'
- password='{{ senlin_database_password }}'
- host='%'
- priv='{{ senlin_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['senlin-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/senlin/tasks/bootstrap_service.yml b/ansible/roles/senlin/tasks/bootstrap_service.yml
deleted file mode 100644
index ff3504d957..0000000000
--- a/ansible/roles/senlin/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Senlin bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ senlin_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_senlin"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/senlin-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['senlin-api'][0] }}"
diff --git a/ansible/roles/senlin/tasks/config.yml b/ansible/roles/senlin/tasks/config.yml
deleted file mode 100644
index 54773529b4..0000000000
--- a/ansible/roles/senlin/tasks/config.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "senlin-api"
- - "senlin-engine"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "senlin-api"
- - "senlin-engine"
-
-- name: Copying over senlin.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/senlin.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/database.conf"
- - "{{ node_custom_config }}/messaging.conf"
- - "{{ node_custom_config }}/senlin.conf"
- - "{{ node_custom_config }}/senlin/{{ item }}.conf"
- - "{{ node_custom_config }}/senlin/{{ inventory_hostname }}/senlin.conf"
- dest: "{{ node_config_directory }}/{{ item }}/senlin.conf"
- with_items:
- - "senlin-api"
- - "senlin-engine"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/senlin/policy.json"
- register: senlin_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/senlin/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "senlin-api"
- - "senlin-engine"
- when:
- senlin_policy.stat.exists
diff --git a/ansible/roles/senlin/tasks/deploy.yml b/ansible/roles/senlin/tasks/deploy.yml
deleted file mode 100644
index 46aedd6e1f..0000000000
--- a/ansible/roles/senlin/tasks/deploy.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['senlin-api']
-
-- include: config.yml
- when: inventory_hostname in groups['senlin-api'] or
- inventory_hostname in groups['senlin-engine']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['senlin-api']
-
-- include: start.yml
- when: inventory_hostname in groups['senlin-api'] or
- inventory_hostname in groups['senlin-engine']
diff --git a/ansible/roles/senlin/tasks/main.yml b/ansible/roles/senlin/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/senlin/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/senlin/tasks/precheck.yml b/ansible/roles/senlin/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/senlin/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/senlin/tasks/pull.yml b/ansible/roles/senlin/tasks/pull.yml
deleted file mode 100644
index 3a21aee052..0000000000
--- a/ansible/roles/senlin/tasks/pull.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Pulling senlin-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ senlin_api_image_full }}"
- when: inventory_hostname in groups['senlin-api']
-
-- name: Pulling senlin-engine image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ senlin_engine_image_full }}"
- when: inventory_hostname in groups['senlin-engine']
diff --git a/ansible/roles/senlin/tasks/reconfigure.yml b/ansible/roles/senlin/tasks/reconfigure.yml
deleted file mode 100644
index ffd25dc392..0000000000
--- a/ansible/roles/senlin/tasks/reconfigure.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: senlin_api, group: senlin-api }
- - { name: senlin_engine, group: senlin-engine }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: senlin_api, group: senlin-api }
- - { name: senlin_engine, group: senlin-engine }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: senlin_api, group: senlin-api }
- - { name: senlin_engine, group: senlin-engine }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: senlin_api, group: senlin-api },
- { name: senlin_engine, group: senlin-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: senlin_api, group: senlin-api },
- { name: senlin_engine, group: senlin-engine }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/senlin/tasks/register.yml b/ansible/roles/senlin/tasks/register.yml
deleted file mode 100644
index c9d0185f4f..0000000000
--- a/ansible/roles/senlin/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Senlin service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=senlin
- service_type=clustering
- description='Senlin Clustering Service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_senlin_auth }}' }}"
- -e "{'openstack_senlin_auth':{{ openstack_senlin_auth }}}"
- register: senlin_endpoint
- changed_when: "{{ senlin_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (senlin_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: senlin_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ senlin_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ senlin_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ senlin_public_endpoint }}'}
-
-- name: Creating the Senlin project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=senlin
- password={{ senlin_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_senlin_auth }}' }}"
- -e "{'openstack_senlin_auth':{{ openstack_senlin_auth }}}"
- register: senlin_user
- changed_when: "{{ senlin_user.stdout.find('localhost | SUCCESS => ') != -1 and (senlin_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: senlin_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/senlin/tasks/start.yml b/ansible/roles/senlin/tasks/start.yml
deleted file mode 100644
index f539bb3cd5..0000000000
--- a/ansible/roles/senlin/tasks/start.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Starting senlin-engine container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ senlin_engine_image_full }}"
- name: "senlin_engine"
- volumes:
- - "{{ node_config_directory }}/senlin-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['senlin-engine']
-
-- name: Starting senlin-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ senlin_api_image_full }}"
- name: "senlin_api"
- volumes:
- - "{{ node_config_directory }}/senlin-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['senlin-api']
diff --git a/ansible/roles/senlin/tasks/upgrade.yml b/ansible/roles/senlin/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/senlin/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/senlin/templates/senlin-api.json.j2 b/ansible/roles/senlin/templates/senlin-api.json.j2
deleted file mode 100644
index 0e287719b8..0000000000
--- a/ansible/roles/senlin/templates/senlin-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "senlin-api --config-file /etc/senlin/senlin.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/senlin.conf",
- "dest": "/etc/senlin/senlin.conf",
- "owner": "senlin",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/senlin",
- "owner": "senlin:senlin",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/senlin/templates/senlin-engine.json.j2 b/ansible/roles/senlin/templates/senlin-engine.json.j2
deleted file mode 100644
index bc643475ed..0000000000
--- a/ansible/roles/senlin/templates/senlin-engine.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "senlin-engine --config-file /etc/senlin/senlin.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/senlin.conf",
- "dest": "/etc/senlin/senlin.conf",
- "owner": "senlin",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/senlin",
- "owner": "senlin:senlin",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/senlin/templates/senlin.conf.j2 b/ansible/roles/senlin/templates/senlin.conf.j2
deleted file mode 100644
index 6018444d26..0000000000
--- a/ansible/roles/senlin/templates/senlin.conf.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-[DEFAULT]
-debug = {{ senlin_logging_debug }}
-
-log_dir = /var/log/kolla/senlin
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if service_name == 'senlin-api' %}
-[senlin_api]
-bind_host = {{ api_interface_address }}
-bind_port = {{ senlin_api_port }}
-{% endif %}
-
-[authentication]
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-service_username = {{ senlin_keystone_user }}
-service_password = {{ senlin_keystone_password }}
-service_project_name = service
-service_user_domain = default
-service_project_domain = default
-
-[database]
-connection = mysql+pymysql://{{ senlin_database_user }}:{{ senlin_database_password }}@{{ senlin_database_address }}/{{ senlin_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ senlin_keystone_user }}
-password = {{ senlin_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[oslo_messaging_notifications]
-driver = noop
diff --git a/ansible/roles/swift/defaults/main.yml b/ansible/roles/swift/defaults/main.yml
deleted file mode 100644
index e10f8cda99..0000000000
--- a/ansible/roles/swift/defaults/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-project_name: "swift"
-
-####################
-# Docker
-####################
-swift_proxy_server_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-proxy-server"
-swift_proxy_server_tag: "{{ openstack_release }}"
-swift_proxy_server_image_full: "{{ swift_proxy_server_image }}:{{ swift_proxy_server_tag }}"
-
-swift_account_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-account"
-swift_account_tag: "{{ openstack_release }}"
-swift_account_image_full: "{{ swift_account_image }}:{{ swift_account_tag }}"
-
-swift_container_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-container"
-swift_container_tag: "{{ openstack_release }}"
-swift_container_image_full: "{{ swift_container_image }}:{{ swift_container_tag }}"
-
-swift_object_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-object"
-swift_object_tag: "{{ openstack_release }}"
-swift_object_image_full: "{{ swift_object_image }}:{{ swift_object_tag }}"
-
-swift_object_expirer_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-object-expirer"
-swift_object_expirer_tag: "{{ openstack_release }}"
-swift_object_expirer_image_full: "{{ swift_object_expirer_image }}:{{ swift_object_expirer_tag }}"
-
-swift_rsyncd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-rsyncd"
-swift_rsyncd_tag: "{{ openstack_release }}"
-swift_rsyncd_image_full: "{{ swift_rsyncd_image }}:{{ swift_rsyncd_tag }}"
-
-####################
-# OpenStack
-####################
-swift_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ swift_proxy_server_port }}/v1"
-swift_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ swift_proxy_server_port }}/v1/AUTH_%(tenant_id)s"
-swift_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ swift_proxy_server_port }}/v1/AUTH_%(tenant_id)s"
-
-swift_logging_debug: "{{ openstack_logging_debug }}"
-
-swift_keystone_user: "swift"
-swift_admin_tenant_name: "admin"
-
-swift_devices_mount_point: "/srv/node"
-swift_devices_match_mode: "strict"
-swift_devices_name: "KOLLA_SWIFT_DATA"
-
-openstack_swift_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/swift/meta/main.yml b/ansible/roles/swift/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/swift/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/swift/tasks/check.yml b/ansible/roles/swift/tasks/check.yml
deleted file mode 100644
index e655ac0e7d..0000000000
--- a/ansible/roles/swift/tasks/check.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Swift sanity checks
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_sanity
- -a "service=swift
- project=service
- user=admin
- password={{ swift_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_swift_auth }}' }}"
- -e "{'openstack_swift_auth':{{ openstack_swift_auth }}}"
- register: swift_sanity
- changed_when: "{{ swift_sanity.stdout.find('localhost | SUCCESS => ') != -1 and (swift_sanity.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: swift_sanity.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- when: kolla_enable_sanity_swift | bool
diff --git a/ansible/roles/swift/tasks/config.yml b/ansible/roles/swift/tasks/config.yml
deleted file mode 100644
index 3b81038ee9..0000000000
--- a/ansible/roles/swift/tasks/config.yml
+++ /dev/null
@@ -1,180 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "swift"
- - "swift-account-auditor"
- - "swift-account-reaper"
- - "swift-account-replicator"
- - "swift-account-server"
- - "swift-container-auditor"
- - "swift-container-replicator"
- - "swift-container-server"
- - "swift-container-updater"
- - "swift-object-auditor"
- - "swift-object-expirer"
- - "swift-object-replicator"
- - "swift-object-server"
- - "swift-object-updater"
- - "swift-proxy-server"
- - "swift-rsyncd"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "swift-account-auditor"
- - "swift-account-reaper"
- - "swift-account-replicator"
- - "swift-account-server"
- - "swift-container-auditor"
- - "swift-container-replicator"
- - "swift-container-server"
- - "swift-container-updater"
- - "swift-object-auditor"
- - "swift-object-expirer"
- - "swift-object-replicator"
- - "swift-object-server"
- - "swift-object-updater"
- - "swift-proxy-server"
- - "swift-rsyncd"
-
-- name: Copying over swift.conf
- merge_configs:
- vars:
- service_name: "swift-{{ item }}"
- sources:
- - "{{ role_path }}/templates/swift.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/swift.conf"
- - "{{ node_custom_config }}/swift/{{ item }}.conf"
- - "{{ node_custom_config }}/swift/{{ inventory_hostname }}/{{ item }}.conf"
- dest: "{{ node_config_directory }}/swift-{{ item }}/swift.conf"
- with_items:
- - "account-auditor"
- - "account-reaper"
- - "account-replicator"
- - "account-server"
- - "container-auditor"
- - "container-replicator"
- - "container-server"
- - "container-updater"
- - "object-auditor"
- - "object-expirer"
- - "object-replicator"
- - "object-server"
- - "object-updater"
- - "proxy-server"
-
-- name: Copying over account-*.conf
- merge_configs:
- vars:
- service_name: "swift-{{ item }}"
- sources:
- - "{{ role_path }}/templates/account.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/swift/account.conf"
- - "{{ node_custom_config }}/swift/{{ item }}.conf"
- - "{{ node_custom_config }}/swift/{{ inventory_hostname }}/{{ item }}.conf"
- dest: "{{ node_config_directory }}/swift-{{ item }}/{{ item }}.conf"
- with_items:
- - "account-auditor"
- - "account-reaper"
- - "account-replicator"
- - "account-server"
-
-- name: Copying over container-*.conf
- merge_configs:
- vars:
- service_name: "swift-{{ item }}"
- sources:
- - "{{ role_path }}/templates/container.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/swift/container.conf"
- - "{{ node_custom_config }}/swift/{{ item }}.conf"
- - "{{ node_custom_config }}/swift/{{ inventory_hostname }}/{{ item }}.conf"
- dest: "{{ node_config_directory }}/swift-{{ item }}/{{ item }}.conf"
- with_items:
- - "container-auditor"
- - "container-replicator"
- - "container-server"
- - "container-updater"
-
-- name: Copying over object-*.conf
- merge_configs:
- vars:
- service_name: "swift-{{ item }}"
- sources:
- - "{{ role_path }}/templates/object.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/swift/object.conf"
- - "{{ node_custom_config }}/swift/{{ item }}.conf"
- - "{{ node_custom_config }}/swift/{{ inventory_hostname }}/{{ item }}.conf"
- dest: "{{ node_config_directory }}/swift-{{ item }}/{{ item }}.conf"
- with_items:
- - "object-auditor"
- - "object-expirer"
- - "object-replicator"
- - "object-server"
- - "object-updater"
-
-- name: Copying over proxy-server.conf
- merge_configs:
- vars:
- service_name: "swift-{{ item }}"
- sources:
- - "{{ role_path }}/templates/proxy-server.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/swift/{{ item }}.conf"
- - "{{ node_custom_config }}/swift/{{ inventory_hostname }}/{{ item }}.conf"
- dest: "{{ node_config_directory }}/swift-{{ item }}/{{ item }}.conf"
- with_items:
- - "proxy-server"
-
-- name: Copying over rsyncd.conf
- template:
- src: "rsyncd.conf.j2"
- dest: "{{ node_config_directory }}/swift-rsyncd/rsyncd.conf"
-
-- name: Copying over Swift ring files
- copy:
- src: "{{ node_custom_config }}/swift/{{ item }}"
- dest: "{{ node_config_directory }}/swift/{{ item }}"
- backup: yes
- with_items:
- - "account.builder"
- - "account.ring.gz"
- - "container.builder"
- - "container.ring.gz"
- - "object.builder"
- - "object.ring.gz"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/swift/policy.json"
- register: swift_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/swift/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "swift-account-auditor"
- - "swift-account-reaper"
- - "swift-account-replicator"
- - "swift-account-server"
- - "swift-container-auditor"
- - "swift-container-replicator"
- - "swift-container-server"
- - "swift-container-updater"
- - "swift-object-auditor"
- - "swift-object-expirer"
- - "swift-object-replicator"
- - "swift-object-server"
- - "swift-object-updater"
- - "swift-proxy-server"
- when:
- swift_policy.stat.exists
diff --git a/ansible/roles/swift/tasks/deploy.yml b/ansible/roles/swift/tasks/deploy.yml
deleted file mode 100644
index 0a9ab4fed3..0000000000
--- a/ansible/roles/swift/tasks/deploy.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server'] or
- inventory_hostname in groups['swift-proxy-server']
-
-- include: config.yml
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server'] or
- inventory_hostname in groups['swift-proxy-server']
-
-- include: start.yml
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server'] or
- inventory_hostname in groups['swift-proxy-server']
-
-- include: check.yml
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server'] or
- inventory_hostname in groups['swift-proxy-server']
diff --git a/ansible/roles/swift/tasks/main.yml b/ansible/roles/swift/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/swift/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/swift/tasks/precheck.yml b/ansible/roles/swift/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/swift/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/swift/tasks/pull.yml b/ansible/roles/swift/tasks/pull.yml
deleted file mode 100644
index 65cd6bb343..0000000000
--- a/ansible/roles/swift/tasks/pull.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Pulling rsyncd image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_rsyncd_image_full }}"
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server']
-
-- name: Pulling swift-proxy-server image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_proxy_server_image_full }}"
- when: inventory_hostname in groups['swift-proxy-server']
-
-- name: Pulling swift-account image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_account_image_full }}"
- when: inventory_hostname in groups['swift-account-server']
-
-- name: Pulling swift-container image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_container_image_full }}"
- when: inventory_hostname in groups['swift-container-server']
-
-- name: Pulling swift-object image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_object_image_full }}"
- when: inventory_hostname in groups['swift-object-server']
diff --git a/ansible/roles/swift/tasks/reconfigure.yml b/ansible/roles/swift/tasks/reconfigure.yml
deleted file mode 100644
index 3cb6ebe548..0000000000
--- a/ansible/roles/swift/tasks/reconfigure.yml
+++ /dev/null
@@ -1,144 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: swift_account_server, group: swift-account-server }
- - { name: swift_account_auditor, group: swift-account-server }
- - { name: swift_account_replicator, group: swift-account-server }
- - { name: swift_account_reaper, group: swift-account-server }
- - { name: swift_rsyncd, group: swift-account-server }
- - { name: swift_container_server, group: swift-container-server }
- - { name: swift_container_auditor, group: swift-container-server }
- - { name: swift_container_replicator, group: swift-container-server }
- - { name: swift_container_updater, group: swift-container-server }
- - { name: swift_rsyncd, group: swift-container-server }
- - { name: swift_object_server, group: swift-object-server }
- - { name: swift_object_auditor, group: swift-object-server }
- - { name: swift_object_replicator, group: swift-object-server }
- - { name: swift_object_updater, group: swift-object-server }
- - { name: swift_object_expirer, group: swift-object-server }
- - { name: swift_rsyncd, group: swift-object-server }
- - { name: swift_proxy_server, group: swift-proxy-server }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: swift_account_server, group: swift-account-server }
- - { name: swift_account_auditor, group: swift-account-server }
- - { name: swift_account_replicator, group: swift-account-server }
- - { name: swift_account_reaper, group: swift-account-server }
- - { name: swift_rsyncd, group: swift-account-server }
- - { name: swift_container_server, group: swift-container-server }
- - { name: swift_container_auditor, group: swift-container-server }
- - { name: swift_container_replicator, group: swift-container-server }
- - { name: swift_container_updater, group: swift-container-server }
- - { name: swift_rsyncd, group: swift-container-server }
- - { name: swift_object_server, group: swift-object-server }
- - { name: swift_object_auditor, group: swift-object-server }
- - { name: swift_object_replicator, group: swift-object-server }
- - { name: swift_object_updater, group: swift-object-server }
- - { name: swift_object_expirer, group: swift-object-server }
- - { name: swift_rsyncd, group: swift-object-server }
- - { name: swift_proxy_server, group: swift-proxy-server }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: swift_account_server, group: swift-account-server }
- - { name: swift_account_auditor, group: swift-account-server }
- - { name: swift_account_replicator, group: swift-account-server }
- - { name: swift_account_reaper, group: swift-account-server }
- - { name: swift_rsyncd, group: swift-account-server }
- - { name: swift_container_server, group: swift-container-server }
- - { name: swift_container_auditor, group: swift-container-server }
- - { name: swift_container_replicator, group: swift-container-server }
- - { name: swift_container_updater, group: swift-container-server }
- - { name: swift_rsyncd, group: swift-container-server }
- - { name: swift_object_server, group: swift-object-server }
- - { name: swift_object_auditor, group: swift-object-server }
- - { name: swift_object_replicator, group: swift-object-server }
- - { name: swift_object_updater, group: swift-object-server }
- - { name: swift_object_expirer, group: swift-object-server }
- - { name: swift_rsyncd, group: swift-object-server }
- - { name: swift_proxy_server, group: swift-proxy-server }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: swift_account_server, group: swift-account-server },
- { name: swift_account_auditor, group: swift-account-server },
- { name: swift_account_replicator, group: swift-account-server },
- { name: swift_account_reaper, group: swift-account-server },
- { name: swift_rsyncd, group: swift-account-server },
- { name: swift_container_server, group: swift-container-server },
- { name: swift_container_auditor, group: swift-container-server },
- { name: swift_container_replicator, group: swift-container-server },
- { name: swift_container_updater, group: swift-container-server },
- { name: swift_rsyncd, group: swift-container-server },
- { name: swift_object_server, group: swift-object-server },
- { name: swift_object_auditor, group: swift-object-server },
- { name: swift_object_replicator, group: swift-object-server },
- { name: swift_object_updater, group: swift-object-server },
- { name: swift_object_expirer, group: swift-object-server },
- { name: swift_rsyncd, group: swift-object-server },
- { name: swift_proxy_server, group: swift-proxy-server }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: swift_account_server, group: swift-account-server },
- { name: swift_account_auditor, group: swift-account-server },
- { name: swift_account_replicator, group: swift-account-server },
- { name: swift_account_reaper, group: swift-account-server },
- { name: swift_rsyncd, group: swift-account-server },
- { name: swift_container_server, group: swift-container-server },
- { name: swift_container_auditor, group: swift-container-server },
- { name: swift_container_replicator, group: swift-container-server },
- { name: swift_container_updater, group: swift-container-server },
- { name: swift_rsyncd, group: swift-container-server },
- { name: swift_object_server, group: swift-object-server },
- { name: swift_object_auditor, group: swift-object-server },
- { name: swift_object_replicator, group: swift-object-server },
- { name: swift_object_updater, group: swift-object-server },
- { name: swift_object_expirer, group: swift-object-server },
- { name: swift_rsyncd, group: swift-object-server },
- { name: swift_proxy_server, group: swift-proxy-server }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/swift/tasks/register.yml b/ansible/roles/swift/tasks/register.yml
deleted file mode 100644
index 628e8aea6f..0000000000
--- a/ansible/roles/swift/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Swift service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=swift
- service_type=object-store
- description='Openstack Object Storage'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_swift_auth }}' }}"
- -e "{'openstack_swift_auth':{{ openstack_swift_auth }}}"
- register: swift_endpoint
- changed_when: "{{ swift_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (swift_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: swift_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ swift_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ swift_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ swift_public_endpoint }}'}
-
-- name: Creating the Swift project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user={{ swift_keystone_user }}
- password={{ swift_keystone_password }}
- role={{ swift_admin_tenant_name }}
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_swift_auth }}' }}"
- -e "{'openstack_swift_auth':{{ openstack_swift_auth }}}"
- register: swift_user
- changed_when: "{{ swift_user.stdout.find('localhost | SUCCESS => ') != -1 and (swift_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: swift_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/swift/tasks/start.yml b/ansible/roles/swift/tasks/start.yml
deleted file mode 100644
index 39b08f174b..0000000000
--- a/ansible/roles/swift/tasks/start.yml
+++ /dev/null
@@ -1,242 +0,0 @@
----
-- name: Looking up disks for Swift
- command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
- -m find_disks
- -a "name={{ swift_devices_name }}
- match_mode={{ swift_devices_match_mode }}"
- register: swift_disk_lookup
- changed_when: "{{ swift_disk_lookup.stdout.find('localhost | SUCCESS => ') != -1 and
- (swift_disk_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: swift_disk_lookup.stdout.split()[2] != 'SUCCESS'
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server']
-
-- name: Reading data from variable
- set_fact:
- swift_disks: "{{ (swift_disk_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server']
-
-- name: Mounting Swift disks
- mount:
- src: "UUID={{ item.fs_uuid }}"
- fstype: xfs
- opts: "noatime,nodiratime,nobarrier,logbufs=8"
- state: mounted
- name: "{{ swift_devices_mount_point }}/{{ item['fs_label'] }}"
- with_items: "{{ swift_disks }}"
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server']
-
-- name: Starting swift-rsyncd container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_rsyncd_image_full }}"
- name: "swift_rsyncd"
- volumes:
- - "{{ node_config_directory }}/swift-rsyncd/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-account-server'] or
- inventory_hostname in groups['swift-container-server'] or
- inventory_hostname in groups['swift-object-server']
-
-- name: Starting swift-account-server container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- name: "swift_account_server"
- image: "{{ swift_account_image_full }}"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-account-server/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-account-server']
-
-- name: Starting swift-account-auditor container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_account_image_full }}"
- name: "swift_account_auditor"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-account-auditor/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-account-server']
-
-- name: Starting swift-account-replicator container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_account_image_full }}"
- name: "swift_account_replicator"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-account-replicator/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-account-server']
-
-- name: Starting swift-account-reaper container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_account_image_full }}"
- name: "swift_account_reaper"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-account-reaper/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-account-server']
-
-- name: Starting swift-container-server container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_container_image_full }}"
- name: "swift_container_server"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-container-server/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-container-server']
-
-- name: Starting swift-container-auditor container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_container_image_full }}"
- name: "swift_container_auditor"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-container-auditor/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-container-server']
-
-- name: Starting swift-container-replicator container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_container_image_full }}"
- name: "swift_container_replicator"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-container-replicator/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-container-server']
-
-- name: Starting swift-container-updater container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_container_image_full }}"
- name: "swift_container_updater"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-container-updater/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-container-server']
-
-- name: Starting swift-object-server container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_object_image_full }}"
- name: "swift_object_server"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-object-server/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-object-server']
-
-- name: Starting swift-object-auditor container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_object_image_full }}"
- name: "swift_object_auditor"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-object-auditor/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-object-server']
-
-- name: Starting swift-object-replicator container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_object_image_full }}"
- name: "swift_object_replicator"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-object-replicator/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-object-server']
-
-- name: Starting swift-object-updater container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_object_image_full }}"
- name: "swift_object_updater"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-object-updater/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-object-server']
-
-- name: Starting swift-object-expirer container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_object_expirer_image_full }}"
- name: "swift_object_expirer"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-object-expirer/:{{ container_config_directory }}/:ro"
- - "{{ swift_devices_mount_point }}:{{ swift_devices_mount_point }}"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-object-server']
-
-- name: Starting swift-proxy-server container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ swift_proxy_server_image_full }}"
- name: "swift_proxy_server"
- volumes:
- - "{{ node_config_directory }}/swift/:/var/lib/kolla/swift/:ro"
- - "{{ node_config_directory }}/swift-proxy-server/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "heka_socket:/var/lib/kolla/heka/"
- when: inventory_hostname in groups['swift-proxy-server']
diff --git a/ansible/roles/swift/tasks/stop_and_start.yml b/ansible/roles/swift/tasks/stop_and_start.yml
deleted file mode 100644
index fca139f573..0000000000
--- a/ansible/roles/swift/tasks/stop_and_start.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# NOTE(pbourke): because the images that are used to start Swift containers
-# were refactored for Mitaka, we need to completely remove the existing
-# containers or we get a conflict when attempting to start the new ones.
-- name: "Cleaning out old Swift containers"
- kolla_docker:
- name: "{{ item }}"
- # TODO(pbourke): Swift recommend using a SIGHUP to gracefully stop the
- # services. Update once kolla_docker supports this.
- action: remove_container
- with_items:
- - "swift_rsyncd"
- - "swift_account_server"
- - "swift_account_auditor"
- - "swift_account_replicator"
- - "swift_account_reaper"
- - "swift_container_server"
- - "swift_container_auditor"
- - "swift_container_replicator"
- - "swift_container_updater"
- - "swift_object_server"
- - "swift_object_auditor"
- - "swift_object_replicator"
- - "swift_object_updater"
- - "swift_object_expirer"
- - "swift_proxy_server"
-
-- include: start.yml
diff --git a/ansible/roles/swift/tasks/upgrade.yml b/ansible/roles/swift/tasks/upgrade.yml
deleted file mode 100644
index 667a6d45e8..0000000000
--- a/ansible/roles/swift/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: stop_and_start.yml
diff --git a/ansible/roles/swift/templates/account.conf.j2 b/ansible/roles/swift/templates/account.conf.j2
deleted file mode 100644
index df8437b946..0000000000
--- a/ansible/roles/swift/templates/account.conf.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-[DEFAULT]
-bind_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-bind_port = {{ swift_account_server_port }}
-devices = {{ swift_devices_mount_point }}
-mount_check = false
-log_address = /var/lib/kolla/heka/log
-log_name = {{ service_name }}
-log_facility = LOG_LOCAL0
-log_level = INFO
-
-[pipeline:main]
-pipeline = account-server
-
-[app:account-server]
-use = egg:swift#account
-
-{% if service_name == 'swift-account-auditor' %}
-[account-auditor]
-{% endif %}
-
-{% if service_name == 'swift-account-replicator' %}
-[account-replicator]
-sync_module = {replication_ip}:{meta}:account
-{% endif %}
-
-{% if service_name == 'swift-account-reaper' %}
-[account-reaper]
-{% endif %}
diff --git a/ansible/roles/swift/templates/container.conf.j2 b/ansible/roles/swift/templates/container.conf.j2
deleted file mode 100644
index 2daad6bc15..0000000000
--- a/ansible/roles/swift/templates/container.conf.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-[DEFAULT]
-bind_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-bind_port = {{ swift_container_server_port }}
-devices = {{ swift_devices_mount_point }}
-mount_check = false
-log_address = /var/lib/kolla/heka/log
-log_name = {{ service_name }}
-log_facility = LOG_LOCAL0
-log_level = INFO
-
-[pipeline:main]
-pipeline = container-server
-
-[app:container-server]
-use = egg:swift#container
-
-{% if service_name == 'swift-container-auditor' %}
-[container-auditor]
-{% endif %}
-
-{% if service_name == 'swift-container-replicator' %}
-[container-replicator]
-sync_module = {replication_ip}:{meta}:container
-{% endif %}
-
-{% if service_name == 'swift-container-updater' %}
-[container-updater]
-{% endif %}
diff --git a/ansible/roles/swift/templates/object.conf.j2 b/ansible/roles/swift/templates/object.conf.j2
deleted file mode 100644
index bd8b25fa92..0000000000
--- a/ansible/roles/swift/templates/object.conf.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-[DEFAULT]
-bind_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-bind_port = {{ swift_object_server_port }}
-devices = {{ swift_devices_mount_point }}
-mount_check = false
-
-log_address = /var/lib/kolla/heka/log
-log_name = {{ service_name }}
-log_facility = LOG_LOCAL0
-log_level = INFO
-
-[pipeline:main]
-{% if service_name == 'swift-object-expirer' %}
-pipeline = proxy-server
-{% else %}
-pipeline = object-server
-{% endif %}
-
-[app:object-server]
-use = egg:swift#object
-
-{% if service_name == 'swift-object-auditor' %}
-[object-auditor]
-{% endif %}
-
-[object-replicator]
-{% if service_name == 'swift-object-replicator' %}
-sync_module = {replication_ip}:{meta}:object
-{% endif %}
-
-{% if service_name == 'swift-object-updater' %}
-[object-updater]
-{% endif %}
-
-{% if service_name == 'swift-object-expirer' %}
-[object-expirer]
-
-[app:proxy-server]
-use = egg:swift#proxy
-{% endif %}
diff --git a/ansible/roles/swift/templates/proxy-server.conf.j2 b/ansible/roles/swift/templates/proxy-server.conf.j2
deleted file mode 100644
index 40fc02a97a..0000000000
--- a/ansible/roles/swift/templates/proxy-server.conf.j2
+++ /dev/null
@@ -1,67 +0,0 @@
-[DEFAULT]
-bind_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-bind_port = {{ swift_proxy_server_port }}
-
-log_address = /var/lib/kolla/heka/log
-log_name = {{ service_name }}
-log_facility = LOG_LOCAL0
-log_level = INFO
-
-[pipeline:main]
-pipeline = catch_errors gatekeeper healthcheck cache container_sync bulk ratelimit authtoken keystoneauth slo dlo proxy-server
-
-[app:proxy-server]
-use = egg:swift#proxy
-allow_account_management = true
-account_autocreate = true
-
-[filter:cache]
-use = egg:swift#memcache
-memcache_servers = {% for host in groups['swift-proxy-server'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-
-[filter:proxy-logging]
-use = egg:swift#proxy_logging
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ swift_keystone_user }}
-password = {{ swift_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-
-[filter:keystoneauth]
-use = egg:swift#keystoneauth
-operator_roles = admin,user
-
-[filter:container_sync]
-use = egg:swift#container_sync
-
-[filter:bulk]
-use = egg:swift#bulk
-
-[filter:ratelimit]
-use = egg:swift#ratelimit
-
-[filter:gatekeeper]
-use = egg:swift#gatekeeper
-
-[filter:slo]
-use = egg:swift#slo
-
-[filter:dlo]
-use = egg:swift#dlo
diff --git a/ansible/roles/swift/templates/rsyncd.conf.j2 b/ansible/roles/swift/templates/rsyncd.conf.j2
deleted file mode 100644
index 38a40b1098..0000000000
--- a/ansible/roles/swift/templates/rsyncd.conf.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-uid = swift
-gid = swift
-address = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-
-{% if inventory_hostname in groups['swift-account-server'] %}
-[account]
-max connections = 2
-path = {{ swift_devices_mount_point }}
-read only = false
-lock file = /var/lock/account.lock
-{% endif %}
-
-{% if inventory_hostname in groups['swift-container-server'] %}
-[container]
-max connections = 4
-path = {{ swift_devices_mount_point }}
-read only = false
-lock file = /var/lock/container.lock
-{% endif %}
-
-{% if inventory_hostname in groups['swift-object-server'] %}
-[object]
-max connections = 8
-path = {{ swift_devices_mount_point }}
-read only = false
-lock file = /var/lock/object.lock
-{% endif %}
diff --git a/ansible/roles/swift/templates/swift-account-auditor.json.j2 b/ansible/roles/swift/templates/swift-account-auditor.json.j2
deleted file mode 100644
index ccdd21a429..0000000000
--- a/ansible/roles/swift/templates/swift-account-auditor.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "swift-account-auditor /etc/swift/account-auditor.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/account.ring.gz",
- "dest": "/etc/swift/account.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/account-auditor.conf",
- "dest": "/etc/swift/account-auditor.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-account-reaper.json.j2 b/ansible/roles/swift/templates/swift-account-reaper.json.j2
deleted file mode 100644
index 9471bfd502..0000000000
--- a/ansible/roles/swift/templates/swift-account-reaper.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "swift-account-reaper /etc/swift/account-reaper.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/account.ring.gz",
- "dest": "/etc/swift/account.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/account-reaper.conf",
- "dest": "/etc/swift/account-reaper.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-account-replicator.json.j2 b/ansible/roles/swift/templates/swift-account-replicator.json.j2
deleted file mode 100644
index a079cd13af..0000000000
--- a/ansible/roles/swift/templates/swift-account-replicator.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "swift-account-replicator /etc/swift/account-replicator.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/account.ring.gz",
- "dest": "/etc/swift/account.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/account-replicator.conf",
- "dest": "/etc/swift/account-replicator.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-account-server.json.j2 b/ansible/roles/swift/templates/swift-account-server.json.j2
deleted file mode 100644
index 516c836d88..0000000000
--- a/ansible/roles/swift/templates/swift-account-server.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "swift-account-server /etc/swift/account-server.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/account.ring.gz",
- "dest": "/etc/swift/account.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/account-server.conf",
- "dest": "/etc/swift/account-server.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-container-auditor.json.j2 b/ansible/roles/swift/templates/swift-container-auditor.json.j2
deleted file mode 100644
index 05ed8105a2..0000000000
--- a/ansible/roles/swift/templates/swift-container-auditor.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "swift-container-auditor /etc/swift/container-auditor.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/container-auditor.conf",
- "dest": "/etc/swift/container-auditor.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-container-replicator.json.j2 b/ansible/roles/swift/templates/swift-container-replicator.json.j2
deleted file mode 100644
index 5821930a29..0000000000
--- a/ansible/roles/swift/templates/swift-container-replicator.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "swift-container-replicator /etc/swift/container-replicator.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/container-replicator.conf",
- "dest": "/etc/swift/container-replicator.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-container-server.json.j2 b/ansible/roles/swift/templates/swift-container-server.json.j2
deleted file mode 100644
index 538001ee3d..0000000000
--- a/ansible/roles/swift/templates/swift-container-server.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "command": "swift-container-server /etc/swift/container-server.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/container-server.conf",
- "dest": "/etc/swift/container-server.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-container-updater.json.j2 b/ansible/roles/swift/templates/swift-container-updater.json.j2
deleted file mode 100644
index 123c911cea..0000000000
--- a/ansible/roles/swift/templates/swift-container-updater.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "swift-container-updater /etc/swift/container-updater.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/account.ring.gz",
- "dest": "/etc/swift/account.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/container-updater.conf",
- "dest": "/etc/swift/container-updater.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-object-auditor.json.j2 b/ansible/roles/swift/templates/swift-object-auditor.json.j2
deleted file mode 100644
index 46b1ad5463..0000000000
--- a/ansible/roles/swift/templates/swift-object-auditor.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "swift-object-auditor /etc/swift/object-auditor.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/object.ring.gz",
- "dest": "/etc/swift/object.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/object-auditor.conf",
- "dest": "/etc/swift/object-auditor.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-object-expirer.json.j2 b/ansible/roles/swift/templates/swift-object-expirer.json.j2
deleted file mode 100644
index 5ebb4889e9..0000000000
--- a/ansible/roles/swift/templates/swift-object-expirer.json.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "command": "swift-object-expirer /etc/swift/object-expirer.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/account.ring.gz",
- "dest": "/etc/swift/account.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/object.ring.gz",
- "dest": "/etc/swift/object.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/object-expirer.conf",
- "dest": "/etc/swift/object-expirer.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-object-replicator.json.j2 b/ansible/roles/swift/templates/swift-object-replicator.json.j2
deleted file mode 100644
index 8fc5eb1594..0000000000
--- a/ansible/roles/swift/templates/swift-object-replicator.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "swift-object-replicator /etc/swift/object-replicator.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/object.ring.gz",
- "dest": "/etc/swift/object.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/object-replicator.conf",
- "dest": "/etc/swift/object-replicator.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-object-server.json.j2 b/ansible/roles/swift/templates/swift-object-server.json.j2
deleted file mode 100644
index 31913d4bb3..0000000000
--- a/ansible/roles/swift/templates/swift-object-server.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "swift-object-server /etc/swift/object-server.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/object.ring.gz",
- "dest": "/etc/swift/object.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/object-server.conf",
- "dest": "/etc/swift/object-server.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-object-updater.json.j2 b/ansible/roles/swift/templates/swift-object-updater.json.j2
deleted file mode 100644
index d34130640a..0000000000
--- a/ansible/roles/swift/templates/swift-object-updater.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "swift-object-updater /etc/swift/object-updater.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/object.ring.gz",
- "dest": "/etc/swift/object.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/object-updater.conf",
- "dest": "/etc/swift/object-updater.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-proxy-server.json.j2 b/ansible/roles/swift/templates/swift-proxy-server.json.j2
deleted file mode 100644
index 39e43fb5b8..0000000000
--- a/ansible/roles/swift/templates/swift-proxy-server.json.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "command": "swift-proxy-server /etc/swift/proxy-server.conf --verbose",
- "config_files": [
- {
- "source": "/var/lib/kolla/swift/account.ring.gz",
- "dest": "/etc/swift/account.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/container.ring.gz",
- "dest": "/etc/swift/container.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "/var/lib/kolla/swift/object.ring.gz",
- "dest": "/etc/swift/object.ring.gz",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/swift.conf",
- "dest": "/etc/swift/swift.conf",
- "owner": "swift",
- "perm": "0640"
- },
- {
- "source": "{{ container_config_directory }}/proxy-server.conf",
- "dest": "/etc/swift/proxy-server.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift-rsyncd.json.j2 b/ansible/roles/swift/templates/swift-rsyncd.json.j2
deleted file mode 100644
index fb63f58f0f..0000000000
--- a/ansible/roles/swift/templates/swift-rsyncd.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "/usr/bin/rsync --daemon --no-detach --port={{swift_rsync_port}} --config=/etc/rsyncd.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/rsyncd.conf",
- "dest": "/etc/rsyncd.conf",
- "owner": "swift",
- "perm": "0640"
- }
- ]
-}
diff --git a/ansible/roles/swift/templates/swift.conf.j2 b/ansible/roles/swift/templates/swift.conf.j2
deleted file mode 100644
index 917a32cf1c..0000000000
--- a/ansible/roles/swift/templates/swift.conf.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-[swift-hash]
-swift_hash_path_suffix = {{ swift_hash_path_suffix }}
-swift_hash_path_prefix = {{ swift_hash_path_prefix }}
diff --git a/ansible/roles/telegraf/defaults/main.yml b/ansible/roles/telegraf/defaults/main.yml
deleted file mode 100644
index c11bf0a379..0000000000
--- a/ansible/roles/telegraf/defaults/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-project_name: "telegraf"
-
-####################
-# Docker
-####################
-telegraf_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-telegraf"
-telegraf_tag: "{{ openstack_release }}"
-telegraf_image_full: "{{ telegraf_image }}:{{ telegraf_tag }}"
-
-
-####################
-# Protocols
-####################
-elasticsearch_proto: "http"
-haproxy_proto: "http"
-influxdb_proto: "http"
-rabbitmq_proto: "http"
diff --git a/ansible/roles/telegraf/meta/main.yml b/ansible/roles/telegraf/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/telegraf/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/telegraf/tasks/config.yml b/ansible/roles/telegraf/tasks/config.yml
deleted file mode 100644
index 19f8697601..0000000000
--- a/ansible/roles/telegraf/tasks/config.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "telegraf"
- - "telegraf/config"
-
-- name: Copying over default config.json files
- template:
- src: "telegraf.json.j2"
- dest: "{{ node_config_directory }}/telegraf/config.json"
-
-- name: Copying over telegraf config file
- template:
- src: "telegraf.conf.j2"
- dest: "{{ node_config_directory }}/telegraf/telegraf.conf"
-
-- name: Copying over telegraf plugin files
- copy:
- src: "{{ item }}"
- dest: "{{ node_config_directory }}/telegraf/config"
- with_fileglob:
- - "{{ role_path }}/templates/config/*.conf"
diff --git a/ansible/roles/telegraf/tasks/deploy.yml b/ansible/roles/telegraf/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/telegraf/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/telegraf/tasks/main.yml b/ansible/roles/telegraf/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/telegraf/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/telegraf/tasks/precheck.yml b/ansible/roles/telegraf/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/telegraf/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/telegraf/tasks/pull.yml b/ansible/roles/telegraf/tasks/pull.yml
deleted file mode 100644
index e1a5b45703..0000000000
--- a/ansible/roles/telegraf/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling telegraf image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ telegraf_image_full }}"
diff --git a/ansible/roles/telegraf/tasks/reconfigure.yml b/ansible/roles/telegraf/tasks/reconfigure.yml
deleted file mode 100644
index 68a27d8a57..0000000000
--- a/ansible/roles/telegraf/tasks/reconfigure.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "telegraf"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec telegraf /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "telegraf"
- action: "get_container_env"
- register: container_envs
-
-- name: Remove the containers
- kolla_docker:
- name: "telegraf"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "telegraf"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
diff --git a/ansible/roles/telegraf/tasks/start.yml b/ansible/roles/telegraf/tasks/start.yml
deleted file mode 100644
index b50cec4530..0000000000
--- a/ansible/roles/telegraf/tasks/start.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Starting telegraf container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ telegraf_image_full }}"
- name: "telegraf"
- environment:
- HOST_PROC: "/rootfs/proc"
- HOST_SYS: "/rootfs/sys"
- volumes:
- - "{{ node_config_directory }}/telegraf/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "/sys:/rootfs/sys:ro"
- - "/proc:/rootfs/proc:ro"
- - "/var/run/docker.sock:/var/run/docker.sock:ro"
- pid_mode: "host"
diff --git a/ansible/roles/telegraf/tasks/upgrade.yml b/ansible/roles/telegraf/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/telegraf/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/telegraf/templates/telegraf.conf.j2 b/ansible/roles/telegraf/templates/telegraf.conf.j2
deleted file mode 100644
index 1f81ad91cb..0000000000
--- a/ansible/roles/telegraf/templates/telegraf.conf.j2
+++ /dev/null
@@ -1,53 +0,0 @@
-[global_tags]
-[agent]
- interval = "10s"
- round_interval = true
- metric_batch_size = 1000
- metric_buffer_limit = 10000
- collection_jitter = "0s"
- flush_interval = "10s"
- flush_jitter = "0s"
- debug = false
- quiet = false
- hostname = ""
- omit_hostname = false
-[[outputs.influxdb]]
- urls = [{% for host in groups['influxdb'] %}"{{ influxdb_proto }}://{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address']}}:{{ influxdb_http_port }}"{% if not loop.last %},{% endif %}{% endfor %}]
- database = "telegraf" # required
- retention_policy = "default"
- write_consistency = "any"
- timeout = "5s"
-[[inputs.cpu]]
- percpu = true
- totalcpu = true
- fielddrop = ["time_*"]
-[[inputs.disk]]
- ignore_fs = ["tmpfs", "devtmpfs"]
-[[inputs.diskio]]
-[[inputs.kernel]]
-[[inputs.mem]]
-[[inputs.processes]]
-[[inputs.swap]]
-[[inputs.system]]
-[[inputs.net]]
- interfaces = []
-{% if inventory_hostname in groups['haproxy'] and enable_haproxy | bool %}
-[[inputs.haproxy]]
- servers = ["{{ haproxy_proto }}://{{ haproxy_user }}:{{ haproxy_password }}@{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ haproxy_stats_port }}"]
-{% endif %}
-{% if inventory_hostname in groups['memcached'] and enable_memcached | bool %}
-[[inputs.memcached]]
- servers = ["{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ memcached_port }}"]
-{% endif %}
-{% if inventory_hostname in groups['elasticsearch'] and enable_elasticsearch | bool %}
-[[inputs.elasticsearch]]
- servers = ["{{ elasticsearch_proto }}://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ elasticsearch_port }}"]
- local = true
- cluster_health = true
-{% endif %}
-{% if inventory_hostname in groups['rabbitmq'] and enable_rabbitmq | bool %}
-[[inputs.rabbitmq]]
- url = "{{ rabbitmq_proto }}://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ rabbitmq_management_port }}"
- username = "{{ rabbitmq_user }}"
- password = "{{ rabbitmq_password }}"
-{% endif %}
diff --git a/ansible/roles/telegraf/templates/telegraf.json.j2 b/ansible/roles/telegraf/templates/telegraf.json.j2
deleted file mode 100644
index 74ed2e928e..0000000000
--- a/ansible/roles/telegraf/templates/telegraf.json.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "command": "telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d/",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/telegraf.conf",
- "dest": "/etc/telegraf/telegraf.conf",
- "owner": "telegraf",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/config/*",
- "dest": "/etc/telegraf/telegraf.d/",
- "owner": "telegraf",
- "perm": "0600"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/telegraf",
- "owner": "telegraf:telegraf",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/tempest/defaults/main.yml b/ansible/roles/tempest/defaults/main.yml
deleted file mode 100644
index cebab6c55a..0000000000
--- a/ansible/roles/tempest/defaults/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-project_name: "tempest"
-
-
-########
-# Docker
-########
-tempest_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-tempest"
-tempest_tag: "{{ openstack_release }}"
-tempest_image_full: "{{ tempest_image }}:{{ tempest_tag }}"
-
-
-###########################
-# Tempest Required Resource
-###########################
-image_url: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
-
-tempest_image_id:
-tempest_image_alt_id: "{{ tempest_image_id }}"
-tempest_flavor_ref_id:
-tempest_flavor_ref_alt_id: "{{ tempest_flavor_ref_id }}"
-tempest_public_network_id:
-tempest_floating_network_name:
diff --git a/ansible/roles/tempest/meta/main.yml b/ansible/roles/tempest/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/tempest/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/tempest/tasks/config.yml b/ansible/roles/tempest/tasks/config.yml
deleted file mode 100644
index 6422270d3a..0000000000
--- a/ansible/roles/tempest/tasks/config.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "tempest"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "tempest"
-
-- name: Copying over tempest.conf
- merge_configs:
- vars:
- project_name: "tempest"
- sources:
- - "{{ role_path }}/templates/tempest.conf.j2"
- - "{{ node_custom_config }}/tempest.conf"
- dest: "{{ node_config_directory }}/{{ item }}/tempest.conf"
- with_items:
- - "tempest"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/tempest/policy.json"
- register: tempest_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/tempest/policy.json"
- dest: "{{ node_config_directory }}/tempest/policy.json"
- when:
- tempest_policy.stat.exists
diff --git a/ansible/roles/tempest/tasks/deploy.yml b/ansible/roles/tempest/tasks/deploy.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/tempest/tasks/deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/tempest/tasks/main.yml b/ansible/roles/tempest/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/tempest/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/tempest/tasks/precheck.yml b/ansible/roles/tempest/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/tempest/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/tempest/tasks/pull.yml b/ansible/roles/tempest/tasks/pull.yml
deleted file mode 100644
index a31b625354..0000000000
--- a/ansible/roles/tempest/tasks/pull.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Pulling tempest image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ tempest_image_full }}"
diff --git a/ansible/roles/tempest/tasks/reconfigure.yml b/ansible/roles/tempest/tasks/reconfigure.yml
deleted file mode 100644
index 0aeab552f0..0000000000
--- a/ansible/roles/tempest/tasks/reconfigure.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: tempest, group: tempest }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: tempest, group: tempest }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: tempest, group: tempest }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: tempest, group: tempest }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: tempest, group: tempest }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/tempest/tasks/start.yml b/ansible/roles/tempest/tasks/start.yml
deleted file mode 100644
index 97b6552c54..0000000000
--- a/ansible/roles/tempest/tasks/start.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Starting tempest container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ tempest_image_full }}"
- name: "tempest"
- volumes:
- - "{{ node_config_directory }}/tempest/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
diff --git a/ansible/roles/tempest/tasks/upgrade.yml b/ansible/roles/tempest/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/tempest/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/tempest/templates/tempest.conf.j2 b/ansible/roles/tempest/templates/tempest.conf.j2
deleted file mode 100644
index a0dc90cb59..0000000000
--- a/ansible/roles/tempest/templates/tempest.conf.j2
+++ /dev/null
@@ -1,73 +0,0 @@
-[DEFAULT]
-debug = {{ openstack_logging_debug }}
-log_file = tempest.log
-use_stderr = False
-log_dir = /var/log/kolla/tempest/
-
-[auth]
-admin_username = admin
-admin_password = {{ keystone_admin_password }}
-admin_project_name = admin
-admin_domain_name = default
-
-
-[dashboard]
-dashboard_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}
-login_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}/auth/login/
-
-[service_available]
-cinder = {{ enable_cinder }}
-neutron = {{ enable_neutron }}
-glance = {{ enable_glance }}
-swift = {{ enable_swift }}
-nova = {{ enable_nova }}
-heat = {{ enable_heat }}
-horizon = {{ enable_horizon }}
-ceilometer = {{ enable_ceilometer }}
-
-[compute]
-max_microversion = latest
-image_ref = {{ tempest_image_id }}
-image_ref_alt = {{ tempest_image_alt_id }}
-flavor_ref = {{ tempest_flavor_ref_id }}
-flavor_ref_alt = {{ tempest_flavor_ref_alt_id }}
-region = {{ openstack_region_name }}
-
-[dashboard]
-dashboard_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}/
-login_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}/auth/login
-
-[identity]
-region = {{ openstack_region_name }}
-auth_version = v3
-uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v2.0
-uri_v3 = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3
-
-[image]
-region = {{ openstack_region_name }}
-http_image = {{ image_url }}
-
-[network]
-region = {{ openstack_region_name }}
-public_network_id = {{ tempest_public_network_id }}
-floating_network_name = {{ tempest_floating_network_name }}
-project_networks_reachable = false
-
-[network-feature-enabled]
-ipv6 = false
-
-[object-storage]
-region = {{ openstack_region_name }}
-
-[orchestration]
-region = {{ openstack_region_name }}
-
-[volume]
-region = {{ openstack_region_name }}
-
-[volume-feature-enabled]
-api_v1 = False
-
-[validation]
-image_ssh_user = cirros
-image_ssh_password = cubswin:)
diff --git a/ansible/roles/tempest/templates/tempest.json.j2 b/ansible/roles/tempest/templates/tempest.json.j2
deleted file mode 100644
index 3ff5ea788e..0000000000
--- a/ansible/roles/tempest/templates/tempest.json.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "command": "sleep infinity",
- "config_files":[
- {
- "source": "{{ container_config_directory }}/tempest.conf",
- "dest": "/etc/tempest/tempest.conf",
- "owner": "root",
- "perm": "0600"
- }
- ]
-}
diff --git a/ansible/roles/vmtp/defaults/main.yml b/ansible/roles/vmtp/defaults/main.yml
deleted file mode 100644
index 1899d8be0b..0000000000
--- a/ansible/roles/vmtp/defaults/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-project_name: "vmtp"
-
-####################
-# Docker
-####################
-vmtp_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-vmtp"
-vmtp_tag: "{{ openstack_release }}"
-vmtp_image_full: "{{ vmtp_image }}:{{ vmtp_tag }}"
-
-#########################
-# VMTP Specific resources
-#########################
-vmtp_vm_image_name: "Ubuntu Server 16.04"
-vmtp_vm_ssh_username: "ubuntu"
-vmtp_vm_flavor_type: "m1.small"
-vmtp_vm_nameservers: ['8.8.8.8', '8.8.4.4']
-vmtp_vm_image_url: "https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img"
-vmtp_internal_network_name: ['vmtp-demo-net']
-vmtp_internal_subnet_name: ['vmtp-demo-subnet']
-vmtp_internal_subnet_name_ipv6: ['vmtp-demo-v6-subnet']
-vmtp_internal_cidr: ['10.0.0.0/24']
-vmtp_internal_cidr_v6: ['2001:45::/64']
-vmtp_router_name: "pns-router"
-vmtp_os_dp_network: "physnet1"
diff --git a/ansible/roles/vmtp/meta/main.yml b/ansible/roles/vmtp/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/vmtp/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/vmtp/tasks/config.yml b/ansible/roles/vmtp/tasks/config.yml
deleted file mode 100644
index c63c93e806..0000000000
--- a/ansible/roles/vmtp/tasks/config.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/vmtp"
- state: "directory"
- recurse: yes
-
-- name: Register binary python path
- command: echo /usr/lib/python2.7/site-packages
- register: python_path
- when: kolla_install_type == 'binary'
-
-- name: Register source python path
- command: echo /var/lib/kolla/venv/lib/python2.7/site-packages
- register: python_path
- when: kolla_install_type != 'binary'
-
-- name: Copying over configuration file for vmtp
- merge_yaml:
- sources:
- - "{{ role_path }}/templates/{{ item }}.j2"
- - "{{ node_custom_config }}/{{ item }}"
- - "{{ node_custom_config }}/vmtp/{{ item }}"
- dest: "{{ python_path }}/vmtp/{{ item }}"
- with_items:
- - "cfg.default.yaml"
diff --git a/ansible/roles/vmtp/tasks/deploy.yml b/ansible/roles/vmtp/tasks/deploy.yml
deleted file mode 100644
index d4daf108c1..0000000000
--- a/ansible/roles/vmtp/tasks/deploy.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
- when: inventory_hostname in groups['vmtp']
-
-- include: start.yml
- when: inventory_hostname in groups['vmtp']
diff --git a/ansible/roles/vmtp/tasks/main.yml b/ansible/roles/vmtp/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/vmtp/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/vmtp/tasks/precheck.yml b/ansible/roles/vmtp/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/vmtp/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/vmtp/tasks/pull.yml b/ansible/roles/vmtp/tasks/pull.yml
deleted file mode 100644
index 0be3c5eece..0000000000
--- a/ansible/roles/vmtp/tasks/pull.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Pulling vmtp image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ vmtp_image_full }}"
- when: inventory_hostname in groups['vmtp']
diff --git a/ansible/roles/vmtp/tasks/reconfigure.yml b/ansible/roles/vmtp/tasks/reconfigure.yml
deleted file mode 100644
index 3cf5f29402..0000000000
--- a/ansible/roles/vmtp/tasks/reconfigure.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: Ensure container is up
- kolla_docker:
- name: "vmtp"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups['vmtp']
-
-- include: config.yml
-
-- name: Check configuration
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups['vmtp']
- with_items:
- - { name: vmtp, group: vmtp }
-
-# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
-# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
-# just remove the container and start again
-- name: Containers config strategy
- kolla_docker:
- name: "vmtp"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups['vmtp']
-
-- name: Remove the containers
- kolla_docker:
- name: "vmtp"
- action: "remove_container"
- register: remove_containers
- when:
- - config_strategy == "COPY_ONCE" or item[0]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[1]['rc'] == 1
- - inventory_hostname in groups['vmtp']
- with_together:
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "vmtp"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[0]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[1]['rc'] == 1
- - inventory_hostname in groups['vmtp']
- with_together:
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/vmtp/tasks/start.yml b/ansible/roles/vmtp/tasks/start.yml
deleted file mode 100644
index c3a2b19b73..0000000000
--- a/ansible/roles/vmtp/tasks/start.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Starting vmtp container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ vmtp_image_full }}"
- name: "vmtp"
- volumes:
- - "{{ node_config_directory }}/vmtp/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla"
diff --git a/ansible/roles/vmtp/tasks/upgrade.yml b/ansible/roles/vmtp/tasks/upgrade.yml
deleted file mode 100644
index 1f16915ad9..0000000000
--- a/ansible/roles/vmtp/tasks/upgrade.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: config.yml
-
-- include: start.yml
diff --git a/ansible/roles/vmtp/templates/cfg.default.yaml.j2 b/ansible/roles/vmtp/templates/cfg.default.yaml.j2
deleted file mode 100644
index 991c86c698..0000000000
--- a/ansible/roles/vmtp/templates/cfg.default.yaml.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-image_name: {{ vmtp_vm_image_name }}
-ssh_vm_username: {{ vmtp_vm_ssh_username }}
-flavor_type: {{ vmtp_vm_flavor_type }}
-availability_zone: {{ vmtp_vm_availability_zone }}
-dns_nameservers: {{ vmtp_vm_nameservers }}
-vm_image_url: {{ vmtp_vm_image_url }}
-
-reuse_network_name:
-floating_ip: True
-reuse_existing_vm:
-config_drive:
-user_data_file:
-ipv6_mode:
-router_name: {{ vmtp_router_name }}
-
-internal_network_name: {{ vmtp_internal_network_name }}
-internal_subnet_name: {{ vmtp_internal_subnet_name }}
-internal_subnet_name_ipv6: {{ vmtp_internal_subnet_name_ipv6 }}
-internal_cidr: {{ vmtp_internal_cidr }}
-internal_cidr_v6: {{ vmtp_internal_cidr_v6 }}
-
-public_key_file:
-private_key_file:
-public_key_name: 'pns_public_key'
-vm_name_server: 'TestServer'
-vm_name_client: 'TestClient'
-security_group_name: 'pns-security'
-
-ping_count: 2
-ping_pass_threshold: 80
-ssh_retry_count: 50
-generic_retry_count: 50
-
-tcp_tp_loop_count: 3
-tcp_pkt_sizes: [65536]
-udp_pkt_sizes: [128, 1024, 8192]
-icmp_pkt_sizes: [64, 391, 1500]
-udp_loss_rate_range: [2, 5]
-
-vm_bandwidth: 0
-os_dataplane_network: {{ vmtp_os_dp_network }}
diff --git a/ansible/roles/watcher/defaults/main.yml b/ansible/roles/watcher/defaults/main.yml
deleted file mode 100644
index 28f300bcba..0000000000
--- a/ansible/roles/watcher/defaults/main.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-project_name: "watcher"
-
-####################
-# Database
-####################
-watcher_database_name: "watcher"
-watcher_database_user: "watcher"
-watcher_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
-
-
-####################
-# Docker
-####################
-watcher_engine_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-watcher-engine"
-watcher_engine_tag: "{{ openstack_release }}"
-watcher_engine_image_full: "{{ watcher_engine_image }}:{{ watcher_engine_tag }}"
-
-watcher_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-watcher-api"
-watcher_api_tag: "{{ openstack_release }}"
-watcher_api_image_full: "{{ watcher_api_image }}:{{ watcher_api_tag }}"
-
-watcher_applier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-watcher-applier"
-watcher_applier_tag: "{{ openstack_release }}"
-watcher_applier_image_full: "{{ watcher_applier_image }}:{{ watcher_applier_tag }}"
-
-####################
-# OpenStack
-####################
-watcher_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ watcher_api_port }}"
-watcher_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ watcher_api_port }}"
-watcher_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ watcher_api_port }}"
-
-watcher_logging_debug: "{{ openstack_logging_debug }}"
-
-watcher_keystone_user: "watcher"
-
-openstack_watcher_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}'}"
diff --git a/ansible/roles/watcher/meta/main.yml b/ansible/roles/watcher/meta/main.yml
deleted file mode 100644
index 6b4fff8fef..0000000000
--- a/ansible/roles/watcher/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
- - { role: common }
diff --git a/ansible/roles/watcher/tasks/bootstrap.yml b/ansible/roles/watcher/tasks/bootstrap.yml
deleted file mode 100644
index fa63250e36..0000000000
--- a/ansible/roles/watcher/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Watcher database
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_db
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ watcher_database_name }}'"
- register: database
- changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
- (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['watcher-api'][0] }}"
-
-- name: Reading json from variable
- set_fact:
- database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
-
-- name: Creating Watcher database user and setting permissions
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m mysql_user
- -a "login_host='{{ database_address }}'
- login_port='{{ database_port }}'
- login_user='{{ database_user }}'
- login_password='{{ database_password }}'
- name='{{ watcher_database_name }}'
- password='{{ watcher_database_password }}'
- host='%'
- priv='{{ watcher_database_name }}.*:ALL'
- append_privs='yes'"
- register: database_user_create
- changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
- (database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
- run_once: True
- delegate_to: "{{ groups['watcher-api'][0] }}"
-
-- include: bootstrap_service.yml
- when: database_created
diff --git a/ansible/roles/watcher/tasks/bootstrap_service.yml b/ansible/roles/watcher/tasks/bootstrap_service.yml
deleted file mode 100644
index 802737bd23..0000000000
--- a/ansible/roles/watcher/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Watcher bootstrap container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: False
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ watcher_api_image_full }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_watcher"
- restart_policy: "never"
- volumes:
- - "{{ node_config_directory }}/watcher-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- run_once: True
- delegate_to: "{{ groups['watcher-api'][0] }}"
diff --git a/ansible/roles/watcher/tasks/config.yml b/ansible/roles/watcher/tasks/config.yml
deleted file mode 100644
index 9f32f9398f..0000000000
--- a/ansible/roles/watcher/tasks/config.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- name: Ensuring config directories exist
- file:
- path: "{{ node_config_directory }}/{{ item }}"
- state: "directory"
- recurse: yes
- with_items:
- - "watcher-api"
- - "watcher-engine"
- - "watcher-applier"
-
-- name: Copying over config.json files for services
- template:
- src: "{{ item }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item }}/config.json"
- with_items:
- - "watcher-api"
- - "watcher-engine"
- - "watcher-applier"
-
-- name: Copying over watcher.conf
- merge_configs:
- vars:
- service_name: "{{ item }}"
- sources:
- - "{{ role_path }}/templates/watcher.conf.j2"
- - "{{ node_config_directory }}/config/global.conf"
- - "{{ node_config_directory }}/config/database.conf"
- - "{{ node_config_directory }}/config/messaging.conf"
- - "{{ node_config_directory }}/config/watcher.conf"
- - "{{ node_config_directory }}/config/watcher/{{ item }}.conf"
- - "{{ node_config_directory }}/config/watcher/{{ inventory_hostname }}/watcher.conf"
- dest: "{{ node_config_directory }}/{{ item }}/watcher.conf"
- with_items:
- - "watcher-api"
- - "watcher-engine"
- - "watcher-applier"
-
-- name: Check if policies shall be overwritten
- local_action: stat path="{{ node_custom_config }}/watcher/policy.json"
- register: watcher_policy
-
-- name: Copying over existing policy.json
- template:
- src: "{{ node_custom_config }}/watcher/policy.json"
- dest: "{{ node_config_directory }}/{{ item }}/policy.json"
- with_items:
- - "watcher-api"
- - "watcher-engine"
- - "watcher-applier"
- when:
- watcher_policy.stat.exists
diff --git a/ansible/roles/watcher/tasks/deploy.yml b/ansible/roles/watcher/tasks/deploy.yml
deleted file mode 100644
index d589a67b10..0000000000
--- a/ansible/roles/watcher/tasks/deploy.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- include: register.yml
- when: inventory_hostname in groups['watcher-api']
-
-- include: config.yml
- when: inventory_hostname in groups['watcher-api'] or
- inventory_hostname in groups['watcher-engine'] or
- inventory_hostname in groups['watcher-applier']
-
-- include: bootstrap.yml
- when: inventory_hostname in groups['watcher-api']
-
-- include: start.yml
- when: inventory_hostname in groups['watcher-api'] or
- inventory_hostname in groups['watcher-engine'] or
- inventory_hostname in groups['watcher-applier']
diff --git a/ansible/roles/watcher/tasks/main.yml b/ansible/roles/watcher/tasks/main.yml
deleted file mode 100644
index b017e8b4ad..0000000000
--- a/ansible/roles/watcher/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: "{{ action }}.yml"
diff --git a/ansible/roles/watcher/tasks/precheck.yml b/ansible/roles/watcher/tasks/precheck.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/ansible/roles/watcher/tasks/precheck.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/ansible/roles/watcher/tasks/pull.yml b/ansible/roles/watcher/tasks/pull.yml
deleted file mode 100644
index a4e20fb64f..0000000000
--- a/ansible/roles/watcher/tasks/pull.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Pulling watcher-api image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ watcher_api_image_full }}"
- when: inventory_hostname in groups['watcher-api']
-
-- name: Pulling watcher-engine image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ watcher_engine_image_full }}"
- when: inventory_hostname in groups['watcher-engine']
-
-- name: Pulling watcher-applier image
- kolla_docker:
- action: "pull_image"
- common_options: "{{ docker_common_options }}"
- image: "{{ watcher_applier_image_full }}"
- when: inventory_hostname in groups['watcher-applier']
diff --git a/ansible/roles/watcher/tasks/reconfigure.yml b/ansible/roles/watcher/tasks/reconfigure.yml
deleted file mode 100644
index e28ec6ab6f..0000000000
--- a/ansible/roles/watcher/tasks/reconfigure.yml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-- name: Ensuring the containers up
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_state"
- register: container_state
- failed_when: container_state.Running == false
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: watcher_api, group: watcher-api }
- - { name: watcher_engine, group: watcher-engine }
- - { name: watcher_applier, group: watcher-applier }
-
-- include: config.yml
-
-- name: Check the configs
- command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
- changed_when: false
- failed_when: false
- register: check_results
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: watcher_api, group: watcher-api }
- - { name: watcher_engine, group: watcher-engine }
- - { name: watcher_applier, group: watcher-applier }
-
-- name: Containers config strategy
- kolla_docker:
- name: "{{ item.name }}"
- action: "get_container_env"
- register: container_envs
- when: inventory_hostname in groups[item.group]
- with_items:
- - { name: watcher_api, group: watcher-api }
- - { name: watcher_engine, group: watcher-engine }
- - { name: watcher_applier, group: watcher-applier }
-
-- name: Remove the containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "remove_container"
- register: remove_containers
- when:
- - inventory_hostname in groups[item[0]['group']]
- - config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
- - item[2]['rc'] == 1
- with_together:
- - [{ name: watcher_api, group: watcher-api },
- { name: watcher_engine, group: watcher-engine },
- { name: watcher_applier, group: watcher-applier }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
-
-- include: start.yml
- when: remove_containers.changed
-
-- name: Restart containers
- kolla_docker:
- name: "{{ item[0]['name'] }}"
- action: "restart_container"
- when:
- - config_strategy == 'COPY_ALWAYS'
- - item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
- - item[2]['rc'] == 1
- - inventory_hostname in groups[item[0]['group']]
- with_together:
- - [{ name: watcher_api, group: watcher-api },
- { name: watcher_engine, group: watcher-engine },
- { name: watcher_applier, group: watcher-applier }]
- - "{{ container_envs.results }}"
- - "{{ check_results.results }}"
diff --git a/ansible/roles/watcher/tasks/register.yml b/ansible/roles/watcher/tasks/register.yml
deleted file mode 100644
index 59a2e21823..0000000000
--- a/ansible/roles/watcher/tasks/register.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Creating the Watcher service and endpoint
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_service
- -a "service_name=watcher
- service_type=infra-optim
- description='Infrastructure Optimization service'
- endpoint_region={{ openstack_region_name }}
- url='{{ item.url }}'
- interface='{{ item.interface }}'
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_watcher_auth }}' }}"
- -e "{'openstack_watcher_auth':{{ openstack_watcher_auth }}}"
- register: watcher_endpoint
- changed_when: "{{ watcher_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (watcher_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: watcher_endpoint.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
- with_items:
- - {'interface': 'admin', 'url': '{{ watcher_admin_endpoint }}'}
- - {'interface': 'internal', 'url': '{{ watcher_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ watcher_public_endpoint }}'}
-
-- name: Creating the Watcher project, user, and role
- command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
- -m kolla_keystone_user
- -a "project=service
- user=watcher
- password={{ watcher_keystone_password }}
- role=admin
- region_name={{ openstack_region_name }}
- auth={{ '{{ openstack_watcher_auth }}' }}"
- -e "{'openstack_watcher_auth':{{ openstack_watcher_auth }}}"
- register: watcher_user
- changed_when: "{{ watcher_user.stdout.find('localhost | SUCCESS => ') != -1 and (watcher_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
- until: watcher_user.stdout.split()[2] == 'SUCCESS'
- retries: 10
- delay: 5
- run_once: True
diff --git a/ansible/roles/watcher/tasks/start.yml b/ansible/roles/watcher/tasks/start.yml
deleted file mode 100644
index e88e3f853a..0000000000
--- a/ansible/roles/watcher/tasks/start.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: Starting watcher-applier container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ watcher_applier_image_full }}"
- name: "watcher_applier"
- volumes:
- - "{{ node_config_directory }}/watcher-applier/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['watcher-applier']
-
-- name: Starting watcher-engine container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ watcher_engine_image_full }}"
- name: "watcher_engine"
- volumes:
- - "{{ node_config_directory }}/watcher-engine/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['watcher-engine']
-
-- name: Starting watcher-api container
- kolla_docker:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- image: "{{ watcher_api_image_full }}"
- name: "watcher_api"
- volumes:
- - "{{ node_config_directory }}/watcher-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- when: inventory_hostname in groups['watcher-api']
diff --git a/ansible/roles/watcher/tasks/upgrade.yml b/ansible/roles/watcher/tasks/upgrade.yml
deleted file mode 100644
index 308053080c..0000000000
--- a/ansible/roles/watcher/tasks/upgrade.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: config.yml
-
-- include: bootstrap_service.yml
-
-- include: start.yml
diff --git a/ansible/roles/watcher/templates/watcher-api.json.j2 b/ansible/roles/watcher/templates/watcher-api.json.j2
deleted file mode 100644
index 422313e777..0000000000
--- a/ansible/roles/watcher/templates/watcher-api.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "watcher-api --config-file /etc/watcher/watcher.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/watcher.conf",
- "dest": "/etc/watcher/watcher.conf",
- "owner": "watcher",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/watcher",
- "owner": "watcher:watcher",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/watcher/templates/watcher-applier.json.j2 b/ansible/roles/watcher/templates/watcher-applier.json.j2
deleted file mode 100644
index 2fae81a0e7..0000000000
--- a/ansible/roles/watcher/templates/watcher-applier.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "watcher-applier --config-file /etc/watcher/watcher.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/watcher.conf",
- "dest": "/etc/watcher/watcher.conf",
- "owner": "watcher",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/watcher",
- "owner": "watcher:watcher",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/watcher/templates/watcher-engine.json.j2 b/ansible/roles/watcher/templates/watcher-engine.json.j2
deleted file mode 100644
index acf60e9305..0000000000
--- a/ansible/roles/watcher/templates/watcher-engine.json.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "command": "watcher-decision-engine --config-file /etc/watcher/watcher.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/watcher.conf",
- "dest": "/etc/watcher/watcher.conf",
- "owner": "watcher",
- "perm": "0644"
- }
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/watcher",
- "owner": "watcher:watcher",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/watcher/templates/watcher.conf.j2 b/ansible/roles/watcher/templates/watcher.conf.j2
deleted file mode 100644
index 5a2a3afda7..0000000000
--- a/ansible/roles/watcher/templates/watcher.conf.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-[DEFAULT]
-debug = {{ watcher_logging_debug }}
-
-log_dir = /var/log/kolla/watcher
-
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-{% if service_name == 'watcher-api' %}
-[api]
-host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
-port = {{ watcher_api_port }}
-{% endif %}
-
-[database]
-connection = mysql+pymysql://{{ watcher_database_user }}:{{ watcher_database_password }}@{{ watcher_database_address}}/{{ watcher_database_name }}
-max_retries = -1
-
-[keystone_authtoken]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ watcher_keystone_user }}
-password = {{ watcher_keystone_password }}
-
-memcache_security_strategy = ENCRYPT
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-
-[watcher_clients_auth]
-auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = {{ watcher_keystone_user }}
-password = {{ watcher_keystone_password }}
-
-[oslo_concurrency]
-lock_path = /var/lib/watcher/tmp
diff --git a/ansible/site.yml b/ansible/site.yml
deleted file mode 100644
index e82c0b5010..0000000000
--- a/ansible/site.yml
+++ /dev/null
@@ -1,408 +0,0 @@
----
-# NOTE(awiddersheim): Gather facts for all hosts as a
-# first step since several plays below require them when
-# building their configurations. The below 'gather_facts'
-# set to 'false' is a bit confusing but this is to avoid
-# Ansible gathering facts twice.
-- name: Gather facts for all hosts
- hosts:
- - all
- serial: '{{ serial|default("0") }}'
- gather_facts: false
- tasks:
- - setup:
- tags: always
-
-- hosts:
- - all
- roles:
- - role: prechecks
- when: action == "precheck"
-
-- hosts:
- - ceph-mon
- - ceph-osd
- - ceph-rgw
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: ceph,
- tags: ceph,
- when: enable_ceph | bool }
-
-- hosts: collectd
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: collectd,
- tags: collectd,
- when: enable_collectd | bool }
-
-- hosts: elasticsearch
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: elasticsearch,
- tags: elasticsearch,
- when: enable_elasticsearch | bool }
-
-- hosts: influxdb
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: influxdb,
- tags: influxdb,
- when: enable_influxdb | bool }
-
-- hosts:
- - telegraf
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: telegraf,
- tags: telegraf,
- when: enable_telegraf | bool }
-
-- hosts:
- - haproxy
- roles:
- - { role: haproxy,
- tags: haproxy,
- when: enable_haproxy | bool }
-
-- hosts: kibana
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: kibana,
- tags: kibana,
- when: enable_kibana | bool }
-
-- hosts: memcached
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: memcached,
- tags: [memcache, memcached],
- when: enable_memcached | bool }
-
-- hosts: mariadb
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: mariadb,
- tags: mariadb,
- when: enable_mariadb | bool }
-
-- hosts:
- - iscsid
- - tgtd
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: iscsi,
- tags: iscsi,
- when: enable_iscsid | bool }
-
-- hosts:
- - multipathd
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: multipathd,
- tags: multipathd,
- when: enable_multipathd | bool }
-
-- hosts: rabbitmq
- roles:
- - { role: rabbitmq,
- tags: rabbitmq,
- when: enable_rabbitmq | bool }
-
-- hosts: etcd
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: etcd,
- tags: etcd,
- when: enable_etcd | bool }
-
-- hosts:
- - keystone
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: keystone,
- tags: keystone,
- when: enable_keystone | bool }
-
-- hosts:
- - swift-account-server
- - swift-container-server
- - swift-object-server
- - swift-proxy-server
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: swift,
- tags: swift,
- when: enable_swift | bool }
-
-- hosts:
- - ceph-mon
- - glance-api
- - glance-registry
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: glance,
- tags: glance,
- when: enable_glance | bool }
-
-- hosts:
- - ironic-api
- - ironic-conductor
- - ironic-inspector
- - ironic-pxe
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: ironic,
- tags: ironic,
- when: enable_ironic | bool }
-
-- hosts:
- - ceph-mon
- - compute
- - nova-api
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: nova,
- tags: nova,
- when: enable_nova | bool }
-
-- hosts:
- - compute
- roles:
- - { role: kuryr,
- tags: kuryr,
- when: enable_kuryr | bool }
-
-# (gmmaha): Please do not change the order listed here. The current order is a
-# workaround to fix the bug https://bugs.launchpad.net/kolla/+bug/1546789
-- hosts:
- - neutron-server
- - neutron-dhcp-agent
- - neutron-l3-agent
- - neutron-lbaas-agent
- - neutron-metadata-agent
- - neutron-vpnaas-agent
- - compute
- - manila-share
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: neutron,
- tags: neutron,
- when: enable_neutron | bool }
-
-- hosts:
- - ceph-mon
- - cinder-api
- - cinder-backup
- - cinder-scheduler
- - cinder-volume
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: cinder,
- tags: cinder,
- when: enable_cinder | bool }
-
-- hosts:
- - heat-api
- - heat-api-cfn
- - heat-engine
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: heat,
- tags: heat,
- when: enable_heat | bool }
-
-- hosts:
- - horizon
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: horizon,
- tags: horizon,
- when: enable_horizon | bool }
-
-- hosts:
- - murano-api
- - murano-engine
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: murano,
- tags: murano,
- when: enable_murano | bool }
-
-- hosts:
- - magnum-api
- - magnum-conductor
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: magnum,
- tags: magnum,
- when: enable_magnum | bool }
-
-- hosts:
- - mistral-api
- - mistral-engine
- - mistral-executor
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: mistral,
- tags: mistral,
- when: enable_mistral | bool }
-
-- hosts:
- - sahara-api
- - sahara-engine
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: sahara,
- tags: sahara,
- when: enable_sahara | bool }
-
-- hosts:
- - mongodb
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: mongodb,
- tags: mongodb,
- when: enable_mongodb | bool }
-
-- hosts:
- - manila-api
- - manila-data
- - manila-share
- - manila-scheduler
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: manila,
- tags: manila,
- when: enable_manila | bool }
-
-- hosts:
- - gnocchi-api
- - gnocchi-metricd
- - gnocchi-statsd
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: gnocchi,
- tags: gnocchi,
- when: enable_gnocchi | bool }
-
-- hosts:
- - ceilometer
- - compute
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: ceilometer,
- tags: ceilometer,
- when: enable_ceilometer | bool }
-
-- hosts:
- - aodh
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: aodh,
- tags: aodh,
- when: enable_aodh | bool }
-
-- hosts:
- - barbican-api
- - barbican-keystone-listener
- - barbican-worker
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: barbican,
- tags: barbican,
- when: enable_barbican | bool }
-
-- hosts:
- - congress-api
- - congress-policy-engine
- - congress-datasource
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: congress,
- tags: congress,
- when: enable_congress | bool }
-
-- hosts:
- - designate-api
- - designate-central
- - designate-mdns
- - designate-pool-manager
- - designate-sink
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: designate,
- tags: designate,
- when: enable_designate | bool }
-
-- hosts:
- - tempest
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: tempest,
- tags: tempest,
- when: enable_tempest | bool }
-
-- hosts: rally
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: rally,
- tags: rally,
- when: enable_rally | bool }
-
-- hosts:
- - vmtp
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: vmtp,
- tags: vmtp,
- when: enable_vmtp | bool }
-
-- hosts:
- - watcher-api
- - watcher-engine
- - watcher-applier
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: watcher,
- tags: watcher,
- when: enable_watcher | bool }
-
-- hosts:
- - grafana
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: grafana,
- tags: grafana,
- when: enable_grafana | bool }
-
-- hosts:
- - cloudkitty-api
- - cloudkitty-processor
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: cloudkitty,
- tags: cloudkitty,
- when: enable_cloudkitty | bool }
-
-- hosts:
- - senlin-api
- - senlin-engine
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: senlin,
- tags: senlin,
- when: enable_senlin | bool }
-
-- hosts:
- - searchlight-api
- - searchlight-listener
- serial: '{{ serial|default("0") }}'
- roles:
- - { role: searchlight,
- tags: searchlight,
- when: enable_searchlight | bool }
diff --git a/setup.cfg b/setup.cfg
index bacc903c68..cc0db64e7b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -24,7 +24,6 @@ classifier =
packages =
kolla
data_files =
- share/kolla/ansible = ansible/*
share/kolla/docker = docker/*
share/kolla/tools = tools/validate-docker-execute.sh
share/kolla/tools = tools/cleanup-containers
@@ -36,9 +35,6 @@ data_files =
share/kolla = tools/openrc-example
share/kolla = setup.cfg
-scripts =
- tools/kolla-ansible
-
[entry_points]
console_scripts =
kolla-build = kolla.cmd.build:main
diff --git a/tests/test_kolla_docker.py b/tests/test_kolla_docker.py
deleted file mode 100644
index efcf16c0d1..0000000000
--- a/tests/test_kolla_docker.py
+++ /dev/null
@@ -1,709 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2016 NEC Corporation
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import imp
-import os
-import sys
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-from docker import errors as docker_error
-from oslotest import base
-
-this_dir = os.path.dirname(sys.modules[__name__].__file__)
-sys.modules['ansible'] = mock.MagicMock()
-sys.modules['ansible.module_utils'] = mock.MagicMock()
-sys.modules['ansible.module_utils.basic'] = mock.MagicMock()
-kolla_docker_file = os.path.join(this_dir, '..', 'ansible',
- 'library', 'kolla_docker.py')
-kd = imp.load_source('kolla_docker', kolla_docker_file)
-
-
-class ModuleArgsTest(base.BaseTestCase):
-
- def setUp(self):
- super(ModuleArgsTest, self).setUp()
-
- def test_module_args(self):
- argument_spec = dict(
- common_options=dict(required=False, type='dict', default=dict()),
- action=dict(
- required=True, type='str', choices=['compare_image',
- 'create_volume',
- 'get_container_env',
- 'get_container_state',
- 'pull_image',
- 'remove_container',
- 'remove_volume',
- 'restart_container',
- 'start_container',
- 'stop_container']),
- api_version=dict(required=False, type='str', default='auto'),
- auth_email=dict(required=False, type='str'),
- auth_password=dict(required=False, type='str'),
- auth_registry=dict(required=False, type='str'),
- auth_username=dict(required=False, type='str'),
- detach=dict(required=False, type='bool', default=True),
- labels=dict(required=False, type='dict', default=dict()),
- name=dict(required=False, type='str'),
- environment=dict(required=False, type='dict'),
- image=dict(required=False, type='str'),
- ipc_mode=dict(required=False, type='str', choices=['host']),
- cap_add=dict(required=False, type='list', default=list()),
- security_opt=dict(required=False, type='list', default=list()),
- pid_mode=dict(required=False, type='str', choices=['host']),
- privileged=dict(required=False, type='bool', default=False),
- remove_on_exit=dict(required=False, type='bool', default=True),
- restart_policy=dict(
- required=False, type='str', choices=['no',
- 'never',
- 'on-failure',
- 'always',
- 'unless-stopped']),
- restart_retries=dict(required=False, type='int', default=10),
- tls_verify=dict(required=False, type='bool', default=False),
- tls_cert=dict(required=False, type='str'),
- tls_key=dict(required=False, type='str'),
- tls_cacert=dict(required=False, type='str'),
- volumes=dict(required=False, type='list'),
- volumes_from=dict(required=False, type='list')
- )
- required_together = [
- ['tls_cert', 'tls_key']
- ]
-
- kd.AnsibleModule = mock.MagicMock()
- kd.generate_module()
- kd.AnsibleModule.assert_called_with(
- argument_spec=argument_spec,
- required_together=required_together,
- bypass_checks=True
- )
-
-FAKE_DATA = {
-
- 'params': {
- 'detach': True,
- 'environment': {},
- 'host_config': {
- 'network_mode': 'host',
- 'ipc_mode': '',
- 'cap_add': None,
- 'security_opt': None,
- 'pid_mode': '',
- 'privileged': False,
- 'volumes_from': None,
- 'restart_policy': 'unless-stopped',
- 'restart_retries': 10},
- 'labels': {'build-date': '2016-06-02',
- 'kolla_version': '2.0.1',
- 'license': 'GPLv2',
- 'name': 'ubuntu Base Image',
- 'vendor': 'ubuntuOS'},
- 'image': 'myregistrydomain.com:5000/ubuntu:16.04',
- 'name': 'test_container',
- 'volumes': None,
- 'tty': True
- },
-
- 'images': [
- {'Created': 1462317178,
- 'Labels': {},
- 'VirtualSize': 120759015,
- 'ParentId': '',
- 'RepoTags': ['myregistrydomain.com:5000/ubuntu:16.04'],
- 'Id': 'sha256:c5f1cf30',
- 'Size': 120759015},
- {'Created': 1461802380,
- 'Labels': {},
- 'VirtualSize': 403096303,
- 'ParentId': '',
- 'RepoTags': ['myregistrydomain.com:5000/centos:7.0'],
- 'Id': 'sha256:336a6',
- 'Size': 403096303}
- ],
-
- 'containers': [
- {'Created': 1463578194,
- 'Status': 'Up 23 hours',
- 'HostConfig': {'NetworkMode': 'default'},
- 'Id': 'e40d8e7187',
- 'Image': 'myregistrydomain.com:5000/ubuntu:16.04',
- 'ImageID': 'sha256:c5f1cf30',
- 'Labels': {},
- 'Names': '/my_container'}
- ],
-
- 'container_inspect': {
- 'Config': {
- 'Env': ['KOLLA_BASE_DISTRO=ubuntu',
- 'KOLLA_INSTALL_TYPE=binary',
- 'KOLLA_INSTALL_METATYPE=rdo'],
- 'Hostname': 'node2',
- 'Volumes': {'/var/lib/kolla/config_files/': {}}},
- 'Mounts': {},
- 'NetworkSettings': {},
- 'State': {}
- }
-
-}
-
-
-@mock.patch("docker.Client")
-def get_DockerWorker(mod_param, mock_dclient):
- module = mock.MagicMock()
- module.params = mod_param
- dw = kd.DockerWorker(module)
- return dw
-
-
-class TestContainer(base.BaseTestCase):
-
- def setUp(self):
- super(TestContainer, self).setUp()
- self.fake_data = copy.deepcopy(FAKE_DATA)
-
- def test_create_container(self):
- self.dw = get_DockerWorker(self.fake_data['params'])
- self.dw.dc.create_host_config = mock.MagicMock(
- return_value=self.fake_data['params']['host_config'])
- self.dw.create_container()
- self.assertTrue(self.dw.changed)
- self.dw.dc.create_container.assert_called_once_with(
- **self.fake_data['params'])
-
- def test_start_container_without_pull(self):
- self.fake_data['params'].update({'auth_username': 'fake_user',
- 'auth_password': 'fake_psw',
- 'auth_registry': 'myrepo/myapp',
- 'auth_email': 'fake_mail@foogle.com'})
- self.dw = get_DockerWorker(self.fake_data['params'])
- self.dw.dc.images = mock.MagicMock(
- return_value=self.fake_data['images'])
- self.dw.dc.containers = mock.MagicMock(params={'all': 'True'})
- new_container = copy.deepcopy(self.fake_data['containers'])
- new_container.append({'Names': '/test_container',
- 'Status': 'Up 2 seconds'})
- self.dw.dc.containers.side_effect = [self.fake_data['containers'],
- new_container]
- self.dw.check_container_differs = mock.MagicMock(return_value=False)
- self.dw.create_container = mock.MagicMock()
- self.dw.start_container()
- self.assertFalse(self.dw.changed)
- self.dw.create_container.assert_called_once_with()
-
- def test_start_container_with_duplicate_name(self):
- self.fake_data['params'].update({'name': 'my_container',
- 'auth_username': 'fake_user',
- 'auth_password': 'fake_psw',
- 'auth_registry': 'myrepo/myapp',
- 'auth_email': 'fake_mail@foogle.com'})
- self.dw = get_DockerWorker(self.fake_data['params'])
- self.dw.dc.images = mock.MagicMock(
- return_value=self.fake_data['images'])
- self.dw.dc.containers = mock.MagicMock(params={'all': 'True'})
- updated_cont_list = copy.deepcopy(self.fake_data['containers'])
- updated_cont_list.pop(0)
- self.dw.dc.containers.side_effect = [self.fake_data['containers'],
- self.fake_data['containers'],
- updated_cont_list,
- self.fake_data['containers']
- ]
- self.dw.check_container_differs = mock.MagicMock(return_value=True)
- self.dw.dc.remove_container = mock.MagicMock()
- self.dw.create_container = mock.MagicMock()
- self.dw.start_container()
- self.assertTrue(self.dw.changed)
- self.dw.dc.remove_container.assert_called_once_with(
- container=self.fake_data['params'].get('name'),
- force=True)
- self.dw.create_container.assert_called_once_with()
-
- def test_start_container(self):
- self.fake_data['params'].update({'name': 'my_container',
- 'auth_username': 'fake_user',
- 'auth_password': 'fake_psw',
- 'auth_registry': 'myrepo/myapp',
- 'auth_email': 'fake_mail@foogle.com'})
- self.dw = get_DockerWorker(self.fake_data['params'])
- self.dw.dc.images = mock.MagicMock(
- return_value=self.fake_data['images'])
- self.fake_data['containers'][0].update(
- {'Status': 'Exited 2 days ago'})
- self.dw.dc.containers = mock.MagicMock(
- return_value=self.fake_data['containers'])
- self.dw.check_container_differs = mock.MagicMock(return_value=False)
- self.dw.dc.start = mock.MagicMock()
- self.dw.start_container()
- self.assertTrue(self.dw.changed)
- self.dw.dc.start.assert_called_once_with(
- container=self.fake_data['params'].get('name'))
-
- def test_stop_container(self):
- self.dw = get_DockerWorker({'name': 'my_container',
- 'action': 'stop_container'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.dw.stop_container()
-
- self.assertTrue(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.dc.stop.assert_called_once_with('my_container')
-
- def test_stop_container_not_exists(self):
- self.dw = get_DockerWorker({'name': 'fake_container',
- 'action': 'stop_container'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.dw.stop_container()
-
- self.assertFalse(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.module.fail_json.assert_called_once_with(
- msg="No such container: fake_container to stop")
-
- def test_restart_container(self):
- self.dw = get_DockerWorker({'name': 'my_container',
- 'action': 'restart_container'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.fake_data['container_inspect'].update(
- self.fake_data['containers'][0])
- self.dw.dc.inspect_container.return_value = (
- self.fake_data['container_inspect'])
- self.dw.restart_container()
-
- self.assertTrue(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.dc.inspect_container.assert_called_once_with('my_container')
- self.dw.dc.restart.assert_called_once_with('my_container')
-
- def test_restart_container_not_exists(self):
- self.dw = get_DockerWorker({'name': 'fake-container',
- 'action': 'restart_container'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.dw.restart_container()
-
- self.assertFalse(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.module.fail_json.assert_called_once_with(
- msg="No such container: fake-container")
-
- def test_remove_container(self):
- self.dw = get_DockerWorker({'name': 'my_container',
- 'action': 'remove_container'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.dw.remove_container()
-
- self.assertTrue(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.dc.remove_container.assert_called_once_with(
- container='my_container',
- force=True
- )
-
- def test_get_container_env(self):
- fake_env = dict(KOLLA_BASE_DISTRO='ubuntu',
- KOLLA_INSTALL_TYPE='binary',
- KOLLA_INSTALL_METATYPE='rdo')
- self.dw = get_DockerWorker({'name': 'my_container',
- 'action': 'get_container_env'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.fake_data['container_inspect'].update(
- self.fake_data['containers'][0])
- self.dw.dc.inspect_container.return_value = (
- self.fake_data['container_inspect'])
- self.dw.get_container_env()
-
- self.assertFalse(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.dc.inspect_container.assert_called_once_with('my_container')
- self.dw.module.exit_json.assert_called_once_with(**fake_env)
-
- def test_get_container_env_negative(self):
- self.dw = get_DockerWorker({'name': 'fake_container',
- 'action': 'get_container_env'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.dw.get_container_env()
-
- self.assertFalse(self.dw.changed)
- self.dw.module.fail_json.assert_called_once_with(
- msg="No such container: fake_container")
-
- def test_get_container_state(self):
- State = {'Dead': False,
- 'ExitCode': 0,
- 'Pid': 12475,
- 'StartedAt': u'2016-06-07T11:22:37.66876269Z',
- 'Status': u'running'}
- self.fake_data['container_inspect'].update({'State': State})
- self.dw = get_DockerWorker({'name': 'my_container',
- 'action': 'get_container_state'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.dw.dc.inspect_container.return_value = (
- self.fake_data['container_inspect'])
- self.dw.get_container_state()
-
- self.assertFalse(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.dc.inspect_container.assert_called_once_with('my_container')
- self.dw.module.exit_json.assert_called_once_with(**State)
-
- def test_get_container_state_negative(self):
- self.dw = get_DockerWorker({'name': 'fake_container',
- 'action': 'get_container_state'})
- self.dw.dc.containers.return_value = self.fake_data['containers']
- self.dw.get_container_state()
-
- self.assertFalse(self.dw.changed)
- self.dw.dc.containers.assert_called_once_with(all=True)
- self.dw.module.fail_json.assert_called_once_with(
- msg="No such container: fake_container")
-
-
-class TestImage(base.BaseTestCase):
-
- def setUp(self):
- super(TestImage, self).setUp()
- self.fake_data = copy.deepcopy(FAKE_DATA)
-
- def test_check_image(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
- self.dw.dc.images.return_value = self.fake_data['images']
-
- return_data = self.dw.check_image()
- self.assertFalse(self.dw.changed)
- self.dw.dc.images.assert_called_once_with()
- self.assertEqual(self.fake_data['images'][0], return_data)
-
- def test_check_image_before_docker_1_12(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/centos:7.0'})
- self.fake_data['images'][0]['RepoTags'] = []
- self.dw.dc.images.return_value = self.fake_data['images']
-
- return_data = self.dw.check_image()
- self.assertFalse(self.dw.changed)
- self.dw.dc.images.assert_called_once_with()
- self.assertEqual(self.fake_data['images'][1], return_data)
-
- def test_check_image_docker_1_12(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/centos:7.0'})
- self.fake_data['images'][0]['RepoTags'] = None
- self.dw.dc.images.return_value = self.fake_data['images']
-
- return_data = self.dw.check_image()
- self.assertFalse(self.dw.changed)
- self.dw.dc.images.assert_called_once_with()
- self.assertEqual(self.fake_data['images'][1], return_data)
-
- def test_compare_image(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
- self.dw.dc.images.return_value = self.fake_data['images']
- container_info = {'Image': 'sha256:c5f1cf40',
- 'Config': {'myregistrydomain.com:5000/ubuntu:16.04'}
- }
-
- return_data = self.dw.compare_image(container_info)
- self.assertFalse(self.dw.changed)
- self.dw.dc.images.assert_called_once_with()
- self.assertTrue(return_data)
-
- def test_pull_image_new(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/ubuntu:16.04',
- 'auth_username': 'fake_user',
- 'auth_password': 'fake_psw',
- 'auth_registry': 'myrepo/myapp',
- 'auth_email': 'fake_mail@foogle.com'
- })
- self.dw.dc.pull.return_value = [
- '{"status":"Pull complete","progressDetail":{},"id":"22f7"}\r\n',
- '{"status":"Digest: sha256:47c3bdbcf99f0c1a36e4db"}\r\n',
- '{"status":"Downloaded newer image for ubuntu:16.04"}\r\n'
- ]
-
- self.dw.pull_image()
- self.dw.dc.pull.assert_called_once_with(
- repository='myregistrydomain.com:5000/ubuntu',
- tag='16.04',
- stream=True)
- self.assertTrue(self.dw.changed)
-
- def test_pull_image_exists(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
- self.dw.dc.pull.return_value = [
- '{"status":"Pull complete","progressDetail":{},"id":"22f7"}\r\n',
- '{"status":"Digest: sha256:47c3bdbf0c1a36e4db"}\r\n',
- '{"status":"mage is up to date for ubuntu:16.04"}\r\n'
- ]
-
- self.dw.pull_image()
- self.dw.dc.pull.assert_called_once_with(
- repository='myregistrydomain.com:5000/ubuntu',
- tag='16.04',
- stream=True)
- self.assertFalse(self.dw.changed)
-
- def test_pull_image_unknown_status(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
- self.dw.dc.pull.return_value = [
- '{"status": "some random message"}\r\n']
-
- self.dw.pull_image()
- self.dw.dc.pull.assert_called_once_with(
- repository='myregistrydomain.com:5000/ubuntu',
- tag='16.04',
- stream=True)
- self.assertFalse(self.dw.changed)
- self.dw.module.fail_json.assert_called_with(
- msg='Unknown status message: some random message',
- failed=True)
-
- def test_pull_image_not_exists(self):
- self.dw = get_DockerWorker(
- {'image': 'unknown:16.04'})
- self.dw.dc.pull.return_value = [
- '{"error": "image unknown not found"}\r\n']
-
- self.dw.pull_image()
- self.dw.dc.pull.assert_called_once_with(
- repository='unknown',
- tag='16.04',
- stream=True)
- self.assertFalse(self.dw.changed)
- self.dw.module.fail_json.assert_called_once_with(
- msg="The requested image does not exist: unknown:16.04",
- failed=True)
-
- def test_pull_image_error(self):
- self.dw = get_DockerWorker(
- {'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
- self.dw.dc.pull.return_value = [
- '{"error": "unexpected error"}\r\n']
-
- self.dw.pull_image()
- self.dw.dc.pull.assert_called_once_with(
- repository='myregistrydomain.com:5000/ubuntu',
- tag='16.04',
- stream=True)
- self.assertFalse(self.dw.changed)
- self.dw.module.fail_json.assert_called_once_with(
- msg="Unknown error message: unexpected error",
- failed=True)
-
-
-class TestVolume(base.BaseTestCase):
-
- def setUp(self):
- super(TestVolume, self).setUp()
- self.fake_data = copy.deepcopy(FAKE_DATA)
- self.volumes = {
- 'Volumes':
- [{'Driver': u'local',
- 'Labels': None,
- 'Mountpoint': '/var/lib/docker/volumes/nova_compute/_data',
- 'Name': 'nova_compute'},
- {'Driver': 'local',
- 'Labels': None,
- 'Mountpoint': '/var/lib/docker/volumes/mariadb/_data',
- 'Name': 'mariadb'}]
- }
-
- def test_create_volume(self):
- self.dw = get_DockerWorker({'name': 'rabbitmq',
- 'action': 'create_volume'})
- self.dw.dc.volumes.return_value = self.volumes
-
- self.dw.create_volume()
- self.dw.dc.volumes.assert_called_once_with()
- self.assertTrue(self.dw.changed)
- self.dw.dc.create_volume.assert_called_once_with(
- name='rabbitmq',
- driver='local')
-
- def test_create_volume_exists(self):
- self.dw = get_DockerWorker({'name': 'nova_compute',
- 'action': 'create_volume'})
- self.dw.dc.volumes.return_value = self.volumes
-
- self.dw.create_volume()
- self.dw.dc.volumes.assert_called_once_with()
- self.assertFalse(self.dw.changed)
-
- def test_remove_volume(self):
- self.dw = get_DockerWorker({'name': 'nova_compute',
- 'action': 'remove_volume'})
- self.dw.dc.volumes.return_value = self.volumes
-
- self.dw.remove_volume()
- self.assertTrue(self.dw.changed)
- self.dw.dc.remove_volume.assert_called_once_with(name='nova_compute')
-
- def test_remove_volume_not_exists(self):
- self.dw = get_DockerWorker({'name': 'rabbitmq',
- 'action': 'remove_volume'})
- self.dw.dc.volumes.return_value = self.volumes
-
- self.dw.remove_volume()
- self.assertFalse(self.dw.changed)
-
- def test_remove_volume_exception(self):
- resp = mock.MagicMock()
- resp.status_code = 409
- docker_except = docker_error.APIError('test error', resp)
- self.dw = get_DockerWorker({'name': 'nova_compute',
- 'action': 'remove_volume'})
- self.dw.dc.volumes.return_value = self.volumes
- self.dw.dc.remove_volume.side_effect = docker_except
-
- self.assertRaises(docker_error.APIError, self.dw.remove_volume)
- self.assertTrue(self.dw.changed)
- self.dw.module.fail_json.assert_called_once_with(
- failed=True,
- msg="Volume named 'nova_compute' is currently in-use"
- )
-
-
-class TestAttrComp(base.BaseTestCase):
-
- def setUp(self):
- super(TestAttrComp, self).setUp()
- self.fake_data = copy.deepcopy(FAKE_DATA)
-
- def test_compare_cap_add_neg(self):
- container_info = {'HostConfig': dict(CapAdd=['data'])}
- self.dw = get_DockerWorker({'cap_add': ['data']})
- self.assertFalse(self.dw.compare_cap_add(container_info))
-
- def test_compare_cap_add_pos(self):
- container_info = {'HostConfig': dict(CapAdd=['data1'])}
- self.dw = get_DockerWorker({'cap_add': ['data2']})
- self.assertTrue(self.dw.compare_cap_add(container_info))
-
- def test_compare_ipc_mode_neg(self):
- container_info = {'HostConfig': dict(IpcMode='data')}
- self.dw = get_DockerWorker({'ipc_mode': 'data'})
- self.assertFalse(self.dw.compare_ipc_mode(container_info))
-
- def test_compare_ipc_mode_pos(self):
- container_info = {'HostConfig': dict(IpcMode='data1')}
- self.dw = get_DockerWorker({'ipc_mode': 'data2'})
- self.assertTrue(self.dw.compare_ipc_mode(container_info))
-
- def test_compare_security_opt_neg(self):
- container_info = {'HostConfig': dict(SecurityOpt=['data'])}
- self.dw = get_DockerWorker({'security_opt': ['data']})
- self.assertFalse(self.dw.compare_security_opt(container_info))
-
- def test_compare_security_opt_pos(self):
- container_info = {'HostConfig': dict(SecurityOpt=['data1'])}
- self.dw = get_DockerWorker({'security_opt': ['data2']})
- self.assertTrue(self.dw.compare_security_opt(container_info))
-
- def test_compare_pid_mode_neg(self):
- container_info = {'HostConfig': dict(PidMode='host')}
- self.dw = get_DockerWorker({'pid_mode': 'host'})
- self.assertFalse(self.dw.compare_pid_mode(container_info))
-
- def test_compare_pid_mode_pos(self):
- container_info = {'HostConfig': dict(PidMode='host1')}
- self.dw = get_DockerWorker({'pid_mode': 'host2'})
- self.assertTrue(self.dw.compare_pid_mode(container_info))
-
- def test_compare_privileged_neg(self):
- container_info = {'HostConfig': dict(Privileged=True)}
- self.dw = get_DockerWorker({'privileged': True})
- self.assertFalse(self.dw.compare_privileged(container_info))
-
- def test_compare_privileged_pos(self):
- container_info = {'HostConfig': dict(Privileged=True)}
- self.dw = get_DockerWorker({'privileged': False})
- self.assertTrue(self.dw.compare_privileged(container_info))
-
- def test_compare_labels_neg(self):
- container_info = {'Config': dict(Labels={'kolla_version': '2.0.1'})}
- self.dw = get_DockerWorker({'labels': {'kolla_version': '2.0.1'}})
- self.dw.check_image = mock.MagicMock(return_value=dict(
- Labels={'kolla_version': '2.0.1'}))
- self.assertFalse(self.dw.compare_labels(container_info))
-
- def test_compare_labels_pos(self):
- container_info = {'Config': dict(Labels={'kolla_version': '1.0.1'})}
- self.dw = get_DockerWorker({'labels': {'kolla_version': '2.0.1'}})
- self.dw.check_image = mock.MagicMock(return_value=dict(
- Labels={'kolla_version': '1.0.1'}))
- self.assertTrue(self.dw.compare_labels(container_info))
-
- def test_compare_volumes_from_neg(self):
- container_info = {'HostConfig': dict(VolumesFrom=['777f7dc92da7'])}
- self.dw = get_DockerWorker({'volumes_from': ['777f7dc92da7']})
-
- self.assertFalse(self.dw.compare_volumes_from(container_info))
-
- def test_compare_volumes_from_post(self):
- container_info = {'HostConfig': dict(VolumesFrom=['777f7dc92da7'])}
- self.dw = get_DockerWorker({'volumes_from': ['ba8c0c54f0f2']})
-
- self.assertTrue(self.dw.compare_volumes_from(container_info))
-
- def test_compare_volumes_neg(self):
- container_info = {
- 'Config': dict(Volumes=['/var/log/kolla/']),
- 'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])}
- self.dw = get_DockerWorker(
- {'volumes': ['kolla_logs:/var/log/kolla/:rw']})
-
- self.assertFalse(self.dw.compare_volumes(container_info))
-
- def test_compare_volumes_pos(self):
- container_info = {
- 'Config': dict(Volumes=['/var/log/kolla/']),
- 'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])}
- self.dw = get_DockerWorker(
- {'volumes': ['/dev/:/dev/:rw']})
-
- self.assertTrue(self.dw.compare_volumes(container_info))
-
- def test_compare_environment_neg(self):
- container_info = {'Config': dict(
- Env=['KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
- 'KOLLA_BASE_DISTRO=ubuntu',
- 'KOLLA_INSTALL_TYPE=binary']
- )}
- self.dw = get_DockerWorker({
- 'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS',
- KOLLA_BASE_DISTRO='ubuntu',
- KOLLA_INSTALL_TYPE='binary')})
-
- self.assertFalse(self.dw.compare_environment(container_info))
-
- def test_compare_environment_pos(self):
- container_info = {'Config': dict(
- Env=['KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
- 'KOLLA_BASE_DISTRO=ubuntu',
- 'KOLLA_INSTALL_TYPE=binary']
- )}
- self.dw = get_DockerWorker({
- 'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS',
- KOLLA_BASE_DISTRO='centos',
- KOLLA_INSTALL_TYPE='binary')})
-
- self.assertTrue(self.dw.compare_environment(container_info))
diff --git a/tools/kolla-ansible b/tools/kolla-ansible
deleted file mode 100755
index 16891734b2..0000000000
--- a/tools/kolla-ansible
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/bin/bash
-#
-# This script can be used to interact with kolla via ansible.
-
-function find_base_dir {
- local real_path=$(python -c "import os;print(os.path.realpath('$0'))")
- local dir_name="$(dirname "$real_path")"
- if [[ ${dir_name} == "/usr/bin" ]]; then
- BASEDIR=/usr/share/kolla
- elif [[ ${dir_name} == "/usr/local/bin" ]]; then
- BASEDIR=/usr/local/share/kolla
- else
- BASEDIR="$(dirname ${dir_name})"
- fi
-}
-
-function process_cmd {
- echo "$ACTION : $CMD"
- $CMD
- if [[ $? -ne 0 ]]; then
- echo "Command failed $CMD"
- exit 1
- fi
-}
-
-function usage {
- cat < Specify path to ansible inventory file
- --playbook, -p Specify path to ansible playbook file
- --configdir Specify path to directory with globals.yml
- --key -k Specify path to ansible vault keyfile
- --help, -h Show this usage information
- --tags, -t Only run plays and tasks tagged with these values
- --extra, -e Set additional variables as key=value or YAML/JSON passed to ansible-playbook
- --passwords Specify path to the passwords file
- --verbose, -v Increase verbosity of ansible-playbook
-
-Commands:
- prechecks Do pre-deployment checks for hosts
- mariadb_recovery Recover a completely stopped mariadb cluster
- bootstrap-servers bootstrap servers with kolla deploy dependencies
- destroy Destroy Kolla containers, volumes and host configuration
- (--include-images to also destroy Kolla images)
- deploy Deploy and start all kolla containers
- deploy-bifrost Deploy and start bifrost container
- deploy-servers Enroll and deploy servers with bifrost
- post-deploy Do post deploy on deploy node
- pull Pull all images for containers (only pulls, no running container changes)
- reconfigure Reconfigure OpenStack service
- certificates Generate self-signed certificate for TLS *For Development Only*
- upgrade Upgrades existing OpenStack Environment
- genconfig Generate configuration files for enabled OpenStack services
-EOF
-}
-
-
-SHORT_OPTS="hi:p:t:k:e:v"
-LONG_OPTS="help,inventory:,playbook:,tags:,key:,extra:,verbose,configdir:,passwords:,yes-i-really-really-mean-it,include-images"
-ARGS=$(getopt -o "${SHORT_OPTS}" -l "${LONG_OPTS}" --name "$0" -- "$@") || { usage >&2; exit 2; }
-
-eval set -- "$ARGS"
-
-find_base_dir
-
-INVENTORY="${BASEDIR}/ansible/inventory/all-in-one"
-PLAYBOOK="${BASEDIR}/ansible/site.yml"
-VERBOSITY=
-EXTRA_OPTS=
-CONFIG_DIR="/etc/kolla"
-PASSWORDS_FILE="${CONFIG_DIR}/passwords.yml"
-DANGER_CONFIRM=
-INCLUDE_IMAGES=
-
-while [ "$#" -gt 0 ]; do
- case "$1" in
-
- (--inventory|-i)
- INVENTORY="$2"
- shift 2
- ;;
-
- (--playbook|-p)
- PLAYBOOK="$2"
- shift 2
- ;;
-
- (--tags|-t)
- EXTRA_OPTS="$EXTRA_OPTS --tags $2"
- shift 2
- ;;
-
- (--verbose|-v)
- VERBOSITY="$VERBOSITY --verbose"
- shift 1
- ;;
-
- (--configdir)
- CONFIG_DIR="$2"
- shift 2
- ;;
-
- (--yes-i-really-really-mean-it)
- DANGER_CONFIRM="$1"
- shift 1
- ;;
-
- (--include-images)
- INCLUDE_IMAGES="$1"
- shift 1
- ;;
-
- (--key|-k)
- VAULT_PASS_FILE="$2"
- EXTRA_OPTS="$EXTRA_OPTS --vault-password-file=$VAULT_PASS_FILE"
- shift 2
- ;;
-
- (--extra|-e)
- EXTRA_OPTS="$EXTRA_OPTS -e $2"
- shift 2
- ;;
- (--passwords)
- PASSWORDS_FILE="$2"
- shift 2
- ;;
- (--help|-h)
- usage
- shift
- exit 0
- ;;
-
- (--)
- shift
- break
- ;;
-
- (*)
- echo "error"
- exit 3
- ;;
-esac
-done
-
-case "$1" in
-
-(prechecks)
- ACTION="Pre-deployment checking"
- EXTRA_OPTS="$EXTRA_OPTS -e action=precheck"
- ;;
-(mariadb_recovery)
- ACTION="Attempting to restart mariadb cluster"
- EXTRA_OPTS="$EXTRA_OPTS -e action=deploy -e common_run=true"
- PLAYBOOK="${BASEDIR}/ansible/mariadb_recovery.yml"
- ;;
-(destroy)
- ACTION="Destroy Kolla containers, volumes and host configuration"
- PLAYBOOK="${BASEDIR}/ansible/destroy.yml"
- if [[ "${INCLUDE_IMAGES}" == "--include-images" ]]; then
- EXTRA_OPTS="$EXTRA_OPTS -e destroy_include_images=yes"
- fi
- if [[ "${DANGER_CONFIRM}" != "--yes-i-really-really-mean-it" ]]; then
- cat << EOF
-WARNING:
- This will PERMANENTLY DESTROY all deployed kolla containers, volumes and host configuration.
- There is no way to recover from this action. To confirm, please add the following option:
- --yes-i-really-really-mean-it
-EOF
- exit 1
- fi
- ;;
-(bootstrap-servers)
- ACTION="Bootstraping servers"
- PLAYBOOK="${BASEDIR}/ansible/kolla-host.yml"
- EXTRA_OPTS="$EXTRA_OPTS -e action=bootstrap-servers"
- ;;
-(deploy)
- ACTION="Deploying Playbooks"
- EXTRA_OPTS="$EXTRA_OPTS -e action=deploy"
- ;;
-(deploy-bifrost)
- ACTION="Deploying Bifrost"
- PLAYBOOK="${BASEDIR}/ansible/bifrost.yml"
- EXTRA_OPTS="$EXTRA_OPTS -e action=deploy"
- ;;
-(deploy-servers)
- ACTION="Deploying servers with bifrost"
- PLAYBOOK="${BASEDIR}/ansible/bifrost.yml"
- EXTRA_OPTS="$EXTRA_OPTS -e action=deploy-servers"
- ;;
-(post-deploy)
- ACTION="Post-Deploying Playbooks"
- PLAYBOOK="${BASEDIR}/ansible/post-deploy.yml"
- ;;
-(pull)
- ACTION="Pulling Docker images"
- EXTRA_OPTS="$EXTRA_OPTS -e action=pull"
- ;;
-(upgrade)
- ACTION="Upgrading OpenStack Environment"
- EXTRA_OPTS="$EXTRA_OPTS -e action=upgrade -e serial=30%"
- ;;
-(reconfigure)
- ACTION="Reconfigure OpenStack service"
- EXTRA_OPTS="$EXTRA_OPTS -e action=reconfigure -e serial=30%"
- ;;
-(certificates)
- ACTION="Generate TLS Certificates"
- PLAYBOOK="${BASEDIR}/ansible/certificates.yml"
- ;;
-(genconfig)
- ACTION="Generate configuration files for enabled OpenStack services"
- EXTRA_OPTS="$EXTRA_OPTS -e action=config"
- ;;
-(*) usage
- exit 0
- ;;
-esac
-
-CONFIG_OPTS="-e @${CONFIG_DIR}/globals.yml -e @${PASSWORDS_FILE} -e CONFIG_DIR=${CONFIG_DIR}"
-CMD="ansible-playbook -i $INVENTORY $CONFIG_OPTS $EXTRA_OPTS $PLAYBOOK $VERBOSITY"
-process_cmd