Remove Ansible from Kolla
Change-Id: I5396e4b9927d3e65b6aa4185238a43a4765a996b
This commit is contained in:
parent
0963c2255a
commit
3fcef82946
@ -1,96 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2015 Sam Yaple
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import ConfigParser
|
||||
import inspect
|
||||
import os
|
||||
from six import StringIO
|
||||
|
||||
from ansible.plugins import action
|
||||
|
||||
|
||||
class ActionModule(action.ActionBase):
    """Action plugin that merges INI-style config fragments.

    Each entry in the task's ``sources`` list is read on the control
    node, rendered through the Jinja2 templar, and merged into a single
    ConfigParser instance.  The merged result is uploaded to the remote
    node and handed off to the stock ``copy`` module, so ``copy``'s own
    arguments (dest, owner, mode, ...) are honoured.
    """

    # Tell Ansible this action transfers files to the remote host.
    TRANSFERS_FILES = True

    def read_config(self, source, config):
        """Template *source* and merge it into *config* in place.

        :param source: path to a candidate config fragment on the
            control node.  Unreadable/missing files are skipped silently,
            which makes optional override files cheap to support.
        :param config: a ConfigParser.ConfigParser accumulating all
            fragments; later sources override earlier ones.
        """
        # Only use config if present
        if os.access(source, os.R_OK):
            with open(source, 'r') as f:
                template_data = f.read()
            # Render Jinja2 expressions with whatever variables are
            # currently active on the templar (set up by run()).
            result = self._templar.template(template_data)
            fakefile = StringIO(result)
            # readfp lets ConfigParser consume the in-memory rendered
            # text as though it were a file on disk.
            config.readfp(fakefile)
            fakefile.close()

    def run(self, tmp=None, task_vars=None):
        """Merge all sources and delegate the upload to the copy module.

        :param tmp: remote temporary directory, created here if absent.
        :param task_vars: variables available to the task.
        :returns: the result dict produced by the ``copy`` module,
            merged into the base action result.
        """

        if task_vars is None:
            task_vars = dict()
        result = super(ActionModule, self).run(tmp, task_vars)

        # NOTE(jeffrey4l): Ansible 2.1 add a remote_user param to the
        # _make_tmp_path function. inspect the number of the args here. In
        # this way, ansible 2.0 and ansible 2.1 are both supported
        make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0]
        if not tmp and len(make_tmp_path_args) == 1:
            tmp = self._make_tmp_path()
        if not tmp and len(make_tmp_path_args) == 2:
            remote_user = (task_vars.get('ansible_user')
                           or self._play_context.remote_user)
            tmp = self._make_tmp_path(remote_user)

        sources = self._task.args.get('sources', None)
        extra_vars = self._task.args.get('vars', list())

        # A single string source is normalized to a one-element list.
        if not isinstance(sources, list):
            sources = [sources]

        # Task-supplied 'vars' take precedence over the ambient task_vars
        # while the fragments are templated.
        temp_vars = task_vars.copy()
        temp_vars.update(extra_vars)

        config = ConfigParser.ConfigParser()
        # Swap the templar's variables for the merge, then restore the
        # originals afterwards so other tasks are unaffected.
        old_vars = self._templar._available_variables
        self._templar.set_available_variables(temp_vars)

        for source in sources:
            self.read_config(source, config)

        self._templar.set_available_variables(old_vars)
        # Dump configparser to string via an emulated file

        fakefile = StringIO()
        config.write(fakefile)

        # Stage the merged content on the remote node under tmp/src,
        # which is where the copy module expects its 'src' payload.
        remote_path = self._connection._shell.join_path(tmp, 'src')
        xfered = self._transfer_data(remote_path, fakefile.getvalue())
        fakefile.close()

        # Forward the remaining task args to 'copy', stripping the two
        # arguments that are specific to this action plugin.
        new_module_args = self._task.args.copy()
        new_module_args.pop('vars', None)
        new_module_args.pop('sources', None)

        new_module_args.update(
            dict(
                src=xfered
            )
        )

        result.update(self._execute_module(module_name='copy',
                                           module_args=new_module_args,
                                           task_vars=task_vars,
                                           tmp=tmp))
        return result
|
@ -1,96 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2015 Sam Yaple
|
||||
# Copyright 2016 intel
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
import os
|
||||
|
||||
from yaml import dump
|
||||
from yaml import load
|
||||
try:
|
||||
from yaml import CDumper as Dumper # noqa: F401
|
||||
from yaml import CLoader as Loader # noqa: F401
|
||||
except ImportError:
|
||||
from yaml import Dumper # noqa: F401
|
||||
from yaml import Loader # noqa: F401
|
||||
|
||||
|
||||
from ansible.plugins import action
|
||||
|
||||
|
||||
class ActionModule(action.ActionBase):
    """Action plugin that merges YAML config fragments.

    Each entry in the task's ``sources`` list is read on the control
    node, rendered through the Jinja2 templar, parsed as YAML, and
    merged (top-level dict update, later sources win) into one mapping.
    The merged document is uploaded to the remote node and handed off
    to the stock ``copy`` module.
    """

    # Tell Ansible this action transfers files to the remote host.
    TRANSFERS_FILES = True

    def read_config(self, source):
        """Template *source* and parse it as YAML.

        :param source: path to a candidate YAML fragment on the control
            node.  Unreadable/missing files are skipped silently, which
            makes optional override files cheap to support.
        :returns: the parsed mapping, or {} when the file is absent,
            unreadable, or renders to an empty document.
        """
        result = None
        # Only use config if present
        if os.access(source, os.R_OK):
            with open(source, 'r') as f:
                template_data = f.read()
            template_data = self._templar.template(template_data)
            # Pass the Loader explicitly: the module imports
            # CLoader-as-Loader (with a pure-Python fallback) precisely
            # for this call; bare yaml.load() would ignore it.
            result = load(template_data, Loader=Loader)
        return result or {}

    def run(self, tmp=None, task_vars=None):
        """Merge all sources and delegate the upload to the copy module.

        :param tmp: remote temporary directory, created here if absent.
        :param task_vars: variables available to the task.
        :returns: the result dict produced by the ``copy`` module,
            merged into the base action result.
        """
        if task_vars is None:
            task_vars = dict()
        result = super(ActionModule, self).run(tmp, task_vars)

        # NOTE(jeffrey4l): Ansible 2.1 add a remote_user param to the
        # _make_tmp_path function. inspect the number of the args here. In
        # this way, ansible 2.0 and ansible 2.1 are both supported
        make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0]
        if not tmp and len(make_tmp_path_args) == 1:
            tmp = self._make_tmp_path()
        if not tmp and len(make_tmp_path_args) == 2:
            remote_user = (task_vars.get('ansible_user')
                           or self._play_context.remote_user)
            tmp = self._make_tmp_path(remote_user)
        # save template args.
        extra_vars = self._task.args.get('vars', list())
        old_vars = self._templar._available_variables

        # Task-supplied 'vars' take precedence over the ambient task_vars
        # while the fragments are templated.
        temp_vars = task_vars.copy()
        temp_vars.update(extra_vars)
        self._templar.set_available_variables(temp_vars)

        output = {}
        sources = self._task.args.get('sources', None)
        # A single string source is normalized to a one-element list.
        if not isinstance(sources, list):
            sources = [sources]
        for source in sources:
            output.update(self.read_config(source))

        # restore original vars
        self._templar.set_available_variables(old_vars)

        # Stage the merged document on the remote node under tmp/src,
        # which is where the copy module expects its 'src' payload.
        # Pass the Dumper explicitly for the same reason as the Loader
        # in read_config().
        remote_path = self._connection._shell.join_path(tmp, 'src')
        xfered = self._transfer_data(remote_path,
                                     dump(output,
                                          Dumper=Dumper,
                                          default_flow_style=False))
        # Forward the remaining task args to 'copy', stripping the two
        # arguments specific to this action plugin.  'vars' must be
        # removed as well as 'sources' (the merge_configs plugin does
        # the same); the copy module does not accept either.
        new_module_args = self._task.args.copy()
        new_module_args.update(
            dict(
                src=xfered
            )
        )
        new_module_args.pop('vars', None)
        new_module_args.pop('sources', None)
        result.update(self._execute_module(module_name='copy',
                                           module_args=new_module_args,
                                           task_vars=task_vars,
                                           tmp=tmp))
        return result
|
@ -1,5 +0,0 @@
|
||||
---
# Playbook: deploy the bifrost role on the deployment host itself.
# Tagged 'bifrost' so it can be targeted or skipped with --tags/--skip-tags.
- hosts: localhost
  roles:
    - { role: bifrost,
        tags: bifrost}
|
@ -1,4 +0,0 @@
|
||||
---
# Playbook: run the certificates role on every inventory host.
- hosts: all
  roles:
    - certificates
|
@ -1,4 +0,0 @@
|
||||
---
# Playbook: run the destroy role on every inventory host.
- hosts: all
  roles:
    - destroy
|
@ -1,427 +0,0 @@
|
||||
---
|
||||
# The options in this file can be overridden in 'globals.yml'
|
||||
|
||||
# The "temp" files that are created before merge need to stay persistent due
|
||||
# to the fact that ansible will register a "change" if it has to create them
|
||||
# again. Persistent files allow for idempotency
|
||||
container_config_directory: "/var/lib/kolla/config_files"
|
||||
|
||||
# The directory of custom config files to merge into Kolla's config files
|
||||
node_custom_config: "/etc/kolla/config"
|
||||
|
||||
# The project to generate configuration files for
|
||||
project: ""
|
||||
|
||||
# The directory to store the config files on the destination node
|
||||
node_config_directory: "/etc/kolla/{{ project }}"
|
||||
|
||||
|
||||
###################
|
||||
# Kolla options
|
||||
###################
|
||||
|
||||
# Which orchestration engine to use. Valid options are [ ANSIBLE, KUBERNETES ]
|
||||
orchestration_engine: "ANSIBLE"
|
||||
|
||||
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
|
||||
config_strategy: "COPY_ALWAYS"
|
||||
|
||||
# Valid options are [ centos, oraclelinux, ubuntu ]
|
||||
kolla_base_distro: "centos"
|
||||
# Valid options are [ binary, source ]
|
||||
kolla_install_type: "binary"
|
||||
|
||||
kolla_internal_vip_address: "{{ kolla_internal_address }}"
|
||||
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
|
||||
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
|
||||
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_external_vip_address == kolla_internal_vip_address else kolla_external_vip_address }}"
|
||||
|
||||
kolla_enable_sanity_checks: "no"
|
||||
|
||||
kolla_enable_sanity_keystone: "{{ kolla_enable_sanity_checks }}"
|
||||
kolla_enable_sanity_glance: "{{ kolla_enable_sanity_checks }}"
|
||||
kolla_enable_sanity_cinder: "{{ kolla_enable_sanity_checks }}"
|
||||
kolla_enable_sanity_swift: "{{ kolla_enable_sanity_checks }}"
|
||||
|
||||
|
||||
####################
|
||||
# kolla-kubernetes
|
||||
####################
|
||||
# By default, Kolla API services bind to the network address assigned
|
||||
# to the api_interface. Allow the bind address to be an override. In
|
||||
# some cases (Kubernetes), the api_interface address is not known
|
||||
# until container runtime, and thus it is necessary to bind to all
|
||||
# interfaces "0.0.0.0". When used outside of Kubernetes, binding to
|
||||
# all interfaces may present a security issue, and thus is not
|
||||
# recommended.
|
||||
api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}"
|
||||
|
||||
####################
|
||||
# Database options
|
||||
####################
|
||||
database_address: "{{ kolla_internal_fqdn }}"
|
||||
database_user: "root"
|
||||
database_port: "3306"
|
||||
|
||||
|
||||
####################
|
||||
# Docker options
|
||||
####################
|
||||
docker_registry_email:
|
||||
docker_registry:
|
||||
docker_namespace: "kolla"
|
||||
docker_registry_username:
|
||||
|
||||
# Valid options are [ never, on-failure, always, unless-stopped ]
|
||||
docker_restart_policy: "unless-stopped"
|
||||
|
||||
# '0' means unlimited retries
|
||||
docker_restart_policy_retry: "10"
|
||||
|
||||
# Common options used throughout docker
|
||||
docker_common_options:
|
||||
auth_email: "{{ docker_registry_email }}"
|
||||
auth_password: "{{ docker_registry_password }}"
|
||||
auth_registry: "{{ docker_registry }}"
|
||||
auth_username: "{{ docker_registry_username }}"
|
||||
environment:
|
||||
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
|
||||
restart_policy: "{{ docker_restart_policy }}"
|
||||
restart_retries: "{{ docker_restart_policy_retry }}"
|
||||
|
||||
|
||||
####################
|
||||
# keepalived options
|
||||
####################
|
||||
# Arbitrary unique number from 0..255
|
||||
keepalived_virtual_router_id: "51"
|
||||
|
||||
|
||||
####################
|
||||
# Networking options
|
||||
####################
|
||||
network_interface: "eth0"
|
||||
neutron_external_interface: "eth1"
|
||||
kolla_external_vip_interface: "{{ network_interface }}"
|
||||
api_interface: "{{ network_interface }}"
|
||||
storage_interface: "{{ network_interface }}"
|
||||
cluster_interface: "{{ network_interface }}"
|
||||
tunnel_interface: "{{ network_interface }}"
|
||||
bifrost_network_interface: "{{ network_interface }}"
|
||||
tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + tunnel_interface]['ipv4']['address'] }}"
|
||||
|
||||
# Valid options are [ openvswitch, linuxbridge, sfc ]
|
||||
neutron_plugin_agent: "openvswitch"
|
||||
|
||||
# The default ports used by each service.
|
||||
aodh_api_port: "8042"
|
||||
|
||||
barbican_api_port: "9311"
|
||||
|
||||
ceilometer_api_port: "8777"
|
||||
|
||||
congress_api_port: "1789"
|
||||
|
||||
cloudkitty_api_port: "8889"
|
||||
|
||||
designate_api_port: "9001"
|
||||
|
||||
iscsi_port: "3260"
|
||||
|
||||
gnocchi_api_port: "8041"
|
||||
|
||||
mariadb_port: "{{ database_port }}"
|
||||
mariadb_wsrep_port: "4567"
|
||||
mariadb_ist_port: "4568"
|
||||
mariadb_sst_port: "4444"
|
||||
|
||||
rabbitmq_port: "5672"
|
||||
rabbitmq_management_port: "15672"
|
||||
rabbitmq_cluster_port: "25672"
|
||||
rabbitmq_epmd_port: "4369"
|
||||
|
||||
mongodb_port: "27017"
|
||||
mongodb_web_port: "28017"
|
||||
|
||||
haproxy_stats_port: "1984"
|
||||
|
||||
keystone_public_port: "5000"
|
||||
keystone_admin_port: "35357"
|
||||
keystone_ssh_port: "8023"
|
||||
|
||||
glance_api_port: "9292"
|
||||
glance_registry_port: "9191"
|
||||
|
||||
nova_api_port: "8774"
|
||||
nova_metadata_port: "8775"
|
||||
nova_novncproxy_port: "6080"
|
||||
nova_spicehtml5proxy_port: "6082"
|
||||
|
||||
neutron_server_port: "9696"
|
||||
|
||||
cinder_api_port: "8776"
|
||||
|
||||
memcached_port: "11211"
|
||||
|
||||
swift_proxy_server_port: "8080"
|
||||
swift_object_server_port: "6000"
|
||||
swift_account_server_port: "6001"
|
||||
swift_container_server_port: "6002"
|
||||
swift_rsync_port: "10873"
|
||||
|
||||
sahara_api_port: "8386"
|
||||
|
||||
heat_api_port: "8004"
|
||||
heat_api_cfn_port: "8000"
|
||||
|
||||
murano_api_port: "8082"
|
||||
|
||||
ironic_api_port: "6385"
|
||||
|
||||
magnum_api_port: "9511"
|
||||
|
||||
rgw_port: "6780"
|
||||
|
||||
mistral_api_port: "8989"
|
||||
|
||||
kibana_server_port: "5601"
|
||||
|
||||
elasticsearch_port: "9200"
|
||||
|
||||
manila_api_port: "8786"
|
||||
|
||||
watcher_api_port: "9322"
|
||||
|
||||
influxdb_admin_port: "8083"
|
||||
influxdb_http_port: "8086"
|
||||
|
||||
senlin_api_port: "8778"
|
||||
|
||||
etcd_client_port: "2379"
|
||||
etcd_peer_port: "2380"
|
||||
|
||||
kuryr_port: "23750"
|
||||
|
||||
searchlight_api_port: "9393"
|
||||
|
||||
grafana_server_port: "3000"
|
||||
|
||||
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
|
||||
internal_protocol: "http"
|
||||
admin_protocol: "http"
|
||||
|
||||
####################
|
||||
# OpenStack options
|
||||
####################
|
||||
openstack_release: "3.0.0"
|
||||
openstack_logging_debug: "False"
|
||||
|
||||
openstack_region_name: "RegionOne"
|
||||
|
||||
openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min if orchestration_engine == 'ANSIBLE' else '1'}}"
|
||||
|
||||
# Optionally allow Kolla to set sysctl values
|
||||
set_sysctl: "yes"
|
||||
|
||||
# Valid options are [ novnc, spice ]
|
||||
nova_console: "novnc"
|
||||
|
||||
# OpenStack authentication string. You should only need to override these if you
|
||||
# are changing the admin tenant/project or user.
|
||||
openstack_auth:
|
||||
auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
|
||||
username: "admin"
|
||||
password: "{{ keystone_admin_password }}"
|
||||
project_name: "admin"
|
||||
|
||||
# These roles are required for Kolla to be operational; however, a savvy deployer
|
||||
# could disable some of these required roles and run their own services.
|
||||
enable_glance: "yes"
|
||||
enable_haproxy: "yes"
|
||||
enable_keystone: "yes"
|
||||
enable_mariadb: "yes"
|
||||
enable_memcached: "yes"
|
||||
enable_neutron: "yes"
|
||||
enable_nova: "yes"
|
||||
enable_rabbitmq: "yes"
|
||||
|
||||
# Additional optional OpenStack features and services are specified here
|
||||
enable_aodh: "no"
|
||||
enable_barbican: "no"
|
||||
enable_cadf_notifications: "no"
|
||||
enable_ceilometer: "no"
|
||||
enable_central_logging: "no"
|
||||
enable_ceph: "no"
|
||||
enable_ceph_rgw: "no"
|
||||
enable_cinder: "no"
|
||||
enable_cinder_backend_iscsi: "no"
|
||||
enable_cinder_backend_lvm: "no"
|
||||
enable_cinder_backend_nfs: "no"
|
||||
enable_cloudkitty: "no"
|
||||
enable_congress: "no"
|
||||
enable_etcd: "no"
|
||||
enable_designate: "no"
|
||||
enable_gnocchi: "no"
|
||||
enable_grafana: "no"
|
||||
enable_heat: "yes"
|
||||
enable_horizon: "yes"
|
||||
enable_influxdb: "no"
|
||||
enable_ironic: "no"
|
||||
enable_iscsid: "{{ enable_cinder_backend_iscsi | bool or enable_cinder_backend_lvm | bool or enable_ironic | bool }}"
|
||||
enable_kuryr: "no"
|
||||
enable_magnum: "no"
|
||||
enable_manila: "no"
|
||||
enable_manila_backend_generic: "no"
|
||||
enable_manila_backend_hnas: "no"
|
||||
enable_mistral: "no"
|
||||
enable_mongodb: "no"
|
||||
enable_multipathd: "no"
|
||||
enable_murano: "no"
|
||||
enable_neutron_vpnaas: "no"
|
||||
enable_neutron_dvr: "no"
|
||||
enable_neutron_lbaas: "no"
|
||||
enable_neutron_qos: "no"
|
||||
enable_neutron_agent_ha: "no"
|
||||
enable_rally: "no"
|
||||
enable_sahara: "no"
|
||||
enable_searchlight: "no"
|
||||
enable_senlin: "no"
|
||||
enable_swift: "no"
|
||||
enable_telegraf: "no"
|
||||
enable_tempest: "no"
|
||||
enable_vmtp: "no"
|
||||
enable_watcher: "no"
|
||||
|
||||
ironic_keystone_user: "ironic"
|
||||
neutron_keystone_user: "neutron"
|
||||
nova_keystone_user: "nova"
|
||||
|
||||
# Nova fake driver and the number of fake driver per compute node
|
||||
enable_nova_fake: "no"
|
||||
num_nova_fake_per_node: 5
|
||||
|
||||
# Monitoring options are specified here
|
||||
enable_collectd: "no"
|
||||
|
||||
####################
|
||||
# Logging options
|
||||
####################
|
||||
|
||||
elasticsearch_address: "{{ kolla_internal_vip_address }}"
|
||||
elasticsearch_protocol: "{{ internal_protocol }}"
|
||||
|
||||
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool else 'no' }}"
|
||||
enable_kibana: "{{ 'yes' if enable_central_logging | bool else 'no' }}"
|
||||
|
||||
####################
|
||||
# RabbitMQ options
|
||||
####################
|
||||
rabbitmq_user: "openstack"
|
||||
rabbitmq_version: "rabbitmq_server-3.6/plugins/rabbitmq_clusterer-3.6.x.ez/rabbitmq_clusterer-3.6.x-667f92b0/ebin"
|
||||
|
||||
####################
|
||||
# HAProxy options
|
||||
####################
|
||||
haproxy_user: "openstack"
|
||||
haproxy_enable_external_vip: "{{ 'no' if kolla_external_vip_address == kolla_internal_vip_address else 'yes' }}"
|
||||
kolla_enable_tls_external: "no"
|
||||
kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
|
||||
kolla_external_fqdn_cacert: "{{ node_config_directory }}/certificates/haproxy-ca.crt"
|
||||
|
||||
|
||||
####################
|
||||
# Kibana options
|
||||
####################
|
||||
kibana_user: "kibana"
|
||||
|
||||
|
||||
####################
|
||||
# Keystone options
|
||||
####################
|
||||
keystone_admin_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3"
|
||||
keystone_internal_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3"
|
||||
keystone_public_url: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ keystone_public_port }}/v3"
|
||||
|
||||
# Valid options are [ uuid, fernet ]
|
||||
keystone_token_provider: "uuid"
|
||||
fernet_token_expiry: 86400
|
||||
|
||||
|
||||
#######################
|
||||
# Glance options
|
||||
#######################
|
||||
glance_backend_file: "{{ not enable_ceph | bool }}"
|
||||
glance_backend_ceph: "{{ enable_ceph }}"
|
||||
|
||||
|
||||
#######################
|
||||
# Ceilometer options
|
||||
#######################
|
||||
# Valid options are [ mongodb, mysql, gnocchi ]
|
||||
ceilometer_database_type: "mongodb"
|
||||
|
||||
|
||||
#################
|
||||
# Gnocchi options
|
||||
#################
|
||||
# Valid options are [ file, ceph ]
|
||||
gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}"
|
||||
|
||||
|
||||
#################################
|
||||
# Cinder options
|
||||
#################################
|
||||
cinder_backend_ceph: "{{ enable_ceph }}"
|
||||
cinder_volume_group: "cinder-volumes"
|
||||
|
||||
#######################
|
||||
# Cloudkitty options
|
||||
#######################
|
||||
# Valid options are [ ceilometer, gnocchi ]
|
||||
cloudkitty_collector_backend: "ceilometer"
|
||||
|
||||
|
||||
#######################
|
||||
# Nova options
|
||||
#######################
|
||||
nova_backend_ceph: "{{ enable_ceph }}"
|
||||
nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
|
||||
|
||||
|
||||
#######################
|
||||
# Horizon options
|
||||
#######################
|
||||
horizon_backend_database: "no"
|
||||
|
||||
###################
|
||||
# Ceph options
|
||||
###################
|
||||
# Ceph can be setup with a caching to improve performance. To use the cache you
|
||||
# must provide separate disks than those for the OSDs
|
||||
ceph_enable_cache: "no"
|
||||
# Valid options are [ forward, none, writeback ]
|
||||
ceph_cache_mode: "writeback"
|
||||
|
||||
# Valid options are [ ext4, btrfs, xfs ]
|
||||
ceph_osd_filesystem: "xfs"
|
||||
|
||||
# Set to 'yes-i-really-really-mean-it' to force wipe disks with existing partitions for OSDs. Only
|
||||
# set if you understand the consequences!
|
||||
ceph_osd_wipe_disk: ""
|
||||
|
||||
# These are /etc/fstab options. Comma separated, no spaces (see fstab(8))
|
||||
ceph_osd_mount_options: "defaults,noatime"
|
||||
|
||||
# A requirement for using the erasure-coded pools is you must setup a cache tier
|
||||
# Valid options are [ erasure, replicated ]
|
||||
ceph_pool_type: "replicated"
|
||||
|
||||
ceph_cinder_pool_name: "volumes"
|
||||
ceph_cinder_backup_pool_name: "backups"
|
||||
ceph_glance_pool_name: "images"
|
||||
ceph_gnocchi_pool_name: "gnocchi"
|
||||
ceph_nova_pool_name: "vms"
|
||||
|
||||
ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
|
||||
ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}"
|
||||
ceph_cache_rule: "cache host firstn"
|
@ -1,423 +0,0 @@
|
||||
# These initial groups are the only groups required to be modified. The
|
||||
# additional groups are for more control of the environment.
|
||||
[control]
|
||||
localhost ansible_connection=local
|
||||
|
||||
[network]
|
||||
localhost ansible_connection=local
|
||||
|
||||
[compute]
|
||||
localhost ansible_connection=local
|
||||
|
||||
[storage]
|
||||
localhost ansible_connection=local
|
||||
|
||||
[monitoring]
|
||||
localhost ansible_connection=local
|
||||
|
||||
# You can explicitly specify which hosts run each project by updating the
|
||||
# groups in the sections below. Common services are grouped together.
|
||||
[collectd:children]
|
||||
compute
|
||||
|
||||
[baremetal:children]
|
||||
control
|
||||
|
||||
[grafana:children]
|
||||
monitoring
|
||||
|
||||
[etcd:children]
|
||||
control
|
||||
|
||||
[kibana:children]
|
||||
control
|
||||
|
||||
[telegraf:children]
|
||||
compute
|
||||
control
|
||||
monitoring
|
||||
network
|
||||
storage
|
||||
|
||||
[elasticsearch:children]
|
||||
control
|
||||
|
||||
[haproxy:children]
|
||||
network
|
||||
|
||||
[mariadb:children]
|
||||
control
|
||||
|
||||
[rabbitmq:children]
|
||||
control
|
||||
|
||||
[mongodb:children]
|
||||
control
|
||||
|
||||
[keystone:children]
|
||||
control
|
||||
|
||||
[glance:children]
|
||||
control
|
||||
|
||||
[nova:children]
|
||||
control
|
||||
|
||||
[neutron:children]
|
||||
network
|
||||
|
||||
[cinder:children]
|
||||
control
|
||||
|
||||
[cloudkitty:children]
|
||||
control
|
||||
|
||||
[memcached:children]
|
||||
control
|
||||
|
||||
[horizon:children]
|
||||
control
|
||||
|
||||
[swift:children]
|
||||
control
|
||||
|
||||
[barbican:children]
|
||||
control
|
||||
|
||||
[heat:children]
|
||||
control
|
||||
|
||||
[murano:children]
|
||||
control
|
||||
|
||||
[ceph:children]
|
||||
control
|
||||
|
||||
[ironic:children]
|
||||
control
|
||||
|
||||
[influxdb:children]
|
||||
monitoring
|
||||
|
||||
[magnum:children]
|
||||
control
|
||||
|
||||
[sahara:children]
|
||||
control
|
||||
|
||||
[mistral:children]
|
||||
control
|
||||
|
||||
[manila:children]
|
||||
control
|
||||
|
||||
[gnocchi:children]
|
||||
control
|
||||
|
||||
[ceilometer:children]
|
||||
control
|
||||
|
||||
[aodh:children]
|
||||
control
|
||||
|
||||
[congress:children]
|
||||
control
|
||||
|
||||
# Tempest
|
||||
[tempest:children]
|
||||
control
|
||||
|
||||
[senlin:children]
|
||||
control
|
||||
|
||||
[vmtp:children]
|
||||
control
|
||||
|
||||
[watcher:children]
|
||||
control
|
||||
|
||||
[rally:children]
|
||||
control
|
||||
|
||||
[searchlight:children]
|
||||
control
|
||||
|
||||
[designate:children]
|
||||
control
|
||||
|
||||
# Additional control implemented here. These groups allow you to control which
|
||||
# services run on which hosts at a per-service level.
|
||||
#
|
||||
# Word of caution: Some services are required to run on the same host to
|
||||
# function appropriately. For example, neutron-metadata-agent must run on the
|
||||
# same host as the l3-agent and (depending on configuration) the dhcp-agent.
|
||||
|
||||
# Glance
|
||||
[glance-api:children]
|
||||
glance
|
||||
|
||||
[glance-registry:children]
|
||||
glance
|
||||
|
||||
# Nova
|
||||
[nova-api:children]
|
||||
nova
|
||||
|
||||
[nova-conductor:children]
|
||||
nova
|
||||
|
||||
[nova-consoleauth:children]
|
||||
nova
|
||||
|
||||
[nova-novncproxy:children]
|
||||
nova
|
||||
|
||||
[nova-scheduler:children]
|
||||
nova
|
||||
|
||||
[nova-spicehtml5proxy:children]
|
||||
nova
|
||||
|
||||
[nova-compute-ironic:children]
|
||||
nova
|
||||
|
||||
# Neutron
|
||||
[neutron-server:children]
|
||||
control
|
||||
|
||||
[neutron-dhcp-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-l3-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-lbaas-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-metadata-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-vpnaas-agent:children]
|
||||
neutron
|
||||
|
||||
# Ceph
|
||||
[ceph-mon:children]
|
||||
ceph
|
||||
|
||||
[ceph-rgw:children]
|
||||
ceph
|
||||
|
||||
[ceph-osd:children]
|
||||
storage
|
||||
|
||||
# Cinder
|
||||
[cinder-api:children]
|
||||
cinder
|
||||
|
||||
[cinder-backup:children]
|
||||
storage
|
||||
|
||||
[cinder-scheduler:children]
|
||||
cinder
|
||||
|
||||
[cinder-volume:children]
|
||||
storage
|
||||
|
||||
# Cloudkitty
|
||||
[cloudkitty-api:children]
|
||||
cloudkitty
|
||||
|
||||
[cloudkitty-processor:children]
|
||||
cloudkitty
|
||||
|
||||
# iSCSI
|
||||
[iscsid:children]
|
||||
compute
|
||||
storage
|
||||
ironic-conductor
|
||||
|
||||
[tgtd:children]
|
||||
storage
|
||||
|
||||
# Manila
|
||||
[manila-api:children]
|
||||
manila
|
||||
|
||||
[manila-scheduler:children]
|
||||
manila
|
||||
|
||||
[manila-share:children]
|
||||
network
|
||||
|
||||
[manila-data:children]
|
||||
manila
|
||||
|
||||
# Swift
|
||||
[swift-proxy-server:children]
|
||||
swift
|
||||
|
||||
[swift-account-server:children]
|
||||
storage
|
||||
|
||||
[swift-container-server:children]
|
||||
storage
|
||||
|
||||
[swift-object-server:children]
|
||||
storage
|
||||
|
||||
# Barbican
|
||||
[barbican-api:children]
|
||||
barbican
|
||||
|
||||
[barbican-keystone-listener:children]
|
||||
barbican
|
||||
|
||||
[barbican-worker:children]
|
||||
barbican
|
||||
|
||||
# Heat
|
||||
[heat-api:children]
|
||||
heat
|
||||
|
||||
[heat-api-cfn:children]
|
||||
heat
|
||||
|
||||
[heat-engine:children]
|
||||
heat
|
||||
|
||||
# Murano
|
||||
[murano-api:children]
|
||||
murano
|
||||
|
||||
[murano-engine:children]
|
||||
murano
|
||||
|
||||
# Ironic
|
||||
[ironic-api:children]
|
||||
ironic
|
||||
|
||||
[ironic-conductor:children]
|
||||
ironic
|
||||
|
||||
[ironic-inspector:children]
|
||||
ironic
|
||||
|
||||
[ironic-pxe:children]
|
||||
ironic
|
||||
|
||||
# Magnum
|
||||
[magnum-api:children]
|
||||
magnum
|
||||
|
||||
[magnum-conductor:children]
|
||||
magnum
|
||||
|
||||
# Mistral
|
||||
[mistral-api:children]
|
||||
mistral
|
||||
|
||||
[mistral-executor:children]
|
||||
mistral
|
||||
|
||||
[mistral-engine:children]
|
||||
mistral
|
||||
|
||||
# Aodh
|
||||
[aodh-api:children]
|
||||
aodh
|
||||
|
||||
[aodh-evaluator:children]
|
||||
aodh
|
||||
|
||||
[aodh-listener:children]
|
||||
aodh
|
||||
|
||||
[aodh-notifier:children]
|
||||
aodh
|
||||
|
||||
# Gnocchi
|
||||
[gnocchi-api:children]
|
||||
gnocchi
|
||||
|
||||
[gnocchi-statsd:children]
|
||||
gnocchi
|
||||
|
||||
[gnocchi-metricd:children]
|
||||
gnocchi
|
||||
|
||||
# Sahara
|
||||
[sahara-api:children]
|
||||
sahara
|
||||
|
||||
[sahara-engine:children]
|
||||
sahara
|
||||
|
||||
# Ceilometer
|
||||
[ceilometer-api:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-central:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-notification:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-collector:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-compute:children]
|
||||
compute
|
||||
|
||||
# Congress
|
||||
[congress-api:children]
|
||||
congress
|
||||
|
||||
[congress-datasource:children]
|
||||
congress
|
||||
|
||||
[congress-policy-engine:children]
|
||||
congress
|
||||
|
||||
# Multipathd
|
||||
[multipathd:children]
|
||||
compute
|
||||
|
||||
# Watcher
|
||||
[watcher-api:children]
|
||||
watcher
|
||||
|
||||
[watcher-engine:children]
|
||||
watcher
|
||||
|
||||
[watcher-applier:children]
|
||||
watcher
|
||||
|
||||
# Senlin
|
||||
[senlin-api:children]
|
||||
senlin
|
||||
|
||||
[senlin-engine:children]
|
||||
senlin
|
||||
|
||||
# Searchlight
|
||||
[searchlight-api:children]
|
||||
searchlight
|
||||
|
||||
[searchlight-listener:children]
|
||||
searchlight
|
||||
|
||||
# Designate
|
||||
[designate-api:children]
|
||||
designate
|
||||
|
||||
[designate-central:children]
|
||||
designate
|
||||
|
||||
[designate-mdns:children]
|
||||
designate
|
||||
|
||||
[designate-pool-manager:children]
|
||||
designate
|
||||
|
||||
[designate-sink:children]
|
||||
designate
|
@ -1,438 +0,0 @@
|
||||
# These initial groups are the only groups required to be modified. The
|
||||
# additional groups are for more control of the environment.
|
||||
[control]
|
||||
# These hostnames must be resolvable from your deployment host
|
||||
control01
|
||||
control02
|
||||
control03
|
||||
|
||||
# The above can also be specified as follows:
|
||||
#control[01:03] ansible_user=kolla
|
||||
|
||||
# The network nodes are where your l3-agent and loadbalancers will run
|
||||
# This can be the same as a host in the control group
|
||||
[network]
|
||||
network01
|
||||
|
||||
[compute]
|
||||
compute01
|
||||
|
||||
[monitoring]
|
||||
monitoring01
|
||||
|
||||
# When compute nodes and control nodes use different interfaces,
|
||||
# you can specify "api_interface" and another interfaces like below:
|
||||
#compute01 neutron_external_interface=eth0 api_interface=em1 storage_interface=em1 tunnel_interface=em1
|
||||
|
||||
[storage]
|
||||
storage01
|
||||
|
||||
[baremetal:children]
|
||||
control
|
||||
network
|
||||
compute
|
||||
storage
|
||||
|
||||
# You can explicitly specify which hosts run each project by updating the
|
||||
# groups in the sections below. Common services are grouped together.
|
||||
[collectd:children]
|
||||
compute
|
||||
|
||||
[grafana:children]
|
||||
monitoring
|
||||
|
||||
[etcd:children]
|
||||
control
|
||||
|
||||
[influxdb:children]
|
||||
monitoring
|
||||
|
||||
[kibana:children]
|
||||
control
|
||||
|
||||
[telegraf:children]
|
||||
compute
|
||||
control
|
||||
monitoring
|
||||
network
|
||||
storage
|
||||
|
||||
[elasticsearch:children]
|
||||
control
|
||||
|
||||
[haproxy:children]
|
||||
network
|
||||
|
||||
[mariadb:children]
|
||||
control
|
||||
|
||||
[rabbitmq:children]
|
||||
control
|
||||
|
||||
[mongodb:children]
|
||||
control
|
||||
|
||||
[keystone:children]
|
||||
control
|
||||
|
||||
[glance:children]
|
||||
control
|
||||
|
||||
[nova:children]
|
||||
control
|
||||
|
||||
[neutron:children]
|
||||
network
|
||||
|
||||
[cinder:children]
|
||||
control
|
||||
|
||||
[cloudkitty:children]
|
||||
control
|
||||
|
||||
[memcached:children]
|
||||
control
|
||||
|
||||
[horizon:children]
|
||||
control
|
||||
|
||||
[swift:children]
|
||||
control
|
||||
|
||||
[barbican:children]
|
||||
control
|
||||
|
||||
[heat:children]
|
||||
control
|
||||
|
||||
[murano:children]
|
||||
control
|
||||
|
||||
[ironic:children]
|
||||
control
|
||||
|
||||
[ceph:children]
|
||||
control
|
||||
|
||||
[magnum:children]
|
||||
control
|
||||
|
||||
[sahara:children]
|
||||
control
|
||||
|
||||
[mistral:children]
|
||||
control
|
||||
|
||||
[manila:children]
|
||||
control
|
||||
|
||||
[ceilometer:children]
|
||||
control
|
||||
|
||||
[aodh:children]
|
||||
control
|
||||
|
||||
[congress:children]
|
||||
control
|
||||
|
||||
[gnocchi:children]
|
||||
control
|
||||
|
||||
# Tempest
|
||||
[tempest:children]
|
||||
control
|
||||
|
||||
[senlin:children]
|
||||
control
|
||||
|
||||
[vmtp:children]
|
||||
control
|
||||
|
||||
[watcher:children]
|
||||
control
|
||||
|
||||
[rally:children]
|
||||
control
|
||||
|
||||
[searchlight:children]
|
||||
control
|
||||
|
||||
[designate:children]
|
||||
control
|
||||
|
||||
# Additional control implemented here. These groups allow you to control which
|
||||
# services run on which hosts at a per-service level.
|
||||
#
|
||||
# Word of caution: Some services are required to run on the same host to
|
||||
# function appropriately. For example, neutron-metadata-agent must run on the
|
||||
# same host as the l3-agent and (depending on configuration) the dhcp-agent.
|
||||
|
||||
# Glance
|
||||
[glance-api:children]
|
||||
glance
|
||||
|
||||
[glance-registry:children]
|
||||
glance
|
||||
|
||||
# Nova
|
||||
[nova-api:children]
|
||||
nova
|
||||
|
||||
[nova-conductor:children]
|
||||
nova
|
||||
|
||||
[nova-consoleauth:children]
|
||||
nova
|
||||
|
||||
[nova-novncproxy:children]
|
||||
nova
|
||||
|
||||
[nova-scheduler:children]
|
||||
nova
|
||||
|
||||
[nova-spicehtml5proxy:children]
|
||||
nova
|
||||
|
||||
[nova-compute-ironic:children]
|
||||
nova
|
||||
|
||||
# Neutron
|
||||
[neutron-server:children]
|
||||
control
|
||||
|
||||
[neutron-dhcp-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-l3-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-lbaas-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-metadata-agent:children]
|
||||
neutron
|
||||
|
||||
[neutron-vpnaas-agent:children]
|
||||
neutron
|
||||
|
||||
# Ceph
|
||||
[ceph-mon:children]
|
||||
ceph
|
||||
|
||||
[ceph-rgw:children]
|
||||
ceph
|
||||
|
||||
[ceph-osd:children]
|
||||
storage
|
||||
|
||||
# Cinder
|
||||
[cinder-api:children]
|
||||
cinder
|
||||
|
||||
[cinder-backup:children]
|
||||
storage
|
||||
|
||||
[cinder-scheduler:children]
|
||||
cinder
|
||||
|
||||
[cinder-volume:children]
|
||||
storage
|
||||
|
||||
# Cloudkitty
|
||||
[cloudkitty-api:children]
|
||||
cloudkitty
|
||||
|
||||
[cloudkitty-processor:children]
|
||||
cloudkitty
|
||||
|
||||
# iSCSI
|
||||
[iscsid:children]
|
||||
compute
|
||||
storage
|
||||
ironic-conductor
|
||||
|
||||
[tgtd:children]
|
||||
storage
|
||||
|
||||
# Manila
|
||||
[manila-api:children]
|
||||
manila
|
||||
|
||||
[manila-scheduler:children]
|
||||
manila
|
||||
|
||||
[manila-share:children]
|
||||
network
|
||||
|
||||
[manila-data:children]
|
||||
manila
|
||||
|
||||
# Swift
|
||||
[swift-proxy-server:children]
|
||||
swift
|
||||
|
||||
[swift-account-server:children]
|
||||
storage
|
||||
|
||||
[swift-container-server:children]
|
||||
storage
|
||||
|
||||
[swift-object-server:children]
|
||||
storage
|
||||
|
||||
# Barbican
|
||||
[barbican-api:children]
|
||||
barbican
|
||||
|
||||
[barbican-keystone-listener:children]
|
||||
barbican
|
||||
|
||||
[barbican-worker:children]
|
||||
barbican
|
||||
|
||||
# Heat
|
||||
[heat-api:children]
|
||||
heat
|
||||
|
||||
[heat-api-cfn:children]
|
||||
heat
|
||||
|
||||
[heat-engine:children]
|
||||
heat
|
||||
|
||||
# Murano
|
||||
[murano-api:children]
|
||||
murano
|
||||
|
||||
[murano-engine:children]
|
||||
murano
|
||||
|
||||
# Ironic
|
||||
[ironic-api:children]
|
||||
ironic
|
||||
|
||||
[ironic-conductor:children]
|
||||
ironic
|
||||
|
||||
[ironic-inspector:children]
|
||||
ironic
|
||||
|
||||
[ironic-pxe:children]
|
||||
ironic
|
||||
|
||||
# Magnum
|
||||
[magnum-api:children]
|
||||
magnum
|
||||
|
||||
[magnum-conductor:children]
|
||||
magnum
|
||||
|
||||
# Sahara
|
||||
[sahara-api:children]
|
||||
sahara
|
||||
|
||||
[sahara-engine:children]
|
||||
sahara
|
||||
|
||||
# Mistral
|
||||
[mistral-api:children]
|
||||
mistral
|
||||
|
||||
[mistral-executor:children]
|
||||
mistral
|
||||
|
||||
[mistral-engine:children]
|
||||
mistral
|
||||
|
||||
# Ceilometer
|
||||
[ceilometer-api:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-central:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-notification:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-collector:children]
|
||||
ceilometer
|
||||
|
||||
[ceilometer-compute:children]
|
||||
compute
|
||||
|
||||
# Aodh
|
||||
[aodh-api:children]
|
||||
aodh
|
||||
|
||||
[aodh-evaluator:children]
|
||||
aodh
|
||||
|
||||
[aodh-listener:children]
|
||||
aodh
|
||||
|
||||
[aodh-notifier:children]
|
||||
aodh
|
||||
|
||||
# Congress
|
||||
[congress-api:children]
|
||||
congress
|
||||
|
||||
[congress-datasource:children]
|
||||
congress
|
||||
|
||||
[congress-policy-engine:children]
|
||||
congress
|
||||
|
||||
# Gnocchi
|
||||
[gnocchi-api:children]
|
||||
gnocchi
|
||||
|
||||
[gnocchi-statsd:children]
|
||||
gnocchi
|
||||
|
||||
[gnocchi-metricd:children]
|
||||
gnocchi
|
||||
|
||||
# Multipathd
|
||||
[multipathd:children]
|
||||
compute
|
||||
|
||||
# Watcher
|
||||
[watcher-api:children]
|
||||
watcher
|
||||
|
||||
[watcher-engine:children]
|
||||
watcher
|
||||
|
||||
[watcher-applier:children]
|
||||
watcher
|
||||
|
||||
# Senlin
|
||||
[senlin-api:children]
|
||||
senlin
|
||||
|
||||
[senlin-engine:children]
|
||||
senlin
|
||||
|
||||
# Searchlight
|
||||
[searchlight-api:children]
|
||||
searchlight
|
||||
|
||||
[searchlight-listener:children]
|
||||
searchlight
|
||||
|
||||
# Designate
|
||||
[designate-api:children]
|
||||
designate
|
||||
|
||||
[designate-central:children]
|
||||
designate
|
||||
|
||||
[designate-mdns:children]
|
||||
designate
|
||||
|
||||
[designate-pool-manager:children]
|
||||
designate
|
||||
|
||||
[designate-sink:children]
|
||||
designate
|
@ -1,7 +0,0 @@
|
||||
---
|
||||
- hosts: baremetal
|
||||
gather_facts: no
|
||||
roles:
|
||||
- { role: baremetal,
|
||||
tags: baremetal }
|
||||
|
@ -1,193 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2015 Sam Yaple
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This module has been relicensed from the source below:
|
||||
# https://github.com/SamYaple/yaodu/blob/master/ansible/library/bslurp
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: bslurp
|
||||
short_description: Slurps a file from a remote node
|
||||
description:
|
||||
- Used for fetching a binary blob containing the file, then push that file
|
||||
to other hosts.
|
||||
options:
|
||||
src:
|
||||
description:
|
||||
- File to fetch. When dest is used, src is expected to be a str with data
|
||||
required: True
|
||||
type: str
|
||||
compress:
|
||||
description:
|
||||
- Compress file with zlib
|
||||
default: True
|
||||
type: bool
|
||||
dest:
|
||||
description:
|
||||
- Where to write out binary blob
|
||||
required: False
|
||||
type: str
|
||||
mode:
|
||||
description:
|
||||
- Destination file permissions
|
||||
default: '0644'
|
||||
type: str
|
||||
sha1:
|
||||
description:
|
||||
- sha1 hash of the underlying data
|
||||
default: None
|
||||
type: bool
|
||||
author: Sam Yaple
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
Distribute a file from single to many hosts:
|
||||
|
||||
- hosts: web_servers
|
||||
tasks:
|
||||
- name: Pull in web config
|
||||
bslurp: src="/path/to/file"
|
||||
register: file_data
|
||||
run_once: True
|
||||
- name: Push if changed
|
||||
bslurp:
|
||||
src: "{{ file_data.content }}"
|
||||
dest: "{{ file_data.source }}"
|
||||
mode: "{{ file_data.mode }}"
|
||||
sha1: "{{ file_data.sha1 }}"
|
||||
|
||||
Distribute multiple files from single to many hosts:
|
||||
|
||||
- hosts: web_servers
|
||||
tasks:
|
||||
- name: Pull in web config
|
||||
bslurp: src="{{ item }}"
|
||||
with_items:
|
||||
- "/path/to/file1"
|
||||
- "/path/to/file2"
|
||||
- "/path/to/file3"
|
||||
register: file_data
|
||||
run_once: True
|
||||
- name: Push if changed
|
||||
bslurp:
|
||||
src: "{{ item.content }}"
|
||||
dest: "{{ item.source }}"
|
||||
mode: "{{ item.mode }}"
|
||||
sha1: "{{ item.sha1 }}"
|
||||
with_items: file_data.results
|
||||
|
||||
Distribute a file to many hosts without compression; Change
|
||||
permissions on dest:
|
||||
|
||||
- hosts: web_servers
|
||||
tasks:
|
||||
- name: Pull in web config
|
||||
bslurp: src="/path/to/file"
|
||||
register: file_data
|
||||
run_once: True
|
||||
- name: Push if changed
|
||||
bslurp:
|
||||
src: "{{ file_data.content }}"
|
||||
dest: "/new/path/to/file"
|
||||
mode: "0777"
|
||||
compress: False
|
||||
sha1: "{{ file_data.sha1 }}"
|
||||
'''
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import os
|
||||
import traceback
|
||||
import zlib
|
||||
|
||||
|
||||
def copy_from_host(module):
|
||||
compress = module.params.get('compress')
|
||||
src = module.params.get('src')
|
||||
|
||||
if not os.path.exists(src):
|
||||
module.fail_json(msg="file not found: {}".format(src))
|
||||
if not os.access(src, os.R_OK):
|
||||
module.fail_json(msg="file is not readable: {}".format(src))
|
||||
|
||||
mode = oct(os.stat(src).st_mode & 0o777)
|
||||
|
||||
with open(src, 'rb') as f:
|
||||
raw_data = f.read()
|
||||
|
||||
sha1 = hashlib.sha1(raw_data).hexdigest()
|
||||
data = zlib.compress(raw_data) if compress else raw_data
|
||||
|
||||
module.exit_json(content=base64.b64encode(data), sha1=sha1, mode=mode,
|
||||
source=src)
|
||||
|
||||
|
||||
def copy_to_host(module):
|
||||
compress = module.params.get('compress')
|
||||
dest = module.params.get('dest')
|
||||
mode = int(module.params.get('mode'), 0)
|
||||
sha1 = module.params.get('sha1')
|
||||
src = module.params.get('src')
|
||||
|
||||
data = base64.b64decode(src)
|
||||
raw_data = zlib.decompress(data) if compress else data
|
||||
|
||||
if sha1:
|
||||
if os.path.exists(dest):
|
||||
if os.access(dest, os.R_OK):
|
||||
with open(dest, 'rb') as f:
|
||||
if hashlib.sha1(f.read()).hexdigest() == sha1:
|
||||
module.exit_json(changed=False)
|
||||
else:
|
||||
module.exit_json(failed=True, changed=False,
|
||||
msg='file is not accessible: {}'.format(dest))
|
||||
|
||||
if sha1 != hashlib.sha1(raw_data).hexdigest():
|
||||
module.exit_json(failed=True, changed=False,
|
||||
msg='sha1 sum does not match data')
|
||||
|
||||
with os.fdopen(os.open(dest, os.O_WRONLY | os.O_CREAT, mode), 'wb') as f:
|
||||
f.write(raw_data)
|
||||
|
||||
module.exit_json(changed=True)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
compress=dict(default=True, type='bool'),
|
||||
dest=dict(type='str'),
|
||||
mode=dict(default='0644', type='str'),
|
||||
sha1=dict(default=None, type='str'),
|
||||
src=dict(required=True, type='str')
|
||||
)
|
||||
module = AnsibleModule(argument_spec)
|
||||
|
||||
dest = module.params.get('dest')
|
||||
|
||||
try:
|
||||
if dest:
|
||||
copy_to_host(module)
|
||||
else:
|
||||
copy_from_host(module)
|
||||
except Exception:
|
||||
module.exit_json(failed=True, changed=True,
|
||||
msg=repr(traceback.format_exc()))
|
||||
|
||||
|
||||
# import module snippets
|
||||
from ansible.module_utils.basic import * # noqa
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,751 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2015 Sam Yaple
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: kolla_docker
|
||||
short_description: Module for controlling Docker
|
||||
description:
|
||||
- A module targeting at controlling Docker as used by Kolla.
|
||||
options:
|
||||
common_options:
|
||||
description:
|
||||
- A dict containing common params such as login info
|
||||
required: False
|
||||
type: dict
|
||||
default: dict()
|
||||
action:
|
||||
description:
|
||||
- The action the module should take
|
||||
required: True
|
||||
type: str
|
||||
choices:
|
||||
- compare_image
|
||||
- create_volume
|
||||
- get_container_env
|
||||
- get_container_state
|
||||
- pull_image
|
||||
- remove_container
|
||||
- remove_volume
|
||||
- restart_container
|
||||
- start_container
|
||||
- stop_container
|
||||
api_version:
|
||||
description:
|
||||
- The version of the api for docker-py to use when contacting docker
|
||||
required: False
|
||||
type: str
|
||||
default: auto
|
||||
auth_email:
|
||||
description:
|
||||
- The email address used to authenticate
|
||||
required: False
|
||||
type: str
|
||||
auth_password:
|
||||
description:
|
||||
- The password used to authenticate
|
||||
required: False
|
||||
type: str
|
||||
auth_registry:
|
||||
description:
|
||||
- The registry to authenticate
|
||||
required: False
|
||||
type: str
|
||||
auth_username:
|
||||
description:
|
||||
- The username used to authenticate
|
||||
required: False
|
||||
type: str
|
||||
detach:
|
||||
description:
|
||||
- Detach from the container after it is created
|
||||
required: False
|
||||
default: True
|
||||
type: bool
|
||||
name:
|
||||
description:
|
||||
- Name of the container or volume to manage
|
||||
required: False
|
||||
type: str
|
||||
environment:
|
||||
description:
|
||||
- The environment to set for the container
|
||||
required: False
|
||||
type: dict
|
||||
image:
|
||||
description:
|
||||
- Name of the docker image
|
||||
required: False
|
||||
type: str
|
||||
ipc_mode:
|
||||
description:
|
||||
- Set docker ipc namespace
|
||||
required: False
|
||||
type: str
|
||||
default: None
|
||||
choices:
|
||||
- host
|
||||
cap_add:
|
||||
description:
|
||||
- Add capabilities to docker container
|
||||
required: False
|
||||
type: list
|
||||
default: list()
|
||||
security_opt:
|
||||
description:
|
||||
- Set container security profile
|
||||
required: False
|
||||
type: list
|
||||
default: list()
|
||||
labels:
|
||||
description:
|
||||
- List of labels to apply to container
|
||||
required: False
|
||||
type: dict
|
||||
default: dict()
|
||||
pid_mode:
|
||||
description:
|
||||
- Set docker pid namespace
|
||||
required: False
|
||||
type: str
|
||||
default: None
|
||||
choices:
|
||||
- host
|
||||
privileged:
|
||||
description:
|
||||
- Set the container to privileged
|
||||
required: False
|
||||
default: False
|
||||
type: bool
|
||||
remove_on_exit:
|
||||
description:
|
||||
- When not detaching from container, remove on successful exit
|
||||
required: False
|
||||
default: True
|
||||
type: bool
|
||||
restart_policy:
|
||||
description:
|
||||
- Determine what docker does when the container exits
|
||||
required: False
|
||||
type: str
|
||||
choices:
|
||||
- never
|
||||
- on-failure
|
||||
- always
|
||||
- unless-stopped
|
||||
restart_retries:
|
||||
description:
|
||||
- How many times to attempt a restart if restart_policy is set
|
||||
type: int
|
||||
default: 10
|
||||
volumes:
|
||||
description:
|
||||
- Set volumes for docker to use
|
||||
required: False
|
||||
type: list
|
||||
volumes_from:
|
||||
description:
|
||||
- Name or id of container(s) to use volumes from
|
||||
required: True
|
||||
type: list
|
||||
author: Sam Yaple
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- hosts: kolla_docker
|
||||
tasks:
|
||||
- name: Start container
|
||||
kolla_docker:
|
||||
image: ubuntu
|
||||
name: test_container
|
||||
action: start_container
|
||||
- name: Remove container
|
||||
kolla_docker:
|
||||
name: test_container
|
||||
action: remove_container
|
||||
- name: Pull image without starting container
|
||||
kolla_docker:
|
||||
action: pull_container
|
||||
image: private-registry.example.com:5000/ubuntu
|
||||
- name: Create named volume
|
||||
action: create_volume
|
||||
name: name_of_volume
|
||||
- name: Remove named volume
|
||||
action: remove_volume
|
||||
name: name_of_volume
|
||||
'''
|
||||
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
|
||||
import docker
|
||||
|
||||
|
||||
class DockerWorker(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.params = self.module.params
|
||||
self.changed = False
|
||||
|
||||
# TLS not fully implemented
|
||||
# tls_config = self.generate_tls()
|
||||
|
||||
options = {
|
||||
'version': self.params.get('api_version')
|
||||
}
|
||||
|
||||
self.dc = docker.Client(**options)
|
||||
|
||||
def generate_tls(self):
|
||||
tls = {'verify': self.params.get('tls_verify')}
|
||||
tls_cert = self.params.get('tls_cert'),
|
||||
tls_key = self.params.get('tls_key'),
|
||||
tls_cacert = self.params.get('tls_cacert')
|
||||
|
||||
if tls['verify']:
|
||||
if tls_cert:
|
||||
self.check_file(tls_cert)
|
||||
self.check_file(tls_key)
|
||||
tls['client_cert'] = (tls_cert, tls_key)
|
||||
if tls_cacert:
|
||||
self.check_file(tls_cacert)
|
||||
tls['verify'] = tls_cacert
|
||||
|
||||
return docker.tls.TLSConfig(**tls)
|
||||
|
||||
def check_file(self, path):
|
||||
if not os.path.isfile(path):
|
||||
self.module.fail_json(
|
||||
failed=True,
|
||||
msg='There is no file at "{}"'.format(path)
|
||||
)
|
||||
if not os.access(path, os.R_OK):
|
||||
self.module.fail_json(
|
||||
failed=True,
|
||||
msg='Permission denied for file at "{}"'.format(path)
|
||||
)
|
||||
|
||||
def check_image(self):
|
||||
find_image = ':'.join(self.parse_image())
|
||||
for image in self.dc.images():
|
||||
repo_tags = image.get('RepoTags')
|
||||
if not repo_tags:
|
||||
continue
|
||||
for image_name in repo_tags:
|
||||
if image_name == find_image:
|
||||
return image
|
||||
|
||||
def check_volume(self):
|
||||
for vol in self.dc.volumes()['Volumes'] or list():
|
||||
if vol['Name'] == self.params.get('name'):
|
||||
return vol
|
||||
|
||||
def check_container(self):
|
||||
find_name = '/{}'.format(self.params.get('name'))
|
||||
for cont in self.dc.containers(all=True):
|
||||
if find_name in cont['Names']:
|
||||
return cont
|
||||
|
||||
def get_container_info(self):
|
||||
container = self.check_container()
|
||||
if not container:
|
||||
return None
|
||||
return self.dc.inspect_container(self.params.get('name'))
|
||||
|
||||
def check_container_differs(self):
|
||||
container_info = self.get_container_info()
|
||||
return (
|
||||
self.compare_cap_add(container_info) or
|
||||
self.compare_security_opt(container_info) or
|
||||
self.compare_image(container_info) or
|
||||
self.compare_ipc_mode(container_info) or
|
||||
self.compare_labels(container_info) or
|
||||
self.compare_privileged(container_info) or
|
||||
self.compare_pid_mode(container_info) or
|
||||
self.compare_volumes(container_info) or
|
||||
self.compare_volumes_from(container_info) or
|
||||
self.compare_environment(container_info)
|
||||
)
|
||||
|
||||
def compare_ipc_mode(self, container_info):
|
||||
new_ipc_mode = self.params.get('ipc_mode')
|
||||
current_ipc_mode = container_info['HostConfig'].get('IpcMode')
|
||||
if not current_ipc_mode:
|
||||
current_ipc_mode = None
|
||||
|
||||
if new_ipc_mode != current_ipc_mode:
|
||||
return True
|
||||
|
||||
def compare_cap_add(self, container_info):
|
||||
new_cap_add = self.params.get('cap_add', list())
|
||||
current_cap_add = container_info['HostConfig'].get('CapAdd',
|
||||
list())
|
||||
if not current_cap_add:
|
||||
current_cap_add = list()
|
||||
if set(new_cap_add).symmetric_difference(set(current_cap_add)):
|
||||
return True
|
||||
|
||||
def compare_security_opt(self, container_info):
|
||||
new_sec_opt = self.params.get('security_opt', list())
|
||||
current_sec_opt = container_info['HostConfig'].get('SecurityOpt',
|
||||
list())
|
||||
if not current_sec_opt:
|
||||
current_sec_opt = list()
|
||||
if set(new_sec_opt).symmetric_difference(set(current_sec_opt)):
|
||||
return True
|
||||
|
||||
def compare_pid_mode(self, container_info):
|
||||
new_pid_mode = self.params.get('pid_mode')
|
||||
current_pid_mode = container_info['HostConfig'].get('PidMode')
|
||||
if not current_pid_mode:
|
||||
current_pid_mode = None
|
||||
|
||||
if new_pid_mode != current_pid_mode:
|
||||
return True
|
||||
|
||||
def compare_privileged(self, container_info):
|
||||
new_privileged = self.params.get('privileged')
|
||||
current_privileged = container_info['HostConfig']['Privileged']
|
||||
if new_privileged != current_privileged:
|
||||
return True
|
||||
|
||||
def compare_image(self, container_info=None):
|
||||
container_info = container_info or self.get_container_info()
|
||||
parse_repository_tag = docker.utils.parse_repository_tag
|
||||
if not container_info:
|
||||
return True
|
||||
new_image = self.check_image()
|
||||
current_image = container_info['Image']
|
||||
if not new_image:
|
||||
return True
|
||||
if new_image['Id'] != current_image:
|
||||
return True
|
||||
# NOTE(Jeffrey4l) when new image and the current image have
|
||||
# the same id, but the tag name different.
|
||||
elif (parse_repository_tag(container_info['Config']['Image']) !=
|
||||
parse_repository_tag(self.params.get('image'))):
|
||||
return True
|
||||
|
||||
def compare_labels(self, container_info):
|
||||
new_labels = self.params.get('labels')
|
||||
current_labels = container_info['Config'].get('Labels', dict())
|
||||
image_labels = self.check_image().get('Labels', dict())
|
||||
for k, v in image_labels.items():
|
||||
if k in new_labels:
|
||||
if v != new_labels[k]:
|
||||
return True
|
||||
else:
|
||||
del current_labels[k]
|
||||
|
||||
if new_labels != current_labels:
|
||||
return True
|
||||
|
||||
def compare_volumes_from(self, container_info):
|
||||
new_vols_from = self.params.get('volumes_from')
|
||||
current_vols_from = container_info['HostConfig'].get('VolumesFrom')
|
||||
if not new_vols_from:
|
||||
new_vols_from = list()
|
||||
if not current_vols_from:
|
||||
current_vols_from = list()
|
||||
|
||||
if set(current_vols_from).symmetric_difference(set(new_vols_from)):
|
||||
return True
|
||||
|
||||
def compare_volumes(self, container_info):
|
||||
volumes, binds = self.generate_volumes()
|
||||
current_vols = container_info['Config'].get('Volumes')
|
||||
current_binds = container_info['HostConfig'].get('Binds')
|
||||
if not volumes:
|
||||
volumes = list()
|
||||
if not current_vols:
|
||||
current_vols = list()
|
||||
if not current_binds:
|
||||
current_binds = list()
|
||||
|
||||
if set(volumes).symmetric_difference(set(current_vols)):
|
||||
return True
|
||||
|
||||
new_binds = list()
|
||||
if binds:
|
||||
for k, v in binds.items():
|
||||
new_binds.append("{}:{}:{}".format(k, v['bind'], v['mode']))
|
||||
|
||||
if set(new_binds).symmetric_difference(set(current_binds)):
|
||||
return True
|
||||
|
||||
def compare_environment(self, container_info):
|
||||
if self.params.get('environment'):
|
||||
current_env = dict()
|
||||
for kv in container_info['Config'].get('Env', list()):
|
||||
k, v = kv.split('=', 1)
|
||||
current_env.update({k: v})
|
||||
|
||||
for k, v in self.params.get('environment').items():
|
||||
if k not in current_env:
|
||||
return True
|
||||
if current_env[k] != v:
|
||||
return True
|
||||
|
||||
def parse_image(self):
|
||||
full_image = self.params.get('image')
|
||||
|
||||
if '/' in full_image:
|
||||
registry, image = full_image.split('/', 1)
|
||||
else:
|
||||
image = full_image
|
||||
|
||||
if ':' in image:
|
||||
return full_image.rsplit(':', 1)
|
||||
else:
|
||||
return full_image, 'latest'
|
||||
|
||||
def pull_image(self):
|
||||
if self.params.get('auth_username'):
|
||||
self.dc.login(
|
||||
username=self.params.get('auth_username'),
|
||||
password=self.params.get('auth_password'),
|
||||
registry=self.params.get('auth_registry'),
|
||||
email=self.params.get('auth_email')
|
||||
)
|
||||
|
||||
image, tag = self.parse_image()
|
||||
|
||||
statuses = [
|
||||
json.loads(line.strip()) for line in self.dc.pull(
|
||||
repository=image, tag=tag, stream=True
|
||||
)
|
||||
]
|
||||
|
||||
for status in reversed(statuses):
|
||||
if 'error' in status:
|
||||
if status['error'].endswith('not found'):
|
||||
self.module.fail_json(
|
||||
msg="The requested image does not exist: {}:{}".format(
|
||||
image, tag),
|
||||
failed=True
|
||||
)
|
||||
else:
|
||||
self.module.fail_json(
|
||||
msg="Unknown error message: {}".format(
|
||||
status['error']),
|
||||
failed=True
|
||||
)
|
||||
|
||||
if status and status.get('status'):
|
||||
# NOTE(SamYaple): This allows us to use v1 and v2 docker
|
||||
# registries. Eventually docker will stop supporting v1
|
||||
# registries and when that happens we can remove this.
|
||||
if 'legacy registry' in status['status']:
|
||||
continue
|
||||
elif 'Downloaded newer image for' in status['status']:
|
||||
self.changed = True
|
||||
return
|
||||
elif 'Image is up to date for' in status['status']:
|
||||
return
|
||||
else:
|
||||
self.module.fail_json(
|
||||
msg="Unknown status message: {}".format(
|
||||
status['status']),
|
||||
failed=True
|
||||
)
|
||||
|
||||
def remove_container(self):
|
||||
if self.check_container():
|
||||
self.changed = True
|
||||
self.dc.remove_container(
|
||||
container=self.params.get('name'),
|
||||
force=True
|
||||
)
|
||||
|
||||
def generate_volumes(self):
|
||||
volumes = self.params.get('volumes')
|
||||
if not volumes:
|
||||
return None, None
|
||||
|
||||
vol_list = list()
|
||||
vol_dict = dict()
|
||||
|
||||
for vol in volumes:
|
||||
if ':' not in vol:
|
||||
vol_list.append(vol)
|
||||
continue
|
||||
|
||||
split_vol = vol.split(':')
|
||||
|
||||
if (len(split_vol) == 2
|
||||
and ('/' not in split_vol[0] or '/' in split_vol[1])):
|
||||
split_vol.append('rw')
|
||||
|
||||
vol_list.append(split_vol[1])
|
||||
vol_dict.update({
|
||||
split_vol[0]: {
|
||||
'bind': split_vol[1],
|
||||
'mode': split_vol[2]
|
||||
}
|
||||
})
|
||||
|
||||
return vol_list, vol_dict
|
||||
|
||||
def build_host_config(self, binds):
|
||||
options = {
|
||||
'network_mode': 'host',
|
||||
'ipc_mode': self.params.get('ipc_mode'),
|
||||
'cap_add': self.params.get('cap_add'),
|
||||
'security_opt': self.params.get('security_opt'),
|
||||
'pid_mode': self.params.get('pid_mode'),
|
||||
'privileged': self.params.get('privileged'),
|
||||
'volumes_from': self.params.get('volumes_from')
|
||||
}
|
||||
|
||||
if self.params.get('restart_policy') in ['on-failure',
|
||||
'always',
|
||||
'unless-stopped']:
|
||||
options['restart_policy'] = {
|
||||
'Name': self.params.get('restart_policy'),
|
||||
'MaximumRetryCount': self.params.get('restart_retries')
|
||||
}
|
||||
|
||||
if binds:
|
||||
options['binds'] = binds
|
||||
|
||||
return self.dc.create_host_config(**options)
|
||||
|
||||
def _inject_env_var(self, environment_info):
|
||||
newenv = {
|
||||
'KOLLA_SERVICE_NAME': self.params.get('name').replace('_', '-')
|
||||
}
|
||||
environment_info.update(newenv)
|
||||
return environment_info
|
||||
|
||||
def _format_env_vars(self):
|
||||
env = self._inject_env_var(self.params.get('environment'))
|
||||
return {k: "" if env[k] is None else env[k] for k in env}
|
||||
|
||||
def build_container_options(self):
|
||||
volumes, binds = self.generate_volumes()
|
||||
return {
|
||||
'detach': self.params.get('detach'),
|
||||
'environment': self._format_env_vars(),
|
||||
'host_config': self.build_host_config(binds),
|
||||
'labels': self.params.get('labels'),
|
||||
'image': self.params.get('image'),
|
||||
'name': self.params.get('name'),
|
||||
'volumes': volumes,
|
||||
'tty': True
|
||||
}
|
||||
|
||||
def create_container(self):
|
||||
self.changed = True
|
||||
options = self.build_container_options()
|
||||
self.dc.create_container(**options)
|
||||
|
||||
def start_container(self):
|
||||
if not self.check_image():
|
||||
self.pull_image()
|
||||
|
||||
container = self.check_container()
|
||||
if container and self.check_container_differs():
|
||||
self.remove_container()
|
||||
container = self.check_container()
|
||||
|
||||
if not container:
|
||||
self.create_container()
|
||||
container = self.check_container()
|
||||
|
||||
if not container['Status'].startswith('Up '):
|
||||
self.changed = True
|
||||
self.dc.start(container=self.params.get('name'))
|
||||
|
||||
# We do not want to detach so we wait around for container to exit
|
||||
if not self.params.get('detach'):
|
||||
rc = self.dc.wait(self.params.get('name'))
|
||||
if rc != 0:
|
||||
self.module.fail_json(
|
||||
failed=True,
|
||||
changed=True,
|
||||
msg="Container exited with non-zero return code"
|
||||
)
|
||||
if self.params.get('remove_on_exit'):
|
||||
self.remove_container()
|
||||
|
||||
def get_container_env(self):
|
||||
name = self.params.get('name')
|
||||
info = self.get_container_info()
|
||||
if not info:
|
||||
self.module.fail_json(msg="No such container: {}".format(name))
|
||||
else:
|
||||
envs = dict()
|
||||
for env in info['Config']['Env']:
|
||||
if '=' in env:
|
||||
key, value = env.split('=', 1)
|
||||
else:
|
||||
key, value = env, ''
|
||||
envs[key] = value
|
||||
|
||||
self.module.exit_json(**envs)
|
||||
|
||||
def get_container_state(self):
    """Exit the module with the container's inspected 'State' mapping."""
    name = self.params.get('name')
    info = self.get_container_info()
    if info:
        self.module.exit_json(**info['State'])
    else:
        self.module.fail_json(msg="No such container: {}".format(name))
def stop_container(self):
    """Stop the named container; fail if it does not exist."""
    name = self.params.get('name')
    found = self.check_container()
    if not found:
        self.module.fail_json(
            msg="No such container: {} to stop".format(name))
    elif not found['Status'].startswith('Exited '):
        # Only stopping a container that is not already exited is a change.
        self.changed = True
        self.dc.stop(name)
def restart_container(self):
    """Restart the named container; fail if it does not exist."""
    name = self.params.get('name')
    if self.get_container_info():
        self.changed = True
        self.dc.restart(name)
    else:
        self.module.fail_json(
            msg="No such container: {}".format(name))
def create_volume(self):
    """Create a local-driver named volume unless one already exists."""
    if self.check_volume():
        return
    self.changed = True
    self.dc.create_volume(name=self.params.get('name'), driver='local')
def remove_volume(self):
    """Remove the named volume if present; fail cleanly when it is in use."""
    if not self.check_volume():
        return
    self.changed = True
    try:
        self.dc.remove_volume(name=self.params.get('name'))
    except docker.errors.APIError as e:
        # 409 Conflict: the volume is still attached to a container.
        if e.response.status_code == 409:
            self.module.fail_json(
                failed=True,
                msg="Volume named '{}' is currently in-use".format(
                    self.params.get('name')
                )
            )
        # Any other API error propagates unchanged.
        raise
def generate_module():
    """Build the AnsibleModule and merge 'common_options' into its params.

    Parameters given directly to the task override the same keys carried in
    common_options, except that an explicit None never clobbers a value that
    common_options supplied. The 'environment' dicts from both sources are
    merged rather than replaced.

    Returns the configured AnsibleModule with its params rewritten.
    """
    argument_spec = dict(
        common_options=dict(required=False, type='dict', default=dict()),
        action=dict(required=True, type='str', choices=['compare_image',
                                                        'create_volume',
                                                        'get_container_env',
                                                        'get_container_state',
                                                        'pull_image',
                                                        'remove_container',
                                                        'remove_volume',
                                                        'restart_container',
                                                        'start_container',
                                                        'stop_container']),
        api_version=dict(required=False, type='str', default='auto'),
        auth_email=dict(required=False, type='str'),
        auth_password=dict(required=False, type='str'),
        auth_registry=dict(required=False, type='str'),
        auth_username=dict(required=False, type='str'),
        detach=dict(required=False, type='bool', default=True),
        labels=dict(required=False, type='dict', default=dict()),
        name=dict(required=False, type='str'),
        environment=dict(required=False, type='dict'),
        image=dict(required=False, type='str'),
        ipc_mode=dict(required=False, type='str', choices=['host']),
        cap_add=dict(required=False, type='list', default=list()),
        security_opt=dict(required=False, type='list', default=list()),
        pid_mode=dict(required=False, type='str', choices=['host']),
        privileged=dict(required=False, type='bool', default=False),
        remove_on_exit=dict(required=False, type='bool', default=True),
        restart_policy=dict(required=False, type='str', choices=[
            'no',
            'never',
            'on-failure',
            'always',
            'unless-stopped']),
        restart_retries=dict(required=False, type='int', default=10),
        tls_verify=dict(required=False, type='bool', default=False),
        tls_cert=dict(required=False, type='str'),
        tls_key=dict(required=False, type='str'),
        tls_cacert=dict(required=False, type='str'),
        volumes=dict(required=False, type='list'),
        volumes_from=dict(required=False, type='list')
    )
    required_together = [
        ['tls_cert', 'tls_key']
    ]
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=required_together,
        bypass_checks=True
    )

    new_args = module.params.pop('common_options', dict())

    # NOTE(jeffrey4l): merge the task's environment into the one carried by
    # common_options. BUG FIX: setdefault guards against common_options not
    # containing an 'environment' key at all -- the original
    # new_args['environment'].update(env) raised KeyError in that case.
    env = module.params.pop('environment', dict())
    if env:
        new_args.setdefault('environment', dict()).update(env)

    for key, value in module.params.items():
        # Do not let an unset (None) task parameter override a value that
        # common_options already provided.
        if key in new_args and value is None:
            continue
        new_args[key] = value

    module.params = new_args
    return module
def main():
    """Entry point: validate cross-parameter requirements and dispatch.

    Dispatches to the DockerWorker method named by the 'action' parameter
    and reports the result back to Ansible.
    """
    module = generate_module()

    # TODO(SamYaple): Replace with required_if when Ansible 2.0 lands
    if (module.params.get('action') in ['pull_image', 'start_container']
            and not module.params.get('image')):
        module.fail_json(
            msg="missing required arguments: image",
            failed=True
        )
    # TODO(SamYaple): Replace with required_if when Ansible 2.0 lands
    if (module.params.get('action') != 'pull_image'
            and not module.params.get('name')):
        module.fail_json(
            msg="missing required arguments: name",
            failed=True
        )

    try:
        dw = DockerWorker(module)
        # TODO(inc0): We keep it bool to have ansible deal with consistent
        # types. If we ever add method that will have to return some
        # meaningful data, we need to refactor all methods to return dicts.
        result = bool(getattr(dw, module.params.get('action'))())
        module.exit_json(changed=dw.changed, result=result)
    except Exception:
        # BUG FIX: failures must be reported through fail_json so Ansible
        # reliably marks the task as failed; the original used
        # exit_json(failed=True, ...), which is not the documented failure
        # path of the module API.
        module.fail_json(changed=True, msg=repr(traceback.format_exc()))
# import module snippets
# NOTE: the wildcard import is the standard Ansible module boilerplate; it
# injects AnsibleModule (used by generate_module) into module scope.
from ansible.module_utils.basic import *  # noqa
if __name__ == '__main__':
    main()
|
@ -1,51 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2015 Sam Yaple
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: merge_configs
|
||||
short_description: Merge ini-style configs
|
||||
description:
|
||||
- ConfigParser is used to merge several ini-style configs into one
|
||||
options:
|
||||
dest:
|
||||
description:
|
||||
- The destination file name
|
||||
required: True
|
||||
type: str
|
||||
sources:
|
||||
description:
|
||||
- A list of files on the destination node to merge together
|
||||
default: None
|
||||
required: True
|
||||
type: list
|
||||
author: Sam Yaple
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
Merge multiple configs:
|
||||
|
||||
- hosts: database
|
||||
tasks:
|
||||
- name: Merge configs
|
||||
merge_configs:
|
||||
sources:
|
||||
- "/tmp/config_1.cnf"
|
||||
- "/tmp/config_2.cnf"
|
||||
- "/tmp/config_3.cnf"
|
||||
dest: "/etc/mysql/my.cnf"
|
||||
'''
|
@ -1,51 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2015 Sam Yaple
|
||||
# Copyright 2016 intel
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: merge_yaml
|
||||
short_description: Merge yaml-style configs
|
||||
description:
|
||||
- PyYAML is used to merge several yaml files into one
|
||||
options:
|
||||
dest:
|
||||
description:
|
||||
- The destination file name
|
||||
required: True
|
||||
type: str
|
||||
sources:
|
||||
description:
|
||||
- A list of files on the destination node to merge together
|
||||
default: None
|
||||
required: True
|
||||
type: list
|
||||
author: Sean Mooney
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
Merge multiple yaml files:
|
||||
|
||||
- hosts: localhost
|
||||
tasks:
|
||||
- name: Merge yaml files
|
||||
merge_yaml:
|
||||
sources:
|
||||
- "/tmp/default.yml"
|
||||
- "/tmp/override.yml"
|
||||
dest: "/tmp/out.yml"
|
||||
'''
|
@ -1,8 +0,0 @@
|
||||
---
|
||||
- hosts: mariadb
|
||||
roles:
|
||||
- { role: mariadb,
|
||||
tags: mariadb,
|
||||
when: enable_mariadb | bool }
|
||||
vars:
|
||||
mariadb_recover: true
|
@ -1,9 +0,0 @@
|
||||
---
|
||||
- name: Creating admin openrc file on the deploy node
|
||||
hosts: all
|
||||
tasks:
|
||||
- template:
|
||||
src: "roles/common/templates/admin-openrc.sh.j2"
|
||||
dest: "{{ node_config_directory }}/admin-openrc.sh"
|
||||
run_once: True
|
||||
connection: local
|
@ -1,43 +0,0 @@
|
||||
---
|
||||
project_name: "aodh"
|
||||
|
||||
|
||||
####################
|
||||
# Database
|
||||
####################
|
||||
aodh_database_name: "aodh"
|
||||
aodh_database_user: "aodh"
|
||||
aodh_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
|
||||
|
||||
####################
|
||||
# Docker
|
||||
####################
|
||||
aodh_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-api"
|
||||
aodh_api_tag: "{{ openstack_release }}"
|
||||
aodh_api_image_full: "{{ aodh_api_image }}:{{ aodh_api_tag }}"
|
||||
|
||||
aodh_evaluator_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-evaluator"
|
||||
aodh_evaluator_tag: "{{ openstack_release }}"
|
||||
aodh_evaluator_image_full: "{{ aodh_evaluator_image }}:{{ aodh_evaluator_tag }}"
|
||||
|
||||
aodh_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-listener"
|
||||
aodh_listener_tag: "{{ openstack_release }}"
|
||||
aodh_listener_image_full: "{{ aodh_listener_image }}:{{ aodh_listener_tag }}"
|
||||
|
||||
aodh_notifier_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-aodh-notifier"
|
||||
aodh_notifier_tag: "{{ openstack_release }}"
|
||||
aodh_notifier_image_full: "{{ aodh_notifier_image }}:{{ aodh_notifier_tag }}"
|
||||
|
||||
|
||||
####################
|
||||
# OpenStack
|
||||
####################
|
||||
aodh_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ aodh_api_port }}"
|
||||
aodh_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ aodh_api_port }}"
|
||||
aodh_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ aodh_api_port }}"
|
||||
|
||||
aodh_logging_debug: "{{ openstack_logging_debug }}"
|
||||
|
||||
aodh_keystone_user: "aodh"
|
||||
|
||||
openstack_aodh_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
|
@ -1,3 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: common }
|
@ -1,41 +0,0 @@
|
||||
---
|
||||
- name: Creating aodh database
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m mysql_db
|
||||
-a "login_host='{{ database_address }}'
|
||||
login_port='{{ database_port }}'
|
||||
login_user='{{ database_user }}'
|
||||
login_password='{{ database_password }}'
|
||||
name='{{ aodh_database_name }}'"
|
||||
register: database
|
||||
changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
|
||||
(database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
failed_when: database.stdout.split()[2] != 'SUCCESS'
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['aodh-api'][0] }}"
|
||||
|
||||
- name: Reading json from variable
|
||||
set_fact:
|
||||
database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
|
||||
- name: Creating aodh database user and setting permissions
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m mysql_user
|
||||
-a "login_host='{{ database_address }}'
|
||||
login_port='{{ database_port }}'
|
||||
login_user='{{ database_user }}'
|
||||
login_password='{{ database_password }}'
|
||||
name='{{ aodh_database_name }}'
|
||||
password='{{ aodh_database_password }}'
|
||||
host='%'
|
||||
priv='{{ aodh_database_name }}.*:ALL'
|
||||
append_privs='yes'"
|
||||
register: database_user_create
|
||||
changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
|
||||
(database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['aodh-api'][0] }}"
|
||||
|
||||
- include: bootstrap_service.yml
|
||||
when: database_created
|
@ -1,21 +0,0 @@
|
||||
---
|
||||
- name: Running aodh bootstrap container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
detach: False
|
||||
environment:
|
||||
KOLLA_BOOTSTRAP:
|
||||
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
|
||||
image: "{{ aodh_api_image_full }}"
|
||||
labels:
|
||||
BOOTSTRAP:
|
||||
name: "bootstrap_aodh"
|
||||
restart_policy: "never"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/aodh-api/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "aodh:/var/lib/aodh/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['aodh-api'][0] }}"
|
@ -1,63 +0,0 @@
|
||||
---
|
||||
- name: Ensuring config directories exist
|
||||
file:
|
||||
path: "{{ node_config_directory }}/{{ item }}"
|
||||
state: "directory"
|
||||
recurse: yes
|
||||
with_items:
|
||||
- "aodh-api"
|
||||
- "aodh-evaluator"
|
||||
- "aodh-listener"
|
||||
- "aodh-notifier"
|
||||
|
||||
- name: Copying over config.json files for services
|
||||
template:
|
||||
src: "{{ item }}.json.j2"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/config.json"
|
||||
with_items:
|
||||
- "aodh-api"
|
||||
- "aodh-listener"
|
||||
- "aodh-evaluator"
|
||||
- "aodh-notifier"
|
||||
|
||||
- name: Copying over aodh.conf
|
||||
merge_configs:
|
||||
vars:
|
||||
service_name: "{{ item }}"
|
||||
sources:
|
||||
- "{{ role_path }}/templates/aodh.conf.j2"
|
||||
- "{{ node_custom_config }}/global.conf"
|
||||
- "{{ node_custom_config }}/database.conf"
|
||||
- "{{ node_custom_config }}/messaging.conf"
|
||||
- "{{ node_custom_config }}/aodh.conf"
|
||||
- "{{ node_custom_config }}/aodh/{{ item }}.conf"
|
||||
- "{{ node_custom_config }}/aodh/{{ inventory_hostname }}/{{ item }}.conf"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/aodh.conf"
|
||||
with_items:
|
||||
- "aodh-api"
|
||||
- "aodh-evaluator"
|
||||
- "aodh-listener"
|
||||
- "aodh-notifier"
|
||||
|
||||
- name: Copying over wsgi-aodh files for services
|
||||
template:
|
||||
src: "wsgi-aodh.conf.j2"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/wsgi-aodh.conf"
|
||||
with_items:
|
||||
- "aodh-api"
|
||||
|
||||
- name: Check if policies shall be overwritten
|
||||
local_action: stat path="{{ node_custom_config }}/aodh/policy.json"
|
||||
register: aodh_policy
|
||||
|
||||
- name: Copying over existing policy.json
|
||||
template:
|
||||
src: "{{ node_custom_config }}/aodh/policy.json"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/policy.json"
|
||||
with_items:
|
||||
- "aodh-api"
|
||||
- "aodh-evaluator"
|
||||
- "aodh-listener"
|
||||
- "aodh-notifier"
|
||||
when:
|
||||
aodh_policy.stat.exists
|
@ -1,18 +0,0 @@
|
||||
---
|
||||
- include: register.yml
|
||||
when: inventory_hostname in groups['aodh-api']
|
||||
|
||||
- include: config.yml
|
||||
when: inventory_hostname in groups['aodh-api'] or
|
||||
inventory_hostname in groups['aodh-evaluator'] or
|
||||
inventory_hostname in groups['aodh-listener'] or
|
||||
inventory_hostname in groups['aodh-notifier']
|
||||
|
||||
- include: bootstrap.yml
|
||||
when: inventory_hostname in groups['aodh-api']
|
||||
|
||||
- include: start.yml
|
||||
when: inventory_hostname in groups['aodh-api'] or
|
||||
inventory_hostname in groups['aodh-evaluator'] or
|
||||
inventory_hostname in groups['aodh-listener'] or
|
||||
inventory_hostname in groups['aodh-notifier']
|
@ -1,2 +0,0 @@
|
||||
---
|
||||
- include: "{{ action }}.yml"
|
@ -1 +0,0 @@
|
||||
---
|
@ -1,28 +0,0 @@
|
||||
---
|
||||
- name: Pulling aodh-api image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_api_image_full }}"
|
||||
when: inventory_hostname in groups['aodh-api']
|
||||
|
||||
- name: Pulling aodh-listener image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_listener_image_full }}"
|
||||
when: inventory_hostname in groups['aodh-listener']
|
||||
|
||||
- name: Pulling aodh-evaluator image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_evaluator_image_full }}"
|
||||
when: inventory_hostname in groups['aodh-evaluator']
|
||||
|
||||
- name: Pulling aodh-notifier image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_notifier_image_full }}"
|
||||
when: inventory_hostname in groups['aodh-notifier']
|
@ -1,79 +0,0 @@
|
||||
---
|
||||
- name: Ensuring the containers up
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_state"
|
||||
register: container_state
|
||||
failed_when: container_state.Running == false
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: aodh_api, group: aodh-api }
|
||||
- { name: aodh_listener, group: aodh-listener }
|
||||
- { name: aodh_evaluator, group: aodh-evaluator }
|
||||
- { name: aodh_notifier, group: aodh-notifier }
|
||||
|
||||
- include: config.yml
|
||||
|
||||
- name: Check the configs
|
||||
command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: check_results
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: aodh_api, group: aodh-api }
|
||||
- { name: aodh_listener, group: aodh-listener }
|
||||
- { name: aodh_evaluator, group: aodh-evaluator }
|
||||
- { name: aodh_notifier, group: aodh-notifier }
|
||||
|
||||
# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
|
||||
# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
|
||||
# just remove the container and start again
|
||||
- name: Containers config strategy
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_env"
|
||||
register: container_envs
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: aodh_api, group: aodh-api }
|
||||
- { name: aodh_listener, group: aodh-listener }
|
||||
- { name: aodh_evaluator, group: aodh-evaluator }
|
||||
- { name: aodh_notifier, group: aodh-notifier }
|
||||
|
||||
- name: Remove the containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "remove_container"
|
||||
register: remove_containers
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: aodh_api, group: aodh-api },
|
||||
{ name: aodh_listener, group: aodh-listener },
|
||||
{ name: aodh_evaluator, group: aodh-evaluator },
|
||||
{ name: aodh_notifier, group: aodh-notifier }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
||||
|
||||
- include: start.yml
|
||||
when: remove_containers.changed
|
||||
|
||||
- name: Restart containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "restart_container"
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == 'COPY_ALWAYS'
|
||||
- item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: aodh_api, group: aodh-api },
|
||||
{ name: aodh_listener, group: aodh-listener },
|
||||
{ name: aodh_evaluator, group: aodh-evaluator },
|
||||
{ name: aodh_notifier, group: aodh-notifier }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
@ -1,40 +0,0 @@
|
||||
---
|
||||
- name: Creating the aodh service and endpoint
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m kolla_keystone_service
|
||||
-a "service_name=aodh
|
||||
service_type=alarming
|
||||
description='OpenStack Alarming Service'
|
||||
endpoint_region={{ openstack_region_name }}
|
||||
url='{{ item.url }}'
|
||||
interface='{{ item.interface }}'
|
||||
region_name={{ openstack_region_name }}
|
||||
auth={{ '{{ openstack_aodh_auth }}' }}"
|
||||
-e "{'openstack_aodh_auth':{{ openstack_aodh_auth }}}"
|
||||
register: aodh_endpoint
|
||||
changed_when: "{{ aodh_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (aodh_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
until: aodh_endpoint.stdout.split()[2] == 'SUCCESS'
|
||||
retries: 10
|
||||
delay: 5
|
||||
run_once: True
|
||||
with_items:
|
||||
- {'interface': 'admin', 'url': '{{ aodh_admin_endpoint }}'}
|
||||
- {'interface': 'internal', 'url': '{{ aodh_internal_endpoint }}'}
|
||||
- {'interface': 'public', 'url': '{{ aodh_public_endpoint }}'}
|
||||
|
||||
- name: Creating the aodh project, user, and role
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m kolla_keystone_user
|
||||
-a "project=service
|
||||
user=aodh
|
||||
password={{ aodh_keystone_password }}
|
||||
role=admin
|
||||
region_name={{ openstack_region_name }}
|
||||
auth={{ '{{ openstack_aodh_auth }}' }}"
|
||||
-e "{'openstack_aodh_auth':{{ openstack_aodh_auth }}}"
|
||||
register: aodh_user
|
||||
changed_when: "{{ aodh_user.stdout.find('localhost | SUCCESS => ') != -1 and (aodh_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
until: aodh_user.stdout.split()[2] == 'SUCCESS'
|
||||
retries: 10
|
||||
delay: 5
|
||||
run_once: True
|
@ -1,49 +0,0 @@
|
||||
---
|
||||
- name: Starting aodh-api container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_api_image_full }}"
|
||||
name: "aodh_api"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/aodh-api/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "aodh:/var/lib/aodh/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['aodh-api']
|
||||
|
||||
- name: Starting aodh-evaluator container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_evaluator_image_full }}"
|
||||
name: "aodh_evaluator"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/aodh-evaluator/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['aodh-evaluator']
|
||||
|
||||
- name: Starting aodh-listener container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_listener_image_full }}"
|
||||
name: "aodh_listener"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/aodh-listener/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['aodh-listener']
|
||||
|
||||
- name: Starting aodh-notifier container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ aodh_notifier_image_full }}"
|
||||
name: "aodh_notifier"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/aodh-notifier/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['aodh-notifier']
|
@ -1,6 +0,0 @@
|
||||
---
|
||||
- include: config.yml
|
||||
|
||||
- include: bootstrap_service.yml
|
||||
|
||||
- include: start.yml
|
@ -1,19 +0,0 @@
|
||||
{% set aodh_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
|
||||
{% set aodh_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
|
||||
{
|
||||
"command": "{{ aodh_cmd }} -DFOREGROUND",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/aodh.conf",
|
||||
"dest": "/etc/aodh/aodh.conf",
|
||||
"owner": "aodh",
|
||||
"perm": "0600"
|
||||
},
|
||||
{
|
||||
"source": "{{ container_config_directory }}/wsgi-aodh.conf",
|
||||
"dest": "/etc/{{ aodh_dir }}/wsgi-aodh.conf",
|
||||
"owner": "root",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "aodh-evaluator",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/aodh.conf",
|
||||
"dest": "/etc/aodh/aodh.conf",
|
||||
"owner": "aodh",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "aodh-listener",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/aodh.conf",
|
||||
"dest": "/etc/aodh/aodh.conf",
|
||||
"owner": "aodh",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "aodh-notifier",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/aodh.conf",
|
||||
"dest": "/etc/aodh/aodh.conf",
|
||||
"owner": "aodh",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,40 +0,0 @@
|
||||
#jinja2: trim_blocks: False
|
||||
[DEFAULT]
|
||||
auth_strategy = keystone
|
||||
log_dir = /var/log/kolla/aodh
|
||||
debug = {{ aodh_logging_debug }}
|
||||
notification_topics = notifications
|
||||
|
||||
transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
|
||||
|
||||
[api]
|
||||
port = {{ aodh_api_port }}
|
||||
host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
|
||||
|
||||
[database]
|
||||
connection = mysql+pymysql://{{ aodh_database_user }}:{{ aodh_database_password }}@{{ aodh_database_address }}/{{ aodh_database_name }}
|
||||
|
||||
|
||||
[keystone_authtoken]
|
||||
memcache_security_strategy = ENCRYPT
|
||||
memcache_secret_key = {{ memcache_secret_key }}
|
||||
memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
|
||||
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
|
||||
project_domain_name = default
|
||||
project_name = service
|
||||
user_domain_name = default
|
||||
username = {{ aodh_keystone_user }}
|
||||
password = {{ aodh_keystone_password }}
|
||||
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
|
||||
auth_type = password
|
||||
|
||||
|
||||
[service_credentials]
|
||||
auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
|
||||
region_name = {{ openstack_region_name }}
|
||||
password = {{ aodh_keystone_password }}
|
||||
username = {{ aodh_keystone_user }}
|
||||
project_name = service
|
||||
project_domain_id = default
|
||||
user_domain_id = default
|
||||
auth_type = password
|
@ -1,25 +0,0 @@
|
||||
{% set python_path = '/usr/lib/python2.7/site-packages' if kolla_install_type == 'binary' else '/var/lib/kolla/venv/lib/python2.7/site-packages' %}
|
||||
Listen {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ aodh_api_port }}
|
||||
|
||||
<VirtualHost *:{{ aodh_api_port }}>
|
||||
|
||||
## Vhost docroot
|
||||
DocumentRoot "/var/www/cgi-bin/aodh"
|
||||
|
||||
## Directories, there should at least be a declaration for /var/www/cgi-bin/aodh
|
||||
|
||||
<Directory "/var/www/cgi-bin/aodh">
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride None
|
||||
Require all granted
|
||||
</Directory>
|
||||
|
||||
## Logging
|
||||
ErrorLog "/var/log/kolla/aodh/aodh_wsgi_error.log"
|
||||
ServerSignature Off
|
||||
CustomLog "/var/log/kolla/aodh/aodh_wsgi_access.log" combined
|
||||
WSGIApplicationGroup %{GLOBAL}
|
||||
WSGIDaemonProcess aodh group=aodh processes={{ openstack_service_workers }} threads=1 user=aodh python-path={{ python_path }}
|
||||
WSGIProcessGroup aodh
|
||||
WSGIScriptAlias / "/var/www/cgi-bin/aodh/app.wsgi"
|
||||
</VirtualHost>
|
@ -1,40 +0,0 @@
|
||||
---
|
||||
project_name: "barbican"
|
||||
|
||||
|
||||
####################
|
||||
# Database
|
||||
####################
|
||||
barbican_database_name: "barbican"
|
||||
barbican_database_user: "barbican"
|
||||
barbican_database_address: "{{ kolla_internal_fqdn }}:{{ database_port }}"
|
||||
|
||||
|
||||
####################
|
||||
# Docker
|
||||
####################
|
||||
barbican_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-barbican-api"
|
||||
barbican_api_tag: "{{ openstack_release }}"
|
||||
barbican_api_image_full: "{{ barbican_api_image }}:{{ barbican_api_tag }}"
|
||||
|
||||
barbican_keystone_listener_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-barbican-keystone-listener"
|
||||
barbican_keystone_listener_tag: "{{ openstack_release }}"
|
||||
barbican_keystone_listener_image_full: "{{ barbican_keystone_listener_image }}:{{ barbican_keystone_listener_tag }}"
|
||||
|
||||
barbican_worker_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-barbican-worker"
|
||||
barbican_worker_tag: "{{ openstack_release }}"
|
||||
barbican_worker_image_full: "{{ barbican_worker_image }}:{{ barbican_worker_tag }}"
|
||||
|
||||
|
||||
####################
|
||||
# OpenStack
|
||||
####################
|
||||
barbican_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ barbican_api_port }}"
|
||||
barbican_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ barbican_api_port }}"
|
||||
barbican_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ barbican_api_port }}"
|
||||
|
||||
barbican_logging_debug: "{{ openstack_logging_debug }}"
|
||||
|
||||
barbican_keystone_user: "barbican"
|
||||
|
||||
openstack_barbican_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
|
@ -1,3 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: common }
|
@ -1,41 +0,0 @@
|
||||
---
|
||||
- name: Creating barbican database
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m mysql_db
|
||||
-a "login_host='{{ database_address }}'
|
||||
login_port='{{ database_port }}'
|
||||
login_user='{{ database_user }}'
|
||||
login_password='{{ database_password }}'
|
||||
name='{{ barbican_database_name }}'"
|
||||
register: database
|
||||
changed_when: "{{ database.stdout.find('localhost | SUCCESS => ') != -1 and
|
||||
(database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
failed_when: database.stdout.split()[2] != 'SUCCESS'
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['barbican-api'][0] }}"
|
||||
|
||||
- name: Reading json from variable
|
||||
set_fact:
|
||||
database_created: "{{ (database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
|
||||
- name: Creating barbican database user and setting permissions
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m mysql_user
|
||||
-a "login_host='{{ database_address }}'
|
||||
login_port='{{ database_port }}'
|
||||
login_user='{{ database_user }}'
|
||||
login_password='{{ database_password }}'
|
||||
name='{{ barbican_database_name }}'
|
||||
password='{{ barbican_database_password }}'
|
||||
host='%'
|
||||
priv='{{ barbican_database_name }}.*:ALL'
|
||||
append_privs='yes'"
|
||||
register: database_user_create
|
||||
changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
|
||||
(database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['barbican-api'][0] }}"
|
||||
|
||||
- include: bootstrap_service.yml
|
||||
when: database_created
|
@ -1,21 +0,0 @@
|
||||
---
|
||||
- name: Running barbican bootstrap container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
detach: False
|
||||
environment:
|
||||
KOLLA_BOOTSTRAP:
|
||||
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
|
||||
image: "{{ barbican_api_image_full }}"
|
||||
labels:
|
||||
BOOTSTRAP:
|
||||
name: "bootstrap_barbican"
|
||||
restart_policy: "never"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/barbican-api/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "barbican:/var/lib/barbican/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['barbican-api'][0] }}"
|
@ -1,68 +0,0 @@
|
||||
---
|
||||
- name: Ensuring config directories exist
|
||||
file:
|
||||
path: "{{ node_config_directory }}/{{ item }}"
|
||||
state: "directory"
|
||||
recurse: yes
|
||||
with_items:
|
||||
- "barbican-api/vassals"
|
||||
- "barbican-keystone-listener"
|
||||
- "barbican-worker"
|
||||
|
||||
- name: Copying over config.json files for services
|
||||
template:
|
||||
src: "{{ item }}.json.j2"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/config.json"
|
||||
with_items:
|
||||
- "barbican-api"
|
||||
- "barbican-worker"
|
||||
- "barbican-keystone-listener"
|
||||
|
||||
- name: Copying over barbican-api.ini
|
||||
merge_configs:
|
||||
sources:
|
||||
- "{{ role_path }}/templates/barbican-api.ini.j2"
|
||||
- "{{ node_custom_config }}/barbican-api/barbican-api.ini"
|
||||
- "{{ node_custom_config }}/barbican-api/{{ inventory_hostname }}/barbican-api.ini"
|
||||
dest: "{{ node_config_directory }}/barbican-api/vassals/barbican-api.ini"
|
||||
|
||||
- name: Copying over barbican-api-paste.ini
|
||||
merge_configs:
|
||||
sources:
|
||||
- "{{ role_path }}/templates/barbican-api-paste.ini.j2"
|
||||
- "{{ node_custom_config }}/barbican-api/barbican-api-paste.ini"
|
||||
- "{{ node_custom_config }}/barbican-api/{{ inventory_hostname }}/barbican-api-paste.ini"
|
||||
dest: "{{ node_config_directory }}/barbican-api/barbican-api-paste.ini"
|
||||
|
||||
- name: Copying over barbican.conf
|
||||
merge_configs:
|
||||
vars:
|
||||
service_name: "{{ item }}"
|
||||
sources:
|
||||
- "{{ role_path }}/templates/barbican.conf.j2"
|
||||
- "{{ node_custom_config }}/global.conf"
|
||||
- "{{ node_custom_config }}/database.conf"
|
||||
- "{{ node_custom_config }}/messaging.conf"
|
||||
- "{{ node_custom_config }}/barbican.conf"
|
||||
- "{{ node_custom_config }}/barbican/{{ item }}.conf"
|
||||
- "{{ node_custom_config }}/barbican/{{ inventory_hostname }}/barbican.conf"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/barbican.conf"
|
||||
with_items:
|
||||
- "barbican-api"
|
||||
- "barbican-keystone-listener"
|
||||
- "barbican-worker"
|
||||
|
||||
- name: Check if policies shall be overwritten
|
||||
local_action: stat path="{{ node_custom_config }}/barbican/policy.json"
|
||||
register: barbican_policy
|
||||
|
||||
- name: Copying over existing policy.json
|
||||
template:
|
||||
src: "{{ node_custom_config }}/barbican/policy.json"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/policy.json"
|
||||
with_items:
|
||||
- "barbican-api"
|
||||
- "barbican-keystone-listener"
|
||||
- "barbican-worker"
|
||||
when:
|
||||
barbican_policy.stat.exists
|
@ -1,16 +0,0 @@
|
||||
---
|
||||
- include: register.yml
|
||||
when: inventory_hostname in groups['barbican-api']
|
||||
|
||||
- include: config.yml
|
||||
when: inventory_hostname in groups['barbican-api'] or
|
||||
inventory_hostname in groups['barbican-worker'] or
|
||||
inventory_hostname in groups['barbican-keystone-listener']
|
||||
|
||||
- include: bootstrap.yml
|
||||
when: inventory_hostname in groups['barbican-api']
|
||||
|
||||
- include: start.yml
|
||||
when: inventory_hostname in groups['barbican-api'] or
|
||||
inventory_hostname in groups['barbican-worker'] or
|
||||
inventory_hostname in groups['barbican-keystone-listener']
|
@ -1,2 +0,0 @@
|
||||
---
|
||||
- include: "{{ action }}.yml"
|
@ -1 +0,0 @@
|
||||
---
|
@ -1,21 +0,0 @@
|
||||
---
|
||||
- name: Pulling barbican-api image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ barbican_api_image_full }}"
|
||||
when: inventory_hostname in groups['barbican-api']
|
||||
|
||||
- name: Pulling barbican-keystone-listener image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ barbican_keystone_listener_image_full }}"
|
||||
when: inventory_hostname in groups['barbican-keystone-listener']
|
||||
|
||||
- name: Pulling barbican-worker image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ barbican_worker_image_full }}"
|
||||
when: inventory_hostname in groups['barbican-worker']
|
@ -1,74 +0,0 @@
|
||||
---
|
||||
- name: Ensuring the containers up
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_state"
|
||||
register: container_state
|
||||
failed_when: container_state.Running == false
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: barbican_api, group: barbican-api }
|
||||
- { name: barbican_keystone_listener, group: barbican-keystone-listener }
|
||||
- { name: barbican_worker, group: barbican-worker }
|
||||
|
||||
- include: config.yml
|
||||
|
||||
- name: Check the configs
|
||||
command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: check_results
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: barbican_api, group: barbican-api }
|
||||
- { name: barbican_keystone_listener, group: barbican-keystone-listener }
|
||||
- { name: barbican_worker, group: barbican-worker }
|
||||
|
||||
# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
|
||||
# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
|
||||
# just remove the container and start again
|
||||
- name: Containers config strategy
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_env"
|
||||
register: container_envs
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: barbican_api, group: barbican-api }
|
||||
- { name: barbican_keystone_listener, group: barbican-keystone-listener }
|
||||
- { name: barbican_worker, group: barbican-worker }
|
||||
|
||||
- name: Remove the containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "remove_container"
|
||||
register: remove_containers
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: barbican_api, group: barbican-api },
|
||||
{ name: barbican_keystone_listener, group: barbican-keystone-listener },
|
||||
{ name: barbican_worker, group: barbican-worker }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
||||
|
||||
- include: start.yml
|
||||
when: remove_containers.changed
|
||||
|
||||
- name: Restart containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "restart_container"
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == 'COPY_ALWAYS'
|
||||
- item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: barbican_api, group: barbican-api },
|
||||
{ name: barbican_keystone_listener, group: barbican-keystone-listener },
|
||||
{ name: barbican_worker, group: barbican-worker }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
@ -1,40 +0,0 @@
|
||||
---
|
||||
- name: Creating the barbican service and endpoint
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m kolla_keystone_service
|
||||
-a "service_name=barbican
|
||||
service_type=key-manager
|
||||
description='Barbican Key Management Service'
|
||||
endpoint_region={{ openstack_region_name }}
|
||||
url='{{ item.url }}'
|
||||
interface='{{ item.interface }}'
|
||||
region_name={{ openstack_region_name }}
|
||||
auth={{ '{{ openstack_barbican_auth }}' }}"
|
||||
-e "{'openstack_barbican_auth':{{ openstack_barbican_auth }}}"
|
||||
register: barbican_endpoint
|
||||
changed_when: "{{ barbican_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (barbican_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
until: barbican_endpoint.stdout.split()[2] == 'SUCCESS'
|
||||
retries: 10
|
||||
delay: 5
|
||||
run_once: True
|
||||
with_items:
|
||||
- {'interface': 'admin', 'url': '{{ barbican_admin_endpoint }}'}
|
||||
- {'interface': 'internal', 'url': '{{ barbican_internal_endpoint }}'}
|
||||
- {'interface': 'public', 'url': '{{ barbican_public_endpoint }}'}
|
||||
|
||||
- name: Creating the barbican project, user, and role
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m kolla_keystone_user
|
||||
-a "project=service
|
||||
user=barbican
|
||||
password={{ barbican_keystone_password }}
|
||||
role=admin
|
||||
region_name={{ openstack_region_name }}
|
||||
auth={{ '{{ openstack_barbican_auth }}' }}"
|
||||
-e "{'openstack_barbican_auth':{{ openstack_barbican_auth }}}"
|
||||
register: barbican_user
|
||||
changed_when: "{{ barbican_user.stdout.find('localhost | SUCCESS => ') != -1 and (barbican_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
until: barbican_user.stdout.split()[2] == 'SUCCESS'
|
||||
retries: 10
|
||||
delay: 5
|
||||
run_once: True
|
@ -1,37 +0,0 @@
|
||||
---
|
||||
- name: Starting barbican-api container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ barbican_api_image_full }}"
|
||||
name: "barbican_api"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/barbican-api/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "barbican:/var/lib/barbican/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['barbican-api']
|
||||
|
||||
- name: Starting barbican-keystone-listener container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ barbican_keystone_listener_image_full }}"
|
||||
name: "barbican_keystone_listener"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/barbican-keystone-listener/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['barbican-keystone-listener']
|
||||
|
||||
- name: Starting barbican-worker container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ barbican_worker_image_full }}"
|
||||
name: "barbican_worker"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/barbican-worker/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['barbican-worker']
|
@ -1,6 +0,0 @@
|
||||
---
|
||||
- include: config.yml
|
||||
|
||||
- include: bootstrap_service.yml
|
||||
|
||||
- include: start.yml
|
@ -1,60 +0,0 @@
|
||||
[composite:main]
|
||||
use = egg:Paste#urlmap
|
||||
/: barbican_version
|
||||
/v1: barbican-api-keystone
|
||||
|
||||
# Use this pipeline for Barbican API - versions no authentication
|
||||
[pipeline:barbican_version]
|
||||
pipeline = cors versionapp
|
||||
|
||||
# Use this pipeline for Barbican API - DEFAULT no authentication
|
||||
[pipeline:barbican_api]
|
||||
pipeline = cors unauthenticated-context apiapp
|
||||
|
||||
#Use this pipeline to activate a repoze.profile middleware and HTTP port,
|
||||
# to provide profiling information for the REST API processing.
|
||||
[pipeline:barbican-profile]
|
||||
pipeline = cors unauthenticated-context egg:Paste#cgitb egg:Paste#httpexceptions profile apiapp
|
||||
|
||||
#Use this pipeline for keystone auth
|
||||
[pipeline:barbican-api-keystone]
|
||||
pipeline = cors authtoken context apiapp
|
||||
|
||||
#Use this pipeline for keystone auth with audit feature
|
||||
[pipeline:barbican-api-keystone-audit]
|
||||
pipeline = authtoken context audit apiapp
|
||||
|
||||
[app:apiapp]
|
||||
paste.app_factory = barbican.api.app:create_main_app
|
||||
|
||||
[app:versionapp]
|
||||
paste.app_factory = barbican.api.app:create_version_app
|
||||
|
||||
[filter:simple]
|
||||
paste.filter_factory = barbican.api.middleware.simple:SimpleFilter.factory
|
||||
|
||||
[filter:unauthenticated-context]
|
||||
paste.filter_factory = barbican.api.middleware.context:UnauthenticatedContextMiddleware.factory
|
||||
|
||||
[filter:context]
|
||||
paste.filter_factory = barbican.api.middleware.context:ContextMiddleware.factory
|
||||
|
||||
[filter:audit]
|
||||
paste.filter_factory = keystonemiddleware.audit:filter_factory
|
||||
audit_map_file = /etc/barbican/api_audit_map.conf
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
|
||||
|
||||
[filter:profile]
|
||||
use = egg:repoze.profile
|
||||
log_filename = myapp.profile
|
||||
cachegrind_filename = cachegrind.out.myapp
|
||||
discard_first_request = true
|
||||
path = /__profile__
|
||||
flush_at_shutdown = true
|
||||
unwind = false
|
||||
|
||||
[filter:cors]
|
||||
paste.filter_factory = oslo_middleware.cors:filter_factory
|
||||
oslo_config_project = barbican
|
@ -1,11 +0,0 @@
|
||||
[uwsgi]
|
||||
socket = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ barbican_api_port }}
|
||||
protocol = http
|
||||
processes = {{ openstack_service_workers }}
|
||||
lazy = true
|
||||
vacuum = true
|
||||
no-default-app = true
|
||||
memory-report = true
|
||||
plugins = python
|
||||
paste = config:/etc/barbican/barbican-api-paste.ini
|
||||
add-header = Connection: close
|
@ -1,23 +0,0 @@
|
||||
{
|
||||
"command": "uwsgi --master --emperor /etc/barbican/vassals --logto /var/log/kolla/barbican/barbican-api.log",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/barbican.conf",
|
||||
"dest": "/etc/barbican/barbican.conf",
|
||||
"owner": "barbican",
|
||||
"perm": "0600"
|
||||
},
|
||||
{
|
||||
"source": "{{ container_config_directory }}/vassals/barbican-api.ini",
|
||||
"dest": "/etc/barbican/vassals/barbican-api.ini",
|
||||
"owner": "barbican",
|
||||
"perm": "0600"
|
||||
},
|
||||
{
|
||||
"source": "{{ container_config_directory }}/barbican-api-paste.ini",
|
||||
"dest": "/etc/barbican/barbican-api-paste.ini",
|
||||
"owner": "barbican",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "barbican-keystone-listener",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/barbican.conf",
|
||||
"dest": "/etc/barbican/barbican.conf",
|
||||
"owner": "barbican",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "barbican-worker",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/barbican.conf",
|
||||
"dest": "/etc/barbican/barbican.conf",
|
||||
"owner": "barbican",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,47 +0,0 @@
|
||||
[DEFAULT]
|
||||
debug = {{ barbican_logging_debug }}
|
||||
log_dir = /var/log/kolla/barbican
|
||||
|
||||
|
||||
bind_port = {{ barbican_api_port }}
|
||||
bind_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
|
||||
host_href = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ barbican_api_port }}
|
||||
backlog = 4096
|
||||
max_allowed_secret_in_bytes = 10000
|
||||
max_allowed_request_size_in_bytes = 1000000
|
||||
|
||||
db_auto_create = False
|
||||
sql_connection = mysql://{{ barbican_database_user }}:{{ barbican_database_password }}@{{ barbican_database_address }}/{{ barbican_database_name }}
|
||||
|
||||
transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
|
||||
|
||||
[keystone_notifications]
|
||||
enable = True
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
|
||||
project_domain_id = default
|
||||
project_name = service
|
||||
user_domain_id = default
|
||||
username = {{ barbican_keystone_user }}
|
||||
password = {{ barbican_keystone_password }}
|
||||
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
|
||||
auth_type = password
|
||||
|
||||
memcache_security_strategy = ENCRYPT
|
||||
memcache_secret_key = {{ memcache_secret_key }}
|
||||
{% if orchestration_engine == 'KUBERNETES' %}
|
||||
memcache_servers = {{ memcached_servers }}
|
||||
{% else %}
|
||||
memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
[service_credentials]
|
||||
auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
|
||||
region_name = {{ openstack_region_name }}
|
||||
password = {{ barbican_keystone_password }}
|
||||
username = {{ barbican_keystone_user }}
|
||||
project_name = service
|
||||
project_domain_id = default
|
||||
user_domain_id = default
|
||||
auth_type = password
|
@ -1,33 +0,0 @@
|
||||
---
|
||||
docker_apt_url: "https://apt.dockerproject.org"
|
||||
docker_apt_key_id: F76221572C52609D
|
||||
|
||||
docker_yum_url: "https://yum.dockerproject.org"
|
||||
docker_gpg_fingerprint: "58118E89F3A912897C070ADBF76221572C52609D"
|
||||
|
||||
customize_etc_hosts: True
|
||||
|
||||
create_kolla_user: True
|
||||
|
||||
docker_storage_driver: ""
|
||||
|
||||
debian_pkg_install:
|
||||
- docker-engine=1.11.*
|
||||
- git
|
||||
- python-setuptools
|
||||
- ntp
|
||||
|
||||
redhat_pkg_install:
|
||||
- epel-release
|
||||
- docker-engine-1.11.0
|
||||
- git
|
||||
- python-setuptools
|
||||
- ntp
|
||||
|
||||
ubuntu_pkg_removals:
|
||||
- lxd
|
||||
- lxc
|
||||
- libvirt
|
||||
|
||||
redhat_pkg_removals:
|
||||
- libvirt
|
@ -1,4 +0,0 @@
|
||||
---
|
||||
- include: pre-install.yml
|
||||
- include: install.yml
|
||||
- include: post-install.yml
|
@ -1,81 +0,0 @@
|
||||
---
|
||||
- name: Update apt cache
|
||||
command: apt-get update
|
||||
become: True
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Set firewall default policy
|
||||
ufw: state=disabled policy=allow
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Check if firewalld is installed
|
||||
command: rpm -q firewalld
|
||||
register: firewalld_check
|
||||
failed_when: firewalld_check.rc > 1
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Disable firewalld
|
||||
become: True
|
||||
service:
|
||||
name: "{{ item }}"
|
||||
enabled: false
|
||||
state: stopped
|
||||
with_items:
|
||||
- firewalld
|
||||
when:
|
||||
- ansible_os_family == 'RedHat'
|
||||
- firewalld_check.rc == 0
|
||||
|
||||
- name: Install apt packages
|
||||
package: name={{item}} state=present
|
||||
become: True
|
||||
with_items: "{{ debian_pkg_install }}"
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Install wily kernel
|
||||
package: name=linux-generic-lts-wily state=latest
|
||||
register: kernel_updated
|
||||
become: True
|
||||
when:
|
||||
- ansible_distribution|lower == "ubuntu" | bool
|
||||
- ansible_distribution_release|lower == "trusty" | bool
|
||||
|
||||
- name: Set reboot required
|
||||
set_fact:
|
||||
reboot_required: True
|
||||
when:
|
||||
- kernel_updated is defined
|
||||
- kernel_updated.changed
|
||||
|
||||
- name: Install deltarpm packages
|
||||
package: name={{item}} state=installed
|
||||
become: True
|
||||
with_items:
|
||||
- deltarpm
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Install yum packages
|
||||
package: name={{item}} state=present
|
||||
become: True
|
||||
with_items: "{{ redhat_pkg_install }}"
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Install pip
|
||||
easy_install: name=pip
|
||||
become: True
|
||||
|
||||
- name: Install docker-py
|
||||
pip: name=docker-py state=latest
|
||||
become: True
|
||||
|
||||
- name: Remove packages
|
||||
package: name={{item}} state=absent
|
||||
with_items: "{{ ubuntu_pkg_removals }}"
|
||||
become: True
|
||||
when: ansible_distribution|lower == "ubuntu" | bool
|
||||
|
||||
- name: Remove packages
|
||||
package: name={{item}} state=absent
|
||||
with_items: "{{ redhat_pkg_removals }}"
|
||||
become: True
|
||||
when: ansible_os_family == 'RedHat'
|
@ -1,2 +0,0 @@
|
||||
---
|
||||
- include: "{{ action }}.yml"
|
@ -1,85 +0,0 @@
|
||||
---
|
||||
- name: Ensure docker service directory exists
|
||||
file:
|
||||
path=/etc/systemd/system/docker.service.d
|
||||
state=directory
|
||||
recurse=yes
|
||||
become: True
|
||||
when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version > "14") or
|
||||
(ansible_os_family == "RedHat") or (ansible_distribution == "Debian")
|
||||
|
||||
- name: Configure docker service
|
||||
become: True
|
||||
template:
|
||||
src=docker_systemd_service.j2
|
||||
dest=/etc/systemd/system/docker.service.d/kolla.conf
|
||||
when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version > "14") or
|
||||
(ansible_os_family == "RedHat") or (ansible_distribution == "Debian")
|
||||
|
||||
- name: Reload docker service file
|
||||
become: True
|
||||
command: systemctl daemon-reload
|
||||
when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version > "14") or
|
||||
(ansible_os_family == "RedHat") or (ansible_distribution == "Debian")
|
||||
|
||||
- name: Configure docker service
|
||||
become: True
|
||||
template:
|
||||
src=docker_defaults.j2
|
||||
dest=/etc/default/docker
|
||||
when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version < "15")
|
||||
|
||||
- name: Docker mount shared
|
||||
command: mount --make-shared /run
|
||||
become: True
|
||||
when: (ansible_distribution == "Ubuntu" and ansible_distribution_major_version < "15")
|
||||
|
||||
- name: Get stat of libvirtd apparmor profile
|
||||
stat: path=/etc/apparmor.d/usr.sbin.libvirtd
|
||||
register: apparmor_libvirtd_profile
|
||||
when: ansible_distribution == "Ubuntu"
|
||||
|
||||
- name: Remove apparmor profile for libvirt
|
||||
command: apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd
|
||||
become: True
|
||||
when:
|
||||
- ansible_distribution == "Ubuntu"
|
||||
- apparmor_libvirtd_profile.stat.exists == True
|
||||
|
||||
- name: Create docker group
|
||||
group:
|
||||
name: docker
|
||||
become: True
|
||||
|
||||
- name: Add kolla user to docker group
|
||||
user:
|
||||
name: kolla
|
||||
append: yes
|
||||
groups: docker
|
||||
become: True
|
||||
when: create_kolla_user | bool == True
|
||||
|
||||
- name: Start docker
|
||||
service:
|
||||
name: docker
|
||||
state: started
|
||||
become: yes
|
||||
|
||||
- name: Restart docker
|
||||
service:
|
||||
name: docker
|
||||
state: restarted
|
||||
become: yes
|
||||
|
||||
- name: Enable docker
|
||||
service:
|
||||
name: docker
|
||||
enabled: yes
|
||||
become: yes
|
||||
|
||||
- name: Reboot
|
||||
command: reboot -f
|
||||
become: yes
|
||||
when:
|
||||
- reboot_required is defined
|
||||
- reboot_required | bool == true
|
@ -1,137 +0,0 @@
|
||||
---
|
||||
# NOTE: raw install is required to support cloud images which do not have python installed
|
||||
- name: "Install python2 and python-simplejson"
|
||||
become: true
|
||||
raw: "yum install -y python python-simplejson || (apt-get update && apt-get install -y python2.7 python-simplejson)"
|
||||
|
||||
- name: Gather facts
|
||||
setup:
|
||||
|
||||
- name: Ensure localhost in /etc/hosts
|
||||
lineinfile:
|
||||
dest: /etc/hosts
|
||||
regexp: "^127.0.0.1.*"
|
||||
line: "127.0.0.1 localhost"
|
||||
state: present
|
||||
become: True
|
||||
when: customize_etc_hosts | bool == True
|
||||
|
||||
- name: Generate /etc/hosts for all of the nodes
|
||||
blockinfile:
|
||||
dest: /etc/hosts
|
||||
marker: "# {mark} ANSIBLE GENERATED HOSTS"
|
||||
block: |
|
||||
{% for host in groups['all'] %}
|
||||
{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }} {{ hostvars[host]['ansible_hostname'] }}
|
||||
{% endfor %}
|
||||
become: True
|
||||
when: customize_etc_hosts | bool == True
|
||||
|
||||
- name: Ensure sudo group is present
|
||||
group: name=sudo state=present
|
||||
become: True
|
||||
|
||||
- name: Ensure kolla group is present
|
||||
group: name=kolla state=present
|
||||
become: True
|
||||
when: create_kolla_user | bool == True
|
||||
|
||||
- name: Create kolla user
|
||||
user:
|
||||
name: kolla
|
||||
state: present
|
||||
group: kolla
|
||||
groups: "sudo"
|
||||
become: True
|
||||
when: create_kolla_user | bool == True
|
||||
|
||||
- name: Grant kolla user passwordless sudo
|
||||
lineinfile:
|
||||
dest: /etc/sudoers
|
||||
state: present
|
||||
regexp: '^kolla'
|
||||
line: 'kolla ALL=(ALL) NOPASSWD: ALL'
|
||||
become: True
|
||||
when: create_kolla_user | bool == True
|
||||
|
||||
- name: Add public key to kolla user authorized keys
|
||||
authorized_key:
|
||||
user: kolla
|
||||
key: "{{ kolla_ssh_key.public_key }}"
|
||||
become: True
|
||||
when: create_kolla_user | bool == True
|
||||
|
||||
- name: Install apt packages
|
||||
command: apt-get update
|
||||
become: True
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Install ubuntu ca certs
|
||||
package: name={{item}} state=latest
|
||||
become: True
|
||||
with_items:
|
||||
- ca-certificates
|
||||
- apt-transport-https
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
- name: Ensure apt sources list directory exists
|
||||
file: path=/etc/apt/sources.list.d state=directory recurse=yes
|
||||
become: True
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Enable docker repo apt
|
||||
template:
|
||||
src: docker_apt_repo.j2
|
||||
dest: /etc/apt/sources.list.d/docker.list
|
||||
become: True
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: Install docker apt gpg key
|
||||
apt_key:
|
||||
url: "{{ docker_apt_url }}/gpg"
|
||||
id: "{{ docker_apt_key_id }}"
|
||||
state: present
|
||||
become: True
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- ansible_distribution == 'Ubuntu'
|
||||
|
||||
- name: Ensure yum repos directory exists
|
||||
file: path=/etc/yum.repos.d/ state=directory recurse=yes
|
||||
become: True
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Enable docker repo yum
|
||||
become: True
|
||||
template:
|
||||
src: docker_yum_repo.j2
|
||||
dest: /etc/yum.repos.d/docker.repo
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Install docker rpm gpg key
|
||||
rpm_key:
|
||||
state: present
|
||||
key: "{{ docker_yum_url }}/gpg"
|
||||
become: True
|
||||
when: ansible_os_family == 'RedHat'
|
||||
|
||||
- name: Ensure /etc/kolla directory exists
|
||||
file:
|
||||
path: /etc/kolla
|
||||
state: directory
|
||||
recurse: yes
|
||||
owner: kolla
|
||||
group: kolla
|
||||
mode: 755
|
||||
become: True
|
||||
when: create_kolla_user | bool == True
|
||||
|
||||
- name: Ensure /etc/kolla directory exists
|
||||
file:
|
||||
path: /etc/kolla
|
||||
state: directory
|
||||
recurse: yes
|
||||
mode: 666
|
||||
become: True
|
||||
when: create_kolla_user | bool == False
|
@ -1 +0,0 @@
|
||||
---
|
@ -1,2 +0,0 @@
|
||||
# main docker repo
|
||||
deb {{docker_apt_url}}/repo {{ansible_distribution|lower}}-{{ansible_distribution_release|lower}} main
|
@ -1,26 +0,0 @@
|
||||
# Docker Upstart and SysVinit configuration file
|
||||
|
||||
#
|
||||
# THIS FILE DOES NOT APPLY TO SYSTEMD
|
||||
#
|
||||
# Please see the documentation for "systemd drop-ins":
|
||||
# https://docs.docker.com/engine/articles/systemd/
|
||||
#
|
||||
|
||||
# Customize location of Docker binary (especially for development testing).
|
||||
#DOCKERD="/usr/local/bin/dockerd"
|
||||
|
||||
# Use DOCKER_OPTS to modify the daemon startup options.
|
||||
DOCKER_OPTS=""
|
||||
{% if docker_storage_driver %}
|
||||
DOCKER_OPTS="$DOCKER_OPTS --storage-driver={{ docker_storage_driver }}"
|
||||
{% endif %}
|
||||
{% if docker_registry %}
|
||||
DOCKER_OPTS="$DOCKER_OPTS --insecure-registry {{ docker_registry }}"
|
||||
{% endif %}
|
||||
|
||||
# If you need Docker to use an HTTP proxy, it can also be specified here.
|
||||
#export http_proxy="http://127.0.0.1:3128/"
|
||||
|
||||
# This is also a handy place to tweak where Docker's temporary files go.
|
||||
#export TMPDIR="/mnt/bigdrive/docker-tmp"
|
@ -1,11 +0,0 @@
|
||||
[Service]
|
||||
MountFlags=shared
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/docker daemon \
|
||||
{% if docker_registry %}
|
||||
--insecure-registry {{ docker_registry }} \
|
||||
{% endif %}
|
||||
{% if docker_storage_driver %}
|
||||
--storage-driver={{ docker_storage_driver }}
|
||||
{% endif %}
|
||||
-H fd://
|
@ -1,6 +0,0 @@
|
||||
[docker-repo]
|
||||
name=Docker main Repository
|
||||
baseurl={{docker_yum_url}}/repo/main/{{ansible_distribution|lower}}/{{ansible_distribution_major_version|lower}}
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
gpgkey={{docker_yum_url}}/gpg
|
@ -1,7 +0,0 @@
|
||||
---
|
||||
####################
|
||||
# Docker
|
||||
####################
|
||||
bifrost_deploy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-bifrost-deploy"
|
||||
bifrost_deploy_tag: "{{ openstack_release }}"
|
||||
bifrost_deploy_image_full: "{{ bifrost_deploy_image }}:{{ bifrost_deploy_tag }}"
|
@ -1,28 +0,0 @@
|
||||
---
|
||||
- name: Bootstrap bifrost (this may take several minutes)
|
||||
command: >
|
||||
docker exec bifrost_deploy
|
||||
bash -c '/bifrost/scripts/env-setup.sh && source /bifrost/env-vars
|
||||
&& source /opt/stack/ansible/hacking/env-setup &&
|
||||
ansible-playbook -vvvv -i /bifrost/playbooks/inventory/localhost
|
||||
/bifrost/playbooks/install.yaml -e @/etc/bifrost/bifrost.yml
|
||||
-e @/etc/bifrost/dib.yml -e skip_package_install=true'
|
||||
- name: Installing ssh keys
|
||||
command: >
|
||||
docker exec bifrost_deploy
|
||||
bash -c 'mkdir /root/.ssh ; mkdir /home/ironic/.ssh;
|
||||
cp -f /etc/bifrost/id_rsa /root/.ssh/id_rsa &&
|
||||
cp -f /etc/bifrost/id_rsa.pub /root/.ssh/id_rsa.pub &&
|
||||
cp -f /etc/bifrost/ssh_config /root/.ssh/config &&
|
||||
cp -f /etc/bifrost/id_rsa /home/ironic/.ssh/id_rsa &&
|
||||
cp -f /etc/bifrost/id_rsa.pub /home/ironic/.ssh/id_rsa.pub &&
|
||||
cp -f /etc/bifrost/ssh_config /home/ironic/.ssh/config &&
|
||||
chmod 600 /root/.ssh/id_rsa &&
|
||||
chmod 600 /root/.ssh/id_rsa.pub &&
|
||||
chmod 600 /root/.ssh/config &&
|
||||
chmod 600 /home/ironic/.ssh/id_rsa &&
|
||||
chmod 600 /home/ironic/.ssh/id_rsa.pub &&
|
||||
chmod 600 /home/ironic/.ssh/config &&
|
||||
chown ironic:ironic /home/ironic/.ssh/id_rsa &&
|
||||
chown ironic:ironic /home/ironic/.ssh/id_rsa.pub &&
|
||||
chown ironic:ironic /home/ironic/.ssh/config'
|
@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Ensuring config directories exist
|
||||
file:
|
||||
path: "{{ node_config_directory }}/{{ item }}"
|
||||
state: "directory"
|
||||
recurse: yes
|
||||
with_items:
|
||||
- "bifrost"
|
||||
|
||||
- name: Generate bifrost configs
|
||||
merge_yaml:
|
||||
sources:
|
||||
- "{{ role_path }}/templates/{{ item }}.yml.j2"
|
||||
- "{{ node_custom_config }}/{{ item }}.yml"
|
||||
- "{{ node_custom_config }}/bifrost/{{ item }}.yml"
|
||||
dest: "{{ node_config_directory }}/bifrost/{{ item }}.yml"
|
||||
with_items:
|
||||
- "bifrost"
|
||||
- "dib"
|
||||
- "servers"
|
||||
|
||||
- name: Template ssh keys
|
||||
template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ node_config_directory }}/bifrost/{{ item.dest }}"
|
||||
with_items:
|
||||
- { src: "id_rsa", dest: "id_rsa" }
|
||||
- { src: "id_rsa.pub", dest: "id_rsa.pub" }
|
||||
- { src: "ssh_config", dest: "ssh_config" }
|
@ -1,20 +0,0 @@
|
||||
---
|
||||
- name: Enrolling physical servers with ironic
|
||||
command: >
|
||||
docker exec bifrost_deploy
|
||||
bash -c 'source /bifrost/env-vars
|
||||
&& source /opt/stack/ansible/hacking/env-setup &&
|
||||
export BIFROST_INVENTORY_SOURCE=/etc/bifrost/servers.yml &&
|
||||
ansible-playbook -vvvv -i /bifrost/playbooks/inventory/bifrost_inventory.py
|
||||
/bifrost/playbooks/enroll-dynamic.yaml -e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"
|
||||
-e @/etc/bifrost/bifrost.yml'
|
||||
|
||||
- name: Deploy physical servers with ironic
|
||||
command: >
|
||||
docker exec bifrost_deploy
|
||||
bash -c 'source /bifrost/env-vars
|
||||
&& source /opt/stack/ansible/hacking/env-setup &&
|
||||
export BIFROST_INVENTORY_SOURCE=/etc/bifrost/servers.yml &&
|
||||
ansible-playbook -vvvv -i /bifrost/playbooks/inventory/bifrost_inventory.py
|
||||
/bifrost/playbooks/deploy-dynamic.yaml -e "ansible_python_interpreter=/var/lib/kolla/venv/bin/python"
|
||||
-e @/etc/bifrost/bifrost.yml'
|
@ -1,6 +0,0 @@
|
||||
---
|
||||
- include: config.yml
|
||||
|
||||
- include: start.yml
|
||||
|
||||
- include: bootstrap.yml
|
@ -1,2 +0,0 @@
|
||||
---
|
||||
- include: "{{ action }}.yml"
|
@ -1 +0,0 @@
|
||||
---
|
@ -1,64 +0,0 @@
|
||||
---
|
||||
- name: Ensuring the containers up
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_state"
|
||||
register: container_state
|
||||
failed_when: container_state.Running == false
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: bifrost-deploy, group: bifrost-deploy }
|
||||
|
||||
- include: config.yml
|
||||
|
||||
- name: Check the configs
|
||||
command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: check_results
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: bifrost-deploy, group: bifrost-deploy }
|
||||
|
||||
# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
|
||||
# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
|
||||
# just remove the container and start again
|
||||
- name: Containers config strategy
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_env"
|
||||
register: container_envs
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: bifrost-deploy, group: bifrost-deploy }
|
||||
|
||||
- name: Remove the containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "remove_container"
|
||||
register: remove_containers
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: bifrost-deploy, group: bifrost-deploy }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
||||
|
||||
- include: start.yml
|
||||
when: remove_containers.changed
|
||||
|
||||
- name: Restart containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "restart_container"
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == 'COPY_ALWAYS'
|
||||
- item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: bifrost-deploy, group: bifrost-deploy }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
@ -1,12 +0,0 @@
|
||||
---
|
||||
- name: Starting bifrost deploy container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ bifrost_deploy_image_full }}"
|
||||
name: "bifrost_deploy"
|
||||
privileged: True
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/bifrost/:/etc/bifrost:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "/dev:/dev"
|
@ -1,4 +0,0 @@
|
||||
---
|
||||
- include: config.yml
|
||||
|
||||
- include: start.yml
|
@ -1,3 +0,0 @@
|
||||
mysql_service_name: mysql
|
||||
ansible_python_interpreter: /var/lib/kolla/venv/bin/python
|
||||
network_interface: "{{ bifrost_network_interface }}"
|
@ -1,2 +0,0 @@
|
||||
create_image_via_dib: "true"
|
||||
dib_os_element: "debian"
|
@ -1 +0,0 @@
|
||||
{{ bifrost_ssh_key.private_key }}
|
@ -1 +0,0 @@
|
||||
{{ bifrost_ssh_key.public_key }}
|
@ -1 +0,0 @@
|
||||
---
|
@ -1,3 +0,0 @@
|
||||
Host *
|
||||
StrictHostKeyChecking no
|
||||
UserKnownHostsFile /dev/null
|
@ -1,51 +0,0 @@
|
||||
---
|
||||
project_name: "ceilometer"
|
||||
|
||||
|
||||
####################
|
||||
# Database
|
||||
####################
|
||||
ceilometer_database_name: "ceilometer"
|
||||
ceilometer_database_user: "ceilometer"
|
||||
ceilometer_database_port: "{{ mongodb_port if ceilometer_database_type == 'mongodb' else database_port }}"
|
||||
|
||||
ceilometer_database_mongodb_address: "{% for host in groups['mongodb'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ ceilometer_database_port }}{% if not loop.last %},{% endif %}{% endfor %}"
|
||||
ceilometer_database_mysql_address: "{{ kolla_internal_fqdn }}"
|
||||
|
||||
|
||||
####################
|
||||
# Docker
|
||||
####################
|
||||
ceilometer_notification_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-notification"
|
||||
ceilometer_notification_tag: "{{ openstack_release }}"
|
||||
ceilometer_notification_image_full: "{{ ceilometer_notification_image }}:{{ ceilometer_notification_tag }}"
|
||||
|
||||
ceilometer_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-api"
|
||||
ceilometer_api_tag: "{{ openstack_release }}"
|
||||
ceilometer_api_image_full: "{{ ceilometer_api_image }}:{{ ceilometer_api_tag }}"
|
||||
|
||||
ceilometer_central_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-central"
|
||||
ceilometer_central_tag: "{{ openstack_release }}"
|
||||
ceilometer_central_image_full: "{{ ceilometer_central_image }}:{{ ceilometer_central_tag }}"
|
||||
|
||||
ceilometer_collector_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-collector"
|
||||
ceilometer_collector_tag: "{{ openstack_release }}"
|
||||
ceilometer_collector_image_full: "{{ ceilometer_collector_image }}:{{ ceilometer_collector_tag }}"
|
||||
|
||||
ceilometer_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceilometer-compute"
|
||||
ceilometer_compute_tag: "{{ openstack_release }}"
|
||||
ceilometer_compute_image_full: "{{ ceilometer_compute_image }}:{{ ceilometer_compute_tag }}"
|
||||
|
||||
|
||||
####################
|
||||
# OpenStack
|
||||
####################
|
||||
ceilometer_admin_endpoint: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ ceilometer_api_port }}"
|
||||
ceilometer_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ceilometer_api_port }}"
|
||||
ceilometer_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ ceilometer_api_port }}"
|
||||
|
||||
ceilometer_logging_debug: "{{ openstack_logging_debug }}"
|
||||
|
||||
ceilometer_keystone_user: "ceilometer"
|
||||
|
||||
openstack_ceilometer_auth: "{'auth_url':'{{ openstack_auth.auth_url }}','username':'{{ openstack_auth.username }}','password':'{{ openstack_auth.password }}','project_name':'{{ openstack_auth.project_name }}','domain_name':'default'}"
|
@ -1,3 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: common }
|
@ -1,73 +0,0 @@
|
||||
---
|
||||
- name: Creating Ceilometer MongoDB database
|
||||
command: docker exec -t mongodb mongo --host {{ ceilometer_database_mongodb_address }} --eval 'db = db.getSiblingDB("{{ ceilometer_database_name }}"); db.createUser({user':' "{{ ceilometer_database_user }}", pwd':' "{{ ceilometer_database_password}}", roles':' [ "readWrite", "dbAdmin" ]})'
|
||||
register: mongodb_ceilometer_database
|
||||
run_once: true
|
||||
failed_when:
|
||||
- "'already' not in mongodb_ceilometer_database.stdout"
|
||||
- mongodb_ceilometer_database.stdout.split()[4] != 'connecting'
|
||||
delegate_to: "{{ groups['mongodb'][0] }}"
|
||||
when:
|
||||
- ceilometer_database_type == "mongodb"
|
||||
|
||||
- name: Checking Ceilometer mysql database
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m mysql_db
|
||||
-a "login_host='{{ ceilometer_database_mysql_address }}'
|
||||
login_port='{{ ceilometer_database_port }}'
|
||||
login_user='{{ ceilometer_database_user }}'
|
||||
login_password='{{ ceilometer_database_password }}'
|
||||
name='{{ ceilometer_database_name }}'"
|
||||
register: mysql_access
|
||||
failed_when: False
|
||||
changed_when: False
|
||||
run_once: True
|
||||
when:
|
||||
- ceilometer_database_type == "mysql"
|
||||
|
||||
- name: Creating Ceilometer mysql database
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m mysql_db
|
||||
-a "login_host='{{ ceilometer_database_mysql_address }}'
|
||||
login_port='{{ ceilometer_database_port }}'
|
||||
login_user='{{ database_user }}'
|
||||
login_password='{{ database_password }}'
|
||||
name='{{ ceilometer_database_name }}'"
|
||||
register: mysql_ceilometer_database
|
||||
changed_when: "{{ mysql_ceilometer_database.stdout.find('localhost | SUCCESS => ') != -1 and
|
||||
(mysql_ceilometer_database.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
failed_when: mysql_ceilometer_database.stdout.split()[2] != 'SUCCESS'
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['ceilometer-api'][0] }}"
|
||||
when:
|
||||
- ceilometer_database_type == "mysql"
|
||||
- "'FAILED' in mysql_access.stdout"
|
||||
|
||||
- name: Creating Ceilometer database user and setting permissions
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m mysql_user
|
||||
-a "login_host='{{ ceilometer_database_mysql_address }}'
|
||||
login_port='{{ ceilometer_database_port }}'
|
||||
login_user='{{ database_user }}'
|
||||
login_password='{{ database_password }}'
|
||||
name='{{ ceilometer_database_name }}'
|
||||
password='{{ ceilometer_database_password }}'
|
||||
host='%'
|
||||
priv='{{ ceilometer_database_name }}.*:ALL'
|
||||
append_privs='yes'"
|
||||
register: database_user_create
|
||||
changed_when: "{{ database_user_create.stdout.find('localhost | SUCCESS => ') != -1 and
|
||||
(database_user_create.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
failed_when: database_user_create.stdout.split()[2] != 'SUCCESS'
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['ceilometer-api'][0] }}"
|
||||
when:
|
||||
- ceilometer_database_type == "mysql"
|
||||
- "'FAILED' in mysql_access.stdout"
|
||||
|
||||
# TODO(Jeffrey4l): fix idempotent when ceilomter_database_type == "gnocchi"
|
||||
# NOTE(vbel): bootstrapping of mysql db for ceilometer is idempotent
|
||||
- include: bootstrap_service.yml
|
||||
when: ((ceilometer_database_type == "mongodb" and mongodb_ceilometer_database.changed)
|
||||
or ceilometer_database_type == "mysql"
|
||||
or ceilometer_database_type == "gnocchi")
|
@ -1,22 +0,0 @@
|
||||
---
|
||||
- name: Running Ceilometer bootstrap container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
detach: False
|
||||
environment:
|
||||
KOLLA_BOOTSTRAP:
|
||||
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
|
||||
CEILOMETER_DATABASE_TYPE: "{{ ceilometer_database_type }}"
|
||||
image: "{{ ceilometer_api_image_full }}"
|
||||
labels:
|
||||
BOOTSTRAP:
|
||||
name: "bootstrap_ceilometer"
|
||||
restart_policy: "never"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/ceilometer-api/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "ceilometer:/var/lib/ceilometer/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
run_once: True
|
||||
delegate_to: "{{ groups['ceilometer-api'][0] }}"
|
@ -1,78 +0,0 @@
|
||||
---
|
||||
- name: Ensuring config directories exist
|
||||
file:
|
||||
path: "{{ node_config_directory }}/{{ item }}"
|
||||
state: "directory"
|
||||
recurse: yes
|
||||
with_items:
|
||||
- "ceilometer-notification"
|
||||
- "ceilometer-collector"
|
||||
- "ceilometer-api"
|
||||
- "ceilometer-central"
|
||||
- "ceilometer-compute"
|
||||
|
||||
- name: Copying over config.json files for services
|
||||
template:
|
||||
src: "{{ item }}.json.j2"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/config.json"
|
||||
with_items:
|
||||
- "ceilometer-notification"
|
||||
- "ceilometer-collector"
|
||||
- "ceilometer-api"
|
||||
- "ceilometer-central"
|
||||
- "ceilometer-compute"
|
||||
|
||||
- name: Copying over ceilometer-api.conf
|
||||
template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ node_config_directory }}/ceilometer-api/wsgi-ceilometer-api.conf"
|
||||
with_first_found:
|
||||
- "{{ node_custom_config }}/ceilometer/{{ inventory_hostname }}/wsgi-ceilometer-api.conf"
|
||||
- "{{ node_custom_config }}/ceilometer/wsgi-ceilometer-api.conf"
|
||||
- "wsgi-ceilometer-api.conf.j2"
|
||||
|
||||
- name: Copying over ceilometer.conf
|
||||
merge_configs:
|
||||
vars:
|
||||
service_name: "{{ item }}"
|
||||
sources:
|
||||
- "{{ role_path }}/templates/ceilometer.conf.j2"
|
||||
- "{{ node_custom_config }}/global.conf"
|
||||
- "{{ node_custom_config }}/database.conf"
|
||||
- "{{ node_custom_config }}/messaging.conf"
|
||||
- "{{ node_custom_config }}/ceilometer.conf"
|
||||
- "{{ node_custom_config }}/ceilometer/{{ item }}.conf"
|
||||
- "{{ node_custom_config }}/ceilometer/{{ inventory_hostname }}/ceilometer.conf"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/ceilometer.conf"
|
||||
with_items:
|
||||
- "ceilometer-notification"
|
||||
- "ceilometer-collector"
|
||||
- "ceilometer-api"
|
||||
- "ceilometer-central"
|
||||
- "ceilometer-compute"
|
||||
|
||||
- name: Copying over event and pipeline yaml for notification service
|
||||
template:
|
||||
src: "{{ item }}.j2"
|
||||
dest: "{{ node_config_directory }}/ceilometer-notification/{{ item }}"
|
||||
with_items:
|
||||
- "event_definitions.yaml"
|
||||
- "event_pipeline.yaml"
|
||||
- "pipeline.yaml"
|
||||
|
||||
- name: Check if policies shall be overwritten
|
||||
local_action: stat path="{{ node_custom_config }}/ceilometer/policy.json"
|
||||
register: ceilometer_policy
|
||||
|
||||
- name: Copying over existing policy.json
|
||||
template:
|
||||
src: "{{ node_custom_config }}/ceilometer/policy.json"
|
||||
dest: "{{ node_config_directory }}/{{ item }}/policy.json"
|
||||
with_items:
|
||||
- "ceilometer-notification"
|
||||
- "ceilometer-collector"
|
||||
- "ceilometer-api"
|
||||
- "ceilometer-central"
|
||||
- "ceilometer-compute"
|
||||
when:
|
||||
ceilometer_policy.stat.exists
|
@ -1,14 +0,0 @@
|
||||
---
|
||||
- include: register.yml
|
||||
when: inventory_hostname in groups['ceilometer']
|
||||
|
||||
- include: config.yml
|
||||
when: inventory_hostname in groups['ceilometer'] or
|
||||
inventory_hostname in groups['compute']
|
||||
|
||||
- include: bootstrap.yml
|
||||
when: inventory_hostname in groups['ceilometer']
|
||||
|
||||
- include: start.yml
|
||||
when: inventory_hostname in groups['ceilometer'] or
|
||||
inventory_hostname in groups['compute']
|
@ -1,2 +0,0 @@
|
||||
---
|
||||
- include: "{{ action }}.yml"
|
@ -1 +0,0 @@
|
||||
---
|
@ -1,35 +0,0 @@
|
||||
---
|
||||
- name: Pulling ceilometer-notification image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_notification_image_full }}"
|
||||
when: inventory_hostname in groups['ceilometer-notification']
|
||||
|
||||
- name: Pulling ceilometer-api image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_api_image_full }}"
|
||||
when: inventory_hostname in groups['ceilometer-api']
|
||||
|
||||
- name: Pulling ceilometer-central image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_central_image_full }}"
|
||||
when: inventory_hostname in groups['ceilometer-central']
|
||||
|
||||
- name: Pulling ceilometer-collector image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_collector_image_full }}"
|
||||
when: inventory_hostname in groups['ceilometer-collector']
|
||||
|
||||
- name: Pulling ceilometer-compute image
|
||||
kolla_docker:
|
||||
action: "pull_image"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_compute_image_full }}"
|
||||
when: inventory_hostname in groups['ceilometer-compute']
|
@ -1,84 +0,0 @@
|
||||
---
|
||||
- name: Ensuring the containers up
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_state"
|
||||
register: container_state
|
||||
failed_when: container_state.Running == false
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: ceilometer_api, group: ceilometer-api }
|
||||
- { name: ceilometer_central, group: ceilometer-central }
|
||||
- { name: ceilometer_notification, group: ceilometer-notification }
|
||||
- { name: ceilometer_collector, group: ceilometer-collector }
|
||||
- { name: ceilometer_compute, group: ceilometer-compute }
|
||||
|
||||
- include: config.yml
|
||||
|
||||
- name: Check the configs
|
||||
command: docker exec {{ item.name }} /usr/local/bin/kolla_set_configs --check
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: check_results
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: ceilometer_api, group: ceilometer-api }
|
||||
- { name: ceilometer_central, group: ceilometer-central }
|
||||
- { name: ceilometer_notification, group: ceilometer-notification }
|
||||
- { name: ceilometer_collector, group: ceilometer-collector }
|
||||
- { name: ceilometer_compute, group: ceilometer-compute }
|
||||
|
||||
# NOTE(jeffrey4l): when config_strategy == 'COPY_ALWAYS'
|
||||
# and container env['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE',
|
||||
# just remove the container and start again
|
||||
- name: Containers config strategy
|
||||
kolla_docker:
|
||||
name: "{{ item.name }}"
|
||||
action: "get_container_env"
|
||||
register: container_envs
|
||||
when: inventory_hostname in groups[item.group]
|
||||
with_items:
|
||||
- { name: ceilometer_api, group: ceilometer-api }
|
||||
- { name: ceilometer_central, group: ceilometer-central }
|
||||
- { name: ceilometer_notification, group: ceilometer-notification }
|
||||
- { name: ceilometer_collector, group: ceilometer-collector }
|
||||
- { name: ceilometer_compute, group: ceilometer-compute }
|
||||
|
||||
- name: Remove the containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "remove_container"
|
||||
register: remove_containers
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == "COPY_ONCE" or item[1]['KOLLA_CONFIG_STRATEGY'] == 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: ceilometer_api, group: ceilometer-api },
|
||||
{ name: ceilometer_central, group: ceilometer-central },
|
||||
{ name: ceilometer_notification, group: ceilometer-notification },
|
||||
{ name: ceilometer_collector, group: ceilometer-collector },
|
||||
{ name: ceilometer_compute, group: ceilometer-compute }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
||||
|
||||
- include: start.yml
|
||||
when: remove_containers.changed
|
||||
|
||||
- name: Restart containers
|
||||
kolla_docker:
|
||||
name: "{{ item[0]['name'] }}"
|
||||
action: "restart_container"
|
||||
when:
|
||||
- inventory_hostname in groups[item[0]['group']]
|
||||
- config_strategy == 'COPY_ALWAYS'
|
||||
- item[1]['KOLLA_CONFIG_STRATEGY'] != 'COPY_ONCE'
|
||||
- item[2]['rc'] == 1
|
||||
with_together:
|
||||
- [{ name: ceilometer_api, group: ceilometer-api },
|
||||
{ name: ceilometer_central, group: ceilometer-central },
|
||||
{ name: ceilometer_notification, group: ceilometer-notification },
|
||||
{ name: ceilometer_collector, group: ceilometer-collector },
|
||||
{ name: ceilometer_compute, group: ceilometer-compute }]
|
||||
- "{{ container_envs.results }}"
|
||||
- "{{ check_results.results }}"
|
@ -1,40 +0,0 @@
|
||||
---
|
||||
- name: Creating the Ceilometer service and endpoint
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m kolla_keystone_service
|
||||
-a "service_name=ceilometer
|
||||
service_type=metering
|
||||
description='Openstack Telemetry'
|
||||
endpoint_region={{ openstack_region_name }}
|
||||
url='{{ item.url }}'
|
||||
interface='{{ item.interface }}'
|
||||
region_name={{ openstack_region_name }}
|
||||
auth={{ '{{ openstack_ceilometer_auth }}' }}"
|
||||
-e "{'openstack_ceilometer_auth':{{ openstack_ceilometer_auth }}}"
|
||||
register: ceilometer_endpoint
|
||||
changed_when: "{{ ceilometer_endpoint.stdout.find('localhost | SUCCESS => ') != -1 and (ceilometer_endpoint.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
until: ceilometer_endpoint.stdout.split()[2] == 'SUCCESS'
|
||||
retries: 10
|
||||
delay: 5
|
||||
run_once: True
|
||||
with_items:
|
||||
- {'interface': 'admin', 'url': '{{ ceilometer_admin_endpoint }}'}
|
||||
- {'interface': 'internal', 'url': '{{ ceilometer_internal_endpoint }}'}
|
||||
- {'interface': 'public', 'url': '{{ ceilometer_public_endpoint }}'}
|
||||
|
||||
- name: Creating the Ceilometer project, user, and role
|
||||
command: docker exec -t kolla_toolbox /usr/bin/ansible localhost
|
||||
-m kolla_keystone_user
|
||||
-a "project=service
|
||||
user=ceilometer
|
||||
password={{ ceilometer_keystone_password }}
|
||||
role=admin
|
||||
region_name={{ openstack_region_name }}
|
||||
auth={{ '{{ openstack_ceilometer_auth }}' }}"
|
||||
-e "{'openstack_ceilometer_auth':{{ openstack_ceilometer_auth }}}"
|
||||
register: ceilometer_user
|
||||
changed_when: "{{ ceilometer_user.stdout.find('localhost | SUCCESS => ') != -1 and (ceilometer_user.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
|
||||
until: ceilometer_user.stdout.split()[2] == 'SUCCESS'
|
||||
retries: 10
|
||||
delay: 5
|
||||
run_once: True
|
@ -1,67 +0,0 @@
|
||||
---
|
||||
- name: Starting ceilometer-notification container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_notification_image_full }}"
|
||||
name: "ceilometer_notification"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/ceilometer-notification/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['ceilometer-notification']
|
||||
|
||||
- name: Starting ceilometer-api container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_api_image_full }}"
|
||||
name: "ceilometer_api"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/ceilometer-api/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "ceilometer:/var/lib/ceilometer/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['ceilometer-api']
|
||||
|
||||
- name: Starting ceilometer-central container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_central_image_full }}"
|
||||
name: "ceilometer_central"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/ceilometer-central/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "ceilometer:/var/lib/ceilometer/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['ceilometer-central']
|
||||
|
||||
- name: Starting ceilometer-collector container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_collector_image_full }}"
|
||||
name: "ceilometer_collector"
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/ceilometer-collector/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "ceilometer:/var/lib/ceilometer/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
when: inventory_hostname in groups['ceilometer-collector']
|
||||
|
||||
- name: Starting ceilometer-compute container
|
||||
kolla_docker:
|
||||
action: "start_container"
|
||||
common_options: "{{ docker_common_options }}"
|
||||
image: "{{ ceilometer_compute_image_full }}"
|
||||
name: "ceilometer_compute"
|
||||
privileged: True
|
||||
volumes:
|
||||
- "{{ node_config_directory }}/ceilometer-compute/:{{ container_config_directory }}/:ro"
|
||||
- "/etc/localtime:/etc/localtime:ro"
|
||||
- "/run/:/run/:shared"
|
||||
- "ceilometer:/var/lib/ceilometer/"
|
||||
- "kolla_logs:/var/log/kolla/"
|
||||
- "nova_libvirt:/var/lib/libvirt"
|
||||
when: inventory_hostname in groups['ceilometer-compute']
|
@ -1,6 +0,0 @@
|
||||
---
|
||||
- include: config.yml
|
||||
|
||||
- include: bootstrap_service.yml
|
||||
|
||||
- include: start.yml
|
@ -1,20 +0,0 @@
|
||||
{% set apache_cmd = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
|
||||
{% set apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
|
||||
{% set apache_file = '000-default.conf' if kolla_base_distro in ['ubuntu', 'debian'] else 'ceilometer-api.conf' %}
|
||||
{
|
||||
"command": "/usr/sbin/{{ apache_cmd }} -DFOREGROUND",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/ceilometer.conf",
|
||||
"dest": "/etc/ceilometer/ceilometer.conf",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
},
|
||||
{
|
||||
"source": "{{ container_config_directory }}/wsgi-ceilometer-api.conf",
|
||||
"dest": "/etc/{{ apache_dir }}/{{ apache_file }}",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0644"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "ceilometer-polling --polling-namespaces central",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/ceilometer.conf",
|
||||
"dest": "/etc/ceilometer/ceilometer.conf",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "ceilometer-collector",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/ceilometer.conf",
|
||||
"dest": "/etc/ceilometer/ceilometer.conf",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"command": "ceilometer-polling --polling-namespaces compute",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/ceilometer.conf",
|
||||
"dest": "/etc/ceilometer/ceilometer.conf",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
{
|
||||
"command": "ceilometer-agent-notification",
|
||||
"config_files": [
|
||||
{
|
||||
"source": "{{ container_config_directory }}/ceilometer.conf",
|
||||
"dest": "/etc/ceilometer/ceilometer.conf",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
},
|
||||
{
|
||||
"source": "{{ container_config_directory }}/event_definitions.yaml",
|
||||
"dest": "/etc/ceilometer/event_definitions.yaml",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
},
|
||||
{
|
||||
"source": "{{ container_config_directory }}/event_pipeline.yaml",
|
||||
"dest": "/etc/ceilometer/event_pipeline.yaml",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
},
|
||||
{
|
||||
"source": "{{ container_config_directory }}/pipeline.yaml",
|
||||
"dest": "/etc/ceilometer/pipeline.yaml",
|
||||
"owner": "ceilometer",
|
||||
"perm": "0600"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,64 +0,0 @@
|
||||
[DEFAULT]
|
||||
debug = {{ ceilometer_logging_debug }}
|
||||
|
||||
log_dir = /var/log/kolla/ceilometer
|
||||
|
||||
transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
|
||||
|
||||
{% if ceilometer_database_type == 'gnocchi' %}
|
||||
meter_dispatchers = gnocchi
|
||||
event_dispatchers = gnocchi
|
||||
{% else %}
|
||||
meter_dispatchers = database
|
||||
event_dispatchers = database
|
||||
{% endif %}
|
||||
|
||||
[api]
|
||||
port = {{ ceilometer_api_port }}
|
||||
host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
|
||||
|
||||
[collector]
|
||||
|
||||
[database]
|
||||
{% if ceilometer_database_type == "mongodb" %}
|
||||
event_connection = mongodb://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mongodb_address }}/{{ ceilometer_database_name }}
|
||||
metering_connection = mongodb://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mongodb_address }}/{{ ceilometer_database_name }}
|
||||
{% elif ceilometer_database_type == "mysql" %}
|
||||
event_connection = mysql+pymysql://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mysql_address }}:{{ ceilometer_database_port }}/{{ ceilometer_database_name }}
|
||||
metering_connection = mysql+pymysql://{{ ceilometer_database_user }}:{{ ceilometer_database_password }}@{{ ceilometer_database_mysql_address }}:{{ ceilometer_database_port }}/{{ ceilometer_database_name }}
|
||||
{% endif %}
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
|
||||
project_domain_name = Default
|
||||
project_name = service
|
||||
user_domain_name = Default
|
||||
username = {{ ceilometer_keystone_user }}
|
||||
password = {{ ceilometer_keystone_password }}
|
||||
auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
|
||||
auth_type = password
|
||||
|
||||
memcache_security_strategy = ENCRYPT
|
||||
memcache_secret_key = {{ memcache_secret_key }}
|
||||
memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
|
||||
|
||||
|
||||
[notification]
|
||||
store_events = true
|
||||
|
||||
[service_credentials]
|
||||
auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
|
||||
region_name = {{ openstack_region_name }}
|
||||
password = {{ ceilometer_keystone_password }}
|
||||
username = {{ ceilometer_keystone_user }}
|
||||
project_name = service
|
||||
project_domain_id = default
|
||||
user_domain_id = default
|
||||
auth_type = password
|
||||
|
||||
{% if ceilometer_database_type == 'gnocchi' %}
|
||||
[dispatcher_gnocchi]
|
||||
filter_service_activity = False
|
||||
url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ gnocchi_api_port }}
|
||||
archive_policy = low
|
||||
{% endif %}
|
@ -1,553 +0,0 @@
|
||||
---
|
||||
- event_type: compute.instance.*
|
||||
traits: &instance_traits
|
||||
tenant_id:
|
||||
fields: payload.tenant_id
|
||||
user_id:
|
||||
fields: payload.user_id
|
||||
instance_id:
|
||||
fields: payload.instance_id
|
||||
host:
|
||||
fields: publisher_id.`split(., 1, 1)`
|
||||
service:
|
||||
fields: publisher_id.`split(., 0, -1)`
|
||||
memory_mb:
|
||||
type: int
|
||||
fields: payload.memory_mb
|
||||
disk_gb:
|
||||
type: int
|
||||
fields: payload.disk_gb
|
||||
root_gb:
|
||||
type: int
|
||||
fields: payload.root_gb
|
||||
ephemeral_gb:
|
||||
type: int
|
||||
fields: payload.ephemeral_gb
|
||||
vcpus:
|
||||
type: int
|
||||
fields: payload.vcpus
|
||||
instance_type_id:
|
||||
type: int
|
||||
fields: payload.instance_type_id
|
||||
instance_type:
|
||||
fields: payload.instance_type
|
||||
state:
|
||||
fields: payload.state
|
||||
os_architecture:
|
||||
fields: payload.image_meta.'org.openstack__1__architecture'
|
||||
os_version:
|
||||
fields: payload.image_meta.'org.openstack__1__os_version'
|
||||
os_distro:
|
||||
fields: payload.image_meta.'org.openstack__1__os_distro'
|
||||
launched_at:
|
||||
type: datetime
|
||||
fields: payload.launched_at
|
||||
deleted_at:
|
||||
type: datetime
|
||||
fields: payload.deleted_at
|
||||
- event_type: compute.instance.exists
|
||||
traits:
|
||||
<<: *instance_traits
|
||||
audit_period_beginning:
|
||||
type: datetime
|
||||
fields: payload.audit_period_beginning
|
||||
audit_period_ending:
|
||||
type: datetime
|
||||
fields: payload.audit_period_ending
|
||||
- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
|
||||
traits: &cinder_traits
|
||||
user_id:
|
||||
fields: payload.user_id
|
||||
project_id:
|
||||
fields: payload.tenant_id
|
||||
availability_zone:
|
||||
fields: payload.availability_zone
|
||||
display_name:
|
||||
fields: payload.display_name
|
||||
replication_status:
|
||||
fields: payload.replication_status
|
||||
status:
|
||||
fields: payload.status
|
||||
created_at:
|
||||
fields: payload.created_at
|
||||
- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*']
|
||||
traits:
|
||||
<<: *cinder_traits
|
||||
resource_id:
|
||||
fields: payload.volume_id
|
||||
host:
|
||||
fields: payload.host
|
||||
size:
|
||||
fields: payload.size
|
||||
type:
|
||||
fields: payload.volume_type
|
||||
replication_status:
|
||||
fields: payload.replication_status
|
||||
- event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
|
||||
traits:
|
||||
<<: *cinder_traits
|
||||
resource_id:
|
||||
fields: payload.snapshot_id
|
||||
volume_id:
|
||||
fields: payload.volume_id
|
||||
- event_type: ['image_volume_cache.*']
|
||||
traits:
|
||||
image_id:
|
||||
fields: payload.image_id
|
||||
host:
|
||||
fields: payload.host
|
||||
- event_type: ['image.update', 'image.upload', 'image.delete']
|
||||
traits: &glance_crud
|
||||
project_id:
|
||||
fields: payload.owner
|
||||
resource_id:
|
||||
fields: payload.id
|
||||
name:
|
||||
fields: payload.name
|
||||
status:
|
||||
fields: payload.status
|
||||
created_at:
|
||||
fields: payload.created_at
|
||||
user_id:
|
||||
fields: payload.owner
|
||||
deleted_at:
|
||||
fields: payload.deleted_at
|
||||
size:
|
||||
fields: payload.size
|
||||
- event_type: image.send
|
||||
traits: &glance_send
|
||||
receiver_project:
|
||||
fields: payload.receiver_tenant_id
|
||||
receiver_user:
|
||||
fields: payload.receiver_user_id
|
||||
user_id:
|
||||
fields: payload.owner_id
|
||||
image_id:
|
||||
fields: payload.image_id
|
||||
destination_ip:
|
||||
fields: payload.destination_ip
|
||||
bytes_sent:
|
||||
fields: payload.bytes_sent
|
||||
- event_type: orchestration.stack.*
|
||||
traits: &orchestration_crud
|
||||
project_id:
|
||||
fields: payload.tenant_id
|
||||
user_id:
|
||||
fields: ['_context_trustor_user_id', '_context_user_id']
|
||||
resource_id:
|
||||
fields: payload.stack_identity
|
||||
- event_type: sahara.cluster.*
|
||||
traits: &sahara_crud
|
||||
project_id:
|
||||
fields: payload.project_id
|
||||
user_id:
|
||||
fields: _context_user_id
|
||||
resource_id:
|
||||
fields: payload.cluster_id
|
||||
- event_type: sahara.cluster.health
|
||||
traits: &sahara_health
|
||||
<<: *sahara_crud
|
||||
verification_id:
|
||||
fields: payload.verification_id
|
||||
health_check_status:
|
||||
fields: payload.health_check_status
|
||||
health_check_name:
|
||||
fields: payload.health_check_name
|
||||
health_check_description:
|
||||
fields: payload.health_check_description
|
||||
created_at:
|
||||
type: datetime
|
||||
fields: payload.created_at
|
||||
updated_at:
|
||||
type: datetime
|
||||
fields: payload.updated_at
|
||||
- event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*',
|
||||
'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*']
|
||||
traits: &identity_crud
|
||||
resource_id:
|
||||
fields: payload.resource_info
|
||||
initiator_id:
|
||||
fields: payload.initiator.id
|
||||
project_id:
|
||||
fields: payload.initiator.project_id
|
||||
domain_id:
|
||||
fields: payload.initiator.domain_id
|
||||
- event_type: identity.role_assignment.*
|
||||
traits: &identity_role_assignment
|
||||
role:
|
||||
fields: payload.role
|
||||
group:
|
||||
fields: payload.group
|
||||
domain:
|
||||
fields: payload.domain
|
||||
user:
|
||||
fields: payload.user
|
||||
project:
|
||||
fields: payload.project
|
||||
- event_type: identity.authenticate
|
||||
traits: &identity_authenticate
|
||||
typeURI:
|
||||
fields: payload.typeURI
|
||||
id:
|
||||
fields: payload.id
|
||||
action:
|
||||
fields: payload.action
|
||||
eventType:
|
||||
fields: payload.eventType
|
||||
eventTime:
|
||||
fields: payload.eventTime
|
||||
outcome:
|
||||
fields: payload.outcome
|
||||
initiator_typeURI:
|
||||
fields: payload.initiator.typeURI
|
||||
initiator_id:
|
||||
fields: payload.initiator.id
|
||||
initiator_name:
|
||||
fields: payload.initiator.name
|
||||
initiator_host_agent:
|
||||
fields: payload.initiator.host.agent
|
||||
initiator_host_addr:
|
||||
fields: payload.initiator.host.address
|
||||
target_typeURI:
|
||||
fields: payload.target.typeURI
|
||||
target_id:
|
||||
fields: payload.target.id
|
||||
observer_typeURI:
|
||||
fields: payload.observer.typeURI
|
||||
observer_id:
|
||||
fields: payload.observer.id
|
||||
- event_type: objectstore.http.request
|
||||
traits: &objectstore_request
|
||||
typeURI:
|
||||
fields: payload.typeURI
|
||||
id:
|
||||
fields: payload.id
|
||||
action:
|
||||
fields: payload.action
|
||||
eventType:
|
||||
fields: payload.eventType
|
||||
eventTime:
|
||||
fields: payload.eventTime
|
||||
outcome:
|
||||
fields: payload.outcome
|
||||
initiator_typeURI:
|
||||
fields: payload.initiator.typeURI
|
||||
initiator_id:
|
||||
fields: payload.initiator.id
|
||||
initiator_project_id:
|
||||
fields: payload.initiator.project_id
|
||||
target_typeURI:
|
||||
fields: payload.target.typeURI
|
||||
target_id:
|
||||
fields: payload.target.id
|
||||
target_action:
|
||||
fields: payload.target.action
|
||||
target_metadata_path:
|
||||
fields: payload.target.metadata.path
|
||||
target_metadata_version:
|
||||
fields: payload.target.metadata.version
|
||||
target_metadata_container:
|
||||
fields: payload.target.metadata.container
|
||||
target_metadata_object:
|
||||
fields: payload.target.metadata.object
|
||||
observer_id:
|
||||
fields: payload.observer.id
|
||||
- event_type: magnetodb.table.*
|
||||
traits: &kv_store
|
||||
resource_id:
|
||||
fields: payload.table_uuid
|
||||
user_id:
|
||||
fields: _context_user_id
|
||||
project_id:
|
||||
fields: _context_tenant
|
||||
- event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*']
|
||||
traits: &network_traits
|
||||
user_id:
|
||||
fields: _context_user_id
|
||||
project_id:
|
||||
fields: _context_tenant_id
|
||||
- event_type: network.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.network.id', 'payload.id']
|
||||
- event_type: subnet.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.subnet.id', 'payload.id']
|
||||
- event_type: port.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.port.id', 'payload.id']
|
||||
- event_type: router.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.router.id', 'payload.id']
|
||||
- event_type: floatingip.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.floatingip.id', 'payload.id']
|
||||
- event_type: pool.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.pool.id', 'payload.id']
|
||||
- event_type: vip.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.vip.id', 'payload.id']
|
||||
- event_type: member.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.member.id', 'payload.id']
|
||||
- event_type: health_monitor.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.health_monitor.id', 'payload.id']
|
||||
- event_type: healthmonitor.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.healthmonitor.id', 'payload.id']
|
||||
- event_type: listener.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.listener.id', 'payload.id']
|
||||
- event_type: loadbalancer.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.loadbalancer.id', 'payload.id']
|
||||
- event_type: firewall.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.firewall.id', 'payload.id']
|
||||
- event_type: firewall_policy.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.firewall_policy.id', 'payload.id']
|
||||
- event_type: firewall_rule.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.firewall_rule.id', 'payload.id']
|
||||
- event_type: vpnservice.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.vpnservice.id', 'payload.id']
|
||||
- event_type: ipsecpolicy.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.ipsecpolicy.id', 'payload.id']
|
||||
- event_type: ikepolicy.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.ikepolicy.id', 'payload.id']
|
||||
- event_type: ipsec_site_connection.*
|
||||
traits:
|
||||
<<: *network_traits
|
||||
resource_id:
|
||||
fields: ['payload.ipsec_site_connection.id', 'payload.id']
|
||||
- event_type: '*http.*'
|
||||
traits: &http_audit
|
||||
project_id:
|
||||
fields: payload.initiator.project_id
|
||||
user_id:
|
||||
fields: payload.initiator.id
|
||||
typeURI:
|
||||
fields: payload.typeURI
|
||||
eventType:
|
||||
fields: payload.eventType
|
||||
action:
|
||||
fields: payload.action
|
||||
outcome:
|
||||
fields: payload.outcome
|
||||
id:
|
||||
fields: payload.id
|
||||
eventTime:
|
||||
fields: payload.eventTime
|
||||
requestPath:
|
||||
fields: payload.requestPath
|
||||
observer_id:
|
||||
fields: payload.observer.id
|
||||
target_id:
|
||||
fields: payload.target.id
|
||||
target_typeURI:
|
||||
fields: payload.target.typeURI
|
||||
target_name:
|
||||
fields: payload.target.name
|
||||
initiator_typeURI:
|
||||
fields: payload.initiator.typeURI
|
||||
initiator_id:
|
||||
fields: payload.initiator.id
|
||||
initiator_name:
|
||||
fields: payload.initiator.name
|
||||
initiator_host_address:
|
||||
fields: payload.initiator.host.address
|
||||
- event_type: '*http.response'
|
||||
traits:
|
||||
<<: *http_audit
|
||||
reason_code:
|
||||
fields: payload.reason.reasonCode
|
||||
- event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete']
|
||||
traits: &dns_domain_traits
|
||||
status:
|
||||
fields: payload.status
|
||||
retry:
|
||||
fields: payload.retry
|
||||
description:
|
||||
fields: payload.description
|
||||
expire:
|
||||
fields: payload.expire
|
||||
email:
|
||||
fields: payload.email
|
||||
ttl:
|
||||
fields: payload.ttl
|
||||
action:
|
||||
fields: payload.action
|
||||
name:
|
||||
fields: payload.name
|
||||
resource_id:
|
||||
fields: payload.id
|
||||
created_at:
|
||||
fields: payload.created_at
|
||||
updated_at:
|
||||
fields: payload.updated_at
|
||||
version:
|
||||
fields: payload.version
|
||||
parent_domain_id:
|
||||
fields: parent_domain_id
|
||||
serial:
|
||||
fields: payload.serial
|
||||
- event_type: dns.domain.exists
|
||||
traits:
|
||||
<<: *dns_domain_traits
|
||||
audit_period_beginning:
|
||||
type: datetime
|
||||
fields: payload.audit_period_beginning
|
||||
audit_period_ending:
|
||||
type: datetime
|
||||
fields: payload.audit_period_ending
|
||||
- event_type: trove.*
|
||||
traits: &trove_base_traits
|
||||
state:
|
||||
fields: payload.state_description
|
||||
instance_type:
|
||||
fields: payload.instance_type
|
||||
user_id:
|
||||
fields: payload.user_id
|
||||
resource_id:
|
||||
fields: payload.instance_id
|
||||
instance_type_id:
|
||||
fields: payload.instance_type_id
|
||||
launched_at:
|
||||
type: datetime
|
||||
fields: payload.launched_at
|
||||
instance_name:
|
||||
fields: payload.instance_name
|
||||
state:
|
||||
fields: payload.state
|
||||
nova_instance_id:
|
||||
fields: payload.nova_instance_id
|
||||
service_id:
|
||||
fields: payload.service_id
|
||||
created_at:
|
||||
type: datetime
|
||||
fields: payload.created_at
|
||||
region:
|
||||
fields: payload.region
|
||||
- event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete']
|
||||
traits: &trove_common_traits
|
||||
name:
|
||||
fields: payload.name
|
||||
availability_zone:
|
||||
fields: payload.availability_zone
|
||||
instance_size:
|
||||
type: int
|
||||
fields: payload.instance_size
|
||||
volume_size:
|
||||
type: int
|
||||
fields: payload.volume_size
|
||||
nova_volume_id:
|
||||
fields: payload.nova_volume_id
|
||||
- event_type: trove.instance.create
|
||||
traits:
|
||||
<<: [*trove_base_traits, *trove_common_traits]
|
||||
- event_type: trove.instance.modify_volume
|
||||
traits:
|
||||
<<: [*trove_base_traits, *trove_common_traits]
|
||||
old_volume_size:
|
||||
type: int
|
||||
fields: payload.old_volume_size
|
||||
modify_at:
|
||||
type: datetime
|
||||
fields: payload.modify_at
|
||||
- event_type: trove.instance.modify_flavor
|
||||
traits:
|
||||
<<: [*trove_base_traits, *trove_common_traits]
|
||||
old_instance_size:
|
||||
type: int
|
||||
fields: payload.old_instance_size
|
||||
modify_at:
|
||||
type: datetime
|
||||
fields: payload.modify_at
|
||||
- event_type: trove.instance.delete
|
||||
traits:
|
||||
<<: [*trove_base_traits, *trove_common_traits]
|
||||
deleted_at:
|
||||
type: datetime
|
||||
fields: payload.deleted_at
|
||||
- event_type: trove.instance.exists
|
||||
traits:
|
||||
<<: *trove_base_traits
|
||||
display_name:
|
||||
fields: payload.display_name
|
||||
audit_period_beginning:
|
||||
type: datetime
|
||||
fields: payload.audit_period_beginning
|
||||
audit_period_ending:
|
||||
type: datetime
|
||||
fields: payload.audit_period_ending
|
||||
- event_type: profiler.*
|
||||
traits:
|
||||
project:
|
||||
fields: payload.project
|
||||
service:
|
||||
fields: payload.service
|
||||
name:
|
||||
fields: payload.name
|
||||
base_id:
|
||||
fields: payload.base_id
|
||||
trace_id:
|
||||
fields: payload.trace_id
|
||||
parent_id:
|
||||
fields: payload.parent_id
|
||||
timestamp:
|
||||
fields: payload.timestamp
|
||||
host:
|
||||
fields: payload.info.host
|
||||
path:
|
||||
fields: payload.info.request.path
|
||||
query:
|
||||
fields: payload.info.request.query
|
||||
method:
|
||||
fields: payload.info.request.method
|
||||
scheme:
|
||||
fields: payload.info.request.scheme
|
||||
db.statement:
|
||||
fields: payload.info.db.statement
|
||||
db.params:
|
||||
fields: payload.info.db.params
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user