Migrate Puppet Hieradata to composable services

Migrate puppet/hieradata/*.yaml parameters to puppet/services/*.yaml,
except for parameters of services that are not composable yet.

Co-Authored-By: Juan Antonio Osorio Robles <jaosorior@redhat.com>
Change-Id: I7e5f8b18ee9aa63a1dffc6facaf88315b07d5fd7
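
The pattern applied throughout: a key that used to live in a shared
hieradata file moves into the config_settings output of that service's
own template. A minimal before/after sketch (the "example" service and
key are illustrative, not part of this change):

    # Before: puppet/hieradata/controller.yaml
    example::api::keystone_tenant: 'service'

    # After: puppet/services/example-api.yaml
    outputs:
      role_data:
        value:
          service_name: example-api
          config_settings:
            example::api::keystone_tenant: 'service'
          step_config: |
            include ::tripleo::profile::base::example::api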

@@ -106,8 +106,6 @@ resources:
- {get_param: ceph_storage_hosts}
hiera:
datafiles:
RedHat:
raw_data: {get_file: hieradata/RedHat.yaml}
bootstrap_node:
mapped_data:
bootstrap_nodeid: {get_input: bootstrap_nodeid}

@@ -0,0 +1 @@
Do not add more hieradata to this directory; use composable services instead.

@@ -1,9 +0,0 @@
# RedHat specific overrides go here
rabbitmq::package_provider: 'yum'
# The Galera package should work in cluster and
# non-cluster modes based on the config file.
# We set the package name here explicitly so
# that it matches what we pre-install
# in tripleo-puppet-elements.
mysql::server::package_name: 'mariadb-galera-server'

@@ -1,51 +1,3 @@
# Common Hiera data gets applied to all nodes
ssh::server::storeconfigs_enabled: false
# ceilometer settings used by compute and controller ceilo auth settings
ceilometer::agent::auth::auth_region: 'regionOne'
ceilometer::agent::auth::auth_tenant_name: 'service'
# TODO(emilien) move it to composable aodh roles later
aodh::auth::auth_region: 'regionOne'
aodh::auth::auth_tenant_name: 'service'
gnocchi::auth::auth_region: 'regionOne'
gnocchi::auth::auth_tenant_name: 'service'
nova::api::admin_tenant_name: 'service'
nova::network::neutron::neutron_project_name: 'service'
nova::network::neutron::neutron_username: 'neutron'
nova::network::neutron::dhcp_domain: ''
neutron::allow_overlapping_ips: true
neutron::server::project_name: 'service'
kernel_modules:
nf_conntrack: {}
sysctl_settings:
net.ipv4.tcp_keepalive_intvl:
value: 1
net.ipv4.tcp_keepalive_probes:
value: 5
net.ipv4.tcp_keepalive_time:
value: 5
net.nf_conntrack_max:
value: 500000
net.netfilter.nf_conntrack_max:
value: 500000
# prevent neutron bridges from autoconfiguring ipv6 addresses
net.ipv6.conf.default.accept_ra:
value: 0
net.ipv6.conf.default.autoconf:
value: 0
net.core.netdev_max_backlog:
value: 10000
nova::rabbit_heartbeat_timeout_threshold: 60
neutron::rabbit_heartbeat_timeout_threshold: 60
cinder::rabbit_heartbeat_timeout_threshold: 60
ceilometer::rabbit_heartbeat_timeout_threshold: 60
heat::rabbit_heartbeat_timeout_threshold: 60
keystone::rabbit_heartbeat_timeout_threshold: 60
nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'

@@ -1,21 +1,3 @@
# Hiera data here applies to all compute nodes
nova::notify_on_state_change: 'vm_and_task_state'
nova::notification_driver: messagingv2
nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
nova::network::neutron::neutron_auth_type: 'v3password'
# Changing the default from 512MB. The current templates cannot deploy
# overclouds with swap. On an idle compute node, we see ~1024MB of RAM
# used. 2048 is suggested to account for other possible operations, for
# example openvswitch.
nova::compute::reserved_host_memory: 2048
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
compute_classes: []

@@ -1,190 +1,16 @@
# Hiera data here applies to all controller nodes
nova::api::enabled: true
nova::vncproxy::enabled: true
# gnocchi
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 2
gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3'
gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616'
gnocchi::statsd::flush_delay: 10
gnocchi::statsd::archive_policy_name: 'low'
# rabbitmq
rabbitmq::delete_guest_user: false
rabbitmq::wipe_db_on_cookie_change: true
rabbitmq::port: '5672'
rabbitmq::package_source: undef
rabbitmq::repos_ensure: false
rabbitmq_environment:
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
rabbitmq_kernel_variables:
inet_dist_listen_min: '35672'
inet_dist_listen_max: '35672'
rabbitmq_config_variables:
tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
cluster_partition_handling: 'pause_minority'
loopback_users: '[]'
mongodb::server::replset: tripleo
mongodb::server::journal: false
redis::port: 6379
redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
# keystone
keystone::roles::admin::email: 'root@localhost'
# service tenant
glance::api::keystone_tenant: 'service'
# TODO(emilien) move it to composable aodh roles later
aodh::api::keystone_tenant: 'service'
glance::registry::keystone_tenant: 'service'
neutron::server::auth_tenant: 'service'
neutron::agents::metadata::auth_tenant: 'service'
neutron::agents::l3::router_delete_namespaces: True
cinder::api::keystone_tenant: 'service'
swift::proxy::authtoken::admin_tenant_name: 'service'
ceilometer::api::keystone_tenant: 'service'
gnocchi::api::keystone_tenant: 'service'
heat::keystone_tenant: 'service'
sahara::admin_tenant_name: 'service'
aodh::keystone::auth::tenant: 'service'
ceilometer::keystone::auth::tenant: 'service'
cinder::keystone::auth::tenant: 'service'
glance::keystone::auth::tenant: 'service'
gnocchi::keystone::auth::tenant: 'service'
heat::keystone::auth::tenant: 'service'
neutron::keystone::auth::tenant: 'service'
nova::keystone::auth::tenant: 'service'
sahara::keystone::auth::tenant: 'service'
swift::keystone::auth::tenant: 'service'
# keystone
keystone::cron::token_flush::maxdelay: 3600
keystone::roles::admin::service_tenant: 'service'
keystone::roles::admin::admin_tenant: 'admin'
keystone::cron::token_flush::destination: '/dev/null'
keystone::config::keystone_config:
DEFAULT/secure_proxy_ssl_header:
value: 'HTTP_X_FORWARDED_PROTO'
ec2/driver:
value: 'keystone.contrib.ec2.backends.sql.Ec2'
keystone::service_name: 'httpd'
keystone::wsgi::apache::ssl: false
#swift
swift::proxy::pipeline:
- 'catch_errors'
- 'healthcheck'
- 'proxy-logging'
- 'cache'
- 'ratelimit'
- 'bulk'
- 'tempurl'
- 'formpost'
- 'authtoken'
- 'keystone'
- 'staticweb'
- 'proxy-logging'
- 'proxy-server'
swift::proxy::account_autocreate: true
swift::keystone::auth::configure_s3_endpoint: false
swift::keystone::auth::operator_roles:
- admin
- swiftoperator
# glance
glance::api::pipeline: 'keystone'
glance::api::show_image_direct_url: true
glance::registry::pipeline: 'keystone'
glance::backend::swift::swift_store_create_container_on_put: true
glance_file_pcmk_directory: '/var/lib/glance/images'
# neutron
neutron::server::sync_db: true
# nova
nova::notify_on_state_change: 'vm_and_task_state'
nova::api::default_floating_pool: 'public'
nova::api::sync_db_api: true
nova::api::enable_proxy_headers_parsing: true
nova::notification_driver: messaging
# ceilometer
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
# cinder
cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
cinder::cron::db_purge::destination: '/dev/null'
cinder::host: hostgroup
# TODO(jaosorior): Move to cinder profile once cinder is moved as a composable
# service.
cinder::api::enable_proxy_headers_parsing: true
# heat
heat::engine::configure_delegated_roles: false
heat::engine::trusts_delegated_roles: []
heat::instance_user: ''
heat::cron::purge_deleted::age: 30
heat::cron::purge_deleted::age_type: 'days'
heat::cron::purge_deleted::maxdelay: 3600
heat::cron::purge_deleted::destination: '/dev/null'
heat::keystone::domain::domain_name: 'heat_stack'
heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
heat::auth_plugin: 'password'
# pacemaker
pacemaker::corosync::cluster_name: 'tripleo_cluster'
pacemaker::corosync::manage_fw: false
pacemaker::resource_defaults::defaults:
resource-stickiness: { value: INFINITY }
corosync_token_timeout: 10000
# horizon
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
horizon::vhost_extra_params:
add_listen: false
priority: 10
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
# mysql
mysql::server::manage_config_file: true
tripleo::haproxy::keystone_admin: true
tripleo::haproxy::keystone_public: true
tripleo::haproxy::neutron: true
tripleo::haproxy::cinder: true
tripleo::haproxy::glance_api: true
tripleo::haproxy::glance_registry: true
tripleo::haproxy::nova_osapi: true
tripleo::haproxy::nova_metadata: true
tripleo::haproxy::nova_novncproxy: true
tripleo::haproxy::mysql: true
tripleo::haproxy::redis: true
tripleo::haproxy::sahara: true
tripleo::haproxy::swift_proxy_server: true
tripleo::haproxy::ceilometer: true
tripleo::haproxy::aodh: true
tripleo::haproxy::gnocchi: true
tripleo::haproxy::heat_api: true
tripleo::haproxy::heat_cloudwatch: true
tripleo::haproxy::heat_cfn: true
tripleo::haproxy::horizon: true
controller_classes: []
# firewall
# TODO(emilien) move it to composable roles later
# Already WIP with https://review.openstack.org/330785
# and https://review.openstack.org/338527
tripleo::firewall::firewall_rules:
'128 aodh':
dport:
- 8042
- 13042
controller_classes: []
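
The firewall rules kept above follow the same migration path: each
service template carries a tripleo.<service>.firewall_rules map in its
config_settings, as the ceilometer-api and cinder-api hunks below show.
The general shape, with a hypothetical service name:

    tripleo.example_api.firewall_rules:
      '128 example':
        dport:
          - 8042
          - 13042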

@@ -1,4 +1,5 @@
# Aodh
# TODO(emilien) move it to composable aodh roles later
aodh::db::mysql::user: aodh
aodh::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
aodh::db::mysql::dbname: aodh

@@ -1,21 +1,2 @@
# Hiera data for swift storage nodes
swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
swift::storage::all::object_pipeline:
- healthcheck
- recon
- object-server
swift::storage::all::container_pipeline:
- healthcheck
- container-server
swift::storage::all::account_pipeline:
- healthcheck
- account-server
swift::proxy::keystone::operator_roles:
- admin
- swiftoperator
- ResellerAdmin
object_classes: []

@@ -1,14 +1,3 @@
# Hiera data here applies to all volume storage nodes
# cinder
cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
cinder::config::cinder_config:
DEFAULT/nova_catalog_info:
value: 'compute:Compute Service:internalURL'
DEFAULT/swift_catalog_info:
value: 'object-store:swift:internalURL'
cinder_user_enabled_backends: []
volume_classes: []
volume_classes: []

@@ -30,5 +30,6 @@ outputs:
dport:
- 8777
- 13777
- ceilometer::api::keystone_tenant: 'service'
step_config: |
include ::tripleo::profile::base::ceilometer::api

@@ -83,6 +83,9 @@ outputs:
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents}
ceilometer::agent::auth::auth_region: 'regionOne'
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::db::mysql::password: {get_param: CeilometerPassword}
ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
@@ -94,6 +97,7 @@ outputs:
ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
ceilometer::keystone::auth::tenant: 'service'
ceilometer::rabbit_userid: {get_param: RabbitUserName}
ceilometer::rabbit_password: {get_param: RabbitPassword}
ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -104,3 +108,4 @@ outputs:
ceilometer::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
ceilometer::rabbit_heartbeat_timeout_threshold: 60

@@ -37,6 +37,13 @@ outputs:
- cinder::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
cinder::api::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
cinder::api::keystone_password: {get_param: CinderPassword}
cinder::api::keystone_tenant: 'service'
cinder::api::enable_proxy_headers_parsing: true
cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL'
# TODO(emilien) move it to puppet-cinder
cinder::config:
DEFAULT/swift_catalog_info:
value: 'object-store:swift:internalURL'
cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge}
tripleo.cinder_api.firewall_rules:

@@ -63,3 +63,7 @@ outputs:
cinder::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
cinder::rabbit_heartbeat_timeout_threshold: 60
cinder::keystone::auth::tenant: 'service'
cinder::host: hostgroup
cinder::cron::db_purge::destination: '/dev/null'

@@ -23,6 +23,8 @@ outputs:
value:
service_name: cinder-scheduler
config_settings:
get_attr: [CinderBase, role_data, config_settings]
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
- cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
step_config: |
include ::tripleo::profile::base::cinder::scheduler
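
This hunk (like the nova-vncproxy one further down) also fixes how
base settings are layered: config_settings must resolve to a single
map, so combining the base role data with service-specific keys needs
map_merge rather than a bare get_attr with extra keys appended. The
general shape:

    config_settings:
      map_merge:
        - get_attr: [ServiceBase, role_data, config_settings]
        - service::specific::setting: 'value'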

@@ -27,5 +27,6 @@ outputs:
service_name: mongodb-base
config_settings:
mongodb::server::nojournal: {get_param: MongoDbNoJournal}
mongodb::server::journal: false
mongodb::server::ipv6: {get_param: MongoDbIPv6}
mongodb::server::replset: {get_param: MongoDbReplset}
mongodb::server::replset: {get_param: MongoDbReplset}

@@ -17,6 +17,13 @@ outputs:
value:
service_name: mysql
config_settings:
# The Galera package should work in cluster and
# non-cluster modes based on the config file.
# We set the package name here explicitly so
# that it matches what we pre-install
# in tripleo-puppet-elements.
mysql::server::package_name: 'mariadb-galera-server'
mysql::server::manage_config_file: true
tripleo.mysql.firewall_rules:
'104 mysql galera':
dport:

@@ -15,8 +15,11 @@ outputs:
value:
service_name: redis-base
config_settings:
redis::requirepass: {get_param: RedisPassword}
redis::masterauth: {get_param: RedisPassword}
redis::sentinel_auth_pass: {get_param: RedisPassword}
tripleo::loadbalancer::redis_password: {get_param: RedisPassword}
redis::requirepass: {get_param: RedisPassword}
redis::masterauth: {get_param: RedisPassword}
redis::sentinel_auth_pass: {get_param: RedisPassword}
redis::port: 6379
redis::sentinel::master_name: '"%{hiera(\"bootstrap_nodeid\")}"'
redis::sentinel::redis_host: '"%{hiera(\"bootstrap_nodeid_ip\")}"'
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
tripleo::loadbalancer::redis_password: {get_param: RedisPassword}
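
Note the extra quoting on the hiera interpolations here, for example
'"%{hiera(\"bootstrap_nodeid\")}"'. These values pass through Heat and
JSON serialization before they land in hieradata, so the outer single
quotes plus the escaped inner quotes are needed to keep the
%{hiera(...)} token intact for Puppet to interpolate at catalog time.
The same trick appears for neutron::host and nova::host below. Roughly
what ends up in the generated hieradata:

    redis::sentinel::master_name: "%{hiera(\"bootstrap_nodeid\")}"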

@@ -92,6 +92,7 @@ outputs:
glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
glance::backend::swift::swift_store_user: service:glance
glance::backend::swift::swift_store_key: {get_param: GlancePassword}
glance::backend::swift::swift_store_create_container_on_put: true
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
glance_backend: {get_param: GlanceBackend}
@@ -109,5 +110,10 @@ outputs:
dport:
- 9292
- 13292
glance::keystone::auth::tenant: 'service'
glance::api::keystone_tenant: 'service'
glance::api::pipeline: 'keystone'
glance::api::show_image_direct_url: true
step_config: |
include ::tripleo::profile::base::glance::api

@@ -38,6 +38,8 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/glance'
glance::registry::keystone_password: {get_param: GlancePassword}
glance::registry::keystone_tenant: 'service'
glance::registry::pipeline: 'keystone'
glance::registry::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
glance::registry::debug: {get_param: Debug}

@@ -29,5 +29,6 @@ outputs:
dport:
- 8041
- 13041
- gnocchi::api::keystone_tenant: 'service'
step_config: |
include ::tripleo::profile::base::gnocchi::api

@@ -90,3 +90,5 @@ outputs:
gnocchi::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
gnocchi::auth::auth_region: 'regionOne'
gnocchi::auth::auth_tenant_name: 'service'

@@ -19,5 +19,26 @@ outputs:
tripleo.haproxy.firewall_rules:
'107 haproxy stats':
dport: 1993
# TODO(emilien) make this composable so we only enable the services that are actually running
tripleo::haproxy::keystone_admin: true
tripleo::haproxy::keystone_public: true
tripleo::haproxy::neutron: true
tripleo::haproxy::cinder: true
tripleo::haproxy::glance_api: true
tripleo::haproxy::glance_registry: true
tripleo::haproxy::nova_osapi: true
tripleo::haproxy::nova_metadata: true
tripleo::haproxy::nova_novncproxy: true
tripleo::haproxy::mysql: true
tripleo::haproxy::redis: true
tripleo::haproxy::sahara: true
tripleo::haproxy::swift_proxy_server: true
tripleo::haproxy::ceilometer: true
tripleo::haproxy::aodh: true
tripleo::haproxy::gnocchi: true
tripleo::haproxy::heat_api: true
tripleo::haproxy::heat_cloudwatch: true
tripleo::haproxy::heat_cfn: true
tripleo::haproxy::horizon: true
step_config: |
include ::tripleo::profile::base::haproxy
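
Until the TODO above is addressed, every frontend is enabled
unconditionally. A sketch of how an operator could still flip one of
these flags, assuming the standard ExtraConfig hiera override (the
environment file name is hypothetical):

    # environments/no-sahara-haproxy.yaml (hypothetical)
    parameter_defaults:
      ExtraConfig:
        tripleo::haproxy::sahara: false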

@@ -45,3 +45,14 @@ outputs:
context_is_admin:
key: 'context_is_admin'
value: 'role:admin'
heat::rabbit_heartbeat_timeout_threshold: 60
heat::keystone_tenant: 'service'
heat::keystone::auth::tenant: 'service'
heat::keystone::domain::domain_name: 'heat_stack'
heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
heat::auth_plugin: 'password'
heat::cron::purge_deleted::age: 30
heat::cron::purge_deleted::age_type: 'days'
heat::cron::purge_deleted::maxdelay: 3600
heat::cron::purge_deleted::destination: '/dev/null'

@@ -40,6 +40,8 @@ outputs:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- heat::engine::num_engine_workers: {get_param: HeatWorkers}
heat::engine::configure_delegated_roles: false
heat::engine::trusts_delegated_roles: []
tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
heat::database_connection:
list_join:

@@ -36,5 +36,11 @@ outputs:
dport:
- 80
- 443
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
horizon::vhost_extra_params:
add_listen: false
priority: 10
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
step_config: |
include ::tripleo::profile::base::horizon

@@ -15,5 +15,26 @@ outputs:
description: Role data for the Kernel modules
value:
service_name: kernel
config_settings:
kernel_modules:
nf_conntrack: {}
sysctl_settings:
net.ipv4.tcp_keepalive_intvl:
value: 1
net.ipv4.tcp_keepalive_probes:
value: 5
net.ipv4.tcp_keepalive_time:
value: 5
net.nf_conntrack_max:
value: 500000
net.netfilter.nf_conntrack_max:
value: 500000
# prevent neutron bridges from autoconfiguring ipv6 addresses
net.ipv6.conf.default.accept_ra:
value: 0
net.ipv6.conf.default.autoconf:
value: 0
net.core.netdev_max_backlog:
value: 10000
step_config: |
include ::tripleo::profile::base::kernel

@@ -132,6 +132,18 @@ outputs:
keystone::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
keystone::rabbit_heartbeat_timeout_threshold: 60
keystone::cron::token_flush::maxdelay: 3600
keystone::roles::admin::service_tenant: 'service'
keystone::roles::admin::admin_tenant: 'admin'
keystone::cron::token_flush::destination: '/dev/null'
keystone::config::keystone_config:
DEFAULT/secure_proxy_ssl_header:
value: 'HTTP_X_FORWARDED_PROTO'
ec2/driver:
value: 'keystone.contrib.ec2.backends.sql.Ec2'
keystone::service_name: 'httpd'
keystone::wsgi::apache::ssl: false
keystone::wsgi::apache::workers: {get_param: KeystoneWorkers}
# override via extraconfig:

@@ -61,4 +61,7 @@ outputs:
params:
PLUGINS: {get_param: NeutronServicePlugins}
neutron::debug: {get_param: Debug}
neutron::host: '"%{::fqdn}"'
neutron::allow_overlapping_ips: true
neutron::rabbit_heartbeat_timeout_threshold: 60
neutron::host: '"%{::fqdn}"' #NOTE: extra quoting is needed
neutron::keystone::auth::tenant: 'service'

@@ -31,5 +31,6 @@ outputs:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
neutron::agents::l3::router_delete_namespaces: True
step_config: |
include tripleo::profile::base::neutron::l3

@@ -39,5 +39,6 @@ outputs:
neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
neutron::agents::metadata::auth_tenant: 'service'
step_config: |
include tripleo::profile::base::neutron::metadata

@@ -54,6 +54,7 @@ outputs:
- '/ovs_neutron?charset=utf8'
neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
neutron::server::auth_tenant: 'service'
neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
@@ -65,6 +66,8 @@ outputs:
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
neutron::server::project_name: 'service'
neutron::server::sync_db: true
neutron::db::mysql::password: {get_param: NeutronPassword}
neutron::db::mysql::user: neutron
neutron::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}

@@ -42,5 +42,10 @@ outputs:
- 8774
- 13774
- 8775
nova::api::admin_tenant_name: 'service'
nova::api::enabled: true
nova::api::default_floating_pool: 'public'
nova::api::sync_db_api: true
nova::api::enable_proxy_headers_parsing: true
step_config: |
include tripleo::profile::base::nova::api

@@ -79,4 +79,25 @@ outputs:
- '%'
- "%{hiera('mysql_bind_host')}"
nova::debug: {get_param: Debug}
nova::host: '"%{::fqdn}"'
nova::network::neutron::neutron_project_name: 'service'
nova::network::neutron::neutron_username: 'neutron'
nova::network::neutron::dhcp_domain: ''
nova::rabbit_heartbeat_timeout_threshold: 60
nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed.
nova::notify_on_state_change: 'vm_and_task_state'
nova::notification_driver: messagingv2
nova::network::neutron::neutron_auth_type: 'v3password'
nova::keystone::auth::tenant: 'service'
nova::db::mysql::user: nova
nova::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
nova::db::mysql::dbname: nova
nova::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
nova::db::mysql_api::user: nova_api
nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
nova::db::mysql_api::dbname: nova_api
nova::db::mysql_api::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"

@@ -31,7 +31,7 @@ outputs:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::compute::libvirt::manage_libvirt_services: false
# we manage migration in nova common puppet profile
# we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
tripleo::profile::base::nova::nova_compute_enabled: true
@@ -42,6 +42,14 @@ outputs:
- '.'
- - 'client'
- {get_param: CephClientUserName}
nova::compute::rbd::libvirt_rbd_secret_uuid: '"%{hiera(\"ceph::profile::params::fsid\")}"'
nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
# Changing the default from 512MB. The current templates cannot deploy
# overclouds with swap. On an idle compute node, we see ~1024MB of RAM
# used. 2048 is suggested to account for other possible operations, for
# example openvswitch.
nova::compute::reserved_host_memory: 2048
step_config: |
# TODO(emilien): figure out how to deal with the libvirt profile.
# We'll probably treat it like we do with Neutron plugins.

@@ -22,6 +22,8 @@ outputs:
value:
service_name: nova-vncproxy
config_settings:
get_attr: [NovaBase, role_data, config_settings]
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::vncproxy::enabled: true
step_config: |
include tripleo::profile::base::nova::vncproxy

@@ -16,6 +16,11 @@ outputs:
value:
service_name: pacemaker
config_settings:
pacemaker::corosync::cluster_name: 'tripleo_cluster'
pacemaker::corosync::manage_fw: false
pacemaker::resource_defaults::defaults:
resource-stickiness: { value: INFINITY }
corosync_token_timeout: 10000
tripleo.pacemaker.firewall_rules:
'130 pacemaker tcp':
proto: 'tcp'

@@ -53,6 +53,7 @@ outputs:
glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype}
glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage}
glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions}
glance_file_pcmk_directory: '/var/lib/glance/images'
glance::api::manage_service: false
glance::api::enabled: false
step_config: |

@@ -42,5 +42,20 @@ outputs:
- 4369
- 5672
- 35672
rabbitmq::delete_guest_user: false
rabbitmq::wipe_db_on_cookie_change: true
rabbitmq::port: '5672'
rabbitmq::package_source: undef
rabbitmq::repos_ensure: false
rabbitmq_environment:
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
rabbitmq_kernel_variables:
inet_dist_listen_min: '35672'
inet_dist_listen_max: '35672'
rabbitmq_config_variables:
tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
cluster_partition_handling: 'pause_minority'
loopback_users: '[]'
step_config: |
include ::tripleo::profile::base::rabbitmq

@@ -47,3 +47,5 @@ outputs:
- spark
- storm
sahara::rpc_backend: rabbit
sahara::admin_tenant_name: 'service'
sahara::keystone::auth::tenant: 'service'

@@ -30,11 +30,11 @@ outputs:
- sahara_dsn: &sahara_dsn
list_join:
- ''
- - {get_param: [EndpointMap, MysqlVirtual, protocol]}
- - {get_param: [EndpointMap, MysqlInternal, protocol]}
- '://sahara:'
- {get_param: SaharaPassword}
- '@'
- {get_param: [EndpointMap, MysqlVirtual, host]}
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/sahara'
sahara::database_connection: *sahara_dsn
sahara::db::mysql::password: {get_param: SaharaPassword}

@@ -41,6 +41,7 @@ outputs:
swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
swift::proxy::authtoken::admin_password: {get_param: SwiftPassword}
swift::proxy::authtoken::admin_tenant_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
@@ -56,5 +57,30 @@ outputs:
dport:
- 8080
- 13808
swift::keystone::auth::tenant: 'service'
swift::keystone::auth::configure_s3_endpoint: false
swift::keystone::auth::operator_roles:
- admin
- swiftoperator
- ResellerAdmin
swift::proxy::keystone::operator_roles:
- admin
- swiftoperator
- ResellerAdmin
swift::proxy::pipeline:
- 'catch_errors'
- 'healthcheck'
- 'proxy-logging'
- 'cache'
- 'ratelimit'
- 'bulk'
- 'tempurl'
- 'formpost'
- 'authtoken'
- 'keystone'
- 'staticweb'
- 'proxy-logging'
- 'proxy-server'
swift::proxy::account_autocreate: true
step_config: |
include ::tripleo::profile::base::swift::proxy

@@ -48,5 +48,17 @@ outputs:
- 6000
- 6001
- 6002
swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r'
swift::storage::all::object_pipeline:
- healthcheck
- recon
- object-server
swift::storage::all::container_pipeline:
- healthcheck
- container-server
swift::storage::all::account_pipeline:
- healthcheck
- account-server
step_config: |
include ::tripleo::profile::base::swift::storage
