From 66c4bdadccaef781aa22876474507db1195fa5c0 Mon Sep 17 00:00:00 2001 From: Nikita Koshikov Date: Tue, 17 Nov 2015 17:04:25 +0300 Subject: [PATCH] Add selective SSL support Add second-level hash for TLS support. If user mixing this hash into his astute.yaml, values from it will be taken to TLS-related tasks instead of ones from UI. Hash to use looks like: use_ssl: horizon: true horizon_public: true horizon_public_hostname: horizon.fuel.local horizon_public_usercert: true horizon_public_certdata: KeyPairStoredHere keystone: true keystone_public: true keystone_public_ip: ... keystone_internal: true ... keystone_admin: true ... glance: true glance_public: true ... Co-Authored-By: Stanislaw Bogatkin Related to blueprint selective-ssl Change-Id: Ic3034b3fea27a370b1f7cbd4e088f361fce96142 --- .../puppet/openstack/manifests/ceilometer.pp | 4 +- .../puppet/openstack/manifests/cinder.pp | 1 - .../puppet/openstack/manifests/compute.pp | 5 +- .../puppet/openstack/manifests/glance.pp | 16 +- .../openstack/manifests/ha/ceilometer.pp | 22 +- .../puppet/openstack/manifests/ha/cinder.pp | 22 +- .../puppet/openstack/manifests/ha/glance.pp | 22 +- .../openstack/manifests/ha/haproxy_service.pp | 43 +- .../puppet/openstack/manifests/ha/heat.pp | 22 +- .../puppet/openstack/manifests/ha/horizon.pp | 6 + .../puppet/openstack/manifests/ha/ironic.pp | 6 + .../puppet/openstack/manifests/ha/keystone.pp | 44 +- .../puppet/openstack/manifests/ha/murano.pp | 22 +- .../puppet/openstack/manifests/ha/neutron.pp | 22 +- .../puppet/openstack/manifests/ha/nova.pp | 29 +- .../puppet/openstack/manifests/ha/radosgw.pp | 9 +- .../puppet/openstack/manifests/ha/sahara.pp | 23 +- .../puppet/openstack/manifests/ha/swift.pp | 22 +- deployment/puppet/openstack/manifests/heat.pp | 19 +- .../classes/openstack_ha_ceilometer_spec.rb | 2 + .../spec/classes/openstack_ha_cinder_spec.rb | 2 + .../spec/classes/openstack_ha_glance_spec.rb | 2 + .../spec/classes/openstack_ha_heat_spec.rb | 3 + 
.../spec/classes/openstack_ha_ironic_spec.rb | 2 + .../classes/openstack_ha_keystone_spec.rb | 2 + .../spec/classes/openstack_ha_murano_spec.rb | 2 + .../spec/classes/openstack_ha_neutron_spec.rb | 2 + .../spec/classes/openstack_ha_nova_spec.rb | 4 + .../spec/classes/openstack_ha_radosgw_spec.rb | 2 + .../spec/classes/openstack_ha_sahara_spec.rb | 2 + .../spec/classes/openstack_ha_swift_spec.rb | 2 + deployment/puppet/osnailyfacter/.fixtures.yml | 8 +- .../parser/functions/get_ssl_property.rb | 74 + .../modular/astute/upload_cirros.rb | 12 +- .../modular/ceilometer/compute.pp | 7 +- .../modular/ceilometer/controller.pp | 7 +- .../modular/ceilometer/keystone.pp | 26 +- .../osnailyfacter/modular/glance/glance.pp | 14 +- .../osnailyfacter/modular/glance/keystone.pp | 22 +- .../puppet/osnailyfacter/modular/heat/heat.pp | 41 +- .../osnailyfacter/modular/heat/keystone.pp | 31 +- .../osnailyfacter/modular/murano/keystone.pp | 37 +- .../osnailyfacter/modular/murano/murano.pp | 31 +- .../modular/openstack-cinder/keystone.pp | 51 +- .../openstack-cinder/openstack-cinder.pp | 63 +- .../modular/openstack-controller/keystone.pp | 45 +- .../openstack-controller.pp | 19 +- .../openstack-haproxy-ceilometer.pp | 12 +- .../openstack-haproxy-cinder.pp | 20 +- .../openstack-haproxy-glance.pp | 19 +- .../openstack-haproxy-heat.pp | 23 +- .../openstack-haproxy-horizon.pp | 15 +- .../openstack-haproxy-ironic.pp | 15 +- .../openstack-haproxy-keystone.pp | 25 +- .../openstack-haproxy-murano.pp | 11 +- .../openstack-haproxy-neutron.pp | 12 +- .../openstack-haproxy-nova.pp | 18 +- .../openstack-haproxy-radosgw.pp | 7 +- .../openstack-haproxy-sahara.pp | 13 +- .../openstack-haproxy-swift.pp | 23 +- .../modular/openstack-network/compute-nova.pp | 35 +- .../modular/openstack-network/keystone.pp | 25 +- .../openstack-network/server-config.pp | 60 +- .../modular/openstack-network/server-nova.pp | 38 +- .../osnailyfacter/modular/roles/cinder.pp | 17 +- .../osnailyfacter/modular/roles/compute.pp | 
20 +- .../modular/ssl/ssl_add_trust_chain.pp | 60 +- .../modular/ssl/ssl_dns_setup.pp | 102 ++ .../modular/ssl/ssl_keys_saving.pp | 57 +- .../modular/ssl/ssl_keys_saving_pre.rb | 2 +- .../osnailyfacter/modular/ssl/tasks.yaml | 11 + .../osnailyfacter/modular/swift/keystone.pp | 44 +- .../osnailyfacter/modular/swift/swift.pp | 71 +- .../spec/fixtures/manifests/site.pp | 0 .../spec/functions/get_ssl_property_spec.rb | 124 ++ ...vlan.ceph.ceil-compute.overridden_ssl.yaml | 959 +++++++++++++ ...eil-primary-controller.overridden_ssl.yaml | 1204 +++++++++++++++++ .../neut_vlan.compute.ssl.overridden.yaml | 1055 +++++++++++++++ ...ara-primary-controller.overridden_ssl.yaml | 1196 ++++++++++++++++ .../spec/hosts/ceilometer/keystone_spec.rb | 28 +- tests/noop/spec/hosts/glance/keystone_spec.rb | 24 +- tests/noop/spec/hosts/heat/heat_spec.rb | 27 +- tests/noop/spec/hosts/heat/keystone_spec.rb | 58 +- tests/noop/spec/hosts/murano/keystone_spec.rb | 35 +- tests/noop/spec/hosts/murano/murano_spec.rb | 22 +- .../hosts/openstack-cinder/keystone_spec.rb | 83 +- .../openstack-cinder/openstack-cinder_spec.rb | 14 +- .../openstack-controller/keystone_spec.rb | 33 +- .../openstack-controller_spec.rb | 8 +- .../openstack-network/compute-nova_spec.rb | 15 +- .../hosts/openstack-network/keystone_spec.rb | 21 +- .../openstack-network/server-config_spec.rb | 92 +- .../openstack-network/server-nova_spec.rb | 31 +- tests/noop/spec/hosts/roles/cinder_spec.rb | 21 +- tests/noop/spec/hosts/roles/compute_spec.rb | 54 +- .../hosts/ssl/ssl_add_trust_chain_spec.rb | 7 + .../noop/spec/hosts/ssl/ssl_dns_setup_spec.rb | 89 ++ .../spec/hosts/ssl/ssl_keys_saving_spec.rb | 78 ++ tests/noop/spec/hosts/swift/keystone_spec.rb | 35 +- tests/noop/spec/hosts/swift/swift_spec.rb | 44 +- 100 files changed, 6382 insertions(+), 604 deletions(-) create mode 100644 deployment/puppet/osnailyfacter/lib/puppet/parser/functions/get_ssl_property.rb create mode 100644 
deployment/puppet/osnailyfacter/modular/ssl/ssl_dns_setup.pp create mode 100644 deployment/puppet/osnailyfacter/spec/fixtures/manifests/site.pp create mode 100644 deployment/puppet/osnailyfacter/spec/functions/get_ssl_property_spec.rb create mode 100644 tests/noop/astute.yaml/neut_vlan.ceph.ceil-compute.overridden_ssl.yaml create mode 100644 tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-controller.overridden_ssl.yaml create mode 100644 tests/noop/astute.yaml/neut_vlan.compute.ssl.overridden.yaml create mode 100644 tests/noop/astute.yaml/neut_vxlan_dvr.murano.sahara-primary-controller.overridden_ssl.yaml create mode 100644 tests/noop/spec/hosts/ssl/ssl_dns_setup_spec.rb diff --git a/deployment/puppet/openstack/manifests/ceilometer.pp b/deployment/puppet/openstack/manifests/ceilometer.pp index 8f8d214006..d28c010f73 100644 --- a/deployment/puppet/openstack/manifests/ceilometer.pp +++ b/deployment/puppet/openstack/manifests/ceilometer.pp @@ -29,6 +29,7 @@ class openstack::ceilometer ( $amqp_user = 'guest', $amqp_password = 'rabbit_pw', $rabbit_ha_queues = false, + $keystone_protocol = 'http', $keystone_host = '127.0.0.1', $host = '0.0.0.0', $port = '8777', @@ -71,7 +72,7 @@ class openstack::ceilometer ( # Configure authentication for agents class { '::ceilometer::agent::auth': - auth_url => "http://${keystone_host}:5000/v2.0", + auth_url => "${keystone_protocol}://${keystone_host}:5000/v2.0", auth_password => $keystone_password, auth_region => $keystone_region, auth_tenant_name => $keystone_tenant, @@ -116,6 +117,7 @@ class openstack::ceilometer ( # Install the ceilometer-api service # The keystone_password parameter is mandatory class { '::ceilometer::api': + keystone_protocol => $keystone_protocol, keystone_host => $keystone_host, keystone_user => $keystone_user, keystone_password => $keystone_password, diff --git a/deployment/puppet/openstack/manifests/cinder.pp b/deployment/puppet/openstack/manifests/cinder.pp index d539e1a6b9..d988bf59b7 100644 --- 
a/deployment/puppet/openstack/manifests/cinder.pp +++ b/deployment/puppet/openstack/manifests/cinder.pp @@ -21,7 +21,6 @@ class openstack::cinder( $enabled = true, $enable_volumes = true, $purge_cinder_config = true, - $auth_host = '127.0.0.1', $bind_host = '0.0.0.0', $iscsi_bind_host = '0.0.0.0', $use_syslog = false, diff --git a/deployment/puppet/openstack/manifests/compute.pp b/deployment/puppet/openstack/manifests/compute.pp index a1ebddc445..b7731dd624 100644 --- a/deployment/puppet/openstack/manifests/compute.pp +++ b/deployment/puppet/openstack/manifests/compute.pp @@ -32,6 +32,8 @@ # Optional. Defaults to false, # [libvirt_type] Underlying libvirt supported hypervisor. # Optional. Defaults to 'kvm', +# [vncproxy_protocol] Protocol to use for access vnc proxy. Optional. +# Defaults to 'http'. # [vncproxy_host] Host that serves as vnc proxy. Optional. # Defaults to false. False indicates that a vnc proxy should not be configured. # [vnc_enabled] Rather vnc console should be enabled. @@ -82,6 +84,7 @@ class openstack::compute ( $host_uuid = undef, # VNC $vnc_enabled = true, + $vncproxy_protocol = 'http', $vncproxy_host = undef, $vncserver_listen = '0.0.0.0', $migration_support = false, @@ -290,8 +293,8 @@ class openstack::compute ( enabled => $enabled, vnc_enabled => $vnc_enabled, vncserver_proxyclient_address => $internal_address, + vncproxy_protocol => $vncproxy_protocol, vncproxy_host => $vncproxy_host, - vncproxy_protocol => $nova_hash['vncproxy_protocol'], vncproxy_port => $nova_hash['vncproxy_port'], force_config_drive => $nova_hash['force_config_drive'], #NOTE(bogdando) default became true in 4.0.0 puppet-nova (was false) diff --git a/deployment/puppet/openstack/manifests/glance.pp b/deployment/puppet/openstack/manifests/glance.pp index ed16482321..cc091f67c9 100644 --- a/deployment/puppet/openstack/manifests/glance.pp +++ b/deployment/puppet/openstack/manifests/glance.pp @@ -13,8 +13,12 @@ # [db_host] Host where DB resides. Required. 
# [glance_user_password] Password for glance auth user. Required. # [glance_db_password] Password for glance DB. Required. +# [glance_protocol] Protocol glance used to speak with registry. +# Optional. Defaults to 'http' # [keystone_host] Host whre keystone is running. Optional. Defaults to '127.0.0.1' # [auth_uri] URI used for auth. Optional. Defaults to "http://${keystone_host}:5000/" +# [internal_ssl] Whether to use SSL for auth on internal networks. +# Optional. Defaults to false # [db_type] Type of sql databse to use. Optional. Defaults to 'mysql' # [glance_db_user] Name of glance DB user. Optional. Defaults to 'glance' # [glance_db_dbname] Name of glance DB. Optional. Defaults to 'glance' @@ -48,6 +52,8 @@ class openstack::glance ( $registry_host = '127.0.0.1', $auth_uri = 'http://127.0.0.1:5000/', $region = 'RegionOne', + $internal_ssl = false, + $glance_protocol = 'http', $db_type = 'mysql', $glance_db_user = 'glance', $glance_db_dbname = 'glance', @@ -106,6 +112,12 @@ class openstack::glance ( } } + if $internal_ssl { + $auth_protocol = 'https' + } else { + $auth_protocol = 'http' + } + # Install and configure glance-api class { 'glance::api': verbose => $verbose, @@ -115,6 +127,7 @@ class openstack::glance ( auth_port => '35357', auth_host => $keystone_host, auth_url => $auth_uri, + auth_protocol => $auth_protocol, keystone_user => $glance_user, keystone_password => $glance_user_password, keystone_tenant => $glance_tenant, @@ -185,6 +198,7 @@ class openstack::glance ( auth_host => $keystone_host, auth_port => '35357', auth_type => 'keystone', + auth_protocol => $auth_protocol, keystone_user => $glance_user, keystone_password => $glance_user_password, keystone_tenant => $glance_tenant, @@ -272,7 +286,7 @@ class openstack::glance ( swift_store_key => $glance_user_password, swift_store_create_container_on_put => 'True', swift_store_large_object_size => $swift_store_large_object_size, - swift_store_auth_address => "http://${keystone_host}:5000/v2.0/", + 
swift_store_auth_address => "${auth_protocol}://${keystone_host}:5000/v2.0/", swift_store_region => $region, } } diff --git a/deployment/puppet/openstack/manifests/ha/ceilometer.pp b/deployment/puppet/openstack/manifests/ha/ceilometer.pp index 42e934956f..ef8c78e79d 100644 --- a/deployment/puppet/openstack/manifests/ha/ceilometer.pp +++ b/deployment/puppet/openstack/manifests/ha/ceilometer.pp @@ -16,6 +16,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. This is the ipaddress to be used for the external facing # vip @@ -28,7 +42,10 @@ class openstack::ha::ceilometer ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -44,6 +61,9 @@ class openstack::ha::ceilometer ( listen_port => 8777, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'ceilometer-api', haproxy_config_options => { 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/manifests/ha/cinder.pp b/deployment/puppet/openstack/manifests/ha/cinder.pp index 718ccda5db..637697f1b1 100644 --- a/deployment/puppet/openstack/manifests/ha/cinder.pp +++ b/deployment/puppet/openstack/manifests/ha/cinder.pp @@ -16,6 +16,20 @@ # (optional) Boolean. 
If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. This is the ipaddress to be used for the external facing # vip @@ -28,7 +42,10 @@ class openstack::ha::cinder ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -44,6 +61,9 @@ class openstack::ha::cinder ( listen_port => 8776, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'cinder-api', server_names => $server_names, ipaddresses => $ipaddresses, diff --git a/deployment/puppet/openstack/manifests/ha/glance.pp b/deployment/puppet/openstack/manifests/ha/glance.pp index b120248bd1..0a73a00b01 100644 --- a/deployment/puppet/openstack/manifests/ha/glance.pp +++ b/deployment/puppet/openstack/manifests/ha/glance.pp @@ -16,6 +16,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. 
This is the ipaddress to be used for the external facing # vip @@ -28,7 +42,10 @@ class openstack::ha::glance ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -45,6 +62,9 @@ class openstack::ha::glance ( listen_port => 9292, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'glance-api', haproxy_config_options => { 'option' => ['httpchk /versions', 'httplog', 'httpclose'], diff --git a/deployment/puppet/openstack/manifests/ha/haproxy_service.pp b/deployment/puppet/openstack/manifests/ha/haproxy_service.pp index 54dca142d4..71e922ecc8 100644 --- a/deployment/puppet/openstack/manifests/ha/haproxy_service.pp +++ b/deployment/puppet/openstack/manifests/ha/haproxy_service.pp @@ -82,10 +82,13 @@ define openstack::ha::haproxy_service ( 'balance' => 'roundrobin' }, $internal = true, $public = false, - $public_ssl = false, $ipaddresses = undef, $server_names = undef, $mode = undef, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, $require_service = undef, ) { @@ -96,29 +99,39 @@ define openstack::ha::haproxy_service ( include openstack::ha::haproxy_restart - if $public and $internal { + if $public_ssl and !$public_ssl_path { + fail("You must set up path to public ssl keypair if you want to use public ssl") + } + if $internal_ssl and !$internal_ssl_path { + fail("You must set up path to internal ssl keypair if you want to use internal ssl") + } + if !($internal or $public) { + fail('At least one of $public or $internal must be set to true') + } + + if $public { if $public_ssl { - $bind = merge({ "$public_virtual_ip:$listen_port" => ['ssl', 'crt', '/var/lib/astute/haproxy/public_haproxy.pem'] }, - 
array_to_hash(suffix(flatten([$internal_virtual_ip]), ":${listen_port}"), "")) + $public_bind = { "$public_virtual_ip:$listen_port" => ['ssl', 'crt', $public_ssl_path] } } else { - $bind = array_to_hash(suffix(flatten([$internal_virtual_ip, $public_virtual_ip]), ":${listen_port}"), "") - } - } elsif $internal { - $bind = array_to_hash(suffix(flatten([$internal_virtual_ip]), ":${listen_port}"), "") - } elsif $public { - if $public_ssl { - $bind = { "$public_virtual_ip:$listen_port" => ['ssl', 'crt', '/var/lib/astute/haproxy/public_haproxy.pem'] } - } else { - $bind = array_to_hash(suffix(flatten([$public_virtual_ip]), ":${listen_port}"), "") + $public_bind = { "$public_virtual_ip:$listen_port" => "" } } } else { - fail('At least one of $public or $internal must be set to true') + $public_bind = {} + } + if $internal { + if $internal_ssl { + $internal_bind = { "$internal_virtual_ip:$listen_port" => ['ssl', 'crt', $internal_ssl_path] } + } else { + $internal_bind = { "$internal_virtual_ip:$listen_port" => "" } + } + } else { + $internal_bind = {} } # Configure HAProxy to listen haproxy::listen { $name: order => $order, - bind => $bind, + bind => merge($public_bind, $internal_bind), options => $haproxy_config_options, mode => $mode, use_include => true, diff --git a/deployment/puppet/openstack/manifests/ha/heat.pp b/deployment/puppet/openstack/manifests/ha/heat.pp index a8a8ef1103..6bb14af840 100644 --- a/deployment/puppet/openstack/manifests/ha/heat.pp +++ b/deployment/puppet/openstack/manifests/ha/heat.pp @@ -20,6 +20,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. 
Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*server_names*] # (required) Array. This is an array of server names for the haproxy service # @@ -28,7 +42,10 @@ class openstack::ha::heat ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -39,6 +56,9 @@ class openstack::ha::heat ( server_names => $server_names, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'heat-api', haproxy_config_options => { option => ['httpchk', 'httplog', 'httpclose'], diff --git a/deployment/puppet/openstack/manifests/ha/horizon.pp b/deployment/puppet/openstack/manifests/ha/horizon.pp index afb9c05c85..347b6e4cc0 100644 --- a/deployment/puppet/openstack/manifests/ha/horizon.pp +++ b/deployment/puppet/openstack/manifests/ha/horizon.pp @@ -24,12 +24,17 @@ # port for the horizon vip # Defaults to false # +# [*public_ssl_path*] +# (optional) String. 
Filesystem path to the file with certificate content +# Defaults to undef +# class openstack::ha::horizon ( $internal_virtual_ip, $ipaddresses, $public_virtual_ip, $server_names, $use_ssl = false, + $public_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -59,6 +64,7 @@ class openstack::ha::horizon ( listen_port => 443, balancermember_port => 80, public_ssl => $use_ssl, + public_ssl_path => $public_ssl_path, haproxy_config_options => { 'option' => ['forwardfor', 'httpchk', 'httpclose', 'httplog'], 'stick-table' => 'type ip size 200k expire 30m', diff --git a/deployment/puppet/openstack/manifests/ha/ironic.pp b/deployment/puppet/openstack/manifests/ha/ironic.pp index be3fb9dfde..44f48069e1 100644 --- a/deployment/puppet/openstack/manifests/ha/ironic.pp +++ b/deployment/puppet/openstack/manifests/ha/ironic.pp @@ -16,6 +16,10 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Path to public SSL certificate +# Defaults to undef. +# # [*public_virtual_ip*] # (required) String. This is the ipaddress to be used for the external facing # vip @@ -34,6 +38,7 @@ class openstack::ha::ironic ( $server_names, $baremetal_virtual_ip, $public_ssl = false, + $public_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -53,6 +58,7 @@ class openstack::ha::ironic ( order => '180', public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, } openstack::ha::haproxy_service { 'ironic-baremetal': diff --git a/deployment/puppet/openstack/manifests/ha/keystone.pp b/deployment/puppet/openstack/manifests/ha/keystone.pp index 38c804829b..612c3b2fd1 100644 --- a/deployment/puppet/openstack/manifests/ha/keystone.pp +++ b/deployment/puppet/openstack/manifests/ha/keystone.pp @@ -16,6 +16,31 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. 
Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip and port +# 5000 +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# +# [*admin_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip and port +# 35357 +# Defaults to false. +# +# [*admin_ssl_path*] +# (optional) String. Filesystem path to the file with admin certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. This is the ipaddress to be used for the external facing # vip @@ -28,7 +53,12 @@ class openstack::ha::keystone ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, + $admin_ssl = false, + $admin_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -37,6 +67,10 @@ class openstack::ha::keystone ( ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, haproxy_config_options => { option => ['httpchk', 'httplog', 'httpclose'], 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', @@ -52,8 +86,10 @@ class openstack::ha::keystone ( } openstack::ha::haproxy_service { 'keystone-2': - order => '030', - listen_port => 35357, - public => false, + order => '030', + listen_port => 35357, + public => false, + internal_ssl => $admin_ssl, + internal_ssl_path => $admin_ssl_path, } } diff --git a/deployment/puppet/openstack/manifests/ha/murano.pp b/deployment/puppet/openstack/manifests/ha/murano.pp index 96fd1b2482..3e86d8c0ce 100644 --- 
a/deployment/puppet/openstack/manifests/ha/murano.pp +++ b/deployment/puppet/openstack/manifests/ha/murano.pp @@ -16,6 +16,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. This is the ipaddress to be used for the external facing # vip @@ -28,7 +42,10 @@ class openstack::ha::murano ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -44,6 +61,9 @@ class openstack::ha::murano ( order => '190', listen_port => 8082, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'murano_api', haproxy_config_options => { 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/manifests/ha/neutron.pp b/deployment/puppet/openstack/manifests/ha/neutron.pp index 93ff074781..e0aa82a099 100644 --- a/deployment/puppet/openstack/manifests/ha/neutron.pp +++ b/deployment/puppet/openstack/manifests/ha/neutron.pp @@ -16,6 +16,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. 
+# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. This is the ipaddress to be used for the external facing # vip @@ -28,7 +42,10 @@ class openstack::ha::neutron ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -44,6 +61,9 @@ class openstack::ha::neutron ( listen_port => 9696, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, define_backups => false, haproxy_config_options => { option => ['httpchk', 'httplog', 'httpclose'], diff --git a/deployment/puppet/openstack/manifests/ha/nova.pp b/deployment/puppet/openstack/manifests/ha/nova.pp index 5951f3e8d1..19ff6cc17c 100644 --- a/deployment/puppet/openstack/manifests/ha/nova.pp +++ b/deployment/puppet/openstack/manifests/ha/nova.pp @@ -16,6 +16,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. 
This is the ipaddress to be used for the external facing # vip @@ -28,7 +42,10 @@ class openstack::ha::nova ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -44,6 +61,9 @@ class openstack::ha::nova ( listen_port => 8773, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'nova-api', haproxy_config_options => { 'timeout server' => '600s', @@ -56,6 +76,9 @@ class openstack::ha::nova ( listen_port => 8774, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'nova-api', haproxy_config_options => { option => ['httpchk', 'httplog', 'httpclose'], @@ -68,6 +91,9 @@ class openstack::ha::nova ( openstack::ha::haproxy_service { 'nova-metadata-api': order => '060', listen_port => 8775, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, + require_service => 'nova-api', haproxy_config_options => { option => ['httpchk', 'httplog', 'httpclose'], }, @@ -79,6 +105,7 @@ class openstack::ha::nova ( listen_port => 6080, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, internal => false, require_service => 'nova-vncproxy', haproxy_config_options => { diff --git a/deployment/puppet/openstack/manifests/ha/radosgw.pp b/deployment/puppet/openstack/manifests/ha/radosgw.pp index c62f30af7a..87f4598aed 100644 --- a/deployment/puppet/openstack/manifests/ha/radosgw.pp +++ b/deployment/puppet/openstack/manifests/ha/radosgw.pp @@ -16,6 +16,11 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. 
Filesystem path to the file with public certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. This is the ipaddress to be used for the external facing # vip @@ -28,7 +33,8 @@ class openstack::ha::radosgw ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, $baremetal_virtual_ip = undef, ) { @@ -50,6 +56,7 @@ class openstack::ha::radosgw ( order => '130', public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, } if $baremetal_virtual_ip { diff --git a/deployment/puppet/openstack/manifests/ha/sahara.pp b/deployment/puppet/openstack/manifests/ha/sahara.pp index 85c22c0d06..ef86486857 100644 --- a/deployment/puppet/openstack/manifests/ha/sahara.pp +++ b/deployment/puppet/openstack/manifests/ha/sahara.pp @@ -16,6 +16,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. 
This is the ipaddress to be used for the external facing # vip @@ -28,7 +42,10 @@ class openstack::ha::sahara ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, ) { # defaults for any haproxy_service within this class @@ -44,10 +61,12 @@ class openstack::ha::sahara ( listen_port => 8386, public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, require_service => 'sahara-api', haproxy_config_options => { 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', }, - } } diff --git a/deployment/puppet/openstack/manifests/ha/swift.pp b/deployment/puppet/openstack/manifests/ha/swift.pp index 71918cbc58..67d245bc36 100644 --- a/deployment/puppet/openstack/manifests/ha/swift.pp +++ b/deployment/puppet/openstack/manifests/ha/swift.pp @@ -16,6 +16,20 @@ # (optional) Boolean. If true, enables SSL for $public_virtual_ip # Defaults to false. # +# [*public_ssl_path*] +# (optional) String. Filesystem path to the file with public certificate +# content +# Defaults to undef +# +# [*internal_ssl*] +# (optional) Boolean. If true, enables SSL for $internal_virtual_ip +# Defaults to false. +# +# [*internal_ssl_path*] +# (optional) String. Filesystem path to the file with internal certificate +# content +# Defaults to undef +# # [*public_virtual_ip*] # (required) String. 
This is the ipaddress to be used for the external facing # vip @@ -32,7 +46,10 @@ class openstack::ha::swift ( $ipaddresses, $public_virtual_ip, $server_names, - $public_ssl = false, + $public_ssl = false, + $public_ssl_path = undef, + $internal_ssl = false, + $internal_ssl_path = undef, $baremetal_virtual_ip = undef, ) { @@ -54,6 +71,9 @@ class openstack::ha::swift ( order => '120', public => true, public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } if $baremetal_virtual_ip { diff --git a/deployment/puppet/openstack/manifests/heat.pp b/deployment/puppet/openstack/manifests/heat.pp index ee8279db5a..e44ab12f3d 100644 --- a/deployment/puppet/openstack/manifests/heat.pp +++ b/deployment/puppet/openstack/manifests/heat.pp @@ -1,4 +1,15 @@ # +# == Class: openstack::heat +# +# Installs and configures Heat +# +# === Parameters +# +# [heat_protocol] +# Protocol to use for reach Heat-related services. +# Optional. Defaults to 'http'. +# +# #TODO(bogdando) sync extended qpid rpc backend configuration here as well # [use_stderr] Rather or not service should send output to stderr. Optional. Defaults to true. 
# @@ -28,7 +39,6 @@ class openstack::heat ( $keystone_port = '35357', $keystone_service_port = '5000', $keystone_protocol = 'http', - $public_ssl = false, $keystone_user = 'heat', $keystone_tenant = 'services', $keystone_password = false, @@ -36,6 +46,7 @@ class openstack::heat ( $region = 'RegionOne', $auth_uri = false, $identity_uri = false, + $heat_protocol = 'http', $trusts_delegated_roles = [], $verbose = false, @@ -92,17 +103,17 @@ class openstack::heat ( if $heat_metadata_server_url { $metadata_server_url = $heat_metadata_server_url } else { - $metadata_server_url = "http://${external_ip}:${api_cfn_bind_port}" + $metadata_server_url = "${heat_protocol}://${external_ip}:${api_cfn_bind_port}" } if $heat_waitcondition_server_url { $waitcondition_server_url = $heat_waitcondition_server_url } else { - $waitcondition_server_url = "http://${external_ip}:${api_cfn_bind_port}/v1/waitcondition" + $waitcondition_server_url = "${heat_protocol}://${external_ip}:${api_cfn_bind_port}/v1/waitcondition" } if $heat_watch_server_url { $watch_server_url = $heat_watch_server_url } else { - $watch_server_url = "http://${external_ip}:${api_cloudwatch_bind_port}" + $watch_server_url = "${heat_protocol}://${external_ip}:${api_cloudwatch_bind_port}" } # TODO(bogdando) clarify this config section (left from upstream presync state) diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_ceilometer_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_ceilometer_spec.rb index 76e2bd454b..ac51b45836 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_ceilometer_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_ceilometer_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_ceilometer.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ 
require 'spec_helper' 'listen_port' => 8777, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_ceilometer.pem', 'require_service' => 'ceilometer-api', 'haproxy_config_options' => { 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_cinder_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_cinder_spec.rb index cbb274a976..128092d7aa 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_cinder_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_cinder_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_cinder.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 8776, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_cinder.pem', 'require_service' => 'cinder-api', 'haproxy_config_options' => { 'option' => ['httpchk', 'httplog','httpclose'], diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_glance_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_glance_spec.rb index 85ff89684b..6914789a2e 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_glance_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_glance_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_glance.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 9292, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_glance.pem', 
'require_service' => 'glance-api', 'haproxy_config_options' => { 'option' => ['httpchk /versions', 'httplog','httpclose'], diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_heat_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_heat_spec.rb index 0bb7e21d18..011f54edf4 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_heat_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_heat_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_heat.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 8004, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_heat.pem', 'require_service' => 'heat-api', 'haproxy_config_options' => { 'option' => ['httpchk', 'httplog','httpclose'], @@ -34,6 +36,7 @@ require 'spec_helper' 'listen_port' => 8000, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_heat.pem', 'require_service' => 'heat-api', 'haproxy_config_options' => { 'option' => ['httpchk', 'httplog','httpclose'], diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_ironic_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_ironic_spec.rb index 71da652578..20c18c8b7c 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_ironic_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_ironic_spec.rb @@ -7,6 +7,7 @@ require 'spec_helper' :baremetal_virtual_ip => '192.168.0.2', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_ironic.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -19,6 +20,7 @@ require 'spec_helper' 'listen_port' => 6385, 'public' => true, 
'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_ironic.pem', 'haproxy_config_options' => { 'option' => ['httpchk GET /', 'httplog','httpclose'], 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_keystone_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_keystone_spec.rb index e2cb3a4064..4e1e071da9 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_keystone_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_keystone_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_keystone.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 5000, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_keystone.pem', 'haproxy_config_options' => { 'option' => ['httpchk', 'httplog','httpclose'], 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_murano_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_murano_spec.rb index 2c129f662b..29777c10d5 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_murano_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_murano_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_murano.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 8082, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_murano.pem', 
'require_service' => 'murano_api', 'haproxy_config_options' => { 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_neutron_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_neutron_spec.rb index da7a40f5b3..ac92de524a 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_neutron_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_neutron_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_neutron.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 9696, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_neutron.pem', 'haproxy_config_options' => { 'option' => ['httpchk', 'httplog','httpclose'], 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_nova_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_nova_spec.rb index 726e62d0b2..ab50b41710 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_nova_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_nova_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_nova.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 8773, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_nova.pem', 'require_service' => 'nova-api', 'haproxy_config_options' => { 'timeout server' => '600s', @@ -31,6 +33,7 @@ require 'spec_helper' 
'listen_port' => 8774, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_nova.pem', 'require_service' => 'nova-api', 'haproxy_config_options' => { 'option' => ['httpchk', 'httplog', 'httpclose'], @@ -56,6 +59,7 @@ require 'spec_helper' 'listen_port' => 6080, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_nova.pem', 'internal' => false, 'require_service' => 'nova-vncproxy', 'haproxy_config_options' => { diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_radosgw_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_radosgw_spec.rb index 5a7a045aca..114de41ffc 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_radosgw_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_radosgw_spec.rb @@ -7,6 +7,7 @@ require 'spec_helper' :baremetal_virtual_ip => '192.168.0.2', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_radosgw.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -20,6 +21,7 @@ require 'spec_helper' 'balancermember_port' => 6780, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_radosgw.pem', 'haproxy_config_options' => { 'option' => ['httplog', 'httpchk GET /'], 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_sahara_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_sahara_spec.rb index 652bfe3333..1bec9f4d06 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_sahara_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_sahara_spec.rb @@ -6,6 +6,7 @@ require 'spec_helper' :public_virtual_ip => '192.168.0.1', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_sahara.pem', } } 
let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -18,6 +19,7 @@ require 'spec_helper' 'listen_port' => 8386, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_sahara.pem', 'require_service' => 'sahara-api', 'haproxy_config_options' => { 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/openstack/spec/classes/openstack_ha_swift_spec.rb b/deployment/puppet/openstack/spec/classes/openstack_ha_swift_spec.rb index 7b4d66724a..74fd52bb55 100644 --- a/deployment/puppet/openstack/spec/classes/openstack_ha_swift_spec.rb +++ b/deployment/puppet/openstack/spec/classes/openstack_ha_swift_spec.rb @@ -7,6 +7,7 @@ require 'spec_helper' :baremetal_virtual_ip => '192.168.0.2', :server_names => ['node-1', 'node-2'], :public_ssl => true, + :public_ssl_path => '/var/lib/fuel/haproxy/public_swift.pem', } } let(:facts) { {:kernel => 'Linux', :concat_basedir => '/var/lib/puppet/concat', @@ -19,6 +20,7 @@ require 'spec_helper' 'listen_port' => 8080, 'public' => true, 'public_ssl' => true, + 'public_ssl_path' => '/var/lib/fuel/haproxy/public_swift.pem', 'haproxy_config_options' => { 'option' => ['httpchk', 'httplog','httpclose'], 'http-request' => 'set-header X-Forwarded-Proto https if { ssl_fc }', diff --git a/deployment/puppet/osnailyfacter/.fixtures.yml b/deployment/puppet/osnailyfacter/.fixtures.yml index 6461b5852c..90d6e70e39 100644 --- a/deployment/puppet/osnailyfacter/.fixtures.yml +++ b/deployment/puppet/osnailyfacter/.fixtures.yml @@ -1,4 +1,6 @@ fixtures: - symlinks: - 'osnailyfacter': "#{source_dir}" - 'l23network': "#{source_dir}/../l23network" + repositories: + 'stdlib': 'git://github.com/puppetlabs/puppetlabs-stdlib.git' + symlinks: + 'osnailyfacter': "#{source_dir}" + 'l23network': "#{source_dir}/../l23network" diff --git a/deployment/puppet/osnailyfacter/lib/puppet/parser/functions/get_ssl_property.rb 
b/deployment/puppet/osnailyfacter/lib/puppet/parser/functions/get_ssl_property.rb new file mode 100644 index 0000000000..49f69123e7 --- /dev/null +++ b/deployment/puppet/osnailyfacter/lib/puppet/parser/functions/get_ssl_property.rb @@ -0,0 +1,74 @@ +module Puppet::Parser::Functions + newfunction(:get_ssl_property, :type => :rvalue, :doc => <<-EOS +Get SSL properties for services based on SSL hashes +Parameters: + use_ssl_hash - hash with data for all TLS settings of all services + public_ssl_hash - auxiliary hash with data for public TLS settings + service_name - name of service for which data should be returned + service_type - type of endpoint + resource_type - type of data needed to be retrieved + default_value - array with potentially default values to pick + +Return: + value for resource type you asked +EOS + ) do |args| + use_ssl_hash, public_ssl_hash, service_name, service_type, resource_type, default_value = *args + + fail "You should provide all arguments!" if args.length != 6 + + rval = false + fail "You should provide hash as a first argument!" if not use_ssl_hash.is_a?(Hash) + fail "You should provide hash as a second argument!" if not public_ssl_hash.is_a?(Hash) + fail "You should provide 'name' for service as a third argument!" if service_name.empty? + fail "You should provide 'type' for service as a fourth argument!" if service_type.empty? + fail "You should provide 'type' for resource as a fifth argument!" if resource_type.empty? + fail "You should provide some default value as a sixth argument!" if default_value.nil? + + check_ns_public = service_type.to_s == 'public' + + check_ssl = false + check_ssl = true if use_ssl_hash["#{service_name}_#{service_type}"] + check_ssl = true if check_ns_public and public_ssl_hash['services'] + check_ssl = true if use_ssl_hash.empty? and public_ssl_hash.empty? and default_value == 'https' + check_ssl = true if use_ssl_hash.empty? 
and service_type != 'public' and default_value == 'https' + check_ssl = true if use_ssl_hash.empty? and public_ssl_hash.empty? and resource_type == 'usage' and default_value + check_ssl = true if use_ssl_hash.empty? and service_type != 'public' and resource_type == 'usage' and default_value + + case resource_type.to_s + when 'protocol' + rval = check_ssl ? 'https' : 'http' + + when 'hostname' + if check_ssl and check_ns_public + get_variables = function_try_get_value([use_ssl_hash, "#{service_name}_#{service_type}_#{resource_type}", '']) + rval = function_pick([get_variables, public_ssl_hash[resource_type], *default_value]) + elsif check_ssl + get_variables = function_try_get_value([use_ssl_hash, "#{service_name}_#{service_type}_#{resource_type}", '']) + rval = function_pick([get_variables, *default_value]) + else + rval = function_pick([*default_value, false]) + end + + when 'usage' + rval = check_ssl ? true : false + + when 'path' + bpath = '/var/lib/astute/haproxy/' + if check_ns_public + if use_ssl_hash["#{service_name}_#{service_type}"] + rval = bpath + service_type.to_s + '_' + service_name + '.pem' + elsif public_ssl_hash['services'] + rval = bpath + service_type.to_s + '_haproxy.pem' + else + rval = '' + end + else + rval = bpath + service_type.to_s + '_' + service_name + '.pem' + end + else + fail "You should choose 'protocol', 'hostname', 'usage' or 'path' for service!" + end + rval + end +end diff --git a/deployment/puppet/osnailyfacter/modular/astute/upload_cirros.rb b/deployment/puppet/osnailyfacter/modular/astute/upload_cirros.rb index 26902f461c..04d597c5ff 100755 --- a/deployment/puppet/osnailyfacter/modular/astute/upload_cirros.rb +++ b/deployment/puppet/osnailyfacter/modular/astute/upload_cirros.rb @@ -12,11 +12,21 @@ tenant_name = glanced['tenant'].nil? ? "services" : glanced['tenant'] user_name = glanced['user'].nil? ? "glance" : glanced['user'] endpoint_type = glanced['endpoint_type'].nil? ?
"internalURL" : glanced['endpoint_type'] region_name = hiera.lookup 'region', 'RegionOne', {} +ssl_hash = hiera.lookup 'use_ssl', {}, {} + +if ssl_hash['keystone_internal'] + auth_proto = 'https' + auth_addr = ssl_hash['keystone_internal_hostname'] || auth_addr +else + auth_proto = 'http' +end + +puts "Auth URL is #{auth_proto}://#{auth_addr}:5000/v2.0" ENV['OS_TENANT_NAME']="#{tenant_name}" ENV['OS_USERNAME']="#{user_name}" ENV['OS_PASSWORD']="#{glanced['user_password']}" -ENV['OS_AUTH_URL']="http://#{auth_addr}:5000/v2.0" +ENV['OS_AUTH_URL']="#{auth_proto}://#{auth_addr}:5000/v2.0" ENV['OS_ENDPOINT_TYPE'] = "#{endpoint_type}" ENV['OS_REGION_NAME']="#{region_name}" diff --git a/deployment/puppet/osnailyfacter/modular/ceilometer/compute.pp b/deployment/puppet/osnailyfacter/modular/ceilometer/compute.pp index cca7ea29f8..935bcd288d 100644 --- a/deployment/puppet/osnailyfacter/modular/ceilometer/compute.pp +++ b/deployment/puppet/osnailyfacter/modular/ceilometer/compute.pp @@ -28,6 +28,10 @@ $ceilometer_metering_secret = $ceilometer_hash['metering_secret'] $verbose = pick($ceilometer_hash['verbose'], hiera('verbose', true)) $debug = pick($ceilometer_hash['debug'], hiera('debug', false)) $default_log_levels = hiera_hash('default_log_levels') +$ssl_hash = hiera_hash('use_ssl', {}) + +$keystone_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$keystone_endpoint = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [hiera('service_endpoint', ''), $management_vip]) if ($ceilometer_enabled) { class { 'openstack::ceilometer': @@ -43,7 +47,8 @@ if ($ceilometer_enabled) { keystone_user => $ceilometer_hash['user'], keystone_tenant => $ceilometer_hash['tenant'], keystone_region => $ceilometer_region, - keystone_host => $service_endpoint, + keystone_protocol => $keystone_protocol, + keystone_host => $keystone_endpoint, keystone_password => $ceilometer_user_password, on_compute => true, metering_secret => 
$ceilometer_metering_secret, diff --git a/deployment/puppet/osnailyfacter/modular/ceilometer/controller.pp b/deployment/puppet/osnailyfacter/modular/ceilometer/controller.pp index b973f0bb1a..7bb3aed027 100644 --- a/deployment/puppet/osnailyfacter/modular/ceilometer/controller.pp +++ b/deployment/puppet/osnailyfacter/modular/ceilometer/controller.pp @@ -56,10 +56,14 @@ $amqp_user = $rabbit_hash['user'] $rabbit_ha_queues = true $service_endpoint = hiera('service_endpoint') $ha_mode = pick($ceilometer_hash['ha_mode'], true) +$ssl_hash = hiera_hash('use_ssl', {}) prepare_network_config(hiera('network_scheme', {})) $api_bind_address = get_network_role_property('ceilometer/api', 'ipaddr') +$keystone_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$keystone_endpoint = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint, $management_vip]) + if $ceilometer_hash['enabled'] { if $external_mongo { $mongo_hosts = $exteranl_mongo_hash['hosts_ip'] @@ -96,7 +100,8 @@ if ($ceilometer_enabled) { amqp_user => $amqp_user, amqp_password => $amqp_password, rabbit_ha_queues => $rabbit_ha_queues, - keystone_host => $service_endpoint, + keystone_protocol => $keystone_protocol, + keystone_host => $keystone_endpoint, keystone_password => $ceilometer_user_password, keystone_user => $ceilometer_hash['user'], keystone_tenant => $ceilometer_hash['tenant'], diff --git a/deployment/puppet/osnailyfacter/modular/ceilometer/keystone.pp b/deployment/puppet/osnailyfacter/modular/ceilometer/keystone.pp index c6ddaef120..9d9d361fa4 100644 --- a/deployment/puppet/osnailyfacter/modular/ceilometer/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/ceilometer/keystone.pp @@ -2,16 +2,19 @@ notice('MODULAR: ceilometer/keystone.pp') $ceilometer_hash = hiera_hash('ceilometer', {}) $public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') $public_ssl_hash = hiera('public_ssl') -$public_address = 
$public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} -$admin_address = hiera('management_vip') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'ceilometer', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'ceilometer', 'public', 'hostname', [$public_vip]) + +$internal_protocol = get_ssl_property($ssl_hash, {}, 'ceilometer', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'ceilometer', 'internal', 'hostname', [$management_vip]) + +$admin_protocol = get_ssl_property($ssl_hash, {}, 'ceilometer', 'admin', 'protocol', 'http') +$admin_address = get_ssl_property($ssl_hash, {}, 'ceilometer', 'admin', 'hostname', [$management_vip]) + $region = pick($ceilometer_hash['region'], hiera('region', 'RegionOne')) $password = $ceilometer_hash['user_password'] $auth_name = pick($ceilometer_hash['auth_name'], 'ceilometer') @@ -25,7 +28,8 @@ validate_string($public_address) validate_string($password) $public_url = "${public_protocol}://${public_address}:8777" -$admin_url = "http://${admin_address}:8777" +$internal_url = "${internal_protocol}://${internal_address}:8777" +$admin_url = "${admin_protocol}://${admin_address}:8777" class { '::ceilometer::keystone::auth': password => $password, @@ -35,7 +39,7 @@ class { '::ceilometer::keystone::auth': configure_user_role => $configure_user_role, service_name => $service_name, public_url => $public_url, - internal_url => $admin_url, + internal_url => $internal_url, admin_url => $admin_url, region => $region, } diff --git a/deployment/puppet/osnailyfacter/modular/glance/glance.pp b/deployment/puppet/osnailyfacter/modular/glance/glance.pp index 3b9a6596bd..aa9f39d973 100644 --- a/deployment/puppet/osnailyfacter/modular/glance/glance.pp +++ 
b/deployment/puppet/osnailyfacter/modular/glance/glance.pp @@ -19,7 +19,6 @@ $max_pool_size = hiera('max_pool_size') $max_overflow = hiera('max_overflow') $ceilometer_hash = hiera_hash('ceilometer', {}) $region = hiera('region','RegionOne') -$glance_endpoint = $management_vip $service_workers = pick($glance_hash['glance_workers'], min(max($::processorcount, 2), 16)) $default_log_levels = hiera_hash('default_log_levels') @@ -30,7 +29,6 @@ $api_bind_address = get_network_role_property('glance/api', 'ipadd $enabled = true $max_retries = '-1' $idle_timeout = '3600' -$auth_uri = "http://${service_endpoint}:5000/" $rabbit_password = $rabbit_hash['password'] $rabbit_user = $rabbit_hash['user'] @@ -54,6 +52,14 @@ $glance_image_cache_max_size = $glance_hash['image_cache_max_size'] $glance_pipeline = pick($glance_hash['pipeline'], 'keystone') $glance_large_object_size = pick($glance_hash['large_object_size'], '5120') +$ssl_hash = hiera_hash('use_ssl', {}) +$keystone_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$internal_ssl = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'usage', false) +$keystone_endpoint = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [hiera('service_endpoint', ''), $management_vip]) +$glance_endpoint = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'hostname', [$management_vip]) + +$auth_uri = "${keystone_protocol}://${keystone_endpoint}:5000/" + $rados_connect_timeout = '30' if ($storage_hash['images_ceph']) { @@ -93,7 +99,9 @@ class { 'openstack::glance': glance_vcenter_image_dir => $glance_vcenter_image_dir, glance_vcenter_api_retry_count => $glance_vcenter_api_retry_count, auth_uri => $auth_uri, - keystone_host => $service_endpoint, + keystone_host => $keystone_endpoint, + internal_ssl => $internal_ssl, + glance_protocol => 'http', region => $region, bind_host => $api_bind_address, enabled => $enabled, diff --git 
a/deployment/puppet/osnailyfacter/modular/glance/keystone.pp b/deployment/puppet/osnailyfacter/modular/glance/keystone.pp index 6a06be2926..debcbdbdc3 100644 --- a/deployment/puppet/osnailyfacter/modular/glance/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/glance/keystone.pp @@ -3,7 +3,7 @@ notice('MODULAR: glance/keystone.pp') $glance_hash = hiera_hash('glance', {}) $public_vip = hiera('public_vip') $public_ssl_hash = hiera('public_ssl') -$admin_address = hiera('management_vip') +$management_vip = hiera('management_vip') $region = pick($glance_hash['region'], hiera('region', 'RegionOne')) $password = $glance_hash['user_password'] $auth_name = pick($glance_hash['auth_name'], 'glance') @@ -12,18 +12,18 @@ $configure_user = pick($glance_hash['configure_user'], true) $configure_user_role = pick($glance_hash['configure_user_role'], true) $service_name = pick($glance_hash['service_name'], 'glance') $tenant = pick($glance_hash['tenant'], 'services') +$ssl_hash = hiera_hash('use_ssl', {}) -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? 
{ - true => 'https', - default => 'http', -} +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'glance', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'glance', 'public', 'hostname', [$public_vip]) +$internal_protocol = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'hostname', [$management_vip]) +$admin_protocol = get_ssl_property($ssl_hash, {}, 'glance', 'admin', 'protocol', 'http') +$admin_address = get_ssl_property($ssl_hash, {}, 'glance', 'admin', 'hostname', [$management_vip]) $public_url = "${public_protocol}://${public_address}:9292" -$admin_url = "http://${admin_address}:9292" +$internal_url = "${internal_protocol}://${internal_address}:9292" +$admin_url = "${admin_protocol}://${admin_address}:9292" validate_string($public_address) validate_string($password) @@ -36,7 +36,7 @@ class { '::glance::keystone::auth': configure_user_role => $configure_user_role, service_name => $service_name, public_url => $public_url, + internal_url => $internal_url, admin_url => $admin_url, - internal_url => $admin_url, region => $region, } diff --git a/deployment/puppet/osnailyfacter/modular/heat/heat.pp b/deployment/puppet/osnailyfacter/modular/heat/heat.pp index 0b0d81a7da..e919c8084f 100644 --- a/deployment/puppet/osnailyfacter/modular/heat/heat.pp +++ b/deployment/puppet/osnailyfacter/modular/heat/heat.pp @@ -9,6 +9,26 @@ $max_pool_size = hiera('max_pool_size') $max_overflow = hiera('max_overflow') $idle_timeout = hiera('idle_timeout') $service_endpoint = hiera('service_endpoint') +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) +$public_vip = hiera('public_vip') + +$public_auth_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'keystone', 'public', 'protocol', 'http') +$public_auth_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'keystone', 'public', 'hostname', 
[$public_vip]) +$internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$internal_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint, $management_vip]) +$admin_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http') +$admin_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip]) + +$heat_protocol = get_ssl_property($ssl_hash, {}, 'heat', 'internal', 'protocol', 'http') +$heat_endpoint = get_ssl_property($ssl_hash, {}, 'heat', 'internal', 'hostname', [hiera('heat_endpoint', ''), $management_vip]) +$internal_ssl = get_ssl_property($ssl_hash, {}, 'heat', 'internal', 'usage', false) + +$public_ssl = get_ssl_property($ssl_hash, {}, 'heat', 'public', 'usage', false) + +$auth_uri = "${public_auth_protocol}://${public_auth_address}:5000/v2.0/" +$identity_uri = "${admin_auth_protocol}://${admin_auth_address}:35357/" + $debug = pick($heat_hash['debug'], hiera('debug', false)) $verbose = pick($heat_hash['verbose'], hiera('verbose', true)) $default_log_levels = hiera_hash('default_log_levels') @@ -26,19 +46,6 @@ $database_name = hiera('heat_db_name', 'heat') $read_timeout = '60' $sql_connection = "mysql://${database_user}:${database_password}@${db_host}/${database_name}?read_timeout=${read_timeout}" $region = hiera('region', 'RegionOne') -$public_ssl_hash = hiera('public_ssl') -$public_ip = hiera('public_vip') -$public_protocol = pick($public_ssl_hash['services'], false) ? { - true => 'https', - default => 'http', -} - -$public_address = pick($public_ssl_hash['services'], false) ? 
{ - true => pick($public_ssl_hash['hostname']), - default => $public_ip, -} -$auth_uri = "${public_protocol}://${public_address}:5000/v2.0/" -$identity_uri = "http://${service_endpoint}:35357/" ####### Disable upstart startup on install ####### if $::operatingsystem == 'Ubuntu' { @@ -69,14 +76,16 @@ class { 'openstack::heat' : api_cloudwatch_bind_host => $bind_address, auth_uri => $auth_uri, identity_uri => $identity_uri, + keystone_protocol => $internal_auth_protocol, + keystone_host => $internal_auth_address, keystone_user => $keystone_user, keystone_password => $heat_hash['user_password'], keystone_tenant => $keystone_tenant, - keystone_ec2_uri => "http://${service_endpoint}:5000/v2.0", + keystone_ec2_uri => "${internal_auth_protocol}://${internal_auth_address}:5000/v2.0", region => $region, - public_ssl => $public_ssl_hash['services'], rpc_backend => 'rabbit', amqp_hosts => split(hiera('amqp_hosts',''), ','), + heat_protocol => $heat_protocol, amqp_user => $rabbit_hash['user'], amqp_password => $rabbit_hash['password'], sql_connection => $sql_connection, @@ -137,7 +146,7 @@ haproxy_backend_status { 'keystone-admin' : } class { 'heat::keystone::domain' : - auth_url => "http://${service_endpoint}:35357/v2.0", + auth_url => "${admin_auth_protocol}://${admin_auth_address}:35357/v2.0", keystone_admin => $keystone_user, keystone_password => $heat_hash['user_password'], keystone_tenant => $keystone_tenant, diff --git a/deployment/puppet/osnailyfacter/modular/heat/keystone.pp b/deployment/puppet/osnailyfacter/modular/heat/keystone.pp index 50034745d7..9ac1f57297 100644 --- a/deployment/puppet/osnailyfacter/modular/heat/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/heat/keystone.pp @@ -2,17 +2,19 @@ notice('MODULAR: heat/keystone.pp') $heat_hash = hiera_hash('heat', {}) $public_vip = hiera('public_vip') -$admin_address = hiera('management_vip') $region = pick($heat_hash['region'], hiera('region', 'RegionOne')) +$management_vip = hiera('management_vip')
$public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'heat', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'heat', 'public', 'hostname', [$public_vip]) + +$internal_protocol = get_ssl_property($ssl_hash, {}, 'heat', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'heat', 'internal', 'hostname', [hiera('heat_endpoint', ''), $management_vip]) + +$admin_protocol = get_ssl_property($ssl_hash, {}, 'heat', 'admin', 'protocol', 'http') +$admin_address = get_ssl_property($ssl_hash, {}, 'heat', 'admin', 'hostname', [hiera('heat_endpoint', ''), $management_vip]) $password = $heat_hash['user_password'] $auth_name = pick($heat_hash['auth_name'], 'heat') @@ -26,10 +28,11 @@ validate_string($public_address) validate_string($password) $public_url = "${public_protocol}://${public_address}:8004/v1/%(tenant_id)s" -$admin_url = "http://${admin_address}:8004/v1/%(tenant_id)s" +$internal_url = "${internal_protocol}://${internal_address}:8004/v1/%(tenant_id)s" +$admin_url = "${admin_protocol}://${admin_address}:8004/v1/%(tenant_id)s" $public_url_cfn = "${public_protocol}://${public_address}:8000/v1" -$admin_url_cfn = "http://${admin_address}:8000/v1" - +$internal_url_cfn = "${internal_protocol}://${internal_address}:8000/v1" +$admin_url_cfn = "${admin_protocol}://${admin_address}:8000/v1" class { '::heat::keystone::auth' : @@ -41,7 +44,7 @@ class { '::heat::keystone::auth' : configure_endpoint => true, trusts_delegated_roles => $trusts_delegated_roles, public_url => $public_url, - internal_url => $admin_url, + internal_url => $internal_url, admin_url => $admin_url, } @@ -54,6 +57,6 @@ class { 
'::heat::keystone::auth_cfn' : email => "${auth_name}-cfn@localhost", configure_endpoint => true, public_url => $public_url_cfn, - internal_url => $admin_url_cfn, + internal_url => $internal_url_cfn, admin_url => $admin_url_cfn, } diff --git a/deployment/puppet/osnailyfacter/modular/murano/keystone.pp b/deployment/puppet/osnailyfacter/modular/murano/keystone.pp index 4d132d74e1..c3fdf30ba0 100644 --- a/deployment/puppet/osnailyfacter/modular/murano/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/murano/keystone.pp @@ -1,27 +1,26 @@ notice('MODULAR: murano/keystone.pp') -$murano_hash = hiera_hash('murano_hash', {}) -$public_ip = hiera('public_vip') -$management_ip = hiera('management_vip') -$public_ssl = hiera('public_ssl') -$region = hiera('region', 'RegionOne') +$murano_hash = hiera_hash('murano_hash', {}) +$public_ip = hiera('public_vip') +$management_ip = hiera('management_vip') +$region = hiera('region', 'RegionOne') +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'murano', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'murano', 'public', 'hostname', [$public_ip]) -$public_protocol = $public_ssl['services'] ? { - true => 'https', - default => 'http', -} +$internal_protocol = get_ssl_property($ssl_hash, {}, 'murano', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'murano', 'internal', 'hostname', [$management_ip]) -$public_address = $public_ssl['services'] ? 
{ - true => $public_ssl['hostname'], - default => $public_ip, -} +$admin_protocol = get_ssl_property($ssl_hash, {}, 'murano', 'admin', 'protocol', 'http') +$admin_address = get_ssl_property($ssl_hash, {}, 'murano', 'admin', 'hostname', [$management_ip]) -$api_bind_port = '8082' - -$tenant = pick($murano_hash['tenant'], 'services') -$public_url = "${public_protocol}://${public_address}:${api_bind_port}" -$admin_url = "http://${management_ip}:${api_bind_port}" +$api_bind_port = '8082' +$tenant = pick($murano_hash['tenant'], 'services') +$public_url = "${public_protocol}://${public_address}:${api_bind_port}" +$internal_url = "${internal_protocol}://${internal_address}:${api_bind_port}" +$admin_url = "${admin_protocol}://${admin_address}:${api_bind_port}" ################################################################# @@ -31,6 +30,6 @@ class { 'murano::keystone::auth': region => $region, tenant => $tenant, public_url => $public_url, + internal_url => $internal_url, admin_url => $admin_url, - internal_url => $admin_url, } diff --git a/deployment/puppet/osnailyfacter/modular/murano/murano.pp b/deployment/puppet/osnailyfacter/modular/murano/murano.pp index 444ee6562d..e042b17c7c 100644 --- a/deployment/puppet/osnailyfacter/modular/murano/murano.pp +++ b/deployment/puppet/osnailyfacter/modular/murano/murano.pp @@ -22,30 +22,33 @@ $use_stderr = hiera('use_stderr', false) $rabbit_ha_queues = hiera('rabbit_ha_queues') $amqp_port = hiera('amqp_port') $amqp_hosts = hiera('amqp_hosts') -$public_ssl = hiera_hash('public_ssl', {}) $external_dns = hiera_hash('external_dns', {}) +$public_ssl_hash = hiera_hash('public_ssl', {}) +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_auth_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'keystone', 'public', 'protocol', 'http') +$public_auth_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'keystone', 'public', 'hostname', [$public_ip]) + +$internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 
'protocol', 'http') +$internal_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [hiera('keystone_endpoint', ''), $service_endpoint, $management_vip]) + +$admin_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http') +$admin_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [hiera('keystone_endpoint', ''), $service_endpoint, $management_vip]) + +$internal_api_protocol = 'http' +$api_bind_host = get_network_role_property('murano/api', 'ipaddr') ################################################################# if $murano_hash['enabled'] { - $public_protocol = pick($public_ssl['services'], false) ? { - true => 'https', - default => 'http', - } - - $public_address = pick($public_ssl['services'], false) ? { - true => pick($public_ssl['hostname']), - default => $public_ip, - } $firewall_rule = '202 murano-api' $api_bind_port = '8082' - $api_bind_host = get_network_role_property('murano/api', 'ipaddr') $murano_user = pick($murano_hash['user'], 'murano') $tenant = pick($murano_hash['tenant'], 'services') - $internal_url = "http://${api_bind_host}:${api_bind_port}" + $internal_url = "${internal_api_protocol}://${api_bind_host}:${api_bind_port}" $db_user = pick($murano_hash['db_user'], 'murano') $db_name = pick($murano_hash['db_name'], 'murano') $db_password = pick($murano_hash['db_password']) @@ -81,11 +84,11 @@ if $murano_hash['enabled'] { use_stderr => $use_stderr, log_facility => $syslog_log_facility_murano, database_connection => $sql_connection, - auth_uri => "${public_protocol}://${public_address}:5000/v2.0/", + auth_uri => "${public_auth_protocol}://${public_auth_address}:5000/v2.0/", admin_user => $murano_user, admin_password => $murano_hash['user_password'], admin_tenant_name => $tenant, - identity_uri => "http://${service_endpoint}:35357/", + identity_uri => "${admin_auth_protocol}://${admin_auth_address}:35357/", use_neutron => $use_neutron, rabbit_os_user => 
$rabbit_hash['user'], rabbit_os_password => $rabbit_hash['password'], diff --git a/deployment/puppet/osnailyfacter/modular/openstack-cinder/keystone.pp b/deployment/puppet/osnailyfacter/modular/openstack-cinder/keystone.pp index 1b93e1427b..42e7a0a0bf 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-cinder/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-cinder/keystone.pp @@ -2,19 +2,26 @@ notice('MODULAR: cinder/keystone.pp') $cinder_hash = hiera_hash('cinder', {}) $public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) $public_vip = hiera('public_vip') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} -$admin_protocol = 'http' -$admin_address = hiera('management_vip') -$region = pick($cinder_hash['region'], hiera('region', 'RegionOne')) +$management_vip = hiera('management_vip') +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'cinder', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'cinder', 'public', 'hostname', [$public_vip]) + +$internal_protocol = get_ssl_property($ssl_hash, {}, 'cinder', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'cinder', 'internal', 'hostname', [$management_vip]) + +$admin_protocol = get_ssl_property($ssl_hash, {}, 'cinder', 'admin', 'protocol', 'http') +$admin_address = get_ssl_property($ssl_hash, {}, 'cinder', 'admin', 'hostname', [$management_vip]) + +$port = '8776' + +$public_base_url = "${public_protocol}://${public_address}:${port}" +$internal_base_url = "${internal_protocol}://${internal_address}:${port}" +$admin_base_url = "${admin_protocol}://${admin_address}:${port}" + +$region = pick($cinder_hash['region'], hiera('region', 'RegionOne')) $password = $cinder_hash['user_password'] $auth_name = 
pick($cinder_hash['auth_name'], 'cinder') $configure_endpoint = pick($cinder_hash['configure_endpoint'], true) @@ -23,15 +30,9 @@ $configure_user_role = pick($cinder_hash['configure_user_role'], true) $service_name = pick($cinder_hash['service_name'], 'cinder') $tenant = pick($cinder_hash['tenant'], 'services') -$port = '8776' - -$public_url = "${public_protocol}://${public_address}:${port}/v1/%(tenant_id)s" -$admin_url = "${admin_protocol}://${admin_address}:${port}/v1/%(tenant_id)s" - -$public_url_v2 = "${public_protocol}://${public_address}:${port}/v2/%(tenant_id)s" -$admin_url_v2 = "${admin_protocol}://${admin_address}:${port}/v2/%(tenant_id)s" - validate_string($public_address) +validate_string($internal_address) +validate_string($admin_address) validate_string($password) class { '::cinder::keystone::auth': @@ -41,11 +42,11 @@ class { '::cinder::keystone::auth': configure_user => $configure_user, configure_user_role => $configure_user_role, service_name => $service_name, - public_url => $public_url, - internal_url => $admin_url, - admin_url => $admin_url, - public_url_v2 => $public_url_v2, - internal_url_v2 => $admin_url_v2, - admin_url_v2 => $admin_url_v2, + public_url => "${public_base_url}/v1/%(tenant_id)s", + internal_url => "${internal_base_url}/v1/%(tenant_id)s", + admin_url => "${admin_base_url}/v1/%(tenant_id)s", + public_url_v2 => "${public_base_url}/v2/%(tenant_id)s", + internal_url_v2 => "${internal_base_url}/v2/%(tenant_id)s", + admin_url_v2 => "${admin_base_url}/v2/%(tenant_id)s", region => $region, } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-cinder/openstack-cinder.pp b/deployment/puppet/osnailyfacter/modular/openstack-cinder/openstack-cinder.pp index 3b19128808..43e1a32967 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-cinder/openstack-cinder.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-cinder/openstack-cinder.pp @@ -2,28 +2,42 @@ notice('MODULAR: openstack-cinder.pp') #Network stuff 
prepare_network_config(hiera('network_scheme', {})) -$cinder_hash = hiera_hash('cinder_hash', {}) -$management_vip = hiera('management_vip') -$queue_provider = hiera('queue_provider', 'rabbitmq') -$cinder_volume_group = hiera('cinder_volume_group', 'cinder') -$nodes_hash = hiera('nodes', {}) -$storage_hash = hiera_hash('storage', {}) -$ceilometer_hash = hiera_hash('ceilometer_hash',{}) -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$service_endpoint = hiera('service_endpoint') -$service_workers = pick($cinder_hash['workers'], - min(max($::processorcount, 2), 16)) +$cinder_hash = hiera_hash('cinder_hash', {}) +$management_vip = hiera('management_vip') +$queue_provider = hiera('queue_provider', 'rabbitmq') +$cinder_volume_group = hiera('cinder_volume_group', 'cinder') +$nodes_hash = hiera('nodes', {}) +$storage_hash = hiera_hash('storage', {}) +$ceilometer_hash = hiera_hash('ceilometer_hash',{}) +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$service_endpoint = hiera('service_endpoint') +$service_workers = pick($cinder_hash['workers'], min(max($::processorcount, 2), 16)) +$cinder_db_password = $cinder_hash[db_password] +$cinder_user_password = $cinder_hash[user_password] +$keystone_user = pick($cinder_hash['user'], 'cinder') +$keystone_tenant = pick($cinder_hash['tenant'], 'services') +$region = hiera('region', 'RegionOne') +$db_host = pick($cinder_hash['db_host'], hiera('database_vip')) +$cinder_db_user = pick($cinder_hash['db_user'], 'cinder') +$cinder_db_name = pick($cinder_hash['db_name'], 'cinder') +$roles = node_roles($nodes_hash, hiera('uid')) +$ssl_hash = hiera_hash('use_ssl', {}) -$cinder_db_password = $cinder_hash[db_password] -$cinder_user_password = $cinder_hash[user_password] -$keystone_user = pick($cinder_hash['user'], 'cinder') -$keystone_tenant = pick($cinder_hash['tenant'], 'services') -$region = hiera('region', 'RegionOne') -$db_host = pick($cinder_hash['db_host'], hiera('database_vip')) -$cinder_db_user = pick($cinder_hash['db_user'], 'cinder') 
-$cinder_db_name = pick($cinder_hash['db_name'], 'cinder') -$roles = node_roles($nodes_hash, hiera('uid')) -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") +$keystone_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$keystone_auth_host = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [hiera('keystone_endpoint', ''), $service_endpoint, $management_vip]) + +$glance_protocol = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'protocol', 'http') +$glance_endpoint = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'hostname', [hiera('glance_endpoint', ''), $management_vip]) +$glance_ssl_usage = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'usage', false) +if $glance_ssl_usage { + $glance_api_servers = "${glance_protocol}://${glance_endpoint}:9292" +} else { + $glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") +} + +$service_port = '5000' +$auth_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" +$identity_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" # Determine who should get the volume service if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { @@ -42,12 +56,6 @@ $max_overflow = min($::processorcount * 5 + 0, 60 + 0) $max_retries = '-1' $idle_timeout = '3600' -$keystone_auth_protocol = 'http' -$keystone_auth_host = $service_endpoint -$service_port = '5000' -$auth_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" -$identity_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" - $openstack_version = { 'keystone' => 'installed', 'glance' => 'installed', @@ -70,7 +78,6 @@ class {'openstack::cinder': manage_volumes => $manage_volumes, enabled => true, glance_api_servers => $glance_api_servers, - auth_host => $service_endpoint, bind_host => get_network_role_property('cinder/api', 'ipaddr'), iscsi_bind_host =>
get_network_role_property('cinder/iscsi', 'ipaddr'), keystone_user => $keystone_user, diff --git a/deployment/puppet/osnailyfacter/modular/openstack-controller/keystone.pp b/deployment/puppet/osnailyfacter/modular/openstack-controller/keystone.pp index 0f5a4fc8c4..feabd902a7 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-controller/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-controller/keystone.pp @@ -2,17 +2,29 @@ notice('MODULAR: openstack-controller/keystone.pp') $nova_hash = hiera_hash('nova', {}) $public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') $public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} -$admin_protocol = 'http' -$admin_address = hiera('management_vip') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'hostname', [$public_vip]) + +$internal_protocol = get_ssl_property($ssl_hash, {}, 'nova', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'nova', 'internal', 'hostname', [$management_vip]) + +$admin_protocol = get_ssl_property($ssl_hash, {}, 'nova', 'admin', 'protocol', 'http') +$admin_address = get_ssl_property($ssl_hash, {}, 'nova', 'admin', 'hostname', [$management_vip]) + +$compute_port = '8774' +$public_base_url = "${public_protocol}://${public_address}:${compute_port}" +$internal_base_url = "${internal_protocol}://${internal_address}:${compute_port}" +$admin_base_url = "${admin_protocol}://${admin_address}:${compute_port}" + +$ec2_port = '8773' +$ec2_public_url = "${public_protocol}://${public_address}:${ec2_port}/services/Cloud" +$ec2_internal_url = 
"${internal_protocol}://${internal_address}:${ec2_port}/services/Cloud" +$ec2_admin_url = "${admin_protocol}://${admin_address}:${ec2_port}/services/Admin" + $region = pick($nova_hash['region'], hiera('region', 'RegionOne')) $password = $nova_hash['user_password'] @@ -23,15 +35,6 @@ $configure_user_role = pick($nova_hash['configure_user_role'], true) $service_name = pick($nova_hash['service_name'], 'nova') $tenant = pick($nova_hash['tenant'], 'services') -$compute_port = '8774' -$public_base_url = "${public_protocol}://${public_address}:${compute_port}" -$admin_base_url = "${admin_protocol}://${admin_address}:${compute_port}" - -$ec2_port = '8773' -$ec2_public_url = "${public_protocol}://${public_address}:${ec2_port}/services/Cloud" -$ec2_internal_url = "${admin_protocol}://${admin_address}:${ec2_port}/services/Cloud" -$ec2_admin_url = "${admin_protocol}://${admin_address}:${ec2_port}/services/Admin" - validate_string($public_address) validate_string($password) @@ -45,8 +48,8 @@ class { '::nova::keystone::auth': service_name => $service_name, public_url => "${public_base_url}/v2/%(tenant_id)s", public_url_v3 => "${public_base_url}/v3", - internal_url => "${admin_base_url}/v2/%(tenant_id)s", - internal_url_v3 => "${admin_base_url}/v3", + internal_url => "${internal_base_url}/v2/%(tenant_id)s", + internal_url_v3 => "${internal_base_url}/v3", admin_url => "${admin_base_url}/v2/%(tenant_id)s", admin_url_v3 => "${admin_base_url}/v3", region => $region, diff --git a/deployment/puppet/osnailyfacter/modular/openstack-controller/openstack-controller.pp b/deployment/puppet/osnailyfacter/modular/openstack-controller/openstack-controller.pp index 32e8c706bf..ac87de60cd 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-controller/openstack-controller.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-controller/openstack-controller.pp @@ -34,10 +34,23 @@ $syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') $workloads_hash = 
hiera_hash('workloads_collector', {}) $service_endpoint = hiera('service_endpoint') $db_host = pick($nova_hash['db_host'], hiera('database_vip')) +$ssl_hash = hiera_hash('use_ssl', {}) + +$internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$internal_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint, $management_vip]) + +$glance_protocol = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'protocol', 'http') +$glance_endpoint = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'hostname', [hiera('glance_endpoint', ''), $management_vip]) +$glance_ssl = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'usage', false) +if $glance_ssl { + $glance_api_servers = "${glance_protocol}://${glance_endpoint}:9292" +} else { + $glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") +} + $nova_db_user = pick($nova_hash['db_user'], 'nova') $keystone_user = pick($nova_hash['user'], 'nova') $keystone_tenant = pick($nova_hash['tenant'], 'services') -$glance_api_servers = hiera('glance_api_servers', "$management_vip:9292") $region = hiera('region', 'RegionOne') $service_workers = pick($nova_hash['workers'], min(max($::processorcount, 2), 16)) @@ -165,7 +178,7 @@ if $primary_controller { "OS_TENANT_NAME=${keystone_tenant}", "OS_USERNAME=${keystone_user}", "OS_PASSWORD=${nova_hash['user_password']}", - "OS_AUTH_URL=http://${service_endpoint}:5000/v2.0/", + "OS_AUTH_URL=${internal_auth_protocol}://${internal_auth_address}:5000/v2.0/", 'OS_ENDPOINT_TYPE=internalURL', "OS_REGION_NAME=${region}", "NOVA_ENDPOINT_TYPE=internalURL", @@ -196,7 +209,7 @@ if $primary_controller { username => $access_hash[user], api_key => $access_hash[password], auth_method => 'password', - auth_url => "http://${service_endpoint}:5000/v2.0/", + auth_url => "${internal_auth_protocol}://${internal_auth_address}:5000/v2.0/", authtenant_name => $access_hash[tenant], api_retries => 
10, } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ceilometer.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ceilometer.pp index 74edc62ef3..d10752a896 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ceilometer.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ceilometer.pp @@ -4,8 +4,15 @@ $ceilometer_hash = hiera_hash('ceilometer',{}) # NOT enabled by default $use_ceilometer = pick($ceilometer_hash['enabled'], false) $public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) $ceilometer_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceilometer_nodes'), 'ceilometer/api') +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'ceilometer', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'ceilometer', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'ceilometer', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'ceilometer', 'internal', 'path', ['']) + if ($use_ceilometer) { $server_names = hiera_array('ceilometer_names', keys($ceilometer_address_map)) $ipaddresses = hiera_array('ceilometer_ipaddresses', values($ceilometer_address_map)) @@ -18,6 +25,9 @@ if ($use_ceilometer) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-cinder.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-cinder.pp index 238e0ecd6b..1b2c697af4 100644 --- 
a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-cinder.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-cinder.pp @@ -1,10 +1,17 @@ notice('MODULAR: openstack-haproxy-cinder.pp') -$network_metadata = hiera_hash('network_metadata') -$cinder_hash = hiera_hash('cinder_hash', {}) +$network_metadata = hiera_hash('network_metadata') +$cinder_hash = hiera_hash('cinder_hash', {}) # enabled by default -$use_cinder = pick($cinder_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') +$use_cinder = pick($cinder_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'cinder', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'cinder', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'cinder', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'cinder', 'internal', 'path', ['']) $cinder_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('cinder_nodes'), 'cinder/api') if ($use_cinder) { @@ -19,6 +26,9 @@ if ($use_cinder) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-glance.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-glance.pp index 14dec20194..4c70b5d6b5 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-glance.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-glance.pp @@ -1,11 +1,17 @@ notice('MODULAR: openstack-haproxy-glance.pp') 
-$network_metadata = hiera_hash('network_metadata') -$glance_hash = hiera_hash('glance', {}) +$network_metadata = hiera_hash('network_metadata') +$glance_hash = hiera_hash('glance', {}) # enabled by default -$use_glance = pick($glance_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') +$use_glance = pick($glance_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'glance', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'glance', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'path', ['']) #todo(sv): change to 'glance' as soon as glance as node-role was ready $glances_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']), 'glance/api') @@ -21,6 +27,9 @@ if ($use_glance) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-heat.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-heat.pp index 4bbf4336b7..26350893dc 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-heat.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-heat.pp @@ -1,11 +1,19 @@ notice('MODULAR: openstack-haproxy-heat.pp') -$heat_hash = hiera_hash('heat', {}) +$heat_hash = hiera_hash('heat', {}) # enabled by default -$use_heat = pick($heat_hash['enabled'], true) -$public_ssl_hash = 
hiera('public_ssl') -$network_metadata = hiera_hash('network_metadata') -$heat_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('heat_roles')), 'heat/api') +$use_heat = pick($heat_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'heat', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'heat', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'heat', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'heat', 'internal', 'path', ['']) + +$network_metadata = hiera_hash('network_metadata') +$heat_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('heat_roles')), 'heat/api') if ($use_heat) { $server_names = hiera_array('heat_names',keys($heat_address_map)) @@ -19,6 +27,9 @@ if ($use_heat) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-horizon.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-horizon.pp index a491245a75..0345f19108 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-horizon.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-horizon.pp @@ -1,10 +1,14 @@ notice('MODULAR: openstack-haproxy-horizon.pp') -$network_metadata = hiera_hash('network_metadata') -$horizon_hash = hiera_hash('horizon', {}) +$network_metadata = hiera_hash('network_metadata') +$horizon_hash = hiera_hash('horizon', {}) # enabled by default
-$use_horizon = pick($horizon_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') +$use_horizon = pick($horizon_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'horizon', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'horizon', 'public', 'path', ['']) $horizon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('horizon_nodes'), 'horizon') if ($use_horizon) { @@ -19,6 +23,7 @@ if ($use_horizon) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - use_ssl => $public_ssl_hash['horizon'], + use_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ironic.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ironic.pp index df256fc6fd..8f76d8250f 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ironic.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-ironic.pp @@ -1,10 +1,14 @@ notice('MODULAR: openstack-haproxy-ironic.pp') -$network_metadata = hiera_hash('network_metadata') -$public_ssl_hash = hiera('public_ssl') -$ironic_hash = hiera_hash('ironic', {}) +$network_metadata = hiera_hash('network_metadata') +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) +$ironic_hash = hiera_hash('ironic', {}) -$ironic_address_map = get_node_to_ipaddr_map_by_network_role(hiera('ironic_api_nodes'), 'ironic/api') +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'ironic', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'ironic', 'public', 'path', ['']) + +$ironic_address_map = get_node_to_ipaddr_map_by_network_role(hiera('ironic_api_nodes'), 'ironic/api') $server_names = 
hiera_array('ironic_server_names', keys($ironic_address_map)) $ipaddresses = hiera_array('ironic_ipaddresses', values($ironic_address_map)) @@ -17,6 +21,7 @@ class { '::openstack::ha::ironic': ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, baremetal_virtual_ip => $baremetal_virtual_ip, } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-keystone.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-keystone.pp index 8772ac4c48..b1b6eaaddb 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-keystone.pp @@ -1,10 +1,20 @@ notice('MODULAR: openstack-haproxy-keystone.pp') -$network_metadata = hiera_hash('network_metadata') -$keystone_hash = hiera_hash('keystone', {}) +$network_metadata = hiera_hash('network_metadata') +$keystone_hash = hiera_hash('keystone', {}) # enabled by default -$use_keystone = pick($keystone_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') +$use_keystone = pick($keystone_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'keystone', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'keystone', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'path', ['']) + +$admin_ssl = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'usage', false) +$admin_ssl_path = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'path', ['']) #todo(sv): change to 'keystone' as soon as keystone as 
node-role was ready $keystones_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']), 'keystone/api') @@ -24,6 +34,11 @@ if ($use_keystone) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, + admin_ssl => $admin_ssl, + admin_ssl_path => $admin_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-murano.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-murano.pp index 5224a2e039..30683454a9 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-murano.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-murano.pp @@ -4,6 +4,12 @@ $murano_hash = hiera_hash('murano_hash',{}) # NOT enabled by default $use_murano = pick($murano_hash['enabled'], false) $public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'murano', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'murano', 'public', 'path', ['']) +$internal_ssl = get_ssl_property($ssl_hash, {}, 'murano', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'murano', 'internal', 'path', ['']) + $network_metadata = hiera_hash('network_metadata') $murano_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('murano_roles')), 'murano/api') @@ -19,6 +25,9 @@ if ($use_murano) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + 
public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-neutron.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-neutron.pp index 738ccfde64..4e83ecaa34 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-neutron.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-neutron.pp @@ -3,6 +3,13 @@ notice('MODULAR: openstack-haproxy-neutron.pp') # NOT enabled by default $use_neutron = hiera('use_neutron', false) $public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'neutron', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'neutron', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'path', ['']) $neutron_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('neutron_nodes'), 'neutron/api') if ($use_neutron) { @@ -17,6 +24,9 @@ if ($use_neutron) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-nova.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-nova.pp index 060d9db73d..af64023a43 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-nova.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-nova.pp @@ -1,9 +1,16 @@ 
notice('MODULAR: openstack-haproxy-nova.pp') -$nova_hash = hiera_hash('nova', {}) +$nova_hash = hiera_hash('nova', {}) # enabled by default -$use_nova = pick($nova_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') +$use_nova = pick($nova_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'nova', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'nova', 'internal', 'path', ['']) $nova_api_address_map = get_node_to_ipaddr_map_by_network_role(hiera('nova_api_nodes'), 'nova/api') @@ -20,6 +27,9 @@ if ($use_nova) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-radosgw.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-radosgw.pp index 8147336517..4981fd4fd7 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-radosgw.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-radosgw.pp @@ -3,6 +3,10 @@ notice('MODULAR: openstack-haproxy-radosgw.pp') $network_metadata = hiera_hash('network_metadata') $storage_hash = hiera_hash('storage', {}) $public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'radosgw', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'radosgw', 'public', 'path', 
['']) $ironic_hash = hiera_hash('ironic', {}) if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { @@ -33,7 +37,8 @@ if $use_radosgw { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, baremetal_virtual_ip => $baremetal_virtual_ip, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-sahara.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-sahara.pp index 4f76a2f2ec..7522bdf4e3 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-sahara.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-sahara.pp @@ -4,6 +4,14 @@ $sahara_hash = hiera_hash('sahara_hash',{}) # NOT enabled by default $use_sahara = pick($sahara_hash['enabled'], false) $public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'sahara', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'sahara', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'sahara', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'sahara', 'internal', 'path', ['']) + $network_metadata = hiera_hash('network_metadata') $sahara_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('sahara_roles')), 'sahara/api') @@ -19,6 +27,9 @@ if ($use_sahara) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, } } diff --git 
a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-swift.pp b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-swift.pp index 01819d46b8..3b59456e3f 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-swift.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-haproxy/openstack-haproxy-swift.pp @@ -1,10 +1,18 @@ notice('MODULAR: openstack-haproxy-swift.pp') -$network_metadata = hiera_hash('network_metadata') -$storage_hash = hiera_hash('storage', {}) -$swift_proxies = hiera_hash('swift_proxies', undef) -$public_ssl_hash = hiera('public_ssl') -$ironic_hash = hiera_hash('ironic', {}) +$network_metadata = hiera_hash('network_metadata') +$storage_hash = hiera_hash('storage', {}) +$swift_proxies = hiera_hash('swift_proxies', undef) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_ssl = get_ssl_property($ssl_hash, $public_ssl_hash, 'swift', 'public', 'usage', false) +$public_ssl_path = get_ssl_property($ssl_hash, $public_ssl_hash, 'swift', 'public', 'path', ['']) + +$internal_ssl = get_ssl_property($ssl_hash, {}, 'swift', 'internal', 'usage', false) +$internal_ssl_path = get_ssl_property($ssl_hash, {}, 'swift', 'internal', 'path', ['']) + +$ironic_hash = hiera_hash('ironic', {}) if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { $use_swift = true @@ -31,7 +39,10 @@ if ($use_swift) { ipaddresses => $ipaddresses, public_virtual_ip => $public_virtual_ip, server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + public_ssl => $public_ssl, + public_ssl_path => $public_ssl_path, + internal_ssl => $internal_ssl, + internal_ssl_path => $internal_ssl_path, baremetal_virtual_ip => $baremetal_virtual_ip, } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-network/compute-nova.pp b/deployment/puppet/osnailyfacter/modular/openstack-network/compute-nova.pp 
index 5a362f2711..dce3c678a4 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-network/compute-nova.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-network/compute-nova.pp @@ -4,22 +4,29 @@ $use_neutron = hiera('use_neutron', false) if $use_neutron { include nova::params - $neutron_config = hiera_hash('neutron_config', {}) + $neutron_config = hiera_hash('neutron_config', {}) $neutron_integration_bridge = 'br-int' - $nova_hash = hiera_hash('nova', {}) - $libvirt_vif_driver = pick($nova_hash['libvirt_vif_driver'], 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver') + $nova_hash = hiera_hash('nova', {}) + $libvirt_vif_driver = pick($nova_hash['libvirt_vif_driver'], 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver') - $management_vip = hiera('management_vip') - $service_endpoint = hiera('service_endpoint', $management_vip) - $neutron_endpoint = hiera('neutron_endpoint', $management_vip) - $admin_password = try_get_value($neutron_config, 'keystone/admin_password') - $admin_tenant_name = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') - $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') - $region_name = hiera('region', 'RegionOne') - $auth_api_version = 'v2.0' - $admin_identity_uri = "http://${service_endpoint}:35357" - $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" - $neutron_url = "http://${neutron_endpoint}:9696" + $management_vip = hiera('management_vip') + $service_endpoint = hiera('service_endpoint', $management_vip) + $admin_password = try_get_value($neutron_config, 'keystone/admin_password') + $admin_tenant_name = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') + $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') + $region_name = hiera('region', 'RegionOne') + $auth_api_version = 'v2.0' + $ssl_hash = hiera_hash('use_ssl', {}) + + $admin_identity_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http') 
+ $admin_identity_address = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip]) + + $neutron_internal_protocol = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'protocol', 'http') + $neutron_endpoint = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'hostname', [hiera('neutron_endpoint', ''), $management_vip]) + + $admin_identity_uri = "${admin_identity_protocol}://${admin_identity_address}:35357" + $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" + $neutron_url = "${neutron_internal_protocol}://${neutron_endpoint}:9696" service { 'libvirt' : ensure => 'running', diff --git a/deployment/puppet/osnailyfacter/modular/openstack-network/keystone.pp b/deployment/puppet/osnailyfacter/modular/openstack-network/keystone.pp index 937b42b7d9..7081a5d6fc 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-network/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-network/keystone.pp @@ -3,17 +3,19 @@ notice('MODULAR: openstack-network/keystone.pp') $use_neutron = hiera('use_neutron', false) $neutron_hash = hiera_hash('quantum_settings', {}) $public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') $public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? 
{ - true => 'https', - default => 'http', -} -$admin_address = hiera('management_vip') -$admin_protocol = 'http' +$ssl_hash = hiera_hash('use_ssl', {}) + +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'neutron', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'neutron', 'public', 'hostname', [$public_vip]) + +$internal_protocol = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'hostname', [$management_vip]) + +$admin_protocol = get_ssl_property($ssl_hash, {}, 'neutron', 'admin', 'protocol', 'http') +$admin_address = get_ssl_property($ssl_hash, {}, 'neutron', 'admin', 'hostname', [$management_vip]) + $region = pick($neutron_hash['region'], hiera('region', 'RegionOne')) $password = $neutron_hash['keystone']['admin_password'] @@ -27,11 +29,12 @@ $tenant = pick($neutron_hash['tenant'], 'services') $port = '9696' $public_url = "${public_protocol}://${public_address}:${port}" -$internal_url = "${admin_protocol}://${admin_address}:${port}" +$internal_url = "${internal_protocol}://${internal_address}:${port}" $admin_url = "${admin_protocol}://${admin_address}:${port}" validate_string($public_address) +validate_string($internal_address) validate_string($password) if $use_neutron { diff --git a/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp b/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp index 0767021873..9f407a2847 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp @@ -1,41 +1,51 @@ notice('MODULAR: openstack-network/server-config.pp') -$use_neutron = hiera('use_neutron', false) +$use_neutron = hiera('use_neutron', false) class neutron { } class { 'neutron' : } if $use_neutron { - $neutron_config = hiera_hash('neutron_config') - 
$neutron_server_enable = pick($neutron_config['neutron_server_enable'], true) - $database_vip = hiera('database_vip') - $management_vip = hiera('management_vip') - $service_endpoint = hiera('service_endpoint', $management_vip) - $nova_endpoint = hiera('nova_endpoint', $management_vip) - $nova_hash = hiera_hash('nova', { }) - $primary_controller = hiera('primary_controller', false) + $neutron_config = hiera_hash('neutron_config') + $neutron_server_enable = pick($neutron_config['neutron_server_enable'], true) + $database_vip = hiera('database_vip') + $management_vip = hiera('management_vip') + $service_endpoint = hiera('service_endpoint', $management_vip) + $nova_endpoint = hiera('nova_endpoint', $management_vip) + $nova_hash = hiera_hash('nova', { }) + $primary_controller = hiera('primary_controller', false) - $neutron_db_password = $neutron_config['database']['passwd'] - $neutron_db_user = try_get_value($neutron_config, 'database/user', 'neutron') - $neutron_db_name = try_get_value($neutron_config, 'database/name', 'neutron') - $neutron_db_host = try_get_value($neutron_config, 'database/host', $database_vip) + $neutron_db_password = $neutron_config['database']['passwd'] + $neutron_db_user = try_get_value($neutron_config, 'database/user', 'neutron') + $neutron_db_name = try_get_value($neutron_config, 'database/name', 'neutron') + $neutron_db_host = try_get_value($neutron_config, 'database/host', $database_vip) - $neutron_db_uri = "mysql://${neutron_db_user}:${neutron_db_password}@${neutron_db_host}/${neutron_db_name}?&read_timeout=60" + $neutron_db_uri = "mysql://${neutron_db_user}:${neutron_db_password}@${neutron_db_host}/${neutron_db_name}?&read_timeout=60" - $auth_password = $neutron_config['keystone']['admin_password'] - $auth_user = pick($neutron_config['keystone']['admin_user'], 'neutron') - $auth_tenant = pick($neutron_config['keystone']['admin_tenant'], 'services') - $auth_region = hiera('region', 'RegionOne') - $auth_endpoint_type = 'internalURL' + 
$auth_password = $neutron_config['keystone']['admin_password'] + $auth_user = pick($neutron_config['keystone']['admin_user'], 'neutron') + $auth_tenant = pick($neutron_config['keystone']['admin_tenant'], 'services') + $auth_region = hiera('region', 'RegionOne') + $auth_endpoint_type = 'internalURL' - $auth_api_version = 'v2.0' - $identity_uri = "http://${service_endpoint}:5000/" - #$auth_url = "${identity_uri}${auth_api_version}" - $nova_admin_auth_url = "http://${service_endpoint}:35357/" - $nova_url = "http://${nova_endpoint}:8774/v2" + $ssl_hash = hiera_hash('use_ssl', {}) - $service_workers = pick($neutron_config['workers'], min(max($::processorcount, 2), 16)) + $internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') + $internal_auth_endpoint = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint, $management_vip]) + + $admin_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http') + $admin_auth_endpoint = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip]) + + $nova_internal_protocol = get_ssl_property($ssl_hash, {}, 'nova', 'internal', 'protocol', 'http') + $nova_internal_endpoint = get_ssl_property($ssl_hash, {}, 'nova', 'internal', 'hostname', [$nova_endpoint]) + + $auth_api_version = 'v2.0' + $identity_uri = "${internal_auth_protocol}://${internal_auth_endpoint}:5000/" + $nova_admin_auth_url = "${admin_auth_protocol}://${admin_auth_endpoint}:35357/" + $nova_url = "${nova_internal_protocol}://${nova_internal_endpoint}:8774/v2" + + $service_workers = pick($neutron_config['workers'], min(max($::processorcount, 2), 16)) $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) $dvr = pick($neutron_advanced_config['neutron_dvr'], false) diff --git a/deployment/puppet/osnailyfacter/modular/openstack-network/server-nova.pp 
b/deployment/puppet/osnailyfacter/modular/openstack-network/server-nova.pp index 721a8c2f66..208b303285 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-network/server-nova.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-network/server-nova.pp @@ -3,21 +3,29 @@ notice('MODULAR: openstack-network/server-nova.pp') $use_neutron = hiera('use_neutron', false) if $use_neutron { - $neutron_config = hiera_hash('neutron_config') - $management_vip = hiera('management_vip') - $service_endpoint = hiera('service_endpoint', $management_vip) - $neutron_endpoint = hiera('neutron_endpoint', $management_vip) - $admin_password = try_get_value($neutron_config, 'keystone/admin_password') - $admin_tenant_name = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') - $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') - $region_name = hiera('region', 'RegionOne') - $auth_api_version = 'v2.0' - $admin_identity_uri = "http://${service_endpoint}:35357" - $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" - $neutron_url = "http://${neutron_endpoint}:9696" - $neutron_ovs_bridge = 'br-int' - $conf_nova = pick($neutron_config['conf_nova'], true) - $floating_net = pick($neutron_config['default_floating_net'], 'net04_ext') + $neutron_config = hiera_hash('neutron_config') + $management_vip = hiera('management_vip') + $service_endpoint = hiera('service_endpoint', $management_vip) + $neutron_endpoint = hiera('neutron_endpoint', $management_vip) + $admin_password = try_get_value($neutron_config, 'keystone/admin_password') + $admin_tenant_name = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') + $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') + $region_name = hiera('region', 'RegionOne') + $auth_api_version = 'v2.0' + $ssl_hash = hiera_hash('use_ssl', {}) + + $admin_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http') + 
$admin_auth_endpoint = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [hiera('service_endpoint', ''), $management_vip]) + + $neutron_internal_protocol = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'protocol', 'http') + $neutron_internal_endpoint = get_ssl_property($ssl_hash, {}, 'neutron', 'internal', 'hostname', [$neutron_endpoint]) + + $admin_identity_uri = "${admin_auth_protocol}://${admin_auth_endpoint}:35357" + $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" + $neutron_url = "${neutron_internal_protocol}://${neutron_internal_endpoint}:9696" + $neutron_ovs_bridge = 'br-int' + $conf_nova = pick($neutron_config['conf_nova'], true) + $floating_net = pick($neutron_config['default_floating_net'], 'net04_ext') class { 'nova::network::neutron' : neutron_admin_password => $admin_password, diff --git a/deployment/puppet/osnailyfacter/modular/roles/cinder.pp b/deployment/puppet/osnailyfacter/modular/roles/cinder.pp index 873186417b..b32afb82b8 100644 --- a/deployment/puppet/osnailyfacter/modular/roles/cinder.pp +++ b/deployment/puppet/osnailyfacter/modular/roles/cinder.pp @@ -46,11 +46,21 @@ $db_host = pick($cinder_hash['db_host'], hiera('database_ $cinder_db_user = pick($cinder_hash['db_user'], 'cinder') $cinder_db_name = pick($cinder_hash['db_name'], 'cinder') +$ssl_hash = hiera_hash('use_ssl', {}) $service_endpoint = hiera('service_endpoint') -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") -$keystone_auth_protocol = 'http' -$keystone_auth_host = $service_endpoint +$keystone_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$keystone_auth_host = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint, $management_vip]) + +$glance_protocol = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'protocol', 'http') +$glance_endpoint = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'hostname', [$management_vip]) 
+$glance_internal_ssl = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'usage', false) +if $glance_internal_ssl { + $glance_api_servers = "${glance_protocol}://${glance_endpoint}:9292" +} else { + $glance_api_servers = hiera('glance_api_servers', "http://${management_vip}:9292") +} + $service_port = '5000' $auth_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" @@ -275,7 +285,6 @@ class { 'openstack::cinder': manage_volumes => $manage_volumes, iser => $storage_hash['iser'], enabled => true, - auth_host => $service_endpoint, iscsi_bind_host => $storage_address, keystone_user => $keystone_user, keystone_tenant => $keystone_tenant, diff --git a/deployment/puppet/osnailyfacter/modular/roles/compute.pp b/deployment/puppet/osnailyfacter/modular/roles/compute.pp index db52e24de8..1f2cc1e4cf 100644 --- a/deployment/puppet/osnailyfacter/modular/roles/compute.pp +++ b/deployment/puppet/osnailyfacter/modular/roles/compute.pp @@ -51,16 +51,23 @@ $syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0 $nova_rate_limits = hiera('nova_rate_limits') $nova_report_interval = hiera('nova_report_interval') $nova_service_down_time = hiera('nova_service_down_time') -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") $config_drive_format = 'vfat' - $public_ssl_hash = hiera('public_ssl') -$vncproxy_host = $public_ssl_hash['services'] ? 
{ - true => $public_ssl_hash['hostname'], - default => $public_vip, +$ssl_hash = hiera_hash('use_ssl', {}) + +$glance_protocol = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'protocol', 'http') +$glance_endpoint = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'hostname', [hiera('glance_endpoint', $management_vip)]) +$glance_internal_ssl = get_ssl_property($ssl_hash, {}, 'glance', 'internal', 'usage', false) +if $glance_internal_ssl { + $glance_api_servers = "${glance_protocol}://${glance_endpoint}:9292" +} else { + $glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") } -$db_host = pick($nova_hash['db_host'], $database_vip) +$vncproxy_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'protocol', [$nova_hash['vncproxy_protocol'], 'http']) +$vncproxy_host = get_ssl_property($ssl_hash, $public_ssl_hash, 'nova', 'public', 'hostname', [$public_vip]) + +$db_host = pick($nova_hash['db_host'], $database_vip) $block_device_allocate_retries = hiera('block_device_allocate_retries', 300) $block_device_allocate_retries_interval = hiera('block_device_allocate_retries_interval', 3) @@ -246,6 +253,7 @@ class { 'openstack::compute': rabbit_ha_queues => $rabbit_ha_queues, auto_assign_floating_ip => $auto_assign_floating_ip, glance_api_servers => $glance_api_servers, + vncproxy_protocol => $vncproxy_protocol, vncproxy_host => $vncproxy_host, vncserver_listen => '0.0.0.0', migration_support => true, diff --git a/deployment/puppet/osnailyfacter/modular/ssl/ssl_add_trust_chain.pp b/deployment/puppet/osnailyfacter/modular/ssl/ssl_add_trust_chain.pp index 231088d0ac..fe6a63e45d 100644 --- a/deployment/puppet/osnailyfacter/modular/ssl/ssl_add_trust_chain.pp +++ b/deployment/puppet/osnailyfacter/modular/ssl/ssl_add_trust_chain.pp @@ -1,31 +1,57 @@ notice('MODULAR: ssl_add_trust_chain.pp') -$public_ssl_hash = hiera('public_ssl') -$ip = hiera('public_vip') +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', 
{}) + +define file_link { + $service = $name + if !empty(file("/etc/pki/tls/certs/public_${service}.pem",'/dev/null')) { + file { "/usr/local/share/ca-certificates/${service}_public_haproxy.crt": + ensure => link, + target => "/etc/pki/tls/certs/public_${service}.pem", + } + } +} + +if !empty($ssl_hash) { + $services = [ 'horizon', 'keystone', 'nova', 'heat', 'glance', 'cinder', 'neutron', 'swift', 'sahara', 'murano', 'ceilometer', 'radosgw'] + + file_link { $services: } + +} elsif !empty($public_ssl_hash) { + case $::osfamily { + /(?i)redhat/: { + file { '/etc/pki/ca-trust/source/anchors/public_haproxy.pem': + ensure => 'link', + target => '/etc/pki/tls/certs/public_haproxy.pem', + } + } + + /(?i)debian/: { + file { '/usr/local/share/ca-certificates/public_haproxy.crt': + ensure => 'link', + target => '/etc/pki/tls/certs/public_haproxy.pem', + } + } + default: { + fail("Unsupported OS: ${::osfamily}/${::operatingsystem}") + } + } +} case $::osfamily { /(?i)redhat/: { - file { '/etc/pki/ca-trust/source/anchors/public_haproxy.pem': - ensure => 'link', - target => '/etc/pki/tls/certs/public_haproxy.pem', - }-> - exec { 'enable_trust': path => '/bin:/usr/bin:/sbin:/usr/sbin', command => 'update-ca-trust force-enable', }-> - exec { 'add_trust': path => '/bin:/usr/bin:/sbin:/usr/sbin', - command => 'update-ca-trust extract', + command => 'update-ca-certificates', } } - /(?i)debian/: { - file { '/usr/local/share/ca-certificates/public_haproxy.crt': - ensure => 'link', - target => '/etc/pki/tls/certs/public_haproxy.pem', - }-> + /(?i)debian/: { exec { 'add_trust': path => '/bin:/usr/bin:/sbin:/usr/sbin', command => 'update-ca-certificates', @@ -36,7 +62,5 @@ case $::osfamily { } } -host { $public_ssl_hash['hostname']: - ensure => present, - ip => $ip, -} + +File <| |> -> Exec['add_trust'] diff --git a/deployment/puppet/osnailyfacter/modular/ssl/ssl_dns_setup.pp b/deployment/puppet/osnailyfacter/modular/ssl/ssl_dns_setup.pp new file mode 100644 index 0000000000..59a1a6ffe8 
--- /dev/null +++ b/deployment/puppet/osnailyfacter/modular/ssl/ssl_dns_setup.pp @@ -0,0 +1,102 @@ +notice('MODULAR: ssl_dns_setup.pp') + +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') +$openstack_service_endpoints = hiera_hash('openstack_service_endpoints', {}) + +$services = [ 'horizon', 'keystone', 'nova', 'heat', 'glance', 'cinder', 'neutron', 'swift', 'sahara', 'murano', 'ceilometer', 'radosgw'] + +#TODO(sbog): convert it to '.each' when moving to Puppet 4 +define hosts ( + $ssl_hash, + ){ + $service = $name + $public_vip = hiera('public_vip') + $management_vip = hiera('management_vip') + + $public_hostname = try_get_value($ssl_hash, "${service}_public_hostname", "") + $internal_hostname = try_get_value($ssl_hash, "${service}_internal_hostname", "") + $admin_hostname = try_get_value($ssl_hash, "${service}_admin_hostname", $internal_hostname) + + $service_public_ip = try_get_value($ssl_hash, "${service}_public_ip", "") + if !empty($service_public_ip) { + $public_ip = $service_public_ip + } else { + $public_ip = $public_vip + } + + $service_internal_ip = try_get_value($ssl_hash, "${service}_internal_ip", "") + if !empty($service_internal_ip) { + $internal_ip = $service_internal_ip + } else { + $internal_ip = $management_vip + } + + $service_admin_ip = try_get_value($ssl_hash, "${service}_admin_ip", "") + if !empty($service_admin_ip) { + $admin_ip = $service_admin_ip + } else { + $admin_ip = $management_vip + } + + # We always need to set public hostname resolution + if !empty($public_hostname) and !defined(Host[$public_hostname]) { + host { $public_hostname: + name => $public_hostname, + ensure => present, + ip => $public_ip, + } + } + + if ($public_hostname == $internal_hostname) and ($public_hostname == $admin_hostname) { + notify{"All ${service} hostnames are equal, just public one inserted to DNS":} + } + elsif $public_hostname == $internal_hostname {
+ if !empty($admin_hostname) and !defined(Host[$admin_hostname]) { + host { $admin_hostname: + name => $admin_hostname, + ensure => present, + ip => $admin_ip, + } + } + } + elsif ($public_hostname == $admin_hostname) or ($internal_hostname == $admin_hostname) { + if !empty($internal_hostname) and !defined(Host[$internal_hostname]) { + host { $internal_hostname: + name => $internal_hostname, + ensure => present, + ip => $internal_ip, + } + } + } + else { + if !empty($admin_hostname) and !defined(Host[$admin_hostname]) { + host { $admin_hostname: + name => $admin_hostname, + ensure => present, + ip => $admin_ip, + } + } + if !empty($internal_hostname) and !defined(Host[$internal_hostname]) { + host { $internal_hostname: + name => $internal_hostname, + ensure => present, + ip => $internal_ip, + } + } + } +} + +if !empty($ssl_hash) { + + hosts { $services: + ssl_hash => $ssl_hash, + } +} elsif !empty($public_ssl_hash) { + host { $public_ssl_hash['hostname']: + ensure => present, + ip => $public_vip, + } +} diff --git a/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving.pp b/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving.pp index 862b1f217e..6e1eefb813 100644 --- a/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving.pp +++ b/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving.pp @@ -1,6 +1,7 @@ notice('MODULAR: ssl_keys_saving.pp') $public_ssl_hash = hiera_hash('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) $pub_certificate_content = $public_ssl_hash['cert_data']['content'] $base_path = "/etc/pki/tls/certs" $pki_path = [ "/etc/pki", "/etc/pki/tls" ] @@ -16,7 +17,57 @@ file { [ $pki_path, $base_path, $astute_base_path ]: ensure => directory, } -file { ["$base_path/public_haproxy.pem", "$astute_base_path/public_haproxy.pem"]: - ensure => present, - content => $pub_certificate_content, +#TODO(sbog): convert it to '.each' syntax when moving to Puppet 4 +define cert_file ( + $ssl_hash, + $base_path, + $astute_base_path, + ){ + 
$service = $name + + $public_service = try_get_value($ssl_hash, "${service}_public", false) + $public_usercert = try_get_value($ssl_hash, "${service}_public_usercert", false) + $public_certdata = try_get_value($ssl_hash, "${service}_public_certdata", "") + $internal_service = try_get_value($ssl_hash, "${service}_internal", false) + $internal_usercert = try_get_value($ssl_hash, "${service}_internal_usercert", false) + $internal_certdata = try_get_value($ssl_hash, "${service}_internal_certdata", "") + $admin_service = try_get_value($ssl_hash, "${service}_admin", false) + $admin_usercert = try_get_value($ssl_hash, "${service}_admin_usercert", false) + $admin_certdata = try_get_value($ssl_hash, "${service}_admin_certdata", "") + + if $ssl_hash["${service}"] { + if $public_service and $public_usercert and !empty($public_certdata) { + file { ["${base_path}/public_${service}.pem", "${astute_base_path}/public_${service}.pem"]: + ensure => present, + content => $public_certdata, + } + } + if $internal_service and $internal_usercert and !empty($internal_certdata) { + file { ["${base_path}/internal_${service}.pem", "${astute_base_path}/internal_${service}.pem"]: + ensure => present, + content => $internal_certdata, + } + } + if $admin_service and $admin_usercert and !empty($admin_certdata) { + file { ["${base_path}/admin_${service}.pem", "${astute_base_path}/admin_${service}.pem"]: + ensure => present, + content => $admin_certdata, + } + } + } +} + +if !empty($ssl_hash) { + $services = [ 'horizon', 'keystone', 'nova', 'heat', 'glance', 'cinder', 'neutron', 'swift', 'sahara', 'murano', 'ceilometer', 'radosgw'] + + cert_file { $services: + ssl_hash => $ssl_hash, + base_path => $base_path, + astute_base_path => $astute_base_path, + } +} elsif !empty($public_ssl_hash) { + file { ["$base_path/public_haproxy.pem", "$astute_base_path/public_haproxy.pem"]: + ensure => present, + content => $pub_certificate_content, + } } diff --git 
a/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving_pre.rb b/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving_pre.rb index 546ca3e65a..307f16ea7d 100644 --- a/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving_pre.rb +++ b/deployment/puppet/osnailyfacter/modular/ssl/ssl_keys_saving_pre.rb @@ -3,7 +3,7 @@ require File.join File.dirname(__FILE__), '../test_common.rb' class SslKeysSavingPreTest < Test::Unit::TestCase def test_ssl_data - assert TestCommon::Settings.lookup('public_ssl'), 'No SSL hash found in Hiera!' + assert TestCommon::Settings.lookup('use_ssl'), 'No SSL hash found in Hiera!' end end diff --git a/deployment/puppet/osnailyfacter/modular/ssl/tasks.yaml b/deployment/puppet/osnailyfacter/modular/ssl/tasks.yaml index 662307f1d2..c41cd46c37 100644 --- a/deployment/puppet/osnailyfacter/modular/ssl/tasks.yaml +++ b/deployment/puppet/osnailyfacter/modular/ssl/tasks.yaml @@ -23,3 +23,14 @@ timeout: 3600 test_pre: cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ssl/ssl_keys_saving_pre.rb + +- id: ssl-dns-setup + type: puppet + groups: [primary-controller, controller] + requires: [firewall, ssl-add-trust-chain] + condition: "settings:public_ssl.horizon.value == true or settings:public_ssl.services.value == true" + required_for: [deploy_end] + parameters: + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ssl/ssl_dns_setup.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 diff --git a/deployment/puppet/osnailyfacter/modular/swift/keystone.pp b/deployment/puppet/osnailyfacter/modular/swift/keystone.pp index 325a83b1e4..a807bf1edf 100644 --- a/deployment/puppet/osnailyfacter/modular/swift/keystone.pp +++ b/deployment/puppet/osnailyfacter/modular/swift/keystone.pp @@ -1,40 +1,34 @@ notice('MODULAR: swift/keystone.pp') -$swift_hash = hiera_hash('swift', {}) -$public_vip = hiera('public_vip') +$swift_hash = hiera_hash('swift', {}) +$public_vip = hiera('public_vip') # Allow a plugin to override the admin address using 
swift_hash: -$admin_address = pick($swift_hash['management_vip'], hiera('management_vip')) -$region = pick($swift_hash['region'], hiera('region', 'RegionOne')) -$public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - # Allow a plugin to override the public address using swift_hash: - # TODO(sbog): with this approach you must use IP address in SAN field of - # certificate on external swift. Change this in next iterations of TLS - # implementation. - true => pick($swift_hash['public_vip'], - $public_ssl_hash['hostname']), - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} +$management_vip = pick($swift_hash['management_vip'], hiera('management_vip')) +$region = pick($swift_hash['region'], hiera('region', 'RegionOne')) +$public_ssl_hash = hiera('public_ssl') +$ssl_hash = hiera_hash('use_ssl', {}) -$password = $swift_hash['user_password'] -$auth_name = pick($swift_hash['auth_name'], 'swift') -$configure_endpoint = pick($swift_hash['configure_endpoint'], true) -$service_name = pick($swift_hash['service_name'], 'swift') -$tenant = pick($swift_hash['tenant'], 'services') +$public_protocol = get_ssl_property($ssl_hash, $public_ssl_hash, 'swift', 'public', 'protocol', 'http') +$public_address = get_ssl_property($ssl_hash, $public_ssl_hash, 'swift', 'public', 'hostname', [$swift_hash['public_vip'], $public_vip]) + +$internal_protocol = get_ssl_property($ssl_hash, {}, 'swift', 'internal', 'protocol', 'http') +$internal_address = get_ssl_property($ssl_hash, {}, 'swift', 'internal', 'hostname', [$management_vip]) + +$password = $swift_hash['user_password'] +$auth_name = pick($swift_hash['auth_name'], 'swift') +$configure_endpoint = pick($swift_hash['configure_endpoint'], true) +$service_name = pick($swift_hash['service_name'], 'swift') +$tenant = pick($swift_hash['tenant'], 'services') validate_string($public_address) validate_string($password) $public_url = 
"${public_protocol}://${public_address}:8080/v1/AUTH_%(tenant_id)s" -$admin_url = "http://${admin_address}:8080/v1/AUTH_%(tenant_id)s" +$admin_url = "${internal_protocol}://${internal_address}:8080/v1/AUTH_%(tenant_id)s" # Amazon S3 endpoints $public_url_s3 = "${public_protocol}://${public_address}:8080" -$admin_url_s3 = "http://${admin_address}:8080" +$admin_url_s3 = "${internal_protocol}://${internal_address}:8080" class { '::swift::keystone::auth': password => $password, diff --git a/deployment/puppet/osnailyfacter/modular/swift/swift.pp b/deployment/puppet/osnailyfacter/modular/swift/swift.pp index aec2433750..b43fabef47 100644 --- a/deployment/puppet/osnailyfacter/modular/swift/swift.pp +++ b/deployment/puppet/osnailyfacter/modular/swift/swift.pp @@ -1,41 +1,48 @@ notice('MODULAR: swift.pp') -$network_scheme = hiera_hash('network_scheme') -$network_metadata = hiera_hash('network_metadata') +$network_scheme = hiera_hash('network_scheme') +$network_metadata = hiera_hash('network_metadata') prepare_network_config($network_scheme) -$swift_hash = hiera_hash('swift_hash') -$swift_master_role = hiera('swift_master_role', 'primary-controller') -$swift_nodes = hiera_hash('swift_nodes', {}) -$swift_operator_roles = pick($swift_hash['swift_operator_roles'], ['admin', 'SwiftOperator']) -$swift_proxies_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxies', {}), 'swift/api')) +$swift_hash = hiera_hash('swift_hash') +$swift_master_role = hiera('swift_master_role', 'primary-controller') +$swift_nodes = hiera_hash('swift_nodes', {}) +$swift_operator_roles = pick($swift_hash['swift_operator_roles'], ['admin', 'SwiftOperator']) +$swift_proxies_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxies', {}), 'swift/api')) # todo(sv) replace 'management' to mgmt/memcache -$memcaches_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxy_caches', {}), 'management')) -$is_primary_swift_proxy = 
hiera('is_primary_swift_proxy', false) -$proxy_port = hiera('proxy_port', '8080') -$storage_hash = hiera_hash('storage_hash') -$mp_hash = hiera('mp') -$management_vip = hiera('management_vip') -$public_vip = hiera('public_vip') -$swift_api_ipaddr = get_network_role_property('swift/api', 'ipaddr') -$swift_storage_ipaddr = get_network_role_property('swift/replication', 'ipaddr') -$debug = pick($swift_hash['debug'], hiera('debug', false)) -$verbose = pick($swift_hash['verbose'], hiera('verbose', false)) +$memcaches_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxy_caches', {}), 'management')) +$is_primary_swift_proxy = hiera('is_primary_swift_proxy', false) +$proxy_port = hiera('proxy_port', '8080') +$storage_hash = hiera_hash('storage_hash') +$mp_hash = hiera('mp') +$management_vip = hiera('management_vip') +$public_vip = hiera('public_vip') +$swift_api_ipaddr = get_network_role_property('swift/api', 'ipaddr') +$swift_storage_ipaddr = get_network_role_property('swift/replication', 'ipaddr') +$debug = pick($swift_hash['debug'], hiera('debug', false)) +$verbose = pick($swift_hash['verbose'], hiera('verbose', false)) # NOTE(mattymo): Changing ring_part_power or part_hours on redeploy leads to data loss -$ring_part_power = pick($swift_hash['ring_part_power'], 10) -$ring_min_part_hours = hiera('swift_ring_min_part_hours', 1) -$deploy_swift_storage = hiera('deploy_swift_storage', true) -$deploy_swift_proxy = hiera('deploy_swift_proxy', true) -$create_keystone_auth = pick($swift_hash['create_keystone_auth'], true) +$ring_part_power = pick($swift_hash['ring_part_power'], 10) +$ring_min_part_hours = hiera('swift_ring_min_part_hours', 1) +$deploy_swift_storage = hiera('deploy_swift_storage', true) +$deploy_swift_proxy = hiera('deploy_swift_proxy', true) +$create_keystone_auth = pick($swift_hash['create_keystone_auth'], true) #Keystone settings -$service_endpoint = hiera('service_endpoint') -$keystone_user = pick($swift_hash['user'], 'swift') 
-$keystone_password = pick($swift_hash['user_password'], 'passsword') -$keystone_tenant = pick($swift_hash['tenant'], 'services') -$keystone_protocol = pick($swift_hash['auth_protocol'], 'http') -$region = hiera('region', 'RegionOne') -$service_workers = pick($swift_hash['workers'], +$service_endpoint = hiera('service_endpoint') +$keystone_user = pick($swift_hash['user'], 'swift') +$keystone_password = pick($swift_hash['user_password'], 'passsword') +$keystone_tenant = pick($swift_hash['tenant'], 'services') +$keystone_protocol = pick($swift_hash['auth_protocol'], 'http') +$region = hiera('region', 'RegionOne') +$service_workers = pick($swift_hash['workers'], min(max($::processorcount, 2), 16)) +$ssl_hash = hiera_hash('use_ssl', {}) + +$keystone_internal_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http') +$keystone_endpoint = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [hiera('service_endpoint', ''), $management_vip]) + +$swift_internal_protocol = get_ssl_property($ssl_hash, {}, 'swift', 'internal', 'protocol', 'http') +$swift_internal_endpoint = get_ssl_property($ssl_hash, {}, 'swift', 'internal', 'hostname', [$swift_api_ipaddr, $management_vip]) # Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { @@ -112,13 +119,13 @@ if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$stora auth_protocol => $keystone_protocol, } -> class { 'openstack::swift::status': - endpoint => "http://${swift_api_ipaddr}:${proxy_port}", + endpoint => "${swift_internal_protocol}://${swift_internal_endpoint}:${proxy_port}", vip => $management_vip, only_from => "127.0.0.1 240.0.0.2 ${sto_nets} ${man_nets}", con_timeout => 5 } -> class { 'swift::dispersion': - auth_url => "http://$service_endpoint:5000/v2.0/", + auth_url => 
"${keystone_internal_protocol}://${keystone_endpoint}:5000/v2.0/", auth_user => $keystone_user, auth_tenant => $keystone_tenant, auth_pass => $keystone_password, diff --git a/deployment/puppet/osnailyfacter/spec/fixtures/manifests/site.pp b/deployment/puppet/osnailyfacter/spec/fixtures/manifests/site.pp new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deployment/puppet/osnailyfacter/spec/functions/get_ssl_property_spec.rb b/deployment/puppet/osnailyfacter/spec/functions/get_ssl_property_spec.rb new file mode 100644 index 0000000000..2c94b47cd3 --- /dev/null +++ b/deployment/puppet/osnailyfacter/spec/functions/get_ssl_property_spec.rb @@ -0,0 +1,124 @@ +require 'spec_helper' + +describe 'get_ssl_property' do + + let(:public_ssl_hash) do + { + 'horizon' => true, + 'services' => true, + 'cert_source' => 'self_signed', + 'cert_data' => { + 'content' => 'somedataaboutyourkeypair' + }, + 'hostname' => 'public.fuel.local' + } + end + + let(:public_ssl_hash_disabled) do + { + 'horizon' => false, + 'services' => false, + 'cert_source' => 'self_signed', + 'cert_data' => { + 'content' => 'somedataaboutyourkeypair' + }, + 'hostname' => 'public.fuel.local' + } + end + + let(:use_ssl_hash) do + { + 'horizon' => true, + 'horizon_public' => true, + 'horizon_public_hostname' => 'horizon.public.fuel.local', + 'horizon_public_usercert' => true, + 'horizon_public_certdata' => 'somethinglikeacertificateforhorizon', + 'keystone' => true, + 'keystone_public' => true, + 'keystone_public_ip' => '10.10.10.10', + 'keystone_public_hostname' => 'keystone.public.fuel.local', + 'keystone_public_usercert' => true, + 'keystone_public_certdata' => 'somethinglikeacertificateforkeystone', + 'keystone_internal' => true, + 'keystone_internal_ip' => '20.20.20.20', + 'keystone_internal_hostname' => 'keystone.internal.fuel.local', + 'keystone_internal_usercert' => true, + 'keystone_internal_certdata' => 'somethinglikeacertificateforkeystone', + 'keystone_admin' => true, + 'keystone_admin_ip' 
=> '30.30.30.30', + 'keystone_admin_hostname' => 'keystone.admin.fuel.local', + 'keystone_admin_usercert' => true, + 'keystone_admin_certdata' => 'somethinglikeacertificateforkeystone', + } + end + + context 'when wrong data provided' do + it 'should exist' do + is_expected.not_to eq(nil) + end + + it 'should fail if first argument is not hash' do + is_expected.to run.with_params(10, public_ssl_hash, 'test', 'test', 'test', false).and_raise_error(Puppet::Error, /hash as a first argument/) + end + + it 'should fail if second argument is not hash' do + is_expected.to run.with_params(use_ssl_hash, 10, 'test', 'test', 'test', false).and_raise_error(Puppet::Error, /hash as a second argument/) + end + + it 'should fail if third argument is empty' do + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash, '', 'test', 'test', false).and_raise_error(Puppet::Error, /'name' for service/) + end + + it 'should fail if fourth argument is empty' do + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash, 'test', '', 'test', false).and_raise_error(Puppet::Error, /'type' for service/) + end + + it 'should fail if fifth argument is empty' do + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash, 'test', 'test', '', false).and_raise_error(Puppet::Error, /'type' for resource/) + end + + it 'should fail if fifth argument is wrong' do + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash, 'test', 'test', 'wrong', false).and_raise_error(Puppet::Error, /should choose/) + end + + end + + context 'when first hash is empty' do + it 'should get data from auxilary hash for public endpoints' do + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'public', 'usage', false).and_return(true) + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'public', 'protocol', 'http').and_return('https') + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'public', 'hostname', 'internal.fuel.local').and_return('public.fuel.local') + 
is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'public', 'path', '/root').and_return('/var/lib/astute/haproxy/public_haproxy.pem') + end + + it 'should get data from default values for non-public endpoints when empty hashes provided' do + is_expected.to run.with_params({}, {}, 'keystone', 'internal', 'usage', true).and_return(true) + is_expected.to run.with_params({}, {}, 'keystone', 'internal', 'protocol', 'https').and_return('https') + is_expected.to run.with_params({}, {}, 'keystone', 'internal', 'hostname', ['keystone.internal.fuel.local']).and_return('keystone.internal.fuel.local') + is_expected.to run.with_params({}, {}, 'keystone', 'internal', 'path', '/var/lib/astute/haproxy/internal_keystone.pem').and_return('/var/lib/astute/haproxy/internal_keystone.pem') + is_expected.to run.with_params({}, {}, 'keystone', 'admin', 'usage', true).and_return(true) + is_expected.to run.with_params({}, {}, 'keystone', 'admin', 'protocol', 'https').and_return('https') + is_expected.to run.with_params({}, {}, 'keystone', 'admin', 'hostname', ['keystone.admin.fuel.local']).and_return('keystone.admin.fuel.local') + is_expected.to run.with_params({}, {}, 'keystone', 'admin', 'path', '/var/lib/astute/haproxy/admin_keystone.pem').and_return('/var/lib/astute/haproxy/admin_keystone.pem') + end + + it 'should get data from default values for non-public endpoints when public_ssl hash provided' do + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'internal', 'usage', true).and_return(true) + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'internal', 'protocol', 'https').and_return('https') + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'internal', 'hostname', ['keystone.internal.fuel.local']).and_return('keystone.internal.fuel.local') + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'internal', 'path', 
'/var/lib/astute/haproxy/internal_keystone.pem').and_return('/var/lib/astute/haproxy/internal_keystone.pem') + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'admin', 'usage', true).and_return(true) + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'admin', 'protocol', 'https').and_return('https') + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'admin', 'hostname', ['keystone.admin.fuel.local']).and_return('keystone.admin.fuel.local') + is_expected.to run.with_params({}, public_ssl_hash, 'keystone', 'admin', 'path', '/var/lib/astute/haproxy/admin_keystone.pem').and_return('/var/lib/astute/haproxy/admin_keystone.pem') + end + + it 'should get data from first hash when it has data and both hashes provided' do + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash_disabled, 'keystone', 'public', 'usage', false).and_return(true) + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash_disabled, 'keystone', 'public', 'protocol', 'http').and_return('https') + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash_disabled, 'keystone', 'public', 'hostname', ['no default']).and_return('keystone.public.fuel.local') + is_expected.to run.with_params(use_ssl_hash, public_ssl_hash_disabled, 'keystone', 'public', 'path', ['no default']).and_return('/var/lib/astute/haproxy/public_keystone.pem') + end + + end +end diff --git a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-compute.overridden_ssl.yaml b/tests/noop/astute.yaml/neut_vlan.ceph.ceil-compute.overridden_ssl.yaml new file mode 100644 index 0000000000..5b8c1afc21 --- /dev/null +++ b/tests/noop/astute.yaml/neut_vlan.ceph.ceil-compute.overridden_ssl.yaml @@ -0,0 +1,959 @@ +access: + email: admin@localhost + metadata: + label: Access + weight: 10 + password: admin + tenant: admin + user: admin +auth_key: '' +auto_assign_floating_ip: false +base_syslog: + syslog_port: '514' + syslog_server: 10.108.0.2 +ceilometer: + db_password: Toe5phw4 + enabled: true + 
metering_secret: tHq2rcoq + user_password: WBfBSo6U +cinder: + db_password: trj609V8 + fixed_key: 7883d66c643ce9a508ebcd4cd5516fc98814a11276bc98c4e8e671188b54e941 + user_password: sJRfG0GP +cobbler: + profile: ubuntu_1404_x86_64 +corosync: + group: 226.94.1.1 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: '12000' + verified: false +debug: false +deployment_id: 37 +deployment_mode: ha_compact +external_dns: + dns_list: 8.8.8.8, 8.8.4.4 + metadata: + label: Upstream DNS + weight: 90 +external_mongo: + hosts_ip: '' + metadata: + label: External MongoDB + restrictions: + - action: hide + condition: settings:additional_components.mongo.value == false + weight: 20 + mongo_db_name: ceilometer + mongo_password: ceilometer + mongo_replset: '' + mongo_user: ceilometer +external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: 0.pool.ntp.org, 1.pool.ntp.org +use_ssl: + horizon: true + horizon_public: true + horizon_public_hostname: 'horizon.public.fuel.local' + horizon_public_usercert: true + horizon_public_certdata: 'somethinglikeacertificateforhorizon' + keystone: true + keystone_public: true + keystone_public_ip: '10.10.10.10' + keystone_public_hostname: 'keystone.public.fuel.local' + keystone_public_usercert: true + keystone_public_certdata: 'somethinglikeacertificateforkeystone' + keystone_internal: true + keystone_internal_ip: '20.20.20.20' + keystone_internal_hostname: 'keystone.internal.fuel.local' + keystone_internal_usercert: true + keystone_internal_certdata: 'somethinglikeacertificateforkeystone' + keystone_admin: true + keystone_admin_ip: '30.30.30.30' + keystone_admin_hostname: 'keystone.admin.fuel.local' + keystone_admin_usercert: true + keystone_admin_certdata: 'somethinglikeacertificateforkeystone' + nova: true + nova_public: true + nova_public_hostname: 'nova.public.fuel.local' + nova_public_usercert: true + nova_public_certdata: 'somethinglikeacertificatefornova' + nova_internal: true 
+ nova_internal_hostname: 'nova.internal.fuel.local' + nova_internal_usercert: true + nova_internal_certdata: 'somethinglikeacertificatefornova' + nova_admin: true + nova_admin_hostname: 'nova.admin.fuel.local' + nova_admin_usercert: true + nova_admin_certdata: 'somethinglikeacertificatefornova' + heat: true + heat_public: true + heat_public_hostname: 'heat.public.fuel.local' + heat_public_usercert: true + heat_public_certdata: 'somethinglikeacertificateforheat' + heat_internal: true + heat_internal_hostname: 'heat.internal.fuel.local' + heat_internal_usercert: true + heat_internal_certdata: 'somethinglikeacertificateforheat' + heat_admin: true + heat_admin_hostname: 'heat.admin.fuel.local' + heat_admin_usercert: true + heat_admin_certdata: 'somethinglikeacertificateforheat' + glance: true + glance_public: true + glance_public_hostname: 'glance.public.fuel.local' + glance_public_usercert: true + glance_public_certdata: 'somethinglikeacertificateforglance' + glance_internal: true + glance_internal_hostname: 'glance.internal.fuel.local' + glance_internal_usercert: true + glance_internal_certdata: 'somethinglikeacertificateforglance' + glance_admin: true + glance_admin_hostname: 'glance.admin.fuel.local' + glance_admin_usercert: true + glance_admin_certdata: 'somethinglikeacertificateforglance' + cinder: true + cinder_public: true + cinder_public_hostname: 'cinder.public.fuel.local' + cinder_public_usercert: true + cinder_public_certdata: 'somethinglikeacertificateforcinder' + cinder_internal: true + cinder_internal_hostname: 'cinder.internal.fuel.local' + cinder_internal_usercert: true + cinder_internal_certdata: 'somethinglikeacertificateforcinder' + cinder_admin: true + cinder_admin_hostname: 'cinder.admin.fuel.local' + cinder_admin_usercert: true + cinder_admin_certdata: 'somethinglikeacertificateforcinder' + neutron: true + neutron_public: true + neutron_public_hostname: 'neutron.public.fuel.local' + neutron_public_usercert: true + neutron_public_certdata: 
'somethinglikeacertificateforneutron' + neutron_internal: true + neutron_internal_hostname: 'neutron.internal.fuel.local' + neutron_internal_usercert: true + neutron_internal_certdata: 'somethinglikeacertificateforneutron' + neutron_admin: true + neutron_admin_hostname: 'neutron.admin.fuel.local' + neutron_admin_usercert: true + neutron_admin_certdata: 'somethinglikeacertificateforneutron' + swift: true + swift_public: true + swift_public_hostname: 'swift.public.fuel.local' + swift_public_usercert: true + swift_public_certdata: 'somethinglikeacertificateforswift' + swift_internal: true + swift_internal_hostname: 'swift.internal.fuel.local' + swift_internal_usercert: true + swift_internal_certdata: 'somethinglikeacertificateforswift' + swift_admin: true + swift_admin_hostname: 'swift.admin.fuel.local' + swift_admin_usercert: true + swift_admin_certdata: 'somethinglikeacertificateforswift' + sahara: true + sahara_public: true + sahara_public_hostname: 'sahara.public.fuel.local' + sahara_public_usercert: true + sahara_public_certdata: 'somethinglikeacertificateforsahara' + sahara_internal: true + sahara_internal_hostname: 'sahara.internal.fuel.local' + sahara_internal_usercert: true + sahara_internal_certdata: 'somethinglikeacertificateforsahara' + sahara_admin: true + sahara_admin_hostname: 'sahara.admin.fuel.local' + sahara_admin_usercert: true + sahara_admin_certdata: 'somethinglikeacertificateforsahara' + murano: true + murano_public: true + murano_public_hostname: 'murano.public.fuel.local' + murano_public_usercert: true + murano_public_certdata: 'somethinglikeacertificateformurano' + murano_internal: true + murano_internal_hostname: 'murano.internal.fuel.local' + murano_internal_usercert: true + murano_internal_certdata: 'somethinglikeacertificateformurano' + murano_admin: true + murano_admin_hostname: 'murano.admin.fuel.local' + murano_admin_usercert: true + murano_admin_certdata: 'somethinglikeacertificateformurano' + ceilometer: true + ceilometer_public: true 
+ ceilometer_public_hostname: 'ceilometer.public.fuel.local' + ceilometer_public_usercert: true + ceilometer_public_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_internal: true + ceilometer_internal_hostname: 'ceilometer.internal.fuel.local' + ceilometer_internal_usercert: true + ceilometer_internal_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_admin: true + ceilometer_admin_hostname: 'ceilometer.admin.fuel.local' + ceilometer_admin_usercert: true + ceilometer_admin_certdata: 'somethinglikeacertificateforceilometer' + radosgw: true + radosgw_public: true + radosgw_public_hostname: 'radosgw.public.fuel.local' + radosgw_public_usercert: true + radosgw_public_certdata: 'somethinglikeacertificateforradosgw' +public_ssl: + metadata: + label: Public TLS + weight: 110 + horizon: true + services: true + cert_source: self_signed + cert_data: + content: 'somedataaboutyourkeypair' + hostname: public.fuel.local +fail_if_error: false +fqdn: node-127.test.domain.local +fuel_version: '6.1' +glance: + db_password: 385SUUrC + image_cache_max_size: '0' + user_password: A9KgbnX6 +heat: + auth_encryption_key: 2604abefbdf5043f07e989af10f6caba + db_password: NTeyraV2 + enabled: true + rabbit_password: ReVt6ZKQ + user_password: tryL79Yl +kernel_params: + kernel: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 + nomodeset + metadata: + label: Kernel parameters + weight: 40 +keystone: + admin_token: UxFQFw3m + db_password: e4Op1FQB +last_controller: node-125 +libvirt_type: qemu +host_uuid: '00000000-0000-0000-0000-000000000000' +management_network_range: 192.168.0.0/24 +management_vip: 192.168.0.6 +management_vrouter_vip: 192.168.0.7 +master_ip: 10.108.0.2 +metadata: + label: Common + weight: 30 +mongo: + enabled: false +mp: +- point: '1' + weight: '1' +- point: '2' + weight: '2' +murano: + db_password: 7I6NRZcB + enabled: false + rabbit_password: X4GK4R7f + user_password: nuCELy8q +murano_settings: + metadata: + label: Murano 
Settings + restrictions: + - action: hide + condition: settings:additional_components.murano.value == false + weight: 20 + murano_repo_url: http://catalog.openstack.org/ +mysql: + root_password: 5eqwkxY3 + wsrep_password: sFMiVJ7I +network_metadata: + nodes: + node-121: + swift_zone: '1' + uid: '121' + fqdn: node-121.test.domain.local + network_roles: + keystone/api: 192.168.0.1 + neutron/api: 192.168.0.1 + mgmt/database: 192.168.0.1 + sahara/api: 192.168.0.1 + heat/api: 192.168.0.1 + ceilometer/api: 192.168.0.1 + ex: + ceph/public: 192.168.0.1 + ceph/radosgw: + management: 192.168.0.1 + swift/api: 192.168.0.1 + mgmt/api: 192.168.0.1 + storage: 192.168.1.1 + mgmt/corosync: 192.168.0.1 + cinder/api: 192.168.0.1 + public/vip: + swift/replication: 192.168.1.1 + mgmt/messaging: 192.168.0.1 + neutron/mesh: 192.168.0.1 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.1 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.1 + mgmt/vip: 192.168.0.1 + murano/api: 192.168.0.1 + nova/api: 192.168.0.1 + horizon: 192.168.0.1 + mgmt/memcache: 192.168.0.1 + cinder/iscsi: 192.168.1.1 + ceph/replication: 192.168.1.1 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-mongo + name: node-121 + node-124: + swift_zone: '1' + uid: '124' + fqdn: node-124.test.domain.local + network_roles: + keystone/api: 192.168.0.2 + neutron/api: 192.168.0.2 + mgmt/database: 192.168.0.2 + sahara/api: 192.168.0.2 + heat/api: 192.168.0.2 + ceilometer/api: 192.168.0.2 + ex: + ceph/public: 192.168.0.2 + ceph/radosgw: + management: 192.168.0.2 + swift/api: 192.168.0.2 + mgmt/api: 192.168.0.2 + storage: 192.168.1.2 + mgmt/corosync: 192.168.0.2 + cinder/api: 192.168.0.2 + public/vip: + swift/replication: 192.168.1.2 + mgmt/messaging: 192.168.0.2 + neutron/mesh: 192.168.0.2 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.2 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.2 + mgmt/vip: 192.168.0.2 + murano/api: 192.168.0.2 + nova/api: 
192.168.0.2 + horizon: 192.168.0.2 + mgmt/memcache: 192.168.0.2 + cinder/iscsi: 192.168.1.2 + ceph/replication: 192.168.1.2 + user_node_name: Untitled (6a:e7) + node_roles: + - ceph-osd + name: node-124 + node-125: + swift_zone: '1' + uid: '125' + fqdn: node-125.test.domain.local + network_roles: + keystone/api: 192.168.0.3 + neutron/api: 192.168.0.3 + mgmt/database: 192.168.0.3 + sahara/api: 192.168.0.3 + heat/api: 192.168.0.3 + ceilometer/api: 192.168.0.3 + ex: 172.16.0.2 + ceph/public: 192.168.0.3 + ceph/radosgw: 172.16.0.2 + management: 192.168.0.3 + swift/api: 192.168.0.3 + mgmt/api: 192.168.0.3 + storage: 192.168.1.3 + mgmt/corosync: 192.168.0.3 + cinder/api: 192.168.0.3 + public/vip: 172.16.0.2 + swift/replication: 192.168.1.3 + mgmt/messaging: 192.168.0.3 + neutron/mesh: 192.168.0.3 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.3 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.3 + mgmt/vip: 192.168.0.3 + murano/api: 192.168.0.3 + nova/api: 192.168.0.3 + horizon: 192.168.0.3 + mgmt/memcache: 192.168.0.3 + cinder/iscsi: 192.168.1.3 + ceph/replication: 192.168.1.3 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-controller + name: node-125 + node-126: + swift_zone: '1' + uid: '126' + fqdn: node-126.test.domain.local + network_roles: + keystone/api: 192.168.0.4 + neutron/api: 192.168.0.4 + mgmt/database: 192.168.0.4 + sahara/api: 192.168.0.4 + heat/api: 192.168.0.4 + ceilometer/api: 192.168.0.4 + ex: + ceph/public: 192.168.0.4 + ceph/radosgw: + management: 192.168.0.4 + swift/api: 192.168.0.4 + mgmt/api: 192.168.0.4 + storage: 192.168.1.4 + mgmt/corosync: 192.168.0.4 + cinder/api: 192.168.0.4 + public/vip: + swift/replication: 192.168.1.4 + mgmt/messaging: 192.168.0.4 + neutron/mesh: 192.168.0.4 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.4 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.4 + mgmt/vip: 192.168.0.4 + murano/api: 192.168.0.4 + nova/api: 192.168.0.4 + horizon: 
192.168.0.4 + mgmt/memcache: 192.168.0.4 + cinder/iscsi: 192.168.1.4 + ceph/replication: 192.168.1.4 + user_node_name: Untitled (6a:e7) + node_roles: + - ceph-osd + name: node-126 + node-127: + swift_zone: '1' + uid: '127' + fqdn: node-127.test.domain.local + network_roles: + keystone/api: 192.168.0.5 + neutron/api: 192.168.0.5 + mgmt/database: 192.168.0.5 + sahara/api: 192.168.0.5 + heat/api: 192.168.0.5 + ceilometer/api: 192.168.0.5 + ex: + ceph/public: 192.168.0.5 + ceph/radosgw: + management: 192.168.0.5 + swift/api: 192.168.0.5 + mgmt/api: 192.168.0.5 + storage: 192.168.1.5 + mgmt/corosync: 192.168.0.5 + cinder/api: 192.168.0.5 + public/vip: + swift/replication: 192.168.1.5 + mgmt/messaging: 192.168.0.5 + neutron/mesh: 192.168.0.5 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.5 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.5 + mgmt/vip: 192.168.0.5 + murano/api: 192.168.0.5 + nova/api: 192.168.0.5 + horizon: 192.168.0.5 + mgmt/memcache: 192.168.0.5 + cinder/iscsi: 192.168.1.5 + ceph/replication: 192.168.1.5 + user_node_name: Untitled (6a:e7) + node_roles: + - compute + name: node-127 + vips: + vrouter: + ipaddr: 192.168.0.6 + management: + ipaddr: 192.168.0.7 + public: + ipaddr: 172.16.0.3 + vrouter_pub: + ipaddr: 172.16.0.3 +network_scheme: + endpoints: + br-fw-admin: + IP: + - 10.108.0.6/24 + br-mgmt: + IP: + - 192.168.0.5/24 + gateway: 192.168.0.7 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 101 + br-prv: + IP: none + vendor_specific: + phy_interfaces: + - eth0 + vlans: 1000:1030 + br-storage: + IP: + - 192.168.1.5/24 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 102 + interfaces: + eth0: + vendor_specific: + bus_info: '0000:00:03.0' + driver: e1000 + eth1: + vendor_specific: + bus_info: '0000:00:04.0' + driver: e1000 + eth2: + vendor_specific: + bus_info: '0000:00:05.0' + driver: e1000 + eth3: + vendor_specific: + bus_info: '0000:00:06.0' + driver: e1000 + eth4: + vendor_specific: + bus_info: 
'0000:00:07.0' + driver: e1000 + provider: lnx + roles: + ex: br-ex + public/vip: br-ex + neutron/floating: br-floating + storage: br-storage + keystone/api: br-mgmt + neutron/api: br-mgmt + mgmt/database: br-mgmt + sahara/api: br-mgmt + ceilometer/api: br-mgmt + mgmt/vip: br-mgmt + ceph/public: br-mgmt + mgmt/messaging: br-mgmt + management: br-mgmt + swift/api: br-mgmt + mgmt/api: br-mgmt + storage: br-storage + mgmt/corosync: br-mgmt + cinder/api: br-mgmt + swift/replication: br-storage + neutron/mesh: br-mgmt + admin/pxe: br-fw-admin + mongo/db: br-mgmt + neutron/private: br-prv + fw-admin: br-fw-admin + glance/api: br-mgmt + heat/api: br-mgmt + murano/api: br-mgmt + nova/api: br-mgmt + horizon: br-mgmt + mgmt/memcache: br-mgmt + cinder/iscsi: br-storage + ceph/replication: br-storage + neutron/mesh: br-mgmt + transformations: + - action: add-br + name: br-fw-admin + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-prv + provider: ovs + - action: add-patch + bridges: + - br-prv + - br-fw-admin + provider: ovs + - action: add-port + bridge: br-fw-admin + name: eth0 + - action: add-port + bridge: br-storage + name: eth0.102 + - action: add-port + bridge: br-mgmt + name: eth0.101 + version: '1.1' +neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: disabled + vf_num: '16' +nodes: +- fqdn: node-121.test.domain.local + internal_address: 192.168.0.1 + internal_netmask: 255.255.255.0 + name: node-121 + role: primary-mongo + storage_address: 192.168.1.1 + storage_netmask: 255.255.255.0 + swift_zone: '121' + uid: '121' + user_node_name: Untitled (18:c9) +- fqdn: node-124.test.domain.local + internal_address: 192.168.0.2 + internal_netmask: 255.255.255.0 + name: node-124 + role: ceph-osd + storage_address: 192.168.1.2 + storage_netmask: 255.255.255.0 + swift_zone: '124' + uid: '124' + user_node_name: Untitled (6f:9d) +- fqdn: 
node-125.test.domain.local + internal_address: 192.168.0.3 + internal_netmask: 255.255.255.0 + name: node-125 + public_address: 172.16.0.2 + public_netmask: 255.255.255.0 + role: primary-controller + storage_address: 192.168.1.3 + storage_netmask: 255.255.255.0 + swift_zone: '125' + uid: '125' + user_node_name: Untitled (34:45) +- fqdn: node-126.test.domain.local + internal_address: 192.168.0.4 + internal_netmask: 255.255.255.0 + name: node-126 + role: ceph-osd + storage_address: 192.168.1.4 + storage_netmask: 255.255.255.0 + swift_zone: '126' + uid: '126' + user_node_name: Untitled (12:ea) +- fqdn: node-127.test.domain.local + internal_address: 192.168.0.5 + internal_netmask: 255.255.255.0 + name: node-127 + role: compute + storage_address: 192.168.1.5 + storage_netmask: 255.255.255.0 + swift_zone: '127' + uid: '127' + user_node_name: Untitled (74:27) +nova: + db_password: VXcP6cIR + state_path: /var/lib/nova + user_password: fuhtZH6v +nova_quota: false +online: true +openstack_version: 2014.2-6.1 +openstack_version_prev: null +priority: 300 +provision: + codename: trusty + image_data: + /: + container: gzip + format: ext4 + uri: http://10.108.0.2:8080/targetimages/env_37_ubuntu_1404_amd64.img.gz + /boot: + container: gzip + format: ext2 + uri: http://10.108.0.2:8080/targetimages/env_37_ubuntu_1404_amd64-boot.img.gz + metadata: + label: Provision + weight: 80 + method: image +public_network_assignment: + assign_to_all_nodes: false + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 +neutron_advanced_configuration: + neutron_dvr: false + neutron_l2_pop: false +public_vip: 172.16.0.3 +public_vrouter_vip: 172.16.0.4 +puppet: + manifests: rsync://10.108.0.2:/puppet/2014.2-6.1/manifests/ + modules: rsync://10.108.0.2:/puppet/2014.2-6.1/modules/ +puppet_debug: true +quantum: true +quantum_settings: + L2: + base_mac: fa:16:3e:00:00:00 + phys_nets: + physnet1: + bridge: br-floating + 
physnet2: + bridge: br-prv + vlan_range: 1000:1030 + segmentation_type: vlan + L3: + use_namespaces: true + database: + passwd: zOXpcc6c + keystone: + admin_password: XgdPodA7 + metadata: + metadata_proxy_shared_secret: QU11ydS2 + predefined_networks: + net04: + L2: + network_type: vlan + physnet: physnet2 + router_ext: false + segment_id: null + L3: + enable_dhcp: true + floating: null + gateway: 192.168.111.1 + nameservers: + - 8.8.4.4 + - 8.8.8.8 + subnet: 192.168.111.0/24 + shared: false + tenant: admin + net04_ext: + L2: + network_type: flat + physnet: physnet1 + router_ext: true + segment_id: null + L3: + enable_dhcp: false + floating: 172.16.0.130:172.16.0.254 + gateway: 172.16.0.1 + nameservers: [] + subnet: 172.16.0.0/24 + shared: false + tenant: admin +rabbit: + password: 1GXPbTgb +repo_setup: + installer_initrd: + local: /var/www/nailgun/ubuntu/x86_64/images/initrd.gz + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz + installer_kernel: + local: /var/www/nailgun/ubuntu/x86_64/images/linux + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux + metadata: + label: Repositories + weight: 50 + repos: + - name: ubuntu + priority: null + section: main universe multiverse + suite: trusty + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-updates + priority: null + section: main universe multiverse + suite: trusty-updates + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-security + priority: null + section: main universe multiverse + suite: trusty-security + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: mos + priority: 1050 + section: main restricted + suite: mos6.1 + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-updates + priority: 1050 + section: main restricted + suite: mos6.1-updates + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-security + 
priority: 1050 + section: main restricted + suite: mos6.1-security + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-holdback + priority: 1100 + section: main restricted + suite: mos6.1-holdback + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ +resume_guests_state_on_host_boot: true +role: compute +sahara: + db_password: R68HpdNS + enabled: false + user_password: ts32qXcD +status: discover +storage: + ephemeral_ceph: false + images_ceph: true + images_vcenter: false + iser: false + metadata: + label: Storage + weight: 60 + objects_ceph: true + osd_pool_size: '2' + pg_num: 256 + per_pool_pg_nums: + default_pg_num: 256 + cinder_volume: 2048 + compute: 1024 + backups: 512 + ".rgw": 512 + images: 256 + volumes_ceph: true + volumes_lvm: false +storage_network_range: 192.168.1.0/24 +swift: + user_password: bpFT3TKn +syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: '514' + syslog_server: '' + syslog_transport: tcp +tasks: +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hiera/hiera.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 100 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/globals/globals.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 200 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/logging/logging.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 300 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/tools/tools.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 400 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/netconfig/netconfig.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 500 + type: puppet + uids: 
+ - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/firewall/firewall.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 600 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hosts/hosts.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 700 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/roles/compute.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 900 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceilometer/compute.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1000 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/dns/dns-client.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1100 + type: puppet + uids: + - '127' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-network/openstack-network-compute.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1200 + type: puppet + uids: + - '127' +test_vm_image: + container_format: bare + disk_format: qcow2 + glance_properties: '' + img_name: TestVM + img_path: /usr/share/cirros-testvm/cirros-x86_64-disk.img + min_ram: 64 + os_name: cirros + public: 'true' +uid: '127' +use_cow_images: true +use_vcenter: false +user_node_name: Untitled (74:27) +workloads_collector: + enabled: true + metadata: + label: Workloads Collector User + restrictions: + - action: hide + condition: 'true' + weight: 10 + password: v6vMAe7Q + tenant: services + username: workloads_collector diff --git a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-controller.overridden_ssl.yaml b/tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-controller.overridden_ssl.yaml new file mode 
100644 index 0000000000..169a424694 --- /dev/null +++ b/tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-controller.overridden_ssl.yaml @@ -0,0 +1,1204 @@ +access: + email: admin@localhost + metadata: + label: Access + weight: 10 + password: admin + tenant: admin + user: admin +auth_key: '' +auto_assign_floating_ip: false +base_syslog: + syslog_port: '514' + syslog_server: 10.108.0.2 +ceilometer: + db_password: Toe5phw4 + enabled: true + metering_secret: tHq2rcoq + user_password: WBfBSo6U +cinder: + db_password: trj609V8 + fixed_key: 7883d66c643ce9a508ebcd4cd5516fc98814a11276bc98c4e8e671188b54e941 + user_password: sJRfG0GP +cobbler: + profile: ubuntu_1404_x86_64 +corosync: + group: 226.94.1.1 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: '12000' + verified: false +debug: false +deployment_id: 37 +deployment_mode: ha_compact +external_dns: + dns_list: 8.8.8.8, 8.8.4.4 + metadata: + label: Upstream DNS + weight: 90 +external_mongo: + hosts_ip: '' + metadata: + label: External MongoDB + restrictions: + - action: hide + condition: settings:additional_components.mongo.value == false + weight: 20 + mongo_db_name: ceilometer + mongo_password: ceilometer + mongo_replset: '' + mongo_user: ceilometer +external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: 0.pool.ntp.org, 1.pool.ntp.org +use_ssl: + horizon: true + horizon_public: true + horizon_public_hostname: 'horizon.public.fuel.local' + horizon_public_usercert: true + horizon_public_certdata: 'somethinglikeacertificateforhorizon' + keystone: true + keystone_public: true + keystone_public_ip: '10.10.10.10' + keystone_public_hostname: 'keystone.public.fuel.local' + keystone_public_usercert: true + keystone_public_certdata: 'somethinglikeacertificateforkeystone' + keystone_internal: true + keystone_internal_ip: '20.20.20.20' + keystone_internal_hostname: 'keystone.internal.fuel.local' + keystone_internal_usercert: true + 
keystone_internal_certdata: 'somethinglikeacertificateforkeystone' + keystone_admin: true + keystone_admin_ip: '30.30.30.30' + keystone_admin_hostname: 'keystone.admin.fuel.local' + keystone_admin_usercert: true + keystone_admin_certdata: 'somethinglikeacertificateforkeystone' + nova: true + nova_public: true + nova_public_hostname: 'nova.public.fuel.local' + nova_public_usercert: true + nova_public_certdata: 'somethinglikeacertificatefornova' + nova_internal: true + nova_internal_hostname: 'nova.internal.fuel.local' + nova_internal_usercert: true + nova_internal_certdata: 'somethinglikeacertificatefornova' + nova_admin: true + nova_admin_hostname: 'nova.admin.fuel.local' + nova_admin_usercert: true + nova_admin_certdata: 'somethinglikeacertificatefornova' + heat: true + heat_public: true + heat_public_hostname: 'heat.public.fuel.local' + heat_public_usercert: true + heat_public_certdata: 'somethinglikeacertificateforheat' + heat_internal: true + heat_internal_hostname: 'heat.internal.fuel.local' + heat_internal_usercert: true + heat_internal_certdata: 'somethinglikeacertificateforheat' + heat_admin: true + heat_admin_hostname: 'heat.admin.fuel.local' + heat_admin_usercert: true + heat_admin_certdata: 'somethinglikeacertificateforheat' + glance: true + glance_public: true + glance_public_hostname: 'glance.public.fuel.local' + glance_public_usercert: true + glance_public_certdata: 'somethinglikeacertificateforglance' + glance_internal: true + glance_internal_hostname: 'glance.internal.fuel.local' + glance_internal_usercert: true + glance_internal_certdata: 'somethinglikeacertificateforglance' + glance_admin: true + glance_admin_hostname: 'glance.admin.fuel.local' + glance_admin_usercert: true + glance_admin_certdata: 'somethinglikeacertificateforglance' + cinder: true + cinder_public: true + cinder_public_hostname: 'cinder.public.fuel.local' + cinder_public_usercert: true + cinder_public_certdata: 'somethinglikeacertificateforcinder' + cinder_internal: true + 
cinder_internal_hostname: 'cinder.internal.fuel.local' + cinder_internal_usercert: true + cinder_internal_certdata: 'somethinglikeacertificateforcinder' + cinder_admin: true + cinder_admin_hostname: 'cinder.admin.fuel.local' + cinder_admin_usercert: true + cinder_admin_certdata: 'somethinglikeacertificateforcinder' + neutron: true + neutron_public: true + neutron_public_hostname: 'neutron.public.fuel.local' + neutron_public_usercert: true + neutron_public_certdata: 'somethinglikeacertificateforneutron' + neutron_internal: true + neutron_internal_hostname: 'neutron.internal.fuel.local' + neutron_internal_usercert: true + neutron_internal_certdata: 'somethinglikeacertificateforneutron' + neutron_admin: true + neutron_admin_hostname: 'neutron.admin.fuel.local' + neutron_admin_usercert: true + neutron_admin_certdata: 'somethinglikeacertificateforneutron' + swift: true + swift_public: true + swift_public_hostname: 'swift.public.fuel.local' + swift_public_usercert: true + swift_public_certdata: 'somethinglikeacertificateforswift' + swift_internal: true + swift_internal_hostname: 'swift.internal.fuel.local' + swift_internal_usercert: true + swift_internal_certdata: 'somethinglikeacertificateforswift' + swift_admin: true + swift_admin_hostname: 'swift.admin.fuel.local' + swift_admin_usercert: true + swift_admin_certdata: 'somethinglikeacertificateforswift' + sahara: true + sahara_public: true + sahara_public_hostname: 'sahara.public.fuel.local' + sahara_public_usercert: true + sahara_public_certdata: 'somethinglikeacertificateforsahara' + sahara_internal: true + sahara_internal_hostname: 'sahara.internal.fuel.local' + sahara_internal_usercert: true + sahara_internal_certdata: 'somethinglikeacertificateforsahara' + sahara_admin: true + sahara_admin_hostname: 'sahara.admin.fuel.local' + sahara_admin_usercert: true + sahara_admin_certdata: 'somethinglikeacertificateforsahara' + murano: true + murano_public: true + murano_public_hostname: 'murano.public.fuel.local' + 
murano_public_usercert: true + murano_public_certdata: 'somethinglikeacertificateformurano' + murano_internal: true + murano_internal_hostname: 'murano.internal.fuel.local' + murano_internal_usercert: true + murano_internal_certdata: 'somethinglikeacertificateformurano' + murano_admin: true + murano_admin_hostname: 'murano.admin.fuel.local' + murano_admin_usercert: true + murano_admin_certdata: 'somethinglikeacertificateformurano' + ceilometer: true + ceilometer_public: true + ceilometer_public_hostname: 'ceilometer.public.fuel.local' + ceilometer_public_usercert: true + ceilometer_public_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_internal: true + ceilometer_internal_hostname: 'ceilometer.internal.fuel.local' + ceilometer_internal_usercert: true + ceilometer_internal_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_admin: true + ceilometer_admin_hostname: 'ceilometer.admin.fuel.local' + ceilometer_admin_usercert: true + ceilometer_admin_certdata: 'somethinglikeacertificateforceilometer' + radosgw: true + radosgw_public: true + radosgw_public_hostname: 'radosgw.public.fuel.local' + radosgw_public_usercert: true + radosgw_public_certdata: 'somethinglikeacertificateforradosgw' +public_ssl: + metadata: + label: Public TLS + weight: 110 + horizon: true + services: true + cert_source: self_signed + cert_data: + content: 'somedataaboutyourkeypair' + hostname: public.fuel.local +fail_if_error: true +fqdn: node-125.test.domain.local +fuel_version: '6.1' +glance: + db_password: 385SUUrC + image_cache_max_size: '0' + user_password: A9KgbnX6 +heat: + auth_encryption_key: 2604abefbdf5043f07e989af10f6caba + db_password: NTeyraV2 + enabled: true + rabbit_password: ReVt6ZKQ + user_password: tryL79Yl +kernel_params: + kernel: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 + nomodeset + metadata: + label: Kernel parameters + weight: 40 +keystone: + admin_token: UxFQFw3m + db_password: e4Op1FQB +last_controller: node-125 
+libvirt_type: qemu +management_network_range: 192.168.0.0/24 +management_vip: 192.168.0.6 +management_vrouter_vip: 192.168.0.7 +master_ip: 10.108.0.2 +metadata: + label: Common + weight: 30 +mongo: + enabled: false +mp: +- point: '1' + weight: '1' +- point: '2' + weight: '2' +murano: + db_password: 7I6NRZcB + enabled: false + rabbit_password: X4GK4R7f + user_password: nuCELy8q +murano_settings: + metadata: + label: Murano Settings + restrictions: + - action: hide + condition: settings:additional_components.murano.value == false + weight: 20 + murano_repo_url: http://catalog.openstack.org/ +mysql: + root_password: 5eqwkxY3 + wsrep_password: sFMiVJ7R +network_metadata: + nodes: + node-121: + swift_zone: '1' + uid: '121' + fqdn: node-121.test.domain.local + network_roles: + keystone/api: 192.168.0.1 + neutron/api: 192.168.0.1 + mgmt/database: 192.168.0.1 + sahara/api: 192.168.0.1 + heat/api: 192.168.0.1 + ceilometer/api: 192.168.0.1 + ex: + ceph/public: 192.168.0.1 + ceph/radosgw: + management: 192.168.0.1 + swift/api: 192.168.0.1 + mgmt/api: 192.168.0.1 + storage: 192.168.1.1 + mgmt/corosync: 192.168.0.1 + cinder/api: 192.168.0.1 + public/vip: + swift/replication: 192.168.1.1 + mgmt/messaging: 192.168.0.1 + neutron/mesh: 192.168.0.1 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.1 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.1 + mgmt/vip: 192.168.0.1 + murano/api: 192.168.0.1 + nova/api: 192.168.0.1 + horizon: 192.168.0.1 + mgmt/memcache: 192.168.0.1 + cinder/iscsi: 192.168.1.1 + ceph/replication: 192.168.1.1 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-mongo + name: node-121 + node-124: + swift_zone: '1' + uid: '124' + fqdn: node-124.test.domain.local + network_roles: + keystone/api: 192.168.0.2 + neutron/api: 192.168.0.2 + mgmt/database: 192.168.0.2 + sahara/api: 192.168.0.2 + heat/api: 192.168.0.2 + ceilometer/api: 192.168.0.2 + ex: + ceph/public: 192.168.0.2 + ceph/radosgw: + management: 192.168.0.2 + 
swift/api: 192.168.0.2 + mgmt/api: 192.168.0.2 + storage: 192.168.1.2 + mgmt/corosync: 192.168.0.2 + cinder/api: 192.168.0.2 + public/vip: + swift/replication: 192.168.1.2 + mgmt/messaging: 192.168.0.2 + neutron/mesh: 192.168.0.2 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.2 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.2 + mgmt/vip: 192.168.0.2 + murano/api: 192.168.0.2 + nova/api: 192.168.0.2 + horizon: 192.168.0.2 + mgmt/memcache: 192.168.0.2 + cinder/iscsi: 192.168.1.2 + ceph/replication: 192.168.1.2 + user_node_name: Untitled (6a:e7) + node_roles: + - ceph-osd + name: node-124 + node-125: + swift_zone: '1' + uid: '125' + fqdn: node-125.test.domain.local + network_roles: + keystone/api: 192.168.0.3 + neutron/api: 192.168.0.3 + mgmt/database: 192.168.0.3 + sahara/api: 192.168.0.3 + heat/api: 192.168.0.3 + ceilometer/api: 192.168.0.3 + ex: 172.16.0.2 + ceph/public: 192.168.0.3 + ceph/radosgw: 172.16.0.2 + management: 192.168.0.3 + swift/api: 192.168.0.3 + mgmt/api: 192.168.0.3 + storage: 192.168.1.3 + mgmt/corosync: 192.168.0.3 + cinder/api: 192.168.0.3 + public/vip: 172.16.0.2 + swift/replication: 192.168.1.3 + mgmt/messaging: 192.168.0.3 + neutron/mesh: 192.168.0.3 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.3 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.3 + mgmt/vip: 192.168.0.3 + murano/api: 192.168.0.3 + nova/api: 192.168.0.3 + horizon: 192.168.0.3 + mgmt/memcache: 192.168.0.3 + cinder/iscsi: 192.168.1.3 + ceph/replication: 192.168.1.3 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-controller + name: node-125 + node-126: + swift_zone: '1' + uid: '126' + fqdn: node-126.test.domain.local + network_roles: + keystone/api: 192.168.0.4 + neutron/api: 192.168.0.4 + mgmt/database: 192.168.0.4 + sahara/api: 192.168.0.4 + heat/api: 192.168.0.4 + ceilometer/api: 192.168.0.4 + ex: + ceph/public: 192.168.0.4 + ceph/radosgw: + management: 192.168.0.4 + swift/api: 192.168.0.4 
+ mgmt/api: 192.168.0.4 + storage: 192.168.1.4 + mgmt/corosync: 192.168.0.4 + cinder/api: 192.168.0.4 + public/vip: + swift/replication: 192.168.1.4 + mgmt/messaging: 192.168.0.4 + neutron/mesh: 192.168.0.4 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.4 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.4 + mgmt/vip: 192.168.0.4 + murano/api: 192.168.0.4 + nova/api: 192.168.0.4 + horizon: 192.168.0.4 + mgmt/memcache: 192.168.0.4 + cinder/iscsi: 192.168.1.4 + ceph/replication: 192.168.1.4 + user_node_name: Untitled (6a:e7) + node_roles: + - ceph-osd + name: node-126 + node-127: + swift_zone: '1' + uid: '127' + fqdn: node-127.test.domain.local + network_roles: + keystone/api: 192.168.0.5 + neutron/api: 192.168.0.5 + mgmt/database: 192.168.0.5 + sahara/api: 192.168.0.5 + heat/api: 192.168.0.5 + ceilometer/api: 192.168.0.5 + ex: + ceph/public: 192.168.0.5 + ceph/radosgw: + management: 192.168.0.5 + swift/api: 192.168.0.5 + mgmt/api: 192.168.0.5 + storage: 192.168.1.5 + mgmt/corosync: 192.168.0.5 + cinder/api: 192.168.0.5 + public/vip: + swift/replication: 192.168.1.5 + mgmt/messaging: 192.168.0.5 + neutron/mesh: 192.168.0.5 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.5 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.5 + mgmt/vip: 192.168.0.5 + murano/api: 192.168.0.5 + nova/api: 192.168.0.5 + horizon: 192.168.0.5 + mgmt/memcache: 192.168.0.5 + cinder/iscsi: 192.168.1.5 + ceph/replication: 192.168.1.5 + user_node_name: Untitled (6a:e7) + node_roles: + - compute + name: node-127 + vips: + vrouter: + ipaddr: 192.168.0.6 + management: + ipaddr: 192.168.0.7 + public: + ipaddr: 172.16.0.3 + vrouter_pub: + ipaddr: 172.16.0.3 +network_scheme: + endpoints: + br-ex: + IP: + - 172.16.0.2/24 + gateway: 172.16.0.1 + vendor_specific: + phy_interfaces: + - eth1 + br-floating: + IP: none + br-fw-admin: + IP: + - 10.108.0.8/24 + br-mgmt: + IP: + - 192.168.0.3/24 + vendor_specific: + phy_interfaces: + - eth0 + 
vlans: 101 + br-prv: + IP: none + vendor_specific: + phy_interfaces: + - eth0 + vlans: 1000:1030 + br-storage: + IP: + - 192.168.1.3/24 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 102 + interfaces: + eth0: + vendor_specific: + bus_info: '0000:00:03.0' + driver: e1000 + eth1: + vendor_specific: + bus_info: '0000:00:04.0' + driver: e1000 + eth2: + vendor_specific: + bus_info: '0000:00:05.0' + driver: e1000 + eth3: + vendor_specific: + bus_info: '0000:00:06.0' + driver: e1000 + eth4: + vendor_specific: + bus_info: '0000:00:07.0' + driver: e1000 + provider: lnx + roles: + ex: br-ex + public/vip: br-ex + neutron/floating: br-floating + storage: br-storage + keystone/api: br-mgmt + neutron/api: br-mgmt + mgmt/database: br-mgmt + sahara/api: br-mgmt + ceilometer/api: br-mgmt + mgmt/vip: br-mgmt + ceph/public: br-mgmt + mgmt/messaging: br-mgmt + management: br-mgmt + swift/api: br-mgmt + mgmt/api: br-mgmt + storage: br-storage + mgmt/corosync: br-mgmt + cinder/api: br-mgmt + swift/replication: br-storage + neutron/mesh: br-mgmt + admin/pxe: br-fw-admin + mongo/db: br-mgmt + neutron/private: br-prv + fw-admin: br-fw-admin + glance/api: br-mgmt + heat/api: br-mgmt + murano/api: br-mgmt + nova/api: br-mgmt + horizon: br-mgmt + mgmt/memcache: br-mgmt + cinder/iscsi: br-storage + ceph/replication: br-storage + neutron/mesh: br-mgmt + transformations: + - action: add-br + name: br-fw-admin + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-ex + - action: add-br + name: br-floating + provider: ovs + - action: add-patch + bridges: + - br-floating + - br-ex + provider: ovs + - action: add-br + name: br-prv + provider: ovs + - action: add-patch + bridges: + - br-prv + - br-fw-admin + provider: ovs + - action: add-port + bridge: br-fw-admin + name: eth0 + - action: add-port + bridge: br-storage + name: eth0.102 + - action: add-port + bridge: br-mgmt + name: eth0.101 + - action: add-port + bridge: br-ex + name: eth1 + 
version: '1.1' +neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: disabled + vf_num: '16' +nodes: +- fqdn: node-121.test.domain.local + internal_address: 192.168.0.1 + internal_netmask: 255.255.255.0 + name: node-121 + role: primary-mongo + storage_address: 192.168.1.1 + storage_netmask: 255.255.255.0 + swift_zone: '121' + uid: '121' + user_node_name: Untitled (18:c9) +- fqdn: node-124.test.domain.local + internal_address: 192.168.0.2 + internal_netmask: 255.255.255.0 + name: node-124 + role: ceph-osd + storage_address: 192.168.1.2 + storage_netmask: 255.255.255.0 + swift_zone: '124' + uid: '124' + user_node_name: Untitled (6f:9d) +- fqdn: node-125.test.domain.local + internal_address: 192.168.0.3 + internal_netmask: 255.255.255.0 + name: node-125 + public_address: 172.16.0.2 + public_netmask: 255.255.255.0 + role: primary-controller + storage_address: 192.168.1.3 + storage_netmask: 255.255.255.0 + swift_zone: '125' + uid: '125' + user_node_name: Untitled (34:45) +- fqdn: node-126.test.domain.local + internal_address: 192.168.0.4 + internal_netmask: 255.255.255.0 + name: node-126 + role: ceph-osd + storage_address: 192.168.1.4 + storage_netmask: 255.255.255.0 + swift_zone: '126' + uid: '126' + user_node_name: Untitled (12:ea) +- fqdn: node-127.test.domain.local + internal_address: 192.168.0.5 + internal_netmask: 255.255.255.0 + name: node-127 + role: compute + storage_address: 192.168.1.5 + storage_netmask: 255.255.255.0 + swift_zone: '127' + uid: '127' + user_node_name: Untitled (74:27) +nova: + db_password: VXcP6cIR + state_path: /var/lib/nova + user_password: fuhtZH6v +nova_quota: false +online: true +openstack_version: 2014.2-6.1 +openstack_version_prev: null +priority: 200 +provision: + codename: trusty + image_data: + /: + container: gzip + format: ext4 + uri: http://10.108.0.2:8080/targetimages/env_37_ubuntu_1404_amd64.img.gz + /boot: + container: gzip + format: ext2 + uri: 
http://10.108.0.2:8080/targetimages/env_37_ubuntu_1404_amd64-boot.img.gz + metadata: + label: Provision + weight: 80 + method: image +public_network_assignment: + assign_to_all_nodes: false + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 +neutron_advanced_configuration: + neutron_dvr: false + neutron_l2_pop: false +public_vip: 172.16.0.3 +public_vrouter_vip: 172.16.0.4 +puppet: + manifests: rsync://10.108.0.2:/puppet/2014.2-6.1/manifests/ + modules: rsync://10.108.0.2:/puppet/2014.2-6.1/modules/ +puppet_debug: true +quantum: true +quantum_settings: + L2: + base_mac: fa:16:3e:00:00:00 + phys_nets: + physnet1: + bridge: br-floating + physnet2: + bridge: br-prv + vlan_range: 1000:1030 + segmentation_type: vlan + L3: + use_namespaces: true + database: + passwd: zOXpcc6c + keystone: + admin_password: XgdPodA7 + metadata: + metadata_proxy_shared_secret: QU11ydS2 + predefined_networks: + net04: + L2: + network_type: vlan + physnet: physnet2 + router_ext: false + segment_id: null + L3: + enable_dhcp: true + floating: null + gateway: 192.168.111.1 + nameservers: + - 8.8.4.4 + - 8.8.8.8 + subnet: 192.168.111.0/24 + shared: false + tenant: admin + net04_ext: + L2: + network_type: flat + physnet: physnet1 + router_ext: true + segment_id: null + L3: + enable_dhcp: false + floating: 172.16.0.130:172.16.0.254 + gateway: 172.16.0.1 + nameservers: [] + subnet: 172.16.0.0/24 + shared: false + tenant: admin +rabbit: + password: 1GXPbTgb +repo_setup: + installer_initrd: + local: /var/www/nailgun/ubuntu/x86_64/images/initrd.gz + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz + installer_kernel: + local: /var/www/nailgun/ubuntu/x86_64/images/linux + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux + metadata: + label: Repositories + weight: 50 + repos: + - name: ubuntu + priority: 
null + section: main universe multiverse + suite: trusty + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-updates + priority: null + section: main universe multiverse + suite: trusty-updates + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-security + priority: null + section: main universe multiverse + suite: trusty-security + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: mos + priority: 1050 + section: main restricted + suite: mos6.1 + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-updates + priority: 1050 + section: main restricted + suite: mos6.1-updates + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-security + priority: 1050 + section: main restricted + suite: mos6.1-security + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-holdback + priority: 1100 + section: main restricted + suite: mos6.1-holdback + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ +resume_guests_state_on_host_boot: true +role: primary-controller +sahara: + db_password: R68HpdNS + enabled: false + user_password: ts32qXcD +status: discover +storage: + ephemeral_ceph: false + images_ceph: true + images_vcenter: false + iser: false + metadata: + label: Storage + weight: 60 + objects_ceph: true + osd_pool_size: '2' + pg_num: 256 + per_pool_pg_nums: + default_pg_num: 256 + cinder_volume: 2048 + compute: 1024 + backups: 512 + ".rgw": 512 + images: 256 + volumes_ceph: true + volumes_lvm: false +storage_network_range: 192.168.1.0/24 +swift: + user_password: bpFT3TKn +syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: '514' + syslog_server: '' + syslog_transport: tcp +tasks: +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hiera/hiera.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: 
/etc/puppet/modules/osnailyfacter/modular/globals/globals.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 200 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/logging/logging.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/tools/tools.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/umm/umm.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 500 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/netconfig/netconfig.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 600 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hosts/hosts.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 700 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/firewall/firewall.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster/cluster.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 900 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/virtual_ips/virtual_ips.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1000 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/virtual_ips/conntrackd.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 
1100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster-haproxy/cluster-haproxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1200 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-haproxy/openstack-haproxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/dns/dns-server.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/dns/dns-client.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1500 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ntp/ntp-server.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1600 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/database/database.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1700 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceilometer/controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceilometer/radosgw_user.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/rabbitmq/rabbitmq.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1900 + type: puppet + uids: + - '125' +- parameters: + cwd: / + 
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/memcached/memcached.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2000 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/keystone/keystone.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-cinder/openstack-cinder.pp + puppet_modules: /etc/puppet/modules + timeout: 1200 + priority: 2200 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/glance/glance.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-controller/openstack-controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-network/openstack-network-controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2500 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/heat/heat.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2600 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/horizon/horizon.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2700 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/murano/murano.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: 
/etc/puppet/modules/osnailyfacter/modular/sahara/sahara.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2900 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/api-proxy/api-proxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3000 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/mon.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3200 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/swift/swift.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/roles/controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/swift/rebalance_cronjob.pp + puppet_modules: /etc/puppet/modules + timeout: 300 + priority: 3500 + type: puppet + uids: + - '125' +test_vm_image: + container_format: bare + disk_format: qcow2 + glance_properties: '' + img_name: TestVM + img_path: /usr/share/cirros-testvm/cirros-x86_64-disk.img + min_ram: 64 + os_name: cirros + public: 'true' +uid: '125' +use_cow_images: true +use_vcenter: false +user_node_name: Untitled (34:45) +workloads_collector: + enabled: true + metadata: + label: Workloads Collector User + restrictions: + - action: hide + condition: 'true' + weight: 10 + password: v6vMAe7Q + tenant: services + username: workloads_collector diff --git 
a/tests/noop/astute.yaml/neut_vlan.compute.ssl.overridden.yaml b/tests/noop/astute.yaml/neut_vlan.compute.ssl.overridden.yaml new file mode 100644 index 0000000000..faff578af3 --- /dev/null +++ b/tests/noop/astute.yaml/neut_vlan.compute.ssl.overridden.yaml @@ -0,0 +1,1055 @@ +--- +tasks: +- priority: 100 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/fuel_pkgs/fuel_pkgs.pp" + timeout: 600 + cwd: "/" +- priority: 200 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/hiera/hiera.pp" + timeout: 3600 + cwd: "/" +- priority: 300 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/globals/globals.pp" + timeout: 3600 + cwd: "/" +- priority: 400 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/logging/logging.pp" + timeout: 3600 + cwd: "/" +- priority: 500 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/tools/tools.pp" + timeout: 3600 + cwd: "/" +- priority: 600 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/netconfig/netconfig.pp" + timeout: 3600 + cwd: "/" +- priority: 700 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/netconfig/connectivity_tests.pp" + timeout: 3600 + cwd: "/" +- priority: 800 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/firewall/firewall.pp" + timeout: 3600 + cwd: "/" +- 
priority: 900 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/ssl/ssl_add_trust_chain.pp" + timeout: 3600 + cwd: "/" +- priority: 1000 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/hosts/hosts.pp" + timeout: 3600 + cwd: "/" +- priority: 1100 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/roles/compute.pp" + timeout: 3600 + cwd: "/" +- priority: 1200 + type: puppet + uids: + - '5' + parameters: + puppet_modules: "/etc/puppet/modules" + puppet_manifest: "/etc/puppet/modules/osnailyfacter/modular/openstack-network/openstack-network-compute.pp" + timeout: 3600 + cwd: "/" +user_node_name: Untitled (2a:ee) +uid: '5' +resume_guests_state_on_host_boot: true +syslog: + syslog_port: '514' + syslog_transport: tcp + syslog_server: '' + metadata: + weight: 50 + label: Syslog +libvirt_type: qemu +host_uuid: '00000000-0000-0000-0000-000000000000' +puppet: + modules: rsync://10.122.5.2:/puppet/2015.1.0-8.0/modules/ + manifests: rsync://10.122.5.2:/puppet/2015.1.0-8.0/manifests/ +mysql: + root_password: PBmaN2YX + wsrep_password: FsatnsoY +quantum: true +use_cow_images: true +glance: + image_cache_max_size: '5368709120' + user_password: DztOMLWg + db_password: LtDxFLyX +ceilometer: + db_password: HUn68NQb + user_password: ycMeNdmo + metering_secret: zjAKZxtd + enabled: false +cobbler: + profile: ubuntu_1404_x86_64 +mongo: + enabled: false +quantum_settings: + database: + passwd: bnQfjm1A + keystone: + admin_password: glXcjTAY + L3: + use_namespaces: true + L2: + phys_nets: + physnet1: + bridge: br-floating + physnet2: + bridge: br-prv + vlan_range: 1000:1030 + base_mac: fa:16:3e:00:00:00 + segmentation_type: vlan + predefined_networks: + net04_ext: + shared: false + L2: + network_type: 
flat + physnet: physnet1 + router_ext: true + segment_id: + L3: + nameservers: [] + subnet: 10.122.6.0/24 + floating: 10.122.6.130:10.122.6.254 + gateway: 10.122.6.1 + enable_dhcp: false + tenant: admin + net04: + shared: false + L2: + network_type: vlan + router_ext: false + physnet: physnet2 + segment_id: + L3: + nameservers: + - 8.8.4.4 + - 8.8.8.8 + subnet: 10.122.8.0/24 + floating: + gateway: 10.122.8.1 + enable_dhcp: true + tenant: admin + metadata: + metadata_proxy_shared_secret: orn88mVY +use_vcenter: false +management: + network_role: mgmt/vip + ipaddr: 10.122.7.7 + node_roles: + - controller + - primary-controller + namespace: haproxy +openstack_version: 2015.1.0-8.0 +kernel_params: + kernel: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 + nomodeset + metadata: + weight: 40 + label: Kernel parameters +provision: + image_data: + "/boot": + container: gzip + uri: http://10.122.5.2:8080/targetimages/env_1_ubuntu_1404_amd64-boot.img.gz + format: ext2 + "/": + container: gzip + uri: http://10.122.5.2:8080/targetimages/env_1_ubuntu_1404_amd64.img.gz + format: ext4 + codename: trusty + method: image + metadata: + restrictions: + - action: hide + condition: 'true' + weight: 80 + label: Provision +storage: + iser: false + volumes_ceph: false + per_pool_pg_nums: + compute: 128 + default_pg_num: 128 + volumes: 128 + images: 128 + backups: 128 + ".rgw": 128 + objects_ceph: false + ephemeral_ceph: false + volumes_lvm: true + images_vcenter: false + osd_pool_size: '2' + pg_num: 128 + images_ceph: false + metadata: + weight: 60 + label: Storage +nova: + db_password: of31Rxsy + user_password: n0KfayKg + state_path: "/var/lib/nova" +master_ip: 10.122.5.2 +priority: 300 +external_dns: + dns_list: 8.8.8.8 + metadata: + weight: 90 + label: Host OS DNS Servers +murano: + db_password: gjBrHFFZ + user_password: crLmJ5b0 + enabled: false + rabbit_password: ra45kjwS +murano_settings: + murano_repo_url: http://storage.apps.openstack.org/ + metadata: + 
restrictions: + - action: hide + message: Murano is not enabled on the Additional Components section + condition: settings:additional_components.murano.value == false + weight: 20 + label: Murano Settings +role: compute +deployment_mode: ha_compact +external_mongo: + mongo_db_name: ceilometer + mongo_replset: '' + mongo_user: ceilometer + hosts_ip: '' + mongo_password: ceilometer + metadata: + restrictions: + - action: hide + message: Ceilometer and MongoDB are not enabled on the Additional Components + section + condition: settings:additional_components.mongo.value == false + weight: 20 + label: External MongoDB +online: true +keystone: + db_password: H4N630IH + admin_token: cKHHVACg +nodes: +- user_node_name: Untitled (d8:bb) + uid: '1' + public_address: 10.122.6.2 + internal_netmask: 255.255.255.0 + fqdn: node-1.domain.local + role: cinder + public_netmask: 255.255.255.0 + internal_address: 10.122.7.1 + storage_address: 10.122.9.1 + swift_zone: '1' + storage_netmask: 255.255.255.0 + name: node-1 +- user_node_name: Untitled (d8:bb) + uid: '1' + public_address: 10.122.6.2 + internal_netmask: 255.255.255.0 + fqdn: node-1.domain.local + role: primary-controller + public_netmask: 255.255.255.0 + internal_address: 10.122.7.1 + storage_address: 10.122.9.1 + swift_zone: '1' + storage_netmask: 255.255.255.0 + name: node-1 +- user_node_name: Untitled (68:63) + uid: '2' + public_address: 10.122.6.4 + internal_netmask: 255.255.255.0 + fqdn: node-2.domain.local + role: cinder + public_netmask: 255.255.255.0 + internal_address: 10.122.7.4 + storage_address: 10.122.9.3 + swift_zone: '2' + storage_netmask: 255.255.255.0 + name: node-2 +- user_node_name: Untitled (68:63) + uid: '2' + public_address: 10.122.6.4 + internal_netmask: 255.255.255.0 + fqdn: node-2.domain.local + role: controller + public_netmask: 255.255.255.0 + internal_address: 10.122.7.4 + storage_address: 10.122.9.3 + swift_zone: '2' + storage_netmask: 255.255.255.0 + name: node-2 +- user_node_name: Untitled 
(03:15) + uid: '3' + public_address: 10.122.6.3 + internal_netmask: 255.255.255.0 + fqdn: node-3.domain.local + role: cinder + public_netmask: 255.255.255.0 + internal_address: 10.122.7.5 + storage_address: 10.122.9.2 + swift_zone: '3' + storage_netmask: 255.255.255.0 + name: node-3 +- user_node_name: Untitled (03:15) + uid: '3' + public_address: 10.122.6.3 + internal_netmask: 255.255.255.0 + fqdn: node-3.domain.local + role: controller + public_netmask: 255.255.255.0 + internal_address: 10.122.7.5 + storage_address: 10.122.9.2 + swift_zone: '3' + storage_netmask: 255.255.255.0 + name: node-3 +- user_node_name: Untitled (a7:46) + uid: '4' + internal_netmask: 255.255.255.0 + fqdn: node-4.domain.local + role: compute + internal_address: 10.122.7.2 + storage_address: 10.122.9.5 + swift_zone: '4' + storage_netmask: 255.255.255.0 + name: node-4 +- user_node_name: Untitled (2a:ee) + uid: '5' + internal_netmask: 255.255.255.0 + fqdn: node-5.domain.local + role: compute + internal_address: 10.122.7.3 + storage_address: 10.122.9.4 + swift_zone: '5' + storage_netmask: 255.255.255.0 + name: node-5 +nova_quota: false +public: + network_role: public/vip + ipaddr: 10.122.6.6 + node_roles: + - controller + - primary-controller + namespace: haproxy +corosync: + verified: false + group: 226.94.1.1 + port: '12000' + metadata: + restrictions: + - action: hide + condition: 'true' + weight: 50 + label: Corosync +metadata: + weight: 30 + label: Common +status: discover +swift: + user_password: zr0zBVgi +repo_setup: + installer_kernel: + local: "/var/www/nailgun/ubuntu/x86_64/images/linux" + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux + repos: + - name: ubuntu + section: main universe multiverse + uri: http://archive.ubuntu.com/ubuntu/ + priority: + suite: trusty + type: deb + - name: ubuntu-updates + section: main universe multiverse + uri: http://archive.ubuntu.com/ubuntu/ + priority: + suite: trusty-updates + type: deb + - 
name: ubuntu-security + section: main universe multiverse + uri: http://archive.ubuntu.com/ubuntu/ + priority: + suite: trusty-security + type: deb + - name: mos + section: main restricted + uri: http://10.122.5.2:8080/2015.1.0-8.0/ubuntu/x86_64 + priority: 1050 + suite: mos7.0 + type: deb + - name: mos-updates + section: main restricted + uri: http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ + priority: 1050 + suite: mos7.0-updates + type: deb + - name: mos-security + section: main restricted + uri: http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ + priority: 1050 + suite: mos7.0-security + type: deb + - name: mos-holdback + section: main restricted + uri: http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ + priority: 1100 + suite: mos7.0-holdback + type: deb + - name: Auxiliary + section: main restricted + uri: http://10.122.5.2:8080/2015.1.0-8.0/ubuntu/auxiliary + priority: 1150 + suite: auxiliary + type: deb + metadata: + always_editable: true + weight: 50 + label: Repositories + installer_initrd: + local: "/var/www/nailgun/ubuntu/x86_64/images/initrd.gz" + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz +vrouter: + network_role: mgmt/vip + ipaddr: 10.122.7.6 + node_roles: + - controller + - primary-controller + namespace: vrouter +fail_if_error: false +sahara: + db_password: dfoFKl7u + user_password: jYjxogYn + enabled: false +network_metadata: + nodes: + node-5: + swift_zone: '5' + uid: '5' + fqdn: node-5.domain.local + network_roles: + murano/api: 10.122.7.3 + keystone/api: 10.122.7.3 + neutron/api: 10.122.7.3 + mgmt/database: 10.122.7.3 + sahara/api: 10.122.7.3 + ceilometer/api: 10.122.7.3 + ceph/public: 10.122.7.3 + mgmt/messaging: 10.122.7.3 + management: 10.122.7.3 + swift/api: 10.122.7.3 + storage: 10.122.9.4 + mgmt/corosync: 10.122.7.3 + cinder/api: 10.122.7.3 + swift/replication: 10.122.9.4 + admin/pxe: 10.122.5.4 + mongo/db: 10.122.7.3 + neutron/private: + neutron/floating: + fw-admin: 
10.122.5.4 + glance/api: 10.122.7.3 + heat/api: 10.122.7.3 + mgmt/vip: 10.122.7.3 + nova/api: 10.122.7.3 + horizon: 10.122.7.3 + nova/migration: 10.122.7.3 + mgmt/memcache: 10.122.7.3 + cinder/iscsi: 10.122.9.4 + ceph/replication: 10.122.9.4 + user_node_name: Untitled (2a:ee) + node_roles: + - compute + name: node-5 + node-4: + swift_zone: '4' + uid: '4' + fqdn: node-4.domain.local + network_roles: + murano/api: 10.122.7.2 + keystone/api: 10.122.7.2 + neutron/api: 10.122.7.2 + mgmt/database: 10.122.7.2 + sahara/api: 10.122.7.2 + ceilometer/api: 10.122.7.2 + ceph/public: 10.122.7.2 + mgmt/messaging: 10.122.7.2 + management: 10.122.7.2 + swift/api: 10.122.7.2 + storage: 10.122.9.5 + mgmt/corosync: 10.122.7.2 + cinder/api: 10.122.7.2 + swift/replication: 10.122.9.5 + admin/pxe: 10.122.5.5 + mongo/db: 10.122.7.2 + neutron/private: + neutron/floating: + fw-admin: 10.122.5.5 + glance/api: 10.122.7.2 + heat/api: 10.122.7.2 + mgmt/vip: 10.122.7.2 + nova/api: 10.122.7.2 + horizon: 10.122.7.2 + nova/migration: 10.122.7.2 + mgmt/memcache: 10.122.7.2 + cinder/iscsi: 10.122.9.5 + ceph/replication: 10.122.9.5 + user_node_name: Untitled (a7:46) + node_roles: + - compute + name: node-4 + node-1: + swift_zone: '1' + uid: '1' + fqdn: node-1.domain.local + network_roles: + keystone/api: 10.122.7.1 + neutron/api: 10.122.7.1 + mgmt/database: 10.122.7.1 + sahara/api: 10.122.7.1 + heat/api: 10.122.7.1 + ceilometer/api: 10.122.7.1 + ex: 10.122.6.2 + ceph/public: 10.122.7.1 + mgmt/messaging: 10.122.7.1 + management: 10.122.7.1 + swift/api: 10.122.7.1 + storage: 10.122.9.1 + mgmt/corosync: 10.122.7.1 + cinder/api: 10.122.7.1 + public/vip: 10.122.6.2 + swift/replication: 10.122.9.1 + ceph/radosgw: 10.122.6.2 + admin/pxe: 10.122.5.3 + mongo/db: 10.122.7.1 + neutron/private: + neutron/floating: + fw-admin: 10.122.5.3 + glance/api: 10.122.7.1 + mgmt/vip: 10.122.7.1 + murano/api: 10.122.7.1 + nova/api: 10.122.7.1 + horizon: 10.122.7.1 + nova/migration: 10.122.7.1 + mgmt/memcache: 10.122.7.1 + 
cinder/iscsi: 10.122.9.1 + ceph/replication: 10.122.9.1 + user_node_name: Untitled (d8:bb) + node_roles: + - cinder + - primary-controller + name: node-1 + node-3: + swift_zone: '3' + uid: '3' + fqdn: node-3.domain.local + network_roles: + keystone/api: 10.122.7.5 + neutron/api: 10.122.7.5 + mgmt/database: 10.122.7.5 + sahara/api: 10.122.7.5 + heat/api: 10.122.7.5 + ceilometer/api: 10.122.7.5 + ex: 10.122.6.3 + ceph/public: 10.122.7.5 + mgmt/messaging: 10.122.7.5 + management: 10.122.7.5 + swift/api: 10.122.7.5 + storage: 10.122.9.2 + mgmt/corosync: 10.122.7.5 + cinder/api: 10.122.7.5 + public/vip: 10.122.6.3 + swift/replication: 10.122.9.2 + ceph/radosgw: 10.122.6.3 + admin/pxe: 10.122.5.6 + mongo/db: 10.122.7.5 + neutron/private: + neutron/floating: + fw-admin: 10.122.5.6 + glance/api: 10.122.7.5 + mgmt/vip: 10.122.7.5 + murano/api: 10.122.7.5 + nova/api: 10.122.7.5 + horizon: 10.122.7.5 + nova/migration: 10.122.7.5 + mgmt/memcache: 10.122.7.5 + cinder/iscsi: 10.122.9.2 + ceph/replication: 10.122.9.2 + user_node_name: Untitled (03:15) + node_roles: + - cinder + - controller + name: node-3 + node-2: + swift_zone: '2' + uid: '2' + fqdn: node-2.domain.local + network_roles: + keystone/api: 10.122.7.4 + neutron/api: 10.122.7.4 + mgmt/database: 10.122.7.4 + sahara/api: 10.122.7.4 + heat/api: 10.122.7.4 + ceilometer/api: 10.122.7.4 + ex: 10.122.6.4 + ceph/public: 10.122.7.4 + mgmt/messaging: 10.122.7.4 + management: 10.122.7.4 + swift/api: 10.122.7.4 + storage: 10.122.9.3 + mgmt/corosync: 10.122.7.4 + cinder/api: 10.122.7.4 + public/vip: 10.122.6.4 + swift/replication: 10.122.9.3 + ceph/radosgw: 10.122.6.4 + admin/pxe: 10.122.5.7 + mongo/db: 10.122.7.4 + neutron/private: + neutron/floating: + fw-admin: 10.122.5.7 + glance/api: 10.122.7.4 + mgmt/vip: 10.122.7.4 + murano/api: 10.122.7.4 + nova/api: 10.122.7.4 + horizon: 10.122.7.4 + nova/migration: 10.122.7.4 + mgmt/memcache: 10.122.7.4 + cinder/iscsi: 10.122.9.3 + ceph/replication: 10.122.9.3 + user_node_name: Untitled 
(68:63) + node_roles: + - cinder + - controller + name: node-2 + vips: + vrouter_pub: + network_role: public/vip + node_roles: + - controller + - primary-controller + namespace: vrouter + ipaddr: 10.122.6.5 + management: + network_role: mgmt/vip + node_roles: + - controller + - primary-controller + namespace: haproxy + ipaddr: 10.122.7.7 + public: + network_role: public/vip + node_roles: + - controller + - primary-controller + namespace: haproxy + ipaddr: 10.122.6.6 + vrouter: + network_role: mgmt/vip + node_roles: + - controller + - primary-controller + namespace: vrouter + ipaddr: 10.122.7.6 +network_scheme: + transformations: + - action: add-br + name: br-fw-admin + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-prv + provider: ovs + - action: add-patch + bridges: + - br-prv + - br-fw-admin + provider: ovs + mtu: 65000 + - action: add-port + bridge: br-fw-admin + name: eth0 + - action: add-port + bridge: br-mgmt + name: eth0.101 + - action: add-port + bridge: br-storage + name: eth0.102 + roles: + murano/api: br-mgmt + keystone/api: br-mgmt + neutron/api: br-mgmt + mgmt/database: br-mgmt + sahara/api: br-mgmt + ceilometer/api: br-mgmt + ceph/public: br-mgmt + mgmt/messaging: br-mgmt + management: br-mgmt + swift/api: br-mgmt + storage: br-storage + mgmt/corosync: br-mgmt + cinder/api: br-mgmt + swift/replication: br-storage + admin/pxe: br-fw-admin + mongo/db: br-mgmt + neutron/private: br-prv + fw-admin: br-fw-admin + glance/api: br-mgmt + heat/api: br-mgmt + mgmt/vip: br-mgmt + nova/api: br-mgmt + horizon: br-mgmt + nova/migration: br-mgmt + mgmt/memcache: br-mgmt + cinder/iscsi: br-storage + ceph/replication: br-storage + interfaces: + eth4: + vendor_specific: + driver: e1000 + bus_info: '0000:00:07.0' + eth3: + vendor_specific: + driver: e1000 + bus_info: '0000:00:06.0' + eth2: + vendor_specific: + driver: e1000 + bus_info: '0000:00:05.0' + eth1: + vendor_specific: + driver: e1000 + bus_info: 
'0000:00:04.0' + eth0: + vendor_specific: + driver: e1000 + bus_info: '0000:00:03.0' + version: '1.1' + provider: lnx + endpoints: + br-fw-admin: + IP: + - 10.122.5.4/24 + gateway: 10.122.5.2 + br-storage: + IP: + - 10.122.9.4/24 + br-mgmt: + IP: + - 10.122.7.3/24 + br-prv: + IP: none +heat: + db_password: GqzWSxBW + user_password: uMxK47eJ + enabled: true + auth_encryption_key: 9431f2b16d26488b896e64d236953521 + rabbit_password: jnXk99nV +storage_network_range: 10.122.9.0/24 +fuel_version: '7.0' +rabbit: + password: WYcvKQyZ +public_network_assignment: + assign_to_all_nodes: false + metadata: + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + label: Public network assignment +use_cinder: true +test_vm_image: + os_name: cirros + img_path: "/usr/share/cirros-testvm/cirros-x86_64-disk.img" + container_format: bare + min_ram: 64 + disk_format: qcow2 + glance_properties: '' + img_name: TestVM + public: 'true' +management_network_range: 10.122.7.0/24 +neutron_advanced_configuration: + neutron_dvr: false + neutron_l2_pop: false + metadata: + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 45 + label: Neutron Advanced Configuration +base_syslog: + syslog_port: '514' + syslog_server: 10.122.5.2 +vms_conf: [] +workloads_collector: + username: fuel_stats_user + enabled: true + create_user: false + password: ATGggm00 + tenant: services + metadata: + restrictions: + - action: hide + condition: 'true' + weight: 10 + label: Workloads Collector User +auth_key: '' +puppet_debug: true +access: + email: admin@localhost + password: admin + user: admin + tenant: admin + metadata: + weight: 10 + label: Access +last_controller: node-3 +fqdn: node-5.domain.local +use_ssl: + horizon: true + horizon_public: true + horizon_public_hostname: 'horizon.public.fuel.local' + horizon_public_usercert: true + horizon_public_certdata: 'somethinglikeacertificateforhorizon' + keystone: true + keystone_public: true + 
keystone_public_ip: '10.10.10.10' + keystone_public_hostname: 'keystone.public.fuel.local' + keystone_public_usercert: true + keystone_public_certdata: 'somethinglikeacertificateforkeystone' + keystone_internal: true + keystone_internal_ip: '20.20.20.20' + keystone_internal_hostname: 'keystone.internal.fuel.local' + keystone_internal_usercert: true + keystone_internal_certdata: 'somethinglikeacertificateforkeystone' + keystone_admin: true + keystone_admin_ip: '30.30.30.30' + keystone_admin_hostname: 'keystone.admin.fuel.local' + keystone_admin_usercert: true + keystone_admin_certdata: 'somethinglikeacertificateforkeystone' + nova: true + nova_public: true + nova_public_hostname: 'nova.public.fuel.local' + nova_public_usercert: true + nova_public_certdata: 'somethinglikeacertificatefornova' + nova_internal: true + nova_internal_hostname: 'nova.internal.fuel.local' + nova_internal_usercert: true + nova_internal_certdata: 'somethinglikeacertificatefornova' + nova_admin: true + nova_admin_hostname: 'nova.admin.fuel.local' + nova_admin_usercert: true + nova_admin_certdata: 'somethinglikeacertificatefornova' + heat: true + heat_public: true + heat_public_hostname: 'heat.public.fuel.local' + heat_public_usercert: true + heat_public_certdata: 'somethinglikeacertificateforheat' + heat_internal: true + heat_internal_hostname: 'heat.internal.fuel.local' + heat_internal_usercert: true + heat_internal_certdata: 'somethinglikeacertificateforheat' + heat_admin: true + heat_admin_hostname: 'heat.admin.fuel.local' + heat_admin_usercert: true + heat_admin_certdata: 'somethinglikeacertificateforheat' + glance: true + glance_public: true + glance_public_hostname: 'glance.public.fuel.local' + glance_public_usercert: true + glance_public_certdata: 'somethinglikeacertificateforglance' + glance_internal: true + glance_internal_hostname: 'glance.internal.fuel.local' + glance_internal_usercert: true + glance_internal_certdata: 'somethinglikeacertificateforglance' + glance_admin: true + 
glance_admin_hostname: 'glance.admin.fuel.local' + glance_admin_usercert: true + glance_admin_certdata: 'somethinglikeacertificateforglance' + cinder: true + cinder_public: true + cinder_public_hostname: 'cinder.public.fuel.local' + cinder_public_usercert: true + cinder_public_certdata: 'somethinglikeacertificateforcinder' + cinder_internal: true + cinder_internal_hostname: 'cinder.internal.fuel.local' + cinder_internal_usercert: true + cinder_internal_certdata: 'somethinglikeacertificateforcinder' + cinder_admin: true + cinder_admin_hostname: 'cinder.admin.fuel.local' + cinder_admin_usercert: true + cinder_admin_certdata: 'somethinglikeacertificateforcinder' + neutron: true + neutron_public: true + neutron_public_hostname: 'neutron.public.fuel.local' + neutron_public_usercert: true + neutron_public_certdata: 'somethinglikeacertificateforneutron' + neutron_internal: true + neutron_internal_hostname: 'neutron.internal.fuel.local' + neutron_internal_usercert: true + neutron_internal_certdata: 'somethinglikeacertificateforneutron' + neutron_admin: true + neutron_admin_hostname: 'neutron.admin.fuel.local' + neutron_admin_usercert: true + neutron_admin_certdata: 'somethinglikeacertificateforneutron' + swift: true + swift_public: true + swift_public_hostname: 'swift.public.fuel.local' + swift_public_usercert: true + swift_public_certdata: 'somethinglikeacertificateforswift' + swift_internal: true + swift_internal_hostname: 'swift.internal.fuel.local' + swift_internal_usercert: true + swift_internal_certdata: 'somethinglikeacertificateforswift' + swift_admin: true + swift_admin_hostname: 'swift.admin.fuel.local' + swift_admin_usercert: true + swift_admin_certdata: 'somethinglikeacertificateforswift' + sahara: true + sahara_public: true + sahara_public_hostname: 'sahara.public.fuel.local' + sahara_public_usercert: true + sahara_public_certdata: 'somethinglikeacertificateforsahara' + sahara_internal: true + sahara_internal_hostname: 'sahara.internal.fuel.local' + 
sahara_internal_usercert: true + sahara_internal_certdata: 'somethinglikeacertificateforsahara' + sahara_admin: true + sahara_admin_hostname: 'sahara.admin.fuel.local' + sahara_admin_usercert: true + sahara_admin_certdata: 'somethinglikeacertificateforsahara' + murano: true + murano_public: true + murano_public_hostname: 'murano.public.fuel.local' + murano_public_usercert: true + murano_public_certdata: 'somethinglikeacertificateformurano' + murano_internal: true + murano_internal_hostname: 'murano.internal.fuel.local' + murano_internal_usercert: true + murano_internal_certdata: 'somethinglikeacertificateformurano' + murano_admin: true + murano_admin_hostname: 'murano.admin.fuel.local' + murano_admin_usercert: true + murano_admin_certdata: 'somethinglikeacertificateformurano' + ceilometer: true + ceilometer_public: true + ceilometer_public_hostname: 'ceilometer.public.fuel.local' + ceilometer_public_usercert: true + ceilometer_public_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_internal: true + ceilometer_internal_hostname: 'ceilometer.internal.fuel.local' + ceilometer_internal_usercert: true + ceilometer_internal_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_admin: true + ceilometer_admin_hostname: 'ceilometer.admin.fuel.local' + ceilometer_admin_usercert: true + ceilometer_admin_certdata: 'somethinglikeacertificateforceilometer' + radosgw: true + radosgw_public: true + radosgw_public_hostname: 'radosgw.public.fuel.local' + radosgw_public_usercert: true + radosgw_public_certdata: 'somethinglikeacertificateforradosgw' +public_ssl: + hostname: public.fuel.local + horizon: true + services: true + cert_data: + content: 'somedataaboutyourkeypair' + cert_source: self_signed + metadata: + weight: 110 + label: Public TLS +auto_assign_floating_ip: false +vrouter_pub: + network_role: public/vip + ipaddr: 10.122.6.5 + node_roles: + - controller + - primary-controller + namespace: vrouter +mp: +- weight: '1' + point: '1' +- weight: '2' + 
point: '2' +neutron_mellanox: + vf_num: '16' + plugin: disabled + metadata: + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + enabled: true + weight: 50 + toggleable: false + label: Mellanox Neutron components +horizon: + secret_key: 1b0d6af1962c94205a5dbbffbb98ba66d651bd4bec78c7c385ad75eb07ad0fcb +debug: false +cinder: + db_password: qeN6frZT + user_password: 7Al0iWfl + fixed_key: 264ecf97cf69264f775e549fe5fd1ce3db6a92df94d37745819892803a83b19c +deployment_id: 1 +external_ntp: + ntp_list: 0.fuel.pool.ntp.org, 1.fuel.pool.ntp.org, 2.fuel.pool.ntp.org + metadata: + weight: 100 + label: Host OS NTP Servers +openstack_version_prev: diff --git a/tests/noop/astute.yaml/neut_vxlan_dvr.murano.sahara-primary-controller.overridden_ssl.yaml b/tests/noop/astute.yaml/neut_vxlan_dvr.murano.sahara-primary-controller.overridden_ssl.yaml new file mode 100644 index 0000000000..f34d6935d5 --- /dev/null +++ b/tests/noop/astute.yaml/neut_vxlan_dvr.murano.sahara-primary-controller.overridden_ssl.yaml @@ -0,0 +1,1196 @@ +access: + email: admin@localhost + metadata: + label: Access + weight: 10 + password: admin + tenant: admin + user: admin +auth_key: '' +auto_assign_floating_ip: false +base_syslog: + syslog_port: '514' + syslog_server: 10.108.0.2 +ceilometer: + db_password: ZcffCIm5 + enabled: false + metering_secret: 7aqxzabx + user_password: FQUfTQ6a +cinder: + db_password: 71kNkN9U + fixed_key: 0ded0202e2a355df942df2bacbaba992658a0345f68f2db6e1bdb6dbb8f682cf + user_password: O2st17AP +cobbler: + profile: ubuntu_1404_x86_64 +corosync: + group: 226.94.1.1 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: '12000' + verified: false +debug: false +deployment_id: 38 +deployment_mode: ha_compact +external_dns: + dns_list: 8.8.8.8, 8.8.4.4 + metadata: + label: Upstream DNS + weight: 90 +external_mongo: + hosts_ip: '' + metadata: + label: External MongoDB + restrictions: + - action: hide + 
condition: settings:additional_components.mongo.value == false + weight: 20 + mongo_db_name: ceilometer + mongo_password: ceilometer + mongo_replset: '' + mongo_user: ceilometer +external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: 0.pool.ntp.org, 1.pool.ntp.org +use_ssl: + horizon: true + horizon_public: true + horizon_public_hostname: 'horizon.public.fuel.local' + horizon_public_usercert: true + horizon_public_certdata: 'somethinglikeacertificateforhorizon' + keystone: true + keystone_public: true + keystone_public_ip: '10.10.10.10' + keystone_public_hostname: 'keystone.public.fuel.local' + keystone_public_usercert: true + keystone_public_certdata: 'somethinglikeacertificateforkeystone' + keystone_internal: true + keystone_internal_ip: '20.20.20.20' + keystone_internal_hostname: 'keystone.internal.fuel.local' + keystone_internal_usercert: true + keystone_internal_certdata: 'somethinglikeacertificateforkeystone' + keystone_admin: true + keystone_admin_ip: '30.30.30.30' + keystone_admin_hostname: 'keystone.admin.fuel.local' + keystone_admin_usercert: true + keystone_admin_certdata: 'somethinglikeacertificateforkeystone' + nova: true + nova_public: true + nova_public_hostname: 'nova.public.fuel.local' + nova_public_usercert: true + nova_public_certdata: 'somethinglikeacertificatefornova' + nova_internal: true + nova_internal_hostname: 'nova.internal.fuel.local' + nova_internal_usercert: true + nova_internal_certdata: 'somethinglikeacertificatefornova' + nova_admin: true + nova_admin_hostname: 'nova.admin.fuel.local' + nova_admin_usercert: true + nova_admin_certdata: 'somethinglikeacertificatefornova' + heat: true + heat_public: true + heat_public_hostname: 'heat.public.fuel.local' + heat_public_usercert: true + heat_public_certdata: 'somethinglikeacertificateforheat' + heat_internal: true + heat_internal_hostname: 'heat.internal.fuel.local' + heat_internal_usercert: true + heat_internal_certdata: 'somethinglikeacertificateforheat' + heat_admin: 
true + heat_admin_hostname: 'heat.admin.fuel.local' + heat_admin_usercert: true + heat_admin_certdata: 'somethinglikeacertificateforheat' + glance: true + glance_public: true + glance_public_hostname: 'glance.public.fuel.local' + glance_public_usercert: true + glance_public_certdata: 'somethinglikeacertificateforglance' + glance_internal: true + glance_internal_hostname: 'glance.internal.fuel.local' + glance_internal_usercert: true + glance_internal_certdata: 'somethinglikeacertificateforglance' + glance_admin: true + glance_admin_hostname: 'glance.admin.fuel.local' + glance_admin_usercert: true + glance_admin_certdata: 'somethinglikeacertificateforglance' + cinder: true + cinder_public: true + cinder_public_hostname: 'cinder.public.fuel.local' + cinder_public_usercert: true + cinder_public_certdata: 'somethinglikeacertificateforcinder' + cinder_internal: true + cinder_internal_hostname: 'cinder.internal.fuel.local' + cinder_internal_usercert: true + cinder_internal_certdata: 'somethinglikeacertificateforcinder' + cinder_admin: true + cinder_admin_hostname: 'cinder.admin.fuel.local' + cinder_admin_usercert: true + cinder_admin_certdata: 'somethinglikeacertificateforcinder' + neutron: true + neutron_public: true + neutron_public_hostname: 'neutron.public.fuel.local' + neutron_public_usercert: true + neutron_public_certdata: 'somethinglikeacertificateforneutron' + neutron_internal: true + neutron_internal_hostname: 'neutron.internal.fuel.local' + neutron_internal_usercert: true + neutron_internal_certdata: 'somethinglikeacertificateforneutron' + neutron_admin: true + neutron_admin_hostname: 'neutron.admin.fuel.local' + neutron_admin_usercert: true + neutron_admin_certdata: 'somethinglikeacertificateforneutron' + swift: true + swift_public: true + swift_public_hostname: 'swift.public.fuel.local' + swift_public_usercert: true + swift_public_certdata: 'somethinglikeacertificateforswift' + swift_internal: true + swift_internal_hostname: 'swift.internal.fuel.local' + 
swift_internal_usercert: true + swift_internal_certdata: 'somethinglikeacertificateforswift' + swift_admin: true + swift_admin_hostname: 'swift.admin.fuel.local' + swift_admin_usercert: true + swift_admin_certdata: 'somethinglikeacertificateforswift' + sahara: true + sahara_public: true + sahara_public_hostname: 'sahara.public.fuel.local' + sahara_public_usercert: true + sahara_public_certdata: 'somethinglikeacertificateforsahara' + sahara_internal: true + sahara_internal_hostname: 'sahara.internal.fuel.local' + sahara_internal_usercert: true + sahara_internal_certdata: 'somethinglikeacertificateforsahara' + sahara_admin: true + sahara_admin_hostname: 'sahara.admin.fuel.local' + sahara_admin_usercert: true + sahara_admin_certdata: 'somethinglikeacertificateforsahara' + murano: true + murano_public: true + murano_public_hostname: 'murano.public.fuel.local' + murano_public_usercert: true + murano_public_certdata: 'somethinglikeacertificateformurano' + murano_internal: true + murano_internal_hostname: 'murano.internal.fuel.local' + murano_internal_usercert: true + murano_internal_certdata: 'somethinglikeacertificateformurano' + murano_admin: true + murano_admin_hostname: 'murano.admin.fuel.local' + murano_admin_usercert: true + murano_admin_certdata: 'somethinglikeacertificateformurano' + ceilometer: true + ceilometer_public: true + ceilometer_public_hostname: 'ceilometer.public.fuel.local' + ceilometer_public_usercert: true + ceilometer_public_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_internal: true + ceilometer_internal_hostname: 'ceilometer.internal.fuel.local' + ceilometer_internal_usercert: true + ceilometer_internal_certdata: 'somethinglikeacertificateforceilometer' + ceilometer_admin: true + ceilometer_admin_hostname: 'ceilometer.admin.fuel.local' + ceilometer_admin_usercert: true + ceilometer_admin_certdata: 'somethinglikeacertificateforceilometer' + radosgw: true + radosgw_public: true + radosgw_public_hostname: 
'radosgw.public.fuel.local' + radosgw_public_usercert: true + radosgw_public_certdata: 'somethinglikeacertificateforradosgw' +public_ssl: + metadata: + label: Public TLS + weight: 110 + horizon: true + services: true + cert_source: self_signed + cert_data: + content: 'somedataaboutyourkeypair' + hostname: public.fuel.local +fail_if_error: true +fqdn: node-128.test.domain.local +fuel_version: '6.1' +glance: + db_password: 0UYCFNfc + image_cache_max_size: '13868466176' + user_password: 94lWbeNn +heat: + auth_encryption_key: 8edb899a7e81e56abe51639880aa32dd + db_password: AuaPc3Yq + enabled: true + rabbit_password: Nmn2wr9S + user_password: EWJfBLJ9 +kernel_params: + kernel: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 + nomodeset + metadata: + label: Kernel parameters + weight: 40 +keystone: + admin_token: 0be9G8hj + db_password: 32TWl29R +last_controller: node-131 +libvirt_type: qemu +management_network_range: 192.168.0.0/24 +management_vip: 192.168.0.6 +management_vrouter_vip: 192.168.0.7 +master_ip: 10.108.0.2 +metadata: + label: Common + weight: 30 +mongo: + enabled: false +mp: +- point: '1' + weight: '1' +- point: '2' + weight: '2' +murano: + db_password: R3SuvZbh + enabled: true + rabbit_password: ZNdTAgF3 + user_password: xP8WtHQw +murano_settings: + metadata: + label: Murano Settings + restrictions: + - action: hide + condition: settings:additional_components.murano.value == false + weight: 20 + murano_repo_url: http://catalog.openstack.org/ +mysql: + root_password: Lz18BpbQ + wsrep_password: JrlrVOHu +network_metadata: + nodes: + node-118: + swift_zone: '1' + uid: '118' + fqdn: node-118.test.domain.local + network_roles: + keystone/api: 192.168.0.1 + neutron/api: 192.168.0.1 + mgmt/database: 192.168.0.1 + sahara/api: 192.168.0.1 + heat/api: 192.168.0.1 + ceilometer/api: 192.168.0.1 + ex: + ceph/public: 192.168.0.1 + ceph/radosgw: + management: 192.168.0.1 + swift/api: 192.168.0.1 + mgmt/api: 192.168.0.1 + storage: 192.168.1.1 + 
mgmt/corosync: 192.168.0.1 + cinder/api: 192.168.0.1 + public/vip: + swift/replication: 192.168.1.1 + mgmt/messaging: 192.168.0.1 + neutron/mesh: 192.168.0.1 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.1 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.1 + mgmt/vip: 192.168.0.1 + murano/api: 192.168.0.1 + nova/api: 192.168.0.1 + horizon: 192.168.0.1 + mgmt/memcache: 192.168.0.1 + cinder/iscsi: 192.168.1.1 + ceph/replication: 192.168.1.1 + user_node_name: Untitled (6a:e7) + node_roles: + - cinder + name: node-118 + node-128: + swift_zone: '1' + uid: '128' + fqdn: node-128.test.domain.local + network_roles: + keystone/api: 192.168.0.2 + neutron/api: 192.168.0.2 + mgmt/database: 192.168.0.2 + sahara/api: 192.168.0.2 + heat/api: 192.168.0.2 + ceilometer/api: 192.168.0.2 + ex: 172.16.0.2 + ceph/public: 192.168.0.2 + ceph/radosgw: 172.16.0.2 + management: 192.168.0.2 + swift/api: 192.168.0.2 + mgmt/api: 192.168.0.2 + storage: 192.168.1.2 + mgmt/corosync: 192.168.0.2 + cinder/api: 192.168.0.2 + public/vip: 172.16.0.2 + swift/replication: 192.168.1.2 + mgmt/messaging: 192.168.0.2 + neutron/mesh: 192.168.0.2 + admin/pxe: 10.108.0.3 + mongo/db: 192.168.0.2 + neutron/private: + neutron/floating: + fw-admin: 10.108.0.3 + glance/api: 192.168.0.2 + mgmt/vip: 192.168.0.2 + murano/api: 192.168.0.2 + nova/api: 192.168.0.2 + horizon: 192.168.0.2 + mgmt/memcache: 192.168.0.2 + cinder/iscsi: 192.168.1.2 + ceph/replication: 192.168.1.2 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-controller + name: node-128 + node-129: + swift_zone: '1' + uid: '129' + fqdn: node-129.test.domain.local + network_roles: + keystone/api: 192.168.0.3 + neutron/api: 192.168.0.3 + mgmt/database: 192.168.0.3 + sahara/api: 192.168.0.3 + heat/api: 192.168.0.3 + ceilometer/api: 192.168.0.3 + ex: 172.16.0.3 + ceph/public: 192.168.0.3 + ceph/radosgw: 172.16.0.3 + management: 192.168.0.3 + swift/api: 192.168.0.3 + mgmt/api: 192.168.0.3 + storage: 192.168.1.3 + 
mgmt/corosync: 192.168.0.3 + cinder/api: 192.168.0.3 + public/vip: 172.16.0.3 + swift/replication: 192.168.1.3 + mgmt/messaging: 192.168.0.3 + neutron/mesh: 192.168.0.3 + admin/pxe: 10.108.0.6 + mongo/db: 192.168.0.3 + neutron/private: + neutron/floating: + fw-admin: 10.108.0.6 + glance/api: 192.168.0.3 + mgmt/vip: 192.168.0.3 + murano/api: 192.168.0.3 + nova/api: 192.168.0.3 + horizon: 192.168.0.3 + mgmt/memcache: 192.168.0.3 + cinder/iscsi: 192.168.1.3 + ceph/replication: 192.168.1.3 + user_node_name: Untitled (6a:e7) + node_roles: + - controller + name: node-129 + node-131: + swift_zone: '1' + uid: '131' + fqdn: node-131.test.domain.local + network_roles: + keystone/api: 192.168.0.4 + neutron/api: 192.168.0.4 + mgmt/database: 192.168.0.4 + sahara/api: 192.168.0.4 + heat/api: 192.168.0.4 + ceilometer/api: 192.168.0.4 + ex: 172.16.0.4 + ceph/public: 192.168.0.4 + ceph/radosgw: 172.16.0.4 + management: 192.168.0.4 + swift/api: 192.168.0.4 + mgmt/api: 192.168.0.4 + storage: 192.168.1.4 + mgmt/corosync: 192.168.0.4 + cinder/api: 192.168.0.4 + public/vip: 172.16.0.4 + swift/replication: 192.168.1.4 + mgmt/messaging: 192.168.0.4 + neutron/mesh: 192.168.0.4 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.4 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.4 + mgmt/vip: 192.168.0.4 + murano/api: 192.168.0.4 + nova/api: 192.168.0.4 + horizon: 192.168.0.4 + mgmt/memcache: 192.168.0.4 + cinder/iscsi: 192.168.1.4 + ceph/replication: 192.168.1.4 + user_node_name: Untitled (6a:e7) + node_roles: + - controller + name: node-131 + node-132: + swift_zone: '1' + uid: '132' + fqdn: node-132.test.domain.local + network_roles: + keystone/api: 192.168.0.5 + neutron/api: 192.168.0.5 + mgmt/database: 192.168.0.5 + sahara/api: 192.168.0.5 + heat/api: 192.168.0.5 + ceilometer/api: 192.168.0.5 + ex: + ceph/public: 192.168.0.5 + ceph/radosgw: + management: 192.168.0.5 + swift/api: 192.168.0.5 + mgmt/api: 192.168.0.5 + storage: 192.168.1.5 + mgmt/corosync: 
192.168.0.5 + cinder/api: 192.168.0.5 + public/vip: + swift/replication: 192.168.1.5 + mgmt/messaging: 192.168.0.5 + neutron/mesh: 192.168.0.5 + admin/pxe: 10.108.0.4 + mongo/db: 192.168.0.5 + neutron/private: + neutron/floating: + fw-admin: 10.108.0.4 + glance/api: 192.168.0.5 + mgmt/vip: 192.168.0.5 + murano/api: 192.168.0.5 + nova/api: 192.168.0.5 + horizon: 192.168.0.5 + mgmt/memcache: 192.168.0.5 + cinder/iscsi: 192.168.1.5 + ceph/replication: 192.168.1.5 + user_node_name: Untitled (6a:e7) + node_roles: + - compute + name: node-132 + vips: + vrouter: + ipaddr: 192.168.0.3 + management: + ipaddr: 192.168.0.2 + public: + ipaddr: 10.109.1.2 + vrouter_pub: + ipaddr: 10.109.1.3 +network_scheme: + endpoints: + br-ex: + IP: + - 172.16.0.2/24 + gateway: 172.16.0.1 + vendor_specific: + phy_interfaces: + - eth1 + br-floating: + IP: none + br-fw-admin: + IP: + - 10.108.0.3/24 + br-mgmt: + IP: + - 192.168.0.2/24 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 101 + br-storage: + IP: + - 192.168.1.2/24 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 102 + interfaces: + eth0: + vendor_specific: + bus_info: '0000:00:03.0' + driver: e1000 + eth1: + vendor_specific: + bus_info: '0000:00:04.0' + driver: e1000 + eth2: + vendor_specific: + bus_info: '0000:00:05.0' + driver: e1000 + eth3: + vendor_specific: + bus_info: '0000:00:06.0' + driver: e1000 + eth4: + vendor_specific: + bus_info: '0000:00:07.0' + driver: e1000 + provider: lnx + roles: + ex: br-ex + public/vip: br-ex + neutron/floating: br-floating + storage: br-storage + keystone/api: br-mgmt + neutron/api: br-mgmt + mgmt/database: br-mgmt + sahara/api: br-mgmt + ceilometer/api: br-mgmt + mgmt/vip: br-mgmt + ceph/public: br-mgmt + mgmt/messaging: br-mgmt + management: br-mgmt + swift/api: br-mgmt + mgmt/api: br-mgmt + storage: br-storage + mgmt/corosync: br-mgmt + cinder/api: br-mgmt + swift/replication: br-storage + neutron/mesh: br-mgmt + admin/pxe: br-fw-admin + mongo/db: br-mgmt + neutron/private: br-prv 
+ fw-admin: br-fw-admin + glance/api: br-mgmt + heat/api: br-mgmt + murano/api: br-mgmt + nova/api: br-mgmt + horizon: br-mgmt + mgmt/memcache: br-mgmt + cinder/iscsi: br-storage + ceph/replication: br-storage + neutron/mesh: br-mgmt + transformations: + - action: add-br + name: br-fw-admin + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-ex + - action: add-br + name: br-floating + provider: ovs + - action: add-patch + bridges: + - br-floating + - br-ex + provider: ovs + - action: add-port + bridge: br-fw-admin + name: eth0 + - action: add-port + bridge: br-storage + name: eth0.102 + - action: add-port + bridge: br-mgmt + name: eth0.101 + - action: add-port + bridge: br-ex + name: eth1 + version: '1.1' +neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: disabled + vf_num: '16' +nodes: +- fqdn: node-118.test.domain.local + internal_address: 192.168.0.1 + internal_netmask: 255.255.255.0 + name: node-118 + role: cinder + storage_address: 192.168.1.1 + storage_netmask: 255.255.255.0 + swift_zone: '118' + uid: '118' + user_node_name: Untitled (1d:4b) +- fqdn: node-128.test.domain.local + internal_address: 192.168.0.2 + internal_netmask: 255.255.255.0 + name: node-128 + public_address: 172.16.0.2 + public_netmask: 255.255.255.0 + role: primary-controller + storage_address: 192.168.1.2 + storage_netmask: 255.255.255.0 + swift_zone: '128' + uid: '128' + user_node_name: Untitled (6f:9d) +- fqdn: node-129.test.domain.local + internal_address: 192.168.0.3 + internal_netmask: 255.255.255.0 + name: node-129 + public_address: 172.16.0.3 + public_netmask: 255.255.255.0 + role: controller + storage_address: 192.168.1.3 + storage_netmask: 255.255.255.0 + swift_zone: '129' + uid: '129' + user_node_name: Untitled (74:27) +- fqdn: node-131.test.domain.local + internal_address: 192.168.0.4 + internal_netmask: 255.255.255.0 + name: node-131 + public_address: 
172.16.0.4 + public_netmask: 255.255.255.0 + role: controller + storage_address: 192.168.1.4 + storage_netmask: 255.255.255.0 + swift_zone: '131' + uid: '131' + user_node_name: Untitled (34:45) +- fqdn: node-132.test.domain.local + internal_address: 192.168.0.5 + internal_netmask: 255.255.255.0 + name: node-132 + role: compute + storage_address: 192.168.1.5 + storage_netmask: 255.255.255.0 + swift_zone: '132' + uid: '132' + user_node_name: Untitled (18:c9) +nova: + db_password: mqnsUMgC + state_path: /var/lib/nova + user_password: fj4wVCEs +nova_quota: false +online: true +openstack_version: 2014.2-6.1 +openstack_version_prev: null +priority: 100 +provision: + codename: trusty + image_data: + /: + container: gzip + format: ext4 + uri: http://10.108.0.2:8080/targetimages/env_38_ubuntu_1404_amd64.img.gz + /boot: + container: gzip + format: ext2 + uri: http://10.108.0.2:8080/targetimages/env_38_ubuntu_1404_amd64-boot.img.gz + metadata: + label: Provision + weight: 80 + method: image +public_network_assignment: + assign_to_all_nodes: false + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 +neutron_advanced_configuration: + neutron_dvr: true + neutron_l2_pop: true +public_vip: 172.16.0.5 +public_vrouter_vip: 172.16.0.6 +puppet: + manifests: rsync://10.108.0.2:/puppet/2014.2-6.1/manifests/ + modules: rsync://10.108.0.2:/puppet/2014.2-6.1/modules/ +puppet_debug: true +quantum: true +quantum_settings: + default_floating_net: net04custom_ext + default_private_net: net04custom + default_router: router04custom + L2: + base_mac: fa:16:3e:00:00:00 + phys_nets: + physnet1: + bridge: br-floating + segmentation_type: tun + tunnel_id_ranges: 2:65535 + L3: + use_namespaces: true + database: + passwd: QRpCfPk8 + keystone: + admin_password: oT56DSZF + metadata: + metadata_proxy_shared_secret: fp618p5V + predefined_networks: + net04custom: + L2: + network_type: vxlan + physnet: null + router_ext: 
false + segment_id: null + L3: + enable_dhcp: true + floating: null + gateway: 192.168.111.1 + nameservers: + - 8.8.4.4 + - 8.8.8.8 + subnet: 192.168.111.0/24 + shared: false + tenant: admin + net04custom_ext: + L2: + network_type: flat + physnet: physnet1 + router_ext: true + segment_id: null + L3: + enable_dhcp: false + floating: 172.16.0.130:172.16.0.254 + gateway: 172.16.0.1 + nameservers: [] + subnet: 172.16.0.0/24 + shared: false + tenant: admin +rabbit: + password: c7fQJeSe +repo_setup: + installer_initrd: + local: /var/www/nailgun/ubuntu/x86_64/images/initrd.gz + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz + installer_kernel: + local: /var/www/nailgun/ubuntu/x86_64/images/linux + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux + metadata: + label: Repositories + weight: 50 + repos: + - name: ubuntu + priority: null + section: main universe multiverse + suite: trusty + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-updates + priority: null + section: main universe multiverse + suite: trusty-updates + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-security + priority: null + section: main universe multiverse + suite: trusty-security + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: mos + priority: 1050 + section: main restricted + suite: mos6.1 + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-updates + priority: 1050 + section: main restricted + suite: mos6.1-updates + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-security + priority: 1050 + section: main restricted + suite: mos6.1-security + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-holdback + priority: 1100 + section: main restricted + suite: mos6.1-holdback + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ +resume_guests_state_on_host_boot: true 
+role: primary-controller +sahara: + db_password: f0jl4v47 + enabled: true + user_password: pJc2zAOx +status: discover +storage: + ephemeral_ceph: false + images_ceph: false + images_vcenter: false + iser: false + metadata: + label: Storage + weight: 60 + objects_ceph: false + osd_pool_size: '2' + pg_num: 128 + per_pool_pg_nums: + default_pg_num: 128 + cinder_volume: 2048 + compute: 1024 + backups: 512 + ".rgw": 512 + images: 256 + volumes_ceph: false + volumes_lvm: true +storage_network_range: 192.168.1.0/24 +swift: + user_password: BP92J6tg +syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: '514' + syslog_server: '' + syslog_transport: tcp +tasks: +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hiera/hiera.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 100 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/globals/globals.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 200 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/logging/logging.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 300 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/tools/tools.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 400 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/umm/umm.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 500 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/netconfig/netconfig.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 600 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: 
/etc/puppet/modules/osnailyfacter/modular/hosts/hosts.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 700 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/firewall/firewall.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 800 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster/cluster.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 900 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/virtual_ips/virtual_ips.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1000 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/virtual_ips/conntrackd.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1100 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster-haproxy/cluster-haproxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1200 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-haproxy/openstack-haproxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1300 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/dns/dns-server.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1400 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/dns/dns-client.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1500 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ntp/ntp-server.pp + puppet_modules: 
/etc/puppet/modules + timeout: 3600 + priority: 1600 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/database/database.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1700 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceilometer/controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1800 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceilometer/radosgw_user.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1800 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/rabbitmq/rabbitmq.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1900 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/memcached/memcached.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2000 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/keystone/keystone.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2100 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-cinder/openstack-cinder.pp + puppet_modules: /etc/puppet/modules + timeout: 1200 + priority: 2200 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/glance/glance.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2300 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-controller/openstack-controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2400 + type: 
puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-network/openstack-network-controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2500 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/heat/heat.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2600 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/horizon/horizon.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2700 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/murano/murano.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2800 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/sahara/sahara.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2900 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/api-proxy/api-proxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3000 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/mon.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3100 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3200 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/swift/swift.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3300 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: 
/etc/puppet/modules/osnailyfacter/modular/roles/controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3400 + type: puppet + uids: + - '128' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/swift/rebalance_cronjob.pp + puppet_modules: /etc/puppet/modules + timeout: 300 + priority: 3500 + type: puppet + uids: + - '128' +test_vm_image: + container_format: bare + disk_format: qcow2 + glance_properties: '' + img_name: TestVM + img_path: /usr/share/cirros-testvm/cirros-x86_64-disk.img + min_ram: 64 + os_name: cirros + public: 'true' +uid: '128' +use_cinder: true +use_cow_images: true +use_vcenter: false +user_node_name: Untitled (6f:9d) +workloads_collector: + enabled: true + metadata: + label: Workloads Collector User + restrictions: + - action: hide + condition: 'true' + weight: 10 + password: 1r3ROjcQ + tenant: services + username: workloads_collector diff --git a/tests/noop/spec/hosts/ceilometer/keystone_spec.rb b/tests/noop/spec/hosts/ceilometer/keystone_spec.rb index 16acf970e8..2b89b666d8 100644 --- a/tests/noop/spec/hosts/ceilometer/keystone_spec.rb +++ b/tests/noop/spec/hosts/ceilometer/keystone_spec.rb @@ -5,20 +5,29 @@ manifest = 'ceilometer/keystone.pp' describe manifest do shared_examples 'catalog' do it 'should set empty trusts_delegated_roles for ceilometer auth' do - contain_class('ceilometer::keystone::auth') + should contain_class('ceilometer::keystone::auth') end it 'should use either public_vip or management_vip' do - public_vip = Noop.hiera('public_vip') - public_ssl = Noop.hiera_structure('public_ssl/services') - if public_ssl + internal_protocol = 'http' + internal_address = Noop.hiera('management_vip') + admin_protocol = 'http' + admin_address = internal_address + + if Noop.hiera_structure('use_ssl', false) + public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/ceilometer_public_hostname') + internal_protocol = 'https' + internal_address = 
Noop.hiera_structure('use_ssl/ceilometer_internal_hostname') + admin_protocol = 'https' + admin_address = Noop.hiera_structure('use_ssl/ceilometer_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services') public_address = Noop.hiera_structure('public_ssl/hostname') public_protocol = 'https' else - public_address = public_vip + public_address = Noop.hiera('public_vip') public_protocol = 'http' end - admin_address = Noop.hiera 'management_vip' password = Noop.hiera_structure 'ceilometer/user_password' auth_name = Noop.hiera_structure 'ceilometer/auth_name', 'ceilometer' @@ -29,9 +38,10 @@ describe manifest do region = Noop.hiera_structure 'ceilometer/region', 'RegionOne' public_url = "#{public_protocol}://#{public_address}:8777" - admin_url = "http://#{admin_address}:8777" + internal_url = "#{internal_protocol}://#{internal_address}:8777" + admin_url = "#{admin_protocol}://#{admin_address}:8777" - contain_class('ceilometer::keystone::auth').with( + should contain_class('ceilometer::keystone::auth').with( 'password' => password, 'auth_name' => auth_name, 'configure_endpoint' => configure_endpoint, @@ -39,7 +49,7 @@ describe manifest do 'configure_user_role' => configure_user_role, 'service_name' => service_name, 'public_url' => public_url, - 'internal_url' => admin_url, + 'internal_url' => internal_url, 'admin_url' => admin_url, 'region' => region ) diff --git a/tests/noop/spec/hosts/glance/keystone_spec.rb b/tests/noop/spec/hosts/glance/keystone_spec.rb index a84d8589f6..11d3cbca20 100644 --- a/tests/noop/spec/hosts/glance/keystone_spec.rb +++ b/tests/noop/spec/hosts/glance/keystone_spec.rb @@ -4,15 +4,24 @@ manifest = 'glance/keystone.pp' describe manifest do shared_examples 'catalog' do - public_vip = Noop.hiera('public_vip') - admin_address = Noop.hiera('management_vip') - public_ssl = Noop.hiera_structure('public_ssl/services') - if public_ssl + internal_protocol = 'http' + internal_address = Noop.hiera('management_vip') + admin_protocol = 'http' + 
admin_address = internal_address + + if Noop.hiera_structure('use_ssl', false) + public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/glance_public_hostname') + internal_protocol = 'https' + internal_address = Noop.hiera_structure('use_ssl/glance_internal_hostname') + admin_protocol = 'https' + admin_address = Noop.hiera_structure('use_ssl/glance_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services') public_address = Noop.hiera_structure('public_ssl/hostname') public_protocol = 'https' else - public_address = public_vip + public_address = Noop.hiera('public_vip') public_protocol = 'http' end @@ -24,7 +33,8 @@ describe manifest do region = Noop.hiera_structure('glance/region', 'RegionOne') service_name = Noop.hiera_structure('glance/service_name', 'glance') public_url = "#{public_protocol}://#{public_address}:9292" - admin_url = "http://#{admin_address}:9292" + internal_url = "#{internal_protocol}://#{internal_address}:9292" + admin_url = "#{admin_protocol}://#{admin_address}:9292" it 'should declare glance::keystone::auth class correctly' do should contain_class('glance::keystone::auth').with( @@ -35,8 +45,8 @@ describe manifest do 'configure_user_role' => configure_user_role, 'service_name' => service_name, 'public_url' => public_url, + 'internal_url' => internal_url, 'admin_url' => admin_url, - 'internal_url' => admin_url, 'region' => region, ) end diff --git a/tests/noop/spec/hosts/heat/heat_spec.rb b/tests/noop/spec/hosts/heat/heat_spec.rb index 04f9860ec4..f186010772 100644 --- a/tests/noop/spec/hosts/heat/heat_spec.rb +++ b/tests/noop/spec/hosts/heat/heat_spec.rb @@ -13,18 +13,21 @@ describe manifest do Noop.puppet_function 'prepare_network_config', network_scheme end - let(:public_ip) do - Noop.hiera 'public_vip' + admin_auth_protocol = 'http' + admin_auth_address = Noop.hiera('service_endpoint') + if Noop.hiera_structure('use_ssl', false) + public_auth_protocol = 'https' + public_auth_address = 
Noop.hiera_structure('use_ssl/keystone_public_hostname') + admin_auth_protocol = 'https' + admin_auth_address = Noop.hiera_structure('use_ssl/keystone_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services') + public_auth_protocol = 'https' + public_auth_address = Noop.hiera_structure('public_ssl/hostname') + else + public_auth_protocol = 'http' + public_auth_address = Noop.hiera('public_vip') end - let(:service_endpoint) { Noop.hiera('service_endpoint') } - let(:public_ssl) { Noop.hiera_structure('public_ssl/services') } - let(:public_ssl_hostname) do - Noop.hiera_structure('public_ssl/hostname') - end - let(:public_protocol) { public_ssl ? 'https' : 'http' } - let(:public_address) { public_ssl ? public_ssl_hostname : public_ip } - use_syslog = Noop.hiera 'use_syslog' default_log_levels_hash = Noop.hiera_hash 'default_log_levels' default_log_levels = Noop.puppet_function 'join_keys_to_values',default_log_levels_hash,'=' @@ -35,8 +38,8 @@ describe manifest do it 'should use auth_uri and identity_uri' do should contain_class('openstack::heat').with( - 'auth_uri' => "#{public_protocol}://#{public_address}:5000/v2.0/", - 'identity_uri' => "http://#{service_endpoint}:35357/" + 'auth_uri' => "#{public_auth_protocol}://#{public_auth_address}:5000/v2.0/", + 'identity_uri' => "#{admin_auth_protocol}://#{admin_auth_address}:35357/" ) end diff --git a/tests/noop/spec/hosts/heat/keystone_spec.rb b/tests/noop/spec/hosts/heat/keystone_spec.rb index 2b6058d9f4..8236dfd24c 100644 --- a/tests/noop/spec/hosts/heat/keystone_spec.rb +++ b/tests/noop/spec/hosts/heat/keystone_spec.rb @@ -10,34 +10,44 @@ describe manifest do ) end - public_vip = Noop.hiera('public_vip') - admin_address = Noop.hiera('management_vip') - public_ssl = Noop.hiera_structure('public_ssl/services') + internal_protocol = 'http' + internal_address = Noop.hiera('management_vip') + admin_protocol = 'http' + admin_address = internal_address - if public_ssl - public_address = 
Noop.hiera_structure('public_ssl/hostname') - public_protocol = 'https' - else - public_address = public_vip - public_protocol = 'http' - end + if Noop.hiera_structure('use_ssl', false) + public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/heat_public_hostname') + internal_protocol = 'https' + internal_address = Noop.hiera_structure('use_ssl/heat_internal_hostname') + admin_protocol = 'https' + admin_address = Noop.hiera_structure('use_ssl/heat_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services') + public_protocol = 'https' + public_address = Noop.hiera_structure('public_ssl/hostname') + else + public_address = Noop.hiera('public_vip') + public_protocol = 'http' + end - public_url = "#{public_protocol}://#{public_address}:8004/v1/%(tenant_id)s" - admin_url = "http://#{admin_address}:8004/v1/%(tenant_id)s" - public_url_cfn = "#{public_protocol}://#{public_address}:8000/v1" - admin_url_cfn = "http://#{admin_address}:8000/v1" + public_url = "#{public_protocol}://#{public_address}:8004/v1/%(tenant_id)s" + internal_url = "#{internal_protocol}://#{internal_address}:8004/v1/%(tenant_id)s" + admin_url = "#{admin_protocol}://#{admin_address}:8004/v1/%(tenant_id)s" + public_url_cfn = "#{public_protocol}://#{public_address}:8000/v1" + internal_url_cfn = "#{internal_protocol}://#{internal_address}:8000/v1" + admin_url_cfn = "#{admin_protocol}://#{admin_address}:8000/v1" - it 'class heat::keystone::auth should contain correct *_url' do - should contain_class('heat::keystone::auth').with('public_url' => public_url) - should contain_class('heat::keystone::auth').with('admin_url' => admin_url) - should contain_class('heat::keystone::auth').with('internal_url' => admin_url) - end + it 'class heat::keystone::auth should contain correct *_url' do + should contain_class('heat::keystone::auth').with('public_url' => public_url) + should contain_class('heat::keystone::auth').with('internal_url' => internal_url) + should 
contain_class('heat::keystone::auth').with('admin_url' => admin_url) + end - it 'class heat::keystone::auth_cfn should contain correct *_url' do - should contain_class('heat::keystone::auth_cfn').with('public_url' => public_url_cfn) - should contain_class('heat::keystone::auth_cfn').with('admin_url' => admin_url_cfn) - should contain_class('heat::keystone::auth_cfn').with('internal_url' => admin_url_cfn) - end + it 'class heat::keystone::auth_cfn should contain correct *_url' do + should contain_class('heat::keystone::auth_cfn').with('public_url' => public_url_cfn) + should contain_class('heat::keystone::auth_cfn').with('internal_url' => internal_url_cfn) + should contain_class('heat::keystone::auth_cfn').with('admin_url' => admin_url_cfn) + end end diff --git a/tests/noop/spec/hosts/murano/keystone_spec.rb b/tests/noop/spec/hosts/murano/keystone_spec.rb index 52d73f92ef..42942ee58b 100644 --- a/tests/noop/spec/hosts/murano/keystone_spec.rb +++ b/tests/noop/spec/hosts/murano/keystone_spec.rb @@ -7,16 +7,31 @@ describe manifest do let(:service_endpoint) { Noop.hiera 'service_endpoint' } let(:network_scheme) { Noop.hiera_hash 'network_scheme' } - let(:public_vip) { Noop.hiera 'public_vip' } - let(:public_ssl) { Noop.hiera_structure('public_ssl/services') } - let(:public_ssl_hostname) { Noop.hiera_structure('public_ssl/hostname') } - let(:api_bind_port) { '8082' } + api_bind_port = '8082' - let(:admin_url) { "http://#{service_endpoint}:#{api_bind_port}" } - let(:public_url) { "#{public_protocol}://#{public_address}:#{api_bind_port}" } + internal_protocol = 'http' + internal_address = Noop.hiera('service_endpoint') + admin_protocol = 'http' + admin_address = internal_address + + if Noop.hiera_structure('use_ssl', false) + public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/murano_public_hostname') + internal_protocol = 'https' + internal_address = Noop.hiera_structure('use_ssl/murano_internal_hostname') + admin_protocol = 'https' + admin_address 
= Noop.hiera_structure('use_ssl/murano_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services') + public_protocol = 'https' + public_address = Noop.hiera_structure('public_ssl/hostname') + else + public_protocol = 'http' + public_address = Noop.hiera 'public_vip' + end + public_url = "#{public_protocol}://#{public_address}:#{api_bind_port}" + internal_url = "#{internal_protocol}://#{internal_address}:#{api_bind_port}" + admin_url = "#{admin_protocol}://#{admin_address}:#{api_bind_port}" - let(:public_protocol) { public_ssl ? 'https' : 'http' } - let(:public_address) { public_ssl ? public_ssl_hostname : public_vip } let(:region) { Noop.hiera('region', 'RegionOne') } let(:tenant) { Noop.hiera_structure('murano_hash/tenant', 'services') } @@ -32,8 +47,8 @@ describe manifest do 'region' => region, 'tenant' => tenant, 'public_url' => public_url, - 'admin_url' => admin_url, - 'internal_url' => admin_url + 'internal_url' => internal_url, + 'admin_url' => admin_url ) end diff --git a/tests/noop/spec/hosts/murano/murano_spec.rb b/tests/noop/spec/hosts/murano/murano_spec.rb index 09d6655267..5fb0194805 100644 --- a/tests/noop/spec/hosts/murano/murano_spec.rb +++ b/tests/noop/spec/hosts/murano/murano_spec.rb @@ -62,13 +62,21 @@ describe manifest do "mysql://#{db_user}:#{db_password}@#{db_host}/#{db_name}?read_timeout=#{read_timeout}" end - let(:public_ssl_hostname) do - Noop.hiera_structure('public_ssl/hostname') + admin_auth_protocol = 'http' + admin_auth_address = Noop.hiera('service_endpoint') + if Noop.hiera_structure('use_ssl', false) + public_auth_protocol = 'https' + public_auth_address = Noop.hiera_structure('use_ssl/keystone_public_hostname') + admin_auth_protocol = 'https' + admin_auth_address = Noop.hiera_structure('use_ssl/keystone_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services', false) + public_auth_protocol = 'https' + public_auth_address = Noop.hiera_structure('public_ssl/hostname') + else + public_auth_protocol = 'http' + 
public_auth_address = Noop.hiera('public_vip') end - let(:public_protocol) { public_ssl ? 'https' : 'http' } - let(:public_address) { public_ssl ? public_ssl_hostname : public_ip } - let(:external_network) do if use_neutron Noop.puppet_function 'get_ext_net_name', predefined_networks @@ -94,11 +102,11 @@ describe manifest do 'use_stderr' => 'false', 'log_facility' => syslog_log_facility_murano, 'database_connection' => sql_connection, - 'auth_uri' => "#{public_protocol}://#{public_address}:5000/v2.0/", + 'auth_uri' => "#{public_auth_protocol}://#{public_auth_address}:5000/v2.0/", 'admin_user' => murano_user, 'admin_password' => murano_password, 'admin_tenant_name' => tenant, - 'identity_uri' => "http://#{service_endpoint}:35357/", + 'identity_uri' => "#{admin_auth_protocol}://#{admin_auth_address}:35357/", 'use_neutron' => use_neutron, 'rabbit_os_user' => rabbit_os_user, 'rabbit_os_password' => rabbit_os_password, diff --git a/tests/noop/spec/hosts/openstack-cinder/keystone_spec.rb b/tests/noop/spec/hosts/openstack-cinder/keystone_spec.rb index 7587da9eb8..81158dc257 100644 --- a/tests/noop/spec/hosts/openstack-cinder/keystone_spec.rb +++ b/tests/noop/spec/hosts/openstack-cinder/keystone_spec.rb @@ -8,45 +8,56 @@ describe manifest do contain_class('cinder::keystone::auth') end - public_vip = Noop.hiera('public_vip') - public_ssl = Noop.hiera_structure('public_ssl/services') - - if public_ssl - public_address = Noop.hiera_structure('public_ssl/hostname') - public_protocol = 'https' - else - public_address = public_vip public_protocol = 'http' - end - admin_address = Noop.hiera 'management_vip' - public_url = "#{public_protocol}://#{public_address}:8776/v1/%(tenant_id)s" - admin_url = "http://#{admin_address}:8776/v1/%(tenant_id)s" - public_url_v2 = "#{public_protocol}://#{public_address}:8776/v2/%(tenant_id)s" - admin_url_v2 = "http://#{admin_address}:8776/v2/%(tenant_id)s" + internal_protocol = 'http' + internal_address = Noop.hiera 'management_vip' + 
admin_protocol = 'http' + admin_address = internal_address - password = Noop.hiera_structure 'cinder/user_password' - auth_name = Noop.hiera_structure 'cinder/auth_name', 'cinder' - configure_endpoint = Noop.hiera_structure 'cinder/configure_endpoint', true - configure_user = Noop.hiera_structure 'cinder/configure_user_role', true - service_name = Noop.hiera_structure 'cinder/service_name', 'cinder' - region = Noop.hiera_structure 'cinder/region', 'RegionOne' + if Noop.hiera_structure('use_ssl', false) + public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/cinder_public_hostname') + internal_protocol = 'https' + internal_address = Noop.hiera_structure('use_ssl/cinder_internal_hostname') + admin_protocol = 'https' + admin_address = Noop.hiera_structure('use_ssl/cinder_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services') + public_address = Noop.hiera_structure('public_ssl/hostname') + public_protocol = 'https' + else + public_address = Noop.hiera('public_vip') + end - it 'should declare cinder::keystone::auth class with propper parameters' do - should contain_class('cinder::keystone::auth').with( - 'password' => password, - 'auth_name' => auth_name, - 'configure_endpoint' => configure_endpoint, - 'configure_user' => configure_user, - 'service_name' => service_name, - 'public_url' => public_url, - 'internal_url' => admin_url, - 'admin_url' => admin_url, - 'public_url_v2' => public_url_v2, - 'internal_url_v2' => admin_url_v2, - 'admin_url_v2' => admin_url_v2, - 'region' => region, - ) - end + public_url = "#{public_protocol}://#{public_address}:8776/v1/%(tenant_id)s" + internal_url = "#{internal_protocol}://#{internal_address}:8776/v1/%(tenant_id)s" + admin_url = "#{admin_protocol}://#{admin_address}:8776/v1/%(tenant_id)s" + public_url_v2 = "#{public_protocol}://#{public_address}:8776/v2/%(tenant_id)s" + internal_url_v2 = "#{internal_protocol}://#{internal_address}:8776/v2/%(tenant_id)s" + admin_url_v2 = 
"#{admin_protocol}://#{admin_address}:8776/v2/%(tenant_id)s" + + password = Noop.hiera_structure 'cinder/user_password' + auth_name = Noop.hiera_structure 'cinder/auth_name', 'cinder' + configure_endpoint = Noop.hiera_structure 'cinder/configure_endpoint', true + configure_user = Noop.hiera_structure 'cinder/configure_user_role', true + service_name = Noop.hiera_structure 'cinder/service_name', 'cinder' + region = Noop.hiera_structure 'cinder/region', 'RegionOne' + + it 'should declare cinder::keystone::auth class with proper parameters' do + should contain_class('cinder::keystone::auth').with( + 'password' => password, + 'auth_name' => auth_name, + 'configure_endpoint' => configure_endpoint, + 'configure_user' => configure_user, + 'service_name' => service_name, + 'public_url' => public_url, + 'internal_url' => internal_url, + 'admin_url' => admin_url, + 'public_url_v2' => public_url_v2, + 'internal_url_v2' => internal_url_v2, + 'admin_url_v2' => admin_url_v2, + 'region' => region, + ) + end end #end of shared examples diff --git a/tests/noop/spec/hosts/openstack-cinder/openstack-cinder_spec.rb b/tests/noop/spec/hosts/openstack-cinder/openstack-cinder_spec.rb index a533ebb794..0803593c59 100644 --- a/tests/noop/spec/hosts/openstack-cinder/openstack-cinder_spec.rb +++ b/tests/noop/spec/hosts/openstack-cinder/openstack-cinder_spec.rb @@ -33,9 +33,15 @@ describe manifest do ) end - keystone_auth_host = Noop.hiera 'service_endpoint' - auth_uri = "http://#{keystone_auth_host}:5000/" - identity_uri = "http://#{keystone_auth_host}:5000/" + if Noop.hiera_structure('use_ssl', false) + internal_auth_protocol = 'https' + keystone_auth_host = Noop.hiera_structure('use_ssl/keystone_internal_hostname') + else + internal_auth_protocol = 'http' + keystone_auth_host = Noop.hiera 'service_endpoint' + end + auth_uri = "#{internal_auth_protocol}://#{keystone_auth_host}:5000/" + identity_uri = "#{internal_auth_protocol}://#{keystone_auth_host}:5000/" it 'ensures cinder_config
contains auth_uri and identity_uri ' do should contain_cinder_config('keystone_authtoken/auth_uri').with(:value => auth_uri) @@ -60,7 +66,7 @@ describe manifest do it "should contain cinder config with privileged user settings" do should contain_cinder_config('DEFAULT/os_privileged_user_password').with_value(cinder_user_password) should contain_cinder_config('DEFAULT/os_privileged_user_tenant').with_value(cinder_tenant) - should contain_cinder_config('DEFAULT/os_privileged_user_auth_url').with_value("http://#{keystone_auth_host}:5000/") + should contain_cinder_config('DEFAULT/os_privileged_user_auth_url').with_value("#{internal_auth_protocol}://#{keystone_auth_host}:5000/") should contain_cinder_config('DEFAULT/os_privileged_user_name').with_value(cinder_user) should contain_cinder_config('DEFAULT/nova_catalog_admin_info').with_value('compute:nova:adminURL') should contain_cinder_config('DEFAULT/nova_catalog_info').with_value('compute:nova:internalURL') diff --git a/tests/noop/spec/hosts/openstack-controller/keystone_spec.rb b/tests/noop/spec/hosts/openstack-controller/keystone_spec.rb index a6ca03429d..655b1d05a4 100644 --- a/tests/noop/spec/hosts/openstack-controller/keystone_spec.rb +++ b/tests/noop/spec/hosts/openstack-controller/keystone_spec.rb @@ -8,35 +8,46 @@ describe manifest do contain_class('nova::keystone::auth') end - public_vip = Noop.hiera('public_vip') - admin_address = Noop.hiera('management_vip') - public_ssl = Noop.hiera_structure('public_ssl/services') + public_vip = Noop.hiera('public_vip') + internal_protocol = 'http' + internal_address = Noop.hiera('management_vip') + admin_protocol = 'http' + admin_address = internal_address + public_ssl = Noop.hiera_structure('public_ssl/services') - if public_ssl - public_address = Noop.hiera_structure('public_ssl/hostname') + if Noop.hiera_structure('use_ssl') + public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/nova_public_hostname') + internal_protocol = 'https' + 
internal_address = Noop.hiera_structure('use_ssl/nova_internal_hostname') + admin_protocol = 'https' + admin_address = Noop.hiera_structure('use_ssl/nova_admin_hostname') + elsif public_ssl public_protocol = 'https' + public_address = Noop.hiera_structure('public_ssl/hostname') else - public_address = public_vip public_protocol = 'http' + public_address = public_vip end compute_port = '8774' public_base_url = "#{public_protocol}://#{public_address}:#{compute_port}" - admin_base_url = "http://#{admin_address}:#{compute_port}" + internal_base_url = "#{internal_protocol}://#{internal_address}:#{compute_port}" + admin_base_url = "#{admin_protocol}://#{admin_address}:#{compute_port}" ec2_port = '8773' ec2_public_url = "#{public_protocol}://#{public_address}:#{ec2_port}/services/Cloud" - ec2_internal_url = "http://#{admin_address}:#{ec2_port}/services/Cloud" - ec2_admin_url = "http://#{admin_address}:#{ec2_port}/services/Admin" + ec2_internal_url = "#{internal_protocol}://#{internal_address}:#{ec2_port}/services/Cloud" + ec2_admin_url = "#{admin_protocol}://#{admin_address}:#{ec2_port}/services/Admin" it 'class nova::keystone::auth should contain correct *_url' do should contain_class('nova::keystone::auth').with( 'public_url' => "#{public_base_url}/v2/%(tenant_id)s", 'public_url_v3' => "#{public_base_url}/v3", + 'internal_url' => "#{internal_base_url}/v2/%(tenant_id)s", + 'internal_url_v3' => "#{internal_base_url}/v3", 'admin_url' => "#{admin_base_url}/v2/%(tenant_id)s", 'admin_url_v3' => "#{admin_base_url}/v3", - 'internal_url' => "#{admin_base_url}/v2/%(tenant_id)s", - 'internal_url_v3' => "#{admin_base_url}/v3", 'ec2_public_url' => ec2_public_url, 'ec2_admin_url' => ec2_admin_url, 'ec2_internal_url' => ec2_internal_url, diff --git a/tests/noop/spec/hosts/openstack-controller/openstack-controller_spec.rb b/tests/noop/spec/hosts/openstack-controller/openstack-controller_spec.rb index 32972ba117..5753766318 100644 --- 
a/tests/noop/spec/hosts/openstack-controller/openstack-controller_spec.rb +++ b/tests/noop/spec/hosts/openstack-controller/openstack-controller_spec.rb @@ -118,6 +118,12 @@ describe manifest do end if floating_ips_range && access_hash + if Noop.hiera_structure('use_ssl', false) + internal_auth_protocol = 'https' + keystone_host = Noop.hiera_structure('use_ssl/keystone_internal_hostname') + else + internal_auth_protocol = 'http' + end floating_ips_range.each do |ips_range| it "should configure nova floating IP range for #{ips_range}" do should contain_nova_floating_range(ips_range).with( @@ -126,7 +132,7 @@ describe manifest do 'username' => access_hash['user'], 'api_key' => access_hash['password'], 'auth_method' => 'password', - 'auth_url' => "http://#{keystone_host}:5000/v2.0/", + 'auth_url' => "#{internal_auth_protocol}://#{keystone_host}:5000/v2.0/", 'api_retries' => '10', ) end diff --git a/tests/noop/spec/hosts/openstack-network/compute-nova_spec.rb b/tests/noop/spec/hosts/openstack-network/compute-nova_spec.rb index 6c39ab9b4b..c387d6f922 100644 --- a/tests/noop/spec/hosts/openstack-network/compute-nova_spec.rb +++ b/tests/noop/spec/hosts/openstack-network/compute-nova_spec.rb @@ -131,10 +131,21 @@ describe manifest do :neutron_admin_tenant_name => admin_tenant_name, :neutron_region_name => region_name, :neutron_admin_username => admin_username, - :neutron_admin_auth_url => admin_auth_url, - :neutron_url => neutron_url, :neutron_ovs_bridge => neutron_integration_bridge, )} + if Noop.hiera_structure('use_ssl', false) + admin_identity_address = Noop.hiera_structure('use_ssl/keystone_admin_hostname') + neutron_internal_address = Noop.hiera_structure('use_ssl/neutron_internal_hostname') + it { expect(subject).to contain_class('nova::network::neutron').with( + :neutron_admin_auth_url => "https://#{admin_identity_address}:35357/v2.0", + :neutron_url => "https://#{neutron_internal_address}:9696", + )} + else + it { expect(subject).to 
contain_class('nova::network::neutron').with( + :neutron_admin_auth_url => admin_auth_url, + :neutron_url => neutron_url, + )} + end # it { expect(subject).to contain_augeas('sysctl-net.bridge.bridge-nf-call-arptables').with( :context => '/files/etc/sysctl.conf', diff --git a/tests/noop/spec/hosts/openstack-network/keystone_spec.rb b/tests/noop/spec/hosts/openstack-network/keystone_spec.rb index f128f8f388..645710c009 100644 --- a/tests/noop/spec/hosts/openstack-network/keystone_spec.rb +++ b/tests/noop/spec/hosts/openstack-network/keystone_spec.rb @@ -4,17 +4,24 @@ manifest = 'openstack-network/keystone.pp' describe manifest do shared_examples 'catalog' do - public_vip = Noop.hiera('public_vip') - public_ssl = Noop.hiera_structure('public_ssl/services', false) - if public_ssl + internal_protocol = 'http' + internal_address = Noop.hiera('management_vip') + admin_protocol = internal_protocol + admin_address = internal_address + if Noop.hiera_structure('use_ssl', false) + public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/neutron_public_hostname') + internal_protocol = 'https' + internal_address = Noop.hiera_structure('use_ssl/neutron_internal_hostname') + admin_protocol = 'https' + admin_address = Noop.hiera_structure('use_ssl/neutron_admin_hostname') + elsif Noop.hiera_structure('public_ssl/services', false) public_address = Noop.hiera_structure('public_ssl/hostname') public_protocol = 'https' else - public_address = public_vip public_protocol = 'http' + public_address = Noop.hiera('public_vip') end - admin_address = Noop.hiera('management_vip') - admin_protocol = 'http' region = Noop.hiera_structure('quantum_settings/region', 'RegionOne') password = Noop.hiera_structure('quantum_settings/keystone/admin_password') auth_name = Noop.hiera_structure('quantum_settings/auth_name', 'neutron') @@ -25,7 +32,7 @@ describe manifest do tenant = Noop.hiera_structure('quantum_settings/tenant', 'services') port ='9696' public_url = 
"#{public_protocol}://#{public_address}:#{port}" - internal_url = "#{admin_protocol}://#{admin_address}:#{port}" + internal_url = "#{internal_protocol}://#{internal_address}:#{port}" admin_url = "#{admin_protocol}://#{admin_address}:#{port}" use_neutron = Noop.hiera('use_neutron', false) diff --git a/tests/noop/spec/hosts/openstack-network/server-config_spec.rb b/tests/noop/spec/hosts/openstack-network/server-config_spec.rb index b3992f9652..50af477cc0 100644 --- a/tests/noop/spec/hosts/openstack-network/server-config_spec.rb +++ b/tests/noop/spec/hosts/openstack-network/server-config_spec.rb @@ -49,17 +49,72 @@ describe manifest do ) end - it 'auth options' do - identity_uri = "http://#{service_endpoint}:5000/" - ks = neutron_config['keystone'] - should contain_class('neutron::server').with( - 'auth_password' => ks.fetch('admin_password'), - 'auth_tenant' => ks.fetch('admin_tenant', 'services'), - 'auth_region' => Noop.hiera('region', 'RegionOne'), - 'auth_user' => ks.fetch('admin_user', 'neutron'), - 'identity_uri' => identity_uri, - 'auth_uri' => identity_uri, - ) + if Noop.hiera_structure('use_ssl', false) + context 'with overridden TLS for internal endpoints' do + internal_auth_protocol = 'https' + internal_auth_endpoint = Noop.hiera_structure('use_ssl/keystone_internal_hostname') + + it 'should have correct auth options' do + identity_uri = "#{internal_auth_protocol}://#{internal_auth_endpoint}:5000/" + ks = neutron_config['keystone'] + should contain_class('neutron::server').with( + 'auth_password' => ks.fetch('admin_password'), + 'auth_tenant' => ks.fetch('admin_tenant', 'services'), + 'auth_region' => Noop.hiera('region', 'RegionOne'), + 'auth_user' => ks.fetch('admin_user', 'neutron'), + 'identity_uri' => identity_uri, + 'auth_uri' => identity_uri, + ) + end + + admin_auth_protocol = 'https' + admin_auth_endpoint = Noop.hiera_structure('use_ssl/keystone_admin_hostname') + nova_auth_protocol = 'https' + internal_nova_endpoint = 
Noop.hiera_structure('use_ssl/nova_internal_hostname') + it 'should declare class neutron::server::notifications with TLS endpoints' do + nova_admin_auth_url = "#{admin_auth_protocol}://#{admin_auth_endpoint}:35357/" + nova_url = "#{nova_auth_protocol}://#{internal_nova_endpoint}:8774/v2" + nova_hash = Noop.hiera_hash('nova', {}) + should contain_class('neutron::server::notifications').with( + 'nova_url' => nova_url, + 'auth_url' => nova_admin_auth_url, + 'region_name' => Noop.hiera('region', 'RegionOne'), + 'username' => nova_hash.fetch('user', 'nova'), + 'tenant_name' => nova_hash.fetch('tenant', 'services'), + 'password' => nova_hash.fetch('user_password'), + ) + end + end + else + context 'without overridden TLS for internal endpoints' do + it 'should have correct auth options' do + identity_uri = "http://#{service_endpoint}:5000/" + ks = neutron_config['keystone'] + should contain_class('neutron::server').with( + 'auth_password' => ks.fetch('admin_password'), + 'auth_tenant' => ks.fetch('admin_tenant', 'services'), + 'auth_region' => Noop.hiera('region', 'RegionOne'), + 'auth_user' => ks.fetch('admin_user', 'neutron'), + 'identity_uri' => identity_uri, + 'auth_uri' => identity_uri, + ) + end + + it 'should declare neutron::server::notifications without TLS endpoints' do + nova_admin_auth_url = "http://#{service_endpoint}:35357/" + nova_endpoint = Noop.hiera('nova_endpoint', management_vip) + nova_url = "http://#{nova_endpoint}:8774/v2" + nova_hash = Noop.hiera_hash('nova', {}) + should contain_class('neutron::server::notifications').with( + 'nova_url' => nova_url, + 'auth_url' => nova_admin_auth_url, + 'region_name' => Noop.hiera('region', 'RegionOne'), + 'username' => nova_hash.fetch('user', 'nova'), + 'tenant_name' => nova_hash.fetch('tenant', 'services'), + 'password' => nova_hash.fetch('user_password'), + ) + end + end end it { should contain_class('neutron::server').with('manage_service' => 'true')} @@ -98,21 +153,6 @@ describe manifest do ) end - it 
'neutron::server::notifications' do - nova_admin_auth_url = "http://#{service_endpoint}:35357/" - nova_endpoint = Noop.hiera('nova_endpoint', management_vip) - nova_url = "http://#{nova_endpoint}:8774/v2" - nova_hash = Noop.hiera_hash('nova', {}) - should contain_class('neutron::server::notifications').with( - 'nova_url' => nova_url, - 'auth_url' => nova_admin_auth_url, - 'region_name' => Noop.hiera('region', 'RegionOne'), - 'username' => nova_hash.fetch('user', 'nova'), - 'tenant_name' => nova_hash.fetch('tenant', 'services'), - 'password' => nova_hash.fetch('user_password'), - ) - end - it { should contain_service('neutron-server').with( 'ensure' => 'stopped', 'enable' => 'false', diff --git a/tests/noop/spec/hosts/openstack-network/server-nova_spec.rb b/tests/noop/spec/hosts/openstack-network/server-nova_spec.rb index 218f4075d4..fe1cd5f3b4 100644 --- a/tests/noop/spec/hosts/openstack-network/server-nova_spec.rb +++ b/tests/noop/spec/hosts/openstack-network/server-nova_spec.rb @@ -40,15 +40,34 @@ describe manifest do it { should contain_class('nova::network::neutron').with( 'neutron_admin_username' => admin_username )} - it { should contain_class('nova::network::neutron').with( - 'neutron_admin_auth_url' => admin_auth_url - )} - it { should contain_class('nova::network::neutron').with( - 'neutron_url' => neutron_url - )} it { should contain_class('nova::network::neutron').with( 'neutron_ovs_bridge' => 'br-int' )} + + if Noop.hiera_structure('use_ssl', false) + context 'with overridden TLS' do + admin_auth_protocol = 'https' + admin_auth_endpoint = Noop.hiera_structure('use_ssl/keystone_admin_hostname') + it { should contain_class('nova::network::neutron').with( + 'neutron_admin_auth_url' => "#{admin_auth_protocol}://#{admin_auth_endpoint}:35357/#{auth_api_version}" + )} + + neutron_internal_protocol = 'https' + neutron_internal_endpoint = Noop.hiera_structure('use_ssl/neutron_internal_hostname') + it { should contain_class('nova::network::neutron').with( + 
'neutron_url' => "#{neutron_internal_protocol}://#{neutron_internal_endpoint}:9696" + )} + end + else + context 'without overridden TLS' do + it { should contain_class('nova::network::neutron').with( + 'neutron_admin_auth_url' => admin_auth_url + )} + it { should contain_class('nova::network::neutron').with( + 'neutron_url' => neutron_url + )} + end + end end elsif !Noop.hiera('use_neutron') && Noop.hiera('role') =~ /controller/ diff --git a/tests/noop/spec/hosts/roles/cinder_spec.rb b/tests/noop/spec/hosts/roles/cinder_spec.rb index 82a31319dd..89f5becd45 100644 --- a/tests/noop/spec/hosts/roles/cinder_spec.rb +++ b/tests/noop/spec/hosts/roles/cinder_spec.rb @@ -13,8 +13,25 @@ describe manifest do it { should contain_package('python-amqp') } - keystone_auth_host = Noop.hiera 'service_endpoint' - auth_uri = "http://#{keystone_auth_host}:5000/" + if Noop.hiera_structure('use_ssl') + internal_auth_protocol = 'https' + internal_auth_address = Noop.hiera_structure('use_ssl/keystone_internal_hostname') + glance_protocol = 'https' + glance_internal_address = Noop.hiera_structure('use_ssl/glance_internal_hostname') + else + internal_auth_protocol = 'http' + internal_auth_address = Noop.hiera 'service_endpoint' + glance_protocol = 'http' + glance_internal_address = Noop.hiera('management_vip') + end + auth_uri = "#{internal_auth_protocol}://#{internal_auth_address}:5000/" + glance_api_servers = "#{glance_protocol}://#{glance_internal_address}:9292" + + it 'should contain correct glance api servers addresses' do + should contain_class('openstack::cinder').with( + 'glance_api_servers' => glance_api_servers + ) + end it 'ensures cinder_config contains auth_uri and identity_uri ' do should contain_cinder_config('keystone_authtoken/auth_uri').with(:value => auth_uri) diff --git a/tests/noop/spec/hosts/roles/compute_spec.rb b/tests/noop/spec/hosts/roles/compute_spec.rb index 053ef41e3d..38655ec6e2 100644 --- a/tests/noop/spec/hosts/roles/compute_spec.rb +++ 
b/tests/noop/spec/hosts/roles/compute_spec.rb @@ -145,17 +145,43 @@ describe manifest do # SSL support - public_ssl = Noop.hiera_structure('public_ssl/services') - - if public_ssl - it 'should properly configure vncproxy WITH ssl' do - vncproxy_host = Noop.hiera_structure('public_ssl/hostname') - should contain_class('openstack::compute').with( - 'vncproxy_host' => vncproxy_host - ) - should contain_class('nova::compute').with( - 'vncproxy_protocol' => 'https' - ) + if Noop.hiera_structure('use_ssl') + context 'with enabled and overridden TLS' do + it 'should properly configure vncproxy WITH ssl' do + vncproxy_host = Noop.hiera_structure('use_ssl/nova_public_hostname') + should contain_class('openstack::compute').with( + 'vncproxy_host' => vncproxy_host + ) + should contain_class('nova::compute').with( + 'vncproxy_protocol' => 'https' + ) + end + it 'should properly configure glance api servers WITH ssl' do + glance_protocol = 'https' + glance_endpoint = Noop.hiera_structure('use_ssl/glance_internal_hostname') + glance_api_servers = "#{glance_protocol}://#{glance_endpoint}:9292" + should contain_class('openstack::compute').with( + 'glance_api_servers' => glance_api_servers + ) + end + end + elsif Noop.hiera_structure('public_ssl/services') + context 'with enabled and not overridden TLS' do + it 'should properly configure vncproxy WITH ssl' do + vncproxy_host = Noop.hiera_structure('public_ssl/hostname') + should contain_class('openstack::compute').with( + 'vncproxy_host' => vncproxy_host + ) + should contain_class('nova::compute').with( + 'vncproxy_protocol' => 'https' + ) + end + it 'should properly configure glance api servers WITHOUT ssl' do + management_vip = Noop.hiera('management_vip') + should contain_class('openstack::compute').with( + 'glance_api_servers' => "#{management_vip}:9292" + ) + end end else it 'should properly configure vncproxy WITHOUT ssl' do @@ -167,6 +193,12 @@ describe manifest do 'vncproxy_protocol' => 'http' ) end + it 'should properly 
configure glance api servers WITHOUT ssl' do + management_vip = Noop.hiera('management_vip') + should contain_class('openstack::compute').with( + 'glance_api_servers' => "#{management_vip}:9292" + ) + end end end diff --git a/tests/noop/spec/hosts/ssl/ssl_add_trust_chain_spec.rb b/tests/noop/spec/hosts/ssl/ssl_add_trust_chain_spec.rb index 3e0518aff8..3811a17bb2 100644 --- a/tests/noop/spec/hosts/ssl/ssl_add_trust_chain_spec.rb +++ b/tests/noop/spec/hosts/ssl/ssl_add_trust_chain_spec.rb @@ -3,6 +3,13 @@ require 'shared-examples' manifest = 'ssl/ssl_add_trust_chain.pp' describe manifest do + shared_examples 'catalog' do + it 'should add certificates to trust chain' do + should contain_exec('add_trust').with( + 'command' => 'update-ca-certificates', + ) + end + end test_ubuntu_and_centos manifest end diff --git a/tests/noop/spec/hosts/ssl/ssl_dns_setup_spec.rb b/tests/noop/spec/hosts/ssl/ssl_dns_setup_spec.rb new file mode 100644 index 0000000000..d301b11d8f --- /dev/null +++ b/tests/noop/spec/hosts/ssl/ssl_dns_setup_spec.rb @@ -0,0 +1,89 @@ +require 'spec_helper' +require 'shared-examples' +manifest = 'ssl/ssl_dns_setup.pp' + +describe manifest do + shared_examples 'catalog' do + + if Noop.hiera_structure('use_ssl', false) + context "when all services have hostnames" do + public_services = [ 'horizon', 'keystone', 'nova', 'heat', 'glance', 'cinder', 'neutron', 'swift', 'sahara', 'murano', 'ceilometer', 'radosgw'] + public_services.each do |service| + public_vip = Noop.hiera_structure("use_ssl/#{service}_public_ip", Noop.hiera('public_vip')) + public_hostname = Noop.hiera_structure "use_ssl/#{service}_public_hostname", Noop.hiera('management_vip') + + it "should set #{service} resolving for public hostname" do + should contain_host("#{public_hostname}").with( + 'ensure' => 'present', + 'ip' => public_vip, + ) + end + end + + ia_services = [ 'keystone', 'nova', 'heat', 'glance', 'cinder', 'neutron', 'swift', 'sahara', 'murano', 'ceilometer' ] + ia_services.each do
|service| + management_vip = Noop.hiera_structure("use_ssl/#{service}_internal_ip", Noop.hiera('management_vip')) + admin_vip = Noop.hiera_structure("use_ssl/#{service}_admin_ip", Noop.hiera('management_vip')) + internal_hostname = Noop.hiera_structure "use_ssl/#{service}_internal_hostname" + admin_hostname = Noop.hiera_structure "use_ssl/#{service}_admin_hostname" + + it "should set #{service} resolving for internal hostname" do + should contain_host("#{internal_hostname}").with( + 'ensure' => 'present', + 'ip' => management_vip, + ) + end + + it "should set #{service} resolution for admin hostname" do + should contain_host("#{admin_hostname}").with( + 'ensure' => 'present', + 'ip' => admin_vip, + ) + end + end + end + + context "when keystone external ip set" do + let(:public_ip) { Noop.hiera_structure "use_ssl/keystone_public_ip" } + public_hostname = Noop.hiera_structure "use_ssl/keystone_public_hostname" + let(:internal_ip) { Noop.hiera_structure "use_ssl/keystone_internal_ip" } + internal_hostname = Noop.hiera_structure "use_ssl/keystone_internal_hostname" + let(:admin_ip) { Noop.hiera_structure "use_ssl/keystone_admin_ip" } + admin_hostname = Noop.hiera_structure "use_ssl/keystone_admin_hostname" + + it "should set resolve with keystone public external ip" do + should contain_host("#{public_hostname}").with( + 'ensure' => 'present', + 'ip' => public_ip, + ) + end + + it "should set resolve with keystone internal external ip" do + should contain_host("#{internal_hostname}").with( + 'ensure' => 'present', + 'ip' => internal_ip, + ) + end + + it "should set resolve with keystone admin external ip" do + should contain_host("#{admin_hostname}").with( + 'ensure' => 'present', + 'ip' => admin_ip, + ) + end + end + elsif Noop.hiera_structure('public_ssl/services', false) + it "should set resolving for public endpoints" do + public_vip = Noop.hiera('public_vip') + public_hostname = Noop.hiera_structure('public_ssl/hostname') + + should 
contain_host("#{public_hostname}").with( + 'ensure' => 'present', + 'ip' => public_vip, + ) + end + end + + end + test_ubuntu_and_centos manifest +end diff --git a/tests/noop/spec/hosts/ssl/ssl_keys_saving_spec.rb b/tests/noop/spec/hosts/ssl/ssl_keys_saving_spec.rb index 7f16339573..e43c994486 100644 --- a/tests/noop/spec/hosts/ssl/ssl_keys_saving_spec.rb +++ b/tests/noop/spec/hosts/ssl/ssl_keys_saving_spec.rb @@ -3,6 +3,84 @@ require 'shared-examples' manifest = 'ssl/ssl_keys_saving.pp' describe manifest do + shared_examples 'catalog' do + if Noop.hiera('use_ssl', false) + context 'for services that have all endpoint types' do + services = [ 'keystone', 'nova', 'heat', 'glance', 'cinder', 'neutron', 'swift', 'sahara', 'murano', 'ceilometer' ] + types = [ 'public', 'internal', 'admin' ] + services.each do |service| + types.each do |type| + certdata = Noop.hiera_structure "use_ssl/#{service}_#{type}_certdata" + it "should create certificate file with all data for #{type} #{service} in /etc/" do + should contain_file("/etc/pki/tls/certs/#{type}_#{service}.pem").with( + 'ensure' => 'present', + 'content' => certdata, + ) + end + + it "should create certificate file with all data for #{type} #{service} in /var/" do + should contain_file("/var/lib/astute/haproxy/#{type}_#{service}.pem").with( + 'ensure' => 'present', + 'content' => certdata, + ) + end + end + end + end + + context 'for public-only services' do + services = [ 'horizon', 'radosgw' ] + services.each do |service| + certdata = Noop.hiera_structure "use_ssl/#{service}_public_certdata" + it "should create certificate file with all data for public #{service} in /etc/" do + should contain_file("/etc/pki/tls/certs/public_#{service}.pem").with( + 'ensure' => 'present', + 'content' => certdata, + ) + end + + it "should create certificate file with all data for public #{service} in /var/" do + should contain_file("/var/lib/astute/haproxy/public_#{service}.pem").with( + 'ensure' => 'present', + 'content' => certdata, 
+ ) + end + + it "should not create certificate file for internal #{service} in /etc/" do + should_not contain_file("/etc/pki/tls/certs/internal_#{service}.pem") + end + + it "should not create certificate file for internal #{service} in /var/" do + should_not contain_file("/var/lib/astute/haproxy/internal_#{service}.pem") + end + + it "should not create certificate file for admin #{service} in /etc/" do + should_not contain_file("/etc/pki/tls/certs/admin_#{service}.pem") + end + + it "should not create certificate file for admin #{service} in /var/" do + should_not contain_file("/var/lib/astute/haproxy/admin_#{service}.pem") + end + + end + end + + elsif Noop.hiera('public_ssl', false) + certdata = Noop.hiera_structure "public_ssl/cert_data/content" + it "should create certificate file for public endpoints in /var/" do + should contain_file("/var/lib/astute/haproxy/public_haproxy.pem").with( + 'ensure' => 'present', + 'content' => certdata, + ) + end + it "should create certificate file with for public endpoints in /etc/" do + should contain_file("/etc/pki/tls/certs/public_haproxy.pem").with( + 'ensure' => 'present', + 'content' => certdata, + ) + end + end + end test_ubuntu_and_centos manifest end diff --git a/tests/noop/spec/hosts/swift/keystone_spec.rb b/tests/noop/spec/hosts/swift/keystone_spec.rb index ecf37d954b..dffc3d78b0 100644 --- a/tests/noop/spec/hosts/swift/keystone_spec.rb +++ b/tests/noop/spec/hosts/swift/keystone_spec.rb @@ -9,32 +9,43 @@ describe manifest do end swift = Noop.hiera_structure('swift') - public_ssl = Noop.hiera_structure('public_ssl/services') - public_address = false if swift['management_vip'] - admin_address = swift['management_vip'] + management_vip = swift['management_vip'] else - admin_address = Noop.hiera('management_vip') + management_vip = Noop.hiera('management_vip') end - if swift['public_vip'] - public_address = swift['public_vip'] - end - - if public_ssl - public_address = public_address || 
Noop.hiera_structure('public_ssl/hostname') + if Noop.hiera_structure('use_ssl/swift_public', false) public_protocol = 'https' + public_address = Noop.hiera_structure('use_ssl/swift_public_hostname') + elsif Noop.hiera_structure('public_ssl/services') + public_address = Noop.hiera_structure('public_ssl/hostname') + public_protocol = 'https' + elsif swift['public_vip'] + public_protocol = 'http' + public_address = swift['public_vip'] else public_address = Noop.hiera('public_vip') public_protocol = 'http' end + if Noop.hiera_structure('use_ssl/swift_internal', false) + internal_protocol = 'https' + internal_address = Noop.hiera_structure('use_ssl/swift_internal_hostname') + elsif swift['management_vip'] + internal_protocol = 'http' + internal_address = swift['management_vip'] + else + internal_protocol = 'http' + internal_address = Noop.hiera('management_vip') + end + public_url = "#{public_protocol}://#{public_address}:8080/v1/AUTH_%(tenant_id)s" - admin_url = "http://#{admin_address}:8080/v1/AUTH_%(tenant_id)s" + admin_url = "#{internal_protocol}://#{internal_address}:8080/v1/AUTH_%(tenant_id)s" public_url_s3 = "#{public_protocol}://#{public_address}:8080" - admin_url_s3 = "http://#{admin_address}:8080" + admin_url_s3 = "#{internal_protocol}://#{internal_address}:8080" it 'class swift::keystone::auth should contain correct *_url' do should contain_class('swift::keystone::auth').with('public_url' => public_url) diff --git a/tests/noop/spec/hosts/swift/swift_spec.rb b/tests/noop/spec/hosts/swift/swift_spec.rb index d5c5537542..780624402a 100644 --- a/tests/noop/spec/hosts/swift/swift_spec.rb +++ b/tests/noop/spec/hosts/swift/swift_spec.rb @@ -85,15 +85,43 @@ describe manifest do ) end - it 'should declare swift::dispersion' do - should contain_class('swift::dispersion').that_requires('Class[openstack::swift::status]') - end + if Noop.hiera('use_ssl', false) + context 'with enabled internal TLS for keystone' do + keystone_endpoint = Noop.hiera_structure 
'use_ssl/keystone_internal_hostname' + it 'should declare swift::dispersion' do + should contain_class('swift::dispersion').with( + 'auth_url' => "https://#{keystone_endpoint}:5000/v2.0/" + ).that_requires('Class[openstack::swift::status]') + end + end - it { - should contain_class('openstack::swift::status').with( - 'only_from' => "127.0.0.1 240.0.0.2 #{sto_nets} #{man_nets}", - ).that_requires('Class[openstack::swift::proxy]') - } + context 'with enabled internal TLS for swift' do + swift_endpoint = Noop.hiera_structure 'use_ssl/swift_internal_hostname' + it { + should contain_class('openstack::swift::status').with( + 'endpoint' => "https://#{swift_endpoint}:8080", + 'only_from' => "127.0.0.1 240.0.0.2 #{sto_nets} #{man_nets}", + ).that_requires('Class[openstack::swift::proxy]') + } + end + else + keystone_endpoint = Noop.hiera 'service_endpoint' + context 'with disabled internal TLS for keystone' do + it 'should declare swift::dispersion' do + should contain_class('swift::dispersion').with( + 'auth_url' => "http://#{keystone_endpoint}:5000/v2.0/" + ).that_requires('Class[openstack::swift::status]') + end + end + + context 'with disabled internal TLS for swift' do + it { + should contain_class('openstack::swift::status').with( + 'only_from' => "127.0.0.1 240.0.0.2 #{sto_nets} #{man_nets}", + ).that_requires('Class[openstack::swift::proxy]') + } + end + end it 'should configure swift on separate partition' do should contain_file(swift_partition).with(