Make puppet manifests compliant with Puppet 4.x
- https://docs.puppetlabs.com/puppet/3.8/reference/deprecated_language.html - Temporary disablement of the puppet-lint autoload layout check failing for ringbuilder.pp. A fix for that will be part of another patch. Change-Id: I495825641ab12e7c5789c1405649c356c5bb8051 Signed-off-by: Gael Chamoulaud <gchamoul@redhat.com>
This commit is contained in:
parent
0d6b04c21a
commit
b9aab09518
1
Rakefile
1
Rakefile
@ -3,3 +3,4 @@ require 'puppet-lint/tasks/puppet-lint'
|
||||
|
||||
PuppetLint.configuration.fail_on_warnings = true
|
||||
PuppetLint.configuration.send('disable_80chars')
|
||||
PuppetLint.configuration.send('disable_autoloader_layout')
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
include tripleo::packages
|
||||
include ::tripleo::packages
|
||||
|
||||
create_resources(sysctl::value, hiera('sysctl_settings'), {})
|
||||
|
||||
@ -25,13 +25,13 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
|
||||
exec { 'set selinux to permissive on boot':
|
||||
command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
|
||||
onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
|
||||
path => ["/usr/bin", "/usr/sbin"],
|
||||
path => ['/usr/bin', '/usr/sbin'],
|
||||
}
|
||||
|
||||
exec { 'set selinux to permissive':
|
||||
command => "setenforce 0",
|
||||
command => 'setenforce 0',
|
||||
onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
|
||||
path => ["/usr/bin", "/usr/sbin"],
|
||||
path => ['/usr/bin', '/usr/sbin'],
|
||||
} -> Class['ceph::profile::osd']
|
||||
}
|
||||
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
include tripleo::packages
|
||||
include ::tripleo::packages
|
||||
|
||||
create_resources(sysctl::value, hiera('sysctl_settings'), {})
|
||||
|
||||
@ -24,7 +24,7 @@ if count(hiera('ntp::servers')) > 0 {
|
||||
file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
|
||||
'/etc/libvirt/qemu/networks/default.xml']:
|
||||
ensure => absent,
|
||||
before => Service['libvirt']
|
||||
before => Service['libvirt'],
|
||||
}
|
||||
# in case libvirt has been already running before the Puppet run, make
|
||||
# sure the default network is destroyed
|
||||
@ -55,7 +55,7 @@ if $rbd_ephemeral_storage or $rbd_persistent_storage {
|
||||
}
|
||||
|
||||
if hiera('cinder_enable_nfs_backend', false) {
|
||||
if ($::selinux != "false") {
|
||||
if str2bool($::selinux) {
|
||||
selboolean { 'virt_use_nfs':
|
||||
value => on,
|
||||
persistent => true,
|
||||
@ -69,18 +69,18 @@ include ::nova::compute::libvirt
|
||||
include ::nova::network::neutron
|
||||
include ::neutron
|
||||
|
||||
class { 'neutron::plugins::ml2':
|
||||
class { '::neutron::plugins::ml2':
|
||||
flat_networks => split(hiera('neutron_flat_networks'), ','),
|
||||
tenant_network_types => [hiera('neutron_tenant_network_type')],
|
||||
}
|
||||
|
||||
class { 'neutron::agents::ml2::ovs':
|
||||
class { '::neutron::agents::ml2::ovs':
|
||||
bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
|
||||
tunnel_types => split(hiera('neutron_tunnel_types'), ','),
|
||||
}
|
||||
|
||||
if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
|
||||
class { 'neutron::agents::n1kv_vem':
|
||||
class { '::neutron::agents::n1kv_vem':
|
||||
n1kv_source => hiera('n1kv_vem_source', undef),
|
||||
n1kv_version => hiera('n1kv_vem_version', undef),
|
||||
}
|
||||
@ -97,7 +97,7 @@ snmp::snmpv3_user { $snmpd_user:
|
||||
authtype => 'MD5',
|
||||
authpass => hiera('snmpd_readonly_user_password'),
|
||||
}
|
||||
class { 'snmp':
|
||||
class { '::snmp':
|
||||
agentaddress => ['udp:161','udp6:[::1]:161'],
|
||||
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
|
||||
}
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
include tripleo::packages
|
||||
include ::tripleo::packages
|
||||
|
||||
if hiera('step') >= 1 {
|
||||
|
||||
@ -70,13 +70,13 @@ if hiera('step') >= 2 {
|
||||
include ::tripleo::redis_notification
|
||||
}
|
||||
|
||||
if str2bool(hiera('enable_galera', 'true')) {
|
||||
if str2bool(hiera('enable_galera', true)) {
|
||||
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
|
||||
} else {
|
||||
$mysql_config_file = '/etc/my.cnf.d/server.cnf'
|
||||
}
|
||||
# TODO Galara
|
||||
class { 'mysql::server':
|
||||
class { '::mysql::server':
|
||||
config_file => $mysql_config_file,
|
||||
override_options => {
|
||||
'mysqld' => {
|
||||
@ -126,31 +126,31 @@ if hiera('step') >= 2 {
|
||||
$enable_ceph = hiera('ceph_storage_count', 0) > 0
|
||||
|
||||
if $enable_ceph {
|
||||
class { 'ceph::profile::params':
|
||||
mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
|
||||
class { '::ceph::profile::params':
|
||||
mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
|
||||
}
|
||||
include ::ceph::profile::mon
|
||||
}
|
||||
|
||||
if str2bool(hiera('enable_ceph_storage', 'false')) {
|
||||
if str2bool(hiera('enable_ceph_storage', false)) {
|
||||
if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
|
||||
exec { 'set selinux to permissive on boot':
|
||||
command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
|
||||
onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
|
||||
path => ["/usr/bin", "/usr/sbin"],
|
||||
path => ['/usr/bin', '/usr/sbin'],
|
||||
}
|
||||
|
||||
exec { 'set selinux to permissive':
|
||||
command => "setenforce 0",
|
||||
command => 'setenforce 0',
|
||||
onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
|
||||
path => ["/usr/bin", "/usr/sbin"],
|
||||
path => ['/usr/bin', '/usr/sbin'],
|
||||
} -> Class['ceph::profile::osd']
|
||||
}
|
||||
|
||||
include ::ceph::profile::osd
|
||||
}
|
||||
|
||||
if str2bool(hiera('enable_external_ceph', 'false')) {
|
||||
if str2bool(hiera('enable_external_ceph', false)) {
|
||||
include ::ceph::profile::client
|
||||
}
|
||||
|
||||
@ -196,9 +196,9 @@ if hiera('step') >= 3 {
|
||||
|
||||
$glance_backend = downcase(hiera('glance_backend', 'swift'))
|
||||
case $glance_backend {
|
||||
swift: { $backend_store = 'glance.store.swift.Store' }
|
||||
file: { $backend_store = 'glance.store.filesystem.Store' }
|
||||
rbd: { $backend_store = 'glance.store.rbd.Store' }
|
||||
'swift': { $backend_store = 'glance.store.swift.Store' }
|
||||
'file': { $backend_store = 'glance.store.filesystem.Store' }
|
||||
'rbd': { $backend_store = 'glance.store.rbd.Store' }
|
||||
default: { fail('Unrecognized glance_backend parameter.') }
|
||||
}
|
||||
$http_store = ['glance.store.http.Store']
|
||||
@ -206,8 +206,8 @@ if hiera('step') >= 3 {
|
||||
|
||||
# TODO: notifications, scrubber, etc.
|
||||
include ::glance
|
||||
class { 'glance::api':
|
||||
known_stores => $glance_store
|
||||
class { '::glance::api':
|
||||
known_stores => $glance_store,
|
||||
}
|
||||
include ::glance::registry
|
||||
include join(['::glance::backend::', $glance_backend])
|
||||
@ -239,24 +239,24 @@ if hiera('step') >= 3 {
|
||||
require => Package['neutron'],
|
||||
}
|
||||
|
||||
class { 'neutron::plugins::ml2':
|
||||
class { '::neutron::plugins::ml2':
|
||||
flat_networks => split(hiera('neutron_flat_networks'), ','),
|
||||
tenant_network_types => [hiera('neutron_tenant_network_type')],
|
||||
mechanism_drivers => [hiera('neutron_mechanism_drivers')],
|
||||
}
|
||||
class { 'neutron::agents::ml2::ovs':
|
||||
class { '::neutron::agents::ml2::ovs':
|
||||
bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
|
||||
tunnel_types => split(hiera('neutron_tunnel_types'), ','),
|
||||
}
|
||||
if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
|
||||
include neutron::plugins::ml2::cisco::nexus1000v
|
||||
include ::neutron::plugins::ml2::cisco::nexus1000v
|
||||
|
||||
class { 'neutron::agents::n1kv_vem':
|
||||
class { '::neutron::agents::n1kv_vem':
|
||||
n1kv_source => hiera('n1kv_vem_source', undef),
|
||||
n1kv_version => hiera('n1kv_vem_version', undef),
|
||||
}
|
||||
|
||||
class { 'n1k_vsm':
|
||||
class { '::n1k_vsm':
|
||||
n1kv_source => hiera('n1kv_vsm_source', undef),
|
||||
n1kv_version => hiera('n1kv_vsm_version', undef),
|
||||
pacemaker_control => false,
|
||||
@ -272,7 +272,7 @@ if hiera('step') >= 3 {
|
||||
}
|
||||
|
||||
if hiera('neutron_enable_bigswitch_ml2', false) {
|
||||
include neutron::plugins::ml2::bigswitch::restproxy
|
||||
include ::neutron::plugins::ml2::bigswitch::restproxy
|
||||
}
|
||||
neutron_l3_agent_config {
|
||||
'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
|
||||
@ -291,7 +291,7 @@ if hiera('step') >= 3 {
|
||||
include ::cinder::glance
|
||||
include ::cinder::scheduler
|
||||
include ::cinder::volume
|
||||
class {'cinder::setup_test_volume':
|
||||
class { '::cinder::setup_test_volume':
|
||||
size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
|
||||
}
|
||||
|
||||
@ -371,7 +371,7 @@ if hiera('step') >= 3 {
|
||||
if hiera('cinder_enable_nfs_backend', false) {
|
||||
$cinder_nfs_backend = 'tripleo_nfs'
|
||||
|
||||
if ($::selinux != "false") {
|
||||
if str2bool($::selinux) {
|
||||
selboolean { 'virt_use_nfs':
|
||||
value => on,
|
||||
persistent => true,
|
||||
@ -406,9 +406,9 @@ if hiera('step') >= 3 {
|
||||
include ::swift::proxy::formpost
|
||||
|
||||
# swift storage
|
||||
if str2bool(hiera('enable_swift_storage', 'true')) {
|
||||
class {'swift::storage::all':
|
||||
mount_check => str2bool(hiera('swift_mount_check'))
|
||||
if str2bool(hiera('enable_swift_storage', true)) {
|
||||
class { '::swift::storage::all':
|
||||
mount_check => str2bool(hiera('swift_mount_check')),
|
||||
}
|
||||
if(!defined(File['/srv/node'])) {
|
||||
file { '/srv/node':
|
||||
@ -442,7 +442,7 @@ if hiera('step') >= 3 {
|
||||
include ::ceilometer::alarm::evaluator
|
||||
include ::ceilometer::expirer
|
||||
include ::ceilometer::collector
|
||||
include ceilometer::agent::auth
|
||||
include ::ceilometer::agent::auth
|
||||
class { '::ceilometer::db' :
|
||||
database_connection => $ceilometer_database_connection,
|
||||
}
|
||||
@ -463,7 +463,8 @@ if hiera('step') >= 3 {
|
||||
$_profile_support = 'None'
|
||||
}
|
||||
$neutron_options = {'profile_support' => $_profile_support }
|
||||
class { 'horizon':
|
||||
|
||||
class { '::horizon':
|
||||
cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
|
||||
neutron_options => $neutron_options,
|
||||
}
|
||||
@ -473,7 +474,7 @@ if hiera('step') >= 3 {
|
||||
authtype => 'MD5',
|
||||
authpass => hiera('snmpd_readonly_user_password'),
|
||||
}
|
||||
class { 'snmp':
|
||||
class { '::snmp':
|
||||
agentaddress => ['udp:161','udp6:[::1]:161'],
|
||||
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ Pcmk_resource <| |> {
|
||||
try_sleep => 3,
|
||||
}
|
||||
|
||||
include tripleo::packages
|
||||
include ::tripleo::packages
|
||||
|
||||
if $::hostname == downcase(hiera('bootstrap_nodeid')) {
|
||||
$pacemaker_master = true
|
||||
@ -28,7 +28,7 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
|
||||
$sync_db = false
|
||||
}
|
||||
|
||||
$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5
|
||||
$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
|
||||
|
||||
# When to start and enable services which haven't been Pacemakerized
|
||||
# FIXME: remove when we start all OpenStack services using Pacemaker
|
||||
@ -68,7 +68,7 @@ if hiera('step') >= 1 {
|
||||
disable => !$enable_fencing,
|
||||
}
|
||||
if $enable_fencing {
|
||||
include tripleo::fencing
|
||||
include ::tripleo::fencing
|
||||
|
||||
# enable stonith after all fencing devices have been created
|
||||
Class['tripleo::fencing'] -> Class['pacemaker::stonith']
|
||||
@ -93,7 +93,7 @@ if hiera('step') >= 1 {
|
||||
environment_variables => hiera('rabbitmq_environment'),
|
||||
} ->
|
||||
file { '/var/lib/rabbitmq/.erlang.cookie':
|
||||
ensure => 'present',
|
||||
ensure => file,
|
||||
owner => 'rabbitmq',
|
||||
group => 'rabbitmq',
|
||||
mode => '0400',
|
||||
@ -120,7 +120,7 @@ if hiera('step') >= 1 {
|
||||
}
|
||||
|
||||
# Galera
|
||||
if str2bool(hiera('enable_galera', 'true')) {
|
||||
if str2bool(hiera('enable_galera', true)) {
|
||||
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
|
||||
} else {
|
||||
$mysql_config_file = '/etc/my.cnf.d/server.cnf'
|
||||
@ -154,7 +154,7 @@ if hiera('step') >= 1 {
|
||||
'wsrep_causal_reads' => '0',
|
||||
'wsrep_notify_cmd' => '',
|
||||
'wsrep_sst_method' => 'rsync',
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
class { '::mysql::server':
|
||||
@ -178,7 +178,7 @@ if hiera('step') >= 2 {
|
||||
|
||||
if $pacemaker_master {
|
||||
|
||||
include pacemaker::resource_defaults
|
||||
include ::pacemaker::resource_defaults
|
||||
|
||||
# FIXME: we should not have to access tripleo::loadbalancer class
|
||||
# parameters here to configure pacemaker VIPs. The configuration
|
||||
@ -331,7 +331,7 @@ if hiera('step') >= 2 {
|
||||
}
|
||||
|
||||
pacemaker::resource::service { $::memcached::params::service_name :
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
require => Class['::memcached'],
|
||||
}
|
||||
|
||||
@ -385,7 +385,7 @@ if hiera('step') >= 2 {
|
||||
timeout => 30,
|
||||
tries => 180,
|
||||
try_sleep => 10,
|
||||
environment => ["AVAILABLE_WHEN_READONLY=0"],
|
||||
environment => ['AVAILABLE_WHEN_READONLY=0'],
|
||||
require => File['/etc/sysconfig/clustercheck'],
|
||||
}
|
||||
|
||||
@ -411,27 +411,27 @@ MYSQL_HOST=localhost\n",
|
||||
|
||||
# Create all the database schemas
|
||||
if $sync_db {
|
||||
class { 'keystone::db::mysql':
|
||||
class { '::keystone::db::mysql':
|
||||
require => Exec['galera-ready'],
|
||||
}
|
||||
class { 'glance::db::mysql':
|
||||
class { '::glance::db::mysql':
|
||||
require => Exec['galera-ready'],
|
||||
}
|
||||
class { 'nova::db::mysql':
|
||||
class { '::nova::db::mysql':
|
||||
require => Exec['galera-ready'],
|
||||
}
|
||||
class { 'neutron::db::mysql':
|
||||
class { '::neutron::db::mysql':
|
||||
require => Exec['galera-ready'],
|
||||
}
|
||||
class { 'cinder::db::mysql':
|
||||
class { '::cinder::db::mysql':
|
||||
require => Exec['galera-ready'],
|
||||
}
|
||||
class { 'heat::db::mysql':
|
||||
class { '::heat::db::mysql':
|
||||
require => Exec['galera-ready'],
|
||||
}
|
||||
|
||||
if downcase(hiera('ceilometer_backend')) == 'mysql' {
|
||||
class { 'ceilometer::db::mysql':
|
||||
class { '::ceilometer::db::mysql':
|
||||
require => Exec['galera-ready'],
|
||||
}
|
||||
}
|
||||
@ -444,31 +444,31 @@ MYSQL_HOST=localhost\n",
|
||||
$enable_ceph = hiera('ceph_storage_count', 0) > 0
|
||||
|
||||
if $enable_ceph {
|
||||
class { 'ceph::profile::params':
|
||||
mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
|
||||
class { '::ceph::profile::params':
|
||||
mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
|
||||
}
|
||||
include ::ceph::profile::mon
|
||||
}
|
||||
|
||||
if str2bool(hiera('enable_ceph_storage', 'false')) {
|
||||
if str2bool(hiera('enable_ceph_storage', false)) {
|
||||
if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
|
||||
exec { 'set selinux to permissive on boot':
|
||||
command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
|
||||
onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
|
||||
path => ["/usr/bin", "/usr/sbin"],
|
||||
path => ['/usr/bin', '/usr/sbin'],
|
||||
}
|
||||
|
||||
exec { 'set selinux to permissive':
|
||||
command => "setenforce 0",
|
||||
command => 'setenforce 0',
|
||||
onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
|
||||
path => ["/usr/bin", "/usr/sbin"],
|
||||
path => ['/usr/bin', '/usr/sbin'],
|
||||
} -> Class['ceph::profile::osd']
|
||||
}
|
||||
|
||||
include ::ceph::profile::osd
|
||||
}
|
||||
|
||||
if str2bool(hiera('enable_external_ceph', 'false')) {
|
||||
if str2bool(hiera('enable_external_ceph', false)) {
|
||||
include ::ceph::profile::client
|
||||
}
|
||||
|
||||
@ -517,16 +517,16 @@ if hiera('step') >= 3 {
|
||||
|
||||
$glance_backend = downcase(hiera('glance_backend', 'swift'))
|
||||
case $glance_backend {
|
||||
swift: { $backend_store = 'glance.store.swift.Store' }
|
||||
file: { $backend_store = 'glance.store.filesystem.Store' }
|
||||
rbd: { $backend_store = 'glance.store.rbd.Store' }
|
||||
'swift': { $backend_store = 'glance.store.swift.Store' }
|
||||
'file': { $backend_store = 'glance.store.filesystem.Store' }
|
||||
'rbd': { $backend_store = 'glance.store.rbd.Store' }
|
||||
default: { fail('Unrecognized glance_backend parameter.') }
|
||||
}
|
||||
$http_store = ['glance.store.http.Store']
|
||||
$glance_store = concat($http_store, $backend_store)
|
||||
|
||||
if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
|
||||
pacemaker::resource::filesystem { "glance-fs":
|
||||
pacemaker::resource::filesystem { 'glance-fs':
|
||||
device => hiera('glance_file_pcmk_device'),
|
||||
directory => hiera('glance_file_pcmk_directory'),
|
||||
fstype => hiera('glance_file_pcmk_fstype'),
|
||||
@ -537,7 +537,7 @@ if hiera('step') >= 3 {
|
||||
|
||||
# TODO: notifications, scrubber, etc.
|
||||
include ::glance
|
||||
class { 'glance::api':
|
||||
class { '::glance::api':
|
||||
known_stores => $glance_store,
|
||||
manage_service => false,
|
||||
enabled => false,
|
||||
@ -598,7 +598,7 @@ if hiera('step') >= 3 {
|
||||
manage_service => false,
|
||||
enabled => false,
|
||||
}
|
||||
class { 'neutron::agents::metadata':
|
||||
class { '::neutron::agents::metadata':
|
||||
manage_service => false,
|
||||
enabled => false,
|
||||
}
|
||||
@ -609,12 +609,12 @@ if hiera('step') >= 3 {
|
||||
notify => Service['neutron-dhcp-service'],
|
||||
require => Package['neutron'],
|
||||
}
|
||||
class { 'neutron::plugins::ml2':
|
||||
class { '::neutron::plugins::ml2':
|
||||
flat_networks => split(hiera('neutron_flat_networks'), ','),
|
||||
tenant_network_types => [hiera('neutron_tenant_network_type')],
|
||||
mechanism_drivers => [hiera('neutron_mechanism_drivers')],
|
||||
}
|
||||
class { 'neutron::agents::ml2::ovs':
|
||||
class { '::neutron::agents::ml2::ovs':
|
||||
manage_service => false,
|
||||
enabled => false,
|
||||
bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
|
||||
@ -629,21 +629,21 @@ if hiera('step') >= 3 {
|
||||
include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
|
||||
}
|
||||
if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
|
||||
include neutron::plugins::ml2::cisco::nexus1000v
|
||||
include ::neutron::plugins::ml2::cisco::nexus1000v
|
||||
|
||||
class { 'neutron::agents::n1kv_vem':
|
||||
class { '::neutron::agents::n1kv_vem':
|
||||
n1kv_source => hiera('n1kv_vem_source', undef),
|
||||
n1kv_version => hiera('n1kv_vem_version', undef),
|
||||
}
|
||||
|
||||
class { 'n1k_vsm':
|
||||
class { '::n1k_vsm':
|
||||
n1kv_source => hiera('n1kv_vsm_source', undef),
|
||||
n1kv_version => hiera('n1kv_vsm_version', undef),
|
||||
}
|
||||
}
|
||||
|
||||
if hiera('neutron_enable_bigswitch_ml2', false) {
|
||||
include neutron::plugins::ml2::bigswitch::restproxy
|
||||
include ::neutron::plugins::ml2::bigswitch::restproxy
|
||||
}
|
||||
neutron_l3_agent_config {
|
||||
'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
|
||||
@ -667,7 +667,7 @@ if hiera('step') >= 3 {
|
||||
enabled => false,
|
||||
}
|
||||
include ::cinder::glance
|
||||
class {'cinder::setup_test_volume':
|
||||
class { '::cinder::setup_test_volume':
|
||||
size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
|
||||
}
|
||||
|
||||
@ -747,14 +747,14 @@ if hiera('step') >= 3 {
|
||||
if hiera('cinder_enable_nfs_backend', false) {
|
||||
$cinder_nfs_backend = 'tripleo_nfs'
|
||||
|
||||
if ($::selinux != "false") {
|
||||
if str2bool($::selinux) {
|
||||
selboolean { 'virt_use_nfs':
|
||||
value => on,
|
||||
persistent => true,
|
||||
} -> Package['nfs-utils']
|
||||
}
|
||||
|
||||
package {'nfs-utils': } ->
|
||||
package { 'nfs-utils': } ->
|
||||
cinder::backend::nfs { $cinder_nfs_backend:
|
||||
nfs_servers => hiera('cinder_nfs_servers'),
|
||||
nfs_mount_options => hiera('cinder_nfs_mount_options'),
|
||||
@ -784,9 +784,9 @@ if hiera('step') >= 3 {
|
||||
include ::swift::proxy::formpost
|
||||
|
||||
# swift storage
|
||||
if str2bool(hiera('enable_swift_storage', 'true')) {
|
||||
if str2bool(hiera('enable_swift_storage', true)) {
|
||||
class {'::swift::storage::all':
|
||||
mount_check => str2bool(hiera('swift_mount_check'))
|
||||
mount_check => str2bool(hiera('swift_mount_check')),
|
||||
}
|
||||
class {'::swift::storage::account':
|
||||
manage_service => $non_pcmk_start,
|
||||
@ -854,7 +854,7 @@ if hiera('step') >= 3 {
|
||||
database_connection => $ceilometer_database_connection,
|
||||
sync_db => $sync_db,
|
||||
}
|
||||
include ceilometer::agent::auth
|
||||
include ::ceilometer::agent::auth
|
||||
|
||||
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
|
||||
|
||||
@ -892,7 +892,7 @@ if hiera('step') >= 3 {
|
||||
$_profile_support = 'None'
|
||||
}
|
||||
$neutron_options = {'profile_support' => $_profile_support }
|
||||
class { 'horizon':
|
||||
class { '::horizon':
|
||||
cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
|
||||
neutron_options => $neutron_options,
|
||||
}
|
||||
@ -902,7 +902,7 @@ if hiera('step') >= 3 {
|
||||
authtype => 'MD5',
|
||||
authpass => hiera('snmpd_readonly_user_password'),
|
||||
}
|
||||
class { 'snmp':
|
||||
class { '::snmp':
|
||||
agentaddress => ['udp:161','udp6:[::1]:161'],
|
||||
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
|
||||
}
|
||||
@ -918,7 +918,7 @@ if hiera('step') >= 4 {
|
||||
|
||||
# Keystone
|
||||
pacemaker::resource::service { $::keystone::params::service_name :
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
verify_on_create => true,
|
||||
require => [File['/etc/keystone/ssl/certs/ca.pem'],
|
||||
File['/etc/keystone/ssl/private/signing_key.pem'],
|
||||
@ -927,7 +927,7 @@ if hiera('step') >= 4 {
|
||||
|
||||
pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
|
||||
constraint_type => 'order',
|
||||
first_resource => "haproxy-clone",
|
||||
first_resource => 'haproxy-clone',
|
||||
second_resource => "${::keystone::params::service_name}-clone",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
@ -936,7 +936,7 @@ if hiera('step') >= 4 {
|
||||
}
|
||||
pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
|
||||
constraint_type => 'order',
|
||||
first_resource => "rabbitmq-clone",
|
||||
first_resource => 'rabbitmq-clone',
|
||||
second_resource => "${::keystone::params::service_name}-clone",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
@ -945,7 +945,7 @@ if hiera('step') >= 4 {
|
||||
}
|
||||
pacemaker::constraint::base { 'memcached-then-keystone-constraint':
|
||||
constraint_type => 'order',
|
||||
first_resource => "memcached-clone",
|
||||
first_resource => 'memcached-clone',
|
||||
second_resource => "${::keystone::params::service_name}-clone",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
@ -954,7 +954,7 @@ if hiera('step') >= 4 {
|
||||
}
|
||||
pacemaker::constraint::base { 'galera-then-keystone-constraint':
|
||||
constraint_type => 'order',
|
||||
first_resource => "galera-master",
|
||||
first_resource => 'galera-master',
|
||||
second_resource => "${::keystone::params::service_name}-clone",
|
||||
first_action => 'promote',
|
||||
second_action => 'start',
|
||||
@ -964,11 +964,11 @@ if hiera('step') >= 4 {
|
||||
|
||||
# Cinder
|
||||
pacemaker::resource::service { $::cinder::params::api_service :
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
|
||||
}
|
||||
pacemaker::resource::service { $::cinder::params::scheduler_service :
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
pacemaker::resource::service { $::cinder::params::volume_service : }
|
||||
|
||||
@ -982,45 +982,45 @@ if hiera('step') >= 4 {
|
||||
Pacemaker::Resource::Service[$::keystone::params::service_name]],
|
||||
}
|
||||
pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::cinder::params::api_service}-clone",
|
||||
second_resource => "${::cinder::params::scheduler_service}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
|
||||
Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
|
||||
source => "${::cinder::params::scheduler_service}-clone",
|
||||
target => "${::cinder::params::api_service}-clone",
|
||||
score => "INFINITY",
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
|
||||
Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
|
||||
}
|
||||
pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::cinder::params::scheduler_service}-clone",
|
||||
second_resource => "${::cinder::params::volume_service}",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
second_resource => $::cinder::params::volume_service,
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
|
||||
Pacemaker::Resource::Service[$::cinder::params::volume_service]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
|
||||
source => "${::cinder::params::volume_service}",
|
||||
source => $::cinder::params::volume_service,
|
||||
target => "${::cinder::params::scheduler_service}-clone",
|
||||
score => "INFINITY",
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
|
||||
Pacemaker::Resource::Service[$::cinder::params::volume_service]],
|
||||
}
|
||||
|
||||
# Glance
|
||||
pacemaker::resource::service { $::glance::params::registry_service_name :
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
|
||||
}
|
||||
pacemaker::resource::service { $::glance::params::api_service_name :
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
|
||||
pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
|
||||
@ -1033,18 +1033,18 @@ if hiera('step') >= 4 {
|
||||
Pacemaker::Resource::Service[$::keystone::params::service_name]],
|
||||
}
|
||||
pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::glance::params::registry_service_name}-clone",
|
||||
second_resource => "${::glance::params::api_service_name}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
|
||||
Pacemaker::Resource::Service[$::glance::params::api_service_name]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
|
||||
source => "${::glance::params::api_service_name}-clone",
|
||||
target => "${::glance::params::registry_service_name}-clone",
|
||||
score => "INFINITY",
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
|
||||
Pacemaker::Resource::Service[$::glance::params::api_service_name]],
|
||||
}
|
||||
@ -1056,154 +1056,154 @@ if hiera('step') >= 4 {
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1233061
|
||||
exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
|
||||
pacemaker::resource::service { $::neutron::params::server_service:
|
||||
clone_params => "interleave=true",
|
||||
require => Pacemaker::Resource::Service[$::keystone::params::service_name]
|
||||
clone_params => 'interleave=true',
|
||||
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
|
||||
}
|
||||
pacemaker::resource::service { $::neutron::params::l3_agent_service:
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
pacemaker::resource::service { $::neutron::params::ovs_agent_service:
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
pacemaker::resource::service { $::neutron::params::metadata_agent_service:
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
|
||||
ocf_agent_name => "neutron:OVSCleanup",
|
||||
clone_params => "interleave=true",
|
||||
ocf_agent_name => 'neutron:OVSCleanup',
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
pacemaker::resource::ocf { 'neutron-netns-cleanup':
|
||||
ocf_agent_name => "neutron:NetnsCleanup",
|
||||
clone_params => "interleave=true",
|
||||
ocf_agent_name => 'neutron:NetnsCleanup',
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
|
||||
# neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
|
||||
pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
|
||||
second_resource => "neutron-netns-cleanup-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
|
||||
second_resource => 'neutron-netns-cleanup-clone',
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
|
||||
Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
|
||||
source => "neutron-netns-cleanup-clone",
|
||||
source => 'neutron-netns-cleanup-clone',
|
||||
target => "${::neutron::params::ovs_cleanup_service}-clone",
|
||||
score => "INFINITY",
|
||||
require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
|
||||
Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
|
||||
}
|
||||
pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
|
||||
constraint_type => "order",
|
||||
first_resource => "neutron-netns-cleanup-clone",
|
||||
constraint_type => 'order',
|
||||
first_resource => 'neutron-netns-cleanup-clone',
|
||||
second_resource => "${::neutron::params::ovs_agent_service}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
|
||||
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
|
||||
source => "${::neutron::params::ovs_agent_service}-clone",
|
||||
target => "neutron-netns-cleanup-clone",
|
||||
score => "INFINITY",
|
||||
require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
|
||||
target => 'neutron-netns-cleanup-clone',
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
|
||||
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
|
||||
}
|
||||
|
||||
#another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
|
||||
pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::keystone::params::service_name}-clone",
|
||||
second_resource => "${::neutron::params::server_service}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
|
||||
Pacemaker::Resource::Service[$::neutron::params::server_service]],
|
||||
}
|
||||
pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::neutron::params::server_service}-clone",
|
||||
second_resource => "${::neutron::params::ovs_agent_service}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
|
||||
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
|
||||
}
|
||||
pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::neutron::params::ovs_agent_service}-clone",
|
||||
second_resource => "${::neutron::params::dhcp_agent_service}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
|
||||
Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
|
||||
|
||||
}
|
||||
pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
|
||||
source => "${::neutron::params::dhcp_agent_service}-clone",
|
||||
target => "${::neutron::params::ovs_agent_service}-clone",
|
||||
score => "INFINITY",
|
||||
require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
|
||||
Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
|
||||
}
|
||||
pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::neutron::params::dhcp_agent_service}-clone",
|
||||
second_resource => "${::neutron::params::l3_agent_service}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
|
||||
Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
|
||||
source => "${::neutron::params::l3_agent_service}-clone",
|
||||
target => "${::neutron::params::dhcp_agent_service}-clone",
|
||||
score => "INFINITY",
|
||||
require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
|
||||
Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
|
||||
}
|
||||
pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::neutron::params::l3_agent_service}-clone",
|
||||
second_resource => "${::neutron::params::metadata_agent_service}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
|
||||
Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
|
||||
source => "${::neutron::params::metadata_agent_service}-clone",
|
||||
target => "${::neutron::params::l3_agent_service}-clone",
|
||||
score => "INFINITY",
|
||||
require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
|
||||
Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
|
||||
Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
|
||||
}
|
||||
|
||||
# Nova
|
||||
pacemaker::resource::service { $::nova::params::api_service_name :
|
||||
clone_params => "interleave=true",
|
||||
op_params => "start timeout=90s monitor start-delay=10s",
|
||||
clone_params => 'interleave=true',
|
||||
op_params => 'start timeout=90s monitor start-delay=10s',
|
||||
}
|
||||
pacemaker::resource::service { $::nova::params::conductor_service_name :
|
||||
clone_params => "interleave=true",
|
||||
op_params => "start timeout=90s monitor start-delay=10s",
|
||||
clone_params => 'interleave=true',
|
||||
op_params => 'start timeout=90s monitor start-delay=10s',
|
||||
}
|
||||
pacemaker::resource::service { $::nova::params::consoleauth_service_name :
|
||||
clone_params => "interleave=true",
|
||||
op_params => "start timeout=90s monitor start-delay=10s",
|
||||
clone_params => 'interleave=true',
|
||||
op_params => 'start timeout=90s monitor start-delay=10s',
|
||||
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
|
||||
}
|
||||
pacemaker::resource::service { $::nova::params::vncproxy_service_name :
|
||||
clone_params => "interleave=true",
|
||||
op_params => "start timeout=90s monitor start-delay=10s",
|
||||
clone_params => 'interleave=true',
|
||||
op_params => 'start timeout=90s monitor start-delay=10s',
|
||||
}
|
||||
pacemaker::resource::service { $::nova::params::scheduler_service_name :
|
||||
clone_params => "interleave=true",
|
||||
op_params => "start timeout=90s monitor start-delay=10s",
|
||||
clone_params => 'interleave=true',
|
||||
op_params => 'start timeout=90s monitor start-delay=10s',
|
||||
}
|
||||
|
||||
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
|
||||
@ -1216,66 +1216,66 @@ if hiera('step') >= 4 {
|
||||
Pacemaker::Resource::Service[$::keystone::params::service_name]],
|
||||
}
|
||||
pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::nova::params::consoleauth_service_name}-clone",
|
||||
second_resource => "${::nova::params::vncproxy_service_name}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
|
||||
source => "${::nova::params::vncproxy_service_name}-clone",
|
||||
target => "${::nova::params::consoleauth_service_name}-clone",
|
||||
score => "INFINITY",
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
|
||||
}
|
||||
pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::nova::params::vncproxy_service_name}-clone",
|
||||
second_resource => "${::nova::params::api_service_name}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::api_service_name]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
|
||||
source => "${::nova::params::api_service_name}-clone",
|
||||
target => "${::nova::params::vncproxy_service_name}-clone",
|
||||
score => "INFINITY",
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::api_service_name]],
|
||||
}
|
||||
pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::nova::params::api_service_name}-clone",
|
||||
second_resource => "${::nova::params::scheduler_service_name}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
|
||||
source => "${::nova::params::scheduler_service_name}-clone",
|
||||
target => "${::nova::params::api_service_name}-clone",
|
||||
score => "INFINITY",
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
|
||||
}
|
||||
pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
|
||||
constraint_type => "order",
|
||||
constraint_type => 'order',
|
||||
first_resource => "${::nova::params::scheduler_service_name}-clone",
|
||||
second_resource => "${::nova::params::conductor_service_name}-clone",
|
||||
first_action => "start",
|
||||
second_action => "start",
|
||||
first_action => 'start',
|
||||
second_action => 'start',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
|
||||
}
|
||||
pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
|
||||
source => "${::nova::params::conductor_service_name}-clone",
|
||||
target => "${::nova::params::scheduler_service_name}-clone",
|
||||
score => "INFINITY",
|
||||
score => 'INFINITY',
|
||||
require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
|
||||
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
|
||||
}
|
||||
@ -1324,7 +1324,7 @@ if hiera('step') >= 4 {
|
||||
}
|
||||
pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
|
||||
constraint_type => 'order',
|
||||
first_resource => "redis-master",
|
||||
first_resource => 'redis-master',
|
||||
second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
|
||||
first_action => 'promote',
|
||||
second_action => 'start',
|
||||
@ -1524,7 +1524,7 @@ if hiera('step') >= 4 {
|
||||
|
||||
# Horizon
|
||||
pacemaker::resource::service { $::horizon::params::http_service:
|
||||
clone_params => "interleave=true",
|
||||
clone_params => 'interleave=true',
|
||||
}
|
||||
|
||||
#VSM
|
||||
@ -1535,7 +1535,7 @@ if hiera('step') >= 4 {
|
||||
require => Class['n1k_vsm'],
|
||||
meta_params => 'resource-stickiness=INFINITY',
|
||||
}
|
||||
if str2bool(hiera('n1k_vsm::pacemaker_control', 'true')) {
|
||||
if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
|
||||
pacemaker::resource::ocf { 'vsm-s' :
|
||||
ocf_agent_name => 'heartbeat:VirtualDomain',
|
||||
resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
|
||||
@ -1543,9 +1543,9 @@ if hiera('step') >= 4 {
|
||||
meta_params => 'resource-stickiness=INFINITY',
|
||||
}
|
||||
pacemaker::constraint::colocation { 'vsm-colocation-contraint':
|
||||
source => "vsm-p",
|
||||
target => "vsm-s",
|
||||
score => "-INFINITY",
|
||||
source => 'vsm-p',
|
||||
target => 'vsm-s',
|
||||
score => '-INFINITY',
|
||||
require => [Pacemaker::Resource::Ocf['vsm-p'],
|
||||
Pacemaker::Resource::Ocf['vsm-s']],
|
||||
}
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
include tripleo::packages
|
||||
include ::tripleo::packages
|
||||
|
||||
create_resources(sysctl::value, hiera('sysctl_settings'), {})
|
||||
|
||||
@ -22,8 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
|
||||
}
|
||||
|
||||
include ::swift
|
||||
class {'swift::storage::all':
|
||||
mount_check => str2bool(hiera('swift_mount_check'))
|
||||
class { '::swift::storage::all':
|
||||
mount_check => str2bool(hiera('swift_mount_check')),
|
||||
}
|
||||
if(!defined(File['/srv/node'])) {
|
||||
file { '/srv/node':
|
||||
@ -43,7 +43,7 @@ snmp::snmpv3_user { $snmpd_user:
|
||||
authtype => 'MD5',
|
||||
authpass => hiera('snmpd_readonly_user_password'),
|
||||
}
|
||||
class { 'snmp':
|
||||
class { '::snmp':
|
||||
agentaddress => ['udp:161','udp6:[::1]:161'],
|
||||
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
|
||||
}
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
include tripleo::packages
|
||||
include ::tripleo::packages
|
||||
|
||||
create_resources(sysctl::value, hiera('sysctl_settings'), {})
|
||||
|
||||
@ -47,7 +47,7 @@ snmp::snmpv3_user { $snmpd_user:
|
||||
authtype => 'MD5',
|
||||
authpass => hiera('snmpd_readonly_user_password'),
|
||||
}
|
||||
class { 'snmp':
|
||||
class { '::snmp':
|
||||
agentaddress => ['udp:161','udp6:[::1]:161'],
|
||||
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
|
||||
}
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
include tripleo::packages
|
||||
include ::tripleo::packages
|
||||
|
||||
define add_devices(
|
||||
$swift_zones = '1'
|
||||
@ -55,13 +55,15 @@ define add_devices(
|
||||
class tripleo::ringbuilder (
|
||||
$swift_zones = '1',
|
||||
$devices = '',
|
||||
$build_ring = 'True',
|
||||
$build_ring = true,
|
||||
$part_power,
|
||||
$replicas,
|
||||
$min_part_hours,
|
||||
) {
|
||||
|
||||
if str2bool(downcase("$build_ring")) {
|
||||
validate_bool($build_ring)
|
||||
|
||||
if $build_ring {
|
||||
|
||||
$device_array = strip(split(rstrip($devices), ','))
|
||||
|
||||
@ -74,7 +76,7 @@ class tripleo::ringbuilder (
|
||||
|
||||
# add all other devices
|
||||
add_devices {$device_array:
|
||||
swift_zones => $swift_zones
|
||||
swift_zones => $swift_zones,
|
||||
} ->
|
||||
|
||||
# rebalance
|
||||
|
Loading…
x
Reference in New Issue
Block a user