Graph and manifests cleanup for modularization

* Move needed resources from controller.pp to openstack-controller
* Rename 'top-role-controller' task to 'controller_remaining_tasks'
* Simplify tasks graph
* Add missing notice('MODULAR') function calls
* Remove unneeded ./openstack/manifests/controller_ha.pp file
* Move tweaks::ubuntu_service_override into proper manifests
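
Each modular manifest now announces itself with a notice() banner and carries its own
Ubuntu service overrides. A minimal sketch of the pattern (the 'example-api' service and
'example' package names are placeholders; the real instances are in the heat, murano and
sahara hunks below):

  notice('MODULAR: example.pp')

  ####### Disable upstart startup on install (Ubuntu only) #######
  if($::operatingsystem == 'Ubuntu') {
    tweaks::ubuntu_service_override { 'example-api':
      package_name => 'example',
    }
  }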

Change-Id: I990aeb202ec34cc1289d36cd85c7894070d149b9
Related-Blueprint: fuel-library-modularization
Aleksandr Didenko 2015-03-01 13:02:34 +02:00
parent ed68c7270a
commit 8655f4a50d
22 changed files with 74 additions and 428 deletions
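
Taken together, the tasks.yaml changes below reduce the controller graph to roughly the
following ordering (derived from the requires/required_for pairs in the hunks; simplified):

  hosts, firewall -> cluster -> virtual_ips -> cluster-haproxy -> openstack-haproxy
    -> openstack-controller -> heat -> murano -> controller_remaining_tasks -> deploy_end

sahara and swift also require openstack-controller and run before controller_remaining_tasks;
vmware/vcenter requires controller_remaining_tasks.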

View File

@@ -547,21 +547,6 @@ class openstack::controller (
tweaks::ubuntu_service_override { 'glance-registry':
package_name => 'glance-registry',
}
tweaks::ubuntu_service_override { ['murano_api', 'murano_engine']:
package_name => 'murano',
}
tweaks::ubuntu_service_override { 'heat-api-cloudwatch':
package_name => 'heat-api-cloudwatch',
}
tweaks::ubuntu_service_override { 'heat-api-cfn':
package_name => 'heat-api-cfn',
}
tweaks::ubuntu_service_override { 'heat-api':
package_name => 'heat-api',
}
tweaks::ubuntu_service_override { 'sahara-api':
package_name => 'sahara',
}
tweaks::ubuntu_service_override { 'keystone':
package_name => 'keystone',
}

View File

@@ -1,238 +0,0 @@
# Set up HA for OpenStack controller services
class openstack::controller_ha (
$controllers,
$primary_controller,
$controller_public_addresses,
$public_interface,
$private_interface = 'eth2',
$controller_internal_addresses,
$internal_virtual_ip,
$public_virtual_ip,
$internal_address,
$floating_range,
$fixed_range,
$multi_host,
$network_manager,
$verbose = true,
$debug = false,
$network_config = {},
$num_networks = 1,
$network_size = 255,
$auto_assign_floating_ip = false,
$mysql_root_password,
$keystone_admin_tenant = 'admin',
$keystone_db_password,
$keystone_admin_token,
$glance_db_password,
$glance_user_password,
$glance_image_cache_max_size,
$known_stores = false,
$glance_vcenter_host = undef,
$glance_vcenter_user = undef,
$glance_vcenter_password = undef,
$glance_vcenter_datacenter = undef,
$glance_vcenter_datastore = undef,
$glance_vcenter_image_dir = undef,
$nova_db_password,
$nova_user_password,
$queue_provider,
$amqp_hosts,
$amqp_user,
$amqp_password,
$rabbit_ha_queues = true,
$rabbitmq_bind_ip_address,
$rabbitmq_bind_port,
$rabbitmq_cluster_nodes,
$memcached_servers,
$memcached_bind_address = undef,
$export_resources,
$glance_backend = 'file',
$swift_proxies = undef,
$rgw_servers = undef,
$network_provider = 'nova',
$neutron_db_user = 'neutron',
$neutron_db_password = 'neutron_db_pass',
$neutron_db_dbname = 'neutron',
$neutron_user_password = 'asdf123',
$neutron_metadata_proxy_secret = '12345',
$neutron_ha_agents = 'slave',
$base_mac = 'fa:16:3e:00:00:00',
$cinder = false,
$cinder_iscsi_bind_addr = false,
$nv_physical_volume = undef,
$manage_volumes = false,
$custom_mysql_setup_class = 'galera', $galera_nodes,
$use_syslog = false,
$novnc_address = undef,
$syslog_log_facility_glance = 'LOG_LOCAL2',
$syslog_log_facility_cinder = 'LOG_LOCAL3',
$syslog_log_facility_neutron = 'LOG_LOCAL4',
$syslog_log_facility_nova = 'LOG_LOCAL6',
$syslog_log_facility_keystone = 'LOG_LOCAL7',
$syslog_log_facility_ceilometer = 'LOG_LOCAL0',
$cinder_rate_limits = undef, $nova_rate_limits = undef,
$cinder_volume_group = 'cinder-volumes',
$cinder_user_password = 'cinder_user_pass',
$cinder_db_password = 'cinder_db_pass',
$ceilometer = false,
$ceilometer_db_password = 'ceilometer_pass',
$ceilometer_user_password = 'ceilometer_pass',
$ceilometer_db_user = 'ceilometer',
$ceilometer_db_dbname = 'ceilometer',
$ceilometer_metering_secret = 'ceilometer',
$ceilometer_db_type = 'mongodb',
$swift_rados_backend = false,
$ceilometer_db_host = '127.0.0.1',
$ceilometer_ext_mongo = false,
$mongo_replicaset = undef,
$sahara = false,
$murano = false,
$horizon_use_ssl = false,
$neutron_network_node = false,
$neutron_netnode_on_cnt = false,
$mysql_skip_name_resolve = false,
$ha_provider = "pacemaker",
$create_networks = true,
$use_unicast_corosync = false,
$ha_mode = true,
$nameservers = undef,
$idle_timeout = '3600',
$max_pool_size = '10',
$max_overflow = '30',
$max_retries = '-1',
$nova_report_interval = '10',
$nova_service_down_time = '60',
) {
class { '::openstack::ha::haproxy':
controllers => $controllers,
public_virtual_ip => $public_virtual_ip,
internal_virtual_ip => $internal_virtual_ip,
horizon_use_ssl => $horizon_use_ssl,
neutron => $network_provider ? {'neutron' => true, default => false},
queue_provider => $queue_provider,
custom_mysql_setup_class => $custom_mysql_setup_class,
swift_proxies => $swift_proxies,
rgw_servers => $rgw_servers,
ceilometer => $ceilometer,
sahara => $sahara,
murano => $murano,
is_primary_controller => $primary_controller,
}
class { '::openstack::controller':
private_interface => $private_interface,
public_interface => $public_interface,
public_address => $public_virtual_ip, # It is feature for HA mode.
internal_address => $internal_virtual_ip, # All internal traffic goes
admin_address => $internal_virtual_ip, # through load balancer.
floating_range => $floating_range,
fixed_range => $fixed_range,
multi_host => $multi_host,
network_config => $network_config,
num_networks => $num_networks,
network_size => $network_size,
network_manager => $network_manager,
verbose => $verbose,
debug => $debug,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
custom_mysql_setup_class => $custom_mysql_setup_class,
galera_cluster_name => 'openstack',
primary_controller => $primary_controller,
galera_node_address => $internal_address,
galera_nodes => $galera_nodes,
novnc_address => $novnc_address,
mysql_skip_name_resolve => $mysql_skip_name_resolve,
admin_email => $admin_email,
admin_user => $admin_user,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
keystone_admin_tenant => $keystone_admin_tenant,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
glance_api_servers => $glance_api_servers,
glance_image_cache_max_size => $glance_image_cache_max_size,
glance_vcenter_host => $glance_vcenter_host,
glance_vcenter_user => $glance_vcenter_user,
glance_vcenter_password => $glance_vcenter_password,
glance_vcenter_datacenter => $glance_vcenter_datacenter,
glance_vcenter_datastore => $glance_vcenter_datastore,
glance_vcenter_image_dir => $glance_vcenter_image_dir,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
queue_provider => $queue_provider,
amqp_hosts => $amqp_hosts,
amqp_user => $amqp_user,
amqp_password => $amqp_password,
rabbit_ha_queues => $rabbit_ha_queues,
rabbitmq_bind_ip_address => $rabbitmq_bind_ip_address,
rabbitmq_bind_port => $rabbitmq_bind_port,
rabbitmq_cluster_nodes => $rabbitmq_cluster_nodes,
cache_server_ip => $memcached_servers,
memcached_bind_address => $memcached_bind_address,
export_resources => false,
api_bind_address => $internal_address,
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
glance_backend => $glance_backend,
known_stores => $known_stores,
#require => Service['keepalived'],
network_provider => $network_provider,
neutron_db_user => $neutron_db_user,
neutron_db_password => $neutron_db_password,
neutron_db_dbname => $neutron_db_dbname,
neutron_user_password => $neutron_user_password,
neutron_metadata_proxy_secret => $neutron_metadata_proxy_secret,
neutron_ha_agents => $neutron_ha_agents,
segment_range => $segment_range,
tenant_network_type => $tenant_network_type,
create_networks => $primary_controller,
#
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
cinder_user_password => $cinder_user_password,
cinder_db_password => $cinder_db_password,
manage_volumes => $manage_volumes,
nv_physical_volume => $nv_physical_volume,
cinder_volume_group => $cinder_volume_group,
#
ceilometer => $ceilometer,
ceilometer_db_user => $ceilometer_db_user,
ceilometer_db_password => $ceilometer_db_password,
ceilometer_user_password => $ceilometer_user_password,
ceilometer_metering_secret => $ceilometer_metering_secret,
ceilometer_db_dbname => $ceilometer_db_dbname,
ceilometer_db_type => $ceilometer_db_type,
ceilometer_db_host => $ceilometer_db_host,
swift_rados_backend => $swift_rados_backend,
ceilometer_ext_mongo => $ceilometer_ext_mongo,
mongo_replicaset => $mongo_replicaset,
#
# turn on SWIFT_ENABLED option for Horizon dashboard
swift => $glance_backend ? { 'swift' => true, default => false },
use_syslog => $use_syslog,
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
syslog_log_facility_ceilometer => $syslog_log_facility_ceilometer,
cinder_rate_limits => $cinder_rate_limits,
nova_rate_limits => $nova_rate_limits,
nova_report_interval => $nova_report_interval,
nova_service_down_time => $nova_service_down_time,
horizon_use_ssl => $horizon_use_ssl,
ha_mode => $ha_mode,
nameservers => $nameservers,
# SQLALchemy backend
max_retries => $max_retries,
max_pool_size => $max_pool_size,
max_overflow => $max_overflow,
idle_timeout => $idle_timeout,
}
}

View File

@@ -1,7 +1,7 @@
- id: cluster-haproxy
type: puppet
groups: [primary-controller, controller]
required_for: [top-role-controller, deploy_end]
required_for: [deploy_end]
requires: [virtual_ips]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster-haproxy/cluster-haproxy.pp

View File

@@ -1,7 +1,7 @@
- id: cluster
type: puppet
groups: [primary-controller, controller]
required_for: [top-role-controller, deploy_end]
required_for: [deploy_end]
requires: [hosts, firewall]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster/cluster.pp

View File

@@ -20,7 +20,18 @@ $databse_name = 'heat'
$read_timeout = '60'
$sql_connection = "mysql://${databse_user}:${database_password}@${$controller_node_address}/${databse_name}?read_timeout=${read_timeout}"
#################################################################
####### Disable upstart startup on install #######
if($::operatingsystem == 'Ubuntu') {
tweaks::ubuntu_service_override { 'heat-api-cloudwatch':
package_name => 'heat-api-cloudwatch',
}
tweaks::ubuntu_service_override { 'heat-api-cfn':
package_name => 'heat-api-cfn',
}
tweaks::ubuntu_service_override { 'heat-api':
package_name => 'heat-api',
}
}
class { 'openstack::heat' :
external_ip => $controller_node_public,

View File

@@ -1,8 +1,8 @@
- id: heat
type: puppet
groups: [primary-controller, controller]
required_for: [deploy_end, top-role-controller]
requires: [hiera, globals, netconfig, firewall, openstack-controller]
required_for: [deploy_end]
requires: [openstack-controller]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/heat/heat.pp
puppet_modules: /etc/puppet/modules

View File

@@ -21,6 +21,13 @@ $primary_controller = hiera('primary_controller')
if $murano_hash['enabled'] {
####### Disable upstart startup on install #######
if($::operatingsystem == 'Ubuntu') {
tweaks::ubuntu_service_override { ['murano_api', 'murano_engine']:
package_name => 'murano',
}
}
#NOTE(mattymo): Backward compatibility for Icehouse
case $openstack_version {
/201[1-3]\./: {

View File

@@ -1,8 +1,8 @@
- id: murano
type: puppet
groups: [primary-controller, controller]
required_for: [deploy_end, top-role-controller]
requires: [hiera, globals, netconfig, firewall, heat]
required_for: [deploy_end, controller_remaining_tasks]
requires: [heat]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/murano/murano.pp
puppet_modules: /etc/puppet/modules

View File

@@ -2,7 +2,7 @@
type: puppet
groups: [primary-controller, controller, cinder, compute, ceph-osd, zabbix-server, primary-mongo, mongo]
required_for: [deploy_end]
requires: [logging]
requires: [tools]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/netconfig/netconfig.pp
puppet_modules: /etc/puppet/modules

View File

@@ -17,6 +17,7 @@ $syslog_log_facility_ceilometer = hiera('syslog_log_facility_ceilometer','LOG_LOCAL0')
$management_vip = hiera('management_vip')
$public_vip = hiera('public_vip')
$storage_address = hiera('storage_address')
$sahara_hash = hiera('sahara', {})
$cinder_hash = hiera('cinder', {})
$nodes_hash = hiera('nodes', {})
$mysql_hash = hiera('mysql', {})
@@ -451,3 +452,26 @@ if $primary_controller {
}
}
nova_config {
'DEFAULT/teardown_unused_network_gateway': value => 'True'
}
if $sahara_hash['enabled'] {
$scheduler_default_filters = [ 'DifferentHostFilter' ]
} else {
$scheduler_default_filters = []
}
class { '::nova::scheduler::filter':
cpu_allocation_ratio => '8.0',
disk_allocation_ratio => '1.0',
ram_allocation_ratio => '1.0',
scheduler_host_subset_size => '30',
scheduler_default_filters => concat($scheduler_default_filters, [ 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'CoreFilter', 'DiskFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter' ])
}
# From logasy filter.pp
nova_config {
'DEFAULT/ram_weight_multiplier': value => '1.0'
}

View File

@@ -1,7 +1,7 @@
- id: openstack-controller
type: puppet
groups: [primary-controller, controller]
required_for: [top-role-controller, deploy_end]
required_for: [deploy_end]
requires: [openstack-haproxy]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-controller/openstack-controller.pp

View File

@@ -1,7 +1,7 @@
- id: openstack-haproxy
type: puppet
groups: [primary-controller, controller]
required_for: [top-role-controller, deploy_end]
required_for: [deploy_end]
requires: [cluster-haproxy]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-haproxy/openstack-haproxy.pp

View File

@@ -64,16 +64,6 @@ if $neutron_mellanox {
$mellanox_mode = 'disabled'
}
if (!empty(filter_nodes(hiera('nodes'), 'role', 'ceph-osd')) or
$storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_neutron {
include l23network::l2
$novanetwork_params = {}
@@ -96,32 +86,6 @@ if $use_neutron {
}
$network_manager = "nova.network.manager.${novanetwork_params['network_manager']}"
if !$ceilometer_hash {
$ceilometer_hash = {
enabled => false,
db_password => 'ceilometer',
user_password => 'ceilometer',
metering_secret => 'ceilometer',
}
$ext_mongo = false
} else {
# External mongo integration
if $mongo_hash['enabled'] {
$ext_mongo_hash = hiera('external_mongo')
$ceilometer_db_user = $ext_mongo_hash['mongo_user']
$ceilometer_db_password = $ext_mongo_hash['mongo_password']
$ceilometer_db_name = $ext_mongo_hash['mongo_db_name']
$ext_mongo = true
} else {
$ceilometer_db_user = 'ceilometer'
$ceilometer_db_password = $ceilometer_hash['db_password']
$ceilometer_db_name = 'ceilometer'
$ext_mongo = false
$ext_mongo_hash = {}
}
}
if $primary_controller {
if ($mellanox_mode == 'ethernet') {
$test_vm_pkg = 'cirros-testvm-mellanox'
@@ -134,25 +98,6 @@ if $primary_controller {
}
}
if $ceilometer_hash['enabled'] {
if $ext_mongo {
$mongo_hosts = $ext_mongo_hash['hosts_ip']
if $ext_mongo_hash['mongo_replset'] {
$mongo_replicaset = $ext_mongo_hash['mongo_replset']
} else {
$mongo_replicaset = undef
}
} else {
$mongo_hosts = mongo_hosts($nodes_hash)
if size(mongo_hosts($nodes_hash, 'array', 'mongo')) > 1 {
$mongo_replicaset = 'ceilometer'
} else {
$mongo_replicaset = undef
}
}
}
if !$rabbit_hash['user'] {
$rabbit_hash['user'] = 'nova'
}
@@ -162,22 +107,9 @@ if ! $use_neutron {
}
$floating_hash = {}
##CALCULATED PARAMETERS
##NO NEED TO CHANGE
$node = filter_nodes($nodes_hash, 'name', $::hostname)
if empty($node) {
fail("Node $::hostname is not defined in the hash structure")
}
# get cidr netmasks for VIPs
$primary_controller_nodes = filter_nodes($nodes_hash,'role','primary-controller')
##REFACTORING NEEDED
##TODO: simply parse nodes array
$controller_internal_addresses = nodes_to_hash($controllers,'name','internal_address')
$controller_public_addresses = nodes_to_hash($controllers,'name','public_address')
@@ -188,59 +120,10 @@ $controller_node_public = $public_vip
$controller_node_address = $management_vip
$roles = node_roles($nodes_hash, hiera('uid'))
# AMQP client configuration
if $internal_address in $controller_nodes {
# prefer local MQ broker if it exists on this node
$amqp_nodes = concat(['127.0.0.1'], fqdn_rotate(delete($controller_nodes, $internal_address)))
} else {
$amqp_nodes = fqdn_rotate($controller_nodes)
}
$amqp_port = '5673'
$amqp_hosts = inline_template("<%= @amqp_nodes.map {|x| x + ':' + @amqp_port}.join ',' %>")
$rabbit_ha_queues = true
# RabbitMQ server configuration
$rabbitmq_bind_ip_address = 'UNSET' # bind RabbitMQ to 0.0.0.0
$rabbitmq_bind_port = $amqp_port
$rabbitmq_cluster_nodes = $controller_hostnames # has to be hostnames
# SQLAlchemy backend configuration
$max_pool_size = min($::processorcount * 5 + 0, 30 + 0)
$max_overflow = min($::processorcount * 5 + 0, 60 + 0)
$max_retries = '-1'
$idle_timeout = '3600'
$cinder_iscsi_bind_addr = $storage_address
# Determine who should get the volume service
if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) {
$manage_volumes = 'iscsi'
} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) {
$manage_volumes = 'vmdk'
} elsif ($storage_hash['volumes_ceph']) {
$manage_volumes = 'ceph'
} else {
$manage_volumes = false
}
# Determine who should be the default backend
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
$glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ]
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
$glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ]
} else {
$glance_backend = 'swift'
$glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ]
}
$network_config = {
'vlan_start' => $vlan_start,
}
#################################################################
# NOTE(bogdando) for controller nodes running Corosync with Pacemaker
# we delegate all of the monitor functions to RA instead of monit.
@@ -264,7 +147,7 @@ if $use_monit_real {
$ovs_vswitchd_name = $::l23network::params::ovs_service_name
case $::osfamily {
'RedHat' : {
$service_path = '/sbin/service'
$service_path = '/sbin/service'
}
'Debian' : {
$service_path = '/usr/sbin/service'
@@ -275,9 +158,6 @@ if $use_monit_real {
}
}
#HARDCODED PARAMETERS
$mirror_type = 'external'
Exec { logoutput => true }
if $use_vmware_nsx {
@@ -288,34 +168,6 @@ if $use_vmware_nsx {
}
}
#################################################################
include osnailyfacter::test_controller
nova_config {
'DEFAULT/teardown_unused_network_gateway': value => 'True'
}
#ADDONS START
if $sahara_hash['enabled'] {
$scheduler_default_filters = [ 'DifferentHostFilter' ]
} else {
$scheduler_default_filters = []
}
class { '::nova::scheduler::filter':
cpu_allocation_ratio => '8.0',
disk_allocation_ratio => '1.0',
ram_allocation_ratio => '1.0',
scheduler_host_subset_size => '30',
scheduler_default_filters => concat($scheduler_default_filters, [ 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'CoreFilter', 'DiskFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter' ])
}
# From logasy filter.pp
nova_config {
'DEFAULT/ram_weight_multiplier': value => '1.0'
}
if ($::mellanox_mode == 'ethernet') {
$ml2_eswitch = $neutron_mellanox['ml2_eswitch']
class { 'mellanox_openstack::controller':
@@ -324,10 +176,6 @@ if ($::mellanox_mode == 'ethernet') {
}
}
#ADDONS END
########################################################################
# TODO(bogdando) add monit zabbix services monitoring, if required
# NOTE(bogdando) for nodes with pacemaker, we should use OCF instead of monit
@@ -341,8 +189,4 @@ sysctl::value { 'vm.swappiness':
value => "10"
}
# Stubs
class mysql::server {}
include mysql::server
# vim: set ts=2 sw=2 et :

View File

@@ -1,8 +1,7 @@
- id: top-role-controller
- id: controller_remaining_tasks
type: puppet
groups: [primary-controller, controller]
required_for: [deploy_end]
requires: [hosts, firewall]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/roles/controller.pp
puppet_modules: /etc/puppet/modules

View File

@@ -20,6 +20,14 @@ $deployment_mode = hiera('deployment_mode')
#################################################################
if $sahara_hash['enabled'] {
####### Disable upstart startup on install #######
if($::operatingsystem == 'Ubuntu') {
tweaks::ubuntu_service_override { 'sahara-api':
package_name => 'sahara',
}
}
class { 'sahara' :
api_host => $public_ip,
db_password => $sahara_hash['db_password'],

View File

@@ -1,8 +1,8 @@
- id: sahara
type: puppet
groups: [primary-controller, controller]
required_for: [deploy_end, top-role-controller]
requires: [hiera, globals, netconfig, firewall, openstack-controller]
required_for: [deploy_end, controller_remaining_tasks]
requires: [openstack-controller]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/sahara/sahara.pp
puppet_modules: /etc/puppet/modules

View File

@@ -1,3 +1,5 @@
notice('MODULAR: swift.pp')
$swift_hash = hiera('swift_hash')
$storage_hash = hiera('storage_hash')
$mp_hash = hiera('mp')

View File

@@ -1,7 +1,7 @@
- id: swift
type: puppet
groups: [primary-controller, controller]
required_for: [deploy_end]
required_for: [deploy_end, controller_remaining_tasks]
requires: [openstack-controller]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/swift/swift.pp

View File

@@ -1,7 +1,7 @@
- id: virtual_ips
type: puppet
groups: [primary-controller, controller]
required_for: [top-role-controller, deploy_end]
required_for: [deploy_end]
requires: [cluster]
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/virtual_ips/virtual_ips.pp

View File

@@ -1,3 +1,5 @@
notice('MODULAR: vmware/compute.pp')
$use_vcenter = hiera('use_vcenter', false)
include nova::params

View File

@@ -2,7 +2,7 @@
type: puppet
groups: [primary-controller, controller]
required_for: [deploy_end]
requires: [top-role-controller]
requires: [controller_remaining_tasks]
condition: "settings:common.libvirt_type.value == 'vcenter' or settings:common.use_vcenter.value == true"
parameters:
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/vmware/vcenter.pp

View File

@@ -1,3 +1,5 @@
notice('MODULAR: vmware/vcenter.pp')
$libvirt_type = hiera('libvirt_type')
$use_vcenter = hiera('use_vcenter', false)
$vcenter_hash = hiera('vcenter_hash')