Configure ComputeServices via resource chains
This patch wires in the Heat plumbing needed to configure services via a Heat resource chain. Additional patches will then be able to configure compute services as composable services.

Change-Id: Ib4fd8bffde51902aa19f9673a389600fc467fc45
This commit is contained in:
parent 9036e73afe
commit f4e5895dd8
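For context, the new ComputeServices parameter is meant to be fed from an environment file in the same way ControllerServices already is: composable service templates are mapped into the Heat resource_registry and then listed per role. A minimal sketch, assuming hypothetical template paths and service names that are not part of this patch:

    resource_registry:
      # Illustrative mappings only; the per-service templates arrive in follow-up patches.
      OS::TripleO::Services: puppet/services/services.yaml
      OS::TripleO::Services::NovaCompute: puppet/services/nova-compute.yaml

    parameter_defaults:
      ComputeServices:
        - OS::TripleO::Services::NovaCompute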
@@ -661,6 +661,13 @@ parameters:
for each service that should get installed on the Controllers.
type: comma_delimited_list

ComputeServices:
default: []
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
for each service that should get installed on the Compute Nodes.
type: comma_delimited_list

# Block storage specific parameters
BlockStorageCount:
type: number

@@ -992,6 +999,13 @@ resources:
SchedulerHints: {get_param: ControllerSchedulerHints}
ServiceConfigSettings: {get_attr: [ControllerServiceChain, config_settings]}

ComputeServiceChain:
type: OS::TripleO::Services
properties:
Services: {get_param: ComputeServices}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}

Compute:
type: OS::Heat::ResourceGroup
depends_on: Networks

@@ -1072,6 +1086,8 @@ resources:
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: NovaComputeSchedulerHints}
NodeIndex: '%index%'
ServiceConfigSettings: {get_attr: [ComputeServiceChain, config_settings]}

BlockStorage:
type: OS::Heat::ResourceGroup

@@ -1557,6 +1573,7 @@ resources:
allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
compute_config: {get_attr: [Compute, attributes, config_identifier]}
deployment_identifier: {get_param: DeployIdentifier}
StepConfig: {get_attr: [ComputeServiceChain, step_config]}

ObjectStorageNodesPostDeployment:
type: OS::TripleO::ObjectStoragePostDeployment
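The ComputeServiceChain resource above is typed OS::TripleO::Services, which the resource_registry points at a template that builds the actual resource chain. A rough sketch of what such a template could look like, assuming each chain member exposes config_settings and step_config attributes as the get_attr calls above expect:

    heat_template_version: 2016-04-08
    parameters:
      Services:
        type: comma_delimited_list
        default: []
      EndpointMap:
        type: json
        default: {}
      MysqlVirtualIPUri:
        type: string
        default: ''
    resources:
      ServiceChain:
        type: OS::Heat::ResourceChain
        properties:
          resources: {get_param: Services}
          concurrent: true
          resource_properties:
            EndpointMap: {get_param: EndpointMap}
            MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
    outputs:
      config_settings:
        description: Merged hieradata from every service in the chain.
        value: {map_merge: {get_attr: [ServiceChain, config_settings]}}
      step_config:
        description: Concatenated puppet manifest fragments from the chain.
        value: {list_join: ["\n", {get_attr: [ServiceChain, step_config]}]}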
@@ -13,7 +13,10 @@ parameters:
NodeConfigIdentifiers:
type: json
description: Value which changes if the node configuration may need to be re-applied

StepConfig:
type: string
description: Config manifests that will be used to step through the deployment.
default: ''

resources:

@@ -34,25 +37,55 @@ resources:
group: puppet
options:
enable_debug: {get_param: ConfigDebug}
enable_hiera: True
enable_facter: False
inputs:
- name: step
outputs:
- name: result
config:
get_file: manifests/overcloud_compute.pp
list_join:
- ''
- - get_file: manifests/overcloud_compute.pp
- {get_param: StepConfig}

ComputePuppetDeployment:
ComputeServicesBaseDeployment_Step2:
type: OS::Heat::StructuredDeployments
depends_on: ComputeArtifactsDeploy
depends_on: [ComputeArtifactsDeploy]
properties:
name: ComputePuppetDeployment
name: ComputeServicesBaseDeployment_Step2
servers: {get_param: servers}
config: {get_resource: ComputePuppetConfig}
input_values:
step: 2
update_identifier: {get_param: NodeConfigIdentifiers}

ComputeOvercloudServicesDeployment_Step3:
type: OS::Heat::StructuredDeployments
depends_on: ComputeServicesBaseDeployment_Step2
properties:
name: ComputeOvercloudServicesDeployment_Step3
servers: {get_param: servers}
config: {get_resource: ComputePuppetConfig}
input_values:
step: 3
update_identifier: {get_param: NodeConfigIdentifiers}

ComputeOvercloudServicesDeployment_Step4:
type: OS::Heat::StructuredDeployments
depends_on: ComputeOvercloudServicesDeployment_Step3
properties:
name: ComputeOvercloudServicesDeployment_Step4
servers: {get_param: servers}
config: {get_resource: ComputePuppetConfig}
input_values:
step: 4
update_identifier: {get_param: NodeConfigIdentifiers}

# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
ExtraConfig:
depends_on: ComputePuppetDeployment
depends_on: ComputeOvercloudServicesDeployment_Step4
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: servers}
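Each service listed in ComputeServices is then expected to be a nested stack whose outputs feed the chain: its config_settings end up as role hieradata (see the ServiceConfigSettings and service_configs hunks below), while its step_config is appended to overcloud_compute.pp and re-applied at steps 2, 3 and 4 by the deployments above. A hypothetical minimal service template, with placeholder names:

    heat_template_version: 2016-04-08
    description: Example composable compute service (illustrative only).
    parameters:
      EndpointMap:
        type: json
        default: {}
      MysqlVirtualIPUri:
        type: string
        default: ''
    outputs:
      config_settings:
        description: Hiera keys merged into the role's ServiceConfigSettings.
        value:
          example_service::enabled: true
      step_config:
        description: Puppet fragment appended to the compute manifest.
        value: |
          if hiera('step') >= 4 {
            include ::example_service
          }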
@@ -339,6 +339,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
ServiceConfigSettings:
type: json
default: {}

resources:

@@ -481,6 +484,7 @@ resources:
- heat_config_%{::deploy_config_name}
- compute_extraconfig
- extraconfig
- service_configs
- compute
- ceph_cluster # provided by CephClusterConfig
- ceph

@@ -495,6 +499,8 @@ resources:
- neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre
merge_behavior: deeper
datafiles:
service_configs:
mapped_data: {get_param: ServiceConfigSettings}
compute_extraconfig:
mapped_data: {get_param: NovaComputeExtraConfig}
extraconfig:
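The datafiles addition above is what lands the aggregated ServiceConfigSettings on each compute node: the settings are written out as a new service_configs hieradata level in the hierarchy shown in the first hunk. Continuing the illustrative example, a service returning

    config_settings:
      example_service::enabled: true

would surface on the node as service_configs hieradata, resolvable from the puppet manifests with hiera('example_service::enabled').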
@@ -16,8 +16,8 @@
include ::tripleo::packages
include ::tripleo::firewall

create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
create_resources(kmod::load, hiera('kernel_modules'), { })
create_resources(sysctl::value, hiera('sysctl_settings'), { })
Exec <| tag == 'kmod::load' |> -> Sysctl <| |>

if count(hiera('ntp::servers')) > 0 {

@@ -26,200 +26,204 @@ if count(hiera('ntp::servers')) > 0 {

include ::timezone

file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
'/etc/libvirt/qemu/networks/default.xml']:
ensure => absent,
before => Service['libvirt'],
}
# in case libvirt has been already running before the Puppet run, make
# sure the default network is destroyed
exec { 'libvirt-default-net-destroy':
command => '/usr/bin/virsh net-destroy default',
onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"',
before => Service['libvirt'],
}
if hiera('step') >= 4 {

# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
exec { 'reset-iscsi-initiator-name':
command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi',
onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset',
}->

file { '/etc/iscsi/.initiator_reset':
ensure => present,
}

include ::nova
include ::nova::config
include ::nova::compute

$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
$rbd_persistent_storage = hiera('rbd_persistent_storage', false)
if $rbd_ephemeral_storage or $rbd_persistent_storage {
if str2bool(hiera('ceph_ipv6', false)) {
$mon_host = hiera('ceph_mon_host_v6')
} else {
$mon_host = hiera('ceph_mon_host')
file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
'/etc/libvirt/qemu/networks/default.xml']:
ensure => absent,
before => Service['libvirt'],
}
class { '::ceph::profile::params':
mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::client

$client_keys = hiera('ceph::profile::params::client_keys')
$client_user = join(['client.', hiera('ceph_client_user_name')])
class { '::nova::compute::rbd':
libvirt_rbd_secret_key => $client_keys[$client_user]['secret'],
}
}

if hiera('cinder_enable_nfs_backend', false) {
if str2bool($::selinux) {
selboolean { 'virt_use_nfs':
value => on,
persistent => true,
} -> Package['nfs-utils']
# in case libvirt has been already running before the Puppet run, make
# sure the default network is destroyed
exec { 'libvirt-default-net-destroy':
command => '/usr/bin/virsh net-destroy default',
onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"',
before => Service['libvirt'],
}

package {'nfs-utils': } -> Service['nova-compute']
}
# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
exec { 'reset-iscsi-initiator-name':
command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi',
onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset',
}->

if str2bool(hiera('nova::use_ipv6', false)) {
$vncserver_listen = '::0'
} else {
$vncserver_listen = '0.0.0.0'
}

if $rbd_ephemeral_storage {
class { '::nova::compute::libvirt':
libvirt_disk_cachemodes => ['network=writeback'],
libvirt_hw_disk_discard => 'unmap',
vncserver_listen => $vncserver_listen,
file { '/etc/iscsi/.initiator_reset':
ensure => present,
}
} else {
class { '::nova::compute::libvirt' :
vncserver_listen => $vncserver_listen,
}
}

nova_config {
'DEFAULT/my_ip': value => $ipaddress;
'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
'DEFAULT/host': value => $fqdn;
# TUNNELLED mode provides a security enhancement when using shared storage but is not
# supported when not using shared storage.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
# In future versions of QEMU (2.6, mostly), Dan's native encryption
# work will obsolete the need to use TUNNELLED transport mode.
'libvirt/live_migration_tunnelled': value => $rbd_ephemeral_storage;
}
include ::nova
include ::nova::config
include ::nova::compute

if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
file {'/etc/libvirt/qemu.conf':
ensure => present,
content => hiera('midonet_libvirt_qemu_data')
}
}
include ::nova::network::neutron
include ::neutron
include ::neutron::config
$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
$rbd_persistent_storage = hiera('rbd_persistent_storage', false)
if $rbd_ephemeral_storage or $rbd_persistent_storage {
if str2bool(hiera('ceph_ipv6', false)) {
$mon_host = hiera('ceph_mon_host_v6')
} else {
$mon_host = hiera('ceph_mon_host')
}
class { '::ceph::profile::params':
mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::client

# If the value of core plugin is set to 'nuage',
# include nuage agent,
# If the value of core plugin is set to 'midonet',
# include midonet agent,
# else use the default value of 'ml2'
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::nuage::vrs
include ::nova::compute::neutron

class { '::nuage::metadataagent':
nova_os_tenant_name => hiera('nova::api::admin_tenant_name'),
nova_os_password => hiera('nova_password'),
nova_metadata_ip => hiera('nova_metadata_node_ips'),
nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
}
}
elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {

# TODO(devvesa) provide non-controller ips for these services
$zookeeper_node_ips = hiera('neutron_api_node_ips')
$cassandra_node_ips = hiera('neutron_api_node_ips')

class {'::tripleo::network::midonet::agent':
zookeeper_servers => $zookeeper_node_ips,
cassandra_seeds => $cassandra_node_ips
}
}
elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {

include ::contrail::vrouter
# NOTE: it's not possible to use this class without a functional
# contrail controller up and running
#class {'::contrail::vrouter::provision_vrouter':
# require => Class['contrail::vrouter'],
#}
}
elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
# forward all ipv4 traffic
# this is required for the vms to pass through the gateways public interface
sysctl::value { 'net.ipv4.ip_forward': value => '1' }

# ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on
file { '/etc/sudoers.d/ifc_ctl_sudoers':
ensure => file,
owner => root,
group => root,
mode => '0440',
content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
}
}
else {

# NOTE: this code won't live in puppet-neutron until Neutron OVS agent
# can be gracefully restarted. See https://review.openstack.org/#/c/297211
# In the meantime, it's safe to restart the agent on each change in neutron.conf,
# because Puppet changes are supposed to be done during bootstrap and upgrades.
# Some resource managed by Neutron_config (like messaging and logging options) require
# a restart of OVS agent. This code does it.
# In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code
# from here and fix it in puppet-neutron.
Neutron_config<||> ~> Service['neutron-ovs-agent-service']

include ::neutron::plugins::ml2
include ::neutron::agents::ml2::ovs

if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
class { '::neutron::agents::n1kv_vem':
n1kv_source => hiera('n1kv_vem_source', undef),
n1kv_version => hiera('n1kv_vem_version', undef),
$client_keys = hiera('ceph::profile::params::client_keys')
$client_user = join(['client.', hiera('ceph_client_user_name')])
class { '::nova::compute::rbd':
libvirt_rbd_secret_key => $client_keys[$client_user]['secret'],
}
}

if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::agents::bigswitch
if hiera('cinder_enable_nfs_backend', false) {
if str2bool($::selinux) {
selboolean { 'virt_use_nfs':
value => on,
persistent => true,
} -> Package['nfs-utils']
}

package { 'nfs-utils': } -> Service['nova-compute']
}
}

neutron_config {
'DEFAULT/host': value => $fqdn;
}
if str2bool(hiera('nova::use_ipv6', false)) {
$vncserver_listen = '::0'
} else {
$vncserver_listen = '0.0.0.0'
}

include ::ceilometer
include ::ceilometer::config
include ::ceilometer::agent::compute
include ::ceilometer::agent::auth
if $rbd_ephemeral_storage {
class { '::nova::compute::libvirt':
libvirt_disk_cachemodes => ['network=writeback'],
libvirt_hw_disk_discard => 'unmap',
vncserver_listen => $vncserver_listen,
}
} else {
class { '::nova::compute::libvirt' :
vncserver_listen => $vncserver_listen,
}
}

$snmpd_user = hiera('snmpd_readonly_user_name')
snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
authpass => hiera('snmpd_readonly_user_password'),
}
class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
nova_config {
'DEFAULT/my_ip': value => $ipaddress;
'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
'DEFAULT/host': value => $fqdn;
# TUNNELLED mode provides a security enhancement when using shared storage but is not
# supported when not using shared storage.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
# In future versions of QEMU (2.6, mostly), Dan's native encryption
# work will obsolete the need to use TUNNELLED transport mode.
'libvirt/live_migration_tunnelled': value => $rbd_ephemeral_storage;
}

hiera_include('compute_classes')
package_manifest{'/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present}
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
file { '/etc/libvirt/qemu.conf':
ensure => present,
content => hiera('midonet_libvirt_qemu_data')
}
}
include ::nova::network::neutron
include ::neutron
include ::neutron::config

# If the value of core plugin is set to 'nuage',
# include nuage agent,
# If the value of core plugin is set to 'midonet',
# include midonet agent,
# else use the default value of 'ml2'
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::nuage::vrs
include ::nova::compute::neutron

class { '::nuage::metadataagent':
nova_os_tenant_name => hiera('nova::api::admin_tenant_name'),
nova_os_password => hiera('nova_password'),
nova_metadata_ip => hiera('nova_metadata_node_ips'),
nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
}
}
elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {

# TODO(devvesa) provide non-controller ips for these services
$zookeeper_node_ips = hiera('neutron_api_node_ips')
$cassandra_node_ips = hiera('neutron_api_node_ips')

class { '::tripleo::network::midonet::agent':
zookeeper_servers => $zookeeper_node_ips,
cassandra_seeds => $cassandra_node_ips
}
}
elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {

include ::contrail::vrouter
# NOTE: it's not possible to use this class without a functional
# contrail controller up and running
#class {'::contrail::vrouter::provision_vrouter':
# require => Class['contrail::vrouter'],
#}
}
elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
# forward all ipv4 traffic
# this is required for the vms to pass through the gateways public interface
sysctl::value { 'net.ipv4.ip_forward': value => '1' }

# ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on
file { '/etc/sudoers.d/ifc_ctl_sudoers':
ensure => file,
owner => root,
group => root,
mode => '0440',
content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
}
}
else {

# NOTE: this code won't live in puppet-neutron until Neutron OVS agent
# can be gracefully restarted. See https://review.openstack.org/#/c/297211
# In the meantime, it's safe to restart the agent on each change in neutron.conf,
# because Puppet changes are supposed to be done during bootstrap and upgrades.
# Some resource managed by Neutron_config (like messaging and logging options) require
# a restart of OVS agent. This code does it.
# In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code
# from here and fix it in puppet-neutron.
Neutron_config<||> ~> Service['neutron-ovs-agent-service']

include ::neutron::plugins::ml2
include ::neutron::agents::ml2::ovs

if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
class { '::neutron::agents::n1kv_vem':
n1kv_source => hiera('n1kv_vem_source', undef),
n1kv_version => hiera('n1kv_vem_version', undef),
}
}

if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::agents::bigswitch
}
}

neutron_config {
'DEFAULT/host': value => $fqdn;
}

include ::ceilometer
include ::ceilometer::config
include ::ceilometer::agent::compute
include ::ceilometer::agent::auth

$snmpd_user = hiera('snmpd_readonly_user_name')
snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
authpass => hiera('snmpd_readonly_user_password'),
}
class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}

hiera_include('compute_classes')
package_manifest{ '/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present }

}