Remove puppet-manifests code made obsolete by Ansible

As a result of the switch to Ansible, remove the obsolete erb
templates and remove the dependency on the is_initial_config_primary
facter.

Change-Id: I4ca6525f01a37da971dc66a11ee99ea4e115e3ad
Partial-Bug: 1834218
Depends-On: https://review.opendev.org/#/c/703517/
Signed-off-by: Angie Wang <angie.wang@windriver.com>
Angie Wang 2020-01-15 16:15:26 -05:00
parent dc2e42f634
commit 27f167eb14
20 changed files with 179 additions and 1239 deletions
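
The recurring change across the file diffs below is the removal of checks on the is_initial_config_primary facter (typically if str2bool($::is_initial_config_primary) { ... }) in favour of explicit class parameters that the Ansible-driven bootstrap manifests set directly. A minimal sketch of the resulting shape, using a hypothetical platform::example module rather than any of the real classes touched by this commit:

# Hypothetical params class, standing in for the per-module ::params classes
# used throughout these manifests.
class platform::example::params (
  $service_name = 'example-api',
) { }

# New pattern: the caller decides whether the service runs; no facter lookup.
class platform::example::service (
  $service_enabled = false,
) inherits ::platform::example::params {
  if $service_enabled {
    $service_ensure = 'running'
  } else {
    $service_ensure = 'stopped'
  }

  service { 'example-api':
    ensure => $service_ensure,
    enable => $service_enabled,
  }
}

# The bootstrap manifest applied by the Ansible playbooks opts in explicitly,
# mirroring the openstack::barbican::bootstrap and platform::etcd::bootstrap
# changes below:
class { '::platform::example::service':
  service_enabled => true,
}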


@@ -77,8 +77,6 @@ make install \
BINDIR=%{buildroot}%{local_bindir} \
CONFIGDIR=%{buildroot}%{config_dir} \
MODULEDIR=%{buildroot}%{module_dir}
# This .orig file is not allowed in SUSE, and it is not critical for StarlingX functionality
rm %{puppet_modules}/platform/templates/calico.yaml.erb.orig
%files
%defattr(-,root,root,-)


@@ -1,22 +0,0 @@
#
# puppet manifest for controller initial bootstrap
#
Exec {
timeout => 600,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
include ::platform::config::bootstrap
include ::platform::users::bootstrap
include ::platform::sysctl::bootstrap
include ::platform::ldap::bootstrap
include ::platform::drbd::bootstrap
include ::platform::postgresql::bootstrap
include ::platform::amqp::bootstrap
include ::openstack::keystone::bootstrap
include ::openstack::barbican::bootstrap
include ::platform::client::bootstrap
include ::platform::sysinv::bootstrap


@@ -3,94 +3,88 @@ class openstack::barbican::params (
$region_name = undef,
$service_name = 'barbican-api',
$service_create = false,
$service_enabled = true,
) { }
class openstack::barbican
inherits ::openstack::barbican::params {
if $service_enabled {
include ::platform::params
include ::platform::params
if $::platform::params::init_keystone {
include ::barbican::keystone::auth
include ::barbican::keystone::authtoken
}
if $::platform::params::init_keystone {
include ::barbican::keystone::auth
include ::barbican::keystone::authtoken
}
if $::platform::params::init_database {
include ::barbican::db::postgresql
}
if $::platform::params::init_database {
include ::barbican::db::postgresql
}
barbican_config {
'service_credentials/interface': value => 'internalURL'
}
barbican_config {
'service_credentials/interface': value => 'internalURL'
}
file { '/var/run/barbican':
ensure => 'directory',
owner => 'barbican',
group => 'barbican',
}
file { '/var/run/barbican':
ensure => 'directory',
owner => 'barbican',
group => 'barbican',
}
$api_workers = $::platform::params::eng_workers_by_4
$api_workers = $::platform::params::eng_workers_by_4
file_line { 'Modify workers in gunicorn-config.py':
path => '/etc/barbican/gunicorn-config.py',
line => "workers = ${api_workers}",
match => '.*workers = .*',
tag => 'modify-workers',
}
file_line { 'Modify workers in gunicorn-config.py':
path => '/etc/barbican/gunicorn-config.py',
line => "workers = ${api_workers}",
match => '.*workers = .*',
tag => 'modify-workers',
}
file { '/etc/logrotate.d/barbican-api':
ensure => present,
content => template('openstack/barbican-api-logrotate.erb')
}
file { '/etc/logrotate.d/barbican-api':
ensure => present,
content => template('openstack/barbican-api-logrotate.erb')
}
}
class openstack::barbican::service
inherits ::openstack::barbican::params {
class openstack::barbican::service (
$service_enabled = false,
) inherits ::openstack::barbican::params {
$api_fqdn = $::platform::params::controller_hostname
$url_host = "http://${api_fqdn}:${api_port}"
if $service_enabled {
$enabled = true
$api_host = '[::]'
} else {
$enabled = false
include ::platform::network::mgmt::params
$api_host = $::platform::network::mgmt::params::subnet_version ? {
6 => "[${::platform::network::mgmt::params::controller_address}]",
default => $::platform::network::mgmt::params::controller_address,
}
}
include ::platform::amqp::params
$api_fqdn = $::platform::params::controller_hostname
$url_host = "http://${api_fqdn}:${api_port}"
if str2bool($::is_initial_config_primary) {
$enabled = true
$api_host = '[::]'
} else {
$enabled = false
include ::platform::network::mgmt::params
$api_host = $::platform::network::mgmt::params::subnet_version ? {
6 => "[${::platform::network::mgmt::params::controller_address}]",
default => $::platform::network::mgmt::params::controller_address,
}
}
include ::platform::amqp::params
class { '::barbican::api':
enabled => $enabled,
bind_host => $api_host,
bind_port => $api_port,
host_href => $url_host,
sync_db => !$::openstack::barbican::params::service_create,
enable_proxy_headers_parsing => true,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}
class { '::barbican::api':
enabled => $enabled,
bind_host => $api_host,
bind_port => $api_port,
host_href => $url_host,
sync_db => !$::openstack::barbican::params::service_create,
enable_proxy_headers_parsing => true,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}
class { '::barbican::keystone::notification':
enable_keystone_notification => true,
}
class { '::barbican::keystone::notification':
enable_keystone_notification => true,
}
cron { 'barbican-cleaner':
ensure => 'present',
command => '/usr/bin/barbican-manage db clean -p -e -L /var/log/barbican/barbican-clean.log',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '50',
hour => '*/24',
user => 'root',
}
cron { 'barbican-cleaner':
ensure => 'present',
command => '/usr/bin/barbican-manage db clean -p -e -L /var/log/barbican/barbican-clean.log',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '50',
hour => '*/24',
user => 'root',
}
}
@@ -132,10 +126,8 @@ class openstack::barbican::api
}
}
if $service_enabled {
include ::openstack::barbican::service
include ::openstack::barbican::haproxy
}
include ::openstack::barbican::service
include ::openstack::barbican::haproxy
}
class openstack::barbican::bootstrap
@@ -164,7 +156,9 @@ class openstack::barbican::bootstrap
include ::barbican::db::postgresql
include ::openstack::barbican
include ::openstack::barbican::service
class { '::openstack::barbican::service':
service_enabled => true,
}
}
class openstack::barbican::runtime


@@ -38,17 +38,8 @@ class openstack::keystone (
$keystone_key_repo_path = "${::platform::drbd::platform::params::mountpoint}/keystone"
$eng_workers = $::platform::params::eng_workers
# FIXME(mpeters): binding to wildcard address to allow bootstrap transition
# Not sure if there is a better way to transition from the localhost address
# to the management address while still being able to authenticate the client
if str2bool($::is_initial_config_primary) {
$enabled = true
$bind_host = '[::]'
} else {
$enabled = false
$bind_host = $::platform::network::mgmt::params::controller_address_url
}
$enabled = false
$bind_host = $::platform::network::mgmt::params::controller_address_url
Class[$name] -> Class['::platform::client']
@@ -214,9 +205,9 @@ class openstack::keystone::bootstrap(
# In the case of a Distributed Cloud deployment, apply the Keystone
# controller configuration for each SubCloud, since Keystone is also
# a localized service.
if ($::platform::params::init_keystone and
(!$::platform::params::region_config or
$::platform::params::distributed_cloud_role == 'subcloud')) {
if (!$::platform::params::region_config or
$::platform::params::distributed_cloud_role == 'subcloud') {
include ::keystone::db::postgresql
@@ -274,7 +265,6 @@ class openstack::keystone::endpointgroup
# $::platform::params::init_keystone should be checked by the caller,
# as this class should only be invoked when initializing keystone,
# i.e. when is_initial_config_primary is true.
if ($::platform::params::distributed_cloud_role =='systemcontroller') {
$reference_region = $::openstack::keystone::params::region_name


@@ -1,8 +0,0 @@
# Returns true if this is the primary initial config (i.e. first controller)
Facter.add("is_initial_config_primary") do
setcode do
ENV['INITIAL_CONFIG_PRIMARY'] == "true"
end
end


@@ -35,14 +35,6 @@ class platform::amqp::rabbitmq (
if $service_enabled {
$service_ensure = 'running'
}
elsif str2bool($::is_initial_config_primary) {
$service_ensure = 'running'
# ensure service is stopped after initial configuration
class { '::platform::amqp::post':
stage => post
}
} else {
$service_ensure = 'stopped'
}


@@ -86,34 +86,31 @@ class platform::compute::grub::recovery {
class platform::compute::grub::audit
inherits ::platform::compute::grub::params {
if ! str2bool($::is_initial_config_primary) {
notice('Audit CPU and Grub Configuration')
notice('Audit CPU and Grub Configuration')
$expected_n_cpus = Integer($::number_of_logical_cpus)
$n_cpus_ok = ($n_cpus == $expected_n_cpus)
$expected_n_cpus = Integer($::number_of_logical_cpus)
$n_cpus_ok = ($n_cpus == $expected_n_cpus)
$cmd_ok = check_grub_config($grub_updates)
$cmd_ok = check_grub_config($grub_updates)
if $cmd_ok and $n_cpus_ok {
$ensure = present
notice('CPU and Boot Argument audit passed.')
if $cmd_ok and $n_cpus_ok {
$ensure = present
notice('CPU and Boot Argument audit passed.')
} else {
$ensure = absent
if !$cmd_ok {
notice('Kernel Boot Argument Mismatch')
include ::platform::compute::grub::recovery
} else {
$ensure = absent
if !$cmd_ok {
notice('Kernel Boot Argument Mismatch')
include ::platform::compute::grub::recovery
} else {
notice("Mismatched CPUs: Found=${n_cpus}, Expected=${expected_n_cpus}")
}
notice("Mismatched CPUs: Found=${n_cpus}, Expected=${expected_n_cpus}")
}
}
file { '/var/run/worker_goenabled':
ensure => $ensure,
owner => 'root',
group => 'root',
mode => '0644',
}
file { '/var/run/worker_goenabled':
ensure => $ensure,
owner => 'root',
group => 'root',
mode => '0644',
}
}


@@ -341,19 +341,6 @@ class platform::config::controller::post
{
include ::platform::params
# TODO(tngo): The following block will be removed when we switch to Ansible
if str2bool($::is_initial_config_primary) {
# copy configured hosts to redundant storage
file { "${::platform::params::config_path}/hosts":
source => '/etc/hosts',
replace => false,
}
file { '/etc/platform/.unlock_ready':
ensure => present,
}
}
if ! $::platform::params::controller_upgrade {
file { '/etc/platform/.initial_config_complete':
ensure => present,


@@ -129,120 +129,6 @@ class platform::dockerdistribution::config
mode => '0755',
source => "puppet:///modules/${module_name}/registry-token-server"
}
# self-signed certificate for registry use
# this needs to be generated here because the certificate
# needs to know the registry ip address for SANs
if str2bool($::is_initial_config_primary) {
$shared_dir = $::platform::params::config_path
$certs_dir = '/etc/ssl/private'
$docker_registry_public_ip = $::platform::haproxy::params::public_ip_address
# create the certificate files
file { "${certs_dir}/registry-cert-extfile.cnf":
ensure => present,
owner => 'root',
group => 'root',
mode => '0400',
content => template('platform/registry-cert-extfile.erb'),
}
-> exec { 'docker-registry-generate-cert':
command => "openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 \
-keyout ${certs_dir}/registry-cert.key \
-out ${certs_dir}/registry-cert.crt \
-config ${certs_dir}/registry-cert-extfile.cnf",
logoutput => true
}
-> exec { 'docker-registry-generate-pkcs1-cert-from-pkcs8':
command => "openssl rsa -in ${certs_dir}/registry-cert.key \
-out ${certs_dir}/registry-cert-pkcs1.key",
logoutput => true
}
# ensure permissions are set correctly
-> file { "${certs_dir}/registry-cert-pkcs1.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
}
-> file { "${certs_dir}/registry-cert.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
}
-> file { "${certs_dir}/registry-cert.crt":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
}
# delete the extfile used in certificate generation
-> exec { 'remove-registry-cert-extfile':
command => "rm ${certs_dir}/registry-cert-extfile.cnf"
}
# copy certificates and keys to shared directory for second controller
# we do not need to worry about the second controller being up at this point,
# since we have an is_initial_config_primary check
-> file { "${shared_dir}/registry-cert-pkcs1.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert-pkcs1.key",
}
-> file { "${shared_dir}/registry-cert.key":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert.key",
}
-> file { "${shared_dir}/registry-cert.crt":
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert.crt",
}
# copy the certificate to docker certificates directory,
# which makes docker trust that specific certificate
# this is required for self-signed and also if the user does
# not have a certificate signed by a "default" CA
-> file { '/etc/docker/certs.d':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/certs.d/registry.local:9001':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0700',
}
-> file { '/etc/docker/certs.d/registry.local:9001/registry-cert.crt':
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0400',
source => "${certs_dir}/registry-cert.crt",
}
}
}
# compute also needs the "insecure" flag in order to deploy images from


@@ -99,21 +99,6 @@ define platform::drbd::filesystem (
cpumask => $::platform::drbd::params::cpumask,
resync_after => $resync_after,
}
if str2bool($::is_initial_config_primary) {
# NOTE: The DRBD file system can only be resized immediately if not peering,
# otherwise it must wait for the peer backing storage device to be
# resized before issuing the resize locally.
Drbd::Resource[$title]
-> exec { "drbd resize ${title}":
command => "drbdadm -- --assume-peer-has-space resize ${title}",
}
-> exec { "resize2fs ${title}":
command => "resize2fs ${device}",
}
}
}
@@ -283,17 +268,10 @@ class platform::drbd::etcd::params (
class platform::drbd::etcd (
) inherits ::platform::drbd::etcd::params {
if str2bool($::is_initial_config_primary) {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
} else {
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
$drbd_manage = undef
}
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
$drbd_manage = undef
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
@@ -346,17 +324,10 @@ class platform::drbd::dockerdistribution::params (
class platform::drbd::dockerdistribution ()
inherits ::platform::drbd::dockerdistribution::params {
if str2bool($::is_initial_config_primary) {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
$drbd_manage = true
} else {
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
$drbd_manage = undef
}
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
$drbd_manage = undef
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
@@ -455,8 +426,8 @@ class platform::drbd(
$service_enable = false,
$service_ensure = 'stopped',
) {
if (str2bool($::is_initial_config_primary) or str2bool($::is_standalone_controller)
){
if str2bool($::is_standalone_controller)
{
# Enable DRBD on standalone
class { '::drbd':
service_enable => true,


@@ -38,12 +38,13 @@ class platform::etcd::setup {
-> Service['etcd']
}
class platform::etcd::init
inherits ::platform::etcd::params {
class platform::etcd::init (
$service_enabled = false,
) inherits ::platform::etcd::params {
$client_url = "http://${bind_address}:${port}"
if str2bool($::is_initial_config_primary) {
if $service_enabled {
$service_ensure = 'running'
}
else {
@@ -99,13 +100,11 @@ class platform::etcd::datadir::bootstrap
require ::platform::drbd::etcd::bootstrap
Class['::platform::drbd::etcd::bootstrap'] -> Class[$name]
if $::platform::params::init_database {
file { $etcd_versioned_dir:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
file { $etcd_versioned_dir:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
}
@@ -114,9 +113,10 @@ class platform::etcd::bootstrap
include ::platform::etcd::datadir::bootstrap
include ::platform::etcd::setup
include ::platform::etcd::init
Class['::platform::etcd::datadir::bootstrap']
-> Class['::platform::etcd::setup']
-> Class['::platform::etcd::init']
-> class { '::platform::etcd::init':
service_enabled => true,
}
}


@@ -91,14 +91,17 @@ class platform::fm::runtime {
class platform::fm::bootstrap {
# Set up needed config to enable launching of fmManager later
include ::platform::params
include ::platform::fm::params
include ::platform::fm
if $::platform::params::init_keystone {
include ::fm::keystone::auth
class { '::fm::api':
host => $::platform::fm::params::api_host,
workers => $::platform::params::eng_workers,
sync_db => $::platform::params::init_database,
}
include ::platform::fm::config
include ::fm::client
include ::fm::keystone::authtoken
include ::fm::db::postgresql
include ::fm::keystone::auth
class { '::fm::api':
host => $::platform::fm::params::api_host,
workers => $::platform::params::eng_workers,
sync_db => true,
}
}


@@ -8,7 +8,6 @@ define platform::helm::repository (
$repo_base = undef,
$repo_port = undef,
$create = false,
$primary = false,
) {
$repo_path = "${repo_base}/${name}"
@@ -58,7 +57,6 @@ class platform::helm::repositories
repo_base => $target_helm_repos_base_dir,
repo_port => $::openstack::horizon::params::http_port,
create => $::is_initial_config,
primary => $::is_initial_config_primary,
}
-> exec { 'Updating info of available charts locally from chart repo':
@@ -93,79 +91,21 @@ class platform::helm
}
if (str2bool($::is_initial_config) and $::personality == 'controller') {
include ::platform::helm::repositories
if str2bool($::is_initial_config_primary) {
Class['::platform::kubernetes::master']
if $::platform::docker::params::gcr_registry {
$gcr_registry = $::platform::docker::params::gcr_registry
} else {
$gcr_registry = 'gcr.io'
}
if $::platform::docker::params::quay_registry {
$quay_registry = $::platform::docker::params::quay_registry
} else {
$quay_registry = 'quay.io'
}
Class['::platform::kubernetes::master']
-> exec { 'load tiller docker image':
command => "docker image pull ${gcr_registry}/kubernetes-helm/tiller:v2.13.1",
logoutput => true,
}
# TODO(tngo): If and when tiller image is upversioned, please ensure armada compatibility as part of the test
-> exec { 'load armada docker image':
command => "docker image pull ${quay_registry}/airshipit/armada:8a1638098f88d92bf799ef4934abe569789b885e-ubuntu_bionic",
logoutput => true,
}
-> exec { 'create service account for tiller':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create serviceaccount --namespace kube-system tiller',
logoutput => true,
}
-> exec { 'create cluster role binding for tiller service account':
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller', # lint:ignore:140chars
logoutput => true,
}
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/sysadmin' ],
command => "helm init --skip-refresh --service-account tiller --node-selectors \"node-role.kubernetes.io/master\"=\"\" --tiller-image=${gcr_registry}/kubernetes-helm/tiller:v2.13.1 --override spec.template.spec.hostNetwork=true", # lint:ignore:140chars
logoutput => true,
user => 'sysadmin',
group => 'sys_protected',
require => User['sysadmin']
}
exec { "bind mount ${target_helm_repos_base_dir}":
command => "mount -o bind -t ext4 ${source_helm_repos_base_dir} ${target_helm_repos_base_dir}",
require => File[ $source_helm_repos_base_dir, $target_helm_repos_base_dir ]
}
} else {
Class['::platform::kubernetes::master']
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/sysadmin' ],
command => 'helm init --skip-refresh --client-only',
logoutput => true,
user => 'sysadmin',
group => 'sys_protected',
require => User['sysadmin']
}
-> exec { 'initialize helm':
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/sysadmin' ],
command => 'helm init --skip-refresh --client-only',
logoutput => true,
user => 'sysadmin',
group => 'sys_protected',
require => User['sysadmin']
}
include ::platform::helm::repositories
include ::openstack::horizon::params
$port = $::openstack::horizon::params::http_port
exec { 'restart lighttpd for helm':
require => [File['/etc/lighttpd/lighttpd.conf', $target_helm_repos_base_dir, $source_helm_repos_base_dir],
Exec['initialize helm']],
-> exec { 'restart lighttpd for helm':
require => [File['/etc/lighttpd/lighttpd.conf', $target_helm_repos_base_dir, $source_helm_repos_base_dir]],
command => 'systemctl restart lighttpd.service',
logoutput => true,
}


@@ -516,7 +516,7 @@ class platform::kubernetes::coredns {
include ::platform::params
if str2bool($::is_initial_config_primary) or str2bool($::is_initial_k8s_config) {
if str2bool($::is_initial_k8s_config) {
if $::platform::params::system_mode != 'simplex' {
# For duplex and multi-node systems, restrict the dns pod to master nodes
exec { 'restrict coredns to master nodes':


@@ -87,8 +87,24 @@ class platform::mtce::runtime {
}
}
class platform::mtce::bootstrap {
class platform::mtce::bootstrap
inherits ::platform::mtce::params {
include ::platform::params
include ::platform::mtce
include ::platform::mtce::agent
# configure a mtce keystone user
keystone_user { $auth_username:
ensure => present,
password => $auth_pw,
enabled => true,
}
# assign an admin role for this mtce user on the services tenant
keystone_user_role { "${auth_username}@${auth_project}":
ensure => present,
user_domain => $auth_user_domain,
project_domain => $auth_project_domain,
roles => ['admin'],
}
}


@@ -87,6 +87,6 @@ class platform::params (
$eng_workers_by_6 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/6, 2))
}
$init_database = (str2bool($::is_initial_config_primary) or $controller_upgrade)
$init_keystone = (str2bool($::is_initial_config_primary) or $controller_upgrade)
$init_database = $controller_upgrade
$init_keystone = $controller_upgrade
}


@@ -94,17 +94,6 @@ class platform::postgresql::server (
}
}
if str2bool($::is_initial_config_primary) {
$service_ensure = 'running'
# ensure service is stopped after initial configuration
class { '::platform::postgresql::post':
stage => post
}
} else {
$service_ensure = 'stopped'
}
class {'::postgresql::globals':
datadir => $data_dir,
confdir => $config_dir,
@@ -112,7 +101,7 @@ class platform::postgresql::server (
-> class {'::postgresql::server':
ip_mask_allow_all_users => $ipv4acl,
service_ensure => $service_ensure,
service_ensure => 'stopped',
}
}


@@ -148,7 +148,6 @@ class platform::sm
# Barbican
include ::openstack::barbican::params
$barbican_enabled = $::openstack::barbican::params::service_enabled
$ost_cl_ctrl_host = $::platform::network::mgmt::params::controller_address_url
@@ -455,18 +454,16 @@ class platform::sm
}
# Barbican
if $barbican_enabled {
exec { 'Configure OpenStack - Barbican API':
command => "sm-configure service_instance barbican-api barbican-api \"config=/etc/barbican/barbican.conf\"",
}
exec { 'Configure OpenStack - Barbican API':
command => "sm-configure service_instance barbican-api barbican-api \"config=/etc/barbican/barbican.conf\"",
}
exec { 'Configure OpenStack - Barbican Keystone Listener':
command => "sm-configure service_instance barbican-keystone-listener barbican-keystone-listener \"config=/etc/barbican/barbican.conf\"",
}
exec { 'Configure OpenStack - Barbican Keystone Listener':
command => "sm-configure service_instance barbican-keystone-listener barbican-keystone-listener \"config=/etc/barbican/barbican.conf\"",
}
exec { 'Configure OpenStack - Barbican Worker':
command => "sm-configure service_instance barbican-worker barbican-worker \"config=/etc/barbican/barbican.conf\"",
}
exec { 'Configure OpenStack - Barbican Worker':
command => "sm-configure service_instance barbican-worker barbican-worker \"config=/etc/barbican/barbican.conf\"",
}
exec { 'Configure NFS Management':
@@ -676,52 +673,23 @@ class platform::sm
}
# Barbican
if $barbican_enabled {
exec { 'Provision OpenStack - Barbican API (service-group-member)':
command => 'sm-provision service-group-member cloud-services barbican-api',
}
-> exec { 'Provision OpenStack - Barbican API (service)':
command => 'sm-provision service barbican-api',
}
-> exec { 'Provision OpenStack - Barbican Keystone Listener (service-group-member)':
command => 'sm-provision service-group-member cloud-services barbican-keystone-listener',
}
-> exec { 'Provision OpenStack - Barbican Keystone Listener (service)':
command => 'sm-provision service barbican-keystone-listener',
}
-> exec { 'Provision OpenStack - Barbican Worker (service-group-member)':
command => 'sm-provision service-group-member cloud-services barbican-worker',
}
-> exec { 'Provision OpenStack - Barbican Worker (service)':
command => 'sm-provision service barbican-worker',
}
} else {
exec { 'Deprovision OpenStack - Barbican API (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services barbican-api',
exec { 'Provision OpenStack - Barbican API (service-group-member)':
command => 'sm-provision service-group-member cloud-services barbican-api',
}
-> exec { 'Deprovision OpenStack - Barbican API (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service barbican-api',
-> exec { 'Provision OpenStack - Barbican API (service)':
command => 'sm-provision service barbican-api',
}
-> exec { 'Provision OpenStack - Barbican Keystone Listener (service-group-member)':
command => 'sm-provision service-group-member cloud-services barbican-keystone-listener',
}
-> exec { 'Provision OpenStack - Barbican Keystone Listener (service)':
command => 'sm-provision service barbican-keystone-listener',
}
exec { 'Deprovision OpenStack - Barbican Keystone Listener (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services barbican-keystone-listener',
}
-> exec { 'Deprovision OpenStack - Barbican Keystone Listener (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service barbican-keystone-listener',
}
exec { 'Deprovision OpenStack - Barbican Worker (service-group-member)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service-group-member cloud-services barbican-worker',
}
-> exec { 'Deprovision OpenStack - Barbican Worker (service)':
path => [ '/usr/bin', '/usr/sbin', '/usr/local/bin', '/etc', '/sbin', '/bin' ],
command => 'sm-deprovision service barbican-worker',
-> exec { 'Provision OpenStack - Barbican Worker (service-group-member)':
command => 'sm-provision service-group-member cloud-services barbican-worker',
}
-> exec { 'Provision OpenStack - Barbican Worker (service)':
command => 'sm-provision service barbican-worker',
}
if $ceph_configured {


@@ -1,748 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the Calico backend to use.
calico_backend: "bird"
# Configure the MTU to use
veth_mtu: "1440"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
# Source: calico/templates/kdd-crds.yaml
# Create all the CustomResourceDefinitions needed for
# Calico policy and networking mode.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: calico/cni:v3.6.1
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.6.1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
containers:
# Runs node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.6.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "192.168.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -bird-ready
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
volumes:
# Used by node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest deploys the Calico node controller.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.6.1
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml


@@ -1,13 +0,0 @@
[req]
prompt = no
x509_extensions = v3_req
distinguished_name = dn
[dn]
CN = registry.local
[v3_req]
subjectAltName = @alt_names
[alt_names]
DNS.1 = registry.local
DNS.2 = registry.central
IP.1 = <%= @docker_registry_ip %>
IP.2 = <%= @docker_registry_public_ip %>