Browse Source

Merge remote-tracking branch 'gerrit/master' into f/centos8

Change-Id: I222b07433861bf9618d34c06346502e8d1a7551d
changes/25/729825/1
Shuicheng Lin 7 months ago
parent
commit
d4617fbad7
36 changed files with 595 additions and 189 deletions
  1. +2
    -0
      .zuul.yaml
  2. +2
    -2
      modules/puppet-dcdbsync/src/dcdbsync/manifests/openstack_api.pp
  3. +2
    -2
      modules/puppet-dcmanager/src/dcmanager/manifests/init.pp
  4. +7
    -3
      modules/puppet-dcorch/src/dcorch/manifests/keystone/auth.pp
  5. +0
    -6
      modules/puppet-dcorch/src/dcorch/manifests/params.pp
  6. +0
    -47
      modules/puppet-dcorch/src/dcorch/manifests/snmp.pp
  7. +1
    -1
      puppet-manifests/centos/build_srpm.data
  8. +2
    -4
      puppet-manifests/src/manifests/controller.pp
  9. +2
    -2
      puppet-manifests/src/manifests/storage.pp
  10. +2
    -2
      puppet-manifests/src/manifests/worker.pp
  11. +14
    -0
      puppet-manifests/src/modules/openstack/manifests/barbican.pp
  12. +13
    -0
      puppet-manifests/src/modules/openstack/manifests/keystone.pp
  13. +18
    -0
      puppet-manifests/src/modules/platform/files/change_kube_apiserver_params.py
  14. +21
    -11
      puppet-manifests/src/modules/platform/files/etcd
  15. +41
    -0
      puppet-manifests/src/modules/platform/manifests/config.pp
  16. +30
    -2
      puppet-manifests/src/modules/platform/manifests/containerd.pp
  17. +21
    -1
      puppet-manifests/src/modules/platform/manifests/dcdbsync.pp
  18. +25
    -0
      puppet-manifests/src/modules/platform/manifests/dcmanager.pp
  19. +38
    -9
      puppet-manifests/src/modules/platform/manifests/dcorch.pp
  20. +23
    -1
      puppet-manifests/src/modules/platform/manifests/docker.pp
  21. +24
    -11
      puppet-manifests/src/modules/platform/manifests/drbd.pp
  22. +123
    -36
      puppet-manifests/src/modules/platform/manifests/filesystem.pp
  23. +13
    -0
      puppet-manifests/src/modules/platform/manifests/fm.pp
  24. +18
    -1
      puppet-manifests/src/modules/platform/manifests/haproxy.pp
  25. +19
    -16
      puppet-manifests/src/modules/platform/manifests/kubernetes.pp
  26. +13
    -0
      puppet-manifests/src/modules/platform/manifests/network.pp
  27. +14
    -0
      puppet-manifests/src/modules/platform/manifests/nfv.pp
  28. +14
    -0
      puppet-manifests/src/modules/platform/manifests/patching.pp
  29. +45
    -29
      puppet-manifests/src/modules/platform/manifests/sm.pp
  30. +12
    -0
      puppet-manifests/src/modules/platform/manifests/smapi.pp
  31. +10
    -0
      puppet-manifests/src/modules/platform/manifests/sysctl.pp
  32. +14
    -0
      puppet-manifests/src/modules/platform/manifests/sysinv.pp
  33. +2
    -2
      puppet-manifests/src/modules/platform/templates/config.toml.erb
  34. +3
    -0
      puppet-manifests/src/modules/platform/templates/kube-apiserver-change-params.erb
  35. +1
    -0
      test-requirements.txt
  36. +6
    -1
      tox.ini

+ 2
- 0
.zuul.yaml View File

@@ -1,5 +1,7 @@
---
- project:
templates:
- stx-bandit-jobs
check:
jobs:
- stx-puppet-linters


+ 2
- 2
modules/puppet-dcdbsync/src/dcdbsync/manifests/openstack_api.pp View File

@@ -60,7 +60,7 @@
#
# [*bind_port*]
# (optional) The dcorch dbsync api port
# Defaults to 8220
# Defaults to 8229
#
# [*package_ensure*]
# (optional) The state of the package
@@ -93,7 +93,7 @@ class dcdbsync::openstack_api (
$auth_type = 'password',
$package_ensure = 'latest',
$bind_host = '0.0.0.0',
$bind_port = 8220,
$bind_port = 8229,
$enabled = false
) {



+ 2
- 2
modules/puppet-dcmanager/src/dcmanager/manifests/init.pp View File

@@ -22,8 +22,8 @@
class dcmanager (
$database_connection = '',
$database_idle_timeout = 3600,
$database_max_pool_size = 5,
$database_max_overflow = 10,
$database_max_pool_size = 1,
$database_max_overflow = 100,
$control_exchange = 'openstack',
$rabbit_host = '127.0.0.1',
$rabbit_port = 5672,


+ 7
- 3
modules/puppet-dcorch/src/dcorch/manifests/keystone/auth.pp View File

@@ -43,6 +43,10 @@ class dcorch::keystone::auth (
$cinder_proxy_public_url_v3 = 'http://127.0.0.1:28776/v3/%(tenant_id)s',
$patching_proxy_public_url = 'http://127.0.0.1:25491',
$identity_proxy_public_url = 'http://127.0.0.1:25000/v3',

$identity_proxy_admin_url = 'http://127.0.0.1:25000/v3',
$sysinv_proxy_admin_url = 'http://127.0.0.1:26385/v1',
$patching_proxy_admin_url = 'http://127.0.0.1:25491',
) {
if $::platform::params::distributed_cloud_role =='systemcontroller' {
keystone::resource::service_identity { 'dcorch':
@@ -68,7 +72,7 @@ class dcorch::keystone::auth (
type => 'platform',
region => $region,
public_url => $sysinv_proxy_public_url,
admin_url => $sysinv_proxy_internal_url,
admin_url => $sysinv_proxy_admin_url,
internal_url => $sysinv_proxy_internal_url
}

@@ -78,7 +82,7 @@ class dcorch::keystone::auth (
type => 'patching',
region => $region,
public_url => $patching_proxy_public_url,
admin_url => $patching_proxy_internal_url,
admin_url => $patching_proxy_admin_url,
internal_url => $patching_proxy_internal_url
}
keystone_endpoint { "${region}/keystone::identity" :
@@ -87,7 +91,7 @@ class dcorch::keystone::auth (
type => 'identity',
region => $region,
public_url => $identity_proxy_public_url,
admin_url => $identity_proxy_internal_url,
admin_url => $identity_proxy_admin_url,
internal_url => $identity_proxy_internal_url
}
}


+ 0
- 6
modules/puppet-dcorch/src/dcorch/manifests/params.pp View File

@@ -20,8 +20,6 @@ class dcorch::params {
$api_service = 'dcorch-api'
$engine_package = 'distributedcloud-dcorch'
$engine_service = 'dcorch-engine'
$snmp_package = 'distributedcloud-dcorch'
$snmp_service = 'dcorch-snmp'
$api_proxy_package = 'distributedcloud-dcorch'
$api_proxy_service = 'dcorch-api-proxy'

@@ -35,8 +33,6 @@ class dcorch::params {
$api_service = 'dcorch-api'
$engine_package = false
$engine_service = 'dcorch-engine'
$snmp_package = false
$snmp_service = 'dcorch-snmp'
$api_proxy_package = false
$api_proxy_service = 'dcorch-api-proxy'

@@ -48,8 +44,6 @@ class dcorch::params {
$client_package = 'distributedcloud-client-dcorchclient'
$api_package = false
$api_service = 'dcorch-api'
$snmp_package = false
$snmp_service = 'dcorch-snmp'
$engine_package = false
$engine_service = 'dcorch-engine'
$api_proxy_package = false


+ 0
- 47
modules/puppet-dcorch/src/dcorch/manifests/snmp.pp View File

@@ -1,47 +0,0 @@
#
# Files in this package are licensed under Apache; see LICENSE file.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Dec 2017 Creation based off puppet-sysinv
#

class dcorch::snmp (
$package_ensure = 'latest',
$enabled = false,
$bind_host = '0.0.0.0',
$com_str = 'dcorchAlarmAggregator'
) {

include dcorch::params
include dcorch::deps

if $::dcorch::params::snmp_package {
package { 'dcorch-snmp':
ensure => $package_ensure,
name => $::dcorch::params::snmp_package,
tag => 'dcorch-package',
}
}
dcorch_config {
'snmp/snmp_ip': value => $bind_host;
'snmp/snmp_comm_str': value => $com_str;
}

if $enabled {
$ensure = 'running'
} else {
$ensure = 'stopped'
}

service { 'dcorch-snmp':
ensure => $ensure,
name => $::dcorch::params::snmp_service,
enable => $enabled,
hasstatus => false,
tag => 'dcorch-service',
}

}

+ 1
- 1
puppet-manifests/centos/build_srpm.data View File

@@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=96
TIS_PATCH_VER=98

+ 2
- 4
puppet-manifests/src/manifests/controller.pp View File

@@ -40,9 +40,9 @@ include ::platform::postgresql::server
include ::platform::haproxy::server
include ::platform::grub
include ::platform::etcd
include ::platform::docker
include ::platform::docker::controller
include ::platform::dockerdistribution
include ::platform::containerd
include ::platform::containerd::controller
include ::platform::kubernetes::master
include ::platform::helm

@@ -89,8 +89,6 @@ include ::platform::dcorch::engine
include ::platform::dcorch::api_proxy
include ::platform::dcmanager::api

include ::platform::dcorch::snmp

include ::platform::dcdbsync
include ::platform::dcdbsync::api



+ 2
- 2
puppet-manifests/src/manifests/storage.pp View File

@@ -27,8 +27,8 @@ include ::platform::sysinv
include ::platform::grub
include ::platform::collectd
include ::platform::filesystem::storage
include ::platform::docker
include ::platform::containerd
include ::platform::docker::storage
include ::platform::containerd::storage
include ::platform::ceph::storage

class { '::platform::config::storage::post':


+ 2
- 2
puppet-manifests/src/manifests/worker.pp View File

@@ -30,8 +30,8 @@ include ::platform::devices
include ::platform::grub
include ::platform::collectd
include ::platform::filesystem::compute
include ::platform::docker
include ::platform::containerd
include ::platform::docker::worker
include ::platform::containerd::worker
include ::platform::dockerdistribution::compute
include ::platform::kubernetes::worker
include ::platform::multipath


+ 14
- 0
puppet-manifests/src/modules/openstack/manifests/barbican.pp View File

@@ -90,12 +90,26 @@ class openstack::barbican::service (

class openstack::barbican::haproxy
inherits ::openstack::barbican::params {
include ::platform::params
include ::platform::haproxy::params

platform::haproxy::proxy { 'barbican-restapi':
server_name => 's-barbican-restapi',
public_port => $api_port,
private_port => $api_port,
}

# Configure rules for DC https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'barbican-restapi-admin':
https_ep_type => 'admin',
server_name => 's-barbican-restapi',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port + 1,
private_port => $api_port,
}
}
}

class openstack::barbican::api


+ 13
- 0
puppet-manifests/src/modules/openstack/manifests/keystone.pp View File

@@ -131,6 +131,7 @@ class openstack::keystone::haproxy
inherits ::openstack::keystone::params {

include ::platform::params
include ::platform::haproxy::params

if !$::platform::params::region_config {
platform::haproxy::proxy { 'keystone-restapi':
@@ -139,6 +140,18 @@ class openstack::keystone::haproxy
private_port => $api_port,
}
}

# Configure rules for DC https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'keystone-restapi-admin':
https_ep_type => 'admin',
server_name => 's-keystone',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port + 1,
private_port => $api_port,
}
}
}

define delete_endpoints (


+ 18
- 0
puppet-manifests/src/modules/platform/files/change_kube_apiserver_params.py View File

@@ -19,6 +19,7 @@ parser.add_argument("--oidc_issuer_url")
parser.add_argument("--oidc_client_id")
parser.add_argument("--oidc_username_claim")
parser.add_argument("--oidc_groups_claim")
parser.add_argument("--admission_plugins")
args = parser.parse_args()

if args.configmap_file:
@@ -59,6 +60,23 @@ else:
if 'oidc-groups-claim' in cluster_config['apiServer']['extraArgs']:
del cluster_config['apiServer']['extraArgs']['oidc-groups-claim']

if args.admission_plugins:
all_plugins = args.admission_plugins
# There are some plugins required by the system.
# If the plugins are specified manually, the required ones
# might be missed. We add them automatically so the user
# does not need to keep track of them.
required_plugins = ['NodeRestriction']
for plugin in required_plugins:
if plugin not in all_plugins:
all_plugins = all_plugins + "," + plugin
cluster_config['apiServer']['extraArgs']['enable-admission-plugins'] = \
all_plugins
else:
plugins = 'enable-admission-plugins'
if plugins in cluster_config['apiServer']['extraArgs']:
del cluster_config['apiServer']['extraArgs'][plugins]

cluster_config_string = yaml.dump(cluster_config, Dumper=yaml.RoundTripDumper,
default_flow_style=False)
# use yaml.scalarstring.PreservedScalarString to make sure the yaml is


+ 21
- 11
puppet-manifests/src/modules/platform/files/etcd View File

@@ -19,6 +19,7 @@
DESC="ETCD highly-available key value database"
SERVICE="etcd.service"
PIDFILE="/var/run/etcd.pid"
UPGRADE_SWACT_FILE="/etc/platform/.upgrade_swact_controller_1"


status()
@@ -46,16 +47,28 @@ start()
fi
fi

echo "Starting $SERVICE..."
RETVAL=0

systemctl start $SERVICE
if [ -e $UPGRADE_SWACT_FILE ]; then
echo "Perform upgrade_swact_migration migrate etcd ..."
/usr/bin/upgrade_swact_migration.py migrate_etcd
if [ $? -ne 0 ]
then
RETVAL=1
fi
fi

if [ $? -eq 0 ]; then
echo "Started $SERVICE successfully"
RETVAL=0
else
echo "$SERVICE failed!"
RETVAL=1
if [ $RETVAL -eq 0 ]; then
echo "Starting $SERVICE..."

systemctl start $SERVICE

if [ $? -eq 0 ]; then
echo "Started $SERVICE successfully"
else
echo "$SERVICE failed!"
RETVAL=1
fi
fi

}
@@ -98,6 +111,3 @@ case "$1" in
esac

exit $RETVAL




+ 41
- 0
puppet-manifests/src/modules/platform/manifests/config.pp View File

@@ -295,6 +295,43 @@ class platform::config::certs::ssl_ca
}
}

class platform::config::dccert::params (
$dc_root_ca_crt = '',
$dc_adminep_crt = ''
) { }


class platform::config::dc_root_ca
inherits ::platform::config::dccert::params {
$dc_root_ca_file = '/etc/pki/ca-trust/source/anchors/dc-adminep-root-ca.crt'
$dc_adminep_cert_file = '/etc/ssl/private/admin-ep-cert.pem'

if ! empty($dc_adminep_crt) {
file { 'adminep-cert':
ensure => present,
path => $dc_adminep_cert_file,
owner => root,
group => root,
mode => '0400',
content => $dc_adminep_crt,
}
}

if ! empty($dc_root_ca_crt) {
file { 'create-dc-adminep-root-ca-cert':
ensure => present,
path => $dc_root_ca_file,
owner => root,
group => root,
mode => '0644',
content => $dc_root_ca_crt,
}
-> exec { 'update-dc-ca-trust':
command => 'update-ca-trust',
}
}
}


class platform::config::runtime {
include ::platform::config::certs::ssl_ca
@@ -313,6 +350,10 @@ class platform::config::pre {
include ::platform::config::file
include ::platform::config::tpm
include ::platform::config::certs::ssl_ca
if ($::platform::params::distributed_cloud_role =='systemcontroller' and
$::personality == 'controller') {
include ::platform::config::dc_root_ca
}
}




+ 30
- 2
puppet-manifests/src/modules/platform/manifests/containerd.pp View File

@@ -5,7 +5,8 @@ class platform::containerd::params (
$no_proxy = undef,
$k8s_registry = undef,
$insecure_registries = undef,
$k8s_cni_bin_dir = '/usr/libexec/cni'
$k8s_cni_bin_dir = '/usr/libexec/cni',
$stream_server_address = 'localhost',
) { }

class platform::containerd::config
@@ -16,6 +17,12 @@ class platform::containerd::config
include ::platform::kubernetes::params
include ::platform::dockerdistribution::registries

# If containerd is started prior to networking providing a default route, the
# containerd cri plugin will fail to load and the status of the cri plugin
# will be in 'error'. This will prevent any crictl image pulls from working as
# containerd is not automatically restarted when plugins fail to load.
Anchor['platform::networking'] -> Class[$name]

# inherit the proxy setting from docker
$http_proxy = $::platform::docker::params::http_proxy
$https_proxy = $::platform::docker::params::https_proxy
@@ -54,6 +61,12 @@ class platform::containerd::config
# get cni bin directory
$k8s_cni_bin_dir = $::platform::kubernetes::params::k8s_cni_bin_dir

if $::platform::network::mgmt::params::subnet_version == $::platform::params::ipv6 {
$stream_server_address = '::1'
} else {
$stream_server_address = '127.0.0.1'
}

file { '/etc/containerd':
ensure => 'directory',
owner => 'root',
@@ -91,9 +104,24 @@ class platform::containerd::install
}
}

class platform::containerd
class platform::containerd::controller
{
include ::platform::containerd::install
include ::platform::containerd::config
}

class platform::containerd::worker
{
if $::personality != 'controller' {
include ::platform::containerd::install
include ::platform::containerd::config
}
}

class platform::containerd::storage
{
if $::personality != 'controller' {
include ::platform::containerd::install
include ::platform::containerd::config
}
}

+ 21
- 1
puppet-manifests/src/modules/platform/manifests/dcdbsync.pp View File

@@ -1,6 +1,6 @@
class platform::dcdbsync::params (
$api_port = 8219,
$api_openstack_port = 8220,
$api_openstack_port = 8229,
$region_name = undef,
$service_create = false,
$service_enabled = false,
@@ -41,6 +41,26 @@ class platform::dcdbsync::api
}
}
}

include ::platform::dcdbsync::haproxy
}

class platform::dcdbsync::haproxy
inherits ::platform::dcdbsync::params {
include ::platform::params
include ::platform::haproxy::params

# Configure rules for https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'dcdbsync-restapi-admin':
https_ep_type => 'admin',
server_name => 's-dcdbsync',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port + 1,
private_port => $api_port,
}
}
}

class platform::dcdbsync::stx_openstack::runtime


+ 25
- 0
puppet-manifests/src/modules/platform/manifests/dcmanager.pp View File

@@ -7,6 +7,7 @@ class platform::dcmanager::params (
$service_name = 'dcmanager',
$default_endpoint_type = 'internalURL',
$service_create = false,
$deploy_base_dir = '/opt/platform/deploy',
$iso_base_dir_source = '/opt/platform/iso',
$iso_base_dir_target = '/www/pages/iso',
) {
@@ -41,11 +42,18 @@ class platform::dcmanager
ensure => directory,
mode => '0755',
}
file {$deploy_base_dir:
ensure => directory,
mode => '0755',
}
}
}

class platform::dcmanager::haproxy
inherits ::platform::dcmanager::params {
include ::platform::params
include ::platform::haproxy::params

if $::platform::params::distributed_cloud_role =='systemcontroller' {
platform::haproxy::proxy { 'dcmanager-restapi':
server_name => 's-dcmanager',
@@ -53,6 +61,17 @@ class platform::dcmanager::haproxy
private_port => $api_port,
}
}

# Configure rules for https enabled admin endpoint.
if $::platform::params::distributed_cloud_role == 'systemcontroller' {
platform::haproxy::proxy { 'dcmanager-restapi-admin':
https_ep_type => 'admin',
server_name => 's-dcmanager',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port + 1,
private_port => $api_port,
}
}
}

class platform::dcmanager::manager {
@@ -84,6 +103,7 @@ class platform::dcmanager::fs::runtime {
include ::platform::dcmanager::params
$iso_base_dir_source = $::platform::dcmanager::params::iso_base_dir_source
$iso_base_dir_target = $::platform::dcmanager::params::iso_base_dir_target
$deploy_base_dir = $::platform::dcmanager::params::deploy_base_dir

file {$iso_base_dir_source:
ensure => directory,
@@ -95,6 +115,11 @@ class platform::dcmanager::fs::runtime {
mode => '0755',
}

file {$deploy_base_dir:
ensure => directory,
mode => '0755',
}

exec { "bind mount ${iso_base_dir_target}":
command => "mount -o bind -t ext4 ${iso_base_dir_source} ${iso_base_dir_target}",
require => File[ $iso_base_dir_source, $iso_base_dir_target ]


+ 38
- 9
puppet-manifests/src/modules/platform/manifests/dcorch.pp View File

@@ -40,6 +40,17 @@ class platform::dcorch
proxy_bind_host => $api_host,
proxy_remote_host => $api_host,
}

# Purge the dcorch database once daily, at 20 minutes past the first hour
cron { 'dcorch-cleaner':
ensure => 'present',
command => '/usr/bin/clean-dcorch',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '20',
hour => '*/24',
user => 'root',
}

}
}

@@ -69,6 +80,8 @@ class platform::dcorch::firewall

class platform::dcorch::haproxy
inherits ::platform::dcorch::params {
include ::platform::haproxy::params

if $::platform::params::distributed_cloud_role =='systemcontroller' {
platform::haproxy::proxy { 'dcorch-neutron-api-proxy':
server_name => 's-dcorch-neutron-api-proxy',
@@ -100,6 +113,31 @@ class platform::dcorch::haproxy
public_port => $identity_api_proxy_port,
private_port => $identity_api_proxy_port,
}

# Configure rules for https enabled identity api proxy admin endpoint.
platform::haproxy::proxy { 'dcorch-identity-api-proxy-admin':
https_ep_type => 'admin',
server_name => 's-dcorch-identity-api-proxy',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $identity_api_proxy_port + 1,
private_port => $identity_api_proxy_port,
}
# Configure rules for https enabled sysinv api proxy admin endpoint.
platform::haproxy::proxy { 'dcorch-sysinv-api-proxy-admin':
https_ep_type => 'admin',
server_name => 's-dcorch-sysinv-api-proxy',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $sysinv_api_proxy_port + 1,
private_port => $sysinv_api_proxy_port,
}
# Configure rules for https enabled patching api proxy admin endpoint.
platform::haproxy::proxy { 'dcorch-patch-api-proxy-admin':
https_ep_type => 'admin',
server_name => 's-dcorch-patch-api-proxy',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $patch_api_proxy_port + 1,
private_port => $patch_api_proxy_port,
}
}
}

@@ -110,15 +148,6 @@ class platform::dcorch::engine
}
}

class platform::dcorch::snmp
inherits ::platform::dcorch::params {
if $::platform::params::distributed_cloud_role =='systemcontroller' {
class { '::dcorch::snmp':
bind_host => $api_host,
}
}
}


class platform::dcorch::api_proxy
inherits ::platform::dcorch::params {


+ 23
- 1
puppet-manifests/src/modules/platform/manifests/docker.pp View File

@@ -20,6 +20,12 @@ class platform::docker::params (
class platform::docker::config
inherits ::platform::docker::params {

# Docker restarts will trigger a containerd restart and containerd needs a
# default route present for its CRI plugin to load correctly. Since we are
# deferring the containerd restart until after the network config is applied,
# do the same here to align config/restart times for both containerd and docker.
Anchor['platform::networking'] -> Class[$name]

if $http_proxy or $https_proxy {
file { '/etc/systemd/system/docker.service.d':
ensure => 'directory',
@@ -63,12 +69,28 @@ class platform::docker::install
}
}

class platform::docker
class platform::docker::controller
{
include ::platform::docker::install
include ::platform::docker::config
}

class platform::docker::worker
{
if $::personality != 'controller' {
include ::platform::docker::install
include ::platform::docker::config
}
}

class platform::docker::storage
{
if $::personality != 'controller' {
include ::platform::docker::install
include ::platform::docker::config
}
}

class platform::docker::config::bootstrap
inherits ::platform::docker::params {



+ 24
- 11
puppet-manifests/src/modules/platform/manifests/drbd.pp View File

@@ -101,7 +101,20 @@ define platform::drbd::filesystem (
}
}


# The device names (/dev/drbdX) for all drbd devices added in this manifest
# should be kept in sync with the ones present in the restore ansible playbook
# present in the ansible-playbooks repo at:
# playbookconfig/src/playbooks/roles/restore-platform/restore-more-data/tasks/main.yml
# (ansible task name is "Resize DRBD filesystems").
# This is done because the device names are only defined here and never reach
# sysinv, so there is no way to get this info from another place.
# If adding another drbd-synced resource, check backup&restore works after resizing
# the resource.
#
# NOTE: Only devices present in the "system controllerfs-list" command output
# need to be kept in sync. Filesystem that we don't allow resizing for
# (for example rabbitmq) or those that don't use the controllerfs
# command (for example cephmon) don't need to be kept in sync.
class platform::drbd::pgsql::params (
$device = '/dev/drbd0',
$lv_name = 'pgsql-lv',
@@ -210,19 +223,19 @@ class platform::drbd::extension (
}
}

class platform::drbd::patch_vault::params (
class platform::drbd::dc_vault::params (
$service_enabled = false,
$device = '/dev/drbd6',
$lv_name = 'patch-vault-lv',
$lv_size = '8',
$mountpoint = '/opt/patch-vault',
$lv_name = 'dc-vault-lv',
$lv_size = '15',
$mountpoint = '/opt/dc-vault',
$port = '7794',
$resource_name = 'drbd-patch-vault',
$resource_name = 'drbd-dc-vault',
$vg_name = 'cgts-vg',
) {}

class platform::drbd::patch_vault (
) inherits ::platform::drbd::patch_vault::params {
class platform::drbd::dc_vault (
) inherits ::platform::drbd::dc_vault::params {

if str2bool($::is_standalone_controller) {
$drbd_primary = true
@@ -447,7 +460,7 @@ class platform::drbd(
include ::platform::drbd::rabbit
include ::platform::drbd::platform
include ::platform::drbd::extension
include ::platform::drbd::patch_vault
include ::platform::drbd::dc_vault
include ::platform::drbd::etcd
include ::platform::drbd::dockerdistribution
include ::platform::drbd::cephmon
@@ -517,10 +530,10 @@ class platform::drbd::extension::runtime {
}


class platform::drbd::patch_vault::runtime {
class platform::drbd::dc_vault::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::patch_vault
include ::platform::drbd::dc_vault
}

class platform::drbd::etcd::runtime {


+ 123
- 36
puppet-manifests/src/modules/platform/manifests/filesystem.pp View File

@@ -10,6 +10,7 @@ define platform::filesystem (
$fs_type,
$fs_options,
$fs_use_all = false,
$ensure = present,
$mode = '0750',
) {
include ::platform::filesystem::params
@@ -27,44 +28,80 @@ define platform::filesystem (
$fs_size_is_minsize = false
}

# create logical volume
logical_volume { $lv_name:
ensure => present,
volume_group => $vg_name,
size => $size,
size_is_minsize => $fs_size_is_minsize,
if ($ensure == 'absent') {
exec { "umount mountpoint ${mountpoint}":
command => "umount ${mountpoint}; true",
onlyif => "test -e ${mountpoint}",
}
-> mount { $name:
ensure => $ensure,
atboot => 'yes',
name => $mountpoint,
device => $device,
options => 'defaults',
fstype => $fs_type,
}
-> file { $mountpoint:
ensure => $ensure,
force => true,
}
-> exec { "wipe start of device ${device}":
command => "dd if=/dev/zero of=${device} bs=512 count=34",
onlyif => "blkid ${device}",
}
-> exec { "wipe end of device ${device}":
command => "dd if=/dev/zero of=${device} bs=512 seek=$(($(blockdev --getsz ${device}) - 34)) count=34",
onlyif => "blkid ${device}",
}
-> exec { "lvremove lv ${lv_name}":
command => "lvremove -f cgts-vg ${lv_name}; true",
onlyif => "test -e /dev/cgts-vg/${lv_name}"
}
}

# create filesystem
-> filesystem { $device:
ensure => present,
fs_type => $fs_type,
options => $fs_options,
}

-> file { $mountpoint:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => $mode,
}

-> mount { $name:
ensure => 'mounted',
atboot => 'yes',
name => $mountpoint,
device => $device,
options => 'defaults',
fstype => $fs_type,
}

# The above mount resource doesn't actually remount devices that were already present in /etc/fstab, but were
# unmounted during manifest application. To get around this, we attempt to mount them again, if they are not
# already mounted.
-> exec { "mount ${device}":
unless => "mount | awk '{print \$3}' | grep -Fxq ${mountpoint}",
command => "mount ${mountpoint}",
path => '/usr/bin'
if ($ensure == 'present') {
# create logical volume
logical_volume { $lv_name:
ensure => $ensure,
volume_group => $vg_name,
size => $size,
size_is_minsize => $fs_size_is_minsize,
}

# create filesystem
-> filesystem { $device:
ensure => $ensure,
fs_type => $fs_type,
options => $fs_options,
}

-> file { $mountpoint:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => $mode,
}

-> mount { $name:
ensure => 'mounted',
atboot => 'yes',
name => $mountpoint,
device => $device,
options => 'defaults',
fstype => $fs_type,
}

# The above mount resource doesn't actually remount devices that were already present in /etc/fstab, but were
# unmounted during manifest application. To get around this, we attempt to mount them again, if they are not
# already mounted.
-> exec { "mount ${device}":
unless => "mount | awk '{print \$3}' | grep -Fxq ${mountpoint}",
command => "mount ${mountpoint}",
path => '/usr/bin'
}
-> exec {"Change ${mountpoint} dir permissions":
command => "chmod ${mode} ${mountpoint}",
}
}
}

@@ -123,6 +160,18 @@ class platform::filesystem::backup
}
}

class platform::filesystem::conversion::params (
$conversion_enabled = false,
$ensure = absent,
$lv_size = '1',
$lv_name = 'conversion-lv',
$mountpoint = '/opt/conversion',
$devmapper = '/dev/mapper/cgts--vg-conversion--lv',
$fs_type = 'ext4',
$fs_options = ' ',
$mode = '0750'
) { }

class platform::filesystem::scratch::params (
$lv_size = '8',
$lv_name = 'scratch-lv',
@@ -144,6 +193,24 @@ class platform::filesystem::scratch
}
}

class platform::filesystem::conversion
inherits ::platform::filesystem::conversion::params {

if $conversion_enabled {
$ensure = present
$mode = '0777'
}
platform::filesystem { $lv_name:
ensure => $ensure,
lv_name => $lv_name,
lv_size => $lv_size,
mountpoint => $mountpoint,
fs_type => $fs_type,
fs_options => $fs_options,
mode => $mode
}
}

class platform::filesystem::kubelet::params (
$lv_size = '10',
$lv_name = 'kubelet-lv',
@@ -216,6 +283,7 @@ class platform::filesystem::compute {
class platform::filesystem::controller {
include ::platform::filesystem::backup
include ::platform::filesystem::scratch
include ::platform::filesystem::conversion
include ::platform::filesystem::docker
include ::platform::filesystem::kubelet
}
@@ -250,6 +318,25 @@ class platform::filesystem::scratch::runtime {
}
}

class platform::filesystem::conversion::runtime {
include ::platform::filesystem::conversion
include ::platform::filesystem::conversion::params

$conversion_enabled = $::platform::filesystem::conversion::params::conversion_enabled
$lv_name = $::platform::filesystem::conversion::params::lv_name
$lv_size = $::platform::filesystem::conversion::params::lv_size
$devmapper = $::platform::filesystem::conversion::params::devmapper

if $conversion_enabled {
Class['::platform::filesystem::conversion']
-> platform::filesystem::resize { $lv_name:
lv_name => $lv_name,
lv_size => $lv_size,
devmapper => $devmapper,
}
}
}

class platform::filesystem::kubelet::runtime {

include ::platform::filesystem::kubelet::params


+ 13
- 0
puppet-manifests/src/modules/platform/manifests/fm.pp View File

@@ -38,6 +38,7 @@ class platform::fm
class platform::fm::haproxy
inherits ::platform::fm::params {

include ::platform::params
include ::platform::haproxy::params

platform::haproxy::proxy { 'fm-api-internal':
@@ -54,6 +55,18 @@ class platform::fm::haproxy
public_port => $api_port,
private_port => $api_port,
}

# Configure rules for DC https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'fm-api-admin':
https_ep_type => 'admin',
server_name => 's-fm-api-admin',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port + 1,
private_port => $api_port,
}
}
}

class platform::fm::api


+ 18
- 1
puppet-manifests/src/modules/platform/manifests/haproxy.pp View File

@@ -3,6 +3,7 @@ class platform::haproxy::params (
$public_ip_address,
$public_address_url,
$enable_https = false,
$https_ep_type = 'public',

$global_options = undef,
$tpm_object = undef,
@@ -20,6 +21,7 @@ define platform::haproxy::proxy (
$client_timeout = undef,
$x_forwarded_proto = true,
$enable_https = undef,
$https_ep_type = undef,
$public_api = true,
$tcp_mode = false,
) {
@@ -31,13 +33,23 @@ define platform::haproxy::proxy (
$https_enabled = $::platform::haproxy::params::enable_https
}

if $https_ep_type != undef {
$https_ep = $https_ep_type
} else {
$https_ep = $::platform::haproxy::params::https_ep_type
}

if $x_forwarded_proto {
if $https_enabled and $public_api {
if $https_enabled and $public_api and $https_ep == 'public' {
$ssl_option = 'ssl crt /etc/ssl/private/server-cert.pem'
$proto = 'X-Forwarded-Proto:\ https'
# The value of max-age matches lighttpd.conf, and should be
# maintained for consistency
$hsts_option = 'Strict-Transport-Security:\ max-age=63072000;\ includeSubDomains'
} elsif $https_ep == 'admin' {
$ssl_option = 'ssl crt /etc/ssl/private/admin-ep-cert.pem'
$proto = 'X-Forwarded-Proto:\ https'
$hsts_option = 'Strict-Transport-Security:\ max-age=63072000;\ includeSubDomains'
} else {
$ssl_option = ' '
$proto = 'X-Forwarded-Proto:\ http'
@@ -147,6 +159,11 @@ class platform::haproxy::runtime {
include ::platform::nfv::haproxy
include ::platform::ceph::haproxy
include ::platform::fm::haproxy
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
include ::platform::dcdbsync::haproxy
include ::platform::smapi::haproxy
}
if $::platform::params::distributed_cloud_role =='systemcontroller' {
include ::platform::dcmanager::haproxy
include ::platform::dcorch::haproxy


+ 19
- 16
puppet-manifests/src/modules/platform/manifests/kubernetes.pp View File

@@ -10,9 +10,9 @@ class platform::kubernetes::params (
$host_labels = [],
$k8s_cpuset = undef,
$k8s_nodeset = undef,
$k8s_reserved_cpus = undef,
$k8s_platform_cpuset = undef,
$k8s_reserved_mem = undef,
$k8s_isol_cpus = undef,
$k8s_all_reserved_cpuset = undef,
$k8s_cpu_mgr_policy = 'none',
$k8s_topology_mgr_policy = 'best-effort',
$k8s_cni_bin_dir = '/usr/libexec/cni',
@@ -21,7 +21,8 @@ class platform::kubernetes::params (
$oidc_issuer_url = undef,
$oidc_client_id = undef,
$oidc_username_claim = undef,
$oidc_groups_claim = undef
$oidc_groups_claim = undef,
$admission_plugins = undef
) { }

class platform::kubernetes::cgroup::params (
@@ -107,9 +108,9 @@ class platform::kubernetes::kubeadm {

$node_ip = $::platform::kubernetes::params::node_ip
$host_labels = $::platform::kubernetes::params::host_labels
$k8s_reserved_cpus = $::platform::kubernetes::params::k8s_reserved_cpus
$k8s_platform_cpuset = $::platform::kubernetes::params::k8s_platform_cpuset
$k8s_reserved_mem = $::platform::kubernetes::params::k8s_reserved_mem
$k8s_isol_cpus = $::platform::kubernetes::params::k8s_isol_cpus
$k8s_all_reserved_cpuset = $::platform::kubernetes::params::k8s_all_reserved_cpuset
$k8s_cni_bin_dir = $::platform::kubernetes::params::k8s_cni_bin_dir
$k8s_vol_plugin_dir = $::platform::kubernetes::params::k8s_vol_plugin_dir
$k8s_cpu_mgr_policy = $::platform::kubernetes::params::k8s_cpu_mgr_policy
@@ -127,21 +128,22 @@ class platform::kubernetes::kubeadm {
and !('openstack-compute-node' in $host_labels) {
$opts = join(['--feature-gates TopologyManager=true',
"--cpu-manager-policy=${k8s_cpu_mgr_policy}",
"--topology-manager-policy=${k8s_topology_mgr_policy}",
'--system-reserved-cgroup=/system.slice'], ' ')
"--topology-manager-policy=${k8s_topology_mgr_policy}"], ' ')
$opts_sys_res = join(['--system-reserved=',
"cpu=${k8s_reserved_cpus},",
"memory=${k8s_reserved_mem}Mi"])
$opts_kube_res = join(['--kube-reserved=',
"cpu=${k8s_isol_cpus}"])

if $k8s_cpu_mgr_policy == 'none' {
$k8s_cpu_manager_opts = join([$opts,
$opts_sys_res], ' ')
$k8s_reserved_cpus = $k8s_platform_cpuset
} else {
$k8s_cpu_manager_opts = join([$opts,
$opts_sys_res,
$opts_kube_res], ' ')
# The union of platform, isolated, and vswitch
$k8s_reserved_cpus = $k8s_all_reserved_cpuset
}

$opts_res_cpus = "--reserved-cpus=${k8s_reserved_cpus}"
$k8s_cpu_manager_opts = join([$opts,
$opts_sys_res,
$opts_res_cpus], ' ')
} else {
$k8s_cpu_manager_opts = '--cpu-manager-policy=none'
}
@@ -538,8 +540,9 @@ class platform::kubernetes::upgrade_first_control_plane

include ::platform::params

# The --allow-*-upgrades options allow us to upgrade to any k8s release if necessary
exec { 'upgrade first control plane':
command => "kubeadm upgrade apply ${version} -y",
command => "kubeadm upgrade apply ${version} --allow-experimental-upgrades --allow-release-candidate-upgrades -y",
logoutput => true,
}



+ 13
- 0
puppet-manifests/src/modules/platform/manifests/network.pp View File

@@ -109,8 +109,14 @@ define network_address (
# loopback interface. These addresses must be assigned using the host scope
# or assignment is prevented (can't have multiple global scope addresses on
# the loopback interface).

# For ipv6 the only way to initiate outgoing connections
# over the fixed ips is to set preferred_lft to 0 for the
# floating ips so that they are not used
if $ifname == 'lo' {
$options = 'scope host'
} elsif $::platform::network::mgmt::params::subnet_version == $::platform::params::ipv6 {
$options = 'preferred_lft 0'
} else {
$options = ''
}
@@ -237,6 +243,7 @@ class platform::network::apply {
Network_config <| |>
-> Exec['apply-network-config']
-> Network_address <| |>
-> Exec['wait-for-tentative']
-> Anchor['platform::networking']

# Adding Network_route dependency separately, in case it's empty,
@@ -254,6 +261,12 @@ class platform::network::apply {
exec {'apply-network-config':
command => 'apply_network_config.sh',
}
# Wait for network interface to leave tentative state during ipv6 DAD
exec {'wait-for-tentative':
command => '[ $(ip -6 addr sh | grep -c inet6.*tentative) -eq 0 ]',
tries => 10,
try_sleep => 1,
}
}




+ 14
- 0
puppet-manifests/src/modules/platform/manifests/nfv.pp View File

@@ -56,12 +56,26 @@ class platform::nfv::runtime {

class platform::nfv::haproxy
inherits ::platform::nfv::params {
include ::platform::params
include ::platform::haproxy::params

platform::haproxy::proxy { 'vim-restapi':
server_name => 's-vim-restapi',
public_port => $api_port,
private_port => $api_port,
}

# Configure rules for DC https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'vim-restapi-admin':
https_ep_type => 'admin',
server_name => 's-vim-restapi',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port + 1,
private_port => $api_port,
}
}
}




+ 14
- 0
puppet-manifests/src/modules/platform/manifests/patching.pp View File

@@ -51,6 +51,8 @@ class platform::patching

class platform::patching::haproxy
inherits ::platform::patching::params {
include ::platform::params
include ::platform::haproxy::params

platform::haproxy::proxy { 'patching-restapi':
server_name => 's-patching',
@@ -58,6 +60,18 @@ class platform::patching::haproxy
private_port => $private_port,
server_timeout => $server_timeout,
}

# Configure rules for DC https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'patching-restapi-admin':
https_ep_type => 'admin',
server_name => 's-patching',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $private_port + 1,
private_port => $private_port,
}
}
}




+ 45
- 29
puppet-manifests/src/modules/platform/manifests/sm.pp View File

@@ -67,11 +67,11 @@ class platform::sm
$extension_fs_device = $::platform::drbd::extension::params::device
$extension_fs_directory = $::platform::drbd::extension::params::mountpoint

include ::platform::drbd::patch_vault::params
$drbd_patch_enabled = $::platform::drbd::patch_vault::params::service_enabled
$patch_drbd_resource = $::platform::drbd::patch_vault::params::resource_name
$patch_fs_device = $::platform::drbd::patch_vault::params::device
$patch_fs_directory = $::platform::drbd::patch_vault::params::mountpoint
include ::platform::drbd::dc_vault::params
$drbd_patch_enabled = $::platform::drbd::dc_vault::params::service_enabled
$patch_drbd_resource = $::platform::drbd::dc_vault::params::resource_name
$patch_fs_device = $::platform::drbd::dc_vault::params::device
$patch_fs_directory = $::platform::drbd::dc_vault::params::mountpoint

include ::platform::drbd::etcd::params
$etcd_drbd_resource = $::platform::drbd::etcd::params::resource_name
@@ -258,8 +258,16 @@ class platform::sm
command => "sm-configure service_instance management-ip management-ip \"ip=${mgmt_ip_param_ip},cidr_netmask=${mgmt_ip_param_mask},nic=${mgmt_ip_interface},arp_count=7,dc=yes\"",
}
} else {
# For ipv6 the only way to initiate outgoing connections
# over the fixed ips is to set preferred_lft to 0 for the
# floating ips so that they are not used
if $::platform::network::mgmt::params::subnet_version == $::platform::params::ipv6 {
$preferred_lft = '0'
} else {
$preferred_lft = 'forever'
}
exec { 'Configure Management IP':
command => "sm-configure service_instance management-ip management-ip \"ip=${mgmt_ip_param_ip},cidr_netmask=${mgmt_ip_param_mask},nic=${mgmt_ip_interface},arp_count=7\"",
command => "sm-configure service_instance management-ip management-ip \"ip=${mgmt_ip_param_ip},cidr_netmask=${mgmt_ip_param_mask},nic=${mgmt_ip_interface},arp_count=7,preferred_lft=${preferred_lft}\"",
}
}

@@ -270,9 +278,17 @@ class platform::sm
"sm-configure service_instance cluster-host-ip cluster-host-ip \"ip=${cluster_host_ip_param_ip},cidr_netmask=${cluster_host_ip_param_mask},nic=${cluster_host_ip_interface},arp_count=7,dc=yes\"",
}
} else {
# For ipv6 the only way to initiate outgoing connections
# over the fixed ips is to set preferred_lft to 0 for the
# floating ips so that they are not used
if $::platform::network::cluster_host::params::subnet_version == $::platform::params::ipv6 {
$preferred_lft_cluster = '0'
} else {
$preferred_lft_cluster = 'forever'
}
exec { 'Configure Cluster Host IP service instance':
command =>
"sm-configure service_instance cluster-host-ip cluster-host-ip \"ip=${cluster_host_ip_param_ip},cidr_netmask=${cluster_host_ip_param_mask},nic=${cluster_host_ip_interface},arp_count=7\"",
"sm-configure service_instance cluster-host-ip cluster-host-ip \"ip=${cluster_host_ip_param_ip},cidr_netmask=${cluster_host_ip_param_mask},nic=${cluster_host_ip_interface},arp_count=7,preferred_lft=${preferred_lft_cluster}\"",
}
}

@@ -369,12 +385,12 @@ class platform::sm
}

if $drbd_patch_enabled {
exec { 'Configure Patch-vault DRBD':
command => "sm-configure service_instance drbd-patch-vault drbd-patch-vault:${hostunit} \"drbd_resource=${patch_drbd_resource}\"",
exec { 'Configure DC-vault DRBD':
command => "sm-configure service_instance drbd-dc-vault drbd-dc-vault:${hostunit} \"drbd_resource=${patch_drbd_resource}\"",
}

exec { 'Configure Patch-vault FileSystem':
command => "sm-configure service_instance patch-vault-fs patch-vault-fs \"device=${patch_fs_device},directory=${patch_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
exec { 'Configure DC-vault FileSystem':
command => "sm-configure service_instance dc-vault-fs dc-vault-fs \"device=${patch_fs_device},directory=${patch_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
}
}

@@ -503,7 +519,7 @@ class platform::sm
}
} else {
exec { 'Configure Platform NFS':
command => "sm-configure service_instance platform-nfs-ip platform-nfs-ip \"ip=${platform_nfs_ip_param_ip},cidr_netmask=${platform_nfs_ip_param_mask},nic=${mgmt_ip_interface},arp_count=7\"",
command => "sm-configure service_instance platform-nfs-ip platform-nfs-ip \"ip=${platform_nfs_ip_param_ip},cidr_netmask=${platform_nfs_ip_param_mask},nic=${mgmt_ip_interface},arp_count=7,preferred_lft=${preferred_lft}\"",
}
}

@@ -602,17 +618,17 @@ class platform::sm
}

if $drbd_patch_enabled {
exec { 'Provision patch-vault-fs (service-group-member)':
command => 'sm-provision service-group-member controller-services patch-vault-fs',
exec { 'Provision dc-vault-fs (service-group-member)':
command => 'sm-provision service-group-member controller-services dc-vault-fs',
}
-> exec { 'Provision patch-vault-fs (service)':
command => 'sm-provision service patch-vault-fs',
-> exec { 'Provision dc-vault-fs (service)':
command => 'sm-provision service dc-vault-fs',
}
-> exec { 'Provision drbd-patch-vault (service-group-member)':
command => 'sm-provision service-group-member controller-services drbd-patch-vault',
-> exec { 'Provision drbd-dc-vault (service-group-member)':
command => 'sm-provision service-group-member controller-services drbd-dc-vault',
}
-> exec { 'Provision drbd-patch-vault (service)':
command => 'sm-provision service drbd-patch-vault',
-> exec { 'Provision drbd-dc-vault (service)':
command => 'sm-provision service drbd-dc-vault',
}
}

@@ -805,6 +821,12 @@ class platform::sm
-> exec { 'Provision DCManager-Manager in SM (service dcmanager-manager)':
command => 'sm-provision service dcmanager-manager',
}
-> exec { 'Provision DCManager-Audit (service-group-member dcmanager-audit)':
command => 'sm-provision service-group-member distributed-cloud-services dcmanager-audit',
}
-> exec { 'Provision DCManager-Audit in SM (service dcmanager-audit)':
command => 'sm-provision service dcmanager-audit',
}
-> exec { 'Provision DCManager-RestApi (service-group-member dcmanager-api)':
command => 'sm-provision service-group-member distributed-cloud-services dcmanager-api',
}
@@ -817,12 +839,6 @@ class platform::sm
-> exec { 'Provision DCOrch-Engine in SM (service dcorch-engine)':
command => 'sm-provision service dcorch-engine',
}
-> exec { 'Provision DCOrch-Snmp (service-group-member dcorch-snmp)':
command => 'sm-provision service-group-member distributed-cloud-services dcorch-snmp',
}
-> exec { 'Provision DCOrch-Snmp in SM (service dcorch-snmp)':
command => 'sm-provision service dcorch-snmp',
}
-> exec { 'Provision DCOrch-Identity-Api-Proxy (service-group-member dcorch-identity-api-proxy)':
command => 'sm-provision service-group-member distributed-cloud-services dcorch-identity-api-proxy',
}
@@ -856,15 +872,15 @@ class platform::sm
-> exec { 'Configure Platform - DCManager-Manager':
command => "sm-configure service_instance dcmanager-manager dcmanager-manager \"\"",
}
-> exec { 'Configure Platform - DCManager-Audit':
command => "sm-configure service_instance dcmanager-audit dcmanager-audit \"\"",
}
-> exec { 'Configure OpenStack - DCManager-API':
command => "sm-configure service_instance dcmanager-api dcmanager-api \"\"",
}
-> exec { 'Configure OpenStack - DCOrch-Engine':
command => "sm-configure service_instance dcorch-engine dcorch-engine \"\"",
}
-> exec { 'Configure OpenStack - DCOrch-Snmp':
command => "sm-configure service_instance dcorch-snmp dcorch-snmp \"\"",
}
-> exec { 'Configure OpenStack - DCOrch-identity-api-proxy':
command => "sm-configure service_instance dcorch-identity-api-proxy dcorch-identity-api-proxy \"\"",
}


+ 12
- 0
puppet-manifests/src/modules/platform/manifests/smapi.pp View File

@@ -29,6 +29,18 @@ class platform::smapi::haproxy
public_port => $port,
private_port => $port,
}

# Configure rules for DC https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'sm-api-admin':
https_ep_type => 'admin',
server_name => 's-smapi-admin',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $port + 1,
private_port => $port,
}
}
}

class platform::smapi


+ 10
- 0
puppet-manifests/src/modules/platform/manifests/sysctl.pp View File

@@ -46,6 +46,16 @@ class platform::sysctl
sysctl::value { 'kernel.sched_rt_runtime_us':
value => '1000000',
}

# Enable check for raising timer interrupt only if one is pending.
# This allows nohz full mode to operate properly on isolated cores.
# Without it, ktimersoftd interferes with only one job being
# on the run queue on that core, causing it to drop out of nohz.
# If the check option doesn't exist in the kernel, silently fail.
exec { 'Enable ktimer_lockless_check mode if it exists':
command => "bash -c 'echo 1 2>/dev/null >/sys/kernel/ktimer_lockless_check; exit 0'",
}

} else {
# Disable NUMA balancing
sysctl::value { 'kernel.numa_balancing':


+ 14
- 0
puppet-manifests/src/modules/platform/manifests/sysinv.pp View File

@@ -78,12 +78,26 @@ class platform::sysinv::conductor {

class platform::sysinv::haproxy
inherits ::platform::sysinv::params {
include ::platform::params
include ::platform::haproxy::params

platform::haproxy::proxy { 'sysinv-restapi':
server_name => 's-sysinv',
public_port => $api_port,
private_port => $api_port,
}

# Configure rules for DC https enabled admin endpoint.
if ($::platform::params::distributed_cloud_role == 'systemcontroller' or
$::platform::params::distributed_cloud_role == 'subcloud') {
platform::haproxy::proxy { 'sysinv-restapi-admin':
https_ep_type => 'admin',
server_name => 's-sysinv',
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $api_port + 1,
private_port => $api_port,
}
}
}




+ 2
- 2
puppet-manifests/src/modules/platform/templates/config.toml.erb View File

@@ -26,10 +26,10 @@ oom_score = 0
[plugins.cgroups]
no_prometheus = false
[plugins.cri]
stream_server_address = ""
stream_server_address = "<%= @stream_server_address %>"
stream_server_port = "0"
enable_selinux = false
sandbox_image = "registry.local:9001/k8s.gcr.io/pause:3.1"
sandbox_image = "registry.local:9001/k8s.gcr.io/pause:3.2"
stats_collect_period = 10
systemd_cgroup = false
enable_tls_streaming = false


+ 3
- 0
puppet-manifests/src/modules/platform/templates/kube-apiserver-change-params.erb View File

@@ -20,6 +20,9 @@ python /usr/share/puppet/modules/platform/files/change_kube_apiserver_params.py
<%- if @oidc_groups_claim -%>
--oidc_groups_claim <%= @oidc_groups_claim %> \
<%- end -%>
<%- if @admission_plugins -%>
--admission_plugins <%= @admission_plugins %> \
<%- end -%>

kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch configmap kubeadm-config -p "$(cat <%= @configmap_temp_file %>)"
kubeadm config view > <%= @configmap_temp_file %>


+ 1
- 0
test-requirements.txt View File

@@ -1,3 +1,4 @@
# hacking pulls in flake8
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
bashate >= 0.2
bandit!=1.6.0,>=1.1.0,<2.0.0

+ 6
- 1
tox.ini View File

@@ -68,7 +68,7 @@ sitepackages = False

deps = {[testenv]deps}
ruamel.yaml
pylint
pylint<2.5.0
commands =
pylint {posargs} --rcfile=./pylint.rc puppet-manifests

@@ -81,3 +81,8 @@ show-source = True
ignore = E123,E125,E501,H405,W504
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,release-tag-*

[testenv:bandit]
basepython = python3
description = Bandit code scan for *.py files under config folder
deps = -r{toxinidir}/test-requirements.txt
commands = bandit -r {toxinidir}/ -x '**/.tox/**,**/.eggs/**' -lll

Loading…
Cancel
Save