Merge branch 'master' into f/centos8

Signed-off-by: Charles Short <charles.short@windriver.com>
Change-Id: I2a4beab477cc2ba551122975569588592a006c68
This commit is contained in:
Charles Short 2021-05-18 14:23:56 -04:00
commit 2b026190a3
68 changed files with 1504 additions and 252 deletions

2
bindep.txt Normal file
View File

@ -0,0 +1,2 @@
ruby-devel [test platform:rpm]
ruby-dev [test platform:dpkg]

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=1
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=1
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=1
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -22,8 +22,8 @@
class dcorch (
$database_connection = '',
$database_idle_timeout = 3600,
$database_max_pool_size = 405,
$database_max_overflow = 100,
$database_max_pool_size = 125,
$database_max_overflow = 75,
$control_exchange = 'openstack',
$rabbit_host = '127.0.0.1',
$rabbit_port = 5672,

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=1
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -75,8 +75,9 @@ class fm (
$event_log_max_size = 4000,
$system_name = undef,
$region_name = undef,
$trap_destinations = undef,
$sysinv_catalog_info = undef,
$snmp_enabled = 0,
$snmp_trap_server_port = 162,
) inherits fm::params {
include ::fm::deps
@ -89,7 +90,6 @@ class fm (
'DEFAULT/event_log_max_size': value => $event_log_max_size;
'DEFAULT/system_name': value => $system_name;
'DEFAULT/region_name': value => $region_name;
'DEFAULT/trap_destinations': value => $trap_destinations;
}
# Automatically add psycopg2 driver to postgresql (only does this if it is missing)
@ -106,6 +106,12 @@ class fm (
'sysinv/os_region_name': value => $region_name;
}
fm_config {
'snmp/snmp_enabled': value => $snmp_enabled;
'snmp/trap_server_ip': value => 'controller';
'snmp/trap_server_port': value => $snmp_trap_server_port;
}
fm_api_paste_ini {
'pipeline:fm-api/pipeline': value => 'request_id authtoken api_v1';
'filter:request_id/paste.filter_factory': value => 'oslo_middleware:RequestId.factory';

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=9
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=5
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=2
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=1
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=1
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,2 +1,2 @@
SRC_DIR="src"
TIS_PATCH_VER=9
TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -211,6 +211,13 @@ function is_vlan {
return $?
}
#
# returns $(true) if cfg file has the given interface_name as a PHYSDEV
#
function has_physdev {
    # Returns $(true) if cfg file ($1) has the given interface name ($2)
    # as its PHYSDEV property.
    # Fix: quote the positional parameters so a cfg path containing
    # whitespace (or an empty argument) does not word-split.
    cfg_has_property_with_value "$1" "PHYSDEV" "$2"
}
#
# returns $(true) if cfg file is configured as an ethernet interface. For the
# purposes of this script "ethernet" is considered as any interface that is not
@ -263,12 +270,13 @@ function is_eq_sriov_numvfs {
#
# Warning: Only compares against cfg file attributes:
# BOOTPROTO DEVICE IPADDR NETMASK GATEWAY MTU BONDING_OPTS SRIOV_NUMVFS
# IPV6ADDR IPV6_DEFAULTGW
#
function is_eq_ifcfg {
local cfg_1=$1
local cfg_2=$2
for attr in BOOTPROTO DEVICE IPADDR NETMASK GATEWAY MTU BONDING_OPTS; do
for attr in BOOTPROTO DEVICE IPADDR NETMASK GATEWAY MTU BONDING_OPTS IPV6ADDR IPV6_DEFAULTGW; do
local attr_value1
attr_value1=$(normalized_cfg_attr_value $cfg_1 $attr)
local attr_value2
@ -381,9 +389,15 @@ function update_routes {
function update_interfaces {
upDown=()
changed=()
vlans=()
for cfg_path in $(find /var/run/network-scripts.puppet/ -name "${IFNAME_INCLUDE}"); do
cfg=$(basename $cfg_path)
if is_vlan /etc/sysconfig/network-scripts/$cfg; then
vlans+=($cfg)
fi
diff -I ".*Last generated.*" -q /var/run/network-scripts.puppet/$cfg \
/etc/sysconfig/network-scripts/$cfg >/dev/null 2>&1
@ -448,6 +462,20 @@ function update_interfaces {
do_rm /etc/sysconfig/network-scripts/$r
done
# If a lower ethernet interface is being changed, the upper vlan interface(s) will lose
# configuration such as (IPv6) addresses and (IPv4, IPv6) default routes. If the vlan
# interface is not already in the up/down list, then explicitly add it.
for cfg in ${upDown[@]}; do
for vlan in ${vlans[@]}; do
if has_physdev /var/run/network-scripts.puppet/$vlan ${cfg#ifcfg-}; then
if [[ ! " ${upDown[@]} " =~ " ${vlan} " ]]; then
log_it "Adding ${vlan} to up/down list since physdev ${cfg#ifcfg-} is changing"
upDown+=($vlan)
fi
fi
done
done
# now down the changed ifaces by dealing with vlan interfaces first so that
# they are brought down gracefully (i.e., without taking their dependencies
# away unexpectedly).

View File

@ -15,11 +15,22 @@ done
HIERADATA=$1
HOST=$2
PERSONALITY=$3
MANIFEST=${4:-$PERSONALITY}
# subfunctions is a list of subfunctions, separated by comma
SUBFUNCTIONS=$3
IFS=, read PERSONALITY SUBFUNCTION LL <<< $SUBFUNCTIONS
if [ "${SUBFUNCTION}" = "worker" ]; then
MANIFEST="aio"
else
PERSONALITY=${SUBFUNCTIONS}
MANIFEST=${PERSONALITY}
fi
MANIFEST=${4:-$MANIFEST}
RUNTIMEDATA=$5
logger -t $0 "puppet-manifest-apply ${HIERADATA} ${HOST} ${SUBFUNCTIONS} ${MANIFEST} ${RUNTIMEDATA}"
PUPPET_MODULES_PATH=/usr/share/puppet/modules:/usr/share/openstack-puppet/modules
PUPPET_MANIFEST=/etc/puppet/manifests/${MANIFEST}.pp
PUPPET_TMP=/tmp/puppet
@ -27,7 +38,7 @@ FILEBUCKET_PATH=/var/lib/puppet/clientbucket
# Setup log directory and file
DATETIME=$(date -u +"%Y-%m-%d-%H-%M-%S")
LOGDIR="/var/log/puppet/${DATETIME}_${PERSONALITY}"
LOGDIR="/var/log/puppet/${DATETIME}_${MANIFEST}"
LOGFILE=${LOGDIR}/puppet.log
mkdir -p ${LOGDIR}
@ -41,7 +52,7 @@ chmod 600 ${LOGFILE}
# Remove old log directories
declare -i NUM_DIRS=`ls -d1 /var/log/puppet/[0-9]* 2>/dev/null | wc -l`
declare -i MAX_DIRS=20
declare -i MAX_DIRS=50
if [ ${NUM_DIRS} -gt ${MAX_DIRS} ]; then
let -i RMDIRS=${NUM_DIRS}-${MAX_DIRS}
ls -d1 /var/log/puppet/[0-9]* | head -${RMDIRS} | xargs --no-run-if-empty rm -rf
@ -53,33 +64,78 @@ fi
rm -rf ${PUPPET_TMP}
mkdir -p ${PUPPET_TMP}/hieradata
cp /etc/puppet/hieradata/global.yaml ${PUPPET_TMP}/hieradata/global.yaml
cp /etc/puppet/hieradata/${PERSONALITY}.yaml ${PUPPET_TMP}/hieradata/personality.yaml
if [ "${MANIFEST}" = 'aio' ]; then
cat /etc/puppet/hieradata/controller.yaml /etc/puppet/hieradata/worker.yaml > ${PUPPET_TMP}/hieradata/personality.yaml
else
cp /etc/puppet/hieradata/${PERSONALITY}.yaml ${PUPPET_TMP}/hieradata/personality.yaml
fi
# When the worker node is first booted and goes online, sysinv-agent reports
# host CPU inventory which triggers the first runtime manifest apply that updates
# the grub. At this time, copying the host file failed due to a timing issue that
# has not yet been fully understood. Subsequent retries worked.
if [ "${PERSONALITY}" = "worker" ]; then
n=0
until [ $n -ge 3 ]; do
cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml && break
n=$(($n+1))
logger -t $0 "Failed to copy /etc/puppet/hieradata/${HOST}.yaml"
sleep 15
done
#
# When back to back runtime manifests (e.g. as on https modify certificate
# install) are issued, copying of the hieradata file may fail. Suspect this is due
# to potential update of hieradata on the controller while the file is being
# copied. Check rsync status and retry if needed.
declare -i MAX_RETRIES=3
HIERA_HOST=()
if [ "${MANIFEST}" == 'ansible_bootstrap' ]; then
HIERA_SYS=("${HIERADATA}/secure_static.yaml" "${HIERADATA}/static.yaml")
elif [ "${MANIFEST}" == 'upgrade' ]; then
HIERA_SYS=("${HIERADATA}/secure_static.yaml" "${HIERADATA}/static.yaml" "${HIERADATA}/system.yaml")
else
cp -f ${HIERADATA}/${HOST}.yaml ${PUPPET_TMP}/hieradata/host.yaml
HIERA_SYS=("${HIERADATA}/secure_static.yaml" "${HIERADATA}/static.yaml" "${HIERADATA}/system.yaml" "${HIERADATA}/secure_system.yaml")
HIERA_HOST=("${HIERADATA}/${HOST}.yaml")
fi
cp -f ${HIERADATA}/system.yaml \
${HIERADATA}/secure_system.yaml \
${HIERADATA}/static.yaml \
${HIERADATA}/secure_static.yaml \
${PUPPET_TMP}/hieradata/
if [ -n "${RUNTIMEDATA}" ]; then
cp -f ${RUNTIMEDATA} ${PUPPET_TMP}/hieradata/runtime.yaml
HIERA_RUNTIME=("${RUNTIMEDATA}")
else
HIERA_RUNTIME=()
fi
DELAY_SECS=15
# Retry loop: copy each hieradata group with rsync -c (checksum mode) and
# clear a group's list only once it has been transferred successfully, so
# only the still-failing groups are retried on the next iteration.
for (( iter=1; iter<=$MAX_RETRIES; iter++ )); do
    if [ ${#HIERA_HOST[@]} -ne 0 ]; then
        rsync -c "${HIERA_HOST[@]}" ${PUPPET_TMP}/hieradata/host.yaml
        if [ $? -eq 0 ]; then
            HIERA_HOST=()
        fi
    fi
    # Fix: guard against invoking rsync with an empty source list when the
    # system files were already copied on a previous iteration.
    if [ ${#HIERA_SYS[@]} -ne 0 ]; then
        rsync -c "${HIERA_SYS[@]}" ${PUPPET_TMP}/hieradata
        if [ $? -eq 0 ]; then
            HIERA_SYS=()
        fi
    fi
    if [ ${#HIERA_RUNTIME[@]} -ne 0 ]; then
        rsync -c "${HIERA_RUNTIME[@]}" ${PUPPET_TMP}/hieradata/runtime.yaml
        if [ $? -eq 0 ]; then
            HIERA_RUNTIME=()
        fi
    fi
    # Fix: the original tested HIERA_SYS twice and never HIERA_RUNTIME, so
    # a persistent runtime.yaml copy failure terminated the loop as success.
    if [ ${#HIERA_HOST[@]} -eq 0 ] && [ ${#HIERA_SYS[@]} -eq 0 ] && [ ${#HIERA_RUNTIME[@]} -eq 0 ]; then
        break
    fi
    # Fix: HIERA_FILES_RUNTIME was an undefined variable; log HIERA_RUNTIME.
    logger -t $0 "Failed to copy ${HIERA_HOST[*]}:${HIERA_SYS[*]}:${HIERA_RUNTIME[*]} iteration: ${iter}."
    if [ ${iter} -eq ${MAX_RETRIES} ]; then
        echo "[FAILED]"
        echo "Exiting, failed to rsync hieradata"
        logger -t $0 "Exiting, failed to rsync hieradata"
        exit 1
    else
        logger -t $0 "Failed to rsync hieradata iteration: ${iter}. Retry in ${DELAY_SECS} seconds"
        sleep ${DELAY_SECS}
    fi
done
# Exit function to save logs from initial apply
function finish {
@ -87,6 +143,7 @@ function finish {
if [ ! -f ${SAVEDLOGS} ]; then
# Save the logs
tar czf ${SAVEDLOGS} ${LOGDIR} 2>/dev/null
chmod 600 ${SAVEDLOGS}
fi
# To avoid the ever growing contents of filebucket which may trigger inode

View File

@ -81,8 +81,6 @@ platform::influxdb::logrotate::params::log_file_rotate: 10
# postgresql
postgresql::globals::needs_initdb: false
postgresql::server::service_enable: false
postgresql::server::ip_mask_deny_postgres_user: '0.0.0.0/32'
postgresql::server::ip_mask_allow_all_users: '0.0.0.0/0'
postgresql::server::pg_hba_conf_path: "/etc/postgresql/pg_hba.conf"
postgresql::server::pg_ident_conf_path: "/etc/postgresql/pg_ident.conf"
postgresql::server::postgresql_conf_path: "/etc/postgresql/postgresql.conf"

View File

@ -34,7 +34,7 @@ platform::collectd::params::write_queue_limit_high: 1000000
platform::collectd::params::write_queue_limit_low: 800000
platform::collectd::params::server_addrs: ['controller']
platform::collectd::params::server_port: 25826
platform::collectd::params::collectd_d_dir: '/etc/collectd.d'
platform::collectd::params::collectd_d_dir: '/etc/collectd.d/starlingx'
# collectd: module named plugins
platform::collectd::params::module_path: '/opt/collectd/extensions/python'

View File

@ -0,0 +1,116 @@
#
# puppet manifest for controller nodes of AIO system
#
Exec {
timeout => 600,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
}
class { '::firewall':
ensure => stopped
}
include ::platform::config
include ::platform::users
include ::platform::sysctl::controller
include ::platform::filesystem::controller
include ::platform::firewall::calico::oam
include ::platform::dhclient
include ::platform::partitions
include ::platform::lvm::aio
include ::platform::network
include ::platform::drbd
include ::platform::exports
include ::platform::dns
include ::platform::ldap::server
include ::platform::ldap::client
include ::platform::password
include ::platform::ntp::server
include ::platform::ptp
include ::platform::lldp
include ::platform::amqp::rabbitmq
include ::platform::postgresql::server
include ::platform::haproxy::server
include ::platform::grub
include ::platform::etcd
include ::platform::docker::controller
include ::platform::dockerdistribution
include ::platform::containerd::controller
include ::platform::kubernetes::gate
include ::platform::helm
include ::platform::armada
include ::platform::patching
include ::platform::patching::api
include ::platform::remotelogging
include ::platform::remotelogging::proxy
include ::platform::sysinv
include ::platform::sysinv::api
include ::platform::sysinv::conductor
include ::platform::mtce
include ::platform::mtce::agent
include ::platform::memcached
include ::platform::nfv
include ::platform::nfv::api
include ::platform::ceph::controller
include ::platform::ceph::rgw
include ::platform::influxdb
include ::platform::influxdb::logrotate
include ::platform::collectd
include ::platform::fm
include ::platform::fm::api
include ::platform::multipath
include ::platform::client
include ::openstack::keystone
include ::openstack::keystone::api
include ::openstack::horizon
include ::platform::dcmanager
include ::platform::dcmanager::manager
include ::platform::dcorch
include ::platform::dcorch::engine
include ::platform::dcorch::api_proxy
include ::platform::dcmanager::api
include ::platform::certmon
include ::platform::dcdbsync
include ::platform::dcdbsync::api
include ::platform::smapi
include ::openstack::barbican
include ::openstack::barbican::api
include ::platform::sm
include ::platform::lmon
include ::platform::rook
include ::platform::deviceimage
include ::platform::compute
include ::platform::vswitch
include ::platform::devices
include ::platform::interfaces::sriov::config
include ::platform::worker::storage
include ::platform::pciirqaffinity
include ::platform::docker::login
include ::platform::kubernetes::aio
class { '::platform::config::aio::post':
stage => post,
}
hiera_include('classes')

View File

@ -21,6 +21,9 @@ include ::platform::client::bootstrap
include ::platform::sysinv::bootstrap
# Puppet class to setup helm database
include ::platform::helm::bootstrap
# Puppet classes to enable the bring up of kubernetes master
include ::platform::docker::bootstrap
include ::platform::etcd::bootstrap

View File

@ -2,6 +2,10 @@
# puppet manifest for controller hosts
#
# A separated AIO manifest (./aio.pp) is applied to AIO controllers.
# Changes for controllers should also be considered to implement in
# aio.pp.
Exec {
timeout => 600,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
@ -44,6 +48,7 @@ include ::platform::docker::controller
include ::platform::dockerdistribution
include ::platform::containerd::controller
include ::platform::kubernetes::master
include ::platform::kubernetes::gate
include ::platform::helm
include ::platform::armada
@ -102,6 +107,9 @@ include ::openstack::barbican::api
include ::platform::sm
include ::platform::lmon
include ::platform::rook
include ::platform::deviceimage
class { '::platform::config::controller::post':
stage => post,

View File

@ -30,6 +30,7 @@ include ::platform::filesystem::storage
include ::platform::docker::storage
include ::platform::containerd::storage
include ::platform::ceph::storage
include ::platform::rook
class { '::platform::config::storage::post':
stage => post,

View File

@ -2,6 +2,10 @@
# puppet manifest for worker nodes
#
# A separated AIO manifest (./aio.pp) is applied to AIO controllers.
# Changes for workers should also be considered to implement in
# aio.pp.
Exec {
timeout => 300,
path => '/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
@ -27,7 +31,7 @@ include ::platform::remotelogging
include ::platform::mtce
include ::platform::sysinv
include ::platform::devices
include ::platform::interfaces::sriov
include ::platform::interfaces::sriov::config
include ::platform::grub
include ::platform::collectd
include ::platform::filesystem::compute
@ -42,6 +46,7 @@ include ::platform::ceph::worker
include ::platform::worker::storage
include ::platform::pciirqaffinity
include ::platform::lmon
include ::platform::rook
class { '::platform::config::worker::post':
stage => post,

View File

@ -460,6 +460,12 @@ class openstack::keystone::endpoint::runtime {
}
}
# Wrapper that applies openstack::keystone::endpoint::runtime in the
# post stage, i.e. after the rest of the catalog has been applied.
class openstack::keystone::endpoint::runtime::post {
  class {'openstack::keystone::endpoint::runtime':
    stage => post
  }
}
class openstack::keystone::upgrade (
$upgrade_token_cmd,
$upgrade_url = undef,
@ -479,13 +485,6 @@ class openstack::keystone::upgrade (
$keystone_key_repo = "${::platform::drbd::platform::params::mountpoint}/keystone"
# TODO(aning): For R5->R6 upgrade, a local keystone fernet keys repository may
# need to be setup for the local keystone instance on standby controller to
# service specific upgrade operations, since we need to keep the keys repository
# in /opt/platform/keystone/fernet-keys intact so that service won't fail on active
# controller during upgrade. Once the upgade finishes, the temparary local
# fernet keys repository will be deleted.
# Need to create the parent directory for fernet keys repository
# This is a workaround to a puppet bug.
file { $keystone_key_repo:

View File

@ -218,7 +218,7 @@ server.max-keep-alive-idle = 0
## read proxy.txt for more info
# Proxy all non-static content to the local horizon dashboard
$HTTP["url"] !~ "^/(rel-[^/]*|feed|updates|static|helm_charts|iso)/" {
$HTTP["url"] !~ "^/(rel-[^/]*|feed|updates|static|helm_charts|iso|device_images)/" {
proxy.server = ( "" =>
( "localhost" =>
(

View File

@ -20,6 +20,11 @@ parser.add_argument("--oidc_client_id")
parser.add_argument("--oidc_username_claim")
parser.add_argument("--oidc_groups_claim")
parser.add_argument("--admission_plugins")
parser.add_argument("--etcd_cafile")
parser.add_argument("--etcd_certfile")
parser.add_argument("--etcd_keyfile")
parser.add_argument("--etcd_servers")
args = parser.parse_args()
if args.configmap_file:
@ -77,6 +82,24 @@ else:
if plugins in cluster_config['apiServer']['extraArgs']:
del cluster_config['apiServer']['extraArgs'][plugins]
# etcd parameters are required to start up kube-apiserver
# do not remove any existing etcd parameters in the config map
if args.etcd_cafile:
cluster_config['etcd']['external']['caFile'] = \
args.etcd_cafile
if args.etcd_certfile:
cluster_config['etcd']['external']['certFile'] = \
args.etcd_certfile
if args.etcd_keyfile:
cluster_config['etcd']['external']['keyFile'] = \
args.etcd_keyfile
if args.etcd_servers:
cluster_config['etcd']['external']['endpoints'] = \
args.etcd_servers.split(',')
cluster_config_string = yaml.dump(cluster_config, Dumper=yaml.RoundTripDumper,
default_flow_style=False)
# use yaml.scalarstring.PreservedScalarString to make sure the yaml is

View File

@ -19,7 +19,7 @@
DESC="ETCD highly-available key value database"
SERVICE="etcd.service"
PIDFILE="/var/run/etcd.pid"
UPGRADE_SWACT_FILE="/etc/platform/.upgrade_swact_controller_1"
UPGRADE_SWACT_FILE="/opt/etcd/.upgrade_etcd"
status()

View File

@ -4,6 +4,17 @@ User=root
NotifyAccess=all
Type=notify
ExecStart=
ExecStart=-/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" 2>&1 | /usr/bin/forward-journald -tag etcd"
ExecStart=-/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd \
--name=\"${ETCD_NAME}\" \
--data-dir=\"${ETCD_DATA_DIR}\" \
--listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \
--client-cert-auth=${ETCD_CLIENT_CERT_AUTH} \
--trusted-ca-file=\"${ETCD_TRUSTED_CA_FILE}\" \
--cert-file=\"${ETCD_CERT_FILE}\" \
--key-file=\"${ETCD_KEY_FILE}\" 2>&1 \
| /usr/bin/forward-journald -tag etcd"
ExecStartPost=/bin/bash -c 'echo $MAINPID >/var/run/etcd.pid'
ExecStopPost=/bin/bash/rm -f /var/run/etcd.pid
ExecStopPost=/bin/bash -c 'rm -f /var/run/etcd.pid'
Nice=-19
IOSchedulingClass=best-effort
IOSchedulingPriority=0

View File

@ -0,0 +1,5 @@
# Custom fact: directory entries under the Ceph OSD root whose names
# match "ceph-" (i.e. the provisioned OSD data directories).
Facter.add("configured_ceph_osds") do
  setcode do
    osd_root = "/var/lib/ceph/osd"
    Dir.entries(osd_root).grep(/ceph-.*/)
  end
end

View File

@ -0,0 +1,7 @@
# Returns true if active controller found on this node
Facter.add("is_active_controller_found") do
  setcode do
    # The flag file is present when no active controller could be found;
    # the fact is the negation of its existence.
    flag = '/var/run/.active_controller_not_found'
    not File.exist?(flag)
  end
end

View File

@ -0,0 +1,7 @@
# Returns true if Rook Ceph has been configured on current node
Facter.add("is_node_rook_ceph_configured") do
  setcode do
    flag = '/etc/platform/.node_rook_ceph_configured'
    File.exist?(flag)
  end
end

View File

@ -30,7 +30,7 @@ class platform::amqp::rabbitmq (
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
mode => '0750',
}
if $service_enabled {

View File

@ -37,9 +37,12 @@ class platform::ceph::params(
$rgw_gc_processor_max_time = '300',
$rgw_gc_processor_period = '300',
$configure_ceph_mon_info = false,
$simplex_to_duplex_migration = false,
$cephfs_filesystems = {},
$ceph_config_file = '/etc/ceph/ceph.conf',
$ceph_config_ready_path = '/var/run/.ceph_started',
$node_ceph_configured_flag = '/etc/platform/.node_ceph_configured',
$pmond_ceph_file = '/etc/pmon.d/ceph.conf',
) { }
@ -120,7 +123,15 @@ class platform::ceph
}
# Remove old, no longer in use, monitor hosts from Ceph's config file
$valid_monitors = [ $mon_0_host, $mon_1_host, $mon_2_host ]
if $system_type == 'All-in-one' {
if $system_mode == 'simplex' {
$valid_monitors = [ $mon_0_host ]
} else {
$valid_monitors = [ $floating_mon_host ]
}
} else {
$valid_monitors = [ $mon_0_host, $mon_1_host, $mon_2_host ]
}
$::configured_ceph_monitors.each |Integer $index, String $monitor| {
if ! ($monitor in $valid_monitors) {
@ -222,8 +233,19 @@ class platform::ceph::monitor
}
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
# ensure DRBD config is complete before enabling the ceph monitor
Drbd::Resource <| |> -> Class['::ceph']
# if transition from AIO-SX to AIO-DX has started, we need to
# wipe the logical volume before mounting DRBD
# and remove the pmon.d managed ceph daemons
if ($simplex_to_duplex_migration and str2bool($::is_node_ceph_configured)) {
contain ::platform::ceph::migration::sx_to_dx::remove_mon
include ::platform::ceph::migration::sx_to_dx::rebuild_mon
Class['::platform::ceph::migration::sx_to_dx::remove_mon']
-> Class['::ceph']
} else {
# ensure DRBD config is complete before enabling the ceph monitor
Drbd::Resource <| |> -> Class['::ceph']
}
} else {
File['/var/lib/ceph']
-> platform::filesystem { $mon_lv_name:
@ -325,6 +347,234 @@ class platform::ceph::monitor
}
}
# Part of the AIO-SX to AIO-DX migration: tears down the locally-mounted
# ceph-mon logical volume so its mountpoint can be taken over by the
# drbd-cephmon replicated device.
class platform::ceph::migration::sx_to_dx::remove_mon
  inherits platform::ceph::params {

  include ::platform::filesystem::params

  $vg_name = $::platform::filesystem::params::vg_name
  $drbd_device = $::platform::drbd::cephmon::params::device
  # Logical volume that previously backed the monitor data directory.
  $lv_device = "/dev/${vg_name}/${mon_lv_name}"

  exec { 'Unmounting cephmon logical volume' :
    command => "umount ${mon_mountpoint}",
    onlyif => "mountpoint -q ${mon_mountpoint}",
  }
  # Remove the old ext4 fstab entry so the LV is no longer auto-mounted.
  -> exec { "Removing auto mounting ${mon_mountpoint} from fstab" :
    command => "/bin/sed -i '/^.*${mon_lv_name}.*ext4/d' /etc/fstab",
    onlyif => "grep -q '^.*${mon_lv_name}.*ext4' /etc/fstab",
  }
  # Zero 34 sectors at each end of the LV (presumably to clear GPT
  # primary/backup metadata — TODO confirm) before handing it to DRBD.
  -> exec { "wipe start of device ${lv_device}" :
    command => "dd if=/dev/zero of=${lv_device} bs=512 count=34",
    onlyif => "blkid ${lv_device}",
  }
  -> exec { "wipe end of device ${lv_device}" :
    command => "dd if=/dev/zero of=${lv_device} bs=512 seek=$(($(blockdev --getsz ${lv_device}) - 34)) count=34",
  }
  # Remove the pmon.d config so pmon stops monitoring/restarting the ceph
  # daemons during the migration.
  -> exec { "remove ${pmond_ceph_file}" :
    command => "rm -f ${pmond_ceph_file}",
    onlyif => "test -f ${pmond_ceph_file}",
  }
  # Only once the DRBD resource is configured, add the replicated device
  # to fstab and mount it at the monitor mountpoint.
  -> Drbd::Resource['drbd-cephmon']
  -> exec { 'Adding auto mount for drbd-cephmon to fstab' :
    command => "echo \"${drbd_device} ${mon_mountpoint} auto defaults,noauto 0 0\" | tee -a /etc/fstab",
  }
  -> exec { 'Mount drbd-cephmon DRBD device' :
    command => "/usr/bin/mount ${mon_mountpoint}",
  }
}
# Part of the AIO-SX to AIO-DX migration: rebuilds the monitor store.db
# for the floating monitor from the data held by the local OSDs.
class platform::ceph::migration::sx_to_dx::rebuild_mon
  inherits platform::ceph::params {

  # Make sure osds are provisioned
  Class['::platform::ceph::osds'] -> Class[$name]

  $mon_db_path = "${$mon_mountpoint}/ceph-${floating_mon_host}"

  # Take ceph-osd/ceph-mon out of SM control while the store is rebuilt;
  # /var/run/goenabled gates this to nodes where SM is actually managing
  # the services.
  exec { 'sm-unmanage service ceph-osd to rebuild store.db' :
    command => 'sm-unmanage service ceph-osd',
    onlyif => 'test -f /var/run/goenabled',
  }
  -> exec { 'sm-unmanage service ceph-mon to rebuild store.db' :
    command => 'sm-unmanage service ceph-mon',
    onlyif => 'test -f /var/run/goenabled',
  }
  -> exec { 'stop Ceph OSDs and Monitor' :
    command => '/etc/init.d/ceph-init-wrapper stop'
  }
  -> exec { 'Remove current ceph-controller store.db' :
    command => "rm -rf ${mon_db_path}/store.db",
    onlyif => "test -d ${mon_db_path}/store.db"
  }

  # Recover the monitor database from each configured OSD in turn
  # ($::configured_ceph_osds is a custom fact), then rebuild the monmap.
  $::configured_ceph_osds.each |Integer $index, String $osd| {
    exec { "Rebuilding monitor storage from OSD ${osd}" :
      command => "ceph-objectstore-tool --data-path /var/lib/ceph/osd/${osd} --no-mon-config\
 --op update-mon-db --mon-store-path ${mon_db_path}",
      require => Exec['Remove current ceph-controller store.db'],
    }
    Exec["Rebuilding monitor storage from OSD ${osd}"] -> Exec['Add monitor information to store.db']
  }

  exec { 'Add monitor information to store.db' :
    command => "ceph-monstore-tool ${mon_db_path} rebuild --mon-ids ${floating_mon_host}",
  }
  # Restart the monitor first, then the remaining ceph components, then
  # hand the services back to SM.
  -> exec { 'start Ceph Monitor after rebuilding monitor store' :
    command => '/etc/init.d/ceph-init-wrapper start mon',
  }
  -> exec { 'start other Ceph components after rebuilding monitor store' :
    command => '/etc/init.d/ceph-init-wrapper start',
  }
  -> exec { 'sm-manage service ceph-osd after rebuilding monitor store' :
    command => 'sm-manage service ceph-osd',
    onlyif => 'test -f /var/run/goenabled',
  }
  -> exec { 'sm-manage service ceph-mon after rebuilding monitor store' :
    command => 'sm-manage service ceph-mon',
    onlyif => 'test -f /var/run/goenabled',
  }

  # Final cluster-wide fixups (cephfs rebuild, crushmap update) run in the
  # post stage.
  class { 'platform::ceph::migration::sx_to_dx::active_cluster_updates' :
    stage => post,
  }
}
# Post-stage step of the AIO-SX to AIO-DX migration: with the rebuilt
# monitor running, recreate the cephfs filesystems from their existing
# pools and update the crushmap for duplex operation.
class platform::ceph::migration::sx_to_dx::active_cluster_updates
  inherits platform::ceph::params {

  exec { 'Ensure Ceph Monitor is running' :
    command => '/etc/init.d/ceph-init-wrapper start mon',
  }
  -> exec { 'Ensure Ceph OSDs are running' :
    command => '/etc/init.d/ceph-init-wrapper start osd',
  }
  # mds must be stopped while the filesystems are recreated and reset.
  -> exec { 'Ensure Ceph mds is stoped':
    command => '/etc/init.d/ceph-init-wrapper stop mds'
  }

  # $cephfs_filesystems maps a filesystem name to a two-element array:
  # [metadata pool, data pool] (see the 'ceph fs new' argument order).
  $cephfs_filesystems.each |String $fs, Array $pools| {
    $metadada_pool = $pools[0]
    $data_pool = $pools[1]
    exec { "Rebuilding cephfs filesystem ${fs}" :
      command => "ceph fs new ${fs} ${metadada_pool} ${data_pool} --force",
      require => Exec['Ensure Ceph mds is stoped'],
    }
    -> exec { "Reset cephfs filesystem ${fs}" :
      command => "ceph fs reset ${fs} --yes-i-really-mean-it",
    }
    # All filesystems must be reset before the mds is started again.
    Exec["Reset cephfs filesystem ${fs}"] -> Exec['Ensure Ceph mds is re-started']
  }

  exec { 'Ensure Ceph mds is re-started':
    command => '/etc/init.d/ceph-init-wrapper start mds'
  }
  # The crushmap update command is generated from an ERB template
  # (adds the controller-1 bucket for DX).
  -> exec { 'Update crushmap to support DX' :
    command => template('platform/ceph_crushmap_add_controller1_bucket.erb'),
  }
}
# Registers this host as a Ceph metadata server (mds) in ceph.conf when it
# is one of the configured monitor hosts; the setting is written only after
# the ::ceph class has been applied.
class platform::ceph::metadataserver::config
  inherits ::platform::ceph::params {

  # Same check for each of the three possible monitor hosts, expressed as
  # a loop instead of three copies of the conditional.
  [$mon_0_host, $mon_1_host, $mon_2_host].each |$candidate_host| {
    if $::hostname == $candidate_host {
      Class['::ceph']
      -> ceph_config {
        "mds.${$::hostname}/host": value => $candidate_host;
      }
    }
  }
}
# Runtime (re)start of the Ceph metadata server on controller nodes.
# Strategy depends on system type/mode:
#   - AIO duplex, active controller: bounce mon+mds outside SM control
#   - AIO duplex, standby controller: start mds only
#   - non-AIO controllers: restart ceph via pmon, then start mds
# NOTE(review): on AIO simplex neither branch below fires (the inner
# 'duplex' check fails), so no runtime restart is performed — confirm
# this is intentional.
class platform::ceph::metadataserver::controller::runtime
  inherits ::platform::ceph::params {

  include ::platform::ceph::metadataserver::config

  if $::personality == 'controller' {
    include ::platform::sm::ceph::runtime

    # Make sure the metadata config is added before starting services
    Class['::platform::ceph::metadataserver::config'] -> Class[$name]

    # Make sure the ceph SM services are provisioned
    Class['::platform::sm::ceph::runtime'] -> Class[$name]

    $system_mode = $::platform::params::system_mode
    $system_type = $::platform::params::system_type

    if $system_type == 'All-in-one' {
      if 'duplex' in $system_mode {
        if str2bool($::is_controller_active) {
          # Active Duplex Controller: temporarily unmanage ceph-mon in SM
          # so the restart sequence is not raced by SM recovery actions.
          exec { 'sm-unmanage service ceph-mon':
            command => 'sm-unmanage service ceph-mon'
          }
          -> exec { 'Ensure Ceph monitor is started':
            command => '/etc/init.d/ceph-init-wrapper start mon'
          }
          -> exec { 'Ensure Ceph metadata server is started':
            command => '/etc/init.d/ceph-init-wrapper start mds'
          }
          -> exec { 'sm-manage service ceph-mon':
            command => 'sm-manage service ceph-mon'
          }
        } else {
          # Standby Duplex Controller
          exec { 'Ensure Ceph metadata server is started':
            command => '/etc/init.d/ceph-init-wrapper start mds'
          }
        }
      }
    } else {
      # Simplex/Std Controller
      exec { 'Ensure Ceph monitor is started':
        command => '/usr/local/sbin/pmon-restart ceph'
      }
      -> exec { 'Ensure Ceph metadata server is started':
        command => '/etc/init.d/ceph-init-wrapper start mds'
      }
    }
  }
}
# Runtime (re)start of the Ceph metadata server on worker nodes.
class platform::ceph::metadataserver::worker::runtime
  inherits ::platform::ceph::params {

  include ::platform::ceph::metadataserver::config

  if $::personality == 'worker' {
    # Only workers that host one of the assigned Ceph monitors run an mds.
    if $::hostname in [$mon_0_host, $mon_1_host, $mon_2_host] {
      # Worker with a monitor assigned: the monitor and the mds
      # configuration must both be in place before the daemons restart.
      Class['::platform::ceph::monitor'] -> Class[$name]
      Class['::platform::ceph::metadataserver::config'] -> Class[$name]

      exec {'Ensure Ceph monitor is started':
        command => '/usr/local/sbin/pmon-restart ceph'
      }
      -> exec { 'Ensure Ceph metadata server is started':
        command => '/etc/init.d/ceph-init-wrapper start mds'
      }
    }
  }
}
# Entry point for the mds runtime manifest: includes both the controller
# and worker variants; each guards internally on $::personality.
class platform::ceph::metadataserver::runtime {
  include ::platform::ceph::metadataserver::controller::runtime
  include ::platform::ceph::metadataserver::worker::runtime
}
define osd_crush_location(
$osd_id,
$osd_uuid,
@ -415,7 +665,7 @@ class platform::ceph::osds(
# skip_osds_during_restore is set to true when the default primary
# ceph backend "ceph-store" has "restore" as its task and it is
# not an AIO system.
if ! $skip_osds_during_restore {
if ! $skip_osds_during_restore and $service_enabled {
file { '/var/lib/ceph/osd':
ensure => 'directory',
path => '/var/lib/ceph/osd',
@ -541,12 +791,14 @@ class platform::ceph::worker {
if $::personality == 'worker' {
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::metadataserver::config
}
}
class platform::ceph::storage {
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::metadataserver::config
include ::platform::ceph::osds
# Ensure partitions update prior to ceph storage configuration
@ -556,21 +808,36 @@ class platform::ceph::storage {
class platform::ceph::controller {
include ::platform::ceph
include ::platform::ceph::monitor
include ::platform::ceph::osds
include ::platform::ceph::metadataserver::config
# Ensure partitions update prior to ceph storage configuration
Class['::platform::partitions'] -> Class['::platform::ceph::osds']
# is_active_controller_found is checking the existence of
# /var/run/.active_controller_not_found, which will be created
# by /etc/init.d/controller_config if it couldn't detect an active
# controller. This will be the case for DOR (Dead Office Recovery),
# during which both controllers are booting up thus there is no
# active controller. The ceph::osds class has to be skipped in this
# case otherwise it will fail for not being able to find ceph monitor
# cluster.
if str2bool($::is_active_controller_found) {
include ::platform::ceph::osds
# Ensure partitions update prior to ceph storage configuration
Class['::platform::partitions'] -> Class['::platform::ceph::osds']
}
}
class platform::ceph::runtime_base {
include ::platform::ceph::monitor
include ::platform::ceph::metadataserver::runtime
include ::platform::ceph
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
Drbd::Resource <| |> -> Class[$name]
if $::personality == 'controller' {
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
Drbd::Resource <| |> -> Class[$name]
}
Class[$name] -> Class['::platform::sm::ceph::runtime']
}
}

View File

@ -33,13 +33,6 @@ class platform::collectd
unless => 'systemctl is-enabled collectd'
}
# ensure that collectd is running
-> service { 'collectd':
ensure => running,
provider => 'systemd',
require => Anchor['platform::networking'],
} # now get pmond to monitor the process
# ensure pmon soft link for process monitoring
-> file { '/etc/pmon.d/collectd.conf':
ensure => 'link',

View File

@ -247,6 +247,16 @@ class platform::config::tpm {
}
# Manages the dracut_args line in /etc/kdump.conf so that the listed
# network drivers are omitted from the crash kernel's initramfs
# (presumably to keep the crash environment minimal — TODO confirm
# rationale), and reloads kdump when the line changes.
class platform::config::kdump {
  file_line { '/etc/kdump.conf dracut_args':
    path => '/etc/kdump.conf',
    line => 'dracut_args --omit-drivers "ice e1000e i40e ixgbe ixgbevf iavf"',
    match => '^dracut_args .*--omit-drivers',
  }
  # '~>' restarts the kdump service only when the file_line changes.
  ~> service { 'kdump': }
}
class platform::config::certs::ssl_ca
inherits ::platform::config::certs::params {
@ -353,8 +363,10 @@ class platform::config::pre {
include ::platform::config::hosts
include ::platform::config::file
include ::platform::config::tpm
include ::platform::config::kdump
include ::platform::config::certs::ssl_ca
if ($::platform::params::distributed_cloud_role =='systemcontroller' and
if (($::platform::params::distributed_cloud_role =='systemcontroller' or
$::platform::params::distributed_cloud_role =='subcloud') and
$::personality == 'controller') {
include ::platform::config::dc_root_ca
}
@ -441,6 +453,18 @@ class platform::config::storage::post
}
}
class platform::config::aio::post
{
file { '/etc/platform/.initial_controller_config_complete':
ensure => present,
}
file { '/var/run/.controller_config_complete':
ensure => present,
}
include ::platform::config::worker::post
}
class platform::config::bootstrap {
stage { 'pre':
before => Stage['main'],

View File

@ -7,6 +7,7 @@ class platform::containerd::params (
$insecure_registries = undef,
$k8s_cni_bin_dir = '/usr/libexec/cni',
$stream_server_address = 'localhost',
$custom_container_runtime = undef,
) { }
class platform::containerd::config
@ -37,6 +38,9 @@ class platform::containerd::config
$insecure_registries = $::platform::dockerdistribution::registries::insecure_registries
$distributed_cloud_role = $::platform::params::distributed_cloud_role
# grab custom cri class entries
$custom_container_runtime = $::platform::containerd::params::custom_container_runtime
if $http_proxy or $https_proxy {
file { '/etc/systemd/system/containerd.service.d':
ensure => 'directory',

View File

@ -0,0 +1,23 @@
# Default filesystem locations for device images: staged under the
# replicated /opt/platform tree and served from the lighttpd web root.
class platform::deviceimage::params(
  $source_deviceimage_base_dir = '/opt/platform/device_images',
  $target_deviceimage_base_dir = '/www/pages/device_images',
) {}
# Creates the device-image directories, both owned by the 'www' account
# so they can be served over HTTP.
class platform::deviceimage
  inherits ::platform::deviceimage::params {

  # Web-served target directory.
  file {$target_deviceimage_base_dir:
    ensure  => directory,
    owner   => 'www',
    require => User['www']
  }

  # The source directory lives under /opt/platform (replicated storage),
  # so wait for all DRBD resources to be set up before creating it.
  Drbd::Resource <| |>
  -> file {$source_deviceimage_base_dir:
    ensure  => directory,
    owner   => 'www',
    require => User['www']
  }
}

View File

@ -76,6 +76,11 @@ define platform::devices::sriov_bind (
Class['platform::devices::fpga::n3000::reset']
-> Exec["sriov-bind-device: ${title}"]
}
if ($device_id != undef) and ($device_id == '0d5c') {
include platform::devices::acc100::fec
Exec["sriov-enable-device: ${title}"]
-> Class['platform::devices::acc100::fec']
}
ensure_resource(kmod::load, $driver)
exec { "sriov-bind-device: ${title}":
command => template('platform/sriov.bind-device.erb'),
@ -109,10 +114,8 @@ define platform::devices::sriov_pf_enable (
class platform::devices::fpga::fec::vf
inherits ::platform::devices::fpga::fec::params {
include ::platform::kubernetes::worker::sriovdp
require ::platform::devices::fpga::fec::pf
create_resources('platform::devices::sriov_vf_bind', $device_config, {})
Platform::Devices::Sriov_vf_bind <| |> -> Class['platform::kubernetes::worker::sriovdp']
}
class platform::devices::fpga::fec::pf
@ -129,16 +132,17 @@ class platform::devices::fpga::fec::params (
$device_config = {}
) { }
class platform::devices::fpga::n3000::reset {
# The N3000 FPGA is reset via docker container application by the
# sysinv FPGA agent on startup. This will clear the number of VFs
# configured on the FEC device as well as any bound drivers.
exec { 'Waiting for n3000 reset before enabling device':
command => 'test -e /var/run/.sysinv_n3000_reset',
path => '/usr/bin/',
class platform::devices::fpga::n3000::reset
inherits ::platform::devices::fpga::fec::params {
# To reset N3000 FPGA
Class[$name] -> Class['::platform::devices::fpga::fec::config']
exec { 'Reset n3000 fpgas':
command => 'sysinv-reset-n3000-fpgas',
path => ['/usr/bin/', '/usr/sbin/'],
tries => 60,
try_sleep => 1,
require => Anchor['platform::networking'],
unless => 'test -e /var/run/.sysinv_n3000_reset'
}
}
@ -153,9 +157,19 @@ class platform::devices::fpga::fec {
require ::platform::devices::fpga::fec::config
}
class platform::devices::acc100::fec (
$enabled = false
)
{
if $enabled {
exec { 'Mt.Bryce: Configuring baseband device':
command => template('platform/processing.accelerator-config.erb'),
logoutput => true,
}
}
}
class platform::devices {
include ::platform::devices::qat
include ::platform::devices::fpga::fec
}

View File

@ -80,7 +80,7 @@ class platform::dns::dnsmasq::reload {
}
class platform::dns::runtime {
class platform::dns::dnsmasq::runtime {
include ::platform::dns::dnsmasq
class {'::platform::dns::dnsmasq::reload':

View File

@ -139,7 +139,12 @@ class platform::docker::login
{
include ::platform::dockerdistribution::params
Class['::platform::dockerdistribution::compute'] ~> Class[$name]
if $::personality == 'controller' {
Class['::platform::dockerdistribution::config'] ~> Class[$name]
}
else {
Class['::platform::dockerdistribution::compute'] ~> Class[$name]
}
exec { 'docker-login':
command => "/usr/local/sbin/run_docker_login \

View File

@ -391,19 +391,24 @@ class platform::drbd::cephmon::params (
class platform::drbd::cephmon ()
inherits ::platform::drbd::cephmon::params {
include ::platform::ceph::params
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if ((str2bool($::is_controller_active) or str2bool($::is_standalone_controller))
and ! str2bool($::is_node_ceph_configured)) {
# If migrating from AIO SX to DX we want to override
# these properties and handle it as an initial ceph setup
# so DRBD is properly configured
if $::platform::ceph::params::simplex_to_duplex_migration {
$drbd_primary = true
$drbd_initial = true
$drbd_automount = false
} elsif ((str2bool($::is_controller_active) or str2bool($::is_standalone_controller))
and ! str2bool($::is_node_ceph_configured)) {
# Active controller, first time configuration.
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
} elsif str2bool($::is_standalone_controller) {
# Active standalone controller, successive reboots.
$drbd_primary = true
@ -435,6 +440,60 @@ class platform::drbd::cephmon ()
}
}
# DRBD parameters for replicating the Rook/Ceph monitor data between
# controllers: backing device, logical volume, mountpoint and port.
# NOTE(review): $resource_name 'drbd-cephmon' and port '7788' match the
# naming typically used by the cephmon DRBD resource - confirm both
# classes are never applied on the same system at the same time.
class platform::drbd::rookmon::params (
  $device = '/dev/drbd9',
  $lv_name = 'ceph-mon-lv',
  $mountpoint = '/var/lib/ceph/mon-a',
  $port = '7788',
  $resource_name = 'drbd-cephmon',
  $vg_name = 'cgts-vg',
) {}
# Sets up the DRBD-replicated filesystem backing the Rook/Ceph monitor.
# The primary/initial/automount overrides are derived from the node's
# current role (active first-time config, standalone reboot, or other),
# and the resource is only created on AIO duplex systems with the rook
# service enabled.
class platform::drbd::rookmon ()
  inherits ::platform::drbd::rookmon::params {

  include ::platform::rook::params

  $system_mode = $::platform::params::system_mode
  $system_type = $::platform::params::system_type

  if ((str2bool($::is_controller_active) or str2bool($::is_standalone_controller))
      and ! str2bool($::is_node_rook_ceph_configured)) {
    # Active controller, first time configuration.
    $drbd_primary = true
    $drbd_initial = true
    $drbd_automount = true
  } elsif str2bool($::is_standalone_controller) {
    # Active standalone controller, successive reboots.
    $drbd_primary = true
    $drbd_initial = undef
    $drbd_automount = true
  } else {
    # Node unlock, reboot or standby configuration
    # Do not mount ceph
    $drbd_primary = undef
    $drbd_initial = undef
    $drbd_automount = undef
  }

  # Only create the DRBD filesystem on an AIO duplex system when the rook
  # service is enabled.
  if ($platform::rook::params::service_enabled and
      $system_type == 'All-in-one' and 'duplex' in $system_mode) {
    platform::drbd::filesystem { $resource_name:
      vg_name                => $vg_name,
      lv_name                => $lv_name,
      lv_size                => $platform::rook::params::mon_lv_size,
      port                   => $port,
      device                 => $device,
      mountpoint             => $mountpoint,
      resync_after           => undef,
      manage_override        => true,
      ha_primary_override    => $drbd_primary,
      initial_setup_override => $drbd_initial,
      automount_override     => $drbd_automount,
    }
  }
}
class platform::drbd(
$service_enable = false,
@ -464,6 +523,7 @@ class platform::drbd(
include ::platform::drbd::etcd
include ::platform::drbd::dockerdistribution
include ::platform::drbd::cephmon
include ::platform::drbd::rookmon
include ::platform::drbd::trigger_resize_check
# network changes need to be applied prior to DRBD resources
@ -569,5 +629,10 @@ class platform::drbd::cephmon::runtime {
include ::platform::drbd::params
include ::platform::drbd::runtime_service_enable
include ::platform::drbd::cephmon
include ::platform::drbd::trigger_resize_check
}
# Runtime wrapper: applies the rookmon DRBD resource with the DRBD
# service enabled (used for runtime reconfiguration rather than boot).
class platform::drbd::rookmon::runtime {
  include ::platform::drbd::params
  include ::platform::drbd::runtime_service_enable
  include ::platform::drbd::rookmon
}

View File

@ -1,7 +1,9 @@
class platform::etcd::params (
$bind_address = '0.0.0.0',
$bind_address_version = 4,
$port = 2379,
$node = 'controller',
$security_enabled = undef,
)
{
include ::platform::params
@ -42,8 +44,6 @@ class platform::etcd::init (
$service_enabled = false,
) inherits ::platform::etcd::params {
$client_url = "http://${bind_address}:${port}"
if $service_enabled {
$service_ensure = 'running'
}
@ -51,6 +51,32 @@ class platform::etcd::init (
$service_ensure = 'stopped'
}
if $security_enabled {
$client_cert_auth = true
$cert_file = '/etc/etcd/etcd-server.crt'
$key_file = '/etc/etcd/etcd-server.key'
$trusted_ca_file = '/etc/etcd/ca.crt'
if $bind_address_version == $::platform::params::ipv6 {
$client_url = "https://[${bind_address}]:${port}"
}
else {
$client_url = "https://${bind_address}:${port}"
}
}
else {
# This else part can be removed after STX5.0
$client_cert_auth = false
$cert_file = undef
$key_file = undef
$trusted_ca_file = undef
if $bind_address_version == $::platform::params::ipv6 {
$client_url = "http://[${bind_address}]:${port}"
}
else {
$client_url = "http://${bind_address}:${port}"
}
}
class { 'etcd':
ensure => 'present',
etcd_name => $node,
@ -61,6 +87,10 @@ class platform::etcd::init (
advertise_client_urls => $client_url,
data_dir => "${etcd_versioned_dir}/${node}.etcd",
proxy => 'off',
client_cert_auth => $client_cert_auth,
cert_file => $cert_file,
key_file => $key_file,
trusted_ca_file => $trusted_ca_file,
}
}
@ -94,6 +124,95 @@ class platform::etcd::datadir
}
}
# Runtime class applied during platform upgrade to switch etcd to TLS
# with client authentication and repoint kube-apiserver at the secured
# endpoint. On a standby controller it first installs the staged
# certificates; on the active controller it restarts etcd under SM,
# creates the etcd accounts and enables authentication.
class platform::etcd::upgrade::runtime
  inherits ::platform::etcd::params {

  include ::platform::etcd::init

  # Client URL computed by ::platform::etcd::init (scheme depends on
  # whether etcd security is enabled).
  $server_url = $::platform::etcd::init::client_url

  # etcdctl client credentials used by the execs below.
  $etcd_cert = '/etc/etcd/etcd-client.crt'
  $etcd_key = '/etc/etcd/etcd-client.key'
  $etcd_ca = '/etc/etcd/ca.crt'

  if ! str2bool($::is_controller_active) {
    # Standby controller: copy certificates staged under
    # /var/run/platform/config/<version>/etcd, then update kube-apiserver.
    # NOTE(review): ${sw_version} is not declared in this class; presumably
    # resolved from an enclosing or top scope - confirm.
    file { '/etc/etcd/etcd-server.crt':
      ensure  => 'present',
      replace => true,
      source  => "/var/run/platform/config/${sw_version}/etcd/etcd-server.crt",
    }
    -> file { '/etc/etcd/etcd-server.key':
      ensure  => 'present',
      replace => true,
      source  => "/var/run/platform/config/${sw_version}/etcd/etcd-server.key",
    }
    -> file { '/etc/etcd/etcd-client.crt':
      ensure  => 'present',
      replace => true,
      source  => "/var/run/platform/config/${sw_version}/etcd/etcd-client.crt",
    }
    -> file { '/etc/etcd/etcd-client.key':
      ensure  => 'present',
      replace => true,
      source  => "/var/run/platform/config/${sw_version}/etcd/etcd-client.key",
    }
    -> file { '/etc/etcd/ca.crt':
      ensure  => 'present',
      replace => true,
      source  => "/var/run/platform/config/${sw_version}/etcd/ca.crt",
    }
    -> file { '/etc/kubernetes/pki/apiserver-etcd-client.crt':
      ensure  => 'present',
      replace => true,
      source  => "/var/run/platform/config/${sw_version}/etcd/apiserver-etcd-client.crt",
    }
    -> file { '/etc/kubernetes/pki/apiserver-etcd-client.key':
      ensure  => 'present',
      replace => true,
      source  => "/var/run/platform/config/${sw_version}/etcd/apiserver-etcd-client.key",
    }
    -> class { '::platform::kubernetes::master::change_apiserver_parameters':
      etcd_cafile   => '/etc/kubernetes/pki/ca.crt',
      etcd_certfile => '/etc/kubernetes/pki/apiserver-etcd-client.crt',
      etcd_keyfile  => '/etc/kubernetes/pki/apiserver-etcd-client.key',
      etcd_servers  => $server_url,
    }
  }
  else {
    # Active controller: update kube-apiserver, restart etcd via SM, then
    # create the root and apiserver client accounts and enable auth.
    class { '::platform::kubernetes::master::change_apiserver_parameters':
      etcd_cafile   => '/etc/kubernetes/pki/ca.crt',
      etcd_certfile => '/etc/kubernetes/pki/apiserver-etcd-client.crt',
      etcd_keyfile  => '/etc/kubernetes/pki/apiserver-etcd-client.key',
      etcd_servers  => $server_url,
    }
    -> platform::sm::restart {'etcd': }
    -> exec { 'create-etcd-root-account':
      command => "etcdctl --cert-file=${etcd_cert} --key-file=${etcd_key} --ca-file=${etcd_ca} --endpoint=${server_url} \
user add root:sysadmin",
    }
    -> exec { 'create-etcd-user-account':
      command => "etcdctl --cert-file=${etcd_cert} --key-file=${etcd_key} --ca-file=${etcd_ca} --endpoint=${server_url} \
user add apiserver-etcd-client:sysadmin",
    }
    # NOTE(review): returns => [0,1] tolerates a nonzero exit; presumably
    # exit 1 means authentication was already enabled - confirm.
    -> exec { 'enable-etcd-auth':
      command => "etcdctl --cert-file=${etcd_cert} --key-file=${etcd_key} --ca-file=${etcd_ca} --endpoint=${server_url} \
auth enable",
      returns => [0,1]
    }
  }
}
class platform::etcd::datadir::bootstrap
inherits ::platform::etcd::params {
@ -117,6 +236,6 @@ class platform::etcd::bootstrap
Class['::platform::etcd::datadir::bootstrap']
-> Class['::platform::etcd::setup']
-> class { '::platform::etcd::init':
service_enabled => true,
service_enabled => false,
}
}

View File

@ -103,7 +103,6 @@ class platform::firewall::calico::oam::services {
# udp
$sm_port = [2222, 2223]
$ntp_port = [123]
$snmp_port = [161, 162]
$ptp_port = [319, 320]
# tcp
@ -153,7 +152,7 @@ class platform::firewall::calico::oam::services {
}
$t_ip_version = $ip_version
$t_udp_ports = concat($sm_port, $ntp_port, $snmp_port, $ptp_port)
$t_udp_ports = concat($sm_port, $ntp_port, $ptp_port)
$t_tcp_ports = concat($ssh_port,
$fm_port, $nfv_vim_port, $patching_port, $sysinv_port, $sm_api_port,
$kube_apiserver_port, $docker_registry_port, $docker_token_port,
@ -215,7 +214,7 @@ class platform::firewall::calico::oam {
contain ::platform::firewall::calico::oam::endpoints
contain ::platform::firewall::calico::oam::services
Class['::platform::kubernetes::master'] -> Class[$name]
Class['::platform::kubernetes::gate'] -> Class[$name]
Class['::platform::firewall::calico::oam::endpoints']
-> Class['::platform::firewall::calico::oam::services']
}

View File

@ -5,20 +5,21 @@ class platform::fm::params (
$system_name = undef,
$service_create = false,
$service_enabled = true,
$trap_destinations = [],
$sysinv_catalog_info = 'platform:sysinv:internalURL',
$snmp_enabled = 0,
$snmp_trap_server_port = 162,
) { }
class platform::fm::config
inherits ::platform::fm::params {
$trap_dest_str = join($trap_destinations,',')
class { '::fm':
region_name => $region_name,
system_name => $system_name,
trap_destinations => $trap_dest_str,
sysinv_catalog_info => $sysinv_catalog_info,
region_name => $region_name,
system_name => $system_name,
sysinv_catalog_info => $sysinv_catalog_info,
snmp_enabled => $snmp_enabled,
snmp_trap_server_port => $snmp_trap_server_port,
}
}

View File

@ -93,8 +93,7 @@ class platform::helm
if (str2bool($::is_initial_config) and $::personality == 'controller') {
include ::platform::helm::repositories
Class['::platform::kubernetes::master']
Class['::platform::kubernetes::gate']
-> exec { 'restart lighttpd for helm':
require => [File['/etc/lighttpd/lighttpd.conf', $target_helm_repos_base_dir, $source_helm_repos_base_dir]],
command => 'systemctl restart lighttpd.service',
@ -111,3 +110,22 @@ class platform::helm::runtime {
Exec['sm-restart-lighttpd'] -> Class['::platform::helm::repositories']
}
# Creates the PostgreSQL database and user for Helm v2 state.
#
# Parameters:
#   $password   - password for the database user (required).
#   $dbname     - database name, defaults to 'helmv2'.
#   $user       - database user, defaults to 'helmv2'.
#   $encoding   - optional database encoding.
#   $privileges - grants given to the user on the database.
class platform::helm::v2::db::postgresql (
  $password,
  $dbname = 'helmv2',
  $user = 'helmv2',
  $encoding = undef,
  $privileges = 'ALL',
) {
  ::postgresql::server::db { $dbname:
    user     => $user,
    # Hash the password in the PostgreSQL md5 format expected by the module.
    password => postgresql_password($user, $password),
    encoding => $encoding,
    grant    => $privileges,
  }
}
# Bootstrap-time wrapper: provisions the helmv2 database.
class platform::helm::bootstrap {
  include ::platform::helm::v2::db::postgresql
}

View File

@ -17,14 +17,35 @@ class platform::kubernetes::params (
$k8s_topology_mgr_policy = 'best-effort',
$k8s_cni_bin_dir = '/usr/libexec/cni',
$k8s_vol_plugin_dir = '/usr/libexec/kubernetes/kubelet-plugins/volume/exec/',
$k8s_pod_max_pids = '750',
$join_cmd = undef,
$oidc_issuer_url = undef,
$oidc_client_id = undef,
$oidc_username_claim = undef,
$oidc_groups_claim = undef,
$admission_plugins = undef
$admission_plugins = undef,
$etcd_cafile = undef,
$etcd_certfile = undef,
$etcd_keyfile = undef,
$etcd_servers = undef,
) { }
# Manage the /etc/kubernetes/ignore_isolcpus flag file: it exists exactly
# when the host carries the 'kube-ignore-isol-cpus' label.
class platform::kubernetes::configuration {

  # Presence of the label drives presence of the flag file.
  $ensure = ('kube-ignore-isol-cpus' in $::platform::kubernetes::params::host_labels) ? {
    true    => 'present',
    default => 'absent',
  }

  file { '/etc/kubernetes/ignore_isolcpus':
    ensure => $ensure,
    owner  => 'root',
    group  => 'root',
    mode   => '0644',
  }
}
class platform::kubernetes::cgroup::params (
$cgroup_root = '/sys/fs/cgroup',
$cgroup_name = 'k8s-infra',
@ -115,6 +136,7 @@ class platform::kubernetes::kubeadm {
$k8s_vol_plugin_dir = $::platform::kubernetes::params::k8s_vol_plugin_dir
$k8s_cpu_mgr_policy = $::platform::kubernetes::params::k8s_cpu_mgr_policy
$k8s_topology_mgr_policy = $::platform::kubernetes::params::k8s_topology_mgr_policy
$k8s_pod_max_pids = $::platform::kubernetes::params::k8s_pod_max_pids
$iptables_file = "net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1"
@ -198,15 +220,18 @@ class platform::kubernetes::kubeadm {
group => 'root',
mode => '0700',
}
# Start kubelet.
-> service { 'kubelet':
enable => true,
}
# A seperate enable is required since we have modified the service resource
# to never enable services.
-> exec { 'enable-kubelet':
command => '/usr/bin/systemctl enable kubelet.service',
}
# Start kubelet if it is standard controller.
if !str2bool($::is_worker_subfunction) {
File['/etc/kubernetes/manifests']
-> service { 'kubelet':
enable => true,
}
}
}
class platform::kubernetes::master::init
@ -310,6 +335,7 @@ class platform::kubernetes::master
contain ::platform::kubernetes::master::init
contain ::platform::kubernetes::coredns
contain ::platform::kubernetes::firewall
contain ::platform::kubernetes::configuration
Class['::platform::sysctl::controller::reserve_ports'] -> Class[$name]
Class['::platform::etcd'] -> Class[$name]
@ -318,7 +344,8 @@ class platform::kubernetes::master
# Ensure DNS is configured as name resolution is required when
# kubeadm init is run.
Class['::platform::dns'] -> Class[$name]
Class['::platform::kubernetes::kubeadm']
Class['::platform::kubernetes::configuration']
-> Class['::platform::kubernetes::kubeadm']
-> Class['::platform::kubernetes::cgroup']
-> Class['::platform::kubernetes::master::init']
-> Class['::platform::kubernetes::coredns']
@ -334,19 +361,16 @@ class platform::kubernetes::worker::init
if str2bool($::is_initial_config) {
include ::platform::dockerdistribution::params
# Get the pause image tag from kubeadm required images
# list and replace with local registry
$get_k8s_pause_img = "kubeadm --kubeconfig=/etc/kubernetes/admin.conf config images list 2>/dev/null |\
awk '/^k8s.gcr.io\\/pause:/{print \$1}' | sed 's#k8s.gcr.io#registry.local:9001\\/k8s.gcr.io#'"
$k8s_pause_img = generate('/bin/sh', '-c', $get_k8s_pause_img)
if k8s_pause_img {
exec { 'load k8s pause image by containerd':
command => "crictl pull --creds ${::platform::dockerdistribution::params::registry_username}:${::platform::dockerdistribution::params::registry_password} ${k8s_pause_img}", # lint:ignore:140chars
logoutput => true,
before => Exec['configure worker node']
}
# Pull pause image tag from kubeadm required images list for this version
# kubeadm config images list does not use the --kubeconfig argument
# and admin.conf will not exist on a pure worker, and kubelet.conf will not
# exist until after a join.
$local_registry_auth = "${::platform::dockerdistribution::params::registry_username}:${::platform::dockerdistribution::params::registry_password}" # lint:ignore:140chars
exec { 'load k8s pause image by containerd':
# splitting this command over multiple lines appears to break puppet-lint
command => "kubeadm config images list --kubernetes-version ${version} --image-repository=registry.local:9001/k8s.gcr.io 2>/dev/null | grep k8s.gcr.io/pause: | xargs -i crictl pull --creds ${local_registry_auth} {}", # lint:ignore:140chars
logoutput => true,
before => Exec['configure worker node'],
}
}
@ -388,7 +412,6 @@ class platform::kubernetes::worker::pci
$pcidp_resources = undef,
) {
include ::platform::kubernetes::params
include ::platform::kubernetes::worker::sriovdp
file { '/etc/pcidp':
ensure => 'directory',
@ -405,6 +428,11 @@ class platform::kubernetes::worker::pci
}
}
# Runtime wrapper: applies the PCI device-plugin config together with
# the SR-IOV device plugin handling.
class platform::kubernetes::worker::pci::runtime {
  include ::platform::kubernetes::worker::pci
  include ::platform::kubernetes::worker::sriovdp
}
class platform::kubernetes::worker::sriovdp {
include ::platform::kubernetes::params
include ::platform::params
@ -412,16 +440,9 @@ class platform::kubernetes::worker::sriovdp {
if ($::personality == 'controller') and
str2bool($::is_worker_subfunction)
and ('sriovdp' in $host_labels) {
# In an AIO system, it's possible for the device plugin pods to start
# before the device VFs are bound to a driver. Deleting the device
# plugin pods will cause them to be recreated by the daemonset and
# allow them to re-scan the set of matching device ids/drivers
# specified in the /etc/pcidp/config.json file.
# This may be mitigated by moving to helm + configmap for the device
# plugin.
exec { 'Delete sriov device plugin pod if present':
path => '/usr/bin:/usr/sbin:/bin',
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod -n kube-system --selector=app=sriovdp --field-selector spec.nodeName=$(hostname) --timeout=60s', # lint:ignore:140chars
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pod -n kube-system --selector=app=sriovdp --field-selector spec.nodeName=$(hostname) --timeout=360s', # lint:ignore:140chars
onlyif => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods -n kube-system --selector=app=sriovdp --field-selector spec.nodeName=$(hostname) | grep kube-sriov-device-plugin', # lint:ignore:140chars
logoutput => true,
}
@ -437,18 +458,18 @@ class platform::kubernetes::worker
contain ::platform::kubernetes::kubeadm
contain ::platform::kubernetes::cgroup
contain ::platform::kubernetes::worker::init
contain ::platform::kubernetes::configuration
Class['::platform::kubernetes::kubeadm']
Class['::platform::kubernetes::configuration']
-> Class['::platform::kubernetes::kubeadm']
-> Class['::platform::kubernetes::cgroup']
-> Class['::platform::kubernetes::worker::init']
} else {
# Reconfigure cgroups cpusets on AIO
contain ::platform::kubernetes::cgroup
}
# Add refresh dependency for kubelet for hugepage allocation
Class['::platform::compute::allocate']
~> service { 'kubelet':
}
# Enable kubelet on AIO and worker nodes.
Class['::platform::compute::allocate']
-> service { 'kubelet':
enable => true,
}
# TODO: The following exec is a workaround. Once kubernetes becomes the
@ -462,6 +483,25 @@ class platform::kubernetes::worker
contain ::platform::kubernetes::worker::pci
}
# Applies both master and worker kubernetes configuration on an
# All-in-one node, ordering master strictly before worker, with this
# class itself last so others can gate on it.
class platform::kubernetes::aio
  inherits ::platform::kubernetes::params {

  include ::platform::kubernetes::master
  include ::platform::kubernetes::worker

  Class['::platform::kubernetes::master']
  -> Class['::platform::kubernetes::worker']
  -> Class[$name]
}
# Synchronization point: completes only after kubernetes configuration is
# done, regardless of node type. Classes that must run after kubernetes
# setup can depend on this gate instead of picking master vs aio.
class platform::kubernetes::gate {
  if $::platform::params::system_type == 'All-in-one' {
    # AIO nodes finish kubernetes setup via the combined aio class.
    Class['::platform::kubernetes::aio'] -> Class[$name]
  } else {
    # Standard controllers finish via the master class.
    Class['::platform::kubernetes::master'] -> Class[$name]
  }
}
class platform::kubernetes::coredns {
include ::platform::params
@ -607,6 +647,7 @@ class platform::kubernetes::upgrade_first_control_plane
class platform::kubernetes::upgrade_control_plane
inherits ::platform::kubernetes::params {
# control plane is only upgraded on a controller (which has admin.conf)
exec { 'upgrade control plane':
command => 'kubeadm --kubeconfig=/etc/kubernetes/admin.conf upgrade node',
logoutput => true,
@ -626,22 +667,19 @@ class platform::kubernetes::worker::upgrade_kubelet
include ::platform::dockerdistribution::params
# Get the pause image tag from kubeadm required images
# list and replace with local registry
$get_k8s_pause_img = "kubeadm --kubeconfig=/etc/kubernetes/admin.conf config images list 2>/dev/null |\
awk '/^k8s.gcr.io\\/pause:/{print \$1}' | sed 's#k8s.gcr.io#registry.local:9001\\/k8s.gcr.io#'"
$k8s_pause_img = generate('/bin/sh', '-c', $get_k8s_pause_img)
# workers use kubelet.conf rather than admin.conf
$local_registry_auth = "${::platform::dockerdistribution::params::registry_username}:${::platform::dockerdistribution::params::registry_password}" # lint:ignore:140chars
if k8s_pause_img {
exec { 'load k8s pause image':
command => "crictl pull --creds ${::platform::dockerdistribution::params::registry_username}:${::platform::dockerdistribution::params::registry_password} ${k8s_pause_img}", # lint:ignore:140chars
logoutput => true,
before => Exec['upgrade kubelet']
}
# Pull the pause image tag from kubeadm required images list for this version
exec { 'pull pause image':
# spltting this command over multiple lines will break puppet-lint for later violations
command => "kubeadm --kubeconfig=/etc/kubernetes/kubelet.conf config images list --kubernetes-version ${upgrade_to_version} --image-repository=registry.local:9001/k8s.gcr.io 2>/dev/null | grep k8s.gcr.io/pause: | xargs -i crictl pull --creds ${local_registry_auth} {}", # lint:ignore:140chars
logoutput => true,
before => Exec['upgrade kubelet'],
}
exec { 'upgrade kubelet':
command => 'kubeadm --kubeconfig=/etc/kubernetes/admin.conf upgrade node',
command => 'kubeadm --kubeconfig=/etc/kubernetes/kubelet.conf upgrade node',
logoutput => true,
}
@ -650,8 +688,12 @@ class platform::kubernetes::worker::upgrade_kubelet
}
}
class platform::kubernetes::master::change_apiserver_parameters
inherits ::platform::kubernetes::params {
class platform::kubernetes::master::change_apiserver_parameters (
$etcd_cafile = $platform::kubernetes::params::etcd_cafile,
$etcd_certfile = $platform::kubernetes::params::etcd_certfile,
$etcd_keyfile = $platform::kubernetes::params::etcd_keyfile,
$etcd_servers = $platform::kubernetes::params::etcd_servers,
) inherits ::platform::kubernetes::params {
$configmap_temp_file = '/tmp/cluster_configmap.yaml'
$configview_temp_file = '/tmp/kubeadm_config_view.yaml'
@ -659,7 +701,6 @@ class platform::kubernetes::master::change_apiserver_parameters
exec { 'update kube-apiserver params':
command => template('platform/kube-apiserver-change-params.erb')
}
}
class platform::kubernetes::certsans::runtime

View File

@ -112,6 +112,10 @@ class platform::ldap::client
}
}
# Runtime wrapper allowing the LDAP client configuration to be re-applied
# outside of the boot manifests.
class platform::ldap::client::runtime {
  include ::platform::ldap::client
}
class platform::ldap::bootstrap
inherits ::platform::ldap::params {
include ::platform::params

View File

@ -90,12 +90,12 @@ class platform::lvm::controller::vgs {
class platform::lvm::controller
inherits ::platform::lvm::params {
::platform::lvm::global_filter { 'transition filter':
::platform::lvm::global_filter { 'transition filter controller':
filter => $transition_filter,
before => Class['::platform::lvm::controller::vgs']
}
::platform::lvm::global_filter { 'final filter':
::platform::lvm::global_filter { 'final filter controller':
filter => $final_filter,
require => Class['::platform::lvm::controller::vgs']
}
@ -121,12 +121,12 @@ class platform::lvm::compute::vgs {
class platform::lvm::compute
inherits ::platform::lvm::params {
::platform::lvm::global_filter { 'transition filter':
::platform::lvm::global_filter { 'transition filter compute':
filter => $transition_filter,
before => Class['::platform::lvm::compute::vgs']
}
::platform::lvm::global_filter { 'final filter':
::platform::lvm::global_filter { 'final filter compute':
filter => $final_filter,
require => Class['::platform::lvm::compute::vgs']
}
@ -140,6 +140,20 @@ class platform::lvm::compute::runtime {
include ::platform::lvm::compute
}
###############
# AIO
###############
# LVM configuration for All-in-one nodes: applies both the controller and
# compute LVM filters, controller first, before worker storage setup.
class platform::lvm::aio
  inherits ::platform::lvm::params {

  include ::platform::lvm::controller
  include ::platform::lvm::compute

  Class['::platform::lvm::controller']
  -> Class['::platform::lvm::compute']
  -> Class['::platform::worker::storage']
}
###############
# Storage Hosts
###############

View File

@ -187,6 +187,7 @@ define platform::interfaces::sriov_enable (
if ($num_vfs != undef) and ($num_vfs > 0) {
exec { "sriov-enable-device: ${title}":
command => template('platform/sriov.enable-device.erb'),
onlyif => "[ $(cat /sys/bus/pci/devices/${addr}/${vf_file}) != ${num_vfs} ]",
logoutput => true,
}
}
@ -195,12 +196,13 @@ define platform::interfaces::sriov_enable (
define platform::interfaces::sriov_bind (
$addr,
$driver
$driver,
$vfnumber = undef,
$max_tx_rate = undef
) {
if ($driver != undef) {
ensure_resource(kmod::load, $driver)
Anchor['platform::networking']
-> exec { "sriov-vf-bind-device: ${title}":
exec { "sriov-vf-bind-device: ${title}":
command => template('platform/sriov.bind-device.erb'),
logoutput => true,
require => [ Kmod::Load[$driver] ],
@ -227,21 +229,57 @@ define platform::interfaces::sriov_vf_bind (
create_resources('platform::interfaces::sriov_bind', $vf_config, {})
}
class platform::interfaces::sriov (
$sriov_config = {},
$runtime = false
define platform::interfaces::sriov_ratelimit (
$addr,
$driver,
$port_name,
$vfnumber = undef,
$max_tx_rate = undef
) {
if $runtime {
create_resources('platform::interfaces::sriov_enable', $sriov_config, {})
} else {
create_resources('platform::interfaces::sriov_vf_bind', $sriov_config, {})
Platform::Interfaces::Sriov_vf_bind <| |> -> Class['::platform::kubernetes::worker::sriovdp']
if $max_tx_rate {
exec { "sriov-vf-rate-limit: ${title}":
command => template('platform/sriov.ratelimit.erb'),
logoutput => true,
tries => 5,
try_sleep => 1,
}
}
}
# Fans out per-VF rate-limit resources from a device's VF config hash,
# passing the parent port name down to each sriov_ratelimit resource.
# NOTE(review): $addr, $device_id and $num_vfs are accepted but unused in
# this body - presumably they mirror the schema of the sriov config hash
# used by create_resources callers; confirm before removing.
define platform::interfaces::sriov_vf_ratelimit (
  $addr,
  $device_id,
  $num_vfs,
  $port_name,
  $vf_config
) {
  create_resources('platform::interfaces::sriov_ratelimit', $vf_config, {port_name => $port_name})
}
# Holds the hiera-supplied SR-IOV interface configuration hash; carries no
# resources itself and is inherited by the enable/config subclasses.
class platform::interfaces::sriov (
  $sriov_config = {}
) {
}
# Creates the VFs for each configured SR-IOV interface (one
# sriov_enable resource per entry in $sriov_config).
class platform::interfaces::sriov::enable
  inherits platform::interfaces::sriov {
  create_resources('platform::interfaces::sriov_enable', $sriov_config, {})
}
# Binds drivers and applies rate limits for the configured SR-IOV VFs.
# Runs only after basic networking is up (anchor ordering).
class platform::interfaces::sriov::config
  inherits platform::interfaces::sriov {
  Anchor['platform::networking'] -> Class[$name]
  create_resources('platform::interfaces::sriov_vf_bind', $sriov_config, {})
  create_resources('platform::interfaces::sriov_vf_ratelimit', $sriov_config, {})
}
class platform::interfaces::sriov::runtime {
class { 'platform::interfaces::sriov': runtime => true }
include ::platform::interfaces::sriov::enable
}
# Runtime wrapper to (re)apply VF driver binding and rate limiting.
class platform::interfaces::sriov::vf::runtime {
  include ::platform::interfaces::sriov::config
}
@ -325,7 +363,9 @@ class platform::network (
class platform::network::runtime {
include ::platform::network::apply
class {'::platform::network::apply':
stage => pre
}
}

View File

@ -39,6 +39,19 @@ class platform::nfv {
include ::nfv::event_log
}
# Restarts the VIM webserver service through service management.
class platform::nfv::webserver::reload {
  platform::sm::restart {'vim-webserver': }
}
# Runtime wrapper: re-applies the NFV configuration and then restarts the
# VIM webserver in the 'post' stage so config changes take effect.
class platform::nfv::webserver::runtime {
  include ::platform::nfv

  class {'::platform::nfv::webserver::reload':
    stage => post
  }
}
class platform::nfv::reload {
platform::sm::restart {'vim': }

View File

@ -1,7 +1,7 @@
class platform::patching::params (
$private_port = 5491,
$public_port = 15491,
$server_timeout = '300s',
$server_timeout = '600s',
$region_name = undef,
$service_create = false,
) { }
@ -70,6 +70,7 @@ class platform::patching::haproxy
public_ip_address => $::platform::haproxy::params::private_ip_address,
public_port => $private_port + 1,
private_port => $private_port,
server_timeout => $server_timeout,
}
}
}

View File

@ -7,12 +7,20 @@ class platform::postgresql::params
$data_dir = "${root_dir}/${::platform::params::software_version}"
$password = undef
include ::platform::network::mgmt::params
if $::platform::network::mgmt::params::subnet_version == $::platform::params::ipv6 {
$ip_mask_allow_all_users = '::0/0'
$ip_mask_deny_postgres_user = '::0/128'
} else {
$ip_mask_allow_all_users = '0.0.0.0/0'
$ip_mask_deny_postgres_user = '0.0.0.0/32'
}
}
class platform::postgresql::server (
$ipv4acl = undef,
) inherits ::platform::postgresql::params {
class platform::postgresql::server
inherits ::platform::postgresql::params {
include ::platform::params
@ -100,8 +108,9 @@ class platform::postgresql::server (
}
-> class {'::postgresql::server':
ip_mask_allow_all_users => $ipv4acl,
service_ensure => 'stopped',
ip_mask_allow_all_users => $ip_mask_allow_all_users,
ip_mask_deny_postgres_user => $ip_mask_deny_postgres_user,
service_ensure => 'stopped',
}
}
@ -154,6 +163,8 @@ class platform::postgresql::bootstrap
}
-> class {'::postgresql::server':
ip_mask_allow_all_users => $ip_mask_allow_all_users,
ip_mask_deny_postgres_user => $ip_mask_deny_postgres_user
}
# Allow local postgres user as trusted for simplex upgrade scripts
@ -185,12 +196,15 @@ class platform::postgresql::upgrade
}
-> class {'::postgresql::server':
ip_mask_allow_all_users => $ip_mask_allow_all_users,
ip_mask_deny_postgres_user => $ip_mask_deny_postgres_user
}
include ::barbican::db::postgresql
include ::sysinv::db::postgresql
include ::keystone::db::postgresql
include ::fm::db::postgresql
include ::platform::helm::v2::db::postgresql
}
class platform::postgresql::sc::configured {
@ -212,6 +226,8 @@ class platform::postgresql::sc::runtime
}
-> class {'::postgresql::server':
ip_mask_allow_all_users => $ip_mask_allow_all_users,
ip_mask_deny_postgres_user => $ip_mask_deny_postgres_user
}
include ::platform::dcmanager::runtime

View File

@ -0,0 +1,90 @@
# Tunables for the Rook (containerized Ceph) storage backend.
class platform::rook::params(
# True once a Rook storage backend is provisioned on this system.
$service_enabled = false,
# Size of the monitor logical volume (units not shown here; presumably GiB
# — TODO confirm against the caller that supplies this value).
$mon_lv_size = 20,
# Flag file created when this node's Rook/Ceph configuration has completed.
$node_rook_ceph_configured_flag = '/etc/platform/.node_rook_ceph_configured',
) { }
# Activate the LVM volume group named after the resource title so that its
# device-mapper nodes appear under /dev/<vg>.
define platform::rook::mapping {
exec { 'enable volume group device mapper mapping':
# Best effort: '|| true' keeps catalog application going even if the VG
# cannot be activated.
command => "vgchange -ay ${name} || true",
# Only run when the VG's device directory is not present yet.
onlyif => "test ! -d /dev/${name}",
}
}
# Mount a backing path onto a Rook data directory (best effort).
#   $disk_node - device node used only to detect an existing mount
#   $data_path - source path handed to mount(8)
#   $directory - target mount point
define platform_rook_directory(
$disk_node,
$data_path,
$directory,
) {
exec { "mount ${disk_node}":
# NOTE(review): plain substring grep — a ${disk_node} that is a prefix of
# another mounted device (e.g. sda vs sda1) would suppress this mount;
# confirm the values passed in cannot collide.
unless => "mount | grep -q ${disk_node}",
# Best effort: '|| true' avoids failing the catalog if the mount fails.
command => "mount ${data_path} ${directory} || true",
}
}
# Instantiate one platform_rook_directory resource per entry in $dir_config
# (a hash of resource titles to parameter hashes; presumably hiera-supplied
# — confirm against the data source).
class platform::rook::directories(
$dir_config = {},
) inherits ::platform::rook::params {
create_resources('platform_rook_directory', $dir_config)
}
# Activate the listed LVM volume groups used by Rook via
# platform::rook::mapping (one resource per name in $vg_name).
class platform::rook::vg::rook_vg(
$vg_name = [],
) inherits platform::rook::params {
::platform::rook::mapping { $vg_name:
}
}
# Aggregate: prepare Rook storage on this node — data directory mounts and
# volume-group activation.
class platform::rook::storage {
include ::platform::rook::directories
include ::platform::rook::vg::rook_vg
}
# Post-configuration step: once the Rook service is enabled, drop the
# per-node "configured" flag file so later runs can detect completion.
class platform::rook::post
inherits ::platform::rook::params {
if $service_enabled {
# Ceph configuration on this node is done
file { $node_rook_ceph_configured_flag:
ensure => present
}
}
}
# Base node preparation for Rook: pre-creates the monitor data directory and,
# on AIO duplex systems, orders DRBD resources ahead of this class.
class platform::rook_base
inherits ::platform::ceph::params {
# NOTE(review): this inherits the *ceph* params, so $service_enabled below is
# the Ceph flag — while platform::sm gates rook provisioning on
# $::platform::rook::params::service_enabled. Confirm the inheritance is
# intentional and not a copy-paste from a ceph class.
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $service_enabled {
# Monitor 'a' data directory must exist before Rook brings up the mon.
file { '/var/lib/ceph/mon-a':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
# ensure DRBD config init only once
Drbd::Resource <| |> -> Class[$name]
}
# Flag-file creation is deferred to the 'post' run stage.
class { '::platform::rook::post':
stage => post
}
}
}
# Top-level entry point: full Rook setup (storage preparation + base config).
class platform::rook {
include ::platform::rook::storage
include ::platform::rook_base
}
# Runtime entry point: re-applies only the base Rook configuration (no
# storage directory/VG preparation) on a live node.
class platform::rook::runtime {
include ::platform::rook_base
}

View File

@ -88,11 +88,20 @@ class platform::sm
$helmrepo_fs_source_dir = $::platform::helm::repositories::params::source_helm_repos_base_dir
$helmrepo_fs_target_dir = $::platform::helm::repositories::params::target_helm_repos_base_dir
include ::platform::deviceimage::params
$deviceimage_fs_source_dir = $::platform::deviceimage::params::source_deviceimage_base_dir
$deviceimage_fs_target_dir = $::platform::deviceimage::params::target_deviceimage_base_dir
include ::platform::drbd::cephmon::params
$cephmon_drbd_resource = $::platform::drbd::cephmon::params::resource_name
$cephmon_fs_device = $::platform::drbd::cephmon::params::device
$cephmon_fs_directory = $::platform::drbd::cephmon::params::mountpoint
include ::platform::rook::params
$rookmon_drbd_resource = $::platform::drbd::rookmon::params::resource_name
$rookmon_fs_device = $::platform::drbd::rookmon::params::device
$rookmon_fs_directory = $::platform::drbd::rookmon::params::mountpoint
include ::openstack::keystone::params
$keystone_api_version = $::openstack::keystone::params::api_version
$keystone_identity_uri = $::openstack::keystone::params::identity_uri
@ -164,6 +173,7 @@ class platform::sm
# Ceph-Rados-Gateway
include ::platform::ceph::params
$ceph_configured = $::platform::ceph::params::service_enabled
$rook_configured = $::platform::rook::params::service_enabled
$rgw_configured = $::platform::ceph::params::rgw_enabled
if $system_mode == 'simplex' {
@ -414,6 +424,17 @@ class platform::sm
command => "sm-configure service_instance etcd-fs etcd-fs \"device=${etcd_fs_device},directory=${etcd_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
}
# Configure device image repository
exec { 'Provision device-image-fs (service-group-member)':
command => 'sm-provision service-group-member controller-services device-image-fs',
}
-> exec { 'Provision device-image-fs (service)':
command => 'sm-provision service device-image-fs',
}
-> exec { 'Configure Device Image Repository FileSystem':
command => "sm-configure service_instance device-image-fs device-image-fs \"device=${deviceimage_fs_source_dir},directory=${deviceimage_fs_target_dir},options=bind,noatime,nodiratime,fstype=ext4,check_level=20\"",
}
# TODO: region code needs to be revisited
if $region_config {
# In a default Multi-Region configuration, Keystone is running as a
@ -597,6 +618,55 @@ class platform::sm
command => "sm-configure service_group yes controller distributed-cloud-services N 1 0 \"\" \"\"",
}
}
} else {
exec { 'Provision oam-ip service group member':
command => 'sm-provision service-group-member oam-services oam-ip',
}
-> exec { 'Provision oam-ip service':
command => 'sm-provision service oam-ip',
}
exec { 'Configure oam-service redundancy model to DX':
command => "sm-configure service_group yes controller oam-services 'N + M' 1 1 \"controller-aggregate\" directory-services",
}
exec { 'Configure controller-services redundancy model to DX':
command => "sm-configure service_group yes controller controller-services 'N + M' 1 1 \"controller-aggregate\" directory-services",
}
exec { 'Configure cloud-services redundancy model to DX':
command => "sm-configure service_group yes controller cloud-services 'N + M' 1 1 \"controller-aggregate\" directory-services",
}
exec { 'Configure vim-services redundancy model to DX':
command => "sm-configure service_group yes controller vim-services 'N + M' 1 1 \"controller-aggregate\" directory-services",
}
exec { 'Configure patching-services redundancy model to DX':
command => "sm-configure service_group yes controller patching-services 'N + M' 1 1 \"\" \"\"",
}
exec { 'Configure directory-services redundancy model to DX':
command => "sm-configure service_group yes controller directory-services N 2 0 \"\" \"\"",
}
exec { 'Configure web-services redundancy model to DX':
command => "sm-configure service_group yes controller web-services N 2 0 \"\" \"\"",
}
exec { 'Configure storage-services redundancy model to DX':
command => "sm-configure service_group yes controller storage-services N 2 0 \"\" \"\"",
}
exec { 'Configure storage-monitoring-services redundancy model to DX':
command => "sm-configure service_group yes controller storage-monitoring-services 'N + M' 1 1 \"\" \"\"",
}
if $::platform::params::distributed_cloud_role == 'subcloud' {
exec { 'Configure distributed-cloud-services redundancy model to DX':
command => "sm-configure service_group yes controller distributed-cloud-services 'N + M' 1 1 \"controller-aggregate\" \"\"",
}
}
}
exec { 'Provision extension-fs (service-group-member)':
@ -725,6 +795,17 @@ class platform::sm
-> exec { 'Configure ceph-osd':
command => "sm-configure service_instance ceph-osd ceph-osd \"\"",
}
Exec['Provision service-group storage-monitoring-services']
-> exec { 'Configure Rookmon DRBD':
command => "sm-configure service_instance drbd-rookmon drbd-rookmon:${hostunit} \"drbd_resource=${rookmon_drbd_resource}\"",
}
-> exec { 'Configure Rookmon FileSystem':
command => "sm-configure service_instance rookmon-fs rookmon-fs \"device=${rookmon_fs_device},directory=${rookmon_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
}
-> exec { 'Configure Rook mon exit':
command => "sm-configure service_instance rook-mon-exit rook-mon-exit \"\"",
}
}
@ -782,6 +863,20 @@ class platform::sm
}
}
if $rook_configured {
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
exec { 'Provision Rookmon FS in SM (service-group-member rookmon-fs)':
command => 'sm-provision service-group-member controller-services rookmon-fs',
}
-> exec { 'Provision Rookmon DRBD in SM (service-group-member drbd-rookmon)':
command => 'sm-provision service-group-member controller-services drbd-rookmon',
}
-> exec { 'Provision Rook-mon-exit in SM (service-group-member rook-mon-exit)':
command => 'sm-provision service-group-member controller-services rook-mon-exit',
}
}
}
# Ceph-Rados-Gateway
if $rgw_configured {
exec {'Provision Ceph-Rados-Gateway (service-group-member ceph-radosgw)':
@ -832,6 +927,12 @@ class platform::sm
-> exec { 'Provision DCManager-Audit in SM (service dcmanager-audit)':
command => 'sm-provision service dcmanager-audit',
}
-> exec { 'Provision DCManager-Audit-Worker (service-group-member dcmanager-audit-worker)':
command => 'sm-provision service-group-member distributed-cloud-services dcmanager-audit-worker',
}
-> exec { 'Provision DCManager-Audit-Worker in SM (service dcmanager-audit-worker)':
command => 'sm-provision service dcmanager-audit-worker',
}
-> exec { 'Provision DCManager-Orchestrator (service-group-member dcmanager-orchestrator)':
command => 'sm-provision service-group-member distributed-cloud-services dcmanager-orchestrator',
}
@ -886,6 +987,9 @@ class platform::sm
-> exec { 'Configure Platform - DCManager-Audit':
command => "sm-configure service_instance dcmanager-audit dcmanager-audit \"\"",
}
-> exec { 'Configure Platform - DCManager-Audit-Worker':
command => "sm-configure service_instance dcmanager-audit-worker dcmanager-audit-worker \"\"",
}
-> exec { 'Configure Platform - DCManager-Orchestrator':
command => "sm-configure service_instance dcmanager-orchestrator dcmanager-orchestrator \"\"",
}
@ -918,6 +1022,72 @@ class platform::sm
# lint:endignore:140chars
}
class platform::sm::update_oam_config::runtime {
$system_mode = $::platform::params::system_mode
# lint:ignore:140chars
if $system_mode == 'simplex' {
include ::platform::network::oam::params
$oam_my_unit_ip = $::platform::network::oam::params::controller_address
$oam_ip_interface = $::platform::network::oam::params::interface_name
$oam_ip_param_ip = $::platform::network::oam::params::controller_address
$oam_ip_param_mask = $::platform::network::oam::params::subnet_prefixlen
exec { 'pmon-stop-sm-api':
command => 'pmon-stop sm-api',
}
-> exec { 'pmon-stop-sm':
command => 'pmon-stop sm'
}
# remove previous active DB and its journaling files
-> file { '/var/run/sm/sm.db':
ensure => absent
}
-> file { '/var/run/sm/sm.db-shm':
ensure => absent
}
-> file { '/var/run/sm/sm.db-wal':
ensure => absent
}
# will write the config values to /var/lib/sm/sm.db
-> exec { 'Configure OAM IP':
command => "sm-configure service_instance oam-ip oam-ip \"ip=${oam_ip_param_ip},cidr_netmask=${oam_ip_param_mask},nic=${oam_ip_interface},arp_count=7\"",
}
-> exec { 'Configure OAM Interface':
command => "sm-configure interface controller oam-interface \"\" ${oam_my_unit_ip} 2222 2223 \"\" 2222 2223",
}
-> exec { 'pmon-start-sm':
# will copy /var/lib/sm/sm.db to /var/run/sm/sm.db
command => 'pmon-start sm'
}
-> exec { 'pmon-start-sm-api':
command => 'pmon-start sm-api'
}
# the services below need to be restarted after, but wait for them to reach enabled-active
-> exec {'wait-for-haproxy':
command => '[ $(sm-query service haproxy | grep -c ".*enabled-active.*") -eq 1 ]',
tries => 15,
try_sleep => 1,
}
-> exec {'wait-for-vim-webserver':
command => '[ $(sm-query service vim-webserver | grep -c ".*enabled-active.*") -eq 1 ]',
tries => 15,
try_sleep => 1,
}
-> exec {'wait-for-registry-token-server':
command => '[ $(sm-query service registry-token-server | grep -c ".*enabled-active.*") -eq 1 ]',
tries => 15,
try_sleep => 1,
}
-> exec {'wait-for-registry-docker-distribution':
command => '[ $(sm-query service docker-distribution | grep -c ".*enabled-active.*") -eq 1 ]',
tries => 15,
try_sleep => 1,
}
}
# lint:endignore:140chars
}
define platform::sm::restart {
exec {"sm-restart-${name}":
@ -1027,6 +1197,8 @@ class platform::sm::rgw::runtime {
class platform::sm::ceph::runtime {
$ceph_configured = $::platform::ceph::params::service_enabled
include ::platform::rook::params
$rook_configured = $::platform::rook::params::service_enabled
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
@ -1055,4 +1227,18 @@ class platform::sm::ceph::runtime {
command => 'sm-provision service-group-member storage-monitoring-services ceph-manager --apply',
}
}
if $rook_configured {
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
exec { 'Provision Cephmon FS in SM (service-group-member cephmon-fs)':
command => 'sm-provision service-group-member controller-services rookmon-fs --apply',
}
-> exec { 'Provision Cephmon DRBD in SM (service-group-member drbd-cephmon':
command => 'sm-provision service-group-member controller-services drbd-rookmon --apply',
}
-> exec { 'Provision Rook-mon-exit in SM (service-group-member rook-mon-exit)':
command => 'sm-provision service-group-member controller-services rook-mon-exit --apply',
}
}
}
}

View File

@ -1,28 +0,0 @@
class platform::snmp::params (
$community_strings = [],
$trap_destinations = [],
$system_name = '',
$system_location = '?',
$system_contact = '?',
$system_info = '',
$software_version = '',
) { }
class platform::snmp::runtime
inherits ::platform::snmp::params {
$software_version = $::platform::params::software_version
$system_info = $::system_info
file { '/etc/snmp/snmpd.conf':
ensure => 'present',
replace => true,
content => template('platform/snmpd.conf.erb')
}
# send HUP signal to snmpd if it is running
-> exec { 'notify-snmp':
command => '/usr/bin/pkill -HUP snmpd',
onlyif => 'ps -ef | pgrep snmpd'
}
}

View File

@ -10,11 +10,6 @@ class platform::sysctl
$ip_version = $::platform::network::mgmt::params::subnet_version
# Increase min_free_kbytes to 128 MiB from 88 MiB, helps prevent OOM
sysctl::value { 'vm.min_free_kbytes':
value => '131072'
}
# Set sched_nr_migrate to standard linux default
sysctl::value { 'kernel.sched_nr_migrate':
value => '8',
@ -119,6 +114,11 @@ class platform::sysctl::controller
include ::platform::sysctl
include ::platform::sysctl::controller::reserve_ports
# Increase min_free_kbytes to 128 MiB from 88 MiB, helps prevent OOM
sysctl::value { 'vm.min_free_kbytes':
value => '131072'
}
# Engineer VM page cache tunables to prevent significant IO delays that may
# occur if we flush a buildup of dirty pages. Engineer VM settings to make
# writebacks more regular. Note that Linux default proportion of page cache that
@ -164,11 +164,21 @@ class platform::sysctl::controller
class platform::sysctl::compute {
include ::platform::sysctl
# Increase min_free_kbytes to 128 MiB from 88 MiB, helps prevent OOM
sysctl::value { 'vm.min_free_kbytes':
value => '131072'
}
}
class platform::sysctl::storage {
include ::platform::sysctl
# Increase min_free_kbytes to 256 MiB for storage node, helps prevent OOM
sysctl::value { 'vm.min_free_kbytes':
value => '262144'
}
}

View File

@ -0,0 +1,56 @@
/bin/true # puppet requires this for correct template parsing
# Migration helper (AIO-SX -> AIO-DX): extend the Ceph crushmap with a host
# bucket for controller-1 and rewrite the replication rule so replicas are
# placed on distinct hosts rather than distinct OSDs.
set -x

compiled_map=/tmp/crushmap.bin
decompiled_map=/tmp/crushmap.rule
edited_map=/tmp/new-crushmap.rule
recompiled_map=/tmp/new-crushmap.bin

# Create the controller-1 host bucket; a failure here usually means the
# cluster monitors are unreachable.
if ! timeout 10 ceph osd crush add-bucket controller-1 host; then
    echo "Ceph is down or failed to create bucket"
    exit 1
fi

# Attach the new host bucket under the group-0 chassis.
if ! ceph osd crush move controller-1 chassis=group-0; then
    echo "Failed to add controller-1 bucket host"
    exit 1
fi

# Dump the live crushmap and decompile it to editable text.
ceph osd getcrushmap -o $compiled_map
if ! crushtool -d $compiled_map -o $decompiled_map; then
    echo "Failed to read and decompile crushmap rules"
    exit 1
fi

# Change the chooseleaf failure domain from 'osd' to 'host'.
sed -e 's/\(step chooseleaf\) \(firstn [[:digit:]] type\) osd/\1 \2 host/' $decompiled_map > $edited_map

# Recompile the edited map and push it back to the cluster.
if ! crushtool -c $edited_map -o $recompiled_map; then
    echo "Failed to compile new crushmap rules"
    exit 1
fi

if ! ceph osd setcrushmap -i $recompiled_map; then
    echo "Failed to set new crushmap ruleset"
    exit 1
fi

set +x
exit 0

View File

@ -69,6 +69,14 @@ oom_score = 0
runtime_type = "io.containerd.kata.v2"
runtime_engine = ""
runtime_root = ""
<%- if @custom_container_runtime -%>
<%- @custom_container_runtime.each do |cri| -%>
[plugins.cri.containerd.runtimes.<%= cri[0] %>]
runtime_type = "io.containerd.runc.v1"
[plugins.cri.containerd.runtimes.<%= cri[0] %>.options]
BinaryName = "<%= cri[1] %>"
<%- end -%>
<%- end -%>
[plugins.cri.cni]
# conf_dir is the directory in which the admin places a CNI conf.

View File

@ -56,7 +56,7 @@ dhcp-option=option6:224,<%= @install_uuid %>
#
dhcp-match=set:efi,option:client-arch,2
dhcp-match=set:efi,option:client-arch,6
dhcp-match=set:efi,option:client-arch,7
dhcp-match=set:uefi,option:client-arch,7
dhcp-match=set:efi,option:client-arch,8
dhcp-match=set:efi,option:client-arch,9
dhcp-match=set:bios,option:client-arch,0
@ -80,6 +80,9 @@ dhcp-boot=tag:bios,tag:mgmt,pxelinux.0,<%= @mgmt_hostname %>,<%= @mgmt_controlle
dhcp-boot=tag:efi,tag:pxeboot,EFI/grubx64.efi,<%= @pxeboot_hostname %>,<%= @pxeboot_controller_address %>
dhcp-boot=tag:efi,tag:mgmt,EFI/grubx64.efi,<%= @mgmt_hostname %>,<%= @mgmt_controller_address %>
dhcp-boot=tag:uefi,tag:pxeboot,EFI/shim.efi,<%= @pxeboot_hostname %>,<%= @pxeboot_controller_address %>
dhcp-boot=tag:uefi,tag:mgmt,EFI/shim.efi,<%= @mgmt_hostname %>,<%= @mgmt_controller_address %>
# Do not forward queries for plain names (no dots)
domain-needed
# Query the upstream servers in the order they appear. This is necessary when
@ -94,7 +97,15 @@ strict-order
# queries failing for a very long time, even after the service comes up (e.g.
# after a host is rebooted).
max-cache-ttl=5
local=//
# The CNAME entry below will only cause dnsmasq to respond to queries
# that match the address family. Therefore dnsmasq will forward
# queries to the external servers for local domain queries that don't
# match the request record type (i.e. A or AAAA records).
# This line will prevent all requests with the .local domain from
# being forwarded to external servers. This will ensure registry.local
# is not forwarded in all cases, regardless of record type being
# requested.
local=/local/
port=53
bogus-priv
clear-on-reload

View File

@ -1,6 +0,0 @@
[snmp]
<%- @trap_destinations.each do |destination| -%>
trap2sink=<%= destination %>
<%- end -%>

View File

@ -23,6 +23,18 @@ python /usr/share/puppet/modules/platform/files/change_kube_apiserver_params.py
<%- if @admission_plugins -%>
--admission_plugins <%= @admission_plugins %> \
<%- end -%>
<%- if @etcd_cafile -%>
--etcd_cafile <%= @etcd_cafile %> \
<%- end -%>
<%- if @etcd_certfile -%>
--etcd_certfile <%= @etcd_certfile %> \
<%- end -%>
<%- if @etcd_keyfile -%>
--etcd_keyfile <%= @etcd_keyfile %> \
<%- end -%>
<%- if @etcd_servers -%>
--etcd_servers <%= @etcd_servers %>
<%- end -%>
kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch configmap kubeadm-config -p "$(cat <%= @configmap_temp_file %>)"
kubeadm --kubeconfig=/etc/kubernetes/admin.conf config view > <%= @configmap_temp_file %>

View File

@ -1,2 +1,2 @@
# Overrides config file for kubelet
KUBELET_EXTRA_ARGS=--cni-bin-dir=<%= @k8s_cni_bin_dir %> --node-ip=<%= @node_ip %> --volume-plugin-dir=<%= @k8s_vol_plugin_dir %> <%= @k8s_cpu_manager_opts %> --container-runtime=remote --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock
KUBELET_EXTRA_ARGS=--cni-bin-dir=<%= @k8s_cni_bin_dir %> --node-ip=<%= @node_ip %> --volume-plugin-dir=<%= @k8s_vol_plugin_dir %> <%= @k8s_cpu_manager_opts %> --container-runtime=remote --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock --pod-max-pids <%= @k8s_pod_max_pids %>

View File

@ -0,0 +1 @@
/usr/bin/pf_bb_config ACC100 -c /usr/share/pf-bb-config/acc100/acc100_config_vf_5g.cfg

View File

@ -61,7 +61,6 @@ rewrite r_rewrite_set{
set("<%= @system_name %> syslog ${HOST}", value("HOST") condition(filter(f_syslog)));
set("<%= @system_name %> user.log ${HOST}", value("HOST") condition(filter(f_user)));
set("<%= @system_name %> uucp.log ${HOST}", value("HOST") condition(filter(f_uucp)));
set("<%= @system_name %> snmp-api.log ${HOST}", value("HOST") condition(filter(f_snmpat)));
# Most logs write log level to the message field. some writes it to the PRIORITY field
# The priority field is not sent remotely. This is because tcp/udp destinations don't
# work well with templates, which we use to write the priority field to log files on the

View File

@ -1,33 +0,0 @@
###########################################################################
#
# snmpd.conf
#
# - This file is managed by Puppet. DO NOT EDIT.
#
###########################################################################
# incl/excl subtree mask
view all included .1 80
sysDescr <%= @software_version %> <%= @system_info %>
sysObjectID 1.3.6.1.4.1.731.3
sysContact <%= @system_contact %>
sysName <%= @system_name %>
sysLocation <%= @system_location %>
sysServices 72
[snmp] clientaddr oamcontroller
dlmod cgtsAgentPlugin /usr/lib64/libcgtsAgentPlugin.so.1
dlmod snmpAuditPlugin /usr/lib64/libsnmpAuditPlugin.so.1
# Insert the snmpAudit hander into specific sections of the mib tree
injectHandler snmpAudit null
injectHandler snmpAudit bulk_to_next
<%- @community_strings.each do |community| -%>
rocommunity <%= community %>
rocommunity6 <%= community %>
<%- end -%>
<%- @trap_destinations.each do |destination| -%>
trap2sink <%= destination %>
<%- end -%>

View File

@ -0,0 +1 @@
ip link set <%= @port_name -%> vf <%= @vfnumber -%> max_tx_rate <%= @max_tx_rate -%>

View File

@ -30,6 +30,6 @@ skip_tests = \
--no-autoloader_layout-check \
--no-documentation-check
commands =
gem install --no-document json puppet-lint
gem install --no-document json puppet-lint:2.3.6
bash -c "find {toxinidir} -name \*.pp -print0 | xargs -0 puppet-lint --fail-on-warnings {[testenv:puppetlint]skip_tests}"

View File

@ -1,4 +1,4 @@
# hacking pulls in flake8
hacking
hacking < 4.0.1
bashate >= 0.2
bandit!=1.6.0,>=1.1.0,<2.0.0
bandit!=1.6.0,>=1.1.0,<2.0.0;python_version>="3.0" # GPLv2

View File

@ -33,6 +33,14 @@ commands =
-i E006 \
-e 'E*'"
[testenv:bindep]
# Do not install any requirements. We want this to be fast and work even if
# system dependencies are missing, since it's used to tell you what system
# dependencies are missing! This also means that bindep must be installed
# separately, outside of the requirements files.
deps = bindep
commands = bindep test
[testenv:linters]
# Note: centos developer env requires ruby-devel
# Ubuntu developer env requires ruby-dev