add puppet manifests
Change-Id: I1c14ecb2d904dfc8575fcb07fd7f84be690b6429
This commit is contained in:
parent
37bd4300e4
commit
1856c1f334
|
@ -0,0 +1,139 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2014 OpenStack Foundation.
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Root directory where Puppet modules are installed.
MODULE_PATH=/usr/share/puppet/modules

# remove_module SHORT_MODULE_NAME
#   Deletes the named module directory under $MODULE_PATH.
#   Refusing an empty name prevents the argument from ever expanding to
#   "rm -Rf $MODULE_PATH/" (which would wipe every installed module).
#   On misuse the error goes to stderr and a non-zero status is returned
#   so callers (and cron logs) can detect the failure.
function remove_module {
    local SHORT_MODULE_NAME=$1
    if [ -n "$SHORT_MODULE_NAME" ]; then
        rm -Rf "$MODULE_PATH/$SHORT_MODULE_NAME"
    else
        echo "ERROR: remove_module requires a SHORT_MODULE_NAME." >&2
        return 1
    fi
}
|
||||
|
||||
# Array of modules to be installed key:value is module:version.
# These come from the Puppet Forge and are resolved with dependencies.
declare -A MODULES

# Array of modules to be installed from source and without dependency resolution.
# key:value is source location, revision to checkout
declare -A SOURCE_MODULES

# NOTE: if we previously installed kickstandproject-ntp we nuke it here
# since puppetlabs-ntp and kickstandproject-ntp install to the same dir
if grep kickstandproject-ntp /etc/puppet/modules/ntp/Modulefile &> /dev/null; then
    remove_module "ntp"
fi


# Licensing rationale for vcsrepo, preserved from the upstream discussion:
# freenode #puppet 2012-09-25:
# 18:25 < jeblair> i would like to use some code that someone wrote,
# but it's important that i understand how the author wants me to use
# it...
# 18:25 < jeblair> in the case of the vcsrepo module, there is
# ambiguity, and so we are trying to determine what the author(s)
# intent is
# 18:30 < jamesturnbull> jeblair: since we - being PL - are the author
# - our intent was not to limit it's use and it should be Apache
# licensed

MODULES["puppetlabs-vcsrepo"]="1.2.0"
MODULES["puppetlabs-apt"]="1.6.0"
MODULES["puppetlabs-firewall"]="1.1.3"
MODULES["puppetlabs-concat"]="1.1.0"
MODULES["puppetlabs-mysql"]="2.3.1"
MODULES["puppetlabs-ntp"]="3.1.2"
MODULES["puppetlabs-postgresql"]="3.4.2"
MODULES["puppetlabs-rsync"]="0.3.1"
MODULES["puppetlabs-stdlib"]="4.5.1"
MODULES["puppetlabs-java_ks"]="1.2.6"
MODULES["puppetlabs-nodejs"]="0.7.1"
MODULES["puppetlabs-apache"]="1.4.1"
MODULES["maestrodev-rvm"]="1.11.0"
MODULES["thias-sysctl"]="1.0.0"
MODULES["thias-php"]="1.1.0"
MODULES["darin-zypprepo"]="1.0.1"
MODULES["elasticsearch/elasticsearch"]="0.4.0"
MODULES["ripienaar-module_data"]="0.0.3"
MODULES["rodjek-logrotate"]="1.1.1"
MODULES["saz-sudo"]="3.0.9"
MODULES["golja-gnupg"]="1.2.1"
MODULES["gnubilafrance-atop"]="0.0.4"

# Values here may be tags, branches, or other git refs (see the checkout
# pass below, which resolves them with rev-list).
SOURCE_MODULES["https://github.com/iberezovskiy/puppet-mongodb"]="0.1"
SOURCE_MODULES["https://github.com/monester/puppet-bacula"]="v0.4.0.1"
SOURCE_MODULES["https://github.com/monester/puppet-libvirt"]="0.3.2-3"
SOURCE_MODULES["https://github.com/SergK/puppet-display"]="0.5.0"
SOURCE_MODULES["https://github.com/SergK/puppet-glusterfs"]="0.0.4"
SOURCE_MODULES["https://github.com/SergK/puppet-sshuserconfig"]="0.0.1"
SOURCE_MODULES["https://github.com/SergK/puppet-znc"]="0.0.9"
SOURCE_MODULES["https://github.com/teran/puppet-bind"]="0.5.1-hiera-debian-keys-controls-support"
SOURCE_MODULES["https://github.com/teran/puppet-mailman"]="0.1.4+user-fix"
SOURCE_MODULES["https://github.com/teran/puppet-nginx"]="0.1.1+ssl_ciphers(renew)"
|
||||
|
||||
# Snapshot of installed modules, used below to skip up-to-date ones.
MODULE_LIST=$(puppet module list)

# Install all the modules
for MOD in "${!MODULES[@]}" ; do
    # If the module at the current version does not exist upgrade or install it.
    # The pattern "[^v]*v<version>" matches puppet's "name (vX.Y.Z)" output.
    if ! echo "$MODULE_LIST" | grep -q "$MOD ([^v]*v${MODULES[$MOD]}" ; then
        # Attempt module upgrade. If that fails try installing the module.
        if ! puppet module upgrade "$MOD" --version "${MODULES[$MOD]}" >/dev/null 2>&1 ; then
            # This will get run in cron, so silence non-error output
            echo "Installing ${MOD} ..."
            puppet module install --target-dir "$MODULE_PATH" "$MOD" --version "${MODULES[$MOD]}" >/dev/null
        fi
    fi
done
|
||||
|
||||
# Refresh the module list so this pass sees anything the forge pass added.
MODULE_LIST=$(puppet module list)

# Make a second pass, just installing modules from source
for MOD in "${!SOURCE_MODULES[@]}" ; do
    # A repo URL ending in ".git" would yield a bogus module directory name
    # below, so reject it up front.
    if [ "$(echo "$MOD" | awk -F. '{print $NF}')" = 'git' ]; then
        echo "Remote repos of the form repo.git are not supported: ${MOD}" >&2
        exit 1
    fi
    # The module directory name is the last dash-separated component of the URL
    # (e.g. .../puppet-mongodb -> mongodb).
    MODULE_NAME=$(echo "$MOD" | awk -F- '{print $NF}')
    # Set up the git base command to use the correct path. Kept unquoted at
    # its use sites on purpose: it must word-split into a command line.
    GIT_CMD_BASE="git --git-dir=${MODULE_PATH}/${MODULE_NAME}/.git --work-tree ${MODULE_PATH}/${MODULE_NAME}"
    # treat any occurrence of the module as a match
    if ! echo "$MODULE_LIST" | grep -q "${MODULE_NAME}"; then
        # clone modules that are not installed
        git clone "$MOD" "${MODULE_PATH}/${MODULE_NAME}"
    else
        if [ ! -d "${MODULE_PATH}/${MODULE_NAME}/.git" ]; then
            echo "Found directory ${MODULE_PATH}/${MODULE_NAME} that is not a git repo, deleting it and reinstalling from source"
            remove_module "$MODULE_NAME"
            echo "Cloning ${MODULE_PATH}/${MODULE_NAME} ..."
            git clone "$MOD" "${MODULE_PATH}/${MODULE_NAME}"
        elif [ "$(${GIT_CMD_BASE} remote show origin | grep 'Fetch URL' | awk -F'URL: ' '{print $2}')" != "$MOD" ]; then
            echo "Found remote in ${MODULE_PATH}/${MODULE_NAME} that does not match desired remote ${MOD}, deleting dir and re-cloning"
            remove_module "$MODULE_NAME"
            git clone "$MOD" "${MODULE_PATH}/${MODULE_NAME}"
        fi
    fi
    # fetch the latest refs from the repo
    $GIT_CMD_BASE fetch
    # Make sure the correct revision is installed; rev-list is used because
    # rev-parse does not work with tags.
    if [ "$(${GIT_CMD_BASE} rev-list HEAD --max-count=1)" != "$(${GIT_CMD_BASE} rev-list "${SOURCE_MODULES[$MOD]}" --max-count=1)" ]; then
        # checkout correct revision
        $GIT_CMD_BASE checkout "${SOURCE_MODULES[$MOD]}"
    fi
done
|
|
@ -0,0 +1,36 @@
|
|||
#!/bin/sh
# Bootstrap a Puppet master on a Debian/Ubuntu host: upgrade the system,
# install puppet, populate modules (from the network, or from a pre-built
# archive for isolated environments), wire up hiera, then apply the site
# manifest and run the agent once.

set -xe

# Keep apt fully non-interactive; this script is meant to run unattended.
export DEBIAN_FRONTEND=noninteractive

apt-get update
apt-get upgrade -y
apt-get install -y git puppet apt-transport-https tar

# PUPPET_MODULES_ARCHIVE (optional env var): path to a tar archive of
# pre-fetched Puppet modules. When unset, modules are fetched from the
# network by install_modules.sh instead.
if [ -z "${PUPPET_MODULES_ARCHIVE}" ]; then
    /etc/puppet/bin/install_modules.sh
else
    # Last path component of puppet's modulepath setting.
    MODULEPATH=$(puppet config print | awk -F':' '/^modulepath/{print $NF}')
    if [ -f "${PUPPET_MODULES_ARCHIVE}" ]; then
        # --strip-components=1 drops the archive's top-level "modules/" dir.
        tar xvf "${PUPPET_MODULES_ARCHIVE}" --strip-components=1 -C "${MODULEPATH}"
    else
        echo "${PUPPET_MODULES_ARCHIVE} is not a file. Quitting!"
        exit 2
    fi
fi

# Path where puppet expects its hiera config; if it is missing, link in
# either the stub config or the system-wide /etc/hiera.yaml.
expect_hiera=$(puppet apply -vd --genconfig | awk '/ hiera_config / {print $3}')
if [ ! -f "${expect_hiera}" ]; then
    echo "File ${expect_hiera} not found!"
    if [ ! -f /etc/hiera.yaml ]; then
        ln -s /etc/puppet/hiera/hiera-stub.yaml "${expect_hiera}"
    else
        echo "Found default /etc/hiera.yaml"
        ln -s /etc/hiera.yaml "${expect_hiera}"
    fi
fi

# Apply the site manifest with the puppetmaster role, then run the agent
# once against the freshly configured master.
FACTER_PUPPET_APPLY=true FACTER_ROLE=puppetmaster puppet apply -vd /etc/puppet/manifests/site.pp
puppet agent --enable
puppet agent -vd --no-daemonize --onetime
|
|
@ -0,0 +1,20 @@
|
|||
Deployment in isolated environment
|
||||
==================================
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
#) An already-prepared tar.bz2 archive containing Puppet modules with the following structure::
|
||||
|
||||
modules
|
||||
module1
|
||||
module2
|
||||
moduleN
|
||||
|
||||
Usage
|
||||
-----
|
||||
Call ``install_puppet_master.sh`` with PUPPET_MODULES_ARCHIVE set to the path of the archive::
|
||||
|
||||
PUPPET_MODULES_ARCHIVE="/home/test/archive.tar.bz2" ./install_puppet_master.sh
|
||||
|
||||
It will install the modules from the archive and then run the regular scripts used for environment deployment.
|
|
@ -0,0 +1,291 @@
|
|||
---
|
||||
apt::always_apt_update: true
|
||||
apt::disable_keys: false
|
||||
apt::purge_sources_list: true
|
||||
apt::purge_sources_list_d: true
|
||||
apt::purge_preferences_d: true
|
||||
apt::update_timeout: 300
|
||||
apt::sources:
|
||||
mirror:
|
||||
location: 'http://archive.ubuntu.com/ubuntu/'
|
||||
release: "%{::lsbdistcodename}"
|
||||
key: 'C0B21F32'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main restricted universe multiverse'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
mirror_updates:
|
||||
location: 'http://archive.ubuntu.com/ubuntu/'
|
||||
release: "%{::lsbdistcodename}-updates"
|
||||
key: 'C0B21F32'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main restricted universe multiverse'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
devops:
|
||||
location: 'http://mirror.fuel-infra.org/devops/ubuntu/'
|
||||
release: '/'
|
||||
key: '62BF6A9C1D2B45A2'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: ''
|
||||
include_src: false
|
||||
include_deb: true
|
||||
docker:
|
||||
location: 'https://get.docker.io/ubuntu'
|
||||
release: 'docker'
|
||||
key: 'A88D21E9'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
jenkins:
|
||||
location: 'http://pkg.jenkins-ci.org/debian-stable/'
|
||||
release: 'binary/'
|
||||
key: 'D50582E6'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: ''
|
||||
include_src: false
|
||||
include_deb: true
|
||||
elasticsearch:
|
||||
location: 'http://packages.elasticsearch.org/elasticsearch/1.3/debian'
|
||||
release: 'stable'
|
||||
repos: 'main'
|
||||
key: 'D88E42B4'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
|
||||
atop::service: true
|
||||
atop::interval: 60
|
||||
|
||||
yum::default:
|
||||
'enabled': true
|
||||
yum::purge: true
|
||||
yum::repos:
|
||||
'base':
|
||||
'descr': 'CentOS-$releasever - Base'
|
||||
'baseurl': 'http://mirror.centos.org/centos/$releasever/os/$basearch/'
|
||||
'gpgcheck': true
|
||||
'centosplus':
|
||||
'descr': 'CentOS-$releasever - Plus'
|
||||
'baseurl': 'http://mirror.centos.org/centos/$releasever/centosplus/$basearch/'
|
||||
'gpgcheck': true
|
||||
'contrib':
|
||||
'descr': 'CentOS-$releasever - Contrib'
|
||||
'baseurl': 'http://mirror.centos.org/centos/$releasever/contrib/$basearch/'
|
||||
'gpgcheck': true
|
||||
'epel':
|
||||
'descr': 'epel $releasever'
|
||||
'mirrorlist': 'https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch'
|
||||
'gpgcheck': true
|
||||
'gpgkey': 'https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6'
|
||||
'extras':
|
||||
'descr': 'CentOS-$releasever - Extras'
|
||||
'baseurl': 'http://mirror.centos.org/centos/$releasever/extras/$basearch/'
|
||||
'gpgcheck': true
|
||||
'jpackage':
|
||||
'descr': 'JPackage'
|
||||
'mirrorlist': 'http://www.jpackage.org/mirrorlist.php?dist=generic&type=free&release=5.0'
|
||||
'gpgcheck': true
|
||||
'gpgkey': 'http://www.jpackage.org/jpackage.asc'
|
||||
'updates':
|
||||
'descr': 'CentOS-$releasever - Updates'
|
||||
'baseurl': 'http://mirror.centos.org/centos/$releasever/updates/$basearch/'
|
||||
'gpgcheck': true
|
||||
'zabbix':
|
||||
'descr': 'Zabbix Official Repository - $basearch'
|
||||
'baseurl': 'http://repo.zabbix.com/zabbix/2.2/rhel/6/$basearch/'
|
||||
'gpgcheck': true
|
||||
'gpgkey': 'http://repo.zabbix.com/RPM-GPG-KEY-ZABBIX'
|
||||
|
||||
firewall:
|
||||
known_networks:
|
||||
- 10.108.0.0/16
|
||||
external_hosts:
|
||||
- 10.0.0.0/16
|
||||
internal_networks:
|
||||
- 172.18.0.0/16
|
||||
local_networks:
|
||||
- 192.168.1.0/24
|
||||
|
||||
mysql:
|
||||
root_password: 'peNTZ7GA2Zr90y'
|
||||
|
||||
system::root_email: 'root@example.com'
|
||||
system::mta_local_only: true
|
||||
system::timezone: 'UTC'
|
||||
system::root_password: '$6$rqlo82B/$nKaHJ2oNy08spMfByg1Pk.U/fnJvhOdWAMe2MS53zW8yw3ZIGGMoiqz98s/DDeeOzKrc2iR7WWoOfN5RoVnd9/'
|
||||
system::install_tools: true
|
||||
|
||||
fuel_project::jenkins::slave::nailgun_db:
|
||||
- 'nailgun'
|
||||
- 'nailgun0'
|
||||
- 'nailgun1'
|
||||
- 'nailgun2'
|
||||
- 'nailgun3'
|
||||
- 'nailgun4'
|
||||
- 'nailgun5'
|
||||
- 'nailgun6'
|
||||
- 'nailgun7'
|
||||
|
||||
fuel_project::jenkins::slave::seed_cleanup_dirs:
|
||||
-
|
||||
dir: '/var/www/fuelweb-iso'
|
||||
ttl: 10
|
||||
pattern: 'fuel-*'
|
||||
-
|
||||
dir: '/srv/downloads'
|
||||
ttl: 1
|
||||
pattern: 'fuel-*'
|
||||
|
||||
|
||||
fuel_project::jenkins::slave::docker_package: 'lxc-docker-1.5.0'
|
||||
fuel_project::jenkins::slave::jenkins_swarm_slave: true
|
||||
|
||||
fuel_project::jenkins::slave::ruby_version: '2.1.5'
|
||||
|
||||
jenkins::slave::authorized_keys:
|
||||
'jenkins@mytestserver':
|
||||
type: ssh-rsa
|
||||
key: 'AAAAB3NzaC1yc2EAAAADAQABAAABAQDNWgMf6IisSY0HK0mpHkgVhRxHsDom81PJ6W3jAgcSBWY1Kz/2vL98SK91ppgYmnDa2uLbchY2Xk9ciefMpm7Qq5EO6oSPKJJhADyCYAX/7YomZIy4Xu7HxEh0Z6VCLt0DymwN4tBS9JuTISvEm17BLgtis/AemA2eRIl0JAdPf9rmQps4KP5AhG60ucdtTKD0y8TFK95ateplgcq9JLRInhrdg/vnJLbKnV7lP1g5dfY1rm6bum7P+Jwf2tdTOa0b5ucK/+iWVbyPO4Z2afPpblh4Vynfe2wMzzpGAp3n5MwtH2EZmSXm/B6/CkgOFROsmWH8MzQEvNBGHhw+ONR9'
|
||||
jenkins::swarm_slave::master: 'https://jenkins.test-company.org/'
|
||||
jenkins::swarm_slave::user: 'jenkins-robotson'
|
||||
jenkins::swarm_slave::password: 'BTRfeHyibQlM2M'
|
||||
jenkins::swarm_slave::labels: '14_04'
|
||||
|
||||
|
||||
fuel_project::jenkins::slave::known_hosts:
|
||||
'review.openstack.org':
|
||||
host: 'review.openstack.org'
|
||||
port: 29418
|
||||
|
||||
mysql::client::package_name: 'percona-server-client-5.6'
|
||||
mysql::server::package_name: 'percona-server-server-5.6'
|
||||
mysql::server::root_password: 'WpUrXaC92cZQ4XHMLpfraTRsl16ZtoTu'
|
||||
|
||||
puppet::master::autosign: true
|
||||
puppet::master::firewall_allow_sources:
|
||||
'1000 - puppet master connections from 10.0.0.0/8':
|
||||
source: '10.0.0.0/8'
|
||||
'1000 - puppet master connections from 172.16.0.0/12':
|
||||
source: '172.16.0.0/12'
|
||||
'1000 - puppet master connections from 192.168.0.0/16':
|
||||
source: '192.168.0.0/16'
|
||||
|
||||
sysctl::base::values:
|
||||
net.ipv4.ip_forward:
|
||||
value: '0'
|
||||
net.ipv4.tcp_syncookies:
|
||||
value: 1
|
||||
net.ipv4.tcp_window_scaling:
|
||||
value: 1
|
||||
net.ipv4.tcp_congestion_control:
|
||||
value: cubic
|
||||
net.ipv4.tcp_no_metrics_save:
|
||||
value: 1
|
||||
net.ipv4.tcp_moderate_rcvbuf:
|
||||
value: 1
|
||||
fs.inotify.max_user_instances:
|
||||
value: 256
|
||||
|
||||
# Parameters passed to the nginx::package class
nginx::package_name: nginx-full
|
||||
nginx::package_source: nginx
|
||||
nginx::package_ensure: present
|
||||
nginx::manage_repo: false
|
||||
|
||||
#passed to nginx::service class
|
||||
nginx::configtest_enable: true
|
||||
nginx::service_ensure: running
|
||||
nginx::service_restart: 'nginx -t && /etc/init.d/nginx restart'
|
||||
|
||||
nginx::config::temp_dir: /tmp
|
||||
nginx::config::run_dir: /var/nginx
|
||||
nginx::config::conf_template: fuel_project/nginx/nginx.conf.erb
|
||||
nginx::config::proxy_conf_template: nginx/conf.d/proxy.conf.erb
|
||||
nginx::config::confd_purge: true
|
||||
nginx::config::vhost_purge: true
|
||||
nginx::config::worker_processes: "%{processorcount}"
|
||||
nginx::config::worker_connections: 1024
|
||||
nginx::config::worker_rlimit_nofile: 1024
|
||||
nginx::config::types_hash_max_size: 1024
|
||||
nginx::config::types_hash_bucket_size: 512
|
||||
nginx::config::names_hash_bucket_size: 64
|
||||
nginx::config::names_hash_max_size: 512
|
||||
nginx::config::multi_accept: 'off'
|
||||
nginx::config::events_use: false
|
||||
nginx::config::sendfile: 'on'
|
||||
nginx::config::keepalive_timeout: 65
|
||||
nginx::config::http_tcp_nodelay: 'on'
|
||||
nginx::config::http_tcp_nopush: 'off'
|
||||
nginx::config::gzip: 'on'
|
||||
nginx::config::server_tokens: 'off'
|
||||
nginx::config::spdy: 'off'
|
||||
nginx::config::ssl_stapling: 'off'
|
||||
nginx::config::proxy_redirect: 'off'
|
||||
nginx::config::proxy_set_header:
|
||||
- 'Host $host'
|
||||
- 'X-Real-IP $remote_addr'
|
||||
- 'X-Forwarded-For $proxy_add_x_forwarded_for'
|
||||
nginx::config::proxy_cache_path: '/var/lib/nginx/cache'
|
||||
nginx::config::proxy_cache_levels: '2'
|
||||
nginx::config::proxy_cache_keys_zone: 'static:500m'
|
||||
nginx::config::proxy_cache_max_size: 500m
|
||||
nginx::config::proxy_cache_inactive: 20m
|
||||
nginx::config::fastcgi_cache_path: false
|
||||
nginx::config::fastcgi_cache_levels: '1'
|
||||
nginx::config::fastcgi_cache_keys_zone: 'd3:100m'
|
||||
nginx::config::fastcgi_cache_max_size: 500m
|
||||
nginx::config::fastcgi_cache_inactive: 20m
|
||||
nginx::config::fastcgi_cache_key: false
|
||||
nginx::config::fastcgi_cache_use_stale: false
|
||||
nginx::config::client_body_temp_path: /var/nginx/client_body_temp
|
||||
nginx::config::client_body_buffer_size: 128k
|
||||
nginx::config::client_max_body_size: 10m
|
||||
nginx::config::proxy_temp_path: /var/nginx/proxy_temp
|
||||
nginx::config::proxy_connect_timeout: '90'
|
||||
nginx::config::proxy_send_timeout: '90'
|
||||
nginx::config::proxy_read_timeout: '90'
|
||||
nginx::config::proxy_buffers: '32 4k'
|
||||
nginx::config::proxy_http_version: '1.0'
|
||||
nginx::config::proxy_buffer_size: 8k
|
||||
nginx::config::proxy_headers_hash_bucket_size: '256'
|
||||
nginx::config::logdir: /var/log/nginx
|
||||
|
||||
nginx::config::mail: false
|
||||
|
||||
# Used to set conn_limit
|
||||
nginx::config::http_cfg_append:
|
||||
'limit_conn_zone': '$binary_remote_addr zone=addr:10m'
|
||||
|
||||
nginx::config::nginx_error_log: /var/log/nginx/error.log
|
||||
nginx::config::http_access_log: /var/log/nginx/access.log
|
||||
|
||||
nginx::config::root_group: root
|
||||
# Specific owner for sites-available directory
|
||||
nginx::config::sites_available_owner: root
|
||||
nginx::config::sites_available_group: root
|
||||
nginx::config::sites_available_mode: '0644'
|
||||
|
||||
# Owner for all other files
|
||||
nginx::config::global_owner: root
|
||||
nginx::config::global_group: root
|
||||
nginx::config::global_mode: '0644'
|
||||
|
||||
nginx::config::pid: /var/run/nginx.pid
|
||||
|
||||
nginx::config::conf_dir: /etc/nginx
|
||||
|
||||
nginx::config::super_user: true
|
||||
nginx::config::daemon_user: www-data
|
||||
|
||||
logrotate::rules:
|
||||
'upstart':
|
||||
path: '/var/log/upstart/*.log'
|
||||
rotate_every: 'day'
|
||||
rotate: '7'
|
||||
missingok: true
|
||||
compress: true
|
||||
ifempty: false
|
||||
create: false
|
||||
delaycompress: true
|
|
@ -0,0 +1,15 @@
|
|||
---
# Hiera configuration: YAML data under /var/lib/hiera, looked up from most
# specific (per-node) to least specific (common).
:backends:
  - yaml
:yaml:
  :datadir: /var/lib/hiera
# NOTE(review): the json backend is configured here but not listed in
# :backends above, so it is inert — confirm whether it should be enabled.
:json:
  :datadir: /var/lib/hiera
# Lookup order: node certname, then role fact, then location fact, then common.
:hierarchy:
  - nodes/%{::clientcert}
  - roles/%{::role}
  - locations/%{::location}
  - common
:logger: console
# "deeper" recursively merges hashes found at multiple hierarchy levels.
:merge_behavior: deeper
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
---
|
||||
apt::sources:
|
||||
mirror:
|
||||
location: 'http://mirrors.kha.mirantis.net/ubuntu/'
|
||||
release: "%{::lsbdistcodename}"
|
||||
key: 'C0B21F32'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main restricted universe multiverse'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
mirror_updates:
|
||||
location: 'http://mirrors.kha.mirantis.net/ubuntu/'
|
||||
release: "%{::lsbdistcodename}-updates"
|
||||
key: 'C0B21F32'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main restricted universe multiverse'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
devops:
|
||||
location: 'http://osci-mirror-kha.kha.mirantis.net/devops/ubuntu/'
|
||||
release: '/'
|
||||
key: '62BF6A9C1D2B45A2'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: ''
|
||||
include_src: false
|
||||
include_deb: true
|
||||
docker:
|
||||
location: 'https://get.docker.io/ubuntu'
|
||||
release: 'docker'
|
||||
key: 'A88D21E9'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
jenkins:
|
||||
location: 'http://pkg.jenkins-ci.org/debian-stable/'
|
||||
release: 'binary/'
|
||||
key: 'D50582E6'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: ''
|
||||
include_src: false
|
||||
include_deb: true
|
||||
elasticsearch:
|
||||
location: 'http://packages.elasticsearch.org/elasticsearch/1.3/debian'
|
||||
release: 'stable'
|
||||
repos: 'main'
|
||||
key: 'D88E42B4'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
include_src: false
|
||||
include_deb: true
|
|
@ -0,0 +1,50 @@
|
|||
---
|
||||
apt::sources:
|
||||
mirror:
|
||||
location: 'http://mirrors.msk.mirantis.net/ubuntu/'
|
||||
release: "%{::lsbdistcodename}"
|
||||
key: 'C0B21F32'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main restricted universe multiverse'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
mirror_updates:
|
||||
location: 'http://mirrors.msk.mirantis.net/ubuntu/'
|
||||
release: "%{::lsbdistcodename}-updates"
|
||||
key: 'C0B21F32'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main restricted universe multiverse'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
devops:
|
||||
location: 'http://osci-mirror-msk.msk.mirantis.net/devops/ubuntu/'
|
||||
release: '/'
|
||||
key: '62BF6A9C1D2B45A2'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: ''
|
||||
include_src: false
|
||||
include_deb: true
|
||||
docker:
|
||||
location: 'https://get.docker.io/ubuntu'
|
||||
release: 'docker'
|
||||
key: 'A88D21E9'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: 'main'
|
||||
include_src: false
|
||||
include_deb: true
|
||||
jenkins:
|
||||
location: 'http://pkg.jenkins-ci.org/debian-stable/'
|
||||
release: 'binary/'
|
||||
key: 'D50582E6'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
repos: ''
|
||||
include_src: false
|
||||
include_deb: true
|
||||
elasticsearch:
|
||||
location: 'http://packages.elasticsearch.org/elasticsearch/1.3/debian'
|
||||
release: 'stable'
|
||||
repos: 'main'
|
||||
key: 'D88E42B4'
|
||||
key_server: 'keyserver.ubuntu.com'
|
||||
include_src: false
|
||||
include_deb: true
|
|
@ -0,0 +1,93 @@
|
|||
---
|
||||
classes:
|
||||
- '::fuel_project::jenkins::master'
|
||||
|
||||
fuel_project::jenkins::master::install_label_dumper: true
|
||||
fuel_project::jenkins::master::install_plugins: true
|
||||
fuel_project::jenkins::master::service_fqdn: 'jenkins.test-company.org'
|
||||
|
||||
jenkins::master::install_groovy: true
|
||||
jenkins::master::jenkins_cli_file: '/var/cache/jenkins/war/WEB-INF/jenkins-cli.jar'
|
||||
jenkins::master::jenkins_cli_tries: '6'
|
||||
jenkins::master::jenkins_cli_try_sleep: '30'
|
||||
jenkins::master::jenkins_libdir: '/var/lib/jenkins'
|
||||
jenkins::master::jenkins_management_email: 'jenkin@example.com'
|
||||
jenkins::master::jenkins_management_login: 'jenkins-manager'
|
||||
jenkins::master::jenkins_management_name: 'Jenkins Master'
|
||||
jenkins::master::jenkins_management_password: 'jenkins_password'
|
||||
jenkins::master::jenkins_s2m_acl: true
|
||||
jenkins::master::security_model: 'ldap'
|
||||
jenkins::master::security_opt_params: ''
|
||||
jenkins::master::service_fqdn: 'jenkins-product.test.local'
|
||||
jenkins::master::ssl_key_file: '/etc/ssl/jenkins.key'
|
||||
jenkins::master::ssl_key_file_contents: |
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC2OB+nAmxkHPht
|
||||
j9CBXr1LU/n7nh37WUDGahYN775RLcR3NUZZHz6hoc7hyvEPO1PI5Mm2y0L8yREJ
|
||||
meDFRl1yknP1Pe/vSlP+1+73l9UlpfV8uNwJ3DfAPUgxwYjOO0zMMu6Nih4zuZ2N
|
||||
H2LHM3laJAWeeBCTCp4SxCW1XeMlKqfdT4/T3eXp5WdJ1+EtP6rya9Zivx+HHh6X
|
||||
dIfKTypGiZiPiCewQnd0a2MM0X0IjtcvalldV4M9llAojkVze1idIBRu9c7t914C
|
||||
fZAsbSSe2Q8s8YYAmymvxWrchz+CVs8GPoGx1iPSM4zBZFikJXaWT8IVk3TcFTHo
|
||||
k9AzFtYdAgMBAAECggEBAJdwr4W6suDFXwaXhp9uYH4xbcpbz+ksdXQxiODORmrr
|
||||
UaQNR8kb+Y6Vjv4DDzMsiGanFqnv5l12sc078R2jbFijNPI2JqnGKWbciYOG0aO3
|
||||
eP3OGTmspz0C8XRAt3VGvX3cOnyxtIYilrlJw2tw8UMkOvNIL+Y05ckM8ZX5UKV6
|
||||
lVJ30HO2jR6T5yM/Gc3s3gL/X5bHcaQDLWjhqZP411zULQPsWP8+bbXv8f+jZqcg
|
||||
jg4oK1mC2MtGjy83DU5CqxZKPiISXm48RDDe8HAUrnkEMQAnHPdQymMv+d9kDv2y
|
||||
6sp1ov3BQCfZm0mHkSW+wdnzwjNnPHZZ2FdvRz7V3GECgYEA5eFYdA9vuTbFhtod
|
||||
foxHzmqZXBQM6ToXYEKFgdNYvHDISXNdsB4DyUT4V46bxpTMLynMqvM59/u16jaP
|
||||
lo4DkkRLG/GxvGeFM/0odPMnoGTL0HBMJiYr7U3tgtEu2t2RqmVc2tpDPzQ0Mwaj
|
||||
dqHPFId1p1AHeeX1MxeuTLPkA5UCgYEAyuxvfgoBfsDad5E3EbFrilrRJRbb3yxQ
|
||||
hgilaISaSDn0MWZ3zE+pTCwuA9HYmjwr4GCeO8kSCpnhI4BKASMa4p0SLsTr0i/9
|
||||
OUulLi3ZieWA2mqekqUo/CaccMhMfGr4AVQ3WeK3cjKXj/j/WnKTfHSB5uL2bvFg
|
||||
XoqfXcOUZmkCgYBjkkdBBkqrXBkU/zcVUGft9eh1pM2u3BWyAT5Y7JWcEfH/NrRX
|
||||
C7kyHei/7Cp3So5iw2U+itoKGwJB794kJWFQorox4W/OHrzotvgmKAh7Bg3uPCYP
|
||||
xCr0v/Nn3XnBHYXx27Prq+zC3Lbbfz2grhfHWaFRlm2WlE+wEMrTuHvEPQKBgEM9
|
||||
XSRShHRPyxRblffS5mON/Edh77Ffqb8AFm8voT/VlEjaP1AABYUsDoNNgYx568AJ
|
||||
w+Tjl4rTunpdBCikTUBR87hzoAChzjKyEiXfI3pCBhRZx/mnqJEE6kmk1VNUzqEC
|
||||
GuU57rd0dCxMwbBizuQqZvDuu+G/McOiA3S6Xe4hAoGAYs01BdHeksEyZK+q0TIR
|
||||
cHJOyX0ae4ClfXyJ6moQbPr9uoDs0g+3p8IZtiEwVatpmQB2DIoE6jF81rsKBK68
|
||||
tHQtn8ywdYDgJbqhx2Y4XP+9CeNhsRAya8SxFmQMirdtWNltMNvTXHFEoVWbf9Yz
|
||||
Sb2NcH2bS0mjAlLmBCPYqsA=
|
||||
-----END PRIVATE KEY-----
|
||||
|
||||
jenkins::master::jenkins_ssh_private_key_contents: |
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAzVoDH+iIrEmNBytJqR5IFYUcR7A6JvNTyelt4wIHEgVmNSs/
|
||||
9ry/fEivdaaYGJpw2tri23IWNl5PXInnzKZu0KuRDuqEjyiSYQA8gmAF/+2KJmSM
|
||||
uF7ux8RIdGelQi7dA8psDeLQUvSbkyErxJtewS4LYrPwHpgNnkSJdCQHT3/a5kKb
|
||||
OCj+QIRutLnHbUyg9MvExSveWrXqZYHKvSS0SJ4a3YP75yS2yp1e5T9YOXX2Na5u
|
||||
m7puz/icH9rXUzmtG+bnCv/ollW8jzuGdmnz6W5YeFcp33tsDM86RgKd5+TMLR9h
|
||||
GZkl5vwevwpIDhUTrJlh/DM0BLzQRh4cPjjUfQIDAQABAoIBAGQO0OjyR+4S5Imy
|
||||
uPCTlbIOqunvX1ZtR81hVS7AZSuNv/B2Q3N5IqBvVjcwVnneftDUyKb+nv4c0/SW
|
||||
KYEZM3OvtT2cXbzXmwNytwkburCqUJ9GbR7E+voRlPBLNEXcScq4DhByDOnu0ANP
|
||||
rWDeB7x/MAMHBCAUHMaaRJN3nqxIEvvzKK0B3GpRsVgGLDTQ4wX9ojmPQ7H8QQVV
|
||||
ZnfiJxhXoXbcQUudwn2etMOQpnOzq+fUSj2U6U+pxnkQBcdb2TUqLVOdKqzV4Xwc
|
||||
u/mqmtMRb6cjRpH+J1ajZqgbn6yw756TmP/LT5Jb0l/tI4b/HrPlXuXSJHtLFvQE
|
||||
D00tK+ECgYEA+Gk447CteVDmkKU/kvDh9PVbZRsuF24w+LK6VLLxSp94gGIlHyNN
|
||||
WdamBZviBIOnyz8x3WPd8u2LnkBla7L4iJgh/v5XgAK4I5ES94VGiEnEWJDXVKOY
|
||||
JW9mRH7CElmhRbhVuMQoEDonhiLNLnRwwwjF79dSlANpJxioMCVOMkUCgYEA06AH
|
||||
sx5gzdCt1OAgR2XPANMLdOgufjWsQiZtCIlLQTxzEjmF3uwsWy+ugVQIFZnwIUxw
|
||||
5O41uDji1lwE/ond15oBMFB97unzsgFW3uHSV7yWJv1SVP7LSXZnBIRhwqsozYNL
|
||||
3py9k/EvuZ4P+EoR8F3COC5gg62qxO5L2P3O2NkCgYAJ+e/W9RmCbcVUuc470IDC
|
||||
nbf174mCV2KQGl1xWV5naNAmF8r13S0WFpDEWOZS2Ba9CuStx3z6bJ/W0y8/jAh/
|
||||
M9zpqL1K3tEWXJUua6PRhWTlSavcMlXB6x9oUM7qfb8EVcrbiMUzIaLEuFEVNIfy
|
||||
zT9lynf+icSHVW4rwNPLIQKBgCJ0VYyWD5Cyvvp/mwHE05UAx0a7XoZx2p/SfcH8
|
||||
CGKQovN+pgsLTJV0B+dKdR5/N5dUSLUdC2X47QWVacK/U3z8t+DT2g0BzglXKnuT
|
||||
LJnYPGIQsEziRtqpClCz9O6qyzPagom13y+s/uYrk9IKzSzjNvHKqzAFIF57paGo
|
||||
gPrRAoGAClmcMYF4m48mnMAj5htFQg1UlE8abKygoWRZO/+0uh9BrZeQ3jsWnUWW
|
||||
3TWXEjB/RazdPB0PWfc3kjruz8IhDsLKQYPX+h8JuLO8ZL20Mxo7o3bs/GQnDrw1
|
||||
g/PCKBJscu0RQxsa16tt5aX/IM82cJR6At3tTUyUpiwqNsVClJs=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
||||
|
||||
jenkins::master::jenkins_ssh_public_key_contents: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNWgMf6IisSY0HK0mpHkgVhRxHsDom81PJ6W3jAgcSBWY1Kz/2vL98SK91ppgYmnDa2uLbchY2Xk9ciefMpm7Qq5EO6oSPKJJhADyCYAX/7YomZIy4Xu7HxEh0Z6VCLt0DymwN4tBS9JuTISvEm17BLgtis/AemA2eRIl0JAdPf9rmQps4KP5AhG60ucdtTKD0y8TFK95ateplgcq9JLRInhrdg/vnJLbKnV7lP1g5dfY1rm6bum7P+Jwf2tdTOa0b5ucK/+iWVbyPO4Z2afPpblh4Vynfe2wMzzpGAp3n5MwtH2EZmSXm/B6/CkgOFROsmWH8MzQEvNBGHhw+ONR9'
|
||||
jenkins::master::jenkins_address: '127.0.0.1'
|
||||
jenkins::master::jenkins_proto: 'http'
|
||||
jenkins::master::jenkins_port: '8080'
|
||||
jenkins::master::jenkins_java_args: '-Xmx1500m -Xms1024m -Dorg.apache.commons.jelly.tags.fmt.timeZone=Europe/Moscow'
|
||||
jenkins::master::jjb_username: 'jjb_user'
|
||||
jenkins::master::jjb_password: 'jjb_pass'
|
||||
jenkins::master::firewall_allow_sources:
|
||||
'1000 - jenkins connections from 0.0.0.0/0':
|
||||
source: '0.0.0.0/0'
|
||||
#jenkins::master::nginx_log_format: 'proxy'
|
|
@ -0,0 +1,13 @@
|
|||
---
|
||||
classes:
|
||||
- '::fuel_project::jenkins::slave'
|
||||
- '::sudo'
|
||||
|
||||
# keep current sudo configuration
|
||||
sudo::purge: false
|
||||
sudo::config_file_replace: false
|
||||
|
||||
# https://bugs.launchpad.net/fuel/+bug/1458842
|
||||
sudo::configs:
|
||||
'tcpdump':
|
||||
'content': '%sudo ALL=(ALL) NOPASSWD: /usr/sbin/tcpdump'
|
|
@ -0,0 +1,3 @@
|
|||
---
|
||||
classes:
|
||||
- '::fuel_project::puppet::master'
|
|
@ -0,0 +1,62 @@
|
|||
# Defaults

# All exec resources get a sane PATH and run via the shell provider.
Exec {
  path => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
  provider => 'shell',
}

# Managed files always replace existing content.
File {
  replace => true,
}

# On Debian-family hosts, refresh apt before installing any package.
if($::osfamily == 'Debian') {
  Exec['apt_update'] -> Package <| |>
}

# A "pre" stage that runs before the main stage, for early setup classes.
stage { 'pre' :
  before => Stage['main'],
}

# Expanded by git keyword substitution; recorded on disk for debugging
# which manifest revision was applied.
$gitrevision = '$Id$'

notify { "Revision : ${gitrevision}" :}

file { '/var/lib/puppet' :
  ensure => 'directory',
  owner => 'puppet',
  group => 'puppet',
  mode => '0755',
}

file { '/var/lib/puppet/gitrevision.txt' :
  ensure => 'present',
  owner => 'root',
  group => 'root',
  mode => '0444',
  content => $gitrevision,
  require => File['/var/lib/puppet'],
}


# Nodes definitions

# Jenkins slaves get the slave class with external-host hardening.
node /jenkins-slave\.test-company\.org/ {
  class { '::fuel_project::jenkins::slave' :
    external_host => true,
  }
}

node /jenkins\.test-company\.org/ {
  class { '::fuel_project::jenkins::master' :}
}

# Default
# Any other node: include whatever classes hiera assigns it, or just
# report that no classes were found.
node default {
  $classes = hiera('classes', '')
  if ($classes) {
    validate_array($classes)
    hiera_include('classes')
  } else {
    notify { 'Default node invocation' :}
  }
}
|
|
@ -0,0 +1,9 @@
|
|||
# Class: firewall_defaults::post
#
# Final firewall rule: drop every packet not accepted by an earlier rule.
# 'before => undef' breaks the global Firewall { before => ... } default
# (set in firewall_defaults::pre) so this rule can sort last.
class firewall_defaults::post {
  firewall { '9999 drop all':
    proto => 'all',
    action => 'drop',
    before => undef,
  }
}
|
|
@ -0,0 +1,39 @@
|
|||
# Class: firewall_defaults::pre
#
# Baseline firewall setup: purge unmanaged rules, ensure every managed rule
# sorts before the catch-all drop in firewall_defaults::post, and open the
# always-needed basics (icmp, loopback, established connections).
class firewall_defaults::pre {
  include firewall_defaults::post

  case $::osfamily {
    'Debian': {
      # Needed so iptables rules survive a reboot on Debian/Ubuntu.
      package { 'iptables-persistent' :
        ensure => 'present',
        before => Resources['firewall']
      }
    }
    default: { }
  }

  # Remove any firewall rules not managed by puppet.
  resources { 'firewall' :
    purge => true,
  }

  # Every Firewall resource defaults to sorting before the final drop rule.
  Firewall {
    before => Class['firewall_defaults::post'],
  }

  # 'require => undef' breaks any inherited ordering so this rule can be first.
  firewall { '000 accept all icmp':
    proto => 'icmp',
    action => 'accept',
    require => undef,
  }->
  firewall { '001 accept all to lo interface':
    proto => 'all',
    iniface => 'lo',
    action => 'accept',
  }->
  firewall { '002 accept related established rules':
    proto => 'all',
    ctstate => ['RELATED', 'ESTABLISHED'],
    action => 'accept',
  }
}
|
|
@ -0,0 +1,17 @@
|
|||
# For all the following commands HOME should be set to the directory that has .my.cnf file with password information.
|
||||
# Get information about Slave replica
|
||||
UserParameter=mysql.replication_status[*],HOME=/var/lib/zabbix mysql -e 'SHOW SLAVE STATUS \G' | grep $1 | cut -d ':' -f2-
|
||||
# Flexible parameter to grab global variables. On the frontend side, use keys like mysql.status[Com_insert].
|
||||
# Key syntax is mysql.status[variable].
|
||||
UserParameter=mysql.status[*],echo "show global status where Variable_name='$1';" | HOME=/var/lib/zabbix mysql -N | awk '{print $$2}'
|
||||
# Flexible parameter to determine database or table size. On the frontend side, use keys like mysql.size[zabbix,history,data].
|
||||
# Key syntax is mysql.size[<database>,<table>,<type>].
|
||||
# Database may be a database name or "all". Default is "all".
|
||||
# Table may be a table name or "all". Default is "all".
|
||||
# Type may be "data", "index", "free" or "both". Both is a sum of data and index. Default is "both".
|
||||
# Database is mandatory if a table is specified. Type may be specified always.
|
||||
# Returns value in bytes.
|
||||
# 'sum' on data_length or index_length alone needed when we are getting this information for whole database instead of a single table
|
||||
UserParameter=mysql.size[*],echo "select sum($(case "$3" in both|"") echo "data_length+index_length";; data|index) echo "$3_length";; free) echo "data_free";; esac)) from information_schema.tables$([[ "$1" = "all" || ! "$1" ]] || echo " where table_schema='$1'")$([[ "$2" = "all" || ! "$2" ]] || echo "and table_name='$2'");" | HOME=/var/lib/zabbix mysql -N
|
||||
UserParameter=mysql.ping,HOME=/var/lib/zabbix mysqladmin ping | grep -c alive
|
||||
UserParameter=mysql.version,mysql -V
|
|
@ -0,0 +1,4 @@
|
|||
UserParameter=hardware,echo CPU: `cat /proc/cpuinfo | awk -F':' '/^model name/ { MODEL=$2; COUNT+=1 } END { print COUNT"x"MODEL }'`, MEM: `cat /proc/meminfo | awk '/^MemTotal:/ { printf "%dG", ($2/1024000) }'`, HDD: `cat /sys/block/sd*/size /sys/block/vd*/size 2>/dev/null | sort | uniq -c | sort -n | awk '{ printf "%dx%dG, ", $1, ($2/2/1000/976.5) }' | sed 's/, $//'`
|
||||
UserParameter=vfs.dev.discovery,for partition in $(awk '{print $NF}' /proc/partitions | egrep '^[sv]d([a-z]+)$') ; do partitionlist="$partitionlist,"'{"{#DISKDEV}":"'$partition'"}'; done; echo '{"data":['${partitionlist#,}']}'
|
||||
UserParameter=smartd.value[*],sudo /usr/sbin/smartctl -A "/dev/$1" | /bin/grep "$2" | /usr/bin/awk '{print $$10}'
|
||||
UserParameter=smartd.hdd.health[*],sudo /usr/sbin/smartctl -H "/dev/$1" | /bin/grep -c '^SMART overall-health self-assessment test result: PASSED$'
|
|
@ -0,0 +1 @@
|
|||
UserParameter=ssl.certificate.check[*],/usr/local/bin/zabbix_check_certificate.sh -H "$1" -p "$2"
|
|
@ -0,0 +1,2 @@
|
|||
UserParameter=lpupdatebug.lp_logcheck[*],/usr/local/bin/tailnew /var/log/lpupdatebug.log lpupdatebug.lp_logcheck.$(echo -n /var/log/lpupdatebug.log | sed 's~/~_~g') | grep -oh '0x00000lp' | tail -1
|
||||
UserParameter=lpupdatebug.ssh_logcheck[*],/usr/local/bin/tailnew /var/log/lpupdatebug.log lpupdatebug.ssh_logcheck.$(echo -n /var/log/lpupdatebug.log | sed 's~/~_~g') | grep -oh '0x00000ssh' | tail -1
|
Binary file not shown.
|
@ -0,0 +1,59 @@
|
|||
#!/usr/bin/perl
# glubix_checkvolstatus.pl -- report whether a GlusterFS volume is healthy.
#
# Prints "1" when the volume is Started (and, when --volume_numbricks is
# given, all expected bricks are active); prints "0" otherwise.
#
# Ex.
#   ./glubix_checkvolstatus.pl --volume_name vol0
#   ./glubix_checkvolstatus.pl --volume_name vol1 --volume_numbricks 4
#
# options:
#   --volume_name vol1
#   --volume_numbricks 4

use strict;
use Getopt::Long qw(:config posix_default no_ignore_case gnu_compat);

my $rc = 0;
my $gluster_volume_name;
my $gluster_volume_numbricks;

my $getopt_result = GetOptions(
    'volume_name=s'      => \$gluster_volume_name,
    'volume_numbricks=i' => \$gluster_volume_numbricks,
);

# Without a volume name there is nothing to check; report "unhealthy".
# BUG FIX: test defined() first -- comparing an undef option with 'eq'
# is unsafe under 'use strict' once warnings are enabled.
if (!defined $gluster_volume_name || $gluster_volume_name eq "") {
    print "$rc\n";
    exit;
}

my $gluster_cmd = "/usr/sbin/gluster";
my $exec_cmd    = "$gluster_cmd volume info $gluster_volume_name 2> /dev/null";

my $result = `$exec_cmd`;

if ($result =~ m/Status: Started/) {
    # volume status is Started
    $rc = 1;

    # BUG FIX: the numeric option used to be compared with 'ne ""',
    # which warns on undef; use defined() for an optional integer.
    if (defined $gluster_volume_numbricks && $gluster_volume_numbricks > 0) {
        my $exec_cmd2 = "$gluster_cmd volume status $gluster_volume_name 2> /dev/null | grep '^Brick' | wc -l";
        my $num_active_brick = `$exec_cmd2`;
        chomp $num_active_brick;   # strip the newline from wc -l

        # Fewer active bricks than expected means at least one brick is
        # down.
        $rc = ($num_active_brick < $gluster_volume_numbricks) ? 0 : 1;
    }
} else {
    # volume is Stopped, in maintenance, unknown, ...
    $rc = 0;
}

print "$rc\n";
exit;
|
|
@ -0,0 +1,43 @@
|
|||
#!/usr/bin/perl
# geo-replication via gsync for GlusterFS 3.3
#
# Prints the status column of the matching MASTER/SLAVE session,
# "NotFound" when no session exists, or "ERROR" on a mismatch.
#
# Ex.
#   ./glubix_.pl --volume_name vol1 --slave_url remote::geo-vol1
#
# options:
#   --volume_name vol1
#   --slave_url remote::geo-vol1
#

use strict;
use Getopt::Long qw(:config posix_default no_ignore_case gnu_compat);

my $gluster_volume_name;
my $gluster_slave_url;

my $getopt_result = GetOptions('volume_name=s' => \$gluster_volume_name,
                               'slave_url=s'   => \$gluster_slave_url);

if (!defined $gluster_volume_name || $gluster_volume_name eq "") {
    exit 1;
}
# Avoid interpolating undef when --slave_url was not given.
$gluster_slave_url = '' unless defined $gluster_slave_url;

my $gluster_cmd = "/usr/sbin/gluster";
# BUG FIX: the stderr redirect used to be attached to 'tail' instead of
# the gluster command whose noise it is meant to silence.
my $exec_cmd = "$gluster_cmd volume geo-replication $gluster_volume_name $gluster_slave_url status 2> /dev/null | tail -1";

my $result = `$exec_cmd`;

if ($result =~ '^----' || $result =~ '^No active geo-replication sessions') {
    print "NotFound\n";
    exit 1;
}

# Session line format: <master volume> <slave url> <status>
my ($master_vol, $slave_url, $status) = split(/[\s\t]+/, $result);

if ($master_vol eq $gluster_volume_name && $slave_url eq $gluster_slave_url) {
    print "$status\n";
    exit;
}

print "ERROR\n";
exit 1;
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
#!/usr/bin/perl
# geo-replication via gsync for GlusterFS 3.4+ (tabular status output)
#
# Prints the status column of the matching MASTER/SLAVE session row,
# "NotFound" when no session exists, or "ERROR" when the requested
# session is not in the listing.
#
# Ex.
#   ./glubix_.pl --volume_name vol1 --slave_url remote::geo-vol1
#
# options:
#   --volume_name vol1
#   --slave_url remote::geo-vol1
#

use strict;
use Getopt::Long qw(:config posix_default no_ignore_case gnu_compat);

my $gluster_volume_name;
my $gluster_slave_url;

my $getopt_result = GetOptions('volume_name=s' => \$gluster_volume_name,
                               'slave_url=s'   => \$gluster_slave_url);

if (!defined $gluster_volume_name || $gluster_volume_name eq "") {
    exit 1;
}
# Avoid interpolating undef when --slave_url was not given.
$gluster_slave_url = '' unless defined $gluster_slave_url;

my $gluster_cmd = "/usr/sbin/gluster";
my $exec_cmd = "$gluster_cmd volume geo-replication $gluster_volume_name $gluster_slave_url status 2> /dev/null";

my $result = `$exec_cmd`;

# BUG FIX: the original also tested the longer pattern
# '^No active geo-replication sessions between', which is a strict
# subset of this one; a single check suffices.
if ($result =~ '^No active geo-replication sessions') {
    print "NotFound\n";
    exit 1;
}

# Strip blank lines, the column header and the '----' separators,
# keeping only the per-session rows.
$result =~ s/^\s*\n//g;
$result =~ s/MASTER NODE[A-Z\s]+\n//g;
$result =~ s/-+\s*\n//g;

my @result_array = split(/\n/, $result);
foreach my $line (@result_array) {
    # BUG FIX: removed a leftover debug print of every row here, which
    # corrupted the single-value output expected by the zabbix item.
    my ($master_node, $master_vol, $master_brick, $slave_url, $status,
        $checkpoint_status, $crawl_status) = split(/\s+/, $line);

    if ($master_vol eq $gluster_volume_name && $slave_url eq $gluster_slave_url) {
        print "$status\n";
        exit;
    }
}

print "ERROR\n";
exit 1;
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
#!/usr/bin/perl
# Print the number of peers known to the local glusterd.
# Ex.
#   ./glubix_numpeers.pl
#

use strict;

my $peer_status = `/usr/sbin/gluster peer status 2> /dev/null`;

# Default to 0 when gluster is unavailable or the output is unexpected.
my $peer_count = 0;
$peer_count = $1 if $peer_status =~ m/Number of Peers: ([0-9]+)/;

print "$peer_count\n";

exit;
|
|
@ -0,0 +1,20 @@
|
|||
#!/usr/bin/perl
# Print the UUID of the local glusterd instance, as recorded in its
# info file.
# Ex.
#   ./glubix_uuid.pl

use strict;

my $info_file = "/var/lib/glusterd/glusterd.info";
my $grep_out  = `grep UUID $info_file 2> /dev/null`;

my $uuid;
if ($grep_out =~ m/UUID=([0-9a-f]+-[0-9a-f]+-[0-9a-f]+-[0-9a-f]+-[0-9a-f]+)/) {
    $uuid = $1;
}

# NOTE(review): when the file is missing this prints an empty line,
# matching the original behaviour.
print "$uuid\n";

exit;
|
|
@ -0,0 +1,39 @@
|
|||
#!/usr/bin/perl
# Print the status of a GlusterFS volume:
# Started / Stopped / Created / Unknown, or NotFound when no
# --volume_name was given.
# Ex.
#   ./glubix_volstatus.pl --volume_name vol0
#
# options:
#   --volume_name vol1

use strict;
use Getopt::Long qw(:config posix_default no_ignore_case gnu_compat);

my $gluster_volume_name = '';

my $getopt_result = GetOptions('volume_name=s' => \$gluster_volume_name);

if ($gluster_volume_name eq "") {
    printf "NotFound\n";
    exit 1;
}

my $gluster_cmd = "/usr/sbin/gluster";
my $volume_info = `$gluster_cmd volume info $gluster_volume_name 2> /dev/null`;

# Map the "Status:" line of 'gluster volume info' onto a single word.
my $status =
      $volume_info =~ m/Status: Started/ ? 'Started'
    : $volume_info =~ m/Status: Stopped/ ? 'Stopped'
    : $volume_info =~ m/Status: Created/ ? 'Created'
    :                                      'Unknown';

printf "$status\n";
exit;
|
|
@ -0,0 +1,43 @@
|
|||
#!/usr/bin/perl
# Print the type of a GlusterFS volume.
# Ex.
#   ./glubix_voltype.pl --volume_name vol0
#
# options:
#   --volume_name vol1
# return value type: String
# return value list: Unknown ... Can't parse result of gluster command
#                    Blank ... --volume_name is not set
#                    Distribute
#                    Stripe
#                    Replicate
#                    Striped-Replicate
#                    Distributed-Stripe
#                    Distributed-Replicate
#                    Distributed-Striped-Replicate

use strict;
use Getopt::Long qw(:config posix_default no_ignore_case gnu_compat);

my $voltype;
my $gluster_volume_name = '';

my $getopt_result = GetOptions('volume_name=s' => \$gluster_volume_name);

if ($gluster_volume_name eq '') {
    # BUG FIX: the documented "Blank" value was assigned but never
    # printed before exiting.
    printf "Blank\n";
    exit;
}

my $gluster_cmd = "/usr/sbin/gluster";
my $exec_cmd = "$gluster_cmd volume info $gluster_volume_name 2> /dev/null";

my $result = `$exec_cmd`;

if ($result =~ m/Type: (\S+)/) {
    $voltype = $1;
} else {
    # Can't parse volume type from gluster's output.
    $voltype = "Unknown";
}

printf "$voltype\n";
exit;
|
|
@ -0,0 +1,9 @@
|
|||
UserParameter=gluster.version,/usr/sbin/glusterd -V|head -1|cut -d' ' -f2
|
||||
UserParameter=gluster.uuid,sudo /usr/local/bin/glubix_uuid.pl
|
||||
UserParameter=gluster.numpeers,sudo /usr/local/bin/glubix_numpeers.pl
|
||||
UserParameter=gluster.voltype[*],sudo /usr/local/bin/glubix_voltype.pl --volume_name $1
|
||||
UserParameter=gluster.volstatus[*],sudo /usr/local/bin/glubix_volstatus.pl --volume_name $1
|
||||
UserParameter=gluster.checkvolstatus[*],sudo /usr/local/bin/glubix_checkvolstatus.pl --volume_name $1 --volume_numbricks $2
|
||||
UserParameter=gluster.numbricks[*],sudo /usr/sbin/gluster volume info $1 | grep '^Brick[0-9]*:' | wc -l
|
||||
UserParameter=gluster.numactivebricks[*],sudo /usr/sbin/gluster volume status $1 | grep '^Brick' | wc -l
|
||||
UserParameter=gluster.georepstatus[*],sudo /usr/local/bin/glubix_georepstatus34.pl --volume_name $1 --slave_url $2
|
|
@ -0,0 +1,8 @@
|
|||
TIMEOUT 50
|
||||
TOTALTIMEOUT 9000
|
||||
ONTIMEOUT local
|
||||
default local
|
||||
|
||||
LABEL local
|
||||
MENU LABEL Boot local hard drive
|
||||
LOCALBOOT 0
|
|
@ -0,0 +1,311 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
import sys, getopt
|
||||
import os.path
|
||||
import netaddr
|
||||
import re
|
||||
import paramiko
|
||||
import time
|
||||
from hashlib import sha512
|
||||
from xmlbuilder import XMLBuilder
|
||||
|
||||
IPMI_USERNAME = os.getenv('IPMI_USERNAME', '')
|
||||
IPMI_PASSWORD = os.getenv('IPMI_PASSWORD', '')
|
||||
|
||||
CISCO_USERNAME = os.getenv('CISCO_USERNAME', '')
|
||||
CISCO_PASSWORD = os.getenv('CISCO_PASSWORD', '')
|
||||
|
||||
servers = {
|
||||
'cz5547' : { 'hostname' : 'cz5547-kvm.host-telecom.com', 'mac' : 'a0-d3-c1-ef-2c-d8', 'int1' : 'gi 0/1', 'int2' : 'gi 0/2' },
|
||||
'cz5548' : { 'hostname' : 'cz5548-kvm.host-telecom.com', 'mac' : 'a0-d3-c1-ef-16-ec', 'int1' : 'gi 0/3', 'int2' : 'gi 0/4' },
|
||||
'cz5549' : { 'hostname' : 'cz5549-kvm.host-telecom.com', 'mac' : 'a0-d3-c1-ef-32-cc', 'int1' : 'gi 0/5', 'int2' : 'gi 0/6' },
|
||||
'cz5550' : { 'hostname' : 'cz5550-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-48-4c', 'int1' : 'gi 0/7', 'int2' : 'gi 0/8' },
|
||||
'cz5551' : { 'hostname' : 'cz5551-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-48-fc', 'int1' : 'gi 0/9', 'int2' : 'gi 0/10' },
|
||||
'cz5552' : { 'hostname' : 'cz5552-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4a-88', 'int1' : 'gi 0/11', 'int2' : 'gi 0/12' },
|
||||
'cz5553' : { 'hostname' : 'cz5553-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4a-ac', 'int1' : 'gi 0/19', 'int2' : 'gi 0/20' },
|
||||
'cz5554' : { 'hostname' : 'cz5554-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4a-90', 'int1' : 'gi 0/21', 'int2' : 'gi 0/22' },
|
||||
'cz5555' : { 'hostname' : 'cz5555-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4c-9c', 'int1' : 'gi 0/23', 'int2' : 'gi 0/24' },
|
||||
'cz5556' : { 'hostname' : 'cz5556-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4c-48', 'int1' : 'gi 0/31', 'int2' : 'gi 0/32' },
|
||||
'cz5557' : { 'hostname' : 'cz5557-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4a-08', 'int1' : 'gi 0/33', 'int2' : 'gi 0/34' },
|
||||
'cz5558' : { 'hostname' : 'cz5558-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4c-74', 'int1' : 'gi 0/35', 'int2' : 'gi 0/36' },
|
||||
'cz5559' : { 'hostname' : 'cz5559-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4c-54', 'int1' : 'gi 0/13', 'int2' : 'gi 0/14' },
|
||||
'cz5560' : { 'hostname' : 'cz5560-kvm.host-telecom.com', 'mac' : '00-00-00-00-00-00', 'int1' : 'gi 0/15', 'int2' : 'gi 0/16' },
|
||||
'cz5561' : { 'hostname' : 'cz5561-kvm.host-telecom.com', 'mac' : 'a0-2b-b8-1f-4b-ec', 'int1' : 'gi 0/17', 'int2' : 'gi 0/18' },
|
||||
'cz5562' : { 'hostname' : 'cz5562-kvm.host-telecom.com', 'mac' : '00-00-00-00-00-00', 'int1' : 'gi 0/25', 'int2' : 'gi 0/26' },
|
||||
'cz5563' : { 'hostname' : 'cz5563-kvm.host-telecom.com', 'mac' : '00-00-00-00-00-00', 'int1' : 'gi 0/27', 'int2' : 'gi 0/28' },
|
||||
'cz5564' : { 'hostname' : 'cz5564-kvm.host-telecom.com', 'mac' : '00-00-00-00-00-00', 'int1' : 'gi 0/29', 'int2' : 'gi 0/30' },
|
||||
}
|
||||
|
||||
vlans = {
|
||||
'221' : { 'network' : netaddr.IPNetwork('172.16.39.0/26') },
|
||||
'222' : { 'network' : netaddr.IPNetwork('172.16.39.64/26') },
|
||||
'223' : { 'network' : netaddr.IPNetwork('172.16.39.128/26') },
|
||||
'224' : { 'network' : netaddr.IPNetwork('172.16.39.192/26') },
|
||||
'225' : { 'network' : netaddr.IPNetwork('172.16.37.128/26') },
|
||||
'226' : { 'network' : netaddr.IPNetwork('172.16.37.192/26') },
|
||||
}
|
||||
|
||||
switches = {
|
||||
'cz-sw' : { 'hostname' : '193.161.84.243 ' },
|
||||
}
|
||||
|
||||
class fuelLab:
|
||||
""" Lab definition """
|
||||
def __init__(self):
|
||||
self.name="Lab1"
|
||||
self.fuel = None
|
||||
self.iso = None
|
||||
self.vlan = None
|
||||
self.public_vlan = None
|
||||
self.vlan_range = None
|
||||
self.nodes = []
|
||||
self.tftp_root = "/var/lib/tftpboot"
|
||||
|
||||
def set_host(self,host):
|
||||
if host in servers.keys():
|
||||
self.name = host
|
||||
self.fuel = servers[host]
|
||||
else:
|
||||
print "Node "+node+" not defined"
|
||||
sys.exit(1)
|
||||
|
||||
def add_node(self,node):
|
||||
if re.match('^[1-9a-f]{2}:[1-9a-f]{2}$',node):
|
||||
node = re.sub(':','-',node)
|
||||
for name in servers.keys():
|
||||
if re.search(node+'$', servers[name]['mac']):
|
||||
self.add_node(name)
|
||||
return
|
||||
if node in servers.keys():
|
||||
self.nodes.append(servers[node])
|
||||
else:
|
||||
print "Node "+node+" not defined"
|
||||
sys.exit(1)
|
||||
|
||||
def set_vlan(self,vlan):
|
||||
if vlan in vlans.keys():
|
||||
self.vlan = vlan
|
||||
else:
|
||||
print "Vlan "+vlan+" not defined"
|
||||
sys.exit(1)
|
||||
|
||||
def set_public_vlan(self,vlan):
|
||||
if vlan in vlans.keys():
|
||||
self.public_vlan = vlan
|
||||
else:
|
||||
print "Vlan "+vlan+" not defined"
|
||||
sys.exit(1)
|
||||
|
||||
def set_vlan_range(self,vlan_range):
|
||||
res = re.match(r"(\d+)\-(\d+)",vlan_range)
|
||||
if res:
|
||||
min,max = int(res.group(1)),int(res.group(2))
|
||||
if(max-min > 1 and max-min < 20):
|
||||
self.vlan_range = str(min)+'-'+str(max)
|
||||
else:
|
||||
print "Range is too big"
|
||||
else:
|
||||
print "Wrong range"
|
||||
|
||||
def create_pxe(self):
|
||||
self.pxe_file = "/var/lib/tftpboot/pxelinux.cfg/01-"+self.fuel['mac']
|
||||
f = open(self.pxe_file, "w")
|
||||
ip = vlans[self.vlan]['network']
|
||||
nfs_share = "nfs:" +str(ip.ip+1) + ":" + self.tftp_root + self.fuel_path
|
||||
host_ip = ip.ip + 2
|
||||
host_gw = ip.ip + ip.size - 2
|
||||
host_netmask = ip.netmask
|
||||
f.write("DEFAULT fuel\nPROMPT 0\nTIMEOUT 0\nTOTALTIMEOUT 0\nONTIMEOUT fuel\n\n")
|
||||
f.write("LABEL fuel\nKERNEL %s/isolinux/vmlinuz\nINITRD %s/isolinux/initrd.img\n" % (self.fuel_path, self.fuel_path))
|
||||
f.write("APPEND biosdevname=0 ks=%s repo=%s ip=%s netmask=%s gw=%s hostname=fuel-lab-%s.mirantis.com showmenu=no installdrive=sda ksdevice=eth0 forceformat=yes\n" % \
|
||||
( nfs_share + "/ks.cfg", nfs_share ,host_ip, host_netmask, host_gw, self.name ) )
|
||||
f.close()
|
||||
|
||||
def mac_in_nodes(self,mac):
|
||||
for node in self.nodes:
|
||||
if node['mac'] == mac:
|
||||
return True
|
||||
return False
|
||||
|
||||
def update_dhcpd(self):
|
||||
mac = re.sub('-',':',self.fuel['mac'])
|
||||
fuel = self.fuel
|
||||
ip = vlans[self.vlan]['network']
|
||||
filename = "/tmp/deploy." + str(os.getpid())
|
||||
x = XMLBuilder('network')
|
||||
x.name("lab" + str(self.vlan))
|
||||
x.bridge(name = "br"+self.vlan, stp="off", delay="0")
|
||||
with x.forward(mode = "route", dev="eth0"):
|
||||
x.interface(dev="eth0")
|
||||
with x.ip(address = str(ip.ip+1), netmask="255.255.255.192"):
|
||||
with x.dhcp:
|
||||
x.host(mac=mac, ip=str(ip.ip+2))
|
||||
x.bootp(file="pxelinux.0")
|
||||
x.tftp(root="/var/lib/tftpboot")
|
||||
print str(x)+"\n"
|
||||
f=open(filename,"w")
|
||||
f.write(str(x)+"\n")
|
||||
f.close()
|
||||
os.system("sudo ifconfig br%s down" % self.vlan)
|
||||
os.system("virsh net-destroy lab%s" % self.vlan)
|
||||
os.system("virsh net-create %s" % filename)
|
||||
os.system("sudo brctl addif br%s eth1.%s" % (self.vlan, self.vlan))
|
||||
|
||||
def switch_write(self):
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(
|
||||
paramiko.AutoAddPolicy())
|
||||
ssh.connect('193.161.84.243', username=CISCO_USERNAME, password=CISCO_PASSWORD)
|
||||
sess = ssh.invoke_shell()
|
||||
vlans = "%s,%s" % (self.vlan, self.vlan_range)
|
||||
print sess.recv(5000)
|
||||
sess.send("conf t\n")
|
||||
time.sleep(1)
|
||||
for node in self.nodes + [self.fuel]:
|
||||
print sess.recv(5000)
|
||||
sess.send( "interface %s\nswitchport trunk native vlan %s\nswitchport trunk allowed vlan %s\n" % (node['int1'], self.vlan, vlans) )
|
||||
sess.send( "interface %s\nno switchport trunk native vlan\n" % ( node['int2'] ) )
|
||||
if self.public_vlan:
|
||||
sess.send( "switchport trunk native vlan %s\nswitchport trunk allowed vlan %s\n" % (self.public_vlan, vlans+","+self.public_vlan) )
|
||||
else:
|
||||
sess.send( "switchport trunk allowed vlan %s\n" % (vlans) )
|
||||
time.sleep(1)
|
||||
time.sleep(2)
|
||||
sess.send("end\nexit\n")
|
||||
print sess.recv(5000)
|
||||
|
||||
def reboot_master(self):
|
||||
print "Rebooting Fuel Master: %s" % self.fuel['hostname']
|
||||
os.system("ipmitool -I lanplus -L operator -H " + self.fuel['hostname'] + " -U " + IPMI_USERNAME + " -P '" + IPMI_PASSWORD + "' power cycle")
|
||||
|
||||
def reboot_nodes(self):
|
||||
for node in self.nodes:
|
||||
print "Reboot node: %s" % node['hostname']
|
||||
os.system("ipmitool -I lanplus -L operator -H " + node['hostname'] + " -U " + IPMI_USERNAME + " -P '" + IPMI_PASSWORD + "' power cycle")
|
||||
|
||||
def set_iso(self,iso):
|
||||
iso = os.path.abspath(iso)
|
||||
if os.path.isfile(iso):
|
||||
self.iso = iso
|
||||
self.fuel_path = "/" + sha512(iso).hexdigest()[:16]
|
||||
else:
|
||||
print "ISO: %s not found" % iso
|
||||
sys.exit(1)
|
||||
|
||||
def unpack_iso(self):
|
||||
mount_iso_path = self.tftp_root + self.fuel_path
|
||||
if os.path.ismount(mount_iso_path):
|
||||
return
|
||||
if not os.path.exists(mount_iso_path):
|
||||
os.system("mkdir " + mount_iso_path)
|
||||
os.system("sudo mount -o loop,ro %s %s" % ( self.iso, mount_iso_path) )
|
||||
|
||||
def check_params(self, mode):
|
||||
if not mode:
|
||||
return False
|
||||
if 'install_fuel' in mode and not ( self.fuel and self.iso and self.vlan) :
|
||||
return False
|
||||
if 'reboot' in mode and not ( self.nodes ) :
|
||||
return False
|
||||
if 'configure' in mode and not ( self.fuel and self.vlan and self.vlan_range and self.nodes ) :
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
|
||||
def usage():
|
||||
print '''
|
||||
== For existing configuration you must specify:
|
||||
\nEXAMPLE:\tdeploy.py --host=cz5551 --vlan=221 --iso=/srv/downloads/fuel.iso\n
|
||||
--host Host to use as master node
|
||||
--vlan Preconfigured lab admin vlan
|
||||
--iso ISO to install
|
||||
\n == To reboot nodes you need only: ==
|
||||
\nEXAMPLE:\tdeploy.py (--reboot-nodes|-r) --node cz5547 --node 2c:d8 ...\n
|
||||
--reboot-nodes Reboot only nodes
|
||||
--node=cz0000 Node to reboot
|
||||
\n == For NEW configuration (DevOps team only) ==
|
||||
\nEXAMPLE:\tdeploy.py --host cz5551 --vlan 221 [--public-vlan=222] --vlan-range 300-305 [--iso fuel.iso] --node cz5547 --node cz5548 --node cz5549\n
|
||||
--public-vlan 222 Set untagged eth1 vlan (if needed)
|
||||
--vlan-range 51-55 Vlans for storage/private/management/etc
|
||||
--node Node to include in lab'''
|
||||
|
||||
def main(argv):
|
||||
lab = fuelLab()
|
||||
mode = []
|
||||
nodes = []
|
||||
try:
|
||||
opts, args = getopt.getopt(argv,"hr",["host=","vlan=","public-vlan=","vlan-range=","iso=","node=","help","reboot-nodes"])
|
||||
except getopt.GetoptError:
|
||||
usage()
|
||||
sys.exit(2)
|
||||
for opt, arg in opts:
|
||||
if opt in ( "-h", "--help" ):
|
||||
usage()
|
||||
sys.exit(0)
|
||||
elif opt == "--host":
|
||||
lab.set_host(arg)
|
||||
elif opt == "--vlan":
|
||||
lab.set_vlan(arg)
|
||||
elif opt == "--public-vlan":
|
||||
mode.append('configure')
|
||||
lab.set_public_vlan(arg)
|
||||
elif opt == "--vlan-range":
|
||||
mode.append('configure')
|
||||
lab.set_vlan_range(arg)
|
||||
elif opt == "--iso":
|
||||
mode.append('install_fuel')
|
||||
lab.set_iso(arg)
|
||||
elif opt == "--node":
|
||||
lab.add_node(arg)
|
||||
nodes.append(arg)
|
||||
elif opt in ( "--reboot-nodes", "-r" ):
|
||||
mode.append('reboot')
|
||||
|
||||
if not lab.check_params(mode):
|
||||
usage()
|
||||
exit(1)
|
||||
|
||||
if 'reboot' in mode:
|
||||
lab.reboot_nodes()
|
||||
return
|
||||
if 'configure' in mode:
|
||||
lab.switch_write()
|
||||
lab.update_dhcpd()
|
||||
vlan = vlans[lab.vlan]['network']
|
||||
if lab.public_vlan:
|
||||
vlan_p = vlans[lab.public_vlan]['network']
|
||||
pub_net = vlan_p
|
||||
pub_gw = vlan_p[-2]
|
||||
else:
|
||||
pub_net = pub_gw = "Not available"
|
||||
print '''
|
||||
================================================================================
|
||||
Lab configured:
|
||||
|
||||
Fuel host ip: %s
|
||||
Admin network: ( Untagged eth0 )
|
||||
network: %s
|
||||
gateway: %s
|
||||
Public network: ( Untagged eth1 )
|
||||
network: %s
|
||||
gateway: %s
|
||||
Vlans available: %s
|
||||
|
||||
To install Fuel:
|
||||
deploy.py --host %s --vlan %s --iso /srv/downloads/fuel.iso
|
||||
|
||||
To reboot all nodes:
|
||||
deploy.py -r --node %s
|
||||
================================================================================
|
||||
''' % ( vlan[2], vlan, vlan[-2], pub_net, pub_gw, lab.vlan_range, lab.name, lab.vlan, " --node ".join(nodes) )
|
||||
|
||||
if 'install_fuel' in mode:
|
||||
lab.update_dhcpd()
|
||||
lab.create_pxe()
|
||||
lab.unpack_iso()
|
||||
lab.reboot_master()
|
||||
os.system("echo 'rm %s' | at now + 10 minutes" % lab.pxe_file)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
|
@ -0,0 +1,23 @@
|
|||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet dhcp
|
||||
|
||||
auto eth1.221
|
||||
iface eth1.221 inet manual
|
||||
|
||||
auto eth1.222
|
||||
iface eth1.222 inet manual
|
||||
|
||||
auto eth1.223
|
||||
iface eth1.223 inet manual
|
||||
|
||||
auto eth1.224
|
||||
iface eth1.224 inet manual
|
||||
|
||||
auto eth1.225
|
||||
iface eth1.225 inet manual
|
||||
|
||||
auto eth1.226
|
||||
iface eth1.226 inet manual
|
|
@ -0,0 +1 @@
|
|||
ALL ALL=NOPASSWD: /bin/mount, /bin/umount, /sbin/ifconfig, /sbin/brctl
|
|
@ -0,0 +1,75 @@
|
|||
#!/usr/bin/perl -w
#
# $jwk: bind96-stats-parse.pl,v 1.4 2011/08/22 16:11:13 jwk Exp $
#
# Turn the statistics dump written by BIND 9.6+ into easily parseable
# "section[+view]:item=count" lines, one statistic per line.  Only
# dumps fresher than $INTERVAL seconds are emitted, so repeated runs do
# not re-report data that was already collected.
#
# Joel Knight
# knight.joel gmail.com
# 2010.12.26
#
# http://www.packetmischief.ca/monitoring-bind9/


use strict;
use warnings;

# Statistics are pulled every $INTERVAL seconds; older dumps are
# considered already processed.
my $INTERVAL = 300;

my $section;    # current "++ ... ++" heading, lowercased/underscored
my $view_name;  # current non-default view, if any
my $stat;       # statistic name
my $count;      # statistic value

my $now = time;

my $emit = 0;   # becomes true once a fresh-enough dump header is seen

while (<>) {
  chomp;

  # +++ Statistics Dump +++ (1293358206)
  if (m/^\+\+\+ Statistics Dump \+\+\+ \((\d+)\)/) {
    my $age = $now - $1;
    if ($age >= $INTERVAL) {
      # this dump was already processed on a previous run
      next;
    }
    print scalar localtime $1, "\n";
    $emit++;
  }

  next unless $emit;

  # ++ Incoming Requests ++
  # ++ Socket I/O Statistics ++
  if (m/^\+\+ ([^+]+) \+\+$/) {
    $section = lc $1;
    $section =~ s/[\s\>\<\/\(\)]/_/g;
    $view_name = $stat = $count = "";
  }
  # [View: custom_view_name]
  # the view name "default" is skipped so the word "default" never
  # appears in the output.
  if (m/^\[View: (\w+)(| .*)\]/) {
    next if $1 eq "default";
    $view_name = $1;
  }

  #  407104 QUERY
  #  3379 EDNS(0) query failures
  #  134 queries with RTT < 10ms
  if (m/^\s+(\d+) ([^\n]+)/) {
    $count = lc $1;
    $count =~ s/[\s\>\<\/\(\)]/_/g;
    $stat = lc $2;
    $stat =~ s/[\s\>\<\/\(\)]/_/g;

    if ($view_name) {
      print "$section\+$view_name:$stat=$count\n";
    } else {
      print "$section:$stat=$count\n";
    }
  }
}
|
|
@ -0,0 +1 @@
|
|||
UserParameter=bind.stats[*],(/usr/local/bin/bind96-stats-parse.pl < /var/lib/bind/statistics.txt | /bin/fgrep "$1=" || echo "$1=0") | /usr/bin/cut -d= -f2
|
|
@ -0,0 +1,14 @@
|
|||
UserParameter=nginx.active[*],/usr/bin/curl -s http://127.0.0.1:61929/ | awk '/^Active/ {print $NF}'
|
||||
UserParameter=nginx.reading[*],/usr/bin/curl -s http://127.0.0.1:61929/ | awk '/Reading/ {print $$2}'
|
||||
UserParameter=nginx.writing[*],/usr/bin/curl -s http://127.0.0.1:61929/ | awk '/Writing/ {print $$4}'
|
||||
UserParameter=nginx.waiting[*],/usr/bin/curl -s http://127.0.0.1:61929/ | awk '/Waiting/ {print $$6}'
|
||||
UserParameter=nginx.accepted[*],/usr/bin/curl -s http://127.0.0.1:61929/ | awk '/^[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+[0-9]+/ {print $$1}'
|
||||
UserParameter=nginx.handled[*],/usr/bin/curl -s http://127.0.0.1:61929/ | awk '/^[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+[0-9]+/ {print $$2}'
|
||||
UserParameter=nginx.requests[*],/usr/bin/curl -s http://127.0.0.1:61929/ | awk '/^[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+[0-9]+/ {print $$3}'
|
||||
UserParameter=nginx.logs.requests[*],sudo /usr/bin/tailnew "$1" nginx.logs.requests.$(echo -n $1 | sed 's~/~_~g') | wc -l
|
||||
UserParameter=nginx.logs.requests.2xx[*],sudo /usr/bin/tailnew "$1" nginx.logs.requests.$(echo -n $1 | sed 's~/~_~g').2xx | awk 'BEGIN { i = 0 } {if($$8 >= 200 && $$8 < 300) { i++ }} END {print i}'
|
||||
UserParameter=nginx.logs.requests.3xx[*],sudo /usr/bin/tailnew "$1" nginx.logs.requests.$(echo -n $1 | sed 's~/~_~g').3xx | awk 'BEGIN { i = 0 } {if($$8 >= 300 && $$8 < 400) { i++ }} END {print i}'
|
||||
UserParameter=nginx.logs.requests.4xx[*],sudo /usr/bin/tailnew "$1" nginx.logs.requests.$(echo -n $1 | sed 's~/~_~g').4xx | awk 'BEGIN { i = 0 } {if($$8 >= 400 && $$8 < 500) { i++ }} END {print i}'
|
||||
UserParameter=nginx.logs.requests.5xx[*],sudo /usr/bin/tailnew "$1" nginx.logs.requests.$(echo -n $1 | sed 's~/~_~g').5xx | awk 'BEGIN { i = 0 } {if($$8 >= 500 && $$8 < 600) { i++ }} END {print i}'
|
||||
UserParameter=nginx.timings.frontend[*],sudo /usr/bin/tailnew "$1" nginx.timings_$(echo -n "$1" | sed 's~/~_~g')_frontend | awk '{sum += $$NF} END {print sum / NR}'
|
||||
UserParameter=nginx.timings.backend[*],sudo /usr/bin/tailnew "$1" nginx.timings_$(echo -n "$1" | sed 's~/~_~g')_backend | awk '{sum += $$(NF-5)} END {print sum / NR}'
|
|
@ -0,0 +1,121 @@
|
|||
#!/bin/bash
#
# Source: https://sys4.de/en/blog/2013/08/06/monitoring-certificates-zabbix/
#
# Authors:
# Michael Schwartzkopff <ms@sys4.de>
# Marc Schiffbauer <m@sys4.de>
#
# Prints the number of days until the certificate served on HOST:PORT
# expires (default), or the certificate issuer CN with -i.
#

trap clean_exit EXIT

# Remove the openssl stderr capture file on any exit path.
clean_exit() {
  [[ $TMP && -f $TMP ]] && rm -f "$TMP"
}

debug() {
  [[ ${DEBUG:-0} -gt 0 ]] && echo "$*"
  return 0
}

# Run the given command line, but only in debug mode.
# BUG FIX: the original executed "$*" directly, which treats the whole
# string as a single command name and always fails; eval is required to
# honour the embedded redirections.  Input here is our own literal
# command string, never user data.
debugexec() {
  [[ ${DEBUG:-0} -gt 0 ]] && eval "$*"
  return 0
}

error() {
  echo "ERROR: $*"
}

die() {
  error "$*"
  exit 1
}

usage() {
  echo "
Usage:
$(basename $0) [options]

-H <hostname> Hostname to connect to. Default: localhost
-P <protocol> Protocol to use (SSL, SMTP, IMAP, POP3, FTP, XMPP). Default: SSL
-d Turn on debug mode
-i Get certificate issuer instead of days left until certificate will expire
-p <port> Port to connect to. Defaults: 443 (SSL), 25 (SMTP), 143 (IMAP),
110 (POP3), 21 (FTP), 5269 (XMPP)

"
  exit 0
}

while getopts "idhH:p:P:" opt; do
  case "$opt" in
    H) HOST="$OPTARG";;
    P) PROTO="$OPTARG";;
    d) DEBUG=1; set -x;;
    i) WHAT="ISSUER";;
    p) PORT="$OPTARG";;
    *) usage;;
  esac
done

# set default values
HOST=${HOST:-localhost}
PROTO=${PROTO:-SSL}
WHAT=${WHAT:-TIME}

# NOTE(review): PORT is still empty here unless -p was given; the
# protocol default is assigned just below (original behaviour kept).
debug "Checking protocol $PROTO on ${HOST}:${PORT}"

case $PROTO in
  SSL)
    PORT=${PORT:-443}
    S_CLIENT_OPTS=" -host $HOST -port $PORT -showcerts"
    ;;
  SMTP)
    PORT=${PORT:-25}
    S_CLIENT_OPTS="-connect $HOST:$PORT -starttls smtp"
    ;;
  IMAP)
    PORT=${PORT:-143}
    S_CLIENT_OPTS="-connect $HOST:$PORT -starttls imap"
    ;;
  POP3)
    PORT=${PORT:-110}
    S_CLIENT_OPTS="-connect $HOST:$PORT -starttls pop3"
    ;;
  FTP)
    PORT=${PORT:-21}
    S_CLIENT_OPTS="-connect $HOST:$PORT -starttls ftp"
    ;;
  XMPP)
    PORT=${PORT:-5269}
    S_CLIENT_OPTS="-connect $HOST:$PORT -starttls xmpp"
    ;;
  *)
    die "Unknown protocol"
    ;;
esac

# BUG FIX: $TMP was referenced in the debug redirect (and by die) before
# it was ever assigned; create it once, up front, for all branches.
TMP="$(mktemp)" || die "mktemp failed"

debug "Certificate:"
debugexec "openssl s_client $S_CLIENT_OPTS </dev/null 2>$TMP"

case $WHAT in
  TIME)
    END_DATE="$(openssl s_client $S_CLIENT_OPTS </dev/null 2>$TMP | openssl x509 -dates -noout | sed -n 's/notAfter=//p')"
    NOW="$(date '+%s')"
    if [[ $END_DATE ]]; then
      SEC_LEFT="$(date '+%s' --date "${END_DATE}")"
      # whole days remaining until notAfter
      echo $((($SEC_LEFT-$NOW)/24/3600))
    else
      die "openssl error: $(cat $TMP)"
    fi
    ;;
  ISSUER)
    openssl s_client $S_CLIENT_OPTS </dev/null 2>$TMP | openssl x509 -issuer -noout | sed -n 's/.*CN=//p'
    ;;
  *)
    die "BUG: unknown WHAT value: $WHAT"
    ;;
esac

exit 0
|
|
@ -0,0 +1,52 @@
|
|||
# Class: fuel_project::apps::firewall
#
# Applies the host firewall configuration.  Rules are read from hiera key
# 'fuel_project::apps::firewall::rules'; when the key is absent the class
# manages nothing.  Firewall resources not declared in the catalog are
# purged, so the host ends up with exactly: baseline accepts (ICMP,
# loopback, established), the hiera-provided rules, and a final drop-all.
#
class fuel_project::apps::firewall {
  $rules = hiera_hash('fuel_project::apps::firewall::rules', undef)

  if ($rules) {
    case $::osfamily {
      'Debian': {
        # iptables-persistent makes rules survive a reboot; install it
        # before any firewall resources are realized.
        package { 'iptables-persistent' :
          ensure => 'present',
          before => Resources['firewall']
        }
      }
      default: { }
    }

    # Purge firewall rules that are not managed by Puppet.
    resources { 'firewall' :
      purge => true,
    }

    # Baseline accepts, chained in order: ICMP, loopback traffic, and
    # already-established/related connections.
    firewall { '0000 - accept all icmp' :
      proto   => 'icmp',
      action  => 'accept',
      require => undef,
    }->
    firewall { '0001 - accept all to lo interface' :
      proto   => 'all',
      iniface => 'lo',
      action  => 'accept',
    }->
    firewall { '0002 - accept related established rules' :
      proto   => 'all',
      ctstate => ['RELATED', 'ESTABLISHED'],
      action  => 'accept',
    }

    # Hiera-provided rules slot in between the baseline accepts and the
    # final drop-all rule.
    create_resources(firewall, $rules, {
      before  => Firewall['9999 - drop all'],
      require => [
        Firewall['0000 - accept all icmp'],
        Firewall['0001 - accept all to lo interface'],
        Firewall['0002 - accept related established rules'],
      ]
    })

    # Anything not explicitly accepted above is dropped.
    firewall { '9999 - drop all' :
      proto  => 'all',
      action => 'drop',
      before => undef,
    }
  }
}
|
|
@ -0,0 +1,105 @@
|
|||
# Class: fuel_project::apps::lodgeit
#
# Deploys the Lodgeit paste service behind nginx.  Plain HTTP requests are
# redirected (301) to HTTPS; the HTTPS vhost forwards to the Lodgeit uwsgi
# backend on 127.0.0.1:4634 and serves static files and branded error
# pages directly.
#
# Parameters:
#   [*ssl_certificate_contents*] - PEM data written to $ssl_certificate_file
#   [*ssl_key_contents*]         - PEM data written to $ssl_key_file
#   [*ssl_certificate_file*]     - path of the certificate on disk
#   [*ssl_key_file*]             - path of the private key on disk
#   [*service_fqdn*]             - vhost server name(s)
#   [*nginx_access_log*]         - nginx access log path
#   [*nginx_error_log*]          - nginx error log path
#   [*nginx_log_format*]         - nginx log format name
#   [*paste_header_contents*]    - HTML snippet used as the Lodgeit header
#
class fuel_project::apps::lodgeit (
  $ssl_certificate_contents,
  $ssl_key_contents,
  $ssl_certificate_file = '/etc/ssl/certs/paste.crt',
  $ssl_key_file = '/etc/ssl/private/paste.key',
  $service_fqdn = [$::fqdn],
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $paste_header_contents = '<h1>Lodge It</h1>',
) {
  # NOTE(review): the guard checks Class['::nginx'] but declares
  # ::fuel_project::nginx — presumably the latter wraps ::nginx; confirm.
  if (! defined(Class['::nginx'])) {
    class { '::fuel_project::nginx' :}
  }
  class { '::lodgeit::web' :}

  file { $ssl_certificate_file :
    ensure  => 'present',
    mode    => '0700',
    owner   => 'root',
    group   => 'root',
    content => $ssl_certificate_contents,
  }

  file { $ssl_key_file :
    ensure  => 'present',
    mode    => '0700',
    owner   => 'root',
    group   => 'root',
    content => $ssl_key_contents,
  }

  # Custom page header injected into the deployed Lodgeit tree.
  file { '/usr/share/lodgeit/lodgeit/views/header.html' :
    ensure  => 'present',
    content => $paste_header_contents,
    require => Class['::lodgeit::web'],
  }

  # HTTP vhost: permanent redirect of every request to HTTPS.
  # NOTE(review): $service_fqdn defaults to an array; interpolating it into
  # the redirect URL relies on puppet's array-to-string conversion — verify
  # the rendered value when more than one name is configured.
  ::nginx::resource::vhost { 'paste' :
    ensure              => 'present',
    server_name         => $service_fqdn,
    listen_port         => 80,
    www_root            => '/var/www',
    access_log          => $nginx_access_log,
    error_log           => $nginx_error_log,
    format_log          => $nginx_log_format,
    location_cfg_append => {
      return => "301 https://${service_fqdn}\$request_uri",
    },
  }

  # HTTPS vhost proxying to the Lodgeit uwsgi backend.
  ::nginx::resource::vhost { 'paste-ssl' :
    ensure              => 'present',
    listen_port         => 443,
    ssl_port            => 443,
    server_name         => $service_fqdn,
    ssl                 => true,
    ssl_cert            => $ssl_certificate_file,
    ssl_key             => $ssl_key_file,
    ssl_cache           => 'shared:SSL:10m',
    ssl_session_timeout => '10m',
    ssl_stapling        => true,
    ssl_stapling_verify => true,
    access_log          => $nginx_access_log,
    error_log           => $nginx_error_log,
    format_log          => $nginx_log_format,
    uwsgi               => '127.0.0.1:4634',
    location_cfg_append => {
      uwsgi_intercept_errors   => 'on',
      'error_page 403'         => '/fuel-infra/403.html',
      'error_page 404'         => '/fuel-infra/404.html',
      'error_page 500 502 504' => '/fuel-infra/5xx.html',
    },
    require             => [
      File[$ssl_certificate_file],
      File[$ssl_key_file],
    ],
  }

  # Static assets are served directly by nginx rather than via uwsgi.
  ::nginx::resource::location { 'paste-ssl-static' :
    ensure              => 'present',
    vhost               => 'paste-ssl',
    ssl                 => true,
    ssl_only            => true,
    location            => '/static/',
    www_root            => '/usr/share/lodgeit/lodgeit',
    location_cfg_append => {
      'error_page 403'         => '/fuel-infra/403.html',
      'error_page 404'         => '/fuel-infra/404.html',
      'error_page 500 502 504' => '/fuel-infra/5xx.html',
    },
  }

  # Branded error pages served from a shared directory.
  ::nginx::resource::location { 'paste-error-pages' :
    ensure   => 'present',
    vhost    => 'paste-ssl',
    location => '~ ^\/(mirantis|fuel-infra)\/(403|404|5xx)\.html$',
    ssl      => true,
    ssl_only => true,
    www_root => '/usr/share/error_pages',
  }
}
|
|
@ -0,0 +1,111 @@
|
|||
# Class: fuel_project::apps::mirror
#
# Serves a package mirror over HTTP (autoindexed nginx vhost) and exposes
# it over rsync: a public read-only 'mirror' share and, when
# $rsync_writable_share is true, a writable 'mirror-sync' share restricted
# to $sync_hosts_allow.
#
# NOTE(review): $firewall_allow_sources and $port are declared but not
# referenced anywhere in this class — confirm whether they are dead
# parameters or consumed via hiera elsewhere.
#
class fuel_project::apps::mirror (
  $autoindex = 'on',
  $dir = '/var/www/mirror',
  $dir_group = 'www-data',
  $dir_owner = 'www-data',
  $firewall_allow_sources = {},
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $port = 80,
  $rsync_mirror_lockfile = '/var/run/rsync_mirror.lock',
  $rsync_mirror_lockfile_rw = '/var/run/rsync_mirror_sync.lock',
  $rsync_rw_share_comment = 'Fuel mirror sync',
  $rsync_share_comment = 'Fuel mirror rsync share',
  $rsync_writable_share = true,
  $service_aliases = [],
  $service_fqdn = "mirror.${::fqdn}",
  $sync_hosts_allow = [],
) {
  if(!defined(Class['rsync'])) {
    class { 'rsync' :
      package_ensure => 'present',
    }
  }

  # Make sure the owner and group of the mirror tree exist.
  ensure_resource('user', $dir_owner, {
    ensure => 'present',
  })

  ensure_resource('group', $dir_group, {
    ensure => 'present',
  })

  file { $dir :
    ensure  => 'directory',
    owner   => $dir_owner,
    group   => $dir_group,
    mode    => '0755',
    require => [
      Class['nginx'],
      User[$dir_owner],
      Group[$dir_group],
    ],
  }

  if (!defined(Class['::rsync::server'])) {
    class { '::rsync::server' :
      gid        => 'root',
      uid        => 'root',
      use_chroot => 'yes',
      use_xinetd => false,
    }
  }

  # Public read-only rsync share of the mirror tree.
  ::rsync::server::module{ 'mirror':
    comment         => $rsync_share_comment,
    uid             => 'nobody',
    gid             => 'nogroup',
    list            => 'yes',
    lock_file       => $rsync_mirror_lockfile,
    max_connections => 100,
    path            => $dir,
    read_only       => 'yes',
    write_only      => 'no',
    require         => File[$dir],
  }

  if ($rsync_writable_share) {
    # Writable share used to push mirror content; every host not listed in
    # $sync_hosts_allow is denied.
    ::rsync::server::module{ 'mirror-sync':
      comment         => $rsync_rw_share_comment,
      uid             => $dir_owner,
      gid             => $dir_group,
      hosts_allow     => $sync_hosts_allow,
      hosts_deny      => ['*'],
      incoming_chmod  => '0755',
      outgoing_chmod  => '0644',
      list            => 'yes',
      lock_file       => $rsync_mirror_lockfile_rw,
      max_connections => 100,
      path            => $dir,
      read_only       => 'no',
      write_only      => 'no',
      require         => [
        File[$dir],
        User[$dir_owner],
        Group[$dir_group],
      ],
    }
  }

  if (!defined(Class['::fuel_project::nginx'])) {
    class { '::fuel_project::nginx' :}
  }
  # NOTE(review): with the default $service_fqdn the first two server_name
  # entries are identical, and an empty $service_aliases contributes an
  # empty string — harmless for nginx, but worth confirming.
  ::nginx::resource::vhost { 'mirror' :
    ensure              => 'present',
    www_root            => $dir,
    access_log          => $nginx_access_log,
    error_log           => $nginx_error_log,
    format_log          => $nginx_log_format,
    server_name         => [
      $service_fqdn,
      "mirror.${::fqdn}",
      join($service_aliases, ' ')
    ],
    location_cfg_append => {
      autoindex => $autoindex,
    },
  }
}
|
|
@ -0,0 +1,135 @@
|
|||
# == Class: fuel_project::apps::mirror_npm
#
# Maintains a local npm registry mirror: the 'npm-mirror' gem synchronizes
# packages from $upstream_mirror via cron, and nginx serves the resulting
# tree with registry-compatible content types.
#
class fuel_project::apps::mirror_npm (
  $cron_frequency = '*/5',
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $npm_dir = '/var/www/npm_mirror',
  $parallelism = 10,
  $recheck = false,
  $service_fqdn = $::fqdn,
  $upstream_mirror = 'http://registry.npmjs.org/',
) {

  validate_bool(
    $recheck,
  )

  # Ruby toolchain required to build/install the npm-mirror gem.
  $packages = [
    'ruby',
    'ruby-dev',
  ]

  package { $packages :
    ensure => installed,
  }

  package { 'npm-mirror' :
    ensure   => '0.0.1',
    provider => gem,
    require  => Package[$packages],
  }

  ensure_resource('file', '/var/www', {
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  })

  file { $npm_dir :
    ensure  => 'directory',
    owner   => 'npmuser',
    group   => 'www-data',
    require => [
      User['npmuser'],
      File['/var/www'],
    ]
  }

  # Dedicated system user that owns the mirror and runs the cron job.
  user { 'npmuser' :
    ensure     => 'present',
    home       => '/var/lib/npm',
    comment    => 'Service used to run npm mirror synchronization',
    managehome => true,
    system     => true,
  }

  file { '/etc/npm_mirror/' :
    ensure  => 'directory',
    owner   => 'npmuser',
    group   => 'npmuser',
    require => User['npmuser'],
  }

  # npm-mirror configuration rendered from class parameters
  # ($parallelism, $recheck, $upstream_mirror, ...).
  file { '/etc/npm_mirror/config.yml' :
    ensure  => 'present',
    owner   => 'npmuser',
    group   => 'npmuser',
    mode    => '0644',
    content => template('fuel_project/apps/npm_mirror.erb'),
    replace => true,
    require => [
      User['npmuser'],
      File['/etc/npm_mirror/'],
    ],
  }

  ::nginx::resource::vhost { 'npm_mirror' :
    ensure               => 'present',
    access_log           => $nginx_access_log,
    error_log            => $nginx_error_log,
    format_log           => $nginx_log_format,
    www_root             => $npm_dir,
    server_name          => [$service_fqdn],
    index_files          => ['index.json'],
    use_default_location => false,
  }

  # Hide internal *.etag bookkeeping files from clients.
  ::nginx::resource::location { 'etag' :
    ensure              => present,
    location            => '~ \.etag$',
    vhost               => 'npm_mirror',
    location_custom_cfg => {
      return => '404',
    },
  }

  # Serve package metadata with a JSON content type.
  ::nginx::resource::location { 'json' :
    ensure              => present,
    location            => '~ /index\.json$',
    vhost               => 'npm_mirror',
    location_custom_cfg => {
      default_type => 'application/json',
    },
  }

  # Registry '/-/all/since' queries are answered from the static
  # '/-/all/' index.
  ::nginx::resource::location { 'all' :
    ensure              => present,
    location            => '= /-/all/since',
    vhost               => 'npm_mirror',
    location_custom_cfg => {
      rewrite => '^ /-/all/',
    },
  }

  # Lock directory used by the cron job's flock.
  file { '/var/run/npm' :
    ensure  => 'directory',
    owner   => 'npmuser',
    group   => 'root',
    require => User['npmuser'],
  }

  # flock -n prevents overlapping runs; timeout kills a stuck sync after
  # 30 minutes (SIGKILL two minutes later).
  cron { 'npm-mirror' :
    minute      => $cron_frequency,
    command     => 'flock -n /var/run/npm/mirror.lock timeout -k 2m 30m npm-mirror /etc/npm_mirror/config.yml 2>&1 | logger -t npm-mirror',
    environment => 'PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin',
    user        => 'npmuser',
    require     => [
      User['npmuser'],
      File['/etc/npm_mirror/config.yml'],
    ],
  }

}
|
|
@ -0,0 +1,113 @@
|
|||
# Class: fuel_project::apps::mirror_pypi
#
# Maintains a local PyPI mirror using bandersnatch, synchronized from
# $mirror_master via cron; nginx serves the mirror's web/ directory.
#
class fuel_project::apps::mirror_pypi (
  $cron_frequency = '*/5',
  $mirror_delete_packages = true,
  $mirror_dir = '/var/www/pypi_mirror',
  $mirror_master = 'https://pypi.python.org',
  $mirror_stop_on_error = true,
  $mirror_timeout = 10,
  $mirror_workers = 5,
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $service_fqdn = $::fqdn,
) {

  validate_bool(
    $mirror_delete_packages,
    $mirror_stop_on_error,
  )

  $packages = [
    'python-bandersnatch-wrapper',
    'python-pip',
  ]

  ensure_packages($packages)

  # bandersnatch itself comes from pip, pinned to a known-good version.
  package { 'bandersnatch' :
    ensure   => '1.8',
    provider => pip,
    require  => Package[$packages],
  }

  ensure_resource('file', '/var/www', {
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  })

  file { $mirror_dir :
    ensure  => 'directory',
    owner   => 'pypi',
    group   => 'www-data',
    require => [
      User['pypi'],
      File['/var/www'],
    ]
  }

  # Dedicated system user that owns the mirror and runs the cron job.
  user { 'pypi' :
    ensure     => 'present',
    home       => '/var/lib/pypi',
    comment    => 'Service used to run pypi mirror synchronization',
    managehome => true,
    system     => true,
  }

  # bandersnatch configuration rendered from class parameters.
  file { '/etc/bandersnatch.conf' :
    ensure  => 'present',
    owner   => 'pypi',
    group   => 'pypi',
    mode    => '0600',
    content => template('fuel_project/apps/bandersnatch.conf.erb'),
    require => [
      User['pypi'],
      Package[$packages],
    ]
  }

  # Configure webserver to serve the web/ sub-directory of the mirror.
  ::nginx::resource::vhost { $service_fqdn :
    ensure           => 'present',
    autoindex        => 'on',
    access_log       => $nginx_access_log,
    error_log        => $nginx_error_log,
    format_log       => $nginx_log_format,
    www_root         => "${mirror_dir}/web",
    server_name      => [$service_fqdn],
    vhost_cfg_append => {
      charset => 'utf-8',
    }
  }

  ::nginx::resource::location { 'pypi_mirror_root' :
    ensure   => 'present',
    vhost    => $service_fqdn,
    www_root => "${mirror_dir}/web",
  }

  # Lock directory used by the cron job's flock.
  file { '/var/run/bandersnatch' :
    ensure  => 'directory',
    owner   => 'pypi',
    group   => 'root',
    require => [
      User['pypi'],
      Package[$packages],
    ]
  }

  # flock -n prevents overlapping runs; timeout kills a stuck sync after
  # 30 minutes (SIGKILL two minutes later).
  cron { 'pypi-mirror' :
    minute      => $cron_frequency,
    command     => 'flock -n /var/run/bandersnatch/mirror.lock timeout -k 2m 30m /usr/bin/run-bandersnatch 2>&1 | logger -t pypi-mirror',
    user        => 'pypi',
    environment => 'PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin',
    require     => [
      User['pypi'],
      Package[$packages],
    ]
  }

}
|
|
@ -0,0 +1,94 @@
|
|||
# == Class: fuel_project::apps::mirror_rubygems
#
# Maintains a local rubygems mirror using the rubygems-mirror gem,
# synchronized from $upstream_mirror via cron; nginx serves the result.
#
# NOTE(review): $parallelism is declared but not referenced in this class
# body — presumably consumed by the .mirrorrc template; confirm.
#
class fuel_project::apps::mirror_rubygems (
  $cron_frequency = '*/5',
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $parallelism = '10',
  $rubygems_dir = '/var/www/rubygems_mirror',
  $service_fqdn = $::fqdn,
  $upstream_mirror = 'http://rubygems.org',
) {

  package { 'rubygems-mirror' :
    ensure   => '1.0.1',
    provider => gem,
  }

  ensure_resource('file', '/var/www', {
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  })

  file { $rubygems_dir :
    ensure  => 'directory',
    owner   => 'rubygems',
    group   => 'www-data',
    require => [
      User['rubygems'],
      File['/var/www'],
    ]
  }

  # Dedicated system user that owns the mirror and runs the cron job.
  user { 'rubygems' :
    ensure     => 'present',
    home       => '/var/lib/rubygems',
    comment    => 'Service used to run rubygems mirror synchronization',
    managehome => true,
    system     => true,
  }

  file { '/var/lib/rubygems/.gem' :
    ensure  => 'directory',
    owner   => 'rubygems',
    group   => 'rubygems',
    require => User['rubygems'],
  }

  # 'gem mirror' reads its configuration from ~/.gem/.mirrorrc.
  file { '/var/lib/rubygems/.gem/.mirrorrc' :
    ensure  => 'present',
    owner   => 'rubygems',
    group   => 'rubygems',
    mode    => '0600',
    content => template('fuel_project/apps/rubygems_mirrorrc.erb'),
    replace => true,
    require => [
      User['rubygems'],
      File['/var/lib/rubygems/.gem'],
    ],
  }

  ::nginx::resource::vhost { $service_fqdn :
    ensure      => 'present',
    autoindex   => 'on',
    access_log  => $nginx_access_log,
    error_log   => $nginx_error_log,
    format_log  => $nginx_log_format,
    www_root    => $rubygems_dir,
    server_name => [$service_fqdn]
  }

  ::nginx::resource::location { 'rubygems_mirror_root' :
    ensure   => present,
    vhost    => $service_fqdn,
    www_root => $rubygems_dir,
  }

  # Lock directory used by the cron job's flock.
  file { '/var/run/rubygems' :
    ensure  => 'directory',
    owner   => 'rubygems',
    group   => 'root',
    require => User['rubygems'],
  }

  # flock -n prevents overlapping runs; timeout kills a stuck sync after
  # 30 minutes (SIGKILL two minutes later).
  cron { 'rubygems-mirror' :
    minute      => $cron_frequency,
    command     => 'flock -n /var/run/rubygems/mirror.lock timeout -k 2m 30m gem mirror 2>&1 | logger -t rubygems-mirror',
    environment => 'PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin',
    user        => 'rubygems',
  }

}
|
|
@ -0,0 +1,16 @@
|
|||
# Class: fuel_project::apps::monitoring::mysql::server
#
# Zabbix monitoring for a MySQL server: installs the agent item
# definitions and gives the zabbix user database credentials copied from
# root's .my.cnf.
#
class fuel_project::apps::monitoring::mysql::server {
  zabbix::item { 'mysql' :
    content => 'puppet:///modules/fuel_project/apps/monitoring/mysql/mysql_items.conf',
  }

  # Copy root's MySQL credentials for the zabbix agent checks; readable
  # only by the zabbix user.
  file { '/var/lib/zabbix/.my.cnf' :
    ensure  => 'present',
    source  => '/root/.my.cnf',
    require => Class['::mysql::server'],
    owner   => 'zabbix',
    group   => 'zabbix',
    mode    => '0600',
  }
}
|
|
@ -0,0 +1,64 @@
|
|||
# Class: fuel_project::apps::partnerappliance
#
# Hosts partner appliance images: uploads happen through an rsync-over-ssh
# account locked down with a forced command (no shell, no forwarding), and
# nginx serves the uploaded data read-only.
#
# Parameters:
#   [*authorized_keys*] - hash of ssh_authorized_key resources allowed to
#                         upload (passed to create_resources)
#   [*group*]           - group owning the data tree
#   [*home_dir*]        - upload account home directory
#   [*data_dir*]        - directory uploads land in / nginx web root
#   [*user*]            - upload account name
#   [*vhost*]           - nginx vhost resource title
#   [*service_fqdn*]    - vhost server name
#
class fuel_project::apps::partnerappliance (
  $authorized_keys,
  $group = 'appliance',
  $home_dir = '/var/www/appliance',
  $data_dir = "${home_dir}/data",
  $user = 'appliance',
  $vhost = 'appliance',
  $service_fqdn = "${vhost}.${::domain}",
) {

  # manage user $HOME manually, since we don't need .bash* stuff
  # but only ~/.ssh/
  file { $home_dir :
    ensure  => 'directory',
    owner   => $user,
    group   => $group,
    mode    => '0755',
    require => User[$user]
  }

  file { $data_dir :
    ensure  => 'directory',
    owner   => $user,
    group   => $group,
    mode    => '0755',
    require => [
      File[$home_dir],
    ]
  }

  user { $user :
    ensure     => 'present',
    system     => true,
    managehome => false,
    home       => $home_dir,
    shell      => '/bin/sh',
  }

  # Key options: force a single rsync server command into $data_dir and
  # disable forwarding/pty/user-rc, so the key can only be used to upload.
  $opts = [
    "command=\"rsync --server -rlpt --delete . ${data_dir}\"",
    'no-agent-forwarding',
    'no-port-forwarding',
    'no-user-rc',
    'no-X11-forwarding',
    'no-pty',
  ]

  create_resources(ssh_authorized_key, $authorized_keys, {
    ensure  => 'present',
    user    => $user,
    require => [
      File[$home_dir],
      User[$user],
    ],
    options => $opts,
  })

  ::nginx::resource::vhost { $vhost :
    server_name => [ $service_fqdn ],
    www_root    => $data_dir,
  }
}
|
|
@ -0,0 +1,70 @@
|
|||
# Class: fuel_project::apps::partnershare
#
# Password-protected nginx file share with an ssh upload account.  Shared
# files older than 30 days are removed hourly by cron.
#
# Parameters:
#   [*authorized_key*]       - ssh-rsa public key allowed to upload
#   [*apply_firewall_rules*] - passed as external_host to
#                              fuel_project::common
#   [*htpasswd_content*]     - contents of the nginx htpasswd file
#
class fuel_project::apps::partnershare (
  $authorized_key,
  $apply_firewall_rules = false,
  $htpasswd_content = '',
) {

  # used to download magnet links
  ensure_packages(['python-seed-client'])

  if (!defined(Class['::fuel_project::common'])) {
    # FIX: $apply_firewall_rules was referenced here without ever being
    # declared as a class parameter, so external_host was always undef.
    # It is now a parameter defaulting to false (backward compatible).
    class { '::fuel_project::common':
      external_host => $apply_firewall_rules,
    }
  }

  if (!defined(Class['::fuel_project::nginx'])) {
    class { '::fuel_project::nginx': }
  }

  # Dedicated upload account; its home directory is the share web root.
  user { 'partnershare':
    ensure     => 'present',
    home       => '/var/www/partnershare',
    managehome => true,
    system     => true,
    require    => File['/var/www'],
  }

  ssh_authorized_key { 'partnershare':
    user    => 'partnershare',
    type    => 'ssh-rsa',
    key     => $authorized_key,
    require => User['partnershare'],
  }

  # Basic-auth credentials; readable by nginx (www-data) only.
  file { '/etc/nginx/partners.htpasswd':
    ensure  => 'file',
    owner   => 'root',
    group   => 'www-data',
    mode    => '0640',
    content => $htpasswd_content,
  }

  # Drop shared files older than 30 days, once an hour.
  cron { 'cleaner':
    command => 'find /var/www/partnershare -mtime +30 -delete > /dev/null 2>&1',
    user    => 'www-data',
    hour    => '*/1',
    minute  => '0',
  }

  ::nginx::resource::vhost { 'partnershare' :
    server_name      => ['share.fuel-infra.org'],
    www_root         => '/var/www/partnershare',
    vhost_cfg_append => {
      'autoindex'            => 'on',
      'auth_basic'           => '"Restricted access!"',
      'auth_basic_user_file' => '/etc/nginx/partners.htpasswd',
    }
  }

  # Deny access to dotfiles (e.g. the upload account's ~/.ssh).
  ::nginx::resource::location { 'partnershare_root':
    ensure              => present,
    vhost               => 'partnershare',
    www_root            => '/var/www/partnershare',
    location            => '~ /\.',
    location_cfg_append => {
      deny => 'all',
    }
  }
}
|
|
@ -0,0 +1,58 @@
|
|||
# Class: fuel_project::apps::plugins
#
# Serves Fuel plugins over HTTP (autoindexed nginx vhost) and exposes a
# writable rsync share, restricted to $sync_hosts_allow, for publishing.
#
# NOTE(review): $apply_firewall_rules and $firewall_allow_sources are
# declared but not referenced in this class body — confirm whether they
# are dead parameters.
#
class fuel_project::apps::plugins (
  $apply_firewall_rules = false,
  $firewall_allow_sources = {},
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $plugins_dir = '/var/www/plugins',
  $service_fqdn = "plugins.${::fqdn}",
  $sync_hosts_allow = [],
) {
  if (!defined(Class['::fuel_project::nginx'])) {
    class { '::fuel_project::nginx' :}
  }
  ::nginx::resource::vhost { 'plugins' :
    ensure      => 'present',
    autoindex   => 'on',
    access_log  => $nginx_access_log,
    error_log   => $nginx_error_log,
    format_log  => $nginx_log_format,
    www_root    => $plugins_dir,
    server_name => [$service_fqdn, "plugins.${::fqdn}"]
  }

  file { $plugins_dir :
    ensure  => 'directory',
    owner   => 'www-data',
    group   => 'www-data',
    require => Class['::nginx'],
  }

  if (!defined(Class['::rsync::server'])) {
    class { '::rsync::server' :
      gid        => 'root',
      uid        => 'root',
      use_chroot => 'yes',
      use_xinetd => false,
    }
  }

  # Writable share used to publish plugins; every host not listed in
  # $sync_hosts_allow is denied.
  ::rsync::server::module{ 'plugins':
    comment         => 'Fuel plugins sync',
    uid             => 'www-data',
    gid             => 'www-data',
    hosts_allow     => $sync_hosts_allow,
    hosts_deny      => ['*'],
    incoming_chmod  => '0755',
    outgoing_chmod  => '0644',
    list            => 'yes',
    lock_file       => '/var/run/rsync_plugins_sync.lock',
    max_connections => 100,
    path            => $plugins_dir,
    read_only       => 'no',
    write_only      => 'no',
    require         => File[$plugins_dir],
  }
}
|
|
@ -0,0 +1,64 @@
|
|||
# Class: fuel_project::apps::seed
#
# Serves build artifacts ("seed") over HTTP and accepts uploads through a
# second vhost on $seed_port using WebDAV PUT, restricted to
# $vhost_acl_allow.  Old artifacts are cleaned by
# devopslib::downloads_cleaner.
#
# NOTE(review): $apply_firewall_rules is declared but not referenced in
# this class body — confirm whether it is a dead parameter.
#
class fuel_project::apps::seed (
  $apply_firewall_rules = false,
  $client_max_body_size = '5G',
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $seed_cleanup_dirs = undef,
  $seed_dir = '/var/www/seed',
  $seed_port = 17333,
  $service_fqdn = "seed.${::fqdn}",
  # FIXME: Make one list for hosts on L3 and L7
  $vhost_acl_allow = [],
) {
  if (!defined(Class['::fuel_project::nginx'])) {
    class { '::fuel_project::nginx' :}
  }
  # Public read-only download vhost.
  ::nginx::resource::vhost { 'seed' :
    ensure      => 'present',
    autoindex   => 'off',
    access_log  => $nginx_access_log,
    error_log   => $nginx_error_log,
    format_log  => $nginx_log_format,
    www_root    => $seed_dir,
    server_name => [$service_fqdn, $::fqdn]
  }

  # Upload vhost: WebDAV PUT only, IP-restricted via allow/deny.
  ::nginx::resource::vhost { 'seed-upload' :
    ensure              => 'present',
    autoindex           => 'off',
    www_root            => $seed_dir,
    listen_port         => $seed_port,
    server_name         => [$::fqdn],
    access_log          => $nginx_access_log,
    error_log           => $nginx_error_log,
    format_log          => $nginx_log_format,
    location_cfg_append => {
      dav_methods          => 'PUT',
      client_max_body_size => $client_max_body_size,
      allow                => $vhost_acl_allow,
      deny                 => 'all',
    }
  }

  ensure_resource('file', '/var/www', {
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
    before => File[$seed_dir],
  })

  file { $seed_dir :
    ensure  => 'directory',
    owner   => 'www-data',
    group   => 'www-data',
    require => Class['nginx'],
  }

  # Periodic cleanup of the configured artifact directories.
  class {'::devopslib::downloads_cleaner' :
    cleanup_dirs => $seed_cleanup_dirs,
  }
}
|
|
@ -0,0 +1,60 @@
|
|||
# Class: fuel_project::apps::static
#
# Serves shared static assets (the javascript-bundle package tree) over
# HTTP and HTTPS with a permissive CORS header.
#
# Parameters:
#   [*packages*]                - packages providing the static content
#   [*service_fqdn*]            - primary vhost server name
#   [*ssl_certificate*]         - path the certificate is written to
#   [*ssl_certificate_content*] - PEM contents of the certificate
#   [*ssl_key*]                 - path the private key is written to
#   [*ssl_key_content*]         - PEM contents of the private key
#   [*static_dir*]              - nginx web root
#
class fuel_project::apps::static (
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = undef,
  $packages = ['javascript-bundle'],
  $service_fqdn = $::fqdn,
  $ssl_certificate = '/etc/ssl/certs/static.crt',
  $ssl_certificate_content = '',
  $ssl_key = '/etc/ssl/private/static.key',
  $ssl_key_content = '',
  $static_dir = '/usr/share/javascript',
) {
  # FIX: previously hard-coded ensure_packages(['javascript-bundle']),
  # silently ignoring the $packages parameter that the vhost's require
  # already depends on.  Behavior is unchanged for the default value.
  ensure_packages($packages)

  if($ssl_certificate and $ssl_certificate_content) {
    file { $ssl_certificate :
      ensure  => 'present',
      owner   => 'root',
      group   => 'root',
      mode    => '0400',
      content => $ssl_certificate_content,
    }
  }

  if($ssl_key and $ssl_key_content) {
    file { $ssl_key :
      ensure  => 'present',
      owner   => 'root',
      group   => 'root',
      mode    => '0400',
      content => $ssl_key_content,
    }
  }

  # NOTE(review): the require below references File[$ssl_certificate] and
  # File[$ssl_key] unconditionally, but those resources are only declared
  # when the matching *_content parameter is non-empty — with the empty
  # defaults the catalog will fail to compile.  Left as-is pending a
  # decision on the intended contract (content params look mandatory).
  ::nginx::resource::vhost { 'static' :
    ensure           => 'present',
    autoindex        => 'off',
    access_log       => $nginx_access_log,
    error_log        => $nginx_error_log,
    format_log       => $nginx_log_format,
    ssl              => true,
    listen_port      => 80,
    ssl_port         => 443,
    ssl_cert         => $ssl_certificate,
    ssl_key          => $ssl_key,
    www_root         => $static_dir,
    server_name      => [$service_fqdn, "static.${::fqdn}"],
    gzip_types       => 'text/css application/x-javascript',
    vhost_cfg_append => {
      'add_header' => "'Access-Control-Allow-Origin' '*'",
    },
    require          => [
      Package[$packages],
      File[$ssl_certificate],
      File[$ssl_key],
    ],
  }
}
|
|
@ -0,0 +1,58 @@
|
|||
# Class: fuel_project::apps::updates
#
# Serves Fuel updates over HTTP (autoindexed nginx vhost) and exposes a
# writable rsync share, restricted to $sync_hosts_allow, for publishing.
#
# NOTE(review): $apply_firewall_rules and $firewall_allow_sources are
# declared but not referenced in this class body — confirm whether they
# are dead parameters.
#
class fuel_project::apps::updates (
  $apply_firewall_rules = false,
  $firewall_allow_sources = {},
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $service_fqdn = "updates.${::fqdn}",
  $sync_hosts_allow = [],
  $updates_dir = '/var/www/updates',
) {
  if (!defined(Class['::fuel_project::nginx'])) {
    class { '::fuel_project::nginx' :}
  }
  ::nginx::resource::vhost { 'updates' :
    ensure      => 'present',
    autoindex   => 'on',
    access_log  => $nginx_access_log,
    error_log   => $nginx_error_log,
    format_log  => $nginx_log_format,
    www_root    => $updates_dir,
    server_name => [$service_fqdn, "updates.${::fqdn}"]
  }

  file { $updates_dir :
    ensure  => 'directory',
    owner   => 'www-data',
    group   => 'www-data',
    require => Class['::nginx'],
  }

  if (!defined(Class['::rsync::server'])) {
    class { '::rsync::server' :
      gid        => 'root',
      uid        => 'root',
      use_chroot => 'yes',
      use_xinetd => false,
    }
  }

  # Writable share used to publish updates; every host not listed in
  # $sync_hosts_allow is denied.
  ::rsync::server::module{ 'updates':
    comment         => 'Fuel updates sync',
    uid             => 'www-data',
    gid             => 'www-data',
    hosts_allow     => $sync_hosts_allow,
    hosts_deny      => ['*'],
    incoming_chmod  => '0755',
    outgoing_chmod  => '0644',
    list            => 'yes',
    lock_file       => '/var/run/rsync_updates_sync.lock',
    max_connections => 100,
    path            => $updates_dir,
    read_only       => 'no',
    write_only      => 'no',
    require         => File[$updates_dir],
  }
}
|
|
@ -0,0 +1,70 @@
|
|||
# Class: fuel_project::apps::web_share
#
# Publishes a directory (e.g. CI logs) over HTTP.  Content is uploaded via
# ssh by a nologin system account whose authorized keys are provided as a
# hash for create_resources.
#
# Parameters:
#   [*authorized_keys*]   - hash of ssh_authorized_key resources for $user
#   [*group*]             - group owning the share root
#   [*nginx_autoindex*]   - enable directory listings
#   [*nginx_server_name*] - vhost server name
#   [*share_root*]        - directory served / uploaded into
#   [*user*]              - upload account name
#
class fuel_project::apps::web_share (
  $authorized_keys,
  $group = 'jenkins',
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_autoindex = 'on',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = undef,
  $nginx_server_name = $::fqdn,
  $share_root = '/var/www/share_logs',
  $user = 'jenkins',
) {

  ensure_resource('file', '/var/www', {
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  })

  file { $share_root :
    ensure  => 'directory',
    owner   => $user,
    group   => $group,
    mode    => '0755',
    require => [
      User[$user],
      File['/var/www'],
    ],
  }

  ::nginx::resource::vhost { 'share-http' :
    ensure      => 'present',
    server_name => [$nginx_server_name],
    listen_port => 80,
    www_root    => $share_root,
    access_log  => $nginx_access_log,
    error_log   => $nginx_error_log,
    format_log  => $nginx_log_format,
    autoindex   => $nginx_autoindex,
    require     => File[$share_root],
  }

  # manage user $HOME manually, since we don't need .bash* stuff
  # but only ~/.ssh/
  file { "/var/lib/${user}" :
    ensure => 'directory',
    owner  => $user,
    group  => $group,
    mode   => '0755',
  }

  # Upload-only account: no login shell, no managed home.
  user { $user :
    ensure     => 'present',
    system     => true,
    managehome => false,
    home       => "/var/lib/${user}",
    shell      => '/usr/sbin/nologin',
  }

  create_resources(ssh_authorized_key, $authorized_keys, {
    ensure  => 'present',
    user    => $user,
    require => [
      User[$user],
    ],
  })

}
|
|
@ -0,0 +1,145 @@
|
|||
# Class: fuel_project::common
#
# Baseline configuration applied to every fuel_project node: time sync,
# puppet agent, SSH access, zabbix monitoring, hostname/hosts management
# and assorted housekeeping (logrotate rules, base packages, optional
# kernel pinning and LDAP-driven SSH keys).
#
class fuel_project::common (
  $bind_policy        = '',
  $external_host      = false,
  $facts              = {
    'location' => $::location,
    'role'     => $::role,
  },
  $kernel_package     = undef,
  $ldap               = false,
  $ldap_base          = '',
  $ldap_ignore_users  = '',
  $ldap_uri           = '',
  $logrotate_rules    = hiera_hash('logrotate::rules', {}),
  $pam_filter         = '',
  $pam_password       = '',
  $root_password_hash = 'r00tme',
  $root_shell         = '/bin/bash',
  $tls_cacertdir      = '',
) {
  class { '::atop' :}
  class { '::ntp' :}
  class { '::puppet::agent' :}
  class { '::ssh::authorized_keys' :}
  class { '::ssh::sshd' :
    apply_firewall_rules => $external_host,
  }
  # TODO: drop the ::system module by splitting its functionality into
  # separate modules or reusing publicly available ones
  class { '::system' :}
  class { '::zabbix::agent' :
    apply_firewall_rules => $external_host,
  }

  # Export the location/role facts to facter.
  ::puppet::facter { 'facts' :
    facts => $facts,
  }

  ensure_packages([
    'apparmor',
    'facter-facts',
    'screen',
    'tmux',
  ])

  # Install the exact requested kernel package version.
  # NOTE: rebooting into the new kernel must be done manually.
  if ($kernel_package) {
    ensure_packages($kernel_package)
  }

  # LDAP-backed SSH: sync authorized keys from the directory.
  if ($ldap) {
    class { '::ssh::ldap' :}

    file { '/usr/local/bin/ldap2sshkeys.sh' :
      ensure  => 'present',
      mode    => '0700',
      owner   => 'root',
      group   => 'root',
      content => template('fuel_project/common/ldap2sshkeys.sh.erb'),
    }

    # One immediate sync at apply time...
    exec { 'sync-ssh-keys' :
      command   => '/usr/local/bin/ldap2sshkeys.sh',
      logoutput => on_failure,
      require   => File['/usr/local/bin/ldap2sshkeys.sh'],
    }

    # ...and an hourly resync (minute spread per-host via fqdn_rand).
    cron { 'ldap2sshkeys' :
      command => "/usr/local/bin/ldap2sshkeys.sh ${::hostname} 2>&1 | logger -t ldap2sshkeys",
      user    => root,
      hour    => '*',
      minute  => fqdn_rand(59),
      require => File['/usr/local/bin/ldap2sshkeys.sh'],
    }
  }

  # Distro-specific package manager setup.
  case $::osfamily {
    'Debian': {
      class { '::apt' :}
    }
    'RedHat': {
      class { '::yum' :}
    }
    default: { }
  }

  # Logrotate items
  create_resources('::logrotate::rule', $logrotate_rules)

  zabbix::item { 'software-zabbix-check' :
    template => 'fuel_project/common/zabbix/software.conf.erb',
  }

  # Zabbix hardware item
  ensure_packages(['smartmontools'])

  ::zabbix::item { 'hardware-zabbix-check' :
    content => 'puppet:///modules/fuel_project/common/zabbix/hardware.conf',
    require => Package['smartmontools'],
  }
  # /Zabbix hardware item

  # Zabbix SSL item
  file { '/usr/local/bin/zabbix_check_certificate.sh' :
    ensure => 'present',
    mode   => '0755',
    source => 'puppet:///modules/fuel_project/zabbix/zabbix_check_certificate.sh',
  }
  ::zabbix::item { 'ssl-certificate-check' :
    content => 'puppet:///modules/fuel_project/common/zabbix/ssl-certificate-check.conf',
    require => File['/usr/local/bin/zabbix_check_certificate.sh'],
  }
  # /Zabbix SSL item

  mount { '/' :
    ensure  => 'present',
    options => 'defaults,errors=remount-ro,noatime,nodiratime,barrier=0',
  }

  file { '/etc/hostname' :
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    content => "${::fqdn}\n",
    notify  => Exec['/bin/hostname -F /etc/hostname'],
  }

  file { '/etc/hosts' :
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    content => template('fuel_project/common/hosts.erb'),
  }

  # Apply the managed hostname whenever /etc/hostname changes.
  exec { '/bin/hostname -F /etc/hostname' :
    subscribe   => File['/etc/hostname'],
    refreshonly => true,
    require     => File['/etc/hostname'],
  }
}
|
|
@ -0,0 +1,17 @@
|
|||
# Class: fuel_project::devops_tools
#
# Umbrella class for the devops helper tools: pulls in the common
# baseline and optionally enables the Launchpad bug helpers.
#
class fuel_project::devops_tools (
  $lpbugmanage = false,
  $lpupdatebug = false,
) {
  class { '::fuel_project::common' :}

  if ($lpbugmanage) {
    class { '::fuel_project::devops_tools::lpbugmanage' :}
  }

  if ($lpupdatebug) {
    class { '::fuel_project::devops_tools::lpupdatebug' :}
  }
}
|
|
@ -0,0 +1,52 @@
|
|||
# Class: fuel_project::devops_tools::lpbugmanage
#
# Installs and configures lpbugmanage — a tool that periodically manages
# Launchpad bug states via an hourly cron job.
#
# Parameters mirror the tool's credentials/config file entries and are
# consumed by templates fuel_project/devops_tools/{credentials,lpbugmanage}.erb.
#
class fuel_project::devops_tools::lpbugmanage (
  $id              = '',
  $consumer_key    = '',
  $consumer_secret = '',
  $access_token    = '',
  $access_secret   = '',
  $section         = 'bugmanage',
  $appname         = 'lpbugmanage',
  $credfile        = '/etc/lpbugmanage/credentials.conf',
  $cachedir        = '/var/cache/launchpadlib/',
  $logfile         = 'lpbugmanage.log',
  $env             = 'staging',
  $status          = 'New, Confirmed, Triaged, In Progress, Incomplete',
  $series          = 'https://api.staging.launchpad.net/1.0/fuel',
  $milestone       = 'https://api.staging.launchpad.net/1.0/fuel/+milestone',
  $distr           = 'fuel',
  $package_name    = 'python-lpbugmanage',
) {
  ensure_packages([$package_name])

  # Launchpad API credentials; root-only since it contains secrets.
  # FIX: reference the package via $package_name instead of the
  # hardcoded 'python-lpbugmanage' title, so overriding the parameter
  # keeps the dependency graph intact (previously it would fail).
  file { '/etc/lpbugmanage/credentials.conf':
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0400',
    content => template('fuel_project/devops_tools/credentials.erb'),
    require => Package[$package_name],
  }

  file { '/etc/lpbugmanage/lpbugmanage.conf':
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    content => template('fuel_project/devops_tools/lpbugmanage.erb'),
    require => Package[$package_name],
  }

  # Hourly run, serialized with flock so overlapping runs are skipped;
  # output is sent to syslog via logger.
  cron { 'lpbugmanage':
    user    => 'root',
    hour    => '*/1',
    command => '/usr/bin/flock -n -x /var/lock/lpbugmanage.lock /usr/bin/lpbugmanage.py test 2>&1 | logger -t lpbugmanage',
    require => [
      Package[$package_name],
      File['/etc/lpbugmanage/credentials.conf'],
      File['/etc/lpbugmanage/lpbugmanage.conf'],
    ],
  }
}
|
|
@ -0,0 +1,68 @@
|
|||
# Class: fuel_project::devops_tools::lpupdatebug
#
# Installs and configures lpupdatebug — a daemon that watches the Gerrit
# event stream and updates the linked Launchpad bugs.
#
# Parameters mirror the daemon's credentials/config file entries and are
# consumed by templates fuel_project/devops_tools/{credentials,lpupdatebug}.erb.
#
class fuel_project::devops_tools::lpupdatebug (
  $access_token        = '',
  $access_secret       = '',
  $appname             = 'lpupdatebug',
  $cachedir            = '/var/tmp/launchpadlib/',
  $consumer_key        = '',
  $consumer_secret     = '',
  $credfile            = '/etc/lpupdatebug/credentials.conf',
  $env                 = 'production',
  $host                = 'localhost',
  $id                  = '1',
  $logfile             = '/var/log/lpupdatebug.log',
  $package_name        = 'python-lpupdatebug',
  $port                = '29418',
  $projects            = [],
  $sshprivkey          = '/etc/lpupdatebug/lpupdatebug.key',
  $sshprivkey_contents = undef,
  $update_status       = 'yes',
  $username            = 'lpupdatebug',
) {
  ensure_packages([$package_name])

  # Private key used to subscribe to the Gerrit event stream.
  if ($sshprivkey_contents) {
    file { $sshprivkey :
      ensure  => 'present',  # FIX: be explicit so the key file is created
      owner   => 'root',
      group   => 'root',
      mode    => '0400',
      content => $sshprivkey_contents,
    }
  }

  # Launchpad API credentials; root-only since it contains secrets.
  # FIX: reference the package via $package_name instead of the
  # hardcoded 'python-lpupdatebug' title, so overriding the parameter
  # keeps the dependency graph intact (previously it would fail).
  file { '/etc/lpupdatebug/credentials.conf':
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0400',
    content => template('fuel_project/devops_tools/credentials.erb'),
    require => Package[$package_name],
  }

  file { '/etc/lpupdatebug/lpupdatebug.conf':
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    content => template('fuel_project/devops_tools/lpupdatebug.erb'),
    require => Package[$package_name],
  }

  service { 'python-lpupdatebug' :
    ensure     => running,
    enable     => true,
    hasrestart => false,
    require    => Package[$package_name],
  }

  # Zabbix monitoring of the daemon's log (needs the 'tailnew' helper).
  ensure_packages(['tailnew'])

  zabbix::item { 'lpupdatebug-zabbix-check' :
    content => 'puppet:///modules/fuel_project/devops_tools/userparams-lpupdatebug.conf',
    notify  => Service[$::zabbix::params::agent_service],
    require => Package['tailnew'],
  }
}
|
|
@ -0,0 +1,59 @@
|
|||
# Class: fuel_project::gerrit
#
# Deploys a Gerrit instance (settings taken from the 'gerrit' hiera
# hash) together with its MySQL backend, the hideci extension and
# optional master/slave replication.
#
class fuel_project::gerrit (
  $gerrit_auth_type = undef,
  $replica_points   = undef,
  $replication_mode = '',
) {
  $gerrit = hiera_hash('gerrit')
  class { '::gerrit' :
    canonicalweburl                     => $gerrit['service_url'],
    contactstore                        => $gerrit['contactstore'],
    # give the JVM half of the host's memory (expression yields bytes)
    container_heaplimit                 => floor($::memorysize_mb/2*1024*1024),
    email_private_key                   => $gerrit['email_private_key'],
    gerrit_auth_type                    => $gerrit_auth_type,
    gerrit_start_timeout                => $gerrit['start_timeout'],
    gitweb                              => true,
    mysql_database                      => $gerrit['mysql_database'],
    mysql_host                          => $gerrit['mysql_host'],
    mysql_password                      => $gerrit['mysql_password'],
    mysql_user                          => $gerrit['mysql_user'],
    service_fqdn                        => $gerrit['service_fqdn'],
    ssh_dsa_key_contents                => $gerrit['ssh_dsa_key_contents'],
    ssh_dsa_pubkey_contents             => $gerrit['ssh_dsa_pubkey_contents'],
    ssh_project_rsa_key_contents        => $gerrit['project_ssh_rsa_key_contents'],
    ssh_project_rsa_pubkey_contents     => $gerrit['project_ssh_rsa_pubkey_contents'],
    ssh_replication_rsa_key_contents    => $gerrit['replication_ssh_rsa_key_contents'],
    ssh_replication_rsa_pubkey_contents => $gerrit['replication_ssh_rsa_pubkey_contents'],
    ssh_rsa_key_contents                => $gerrit['ssh_rsa_key_contents'],
    ssh_rsa_pubkey_contents             => $gerrit['ssh_rsa_pubkey_contents'],
    ssl_cert_file                       => $gerrit['ssl_cert_file'],
    ssl_cert_file_contents              => $gerrit['ssl_cert_file_contents'],
    ssl_chain_file                      => $gerrit['ssl_chain_file'],
    ssl_chain_file_contents             => $gerrit['ssl_chain_file_contents'],
    ssl_key_file                        => $gerrit['ssl_key_file'],
    ssl_key_file_contents               => $gerrit['ssl_key_file_contents'],
  }

  class { '::gerrit::mysql' :
    database_name     => $gerrit['mysql_database'],
    database_user     => $gerrit['mysql_user'],
    database_password => $gerrit['mysql_password'],
  }

  class { '::gerrit::hideci' :}

  # Master mode: declare one replication target per configured point.
  # FIX: pass the defined type name as a quoted string — an unquoted
  # bareword containing '::' is not reliably accepted here — and drop
  # the trailing comma from the function argument list.
  if ($replication_mode == 'master' and $replica_points) {
    create_resources(
      '::fuel_project::gerrit::replication',
      $replica_points
    )
  }

  if ($replication_mode == 'slave') {
    class { '::fuel_project::gerrit::replication_slave' :}
  }
}
|
|
@ -0,0 +1,73 @@
|
|||
# Define: fuel_project::gerrit::replication
#
# Adds a single replication target to Gerrit's replication.config.
#
# A replication path consists of:
#   uri: 'user@host:path'
# More docs:
# https://gerrit.libreoffice.org/plugins/replication/Documentation/config.html
#
define fuel_project::gerrit::replication (
  $host,
  $path,
  $user,
  $auth_group            = undef,
  $config_file_path      = '/var/lib/gerrit/review_site/etc/replication.config',
  $mirror                = undef,
  $private_key           = undef,
  $public_key            = undef,
  $replicate_permissions = undef,
  $replication_delay     = 0,
  $threads               = 3,
){
  # All instances of this define share a single replication.config, so
  # the concat container must be declared with ensure_resource(), which
  # only creates the resource if it does not already exist — this avoids
  # duplicate declaration errors between instances.
  ensure_resource(
    'concat',
    $config_file_path,
    {
      ensure => present,
      owner  => 'gerrit',
      group  => 'gerrit',
      mode   => '0644',
      order  => 'numeric',
    })

  # Shared header fragment (link to the plugin docs). Guarded with
  # ensure_resource() for the same reason: every instance of this define
  # would otherwise redeclare 'replication_config_header'.
  ensure_resource(
    'concat::fragment',
    'replication_config_header',
    {
      target  => $config_file_path,
      content => "# This file is managed by puppet.\n#https://gerrit.libreoffice.org/plugins/replication/Documentation/config.html\n",
      order   => '01'
    })

  # Trust the replica host's SSH key for the gerrit user.
  ssh::known_host { "${host}-known-hosts" :
    host    => $host,
    user    => 'gerrit',
    require => User['gerrit'],
  }

  # Per-target SSH key pair used by gerrit for pushing to the replica.
  sshuserconfig::remotehost { "${user}-${host}" :
    unix_user           => 'gerrit',
    ssh_config_dir      => '/var/lib/gerrit/.ssh',
    remote_hostname     => $host,
    remote_username     => $user,
    private_key_content => $private_key,
    public_key_content  => $public_key,
  }

  # This replica's stanza in gerrit's replication.config.
  concat::fragment { "${user}-${host}-${path}":
    target  => $config_file_path,
    content => template('fuel_project/gerrit/replication.config.erb'),
  }
}
|
|
@ -0,0 +1,46 @@
|
|||
# Class: fuel_project::gerrit::replication_slave
#
# Prepares a host to receive Gerrit replication pushes: creates the
# 'gerrit-replicator' user with its ~/.ssh directory, hands the review
# site's git tree over to it and installs the master's authorized keys.
#
class fuel_project::gerrit::replication_slave (
  $authorized_keys = {}
) {
  # Guarded so another manifest may declare the same user first.
  if (!defined(User['gerrit-replicator'])) {
    user { 'gerrit-replicator':
      ensure     => 'present',
      name       => 'gerrit-replicator',
      shell      => '/bin/bash',
      home       => '/var/lib/gerrit-replicator',
      managehome => true,
      comment    => 'Gerrit Replicator User',
      system     => true,
    }
  }

  file { '/var/lib/gerrit-replicator/.ssh/' :
    ensure  => 'directory',
    owner   => 'gerrit-replicator',
    group   => 'gerrit-replicator',
    mode    => '0700',
    require => User['gerrit-replicator'],
  }

  # The replicator must own (recursively) the git tree it pushes into.
  file { '/var/lib/gerrit/review_site/git/' :
    ensure  => 'directory',
    owner   => 'gerrit-replicator',
    group   => 'gerrit-replicator',
    recurse => true,
    require => [
      User['gerrit-replicator'],
      Package['gerrit'],
    ],
  }

  # Keys of the replication master(s) allowed to push to this slave.
  create_resources(ssh_authorized_key, $authorized_keys, {
    ensure  => 'present',
    user    => 'gerrit-replicator',
    require => [
      User['gerrit-replicator'],
      File['/var/lib/gerrit-replicator/.ssh/'],
    ],
  })
}
|
|
@ -0,0 +1,146 @@
|
|||
# Class: fuel_project::glusterfs
#
# Parameters:
#   $create_pool:
#     if false, only install the glusterfs server and client
#   $gfs_pool:
#     list of nodes with glusterfs server installed, used to form the pool
#   $gfs_volume_name:
#     name of the shared data volume, used by clients for mounting,
#     example: mount -t glusterfs $gfs_pool[0]:/$gfs_volume_name /mnt/local
#   $gfs_brick_point:
#     mount points used for building the bricks
#
# The above parameters in terms of glusterfs:
#   1. gluster peer probe $gfs_pool[0]
#      gluster peer probe $gfs_pool[1]
#   2. mkdir -p $gfs_brick_point
#      gluster volume create $gfs_volume_name replica 2 transport tcp \
#        $gfs_pool[0]:$gfs_brick_point $gfs_pool[1]:$gfs_brick_point force
#
# All gluster customization:
# http://docs.openstack.org/admin-guide-cloud/content/glusterfs_backend.html
#
class fuel_project::glusterfs (
  $apply_firewall_rules   = false,
  $create_pool            = false,
  $firewall_allow_sources = {},
  $gfs_brick_point        = '/mnt/brick',
  $gfs_pool               = [ 'slave-13.test.local','slave-14.test.local' ],
  $gfs_volume_name        = 'data',
  $owner_gid              = 165,
  $owner_uid              = 165,
){
  class { '::fuel_project::common' :
    external_host => $apply_firewall_rules,
  }

  if !defined(Class[::zabbix::agent]) {
    class { '::zabbix::agent' :
      apply_firewall_rules => $apply_firewall_rules,
    }
  }

  class { '::glusterfs': }

  # Ownership/permissions below this point are managed by glusterfs itself.
  file { $gfs_brick_point:
    ensure => directory,
    mode   => '0775',
  }

  if $create_pool {
    glusterfs_pool { $gfs_pool: }

    glusterfs_vol { $gfs_volume_name :
      replica => 2,
      brick   => [ "${gfs_pool[0]}:${gfs_brick_point}", "${gfs_pool[1]}:${gfs_brick_point}"],
      force   => true,
      require => [
        File[$gfs_brick_point],
        Glusterfs_pool[$gfs_pool],
      ],
    }

    # Set the volume owner uid/gid; guarded with 'unless' so the gluster
    # CLI is only invoked when the value actually differs.
    exec { "set_volume_uid_${gfs_volume_name}":
      command => "gluster volume set ${gfs_volume_name} storage.owner-uid ${owner_uid}",
      user    => 'root',
      unless  => "gluster volume info| fgrep 'storage.owner-uid: ${owner_uid}'",
      require => Glusterfs_vol[$gfs_volume_name],
    }

    exec { "set_volume_gid_${gfs_volume_name}":
      command => "gluster volume set ${gfs_volume_name} storage.owner-gid ${owner_gid}",
      user    => 'root',
      unless  => "gluster volume info| fgrep 'storage.owner-gid: ${owner_gid}'",
      require => Glusterfs_vol[$gfs_volume_name],
    }

    exec { "set_volume_param_${gfs_volume_name}":
      command => "gluster volume set ${gfs_volume_name} server.allow-insecure on",
      user    => 'root',
      unless  => 'gluster volume info| fgrep "server.allow-insecure: on"',
      notify  => Exec["restart_volume_${gfs_volume_name}"],
      require => Glusterfs_vol[$gfs_volume_name],
    }

    # Changing server.allow-insecure only takes effect after a volume
    # restart, hence the refreshonly stop/start triggered above.
    exec { "restart_volume_${gfs_volume_name}":
      command     => "echo y | gluster volume stop ${gfs_volume_name}; gluster volume start ${gfs_volume_name}",
      user        => 'root',
      refreshonly => true,
    }
  }

  file { '/etc/glusterfs/glusterd.vol' :
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    content => template('fuel_project/glusterfs/glusterd.vol.erb'),
    require => Class['glusterfs::package'],
    notify  => Class['glusterfs::service'],
  }

  # Deploy the monitoring scripts.
  file { '/usr/local/bin' :
    ensure  => directory,
    recurse => remote,
    owner   => 'root',
    group   => 'root',
    mode    => '0754',
    source  => 'puppet:///modules/fuel_project/glusterfs/zabbix/glubix',
  }

  # Allow the zabbix user to run the monitoring scripts via sudo.
  file { '/etc/sudoers.d/zabbix_glusterfs' :
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0440',
    content => template('fuel_project/glusterfs/sudoers_zabbix_glusterfs.erb')
  }

  zabbix::item { 'glusterfs-zabbix-check' :
    content => 'puppet:///modules/fuel_project/glusterfs/zabbix/userparams-glubix.conf',
    notify  => Service[$::zabbix::params::agent_service],
  }

  if $apply_firewall_rules {
    include firewall_defaults::pre
    # 111   - incoming RPC
    # 24007 - Gluster daemon
    # 24008 - management
    # 49152 - (GlusterFS 3.4 and later) each brick of every volume on
    #         this host requires its own port; every new brick uses one
    #         new port
    # 2049, 38465-38469 - required by the Gluster NFS service
    create_resources(firewall, $firewall_allow_sources, {
      ensure  => present,
      dport   => [111, 24007, 24008, 49152, 2049, 38465, 38466, 38467, 38468, 38469],
      proto   => 'tcp',
      action  => 'accept',
      require => Class['firewall_defaults::pre'],
    })
  }
}
|
|
@ -0,0 +1,25 @@
|
|||
# Class: fuel_project::jenkins::master
#
# Deploys a Jenkins master on top of the common baseline; optionally
# installs the bundled plugin package plus the zabbix/label-dumper extras.
#
class fuel_project::jenkins::master (
  $firewall_enable      = false,
  $install_label_dumper = false,
  $install_plugins      = false,
  $install_zabbix_item  = false,
  $service_fqdn         = $::fqdn,
) {
  class { '::fuel_project::common':
    external_host => $firewall_enable,
  }
  class { '::jenkins::master':
    apply_firewall_rules => $firewall_enable,
    install_zabbix_item  => $install_zabbix_item,
    install_label_dumper => $install_label_dumper,
    service_fqdn         => $service_fqdn,
  }
  if($install_plugins) {
    # Plugins ship as a distro package; install only once the jenkins
    # service exists.
    package { 'jenkins-plugins' :
      ensure  => present,
      require => Service['jenkins'],
    }
  }
}
|
|
@ -0,0 +1,854 @@
|
|||
# Class: fuel_project::jenkins::slave
|
||||
#
|
||||
class fuel_project::jenkins::slave (
|
||||
$docker_package,
|
||||
$ruby_version,
|
||||
$bind_policy = '',
|
||||
$build_fuel_iso = false,
|
||||
$build_fuel_packages = false,
|
||||
$build_fuel_npm_packages = ['grunt-cli', 'gulp'],
|
||||
$build_fuel_plugins = false,
|
||||
$check_tasks_graph = false,
|
||||
$docker_service = '',
|
||||
$external_host = false,
|
||||
$fuel_web_selenium = false,
|
||||
$http_share_iso = false,
|
||||
$install_docker = false,
|
||||
$jenkins_swarm_slave = false,
|
||||
$known_hosts = {},
|
||||
$known_hosts_overwrite = false,
|
||||
$libvirt_default_network = false,
|
||||
$ldap = false,
|
||||
$ldap_base = '',
|
||||
$ldap_ignore_users = '',
|
||||
$ldap_sudo_group = undef,
|
||||
$ldap_uri = '',
|
||||
$local_ssh_private_key = undef,
|
||||
$local_ssh_public_key = undef,
|
||||
$nailgun_db = ['nailgun'],
|
||||
$osc_apiurl = '',
|
||||
$osc_pass_primary = '',
|
||||
$osc_pass_secondary = '',
|
||||
$osc_url_primary = '',
|
||||
$osc_url_secondary = '',
|
||||
$osc_user_primary = '',
|
||||
$osc_user_secondary = '',
|
||||
$osci_centos_image_name = 'centos6.4-x86_64-gold-master.img',
|
||||
$osci_centos_job_dir = '/home/jenkins/vm-centos-test-rpm',
|
||||
$osci_centos_remote_dir = 'vm-centos-test-rpm',
|
||||
$osci_obs_jenkins_key = '',
|
||||
$osci_obs_jenkins_key_contents = '',
|
||||
$osci_rsync_source_server = '',
|
||||
$osci_test = false,
|
||||
$osci_trusty_image_name = 'trusty.qcow2',
|
||||
$osci_trusty_job_dir = '/home/jenkins/vm-trusty-test-deb',
|
||||
$osci_trusty_remote_dir = 'vm-trusty-test-deb',
|
||||
$osci_ubuntu_image_name = 'ubuntu-deb-test.qcow2',
|
||||
$osci_ubuntu_job_dir = '/home/jenkins/vm-ubuntu-test-deb',
|
||||
$osci_ubuntu_remote_dir = 'vm-ubuntu-test-deb',
|
||||
$osci_vm_centos_jenkins_key = '',
|
||||
$osci_vm_centos_jenkins_key_contents = '',
|
||||
$osci_vm_trusty_jenkins_key = '',
|
||||
$osci_vm_trusty_jenkins_key_contents = '',
|
||||
$osci_vm_ubuntu_jenkins_key = '',
|
||||
$osci_vm_ubuntu_jenkins_key_contents = '',
|
||||
$ostf_db = ['ostf'],
|
||||
$pam_filter = '',
|
||||
$pam_password = '',
|
||||
$run_tests = false,
|
||||
$seed_cleanup_dirs = [
|
||||
{
|
||||
'dir' => '/var/www/fuelweb-iso', # directory to poll
|
||||
'ttl' => 10, # time to live in days
|
||||
'pattern' => 'fuel-*', # pattern to filter files in directory
|
||||
},
|
||||
{
|
||||
'dir' => '/srv/downloads',
|
||||
'ttl' => 1,
|
||||
'pattern' => 'fuel-*',
|
||||
}
|
||||
],
|
||||
$simple_syntax_check = false,
|
||||
$sudo_commands = ['/sbin/ebtables'],
|
||||
$tls_cacertdir = '',
|
||||
$verify_fuel_astute = false,
|
||||
$verify_fuel_docs = false,
|
||||
$verify_fuel_pkgs_requirements = false,
|
||||
$verify_fuel_stats = false,
|
||||
$verify_fuel_web = false,
|
||||
$verify_fuel_web_npm_packages = ['casperjs','grunt-cli','gulp','phantomjs'],
|
||||
$verify_jenkins_jobs = false,
|
||||
$workspace = '/home/jenkins/workspace',
|
||||
$x11_display_num = 99,
|
||||
) {
|
||||
|
||||
if (!defined(Class['::fuel_project::common'])) {
|
||||
class { '::fuel_project::common' :
|
||||
external_host => $external_host,
|
||||
ldap => $ldap,
|
||||
ldap_uri => $ldap_uri,
|
||||
ldap_base => $ldap_base,
|
||||
tls_cacertdir => $tls_cacertdir,
|
||||
pam_password => $pam_password,
|
||||
pam_filter => $pam_filter,
|
||||
bind_policy => $bind_policy,
|
||||
ldap_ignore_users => $ldap_ignore_users,
|
||||
}
|
||||
}
|
||||
|
||||
class { 'transmission::daemon' :}
|
||||
|
||||
if ($jenkins_swarm_slave == true) {
|
||||
class { '::jenkins::swarm_slave' :}
|
||||
} else {
|
||||
class { '::jenkins::slave' :}
|
||||
}
|
||||
|
||||
# jenkins should be in www-data group by default
|
||||
User <| title == 'jenkins' |> {
|
||||
groups +> 'www-data',
|
||||
}
|
||||
|
||||
class {'::devopslib::downloads_cleaner' :
|
||||
cleanup_dirs => $seed_cleanup_dirs,
|
||||
clean_seeds => true,
|
||||
}
|
||||
|
||||
ensure_packages(['git', 'python-seed-client'])
|
||||
|
||||
# release status reports
|
||||
if ($build_fuel_iso == true or $run_tests == true) {
|
||||
class { '::landing_page::updater' :}
|
||||
}
|
||||
|
||||
# FIXME: Legacy compability LP #1418927
|
||||
cron { 'devops-env-cleanup' :
|
||||
ensure => 'absent',
|
||||
}
|
||||
file { '/usr/local/bin/devops-env-cleanup.sh' :
|
||||
ensure => 'absent',
|
||||
}
|
||||
file { '/etc/devops/local_settings.py' :
|
||||
ensure => 'absent',
|
||||
}
|
||||
file { '/etc/devops' :
|
||||
ensure => 'absent',
|
||||
force => true,
|
||||
require => File['/etc/devops/local_settings.py'],
|
||||
}
|
||||
package { 'python-devops' :
|
||||
ensure => 'absent',
|
||||
uninstall_options => ['purge']
|
||||
}
|
||||
# /FIXME
|
||||
|
||||
file { '/home/jenkins/.ssh' :
|
||||
ensure => 'directory',
|
||||
mode => '0700',
|
||||
owner => 'jenkins',
|
||||
group => 'jenkins',
|
||||
require => User['jenkins'],
|
||||
}
|
||||
|
||||
|
||||
if ($local_ssh_private_key) {
|
||||
file { '/home/jenkins/.ssh/id_rsa' :
|
||||
ensure => 'present',
|
||||
mode => '0600',
|
||||
owner => 'jenkins',
|
||||
group => 'jenkins',
|
||||
content => $local_ssh_private_key,
|
||||
require => [
|
||||
User['jenkins'],
|
||||
File['/home/jenkins/.ssh'],
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
if ($local_ssh_public_key) {
|
||||
file { '/home/jenkins/.ssh/id_rsa.pub' :
|
||||
ensure => 'present',
|
||||
mode => '0600',
|
||||
owner => 'jenkins',
|
||||
group => 'jenkins',
|
||||
content => $local_ssh_public_key,
|
||||
require => [
|
||||
User['jenkins'],
|
||||
File['/home/jenkins/.ssh'],
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# 'known_hosts' manage
|
||||
if ($known_hosts) {
|
||||
create_resources('ssh::known_host', $known_hosts, {
|
||||
user => 'jenkins',
|
||||
overwrite => $known_hosts_overwrite,
|
||||
require => User['jenkins'],
|
||||
})
|
||||
}
|
||||
|
||||
# Run system tests
|
||||
if ($run_tests == true) {
|
||||
if ($libvirt_default_network == false) {
|
||||
class { '::libvirt' :
|
||||
listen_tls => false,
|
||||
listen_tcp => true,
|
||||
auth_tcp => 'none',
|
||||
listen_addr => '127.0.0.1',
|
||||
mdns_adv => false,
|
||||
unix_sock_group => 'libvirtd',
|
||||
unix_sock_rw_perms => '0777',
|
||||
python => true,
|
||||
qemu => true,
|
||||
tcp_port => 16509,
|
||||
deb_default => {
|
||||
'libvirtd_opts' => '-d -l',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
libvirt_pool { 'default' :
|
||||
ensure => 'present',
|
||||
type => 'dir',
|
||||
autostart => true,
|
||||
target => '/var/lib/libvirt/images',
|
||||
require => Class['libvirt'],
|
||||
}
|
||||
|
||||
# python-devops installation
|
||||
if (!defined(Class['::postgresql::server'])) {
|
||||
class { '::postgresql::server' : }
|
||||
}
|
||||
|
||||
::postgresql::server::db { 'devops' :
|
||||
user => 'devops',
|
||||
password => 'devops',
|
||||
}
|
||||
|
||||
::postgresql::server::db { 'fuel_devops' :
|
||||
user => 'fuel_devops',
|
||||
password => 'fuel_devops',
|
||||
}
|
||||
# /python-devops installation
|
||||
|
||||
$system_tests_packages = [
|
||||
# dependencies
|
||||
'libevent-dev',
|
||||
'libffi-dev',
|
||||
'libvirt-dev',
|
||||
'python-dev',
|
||||
'python-psycopg2',
|
||||
'python-virtualenv',
|
||||
'python-yaml',
|
||||
'pkg-config',
|
||||
'postgresql-server-dev-all',
|
||||
|
||||
# diagnostic utilities
|
||||
'htop',
|
||||
'sysstat',
|
||||
'dstat',
|
||||
'vncviewer',
|
||||
'tcpdump',
|
||||
|
||||
# usefull utils
|
||||
'screen',
|
||||
|
||||
# repo building utilities
|
||||
'reprepro',
|
||||
'createrepo',
|
||||
]
|
||||
|
||||
ensure_packages($system_tests_packages)
|
||||
|
||||
file { $workspace :
|
||||
ensure => 'directory',
|
||||
owner => 'jenkins',
|
||||
group => 'jenkins',
|
||||
require => User['jenkins'],
|
||||
}
|
||||
|
||||
ensure_resource('file', "${workspace}/iso", {
|
||||
ensure => 'directory',
|
||||
owner => 'jenkins',
|
||||
group => 'jenkins',
|
||||
mode => '0755',
|
||||
require => [
|
||||
User['jenkins'],
|
||||
File[$workspace],
|
||||
],
|
||||
})
|
||||
|
||||
file { '/etc/sudoers.d/systest' :
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0440',
|
||||
content => template('fuel_project/jenkins/slave/system_tests.sudoers.d.erb'),
|
||||
}
|
||||
|
||||
# Working with bridging
|
||||
# we need to load module to be sure /proc/sys/net/bridge branch will be created
|
||||
exec { 'load_bridge_module' :
|
||||
command => '/sbin/modprobe bridge',
|
||||
user => 'root',
|
||||
logoutput => 'on_failure',
|
||||
}
|
||||
|
||||
# ensure bridge module will be loaded on system start
|
||||
augeas { 'sysctl-net.bridge.bridge-nf-call-iptables' :
|
||||
context => '/files/etc/modules',
|
||||
changes => 'clear bridge',
|
||||
}
|
||||
|
||||
sysctl { 'net.bridge.bridge-nf-call-iptables' :
|
||||
value => '0',
|
||||
require => Exec['load_bridge_module'],
|
||||
}
|
||||
|
||||
sysctl { 'vm.swappiness' :
|
||||
value => '0',
|
||||
}
|
||||
}
|
||||
|
||||
# provide env for building packages, actaully for "make sources"
|
||||
# from fuel-main and remove duplicate packages from build ISO
|
||||
if ($build_fuel_packages or $build_fuel_iso) {
|
||||
$build_fuel_packages_list = [
|
||||
'devscripts',
|
||||
'libparse-debcontrol-perl',
|
||||
'make',
|
||||
'mock',
|
||||
'nodejs',
|
||||
'nodejs-legacy',
|
||||
'npm',
|
||||
'pigz',
|
||||
'lzop',
|
||||
'python-setuptools',
|
||||
'python-rpm',
|
||||
'python-pbr',
|
||||
'reprepro',
|
||||
'ruby',
|
||||
'sbuild',
|
||||
]
|
||||
|
||||
User <| title == 'jenkins' |> {
|
||||
groups +> 'mock',
|
||||
require => Package[$build_fuel_packages_list],
|
||||
}
|
||||
|
||||
ensure_packages($build_fuel_packages_list)
|
||||
|
||||
if ($build_fuel_npm_packages) {
|
||||
ensure_packages($build_fuel_npm_packages, {
|
||||
provider => npm,
|
||||
require => Package['npm'],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
# Build ISO
|
||||
if ($build_fuel_iso == true) {
|
||||
$build_fuel_iso_packages = [
|
||||
'bc',
|
||||
'build-essential',
|
||||
'createrepo',
|
||||
'debmirror',
|
||||
'debootstrap',
|
||||
'dosfstools',
|
||||
'extlinux',
|
||||
'genisoimage',
|
||||
'isomd5sum',
|
||||
'kpartx',
|
||||
'libconfig-auto-perl',
|
||||
'libmysqlclient-dev',
|
||||
'libparse-debian-packages-perl',
|
||||
'libyaml-dev',
|
||||
'lrzip',
|
||||
'python-daemon',
|
||||
'python-ipaddr',
|
||||
'python-jinja2',
|
||||
'python-nose',
|
||||
'python-paramiko',
|
||||
'python-pip',
|
||||
'python-xmlbuilder',
|
||||
'python-virtualenv',
|
||||
'python-yaml',
|
||||
'realpath',
|
||||
'ruby-bundler',
|
||||
'ruby-builder',
|
||||
'ruby-dev',
|
||||
'rubygems-integration',
|
||||
'syslinux',
|
||||
'time',
|
||||
'unzip',
|
||||
'xorriso',
|
||||
'yum',
|
||||
'yum-utils',
|
||||
]
|
||||
|
||||
ensure_packages($build_fuel_iso_packages)
|
||||
|
||||
ensure_resource('file', '/var/www', {
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
})
|
||||
|
||||
ensure_resource('file', '/var/www/fwm', {
|
||||
ensure => 'directory',
|
||||
owner => 'jenkins',
|
||||
group => 'jenkins',
|
||||
mode => '0755',
|
||||
require => [
|
||||
User['jenkins'],
|
||||
File['/var/www'],
|
||||
],
|
||||
})
|
||||
|
||||
if ($http_share_iso) {
|
||||
class { '::fuel_project::nginx' :}
|
||||
::nginx::resource::vhost { 'share':
|
||||
server_name => ['_'],
|
||||
autoindex => 'on',
|
||||
www_root => '/var/www',
|
||||
}
|
||||
|
||||
ensure_resource('file', '/var/www/fuelweb-iso', {
|
||||
ensure => 'directory',
|
||||
owner => 'jenkins',
|
||||
group => 'jenkins',
|
||||
mode => '0755',
|
||||
require => [
|
||||
User['jenkins'],
|
||||
File['/var/www'],
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
# Pin multistrap to a known-good version; priority 1000 keeps apt from
# upgrading it past the pinned release.
if (!defined(Package['multistrap'])) {
  package { 'multistrap' :
    ensure => '2.1.6ubuntu3'
  }
}
apt::pin { 'multistrap' :
  packages => 'multistrap',
  version  => '2.1.6ubuntu3',
  priority => 1000,
}

# Pin libxml2/python-libxml2 to work around a packaging regression.
# LP: https://bugs.launchpad.net/ubuntu/+source/libxml2/+bug/1375637
if (!defined(Package['libxml2'])) {
  package { 'libxml2' :
    ensure => '2.9.1+dfsg1-ubuntu1',
  }
}
if (!defined(Package['python-libxml2'])) {
  package { 'python-libxml2' :
    ensure => '2.9.1+dfsg1-ubuntu1',
  }
}
apt::pin { 'libxml2' :
  packages => 'libxml2 python-libxml2',
  version  => '2.9.1+dfsg1-ubuntu1',
  priority => 1000,
}
# /LP
|
||||
|
||||
# Sudo rules required by the ISO build job (content comes from the
# build_iso sudoers template; 0440 is the mode sudo requires).
file { 'jenkins-sudo-for-build_iso' :
  path    => '/etc/sudoers.d/build_fuel_iso',
  owner   => 'root',
  group   => 'root',
  mode    => '0440',
  content => template('fuel_project/jenkins/slave/build_iso.sudoers.d.erb')
}
|
||||
|
||||
}
|
||||
|
||||
# osci_tests - for deploying osci jenkins slaves
if ($osci_test == true) {

  # osci needed packages
  $osci_test_packages = [
    'osc',
    'yum-utils',
  ]

  ensure_packages($osci_test_packages)

  # sudo for user 'jenkins'
  file { 'jenkins-sudo-for-osci-vm' :
    path    => '/etc/sudoers.d/jenkins_sudo',
    owner   => 'root',
    group   => 'root',
    mode    => '0440',
    content => template('fuel_project/jenkins/slave/build_iso.sudoers.d.erb'),
    require => User['jenkins'],
  }

  # obs client settings
  file { 'oscrc' :
    path    => '/home/jenkins/.oscrc',
    owner   => 'jenkins',
    group   => 'jenkins',
    mode    => '0644',
    content => template('fuel_project/jenkins/slave/oscrc.erb'),
    require => [
      Package[$osci_test_packages],
      User['jenkins'],
    ],
  }

  # osci kvm settings
  if (!defined(Class['::libvirt'])) {
    class { '::libvirt' :
      mdns_adv           => false,
      unix_sock_rw_perms => '0777',
      qemu               => true,
      defaultnetwork     => true,
    }
  }

  # osci needed directories
  file {
    [
      $osci_ubuntu_job_dir,
      $osci_centos_job_dir,
      $osci_trusty_job_dir
    ] :
    ensure  => 'directory',
    owner   => 'jenkins',
    group   => 'jenkins',
    require => User['jenkins'],
  }

  # rsync of vm images from existing rsync share
  class { 'rsync': package_ensure => 'present' }

  rsync::get { $osci_ubuntu_image_name :
    source  => "rsync://${osci_rsync_source_server}/${osci_ubuntu_remote_dir}/${osci_ubuntu_image_name}",
    path    => $osci_ubuntu_job_dir,
    timeout => 14400,
    require => [
      File[$osci_ubuntu_job_dir],
      User['jenkins'],
    ],
  }

  rsync::get { $osci_centos_image_name :
    source  => "rsync://${osci_rsync_source_server}/${osci_centos_remote_dir}/${osci_centos_image_name}",
    path    => $osci_centos_job_dir,
    timeout => 14400,
    require => [
      File[$osci_centos_job_dir],
      User['jenkins'],
    ],
  }

  rsync::get { $osci_trusty_image_name :
    source  => "rsync://${osci_rsync_source_server}/${osci_trusty_remote_dir}/${osci_trusty_image_name}",
    path    => $osci_trusty_job_dir,
    timeout => 14400,
    require => [
      File[$osci_trusty_job_dir],
      User['jenkins'],
    ],
  }

  # osci needed ssh keys.
  # One file resource per key: a single resource with an array title and an
  # array 'content' does NOT map contents to titles one-by-one — every file
  # would receive the same (invalid) array value.  Declare each key file
  # explicitly with its own content instead.
  $osci_key_requires = [
    File[
      '/home/jenkins/.ssh',
      $osci_ubuntu_job_dir,
      $osci_centos_job_dir,
      $osci_trusty_job_dir
    ],
    User['jenkins'],
  ]

  file { $osci_obs_jenkins_key :
    owner   => 'jenkins',
    group   => 'nogroup',
    mode    => '0600',
    content => $osci_obs_jenkins_key_contents,
    require => $osci_key_requires,
  }

  file { $osci_vm_ubuntu_jenkins_key :
    owner   => 'jenkins',
    group   => 'nogroup',
    mode    => '0600',
    content => $osci_vm_ubuntu_jenkins_key_contents,
    require => $osci_key_requires,
  }

  file { $osci_vm_centos_jenkins_key :
    owner   => 'jenkins',
    group   => 'nogroup',
    mode    => '0600',
    content => $osci_vm_centos_jenkins_key_contents,
    require => $osci_key_requires,
  }

  file { $osci_vm_trusty_jenkins_key :
    owner   => 'jenkins',
    group   => 'nogroup',
    mode    => '0600',
    content => $osci_vm_trusty_jenkins_key_contents,
    require => $osci_key_requires,
  }
}
|
||||
|
||||
# *** Custom tests ***

# anonymous statistics tests
if ($verify_fuel_stats) {
  class { '::fuel_stats::tests' : }
}
|
||||
|
||||
# Web tests by verify-fuel-web, stackforge-verify-fuel-web, verify-fuel-ostf
if ($verify_fuel_web) {
  $verify_fuel_web_packages = [
    'inkscape',
    'libxslt1-dev',
    'nodejs-legacy',
    'npm',
    'postgresql-server-dev-all',
    'python-all-dev',
    'python-cloud-sptheme',
    'python-sphinx',
    'python-tox',
    'python-virtualenv',
    'python2.6',
    'python2.6-dev',
    'python3-dev',
    'rst2pdf',
  ]

  ensure_packages($verify_fuel_web_packages)

  # optional npm packages, installed through npm rather than apt
  if ($verify_fuel_web_npm_packages) {
    ensure_packages($verify_fuel_web_npm_packages, {
      provider => npm,
      require  => Package['npm'],
    })
  }

  # browsers + fonts and a virtual X display for selenium UI tests
  if ($fuel_web_selenium) {
    $selenium_packages = [
      'chromium-browser',
      'chromium-chromedriver',
      'firefox',
      'imagemagick',
      'x11-apps',
      'xfonts-100dpi',
      'xfonts-75dpi',
      'xfonts-cyrillic',
      'xfonts-scalable',
    ]
    ensure_packages($selenium_packages)

    class { 'display' :
      display => $x11_display_num,
      width   => 1366,
      height  => 768,
    }

  }

  if (!defined(Class['postgresql::server'])) {
    class { 'postgresql::server' : }
  }

  # databases used by the nailgun / ostf test suites
  postgresql::server::db { $nailgun_db:
    user     => 'nailgun',
    password => 'nailgun',
  }
  postgresql::server::db { $ostf_db:
    user     => 'ostf',
    password => 'ostf',
  }
  file { '/var/log/nailgun' :
    ensure  => directory,
    owner   => 'jenkins',
    require => User['jenkins'],
  }
}
|
||||
|
||||
# For the below roles we need to have rvm base class
# (astute tests, syntax checks and plugin builds all install gems via rvm)
if ($verify_fuel_astute or $simple_syntax_check or $build_fuel_plugins) {
  class { 'rvm' : }
  rvm::system_user { 'jenkins': }
  # install the project ruby and make it the system default
  rvm_system_ruby { "ruby-${ruby_version}" :
    ensure      => 'present',
    default_use => true,
    require     => Class['rvm'],
  }
}
|
||||
|
||||
|
||||
# Astute tests require only rvm package
if ($verify_fuel_astute) {
  rvm_gem { 'bundler' :
    ensure       => 'present',
    ruby_version => "ruby-${ruby_version}",
    require      => Rvm_system_ruby["ruby-${ruby_version}"],
  }
  # FIXME: remove this hack, create package raemon?
  # raemon is not published as a distro package, so the gem file is shipped
  # inside this module and installed from a local path.
  $raemon_file = '/tmp/raemon-0.3.0.gem'
  file { $raemon_file :
    source => 'puppet:///modules/fuel_project/gems/raemon-0.3.0.gem',
  }
  rvm_gem { 'raemon' :
    ensure       => 'present',
    ruby_version => "ruby-${ruby_version}",
    source       => $raemon_file,
    require      => [ Rvm_system_ruby["ruby-${ruby_version}"], File[$raemon_file] ],
  }
}
|
||||
|
||||
# Simple syntax check by:
# - verify-fuel-devops
# - fuellib_review_syntax_check (puppet tests)
if ($simple_syntax_check) {
  $syntax_check_packages = [
    'libxslt1-dev',
    'puppet-lint',
    'python-flake8',
    'python-tox',
  ]

  ensure_packages($syntax_check_packages)

  # 'present' (not the equivalent alias 'installed') to stay consistent
  # with every other rvm_gem declaration in this manifest
  rvm_gem { 'puppet-lint' :
    ensure       => 'present',
    ruby_version => "ruby-${ruby_version}",
    require      => Rvm_system_ruby["ruby-${ruby_version}"],
  }
}
|
||||
|
||||
# Check tasks graph
# (validates deployment task graphs: pytest runner, schema validation,
# graph library)
if ($check_tasks_graph){
  $tasks_graph_check_packages = [
    'python-pytest',
    'python-jsonschema',
    'python-networkx',
  ]

  ensure_packages($tasks_graph_check_packages)
}
|
||||
|
||||
# Verify Fuel docs
# (sphinx toolchain plus diagram/PDF generation dependencies)
if ($verify_fuel_docs) {
  $verify_fuel_docs_packages = [
    'inkscape',
    'libjpeg-dev',
    'make',
    'plantuml',
    'python-cloud-sptheme',
    'python-sphinx',
    'python-sphinxcontrib.plantuml',
    'rst2pdf',
    'texlive-font-utils', # provides epstopdf binary
  ]

  ensure_packages($verify_fuel_docs_packages)
}

# Verify Jenkins jobs
# (jenkins-job-builder tests: bats for bash tests, shellcheck for linting)
if ($verify_jenkins_jobs) {
  $verify_jenkins_jobs_packages = [
    'bats',
    'python-tox',
    'shellcheck',
  ]

  ensure_packages($verify_jenkins_jobs_packages)
}
|
||||
|
||||
# Verify and Build fuel-plugins project
if ($build_fuel_plugins) {
  $build_fuel_plugins_packages = [
    'rpm',
    'createrepo',
    'dpkg-dev',
    'libyaml-dev',
    'make',
    'python-dev',
    'ruby-dev',
    'gcc',
    'python2.6',
    'python2.6-dev',
    'python-tox',
    'python-virtualenv',
  ]

  ensure_packages($build_fuel_plugins_packages)

  # we also need fpm gem (used to build rpm/deb plugin packages;
  # requires make at gem-native-extension build time)
  rvm_gem { 'fpm' :
    ensure       => 'present',
    ruby_version => "ruby-${ruby_version}",
    require      => [
      Rvm_system_ruby["ruby-${ruby_version}"],
      Package['make'],
    ],
  }
}
|
||||
|
||||
# verify requirements-{deb|rpm}.txt files from fuel-main project
# test-requirements-{deb|rpm} jobs on fuel-ci
# NOTE(review): '== true' only matches a strict boolean, unlike the bare
# truthiness checks used by the other role flags above — confirm intended.
if ($verify_fuel_pkgs_requirements==true){
  $verify_fuel_requirements_packages = [
    'devscripts',
    'yum-utils',
  ]

  ensure_packages($verify_fuel_requirements_packages)
}
|
||||
|
||||
# Docker is needed both when explicitly requested and by the ISO/package
# build roles.
if ($install_docker or $build_fuel_iso or $build_fuel_packages) {
  if (!$docker_package) {
    fail('You must define docker package explicitly')
  }

  # the require on Package['lxc-docker'] (declared absent below) forces the
  # removal of the legacy package before the new one is installed
  if (!defined(Package[$docker_package])) {
    package { $docker_package :
      ensure  => 'present',
      require => Package['lxc-docker'],
    }
  }

  # Docker has an API and in some cases will not be automatically started
  # and enabled by the package, so manage the service explicitly.
  if ($docker_service and (!defined(Service[$docker_service]))) {
    service { $docker_service :
      ensure    => 'running',
      enable    => true,
      hasstatus => true,
      require   => [
        Package[$docker_package],
        Group['docker'],
      ],
    }
  }

  # legacy package superseded by $docker_package
  package { 'lxc-docker' :
    ensure => 'absent',
  }

  group { 'docker' :
    ensure  => 'present',
    require => Package[$docker_package],
  }

  # add jenkins to the docker group so jobs can talk to the daemon socket
  User <| title == 'jenkins' |> {
    groups +> 'docker',
    require => Group['docker'],
  }

  # on externally reachable hosts the firewall must not break
  # container <-> host traffic on the docker bridge
  if ($external_host) {
    firewall { '010 accept all to docker0 interface':
      proto   => 'all',
      iniface => 'docker0',
      action  => 'accept',
      require => Package[$docker_package],
    }
  }
}
|
||||
|
||||
# Grant sudo to members of an LDAP group (the template reads
# $ldap_sudo_group from this scope).
if($ldap_sudo_group) {
  file { '/etc/sudoers.d/sandbox':
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0440',
    content => template('fuel_project/jenkins/slave/sandbox.sudoers.d.erb'),
  }
}
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
# Class: fuel_project::jenkins::slave::custom_scripts
#
# Jenkins slave flavour that runs custom scripts inside Docker containers.
#
# Parameters:
#   $docker_package - name of the Docker package to install (required)
#   $configs_path   - directory that holds per-script config files
#   $docker_user    - user that must be able to talk to the Docker daemon
#   $known_hosts    - optional hash of ssh::known_host entries for $docker_user
#   $packages       - extra packages required by the custom scripts

class fuel_project::jenkins::slave::custom_scripts (
  $docker_package,
  $configs_path = '/etc/custom_scripts/',
  $docker_user = 'jenkins',
  $known_hosts = undef,
  $packages = [
    'git',
  ],
) {

  # per-host config file definitions from hiera, keyed by target path
  $configs = hiera_hash('fuel_project::jenkins::slave::custom_scripts::configs', {})

  if (!defined(Class['::fuel_project::common'])) {
    class { '::fuel_project::common' : }
  }

  if (!defined(Class['::jenkins::slave'])) {
    class { '::jenkins::slave' : }
  }

  # install required packages
  ensure_packages($packages)
  ensure_packages($docker_package)

  # ensure $docker_user in docker group
  # docker group will be created by docker package
  User <| title == $docker_user |> {
    groups +> 'docker',
    require => Package[$docker_package],
  }

  if ($known_hosts) {
    create_resources('ssh::known_host', $known_hosts, {
      user      => $docker_user,
      overwrite => false,
      require   => User[$docker_user],
    })

  }

  # NOTE(review): hiera_hash() returns {} when no data is found; verify that
  # an empty hash is falsy here, otherwise this branch always runs.
  if ($configs) {
    # config dir readable by root only — configs may contain credentials
    file { $configs_path:
      ensure => 'directory',
      owner  => 'root',
      group  => 'root',
      mode   => '0700',
    }

    create_resources(file, $configs, {
      ensure  => 'present',
      owner   => 'root',
      group   => 'root',
      mode    => '0600',
      require => File[$configs_path],
    })

  }

}
|
|
@ -0,0 +1,94 @@
|
|||
# Class: fuel_project::lab_cz
#
# Used for deploying lab-cz.vm.mirantis.net: a PXE/NFS boot server with
# libvirt for lab VMs.
#
# Parameters:
#   $external_host - passed through to common (enables firewall handling)
class fuel_project::lab_cz (
  $external_host = true,
) {
  # Used for network management
  class { 'common' :
    external_host => $external_host
  }

  include ssh::ldap

  # local libvirt for lab VMs; no remote TCP/TLS listeners
  class { 'libvirt' :
    qemu               => false,
    listen_tcp         => false,
    listen_tls         => false,
    unix_sock_rw_perms => '0777',
    unix_sock_group    => 'libvirtd',
  }

  $packages = [
    'syslinux',
    'python-paramiko',
    'python-netaddr',
    'python-xmlbuilder',
    'nfs-kernel-server',
    'ipmitool',
    'vlan',
  ]

  ensure_packages($packages)

  # export the tftp root read-only over NFS for network installs
  file { '/etc/exports' :
    ensure  => 'present',
    content => "/var/lib/tftpboot *(ro,async,no_subtree_check,no_root_squash,crossmnt)\n",
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    require => Package['nfs-kernel-server'],
    notify  => Service['nfs-export-fuel'],
  }

  service { 'nfs-export-fuel' :
    ensure     => 'running',
    name       => 'nfs-kernel-server',
    enable     => true,
    # 'hasrestart' (boolean) tells Puppet the init script supports restart;
    # the 'restart' attribute takes a command string, so 'restart => true'
    # was incorrect here.
    hasrestart => true,
  }

  # NOTE(review): 0777 is very permissive for these directories — confirm
  # whether build jobs really need world-write access.
  file { [
    '/var/lib/tftpboot',
    '/var/lib/tftpboot/pxelinux.cfg',
    '/srv/downloads' ] :
    ensure => 'directory',
    owner  => 'root',
    group  => 'root',
    mode   => '0777',
  }

  # PXE bootloader shipped by the syslinux package
  file { '/var/lib/tftpboot/pxelinux.0' :
    ensure  => 'present',
    source  => 'file:///usr/lib/syslinux/pxelinux.0',
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    require => [
      File['/var/lib/tftpboot'],
      Package['syslinux'],
    ]
  }

  file { '/var/lib/tftpboot/pxelinux.cfg/default' :
    ensure  => 'present',
    source  => 'puppet:///modules/fuel_project/lab_cz/default',
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    require => File['/var/lib/tftpboot/pxelinux.cfg'],
  }

  file { '/etc/sudoers.d/deploy' :
    ensure => 'present',
    source => 'puppet:///modules/fuel_project/lab_cz/sudo_deploy',
    owner  => 'root',
    group  => 'root',
    mode   => '0440',
  }

  file { '/etc/network/interfaces' :
    ensure => 'present',
    source => 'puppet:///modules/fuel_project/lab_cz/network_interfaces',
    owner  => 'root',
    group  => 'root',
    mode   => '0644',
  }
}
|
|
@ -0,0 +1,19 @@
|
|||
# Class: fuel_project::mongo_common
#
# Sets up a mongodb node. Every node gets the common base plus the mongodb
# client and server; the primary additionally configures the replica set
# and the project databases/users.
#
# Parameters:
#   $primary - whether this node is the replica-set primary

class fuel_project::mongo_common (
  $primary = false,
)
{
  # base + client + server are common to both roles
  class { '::fuel_project::common' :}
  class { '::mongodb::client' : }
  class { '::mongodb::server' : }

  Class['::fuel_project::common'] ->
  Class['::mongodb::client'] ->
  Class['::mongodb::server']

  if $primary {
    # the primary also initialises the replset and creates databases/users
    class { '::mongodb::replset' : }
    class { '::fuel_project::mongodb' : }

    Class['::mongodb::server'] ->
    Class['::mongodb::replset'] ->
    Class['::fuel_project::mongodb']
  }
}
|
|
@ -0,0 +1,44 @@
|
|||
# Class: fuel_project::mongodb
#
# Creates the ceilometer application database and the mongodb admin user.
# The ceilometer db is created first (arrow chain below) because creating
# the admin user enables auth for subsequent operations.
#
# NOTE(review): default credentials here are placeholders — they must be
# overridden from hiera in any real deployment.

class fuel_project::mongodb (
  $user = 'ceilometer',
  $admin_username = 'admin',
  $password = 'ceilometer',
  $admin_password = 'admin',
  $admin_database = 'admin',
)
{
  # application database for ceilometer
  mongodb::db { 'ceilometer':
    user           => $user,
    password       => $password,
    roles          => [ 'readWrite', 'dbAdmin' ],
    admin_username => $admin_username,
    admin_password => $admin_password,
    admin_database => $admin_database,
  } ->

  # superuser account with full cluster/database privileges
  mongodb::db { 'admin':
    user           => $admin_username,
    password       => $admin_password,
    roles          => [
      'userAdmin',
      'readWrite',
      'dbAdmin',
      'dbAdminAnyDatabase',
      'readAnyDatabase',
      'readWriteAnyDatabase',
      'userAdminAnyDatabase',
      'clusterAdmin',
      'clusterManager',
      'clusterMonitor',
      'hostManager',
      'root',
      'restore',
    ],
    admin_username => $admin_username,
    admin_password => $admin_password,
    admin_database => $admin_database,
  }

}
|
|
@ -0,0 +1,222 @@
|
|||
# Class: fuel_project::nailgun_demo
#
# Deploys a demo instance of the nailgun (fuel-web) UI in fake-tasks mode:
# clones fuel-web, prepares a virtualenv + postgres database, builds the UI
# assets and serves the app via uwsgi behind nginx.
#
# Parameters:
#   $apply_firewall_rules - open ports 80/8000 when true
#   $lock_file            - if this file exists, one-shot setup steps are skipped
#   $nginx_*              - log locations/format for the nginx vhosts
#   $server_name          - public hostname for the demo vhosts
class fuel_project::nailgun_demo (
  $apply_firewall_rules = false,
  $lock_file = '',
  $nginx_access_log = '/var/log/nginx/access.log',
  $nginx_error_log = '/var/log/nginx/error.log',
  $nginx_log_format = 'proxy',
  $server_name = '',
) {

  if (!defined(Class['fuel_project::common'])) {
    class { 'fuel_project::common':
      external_host => $apply_firewall_rules,
    }
  }

  if (!defined(Class['fuel_project::nginx'])) {
    class { 'fuel_project::nginx': }
  }

  if (!defined(Class['postgresql::server'])) {
    class { 'postgresql::server': }
  }

  # required packages
  # http://docs.mirantis.com/fuel-dev/develop/nailgun/development/env.html
  $packages = [
    'git',
    'npm',
    'nodejs-legacy',
    'postgresql-server-dev-all',
  ]

  $npm_packages = [
    'grunt-cli',
    'gulp',
    'inflight',
  ]

  package { $packages:
    ensure => 'present',
  }

  ensure_packages($npm_packages, {
    provider => npm,
    require  => Package['npm'],
  })

  # create main user
  user { 'nailgun':
    ensure     => 'present',
    home       => '/home/nailgun',
    managehome => true,
  }

  # create log directory
  file { '/var/log/nailgun':
    ensure  => 'directory',
    owner   => 'nailgun',
    require => User['nailgun'],
  }

  file { '/var/log/remote':
    ensure  => 'directory',
    owner   => 'nailgun',
    require => User['nailgun'],
  }

  # create main directories
  file { '/usr/share/fuel-web':
    ensure  => 'directory',
    owner   => 'nailgun',
    require => User['nailgun'],
  }

  # clone fuel-web
  vcsrepo { '/usr/share/fuel-web':
    ensure   => 'present',
    provider => 'git',
    source   => 'https://github.com/stackforge/fuel-web',
    user     => 'nailgun',
    require  => [User['nailgun'],
      File['/usr/share/fuel-web'],
      Package['git'] ],
  }

  # prepare database
  postgresql::server::db { 'nailgun' :
    user     => 'nailgun',
    password => postgresql_password('nailgun', 'nailgun'),
  }

  # prepare environment
  venv::venv { 'venv-nailgun' :
    path         => '/home/nailgun/python',
    requirements => '/usr/share/fuel-web/nailgun/requirements.txt',
    options      => '',
    user         => 'nailgun',
    require      => [
      Vcsrepo['/usr/share/fuel-web'],
      Package[$packages],
    ]
  }

  # one-shot DB bootstrap; guarded by $lock_file so re-runs are no-ops
  venv::exec { 'venv-syncdb' :
    command => './manage.py syncdb',
    cwd     => '/usr/share/fuel-web/nailgun',
    venv    => '/home/nailgun/python',
    user    => 'nailgun',
    require => [Venv::Venv['venv-nailgun'],
      Postgresql::Server::Db['nailgun'],],
    onlyif  => "test ! -f ${lock_file}",
  }

  venv::exec { 'venv-loaddefault' :
    command => './manage.py loaddefault',
    cwd     => '/usr/share/fuel-web/nailgun',
    venv    => '/home/nailgun/python',
    user    => 'nailgun',
    require => Venv::Exec['venv-syncdb'],
    onlyif  => "test ! -f ${lock_file}",
  }

  venv::exec { 'venv-loaddata' :
    command => './manage.py loaddata nailgun/fixtures/sample_environment.json',
    cwd     => '/usr/share/fuel-web/nailgun',
    venv    => '/home/nailgun/python',
    user    => 'nailgun',
    require => Venv::Exec['venv-loaddefault'],
    onlyif  => "test ! -f ${lock_file}",
  }

  # 'path' is required: plain exec resources refuse unqualified commands
  # ('npm install' / the 'test' call in onlyif) unless a search path is set
  exec { 'venv-npm' :
    command => 'npm install',
    path    => ['/usr/local/bin', '/usr/bin', '/bin'],
    cwd     => '/usr/share/fuel-web/nailgun',
    user    => 'nailgun',
    require => [
      Venv::Exec['venv-loaddata'],
      Package[$npm_packages],
    ],
    onlyif  => "test ! -f ${lock_file}",
  }

  exec { 'venv-gulp' :
    command     => '/usr/local/bin/gulp bower',
    path        => ['/usr/local/bin', '/usr/bin', '/bin'],
    cwd         => '/usr/share/fuel-web/nailgun',
    environment => 'HOME=/home/nailgun',
    user        => 'nailgun',
    require     => Exec['venv-npm'],
    onlyif      => "test ! -f ${lock_file}",
  }

  # run the demo without a real orchestrator
  file_line { 'fake_mode':
    path    => '/usr/share/fuel-web/nailgun/nailgun/settings.yaml',
    line    => 'FAKE_TASKS: "1"',
    require => Vcsrepo['/usr/share/fuel-web'],
  }

  # port 80 redirects to the app on port 8000
  ::nginx::resource::vhost { 'demo-redirect' :
    ensure              => 'present',
    listen_port         => 80,
    server_name         => [$server_name],
    www_root            => '/var/www',
    access_log          => $nginx_access_log,
    error_log           => $nginx_error_log,
    format_log          => $nginx_log_format,
    location_cfg_append => {
      rewrite => '^ http://$server_name:8000$request_uri permanent',
    },
  }

  nginx::resource::vhost { 'demo' :
    ensure              => 'present',
    listen_port         => 8000,
    server_name         => [$server_name],
    access_log          => $nginx_access_log,
    error_log           => $nginx_error_log,
    format_log          => $nginx_log_format,
    uwsgi               => '127.0.0.1:7933',
    location_cfg_append => {
      uwsgi_connect_timeout => '3m',
      uwsgi_read_timeout    => '3m',
      uwsgi_send_timeout    => '3m',
    }
  }

  nginx::resource::location { 'demo-static' :
    ensure   => 'present',
    vhost    => 'demo',
    location => '/static/',
    www_root => '/usr/share/fuel-web/nailgun',
  }

  uwsgi::application { 'fuel-web' :
    plugins        => 'python',
    uid            => 'nailgun',
    gid            => 'nailgun',
    socket         => '127.0.0.1:7933',
    chdir          => '/usr/share/fuel-web/nailgun',
    home           => '/home/nailgun/python',
    module         => 'nailgun.wsgi:application',
    env            => 'DJANGO_SETTINGS_MODULE=nailgun.settings',
    workers        => '8',
    enable_threads => true,
    require        => [File_line['fake_mode'],
      Exec['venv-gulp'],
      User['nailgun'],],
  }

  if $apply_firewall_rules {
    include firewall_defaults::pre
    firewall { '1000 Allow demo 80, 8000 connection' :
      ensure  => present,
      dport   => [80, 8000],
      proto   => 'tcp',
      action  => 'accept',
      require => Class['firewall_defaults::pre'],
    }
  }

}
|
|
@ -0,0 +1,38 @@
|
|||
# Class: fuel_project::nginx
#
# Base nginx setup shared by fuel_project roles: the nginx service itself,
# a local stub_status endpoint for monitoring, an optional log-sharing
# vhost protected by puppet-CA client certificates, and shared error pages.
class fuel_project::nginx {
  if (!defined(Class['::nginx'])) {
    class { '::nginx' :}
  }

  # loopback-only stub_status endpoint, scraped by the zabbix item below
  ::nginx::resource::vhost { 'stub_status' :
    ensure              => 'present',
    listen_ip           => '127.0.0.1',
    listen_port         => 61929,
    location_custom_cfg => {
      stub_status => true,
    },
  }

  # On agent-managed nodes (not puppet apply), expose /var/log over TLS
  # with mandatory client-certificate auth using the node's puppet certs.
  if ( ! $::puppet_apply ) {
    ::nginx::resource::vhost { 'logshare' :
      ensure                 => 'present',
      listen_port            => 4637,
      gzip_types             => 'application/octet-stream',
      ssl_port               => 4637,
      ssl                    => true,
      ssl_cert               => "/var/lib/puppet/ssl/certs/${::fqdn}.pem",
      ssl_key                => "/var/lib/puppet/ssl/private_keys/${::fqdn}.pem",
      ssl_client_certificate => '/var/lib/puppet/ssl/certs/ca.pem',
      ssl_crl                => '/var/lib/puppet/ssl/crl.pem',
      ssl_verify_client      => 'on',
      www_root               => '/var/log',
    }
  }

  # shared static error pages used by role vhosts
  ensure_packages('error-pages')

  zabbix::item { 'nginx' :
    content => 'puppet:///modules/fuel_project/zabbix/nginx_items.conf',
  }
}
|
|
@ -0,0 +1,68 @@
|
|||
# Class: fuel_project::puppet::master
#
# Deploys a puppet master for the project: common base, nginx frontend,
# the ::puppet::master class (hiera + serving config passed through), and
# a cron-driven script that keeps /etc/puppet in sync with the
# puppet-manifests git repository.
#
# Most parameters are passed straight through to ::puppet::master or to
# the puppet-manifests-update.sh template; see those for semantics.
class fuel_project::puppet::master (
  $apply_firewall_rules = false,
  $enable_update_cronjob = true,
  $external_host = false,
  $firewall_allow_sources = {},
  $hiera_backends = ['yaml'],
  $hiera_config = '/etc/hiera.yaml',
  $hiera_config_template = 'puppet/hiera.yaml.erb',
  $hiera_hierarchy = ['nodes/%{::clientcert}', 'roles/%{::role}', 'locations/%{::location}', 'common'],
  $hiera_json_datadir = '/var/lib/hiera',
  $hiera_logger = 'console',
  $hiera_merge_behavior = 'deeper',
  $hiera_yaml_datadir = '/var/lib/hiera',
  $manifests_binpath = '/etc/puppet/bin',
  $manifests_branch = 'master',
  $manifests_manifestspath = '/etc/puppet/manifests',
  $manifests_modulespath = '/etc/puppet/modules',
  $manifests_repo = 'ssh://puppet-master-tst@review.fuel-infra.org:29418/fuel-infra/puppet-manifests',
  $manifests_tmpdir = '/tmp/puppet-manifests',
  $puppet_config = '/etc/puppet/puppet.conf',
  $puppet_environment = 'production',
  $puppet_master_run_with = 'nginx+uwsgi',
  $puppet_server = $::fqdn,
) {
  class { '::fuel_project::common' :
    external_host => $external_host,
  }
  class { '::fuel_project::nginx' :
    require => Class['::fuel_project::common'],
  }
  class { '::puppet::master' :
    apply_firewall_rules   => $apply_firewall_rules,
    firewall_allow_sources => $firewall_allow_sources,
    hiera_backends         => $hiera_backends,
    hiera_config           => $hiera_config,
    hiera_config_template  => $hiera_config_template,
    hiera_hierarchy        => $hiera_hierarchy,
    hiera_json_datadir     => $hiera_json_datadir,
    hiera_logger           => $hiera_logger,
    hiera_merge_behavior   => $hiera_merge_behavior,
    hiera_yaml_datadir     => $hiera_yaml_datadir,
    config                 => $puppet_config,
    environment            => $puppet_environment,
    server                 => $puppet_server,
    puppet_master_run_with => $puppet_master_run_with,
    require                => [
      Class['::fuel_project::common'],
      Class['::fuel_project::nginx'],
    ],
  }
  # script that pulls the puppet-manifests repo and installs it; the
  # template reads the $manifests_* parameters above
  file { '/usr/local/bin/puppet-manifests-update.sh' :
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0755',
    content => template('fuel_project/puppet/master/puppet-manifests-update.sh.erb')
  }
  if ($enable_update_cronjob) {
    # run every 5 minutes; timeout guards against a hung git fetch,
    # output goes to syslog via logger
    cron { 'puppet-manifests-update' :
      command => '/usr/bin/timeout -k80 60 /usr/local/bin/puppet-manifests-update.sh 2>&1 | logger -t puppet-manifests-update',
      user    => 'root',
      minute  => '*/5',
      require => File['/usr/local/bin/puppet-manifests-update.sh'],
    }
  }
}
|
|
@ -0,0 +1,19 @@
|
|||
# class fuel_project::racktables
#
# Deploys a Racktables instance behind the project's shared nginx setup.
#
# Parameters:
#   $firewall_enable - when true, apply firewall defaults and open 80/443
class fuel_project::racktables (
  $firewall_enable = false,
) {
  # base host configuration; firewall handling follows $firewall_enable
  class { '::fuel_project::common' :
    external_host => $firewall_enable,
  }

  class { '::fuel_project::nginx' : }

  class { '::racktables' : }

  if $firewall_enable {
    include firewall_defaults::pre

    firewall { '1000 - allow http/https connections to racktables' :
      dport   => [80, 443],
      action  => 'accept',
      require => Class['firewall_defaults::pre'],
    }
  }
}
|
|
@ -0,0 +1,278 @@
|
|||
#
|
||||
class fuel_project::roles::docs (
|
||||
$community_hostname = 'docs.fuel-infra.org',
|
||||
$community_ssl_cert_content = '',
|
||||
$community_ssl_cert_filename = '/etc/ssl/community-docs.crt',
|
||||
$community_ssl_key_content = '',
|
||||
$community_ssl_key_filename = '/etc/ssl/community-docs.key',
|
||||
$docs_user = 'docs',
|
||||
$fuel_version = '6.0',
|
||||
$hostname = 'docs.mirantis.com',
|
||||
$nginx_access_log = '/var/log/nginx/access.log',
|
||||
$nginx_error_log = '/var/log/nginx/error.log',
|
||||
$nginx_log_format = undef,
|
||||
$redirect_root_to = 'http://www.mirantis.com/openstack-documentation/',
|
||||
$specs_hostname = 'specs.fuel-infra.org',
|
||||
$specs_www_root = '/var/www/specs',
|
||||
$ssh_auth_key = undef,
|
||||
$ssl_cert_content = '',
|
||||
$ssl_cert_filename = '/etc/ssl/docs.crt',
|
||||
$ssl_key_content = '',
|
||||
$ssl_key_filename = '/etc/ssl/docs.key',
|
||||
$www_root = '/var/www',
|
||||
) {
|
||||
if ( ! defined(Class['::fuel_project::nginx']) ) {
|
||||
class { '::fuel_project::nginx' : }
|
||||
}
|
||||
|
||||
user { $docs_user :
|
||||
ensure => 'present',
|
||||
shell => '/bin/bash',
|
||||
managehome => true,
|
||||
}
|
||||
|
||||
ensure_packages('error-pages')
|
||||
|
||||
if ($ssl_cert_content and $ssl_key_content) {
|
||||
file { $ssl_cert_filename :
|
||||
ensure => 'present',
|
||||
mode => '0600',
|
||||
group => 'root',
|
||||
owner => 'root',
|
||||
content => $ssl_cert_content,
|
||||
}
|
||||
file { $ssl_key_filename :
|
||||
ensure => 'present',
|
||||
mode => '0600',
|
||||
group => 'root',
|
||||
owner => 'root',
|
||||
content => $ssl_key_content,
|
||||
}
|
||||
Nginx::Resource::Vhost <| title == $hostname |> {
|
||||
ssl => true,
|
||||
ssl_cert => $ssl_cert_filename,
|
||||
ssl_key => $ssl_key_filename,
|
||||
listen_port => 443,
|
||||
ssl_port => 443,
|
||||
}
|
||||
::nginx::resource::vhost { "${hostname}-redirect" :
|
||||
ensure => 'present',
|
||||
server_name => [$hostname],
|
||||
listen_port => 80,
|
||||
www_root => $www_root,
|
||||
access_log => $nginx_access_log,
|
||||
error_log => $nginx_error_log,
|
||||
format_log => $nginx_log_format,
|
||||
location_cfg_append => {
|
||||
return => "301 https://${hostname}\$request_uri",
|
||||
},
|
||||
}
|
||||
$ssl = true
|
||||
} else {
|
||||
$ssl = false
|
||||
}
|
||||
|
||||
if ($community_ssl_cert_content and $community_ssl_key_content) {
|
||||
file { $community_ssl_cert_filename :
|
||||
ensure => 'present',
|
||||
mode => '0600',
|
||||
group => 'root',
|
||||
owner => 'root',
|
||||
content => $community_ssl_cert_content,
|
||||
}
|
||||
file { $community_ssl_key_filename :
|
||||
ensure => 'present',
|
||||
mode => '0600',
|
||||
group => 'root',
|
||||
owner => 'root',
|
||||
content => $community_ssl_key_content,
|
||||
}
|
||||
Nginx::Resource::Vhost <| title == $community_hostname |> {
|
||||
ssl => true,
|
||||
ssl_cert => $community_ssl_cert_filename,
|
||||
ssl_key => $community_ssl_key_filename,
|
||||
listen_port => 443,
|
||||
ssl_port => 443,
|
||||
}
|
||||
::nginx::resource::vhost { "${community_hostname}-redirect" :
|
||||
ensure => 'present',
|
||||
server_name => [$community_hostname],
|
||||
listen_port => 80,
|
||||
www_root => $www_root,
|
||||
access_log => $nginx_access_log,
|
||||
error_log => $nginx_error_log,
|
||||
format_log => $nginx_log_format,
|
||||
location_cfg_append => {
|
||||
return => "301 https://${community_hostname}\$request_uri",
|
||||
},
|
||||
}
|
||||
$community_ssl = true
|
||||
} else {
|
||||
$community_ssl = false
|
||||
}
|
||||
|
||||
if ($ssh_auth_key) {
|
||||
ssh_authorized_key { 'fuel_docs@jenkins' :
|
||||
user => $docs_user,
|
||||
type => 'ssh-rsa',
|
||||
key => $ssh_auth_key,
|
||||
require => User[$docs_user],
|
||||
}
|
||||
}
|
||||
|
||||
::nginx::resource::vhost { $community_hostname :
|
||||
ensure => 'present',
|
||||
server_name => [$community_hostname],
|
||||
listen_port => 80,
|
||||
www_root => $www_root,
|
||||
access_log => $nginx_access_log,
|
||||
error_log => $nginx_error_log,
|
||||
format_log => $nginx_log_format,
|
||||
location_cfg_append => {
|
||||
'rewrite' => {
|
||||
'^/$' => '/fuel-dev',
|
||||
'^/express/?$' => '/openstack/express/latest',
|
||||
'^/(express/.+)' => '/openstack/$1',
|
||||
'^/fuel/?$' => "/openstack/fuel/fuel-${fuel_version}",
|
||||
'^/(fuel/.+)' => '/openstack/$1',
|
||||
'^/openstack/fuel/$' => "/openstack/fuel/fuel-${fuel_version}",
|
||||
},
|
||||
},
|
||||
vhost_cfg_append => {
|
||||
'error_page 403' => '/fuel-infra/403.html',
|
||||
'error_page 404' => '/fuel-infra/404.html',
|
||||
'error_page 500 502 504' => '/fuel-infra/5xx.html',
|
||||
}
|
||||
}
|
||||
|
||||
# error pages for community
|
||||
::nginx::resource::location { "${community_hostname}-error-pages" :
|
||||
ensure => 'present',
|
||||
vhost => $community_hostname,
|
||||
location => '~ ^\/fuel-infra\/(403|404|5xx)\.html$',
|
||||
ssl => true,
|
||||
ssl_only => true,
|
||||
www_root => '/usr/share/error_pages',
|
||||
require => Package['error-pages'],
|
||||
}
|
||||
|
||||
# Disable fuel-master docs on community site
|
||||
::nginx::resource::location { "${community_hostname}/openstack/fuel/fuel-master" :
|
||||
vhost => $community_hostname,
|
||||
location => '~ \/openstack\/fuel\/fuel-master\/.*',
|
||||
www_root => $www_root,
|
||||
ssl => $community_ssl,
|
||||
ssl_only => $community_ssl,
|
||||
location_cfg_append => {
|
||||
return => 404,
|
||||
},
|
||||
}
|
||||
|
||||
::nginx::resource::location { "${community_hostname}/fuel-dev" :
|
||||
vhost => $community_hostname,
|
||||
location => '/fuel-dev',
|
||||
location_alias => "${www_root}/fuel-dev-docs/fuel-dev-master",
|
||||
ssl => $community_ssl,
|
||||
ssl_only => $community_ssl,
|
||||
}
|
||||
|
||||
# Bug: https://bugs.launchpad.net/fuel/+bug/1473440
|
||||
::nginx::resource::location { "${community_hostname}/fuel-qa" :
|
||||
vhost => $community_hostname,
|
||||
location => '/fuel-qa',
|
||||
location_alias => "${www_root}/fuel-qa/fuel-master",
|
||||
ssl => $community_ssl,
|
||||
ssl_only => $community_ssl,
|
||||
}
|
||||
|
||||
::nginx::resource::vhost { $hostname :
|
||||
ensure => 'present',
|
||||
server_name => [$hostname],
|
||||
listen_port => 80,
|
||||
www_root => $www_root,
|
||||
access_log => $nginx_access_log,
|
||||
error_log => $nginx_error_log,
|
||||
format_log => $nginx_log_format,
|
||||
location_cfg_append => {
|
||||
'rewrite' => {
|
||||
'^/$' => $redirect_root_to,
|
||||
'^/fuel-dev/?(.*)$' => "http://${community_hostname}/fuel-dev/\$1",
|
||||
'^/express/?$' => '/openstack/express/latest',
|
||||
'^/(express/.+)' => '/openstack/$1',
|
||||
'^/fuel/?$' => "/openstack/fuel/fuel-${fuel_version}",
|
||||
'^/(fuel/.+)' => '/openstack/$1',
|
||||
'^/openstack/fuel/$' => "/openstack/fuel/fuel-${fuel_version}",
|
||||
},
|
||||
},
|
||||
vhost_cfg_append => {
|
||||
'error_page 403' => '/mirantis/403.html',
|
||||
'error_page 404' => '/mirantis/404.html',
|
||||
'error_page 500 502 504' => '/mirantis/5xx.html',
|
||||
}
|
||||
}
|
||||
|
||||
# error pages for primary docs
|
||||
::nginx::resource::location { "${hostname}-error-pages" :
|
||||
ensure => 'present',
|
||||
vhost => $hostname,
|
||||
location => '~ ^\/mirantis\/(403|404|5xx)\.html$',
|
||||
ssl => true,
|
||||
ssl_only => true,
|
||||
www_root => '/usr/share/error_pages',
|
||||
require => Package['error-pages'],
|
||||
}
|
||||
|
||||
if (! defined(File[$www_root])) {
|
||||
file { $www_root :
|
||||
ensure => 'directory',
|
||||
mode => '0755',
|
||||
owner => $docs_user,
|
||||
group => $docs_user,
|
||||
require => User[$docs_user],
|
||||
}
|
||||
} else {
|
||||
File <| title == $www_root |> {
|
||||
owner => $docs_user,
|
||||
group => $docs_user,
|
||||
require => User[$docs_user],
|
||||
}
|
||||
}
|
||||
|
||||
file { "${www_root}/robots.txt" :
|
||||
ensure => 'present',
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('fuel_project/fuel_docs/robots.txt.erb'),
|
||||
require => File[$www_root],
|
||||
}
|
||||
|
||||
# fuel specs
|
||||
file { $specs_www_root :
|
||||
ensure => 'directory',
|
||||
mode => '0755',
|
||||
owner => $docs_user,
|
||||
group => $docs_user,
|
||||
require => [
|
||||
File[$www_root],
|
||||
User[$docs_user],
|
||||
]
|
||||
}
|
||||
|
||||
::nginx::resource::vhost { $specs_hostname :
|
||||
server_name => [$specs_hostname],
|
||||
www_root => $specs_www_root,
|
||||
access_log => $nginx_access_log,
|
||||
error_log => $nginx_error_log,
|
||||
location_cfg_append => {
|
||||
'rewrite' => {
|
||||
'^/$' => '/fuel-specs-master',
|
||||
},
|
||||
},
|
||||
vhost_cfg_append => {
|
||||
'error_page 403' => '/mirantis/403.html',
|
||||
'error_page 404' => '/mirantis/404.html',
|
||||
'error_page 500 502 504' => '/mirantis/5xx.html',
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
# Class: fuel_project::roles::errata
#
# All-in-one errata role: declares both the web frontend and the
# database backend sub-roles on the same node.
class fuel_project::roles::errata {
  class { '::fuel_project::roles::errata::web' :}
  class { '::fuel_project::roles::errata::database' :}
}
|
|
@ -0,0 +1,8 @@
|
|||
# Class: fuel_project::roles::errata::database
#
# Database backend for the errata service. Pulls in the common project
# baseline (guarded, since the web sub-role may declare it first when
# both run on one node) and the errata database class itself.
class fuel_project::roles::errata::database {
  if (!defined(Class['::fuel_project::common'])) {
    class { '::fuel_project::common' :}
  }
  class { '::errata::database' :}
}
|
|
@ -0,0 +1,9 @@
|
|||
# Class: fuel_project::roles::errata::web
#
# Web frontend for the errata service: common baseline (guarded against
# duplicate declaration by the database sub-role), project nginx setup
# and the errata web application.
class fuel_project::roles::errata::web {
  if (!defined(Class['::fuel_project::common'])) {
    class { '::fuel_project::common' :}
  }
  class { '::fuel_project::nginx' :}
  class { '::errata::web' :}
}
|
|
@ -0,0 +1,16 @@
|
|||
# Class: fuel_project::roles::mailman
#
# Mailman mailing-list server role: project baseline + nginx, the
# mailman service itself, and an Apache vhost serving the list web UI
# out of /var/www/lists.
class fuel_project::roles::mailman {
  class { '::fuel_project::common' :}
  class { '::fuel_project::nginx' :}
  class { '::mailman' :}
  class { '::apache' :}
  # Mailman's web interface is CGI-driven; cgid and mime modules are
  # required for Apache to execute and type the scripts.
  class { '::apache::mod::cgid' :}
  class { '::apache::mod::mime' :}

  ::apache::vhost { $::fqdn :
    docroot     => '/var/www/lists',
    # Vhost aliases and directory stanzas come from hiera; see the
    # fuel_project::roles::mailman::apache_* keys in the node data.
    aliases     => hiera_array('fuel_project::roles::mailman::apache_aliases'),
    directories => hiera_array('fuel_project::roles::mailman::apache_directories'),
  }
}
|
|
@ -0,0 +1,104 @@
|
|||
# Class: fuel_project::roles::ns
#
# BIND name-server role. On a 'master' the DNS zone data is refreshed
# from a Git repository by a cron-driven helper script; every role
# variant exports BIND statistics for Zabbix monitoring.
#
# Parameters:
#   [*dns_repo*] - Git repository with the zone data (required;
#     consumed by the ns-update.sh template).
#   [*dns_branch*] - branch of $dns_repo to deploy.
#   [*dns_checkout_private_key_content*] - SSH private key content for
#     the zone checkout; installed to /root/.ssh/id_rsa when set.
#   [*dns_tmpdir*] - scratch directory used by ns-update.sh
#     (presumably referenced by the ERB template — confirm there).
#   [*firewall_enable*] - apply $firewall_rules and mark the host
#     as externally reachable in fuel_project::common.
#   [*firewall_rules*] - hash fed to create_resources(firewall, ...).
#   [*role*] - 'master' enables the Git zone sync; anything else
#     (e.g. 'slave') gets only BIND + monitoring.
#   [*target_path*] - BIND working directory for deployed zones.
#
class fuel_project::roles::ns (
  $dns_repo,
  $dns_branch = 'master',
  $dns_checkout_private_key_content = undef,
  $dns_tmpdir = '/tmp/ns-update',
  $firewall_enable = false,
  $firewall_rules = {},
  $role = 'master',
  $target_path = '/var/cache/bind',
) {
  class { '::fuel_project::common' :
    external_host => $firewall_enable,
  }
  class { '::bind' :}
  ::bind::server::conf { '/etc/bind/named.conf' :
    require => Class['::bind'],
  }

  if ($role == 'master') {
    # Git is needed by ns-update.sh to pull the zone repository.
    ensure_packages(['git'])

    # Zone sync helper; template consumes the dns_* class parameters.
    file { '/usr/local/bin/ns-update.sh' :
      ensure  => 'present',
      owner   => 'root',
      group   => 'root',
      mode    => '0755',
      content => template('fuel_project/roles/ns/ns-update.sh.erb'),
      require => [
        Class['::bind'],
        ::Bind::Server::Conf['/etc/bind/named.conf'],
        Package['git'],
      ],
    }

    # Sync zones every 5 minutes; timeout guards against a hung
    # git/rndc run piling up overlapping jobs.
    cron { 'ns-update' :
      command => '/usr/bin/timeout -k80 60 /usr/local/bin/ns-update.sh 2>&1 | logger -t ns-update',
      user    => 'root',
      minute  => '*/5',
      require => File['/usr/local/bin/ns-update.sh'],
    }
  }

  # Perl is required by the BIND statistics parser below.
  ensure_packages(['perl', 'perl-base'])

  file { '/usr/local/bin/bind96-stats-parse.pl' :
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0755',
    source  => 'puppet:///modules/fuel_project/ns/bind96-stats-parse.pl',
    require => [
      Package['perl'],
      Package['perl-base']
    ],
  }

  # Statistics dump target; must be writable by the bind daemon,
  # which 'rndc stats' runs as.
  file { '/var/lib/bind/statistics.txt' :
    ensure => 'present',
    owner  => 'bind',
    group  => 'bind',
  }

  # Truncate then regenerate the stats file every 5 minutes so each
  # snapshot contains only the latest counters.
  cron { 'rndc-stats' :
    command => '>/var/lib/bind/statistics.txt ; /usr/sbin/rndc stats',
    user    => 'root',
    minute  => '*/5',
    require => [
      File['/var/lib/bind/statistics.txt'],
      File['/usr/local/bin/bind96-stats-parse.pl'],
    ],
  }

  # Zabbix agent user parameters for BIND monitoring.
  ::zabbix::item { 'bind' :
    content => 'puppet:///modules/fuel_project/ns/zabbix_bind.conf',
  }

  # Deploy the SSH key used for the zone repository checkout.
  if ($dns_checkout_private_key_content) {
    file { '/root/.ssh' :
      ensure => 'directory',
      mode   => '0500',
      owner  => 'root',
      group  => 'root',
    }

    file { '/root/.ssh/id_rsa' :
      ensure  => 'present',
      content => $dns_checkout_private_key_content,
      mode    => '0400',
      owner   => 'root',
      group   => 'root',
      require => File['/root/.ssh'],
    }
  }

  if ($firewall_enable) {
    include firewall_defaults::pre
    create_resources(firewall, $firewall_rules, {
      action  => 'accept',
      require => Class['firewall_defaults::pre'],
    })
  }
}
|
|
@ -0,0 +1,44 @@
|
|||
# Class: fuel_project::roles::perestroika::builder
#
# jenkins slave host for building packages
# see hiera file for list and params of used classes
#
# Parameters:
#   [*docker_package*] - name of the docker package to install
#     (required; builds run inside docker containers).
#   [*builder_user*] - account the build jobs run under.
#   [*known_hosts*] - optional hash fed to
#     create_resources('ssh::known_host', ...) for the builder user.
#   [*packages*] - packaging toolchain (deb + rpm) to install.

class fuel_project::roles::perestroika::builder (
  $docker_package,
  $builder_user = 'jenkins',
  $known_hosts = undef,
  $packages = [
    'createrepo',
    'devscripts',
    'git',
    'python-setuptools',
    'reprepro',
    'yum-utils',
  ],
){

  # ensure build user exists
  ensure_resource('user', $builder_user, {
    'ensure' => 'present'
  })

  # install required packages
  ensure_packages($packages)
  ensure_packages($docker_package)

  # ensure $builder_user in docker group
  # docker group will be created by docker package
  User <| title == $builder_user |> {
    groups  +> 'docker',
    require => Package[$docker_package],
  }

  # Pre-seed SSH known_hosts entries so non-interactive builds do not
  # stall on host-key prompts.
  if ($known_hosts) {
    create_resources('ssh::known_host', $known_hosts, {
      user      => $builder_user,
      overwrite => false,
      require   => User[$builder_user],
    })
  }

}
|
|
@ -0,0 +1,55 @@
|
|||
# Class: fuel_project::roles::perestroika::publisher
#
# jenkins slave host for publishing of packages
# see hiera file for list and params of used classes
#
# Parameters:
#   [*gpg_content_priv*] - private GPG key material used for signing
#     published packages (required).
#   [*gpg_content_pub*]  - matching public GPG key material (required).
#   [*gpg_id_priv*]      - key id of the private key (required).
#   [*gpg_id_pub*]       - key id of the public key (required).
#   [*gpg_pub_key_owner*]  - local user the public key is imported for.
#   [*gpg_priv_key_owner*] - local user the private key is imported for.
#   [*packages*] - repository/publishing toolchain to install.

class fuel_project::roles::perestroika::publisher (
  $gpg_content_priv,
  $gpg_content_pub,
  $gpg_id_priv,
  $gpg_id_pub,
  $gpg_pub_key_owner = 'jenkins',
  $gpg_priv_key_owner = 'jenkins',
  $packages = [
    'createrepo',
    'devscripts',
    'expect',
    'python-lxml',
    'reprepro',
    'rpm',
    'yum-utils',
  ],
) {

  ensure_packages($packages)

  # Publisher is itself a jenkins slave; guard against duplicate
  # declaration from the node definition.
  if( ! defined(Class['::fuel_project::jenkins::slave'])) {
    class { '::fuel_project::jenkins::slave' : }
  }

  class { '::gnupg' : }

  # Import the signing keypair into the jenkins user's keyring; the
  # User['jenkins'] requirement presumes the slave class manages that
  # account.
  gnupg_key { 'perestroika_gpg_public':
    ensure      => 'present',
    key_id      => $gpg_id_pub,
    user        => $gpg_pub_key_owner,
    key_content => $gpg_content_pub,
    key_type    => public,
    require     => [
      User['jenkins'],
      Class['::fuel_project::jenkins::slave'],
    ],
  }

  gnupg_key { 'perestroika_gpg_private':
    ensure      => 'present',
    key_id      => $gpg_id_priv,
    user        => $gpg_priv_key_owner,
    key_content => $gpg_content_priv,
    key_type    => private,
    require     => [
      User['jenkins'],
      Class['::fuel_project::jenkins::slave'],
    ],
  }
}
|
|
@ -0,0 +1,24 @@
|
|||
# Class: fuel_project::roles::storage
#
# Storage role: project baseline, the mirror application and an nginx
# vhost exposing the ISO vault directory (with autoindexing enabled).
#
# Parameters:
#   [*iso_vault_fqdn*] - FQDN the ISO vault vhost answers on; defaults
#     to iso.<node fqdn>.
#
class fuel_project::roles::storage (
  $iso_vault_fqdn = "iso.${::fqdn}",
) {
  class { '::fuel_project::common' :}
  class { '::fuel_project::apps::mirror' :}

  if (!defined(Class['::fuel_project::nginx'])) {
    class { '::fuel_project::nginx' :}
  }

  ::nginx::resource::vhost { 'iso-vault' :
    ensure              => 'present',
    www_root            => '/var/www/iso-vault',
    access_log          => '/var/log/nginx/access.log',
    error_log           => '/var/log/nginx/error.log',
    format_log          => 'proxy',
    # unique() prevents a duplicate server_name entry (and the nginx
    # "conflicting server name" warning) when $iso_vault_fqdn is left
    # at its default of "iso.${::fqdn}".
    server_name         => unique([$iso_vault_fqdn, "iso.${::fqdn}"]),
    location_cfg_append => {
      autoindex => 'on',
    },
  }
}
|
|
@ -0,0 +1,6 @@
|
|||
# Class: fuel_project::roles::tracker
#
# BitTorrent tracker role: project baseline plus the opentracker
# service.
class fuel_project::roles::tracker {
  class { '::fuel_project::common' :}
  class { '::opentracker' :}
}
|
|
@ -0,0 +1,6 @@
|
|||
# Class: fuel_project::roles::zabbix::proxy
#
# Zabbix proxy role: project baseline plus a zabbix proxy instance.
class fuel_project::roles::zabbix::proxy {
  class { '::fuel_project::common' :}
  class { '::zabbix::proxy' :}
}
|
|
@ -0,0 +1,59 @@
|
|||
# Class: fuel_project::roles::zabbix::server
#
# Zabbix server role. Installs the server, Slack/Zabbkit alert
# scripts, and — when this node is the MySQL master with a configured
# slave — the replication grant plus a weekly database maintenance
# cron job.
#
# Parameters:
#   [*mysql_replication_password*] - password for the replication user.
#   [*mysql_replication_user*]     - MySQL replication account name.
#   [*mysql_slave_host*] - host allowed to replicate; replication
#     resources are only created when set and $server_role is 'master'.
#   [*maintenance_script*]        - path of the maintenance script.
#   [*maintenance_script_config*] - MySQL client config used by the
#     maintenance script template (presumably read by the ERB — confirm).
#   [*server_role*] - 'master' or 'slave'.
#   [*slack_emoji_ok* / *_problem* / *_unknown*] - emoji used by the
#     Slack alert script for the respective trigger states.
#   [*slack_post_username*] - username Slack messages are posted as.
#   [*slack_web_hook_url*]  - Slack incoming-webhook endpoint.
#
class fuel_project::roles::zabbix::server (
  $mysql_replication_password = '',
  $mysql_replication_user = 'repl',
  $mysql_slave_host = undef,
  $maintenance_script = '/usr/share/zabbix-server-mysql/maintenance.sh',
  $maintenance_script_config = '/root/.my.cnf',
  $server_role = 'master', # master || slave
  $slack_emoji_ok = ':smile:',
  $slack_emoji_problem = ':frowning:',
  $slack_emoji_unknown = ':ghost:',
  $slack_post_username = '',
  $slack_web_hook_url = '',
) {
  class { '::fuel_project::common' :}
  class { '::zabbix::server' :}

  # Alert scripts rendered into Zabbix's alertscripts directory.
  ::zabbix::server::alertscript { 'slack.sh' :
    template => 'fuel_project/zabbix/slack.sh.erb',
    require  => Class['::zabbix::server'],
  }

  ::zabbix::server::alertscript { 'zabbkit.sh' :
    template => 'fuel_project/zabbix/zabbkit.sh.erb',
    require  => Class['::zabbix::server'],
  }

  if ($server_role == 'master' and $mysql_slave_host) {
    # Replication account for the slave host.
    mysql_user { "${mysql_replication_user}@${mysql_slave_host}" :
      ensure        => 'present',
      password_hash => mysql_password($mysql_replication_password),
    }

    mysql_grant { "${mysql_replication_user}@${mysql_slave_host}/*.*" :
      ensure     => 'present',
      options    => ['GRANT'],
      privileges => ['REPLICATION SLAVE'],
      table      => '*.*',
      user       => "${mysql_replication_user}@${mysql_slave_host}",
    }

    file { $maintenance_script :
      ensure  => 'present',
      owner   => 'root',
      group   => 'root',
      mode    => '0755',
      content => template('fuel_project/roles/zabbix/server/maintenance.sh.erb'),
      require => Class['::zabbix::server'],
    }

    cron { 'zabbix-maintenance' :
      ensure  => 'present',
      command => "${maintenance_script} 2>&1 | logger -t zabbix-maintenance",
      weekday => 'Wednesday',
      hour    => '15',
      # Without an explicit minute, cron would run the maintenance
      # script every minute between 15:00 and 15:59 each Wednesday;
      # pin it so it fires exactly once a week.
      minute  => '0',
    }
  }
}
|
|
@ -0,0 +1,86 @@
|
|||
# Used for deployment of TPI lab
#
# Sets up a TPI lab node: NFS-mounted homes, a jenkins slave able to
# run tests and build fuel plugins, VMware lab tooling, a pinned
# kernel + btsync file sync, and local workarounds (hugepage defrag
# off, custom sudoers).
#
# Parameters:
#   [*btsync_secret*] - BitTorrent Sync shared secret (consumed by the
#     tpi.conf.erb template — confirm there); defaults from
#     fuel_project::tpi::params.
#   [*sudo_commands*] - commands the jenkins slave may run via sudo.
#   [*local_home_basenames*] - home directories kept local (not NFS).
class fuel_project::tpi::lab (
  $btsync_secret = $fuel_project::tpi::params::btsync_secret,
  $sudo_commands = [ '/sbin/ebtables', '/sbin/iptables' ],
  $local_home_basenames = [ 'jenkins' ],
) {

  class { '::tpi::nfs_client' :
    local_home_basenames => $local_home_basenames,
  }

  class { '::fuel_project::jenkins::slave' :
    run_tests          => true,
    sudo_commands      => $sudo_commands,
    ldap               => true,
    build_fuel_plugins => true,
  }

  # Override the slave's build_iso sudoers file with the TPI variant.
  File<| title == 'jenkins-sudo-for-build_iso' |> {
    content => template('fuel_project/tpi/jenkins-sudo-for-build_iso'),
  }

  class { '::tpi::vmware_lab' : }

  # these packages will be installed from tpi apt repo defined in hiera
  $tpi_packages = [
    'linux-image-3.13.0-39-generic',
    'linux-image-extra-3.13.0-39-generic',
    'linux-headers-3.13.0-39',
    'linux-headers-3.13.0-39-generic',
    'btsync',
    'sudo-ldap',
    'zsh',
    'most',
  ]

  ensure_packages($tpi_packages)

  service { 'btsync':
    ensure  => 'running',
    enable  => true,
    require => Package['btsync'],
  }

  # Daemon defaults; depends on tpi.conf existing so btsync never
  # starts half-configured, and restarts btsync on change.
  file { '/etc/default/btsync':
    notify  => Service['btsync'],
    mode    => '0600',
    owner   => 'btsync',
    group   => 'btsync',
    content => template('fuel_project/tpi/btsync.erb'),
    require => File['/etc/btsync/tpi.conf'],
  }

  file { '/etc/btsync/tpi.conf':
    notify  => Service['btsync'],
    mode    => '0600',
    owner   => 'btsync',
    group   => 'btsync',
    content => template('fuel_project/tpi/tpi.conf.erb'),
    require => Package['btsync'],
  }

  # transparent hugepage defragmentation leads to slowdowns
  # in our environments (kvm+vmware workstation), disable it
  file { '/etc/init.d/disable-hugepage-defrag':
    mode    => '0755',
    owner   => 'root',
    group   => 'root',
    content => template('fuel_project/tpi/disable-hugepage-defrag.erb'),
  }

  service { 'disable-hugepage-defrag':
    ensure  => 'running',
    enable  => true,
    require => File['/etc/init.d/disable-hugepage-defrag'],
  }

  file { '/etc/sudoers.d/tpi' :
    ensure  => 'present',
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
    content => template('fuel_project/tpi/tpi.sudoers.d.erb'),
  }

}
|
|
@ -0,0 +1,12 @@
|
|||
# Used for deployment of TPI puppet master
#
# Combines the project puppet master with NFS-mounted home
# directories, mirroring the setup of the TPI lab nodes.
#
# Parameters:
#   [*local_home_basenames*] - home directories kept local (not NFS).
class fuel_project::tpi::puppetmaster (
  $local_home_basenames= [],
) {

  # Absolute (::-anchored) class name for consistency with the rest of
  # this module and to avoid relative-namespace resolution surprises.
  class { '::tpi::nfs_client' :
    local_home_basenames => $local_home_basenames,
  }

  class { '::fuel_project::puppet::master' : }

}
|
|
@ -0,0 +1,34 @@
|
|||
# Used for deployment of TPI servers
#
# TPI central server: project baseline, a jenkins master, and a
# read-only rsync daemon exporting /storage to the lab nodes.
class fuel_project::tpi::server (
) {
  class { '::fuel_project::common' : }
  class { '::jenkins::master' :}

  class { 'rsync':
    package_ensure => 'present',
  }

  # Guarded: another profile on the node may already configure the
  # rsync daemon.
  if (!defined(Class['::rsync::server'])) {
    class { '::rsync::server' :
      gid        => 'root',
      uid        => 'root',
      use_chroot => 'yes',
      use_xinetd => false,
    }
  }

  # Read-only, anonymous-friendly export of /storage.
  ::rsync::server::module{ 'storage':
    comment         => 'TPI main rsync share',
    uid             => 'nobody',
    gid             => 'nogroup',
    list            => 'yes',
    lock_file       => '/var/run/rsync_storage.lock',
    max_connections => 100,
    path            => '/storage',
    read_only       => 'yes',
    write_only      => 'no',
    incoming_chmod  => false,
    outgoing_chmod  => false,
  }

}
|
|
@ -0,0 +1,17 @@
|
|||
# Class: fuel_project::web
#
# Generic web node: nginx + project baseline, with optional landing
# pages toggled per node.
#
# Parameters:
#   [*fuel_landing_page*] - deploy the Fuel landing page.
#   [*docs_landing_page*] - deploy the docs landing page.
class fuel_project::web (
  $fuel_landing_page = false,
  $docs_landing_page = false,
) {
  class { '::fuel_project::nginx' :}
  class { '::fuel_project::common' :}

  if ($fuel_landing_page) {
    class { '::landing_page' :}
  }

  if ($docs_landing_page) {
    class { '::landing_page::docs' :}
  }
}
|
|
@ -0,0 +1,26 @@
|
|||
# Class: fuel_project::znc
#
# ZNC IRC bouncer role: project baseline plus the znc service, with an
# optional firewall rule opening the bouncer port.
#
# Parameters:
#   [*apply_firewall_rules*] - open $service_port via the firewall
#     module (also marks the host as external in fuel_project::common).
#   [*service_port*] - TCP port znc listens on.
#
class fuel_project::znc (
  $apply_firewall_rules = false,
  $service_port = 7777,

){
  class { '::fuel_project::common':
    external_host => $apply_firewall_rules,
  }

  class { '::znc': port => $service_port}

  if $apply_firewall_rules {
    include firewall_defaults::pre
    firewall { '1000 Allow znc connection' :
      ensure  => present,
      dport   => $service_port,
      proto   => 'tcp',
      action  => 'accept',
      require => Class['firewall_defaults::pre'],
    }
  }

}
|
|
@ -0,0 +1,39 @@
|
|||
[mirror]
|
||||
; The directory where the mirror data will be stored.
|
||||
directory = <%= @mirror_dir %>
|
||||
|
||||
; The PyPI server which will be mirrored.
|
||||
; master = https://testpypi.python.org
|
||||
; scheme for PyPI server MUST be https
|
||||
master = <%= @mirror_master %>
|
||||
|
||||
; The network socket timeout to use for all connections. This is set to a
|
||||
; somewhat aggressively low value: rather fail quickly temporarily and re-run
|
||||
; the client soon instead of having a process hang infinitely and have TCP not
|
||||
; catching up for ages.
|
||||
timeout = <%= @mirror_timeout %>
|
||||
|
||||
; Number of worker threads to use for parallel downloads.
|
||||
; Recommendations for worker thread setting:
|
||||
; - leave the default of 3 to avoid overloading the pypi master
|
||||
; - official servers located in data centers could run 20 workers
|
||||
; - anything beyond 50 is probably unreasonable and avoided by bandersnatch
|
||||
workers = <%= @mirror_workers %>
|
||||
|
||||
; Whether to stop a sync quickly after an error is found or whether to continue
|
||||
; syncing but not marking the sync as successful. Value should be "true" or
|
||||
; "false".
|
||||
stop-on-error = <%= @mirror_stop_on_error %>
|
||||
|
||||
; Whether or not files that have been deleted on the master should be deleted
|
||||
; on the mirror, too.
|
||||
; IMPORTANT: if you are running an official mirror then you *need* to leave
|
||||
; this on.
|
||||
delete-packages = <%= @mirror_delete_packages %>
|
||||
|
||||
[statistics]
|
||||
; A glob pattern matching all access log files that should be processed to
|
||||
; generate daily access statistics that will be aggregated on the master PyPI.
|
||||
access-log-pattern = <%= @nginx_access_log %>*
|
||||
|
||||
; vim: set ft=cfg:
|
|
@ -0,0 +1,5 @@
|
|||
- from: <%= @upstream_mirror %>
|
||||
to: <%= @npm_dir %>
|
||||
server: http://<%= @service_fqdn %>
|
||||
parallelism: <%= @parallelism %>
|
||||
recheck: <%= @recheck ? 'true' : 'false' %>
|
|
@ -0,0 +1,4 @@
|
|||
---
|
||||
- from: <%= @upstream_mirror %>
|
||||
to: <%= @rubygems_dir %>
|
||||
parallelism: <%= @parallelism %>
|
|
@ -0,0 +1,6 @@
|
|||
127.0.0.1 localhost
|
||||
127.0.1.1 <%= @fqdn %> <%= @hostname %>
|
||||
|
||||
::1 localhost ip6-localhost ip6-loopback
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
|
@ -0,0 +1,54 @@
|
|||
#!/bin/sh
# ldap2sshkeys: collect SSH public keys from LDAP for every account
# allowed on this host and install them as per-user files under
# /etc/ssh/keys (one file per user, one key per line).
#
# Usage: ldap2sshkeys [smart-hostname]
#   smart-hostname - e.g. ns2-srt; defaults to `hostname -s`.
#
# The previous key set is kept as /etc/ssh/keys.old; the swap only
# happens if at least one valid (dss/rsa) key was fetched.

sh=$1
if [ $# -lt 1 ]; then
    # $0 (not an undefined $CMD) is the script name.
    echo "Usage: $0 smart-hostname"
    echo "smart-hostname - ns2-srt for example"
    sh=`hostname -s`
fi

# $$-suffixed paths keep concurrent runs from clobbering each other.
d=/etc/ssh/keys.$$          # staging dir for the new key set
t=/tmp/ldap2sshkeys.$$      # accumulated list of user uids
g=/tmp/ldap2sshkeys-sg.$$   # server groups this host belongs to
tmpDir=/tmp/ldap2sshkeys_dir.$$

[ -d "$d" ] || mkdir "$d"
[ -d "$tmpDir" ] || mkdir "$tmpDir"

# Users granted access to this host directly: via group membership
# (byhost/fullaccess trust) or via a personal sshPublicKey entry
# (including the "it" group which always has access).
ldapsearch -LLL -x -b "o=mirantis,dc=mirantis,dc=net" "(&(objectClass=groupOfNames)(|(&(accessTo=$sh)(trustModel=byhost))(trustModel=fullaccess)))" memberUid | awk '/memberUid:/ {print $2}' > "$t"
ldapsearch -LLL -x -b "ou=people,ou=external,dc=mirantis,dc=net" "(&(objectClass=groupOfNames)(|(&(accessTo=$sh)(trustModel=byhost))(trustModel=fullaccess)))" memberUid | awk '/memberUid:/ {print $2}' >> "$t"
ldapsearch -LLL -x -b "ou=people,ou=external,dc=mirantis,dc=net" "(&(sshPublicKey=*)(|(&(accessTo=$sh)(trustModel=byhost))(trustModel=fullaccess)(memberOf=cn=it,ou=groups,o=mirantis,dc=mirantis,dc=net)))" uid | awk '/uid:/ {print $2}' >> "$t"
ldapsearch -LLL -x -b "o=mirantis,dc=mirantis,dc=net" "(&(sshPublicKey=*)(|(&(accessTo=$sh)(trustModel=byhost))(trustModel=fullaccess)(memberOf=cn=it,ou=groups,o=mirantis,dc=mirantis,dc=net)))" uid | awk '/uid:/ {print $2}' >> "$t"

# Server groups this host is a member of; strip the DN down to the
# group name (the grep -oP window drops the leading "cn=<host>," part).
ldapsearch -LLL -x -b "ou=groups,ou=servers,dc=mirantis,dc=net" "(&(objectClass=gosaGroupOfNames)(member=cn=$sh*))" dn | grep -oP '(?<=.{7,7}).*(?=,ou=groups)' >> "$g"

# Repeat the user lookups for every server group the host belongs to.
for s in `sort -u "$g"`; do
    ldapsearch -LLL -x -b "o=mirantis,dc=mirantis,dc=net" "(&(objectClass=groupOfNames)(|(&(accessTo=$s)(trustModel=byhost))(trustModel=fullaccess)))" memberUid | awk '/memberUid:/ {print $2}' >> "$t"
    ldapsearch -LLL -x -b "ou=people,ou=external,dc=mirantis,dc=net" "(&(objectClass=groupOfNames)(|(&(accessTo=$s)(trustModel=byhost))(trustModel=fullaccess)))" memberUid | awk '/memberUid:/ {print $2}' >> "$t"
    ldapsearch -LLL -x -b "ou=people,ou=external,dc=mirantis,dc=net" "(&(sshPublicKey=*)(|(&(accessTo=$s)(trustModel=byhost))(trustModel=fullaccess)(memberOf=cn=it,ou=groups,o=mirantis,dc=mirantis,dc=net)))" uid | awk '/uid:/ {print $2}' >> "$t"
    ldapsearch -LLL -x -b "o=mirantis,dc=mirantis,dc=net" "(&(sshPublicKey=*)(|(&(accessTo=$s)(trustModel=byhost))(trustModel=fullaccess)(memberOf=cn=it,ou=groups,o=mirantis,dc=mirantis,dc=net)))" uid | awk '/uid:/ {print $2}' >> "$t"
done

# Fetch each user's sshPublicKey attribute values (ldapsearch -tt -T
# writes them as files into $tmpDir) and concatenate them into one
# file per user; the sed splits concatenated keys onto separate lines.
for u in `sort -u "$t"`; do
    ldapsearch -x -LLL -b "o=mirantis,dc=mirantis,dc=net" "uid=$u" sshPublicKey -tt -T "$tmpDir" > /dev/null 2>&1
    [ "xxx`ls $tmpDir`" != 'xxx' ] && ( cat "$tmpDir"/* > "$d/$u" ; rm -f "$tmpDir"/* ) && (sed -i "s/ssh-rsa/\nssh-rsa/2g" "$d/$u")
done
for u in `sort -u "$t"`; do
    ldapsearch -x -LLL -b "ou=people,ou=services,dc=mirantis,dc=net" "uid=$u" sshPublicKey -tt -T "$tmpDir" > /dev/null 2>&1
    [ "xxx`ls $tmpDir`" != 'xxx' ] && ( cat "$tmpDir"/* > "$d/$u" ; rm -f "$tmpDir"/* ) && (sed -i "s/ssh-rsa/\nssh-rsa/2g" "$d/$u")
done
for u in `sort -u "$t"`; do
    ldapsearch -x -LLL -b "ou=people,ou=external,dc=mirantis,dc=net" "uid=$u" sshPublicKey -tt -T "$tmpDir" > /dev/null 2>&1
    [ "xxx`ls $tmpDir`" != 'xxx' ] && ( cat "$tmpDir"/* > "$d/$u" ; rm -f "$tmpDir"/* ) && (sed -i "s/ssh-rsa/\nssh-rsa/2g" "$d/$u")
done

rm "$g"
rm "$t"
rm -fR "$tmpDir"

# Swap in the new key set only if it contains at least one real key;
# keep the previous set as keys.old for manual rollback.
if (grep -E '(dss|rsa)' "$d"/* >/dev/null); then
    [ -d /etc/ssh/keys.old ] && rm -rf /etc/ssh/keys.old
    [ -d /etc/ssh/keys ] && mv /etc/ssh/keys /etc/ssh/keys.old
    mv "$d" /etc/ssh/keys
    # Clean up staging dirs left behind by earlier aborted runs.
    # The original used a relative path ("rm -rf etc/ssh/keys.*"),
    # which was a no-op; a bare /etc/ssh/keys.* glob would also wipe
    # the keys.old backup created above, so match only keys.<pid>.
    rm -rf /etc/ssh/keys.[0-9]*
fi
|
|
@ -0,0 +1,6 @@
|
|||
<% if @osfamily == 'Debian' %>
|
||||
UserParameter=system.software.packages,dpkg-query --show | awk '{print $1"="$2}'
|
||||
<% elsif @osfamily == 'RedHat' %>
|
||||
UserParameter=system.software.packages,rpm -qa
|
||||
<% end %>
|
||||
UserParameter=system.software.services,sudo netstat -utpln | grep -v 'Active Internet connections\|PID/Program name' | awk '{print $NF}' | cut -d/ -f2 | sort -u
|
|
@ -0,0 +1,15 @@
|
|||
<% if @id != nil -%>
|
||||
[<%= @id %>]
|
||||
<% end -%>
|
||||
<% if @consumer_key != nil -%>
|
||||
consumer_key = <%= @consumer_key %>
|
||||
<% end -%>
|
||||
<% if @consumer_secret != nil -%>
|
||||
consumer_secret = <%= @consumer_secret %>
|
||||
<% end -%>
|
||||
<% if @access_token != nil -%>
|
||||
access_token = <%= @access_token %>
|
||||
<% end -%>
|
||||
<% if @access_secret != nil -%>
|
||||
access_secret = <%= @access_secret %>
|
||||
<% end -%>
|
|
@ -0,0 +1,30 @@
|
|||
<% if @section != nil -%>
|
||||
[<%= @section %>]
|
||||
<% end -%>
|
||||
<% if @appname != nil -%>
|
||||
appname = <%= @appname %>
|
||||
<% end -%>
|
||||
<% if @credfile != nil -%>
|
||||
credfile = <%= @credfile %>
|
||||
<% end -%>
|
||||
<% if @cachedir != nil -%>
|
||||
cachedir = <%= @cachedir %>
|
||||
<% end -%>
|
||||
<% if @logfile != nil -%>
|
||||
logfile = <%= @logfile %>
|
||||
<% end -%>
|
||||
<% if @env != nil -%>
|
||||
env = <%= @env %>
|
||||
<% end -%>
|
||||
<% if @status != nil -%>
|
||||
status = <%= @status %>
|
||||
<% end -%>
|
||||
<% if @series != nil -%>
|
||||
series = <%= @series %>
|
||||
<% end -%>
|
||||
<% if @milestone != nil -%>
|
||||
milestone = <%= @milestone %>
|
||||
<% end -%>
|
||||
<% if @distr != nil -%>
|
||||
distr = <%= @distr %>
|
||||
<% end -%>
|
|
@ -0,0 +1,34 @@
|
|||
<% if @appname != nil -%>
|
||||
[<%= @appname %>]
|
||||
appname = <%= @appname %>
|
||||
<% end -%>
|
||||
<% if @credfile != nil -%>
|
||||
credfile = <%= @credfile %>
|
||||
<% end -%>
|
||||
<% if @cachedir != nil -%>
|
||||
cachedir = <%= @cachedir %>
|
||||
<% end -%>
|
||||
<% if @logfile != nil -%>
|
||||
logfile = <%= @logfile %>
|
||||
<% end -%>
|
||||
<% if @host != nil -%>
|
||||
host = <%= @host %>
|
||||
<% end -%>
|
||||
<% if @port != nil -%>
|
||||
port = <%= @port %>
|
||||
<% end -%>
|
||||
<% if @sshprivkey != nil -%>
|
||||
sshkey = <%= @sshprivkey %>
|
||||
<% end -%>
|
||||
<% if @update_status != nil -%>
|
||||
update_status = <%= @update_status %>
|
||||
<% end -%>
|
||||
<% if @username != nil -%>
|
||||
username = <%= @username %>
|
||||
<% end -%>
|
||||
<% if @env != nil -%>
|
||||
env = <%= @env %>
|
||||
<% end -%>
|
||||
<% if @projects.any? -%>
|
||||
projects = <%= @projects.join(', ') %>
|
||||
<% end -%>
|
|
@ -0,0 +1,10 @@
|
|||
User-Agent: *
|
||||
Allow: /fuel/fuel-<%= @fuel_version %>/
|
||||
Allow: /openstack/fuel/fuel-<%= @fuel_version %>/
|
||||
Disallow: /fuel/
|
||||
Disallow: /openstack/fuel/
|
||||
Disallow: /*/index_content.html$
|
||||
Disallow: /*/index_content.html?*
|
||||
Disallow: /index_content.html$
|
||||
Disallow: /index_content.html?*
|
||||
Allow: /
|
|
@ -0,0 +1,48 @@
|
|||
|
||||
[remote "<%= @title %>"]
|
||||
url = <%= @user %>@<%= @host %>:<%= @path %>${name}.git
|
||||
<% if @admin_url != nil -%>
|
||||
adminUrl = <%= @admin_url %>
|
||||
<% end -%>
|
||||
<% if @auth_group != nil -%>
|
||||
authGroup = <%= @auth_group %>
|
||||
<% end -%>
|
||||
<% if @create_missing_repositories != nil -%>
|
||||
createMissingRepositories = <%= @create_missing_repositories %>
|
||||
<% end -%>
|
||||
<% if @mirror != nil -%>
|
||||
mirror = <%= @mirror %>
|
||||
<% end -%>
|
||||
<% if @projects != nil -%>
|
||||
projects = <%= @projects %>
|
||||
<% end -%>
|
||||
<% if @push != nil -%>
|
||||
push = <%= @push %>
|
||||
<% end -%>
|
||||
<% if @receivepack != nil -%>
|
||||
receivepack = <%= @receivepack %>
|
||||
<% end -%>
|
||||
<% if @remote_name_style != nil -%>
|
||||
remoteNameStyle = <%= @remote_name_style %>
|
||||
<% end -%>
|
||||
<% if @replicate_permissions != nil -%>
|
||||
replicatePermissions = <%= @replicate_permissions %>
|
||||
<% end -%>
|
||||
<% if @replicate_project_deletions != nil -%>
|
||||
replicateProjectDeletions = <%= @replicate_project_deletions %>
|
||||
<% end -%>
|
||||
<% if @replication_delay != nil -%>
|
||||
replicationDelay = <%= @replication_delay %>
|
||||
<% end -%>
|
||||
<% if @replication_retry != nil -%>
|
||||
replicationRetry = <%= @replication_retry %>
|
||||
<% end -%>
|
||||
<% if @timeout != nil -%>
|
||||
timeout = <%= @timeout %>
|
||||
<% end -%>
|
||||
<% if @threads != nil -%>
|
||||
threads = <%= @threads %>
|
||||
<% end -%>
|
||||
<% if @uploadpack != nil -%>
|
||||
uploadpack = <%= @uploadpack %>
|
||||
<% end -%>
|
|
@ -0,0 +1,10 @@
|
|||
volume management
|
||||
type mgmt/glusterd
|
||||
option working-directory /var/lib/glusterd
|
||||
option transport-type socket,rdma
|
||||
option transport.socket.keepalive-time 10
|
||||
option transport.socket.keepalive-interval 2
|
||||
option transport.socket.read-fail-log off
|
||||
option rpc-auth-allow-insecure on
|
||||
# option base-port 49152
|
||||
end-volume
|
|
@ -0,0 +1,5 @@
|
|||
Cmnd_Alias GLUSTER = /usr/sbin/gluster
|
||||
Cmnd_Alias GLUBIX_SCRIPTS = /usr/local/bin/glubix_checkvolstatus.pl, /usr/local/bin/glubix_georepstatus33.pl, /usr/local/bin/glubix_georepstatus34.pl, /usr/local/bin/glubix_numpeers.pl, /usr/local/bin/glubix_uuid.pl, /usr/local/bin/glubix_volstatus.pl, /usr/local/bin/glubix_voltype.pl
|
||||
|
||||
zabbix ALL = NOPASSWD: GLUSTER
|
||||
zabbix ALL = NOPASSWD: GLUBIX_SCRIPTS
|
|
@ -0,0 +1,3 @@
|
|||
# FIXME: https://bugs.launchpad.net/fuel/+bug/1348599
|
||||
jenkins ALL=(ALL) NOPASSWD: ALL
|
||||
# /FIXME
|
|
@ -0,0 +1,10 @@
|
|||
[general]
|
||||
apiurl = <%= @osc_apiurl %>
|
||||
|
||||
[<%= @osc_url_primary %>]
|
||||
user = <%= @osc_user_primary %>
|
||||
pass = <%= @osc_pass_primary %>
|
||||
|
||||
[<%= @osc_url_secondary %>]
|
||||
user = <%= @osc_user_secondary %>
|
||||
pass = <%= @osc_pass_secondary %>
|
|
@ -0,0 +1,3 @@
|
|||
<% if @ldap_sudo_group and not @ldap_sudo_group.empty? -%>
|
||||
<%= @ldap_sudo_group %> ALL=(ALL) NOPASSWD: ALL
|
||||
<% end -%>
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue