Big clean of tripleo-ci

Change-Id: Iff0350a1fff1057d1de924f05693258445da9c37
This commit is contained in:
Sagi Shnaidman 2020-01-22 15:29:25 +02:00
parent 5eb4b92669
commit 0977f8502d
70 changed files with 0 additions and 6192 deletions

View File

@@ -1,66 +0,0 @@
heat_template_version: 2015-04-30

description: >
  Software Config to drive os-net-config for a simple bridge configured
  with a static IP address for the ctlplane network.

parameters:
  ControlPlaneIp:
    default: ''
    description: IP address/subnet on the ctlplane network
    type: string
  ExternalIpSubnet:
    default: ''
    description: IP address/subnet on the external network
    type: string
  InternalApiIpSubnet:
    default: ''
    description: IP address/subnet on the internal API network
    type: string
  StorageIpSubnet:
    default: ''
    description: IP address/subnet on the storage network
    type: string
  StorageMgmtIpSubnet:
    default: ''
    description: IP address/subnet on the storage mgmt network
    type: string
  TenantIpSubnet:
    default: ''
    description: IP address/subnet on the tenant network
    type: string
  ManagementIpSubnet:
    default: ''
    description: IP address/subnet on the management network
    type: string
  ControlPlaneSubnetCidr: # Override this via parameter_defaults
    default: '24'
    description: The subnet CIDR of the control plane network.
    type: string

resources:
  OsNetConfigImpl:
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      config:
        str_replace:
          template: |
            #!/bin/bash
            if ! ip addr show dev $bridge_name | grep CONTROLPLANEIP/CONTROLPLANESUBNETCIDR; then
              ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
            fi
          params:
            CONTROLPLANEIP: {get_param: ControlPlaneIp}
            CONTROLPLANESUBNETCIDR: {get_param: ControlPlaneSubnetCidr}
      inputs:
        -
          name: bridge_name
          default: br-ex
          description: bridge-name
          type: String

outputs:
  OS::stack_id:
    description: The OsNetConfigImpl resource.
    value: {get_resource: OsNetConfigImpl}
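Note: the "Override this via parameter_defaults" hint on ControlPlaneSubnetCidr refers to a Heat environment file passed at deploy time. A minimal sketch, assuming a /25 ctlplane subnet; the file name and value are illustrative:

cat > ctlplane-cidr.yaml <<'EOF'
parameter_defaults:
  ControlPlaneSubnetCidr: '25'
EOF
# then pass it alongside the other environments, e.g.:
# openstack overcloud deploy --templates -e ctlplane-cidr.yaml ...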

View File

@@ -1,145 +0,0 @@
Description: Create a bare metal test environment undercloud host to run test environment workers.
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
  AuthTenant:
    Description: Overcloud Auth Tenant.
    Type: String
    Default: 'openstack-nodepool'
  AuthUrl:
    Description: Overcloud Keystone.
    Type: String
  AuthUser:
    Description: Overcloud Auth User.
    Type: String
    Default: 'tripleo-ci'
    NoEcho: true
  AuthPassword:
    Description: User password.
    Type: String
    NoEcho: true
  CPUPerEnv:
    Default: 3
    Description: CPUs to require per testenv
    Type: String
  DiskPerEnv:
    Default: 60
    Description: Disk space to require per testenv
    Type: String
  GearmanHost:
    Description: Gearman server host.
    Type: String
  GearmanPort:
    Default: 4730
    Description: Gearman server port.
    Type: String
  KeyName:
    Default: default
    Description: Name of an existing EC2 KeyPair to enable SSH access
    Type: String
  Hosts:
    Default: ''
    Description: A string to append to /etc/hosts
    Type: String
  MemPerEnv:
    Default: 10
    Description: RAM to require per testenv
    Type: String
  NetworkName:
    Description: Name of the Neutron network to allocate ports on.
    Type: String
  Flavor:
    Default: baremetal_full
    Description: Flavor to request when deploying.
    Type: String
  PublicInterface:
    Default: eth0
    Description: Network interface to make into a bridge (for seed vm connectivity)
    Type: String
  NtpServer:
    Type: String
    Default: ''
  testenvImage:
    Default: testenv-worker
    Type: String
Resources:
  AccessPolicy:
    Properties:
      AllowedResources:
      - testenv0
    Type: OS::Heat::AccessPolicy
  Key:
    Properties:
      UserName:
        Ref: User
    Type: AWS::IAM::AccessKey
  User:
    Properties:
      Policies:
      - Ref: AccessPolicy
    Type: AWS::IAM::User
  testenv0CompletionCondition:
    DependsOn: testenv0
    Properties:
      Count: '1'
      Handle:
        Ref: testenv0CompletionHandle
      Timeout: '900'
    Type: AWS::CloudFormation::WaitCondition
  testenv0CompletionHandle:
    Type: AWS::CloudFormation::WaitConditionHandle
  testenv0:
    Metadata:
      completion-handle:
        Ref: testenv0CompletionHandle
      os-collect-config:
        cfn:
          access_key_id:
            Ref: Key
          path: testenv0.Metadata
          secret_access_key:
            Fn::GetAtt:
            - Key
            - SecretAccessKey
          stack_name:
            Ref: AWS::StackName
      gearman-worker:
        host:
          Ref: GearmanHost
        port:
          Ref: GearmanPort
        mem-per-env:
          Ref: MemPerEnv
        cpu-per-env:
          Ref: CPUPerEnv
        disk-per-env:
          Ref: DiskPerEnv
        auth_user:
          Ref: AuthUser
        auth_tenant:
          Ref: AuthTenant
        auth_url:
          Ref: AuthUrl
        auth_passwd:
          Ref: AuthPassword
        network_name:
          Ref: NetworkName
      neutron:
        ovs:
          public_interface:
            Ref: PublicInterface
          physical_bridge: br-ctlplane
      hosts:
        Ref: Hosts
      ntp:
        servers:
        - {server: {Ref: NtpServer}, fudge: "stratum 0"}
    Properties:
      flavor:
        Ref: Flavor
      image:
        Ref: testenvImage
      key_name:
        Ref: KeyName
      networks:
      - network: ctlplane
    Type: OS::Nova::Server

View File

@@ -1,100 +0,0 @@
#!/bin/bash
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

set -eu

SCRIPT_NAME=$(basename $0)
SCRIPT_HOME=$(dirname $0)

function show_options {
    echo "Usage: $SCRIPT_NAME [options]"
    echo
    echo "Ensure that a given user exists."
    echo
    echo "Options:"
    echo "    -h -- this help"
    echo "    -e -- email"
    echo "    -n -- name"
    echo "    -t -- tenant"
    echo "    -u -- usercode"
    echo
    exit $1
}

EMAIL=''
NAME=''
TENANT=''
USERCODE=''

TEMP=`getopt -o hu:e:n:t: -n $SCRIPT_NAME -- "$@"`
if [ $? != 0 ]; then
    echo "Terminating..." >&2;
    exit 1;
fi

# Note the quotes around `$TEMP': they are essential!
eval set -- "$TEMP"

while true ; do
    case "$1" in
        -h) show_options 0;;
        -e) EMAIL=$2; shift 2 ;;
        -n) NAME=$2; shift 2 ;;
        -t) TENANT=$2; shift 2 ;;
        -u) USERCODE=$2; shift 2 ;;
        --) shift ; break ;;
        *) echo "Error: unsupported option $1." ; exit 1 ;;
    esac
done

EXTRA_ARGS=${1:-''}

if [ -z "$EMAIL" -o -z "$NAME" -o -z "$TENANT" -o -z "$USERCODE" -o -n "$EXTRA_ARGS" ]; then
    show_options 1
fi

echo "Checking for user $USERCODE"
#TODO: fix after bug 1392035 in the keystone client library
USER_ID=$(openstack user list | awk '{print tolower($0)}' |grep " ${USERCODE,,} " |awk '{print$2}')
if [ -z "$USER_ID" ]; then
    PASSWORD=''
    if [ -e os-asserted-users ]; then
        PASSWORD=$(awk "\$1==\"$USERCODE\" { print \$2 }" < os-asserted-users)
    fi
    if [ -z "$PASSWORD" ]; then
        PASSWORD=$(os-make-password)
        echo "$USERCODE $PASSWORD" >> os-asserted-users
    fi
    USER_ID=$(openstack user create --password "$PASSWORD" --email "$EMAIL" $USERCODE | awk '$2=="id" {print $4}')
fi

#TODO: fix after bug 1392035 in the keystone client library
TENANT_ID=$(openstack project list | awk '{print tolower($0)}' |grep " ${TENANT,,} " |awk '{print$2}')
if [ -z "$TENANT_ID" ]; then
    TENANT_ID=$(openstack project create $TENANT | awk '$2=="id" {print $4}')
fi

if [ "$TENANT" = "admin" ]; then
    ROLE="admin"
else
    ROLE="_member_"
fi
ROLE_ID=$(openstack role show $ROLE | awk '$2=="id" {print $4}')

if openstack user role list --project $TENANT_ID $USER_ID | grep "${ROLE_ID}.*${ROLE}.*${USER_ID}" ; then
    echo "User already has role '$ROLE'"
else
    openstack role add --project $TENANT_ID --user $USER_ID $ROLE_ID
fi
echo "User $USERCODE configured."

View File

@@ -1,84 +0,0 @@
#!/usr/bin/bash
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

set -ex

[ -n "$1" ] || ( echo "Usage : $0 <num-runs> <sim-runs>" && exit 1 )

# Creates a template image (if it doesn't exist), then runs an
# overcloud ci job <num-runs> times, <sim-runs> simultaneously.

IMAGE=CentOS-7-x86_64-GenericCloud
USER=centos

# makes some assumptions but good enough for now
nova keypair-add --pub-key ~/.ssh/id_rsa.pub bighammer || true

function tapper {
    set -x
    NODENAME=test-node-$1

    nova boot --image $IMAGE --flavor undercloud --key-name bighammer $NODENAME
    #trap "nova delete $NODENAME" RETURN ERR
    sleep 60
    if [ "$(nova show $NODENAME | awk '/status/ {print $4}')" != "ACTIVE" ] ; then
        nova show $NODENAME
        return 1
    fi

    IP=$(nova show $NODENAME | awk '/private network/ {print $5}')
    PORTID=$(neutron port-list | grep "$IP\>" | awk '{print $2}')
    FLOATINGIP=$(nova floating-ip-create $EXTNET | grep public | awk '{print $2}')
    [ -z "$FLOATINGIP" ] && echo "No Floating IP..." && exit 1
    #trap "nova delete $NODENAME || true ; sleep 20 ; nova floatingip-delete $FLOATINGIP" RETURN ERR
    nova floating-ip-associate $NODENAME $FLOATINGIP
    sleep 20

    ssh -tt $USER@$FLOATINGIP <<EOF
set -xe
sudo yum install -y git screen
sudo mkdir -p /opt/stack/new
sudo chown centos /opt/stack/new
git clone https://opendev.org/openstack/tripleo-ci /opt/stack/new/tripleo-ci
cd /opt/stack/new/tripleo-ci
DISTRIBUTION=CentOS DISTRIBUTION_MAJOR_VERSION=7 OVERRIDE_ZUUL_BRANCH= ZUUL_BRANCH=master WORKSPACE=/tmp TOCI_JOBTYPE=nonha DEVSTACK_GATE_TIMEOUT=180 ./toci_gate_test.sh
exit 0
EOF
    set +x
    date
    echo "JOB DONE"
}

TODO=$1
SIM=$2
DONE=0

[ -e logs ] && mv logs logs-$(date +%s)
mkdir -p logs

while true; do
    [ $DONE -ge $TODO ] && echo "Done" && break
    jobs
    if [ $(jobs | wc -l) -lt $SIM ] ; then
        DONE=$((DONE+1))
        echo "Starting job $DONE"
        tapper $DONE &> logs/job-$DONE.log &
    fi
    sleep 10 # Lets not hammer the API all in one go
done

# Wait for the last process to finish
wait
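For example, to run 10 overcloud CI jobs with at most 2 in flight (numbers arbitrary), assuming the script is saved as bighammer.sh:

./bighammer.sh 10 2
tail -f logs/job-1.log   # each job logs to logs/job-N.log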

View File

@@ -1,56 +0,0 @@
#!/bin/bash
set -eux

export STABLE_RELEASE=${STABLE_RELEASE:-""}

# Source deploy.env if it exists. It should exist if we are running under
# tripleo-ci
export TRIPLEO_ROOT=${TRIPLEO_ROOT:-"/opt/stack/new"}
if [ -f "$TRIPLEO_ROOT/tripleo-ci/deploy.env" ]; then
    source $TRIPLEO_ROOT/tripleo-ci/deploy.env
fi

# Ensure epel-release is not installed
sudo yum erase -y epel-release || :

# Copied from toci_gate_test.sh...need to apply this fix on subnodes as well
# TODO(pabelanger): Why is python-requests installed from pip?
# TODO(amoralej): remove after https://review.opendev.org/#/c/468872/ is merged
sudo pip uninstall certifi -y || true
sudo pip uninstall urllib3 -y || true
sudo pip uninstall requests -y || true
sudo rpm -e --nodeps python2-certifi || :
sudo rpm -e --nodeps python2-urllib3 || :
sudo rpm -e --nodeps python2-requests || :
sudo yum -y install python-requests python-urllib3

# Clear out any puppet modules on the node placed there by infra configuration
sudo rm -rf /etc/puppet/modules/*

# This will remove any puppet configuration done by infra setup
sudo yum -y remove puppet facter hiera

# Update everything
sudo yum -y update

# git is needed since oooq multinode jobs do a git clone
# See https://bugs.launchpad.net/tripleo-quickstart/+bug/1667043
sudo yum -y install git python-heat-agent* openstack-heat-agents

# create a loop device for ceph-ansible
# device name is static so we know what to point to from ceph-ansible
# job names might change, but multinode implies ceph as per scenario001-multinode.yaml
if [[ "${TOCI_JOBTYPE:-''}" =~ multinode ]]; then
    if [[ ! -e /dev/loop3 ]]; then # ensure /dev/loop3 does not exist before making it
        command -v losetup >/dev/null 2>&1 || { sudo yum -y install util-linux; }
        sudo dd if=/dev/zero of=/var/lib/ceph-osd.img bs=1 count=0 seek=7G
        sudo losetup /dev/loop3 /var/lib/ceph-osd.img
    elif [[ -f /var/lib/ceph-osd.img ]]; then # loop3 and ceph-osd.img exist
        echo "warning: looks like ceph loop device already created. Trying to continue"
    else
        echo "error: /dev/loop3 exists but not /var/lib/ceph-osd.img. Exiting."
        exit 1
    fi
    sudo lsblk
fi
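A quick way to verify the loop device wiring above, purely illustrative:

sudo losetup --list | grep /dev/loop3   # should show /var/lib/ceph-osd.img as the backing file
lsblk /dev/loop3                        # the ~7G device ceph-ansible will consume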

View File

@@ -1,97 +0,0 @@
#!/bin/bash
set -eux
export STABLE_RELEASE=${STABLE_RELEASE:-""}
# Source deploy.env if it exists. It should exist if we are running under
# tripleo-ci
export TRIPLEO_ROOT=${TRIPLEO_ROOT:-"/opt/stack/new"}
if [ -f "$TRIPLEO_ROOT/tripleo-ci/deploy.env" ]; then
source $TRIPLEO_ROOT/tripleo-ci/deploy.env
fi
# Ensure epel-release is not installed
sudo yum erase -y epel-release || :
# Copied from toci_gate_test.sh...need to apply this fix on subnodes as well
# TODO(pabelanger): Why is python-requests installed from pip?
# Reinstall python-requests if it was already installed, otherwise it will be
# installed later when other packages are installed.
# TODO(amoralej): remove after https://review.opendev.org/#/c/468872/ is merged
sudo pip uninstall certifi -y || true
sudo pip uninstall urllib3 -y || true
sudo pip uninstall requests -y || true
sudo rpm -e --nodeps python2-certifi || :
sudo rpm -e --nodeps python2-urllib3 || :
sudo rpm -e --nodeps python2-requests || :
sudo yum -y install python-requests python-urllib3
# Remove anything on the infra image template that might interfere with CI
# Note for tripleo-quickstart: this task is already managed in tripleo-ci-setup-playbook.yml
sudo yum remove -y facter puppet hiera puppetlabs-release rdo-release centos-release-[a-z]*
sudo rm -rf /etc/puppet /etc/hiera.yaml
# Update everything
sudo yum -y update
# instack-undercloud will pull in all the needed deps
# git needed since puppet modules installed from source
# openstack-tripleo-common needed for the tripleo-build-images command
sudo yum -y install instack-undercloud git openstack-tripleo-common
# detect the real path depending on diskimage-builder version
COMMON_ELEMENTS_PATH=$(python -c '
try:
    import diskimage_builder.paths
    diskimage_builder.paths.show_path("elements")
except:
    print("/usr/share/diskimage-builder/elements")
')
export ELEMENTS_PATH="${COMMON_ELEMENTS_PATH}:/usr/share/instack-undercloud:/usr/share/tripleo-image-elements:/usr/share/tripleo-puppet-elements"
ELEMENTS=$(\
    tripleo-build-images \
        --image-json-output \
        --image-name overcloud-full \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
    | jq '. | .[0].elements | map(.+" ") | add' \
    | sed 's/"//g')
# delorean-repo is excluded b/c we've already run --repo-setup on this node and
# we don't want to overwrite that.
sudo -E instack \
    -e centos7 \
       enable-packages-install \
       install-types \
       $ELEMENTS \
    -k extra-data \
       pre-install \
       install \
       post-install \
    -b 05-fstab-rootfs-label \
       00-fix-requiretty \
       90-rebuild-ramdisk \
       00-usr-local-bin-secure-path \
    -x delorean-repo \
    -d
# In the imported elements we have remove-machine-id. In multinode
# jobs that could mean we end up without /etc/machine-id. Make sure
# we have one.
[ -s /etc/machine-id ] || sudo -E systemd-machine-id-setup
PACKAGES=$(\
    tripleo-build-images \
        --image-json-output \
        --image-name overcloud-full \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images-centos7.yaml \
        --image-config-file /usr/share/tripleo-common/image-yaml/overcloud-images.yaml \
    | jq '. | .[0].packages | .[] | tostring' \
    | sed 's/"//g')
# Install additional packages expected by the image
sudo yum -y install $PACKAGES
sudo sed -i 's/SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
sudo setenforce 0

View File

@@ -1,16 +0,0 @@
#!/bin/bash
set -x
nova delete $(nova list --all-tenants | grep -e te-broker -e mirror-server -e proxy-server| awk '{print $2}')
sleep 5
keystone user-delete openstack-nodepool
keystone tenant-delete openstack-nodepool
neutron router-interface-delete private_router private_subnet
neutron router-delete private_router
neutron subnet-delete private_subnet
neutron net-delete private
neutron subnet-delete public_subnet
neutron net-delete public

View File

@@ -1,29 +0,0 @@
#!/bin/bash
# bootstrap a tripleo-ci infrastructure server, this selects which puppet manifest
# to run based on the hostname e.g. to create a mirror server then one can simply
# nova boot --image <id> --flavor <id> --user-data scripts/deploy-server.sh --nic net-id=<id> --nic net-id=<id>,v4-fixed-ip=192.168.1.101 mirror-server
yum install -y epel-release
yum install -y puppet git
echo puppetlabs-apache adrien-filemapper | xargs -n 1 puppet module install
git clone https://github.com/puppetlabs/puppetlabs-vcsrepo.git /etc/puppet/modules/vcsrepo
if [ -e /sys/class/net/eth1 ] ; then
    echo -e 'DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no' > /etc/sysconfig/network-scripts/ifcfg-eth1
    ifdown eth1
    ifup eth1
fi

CIREPO=/opt/stack/tripleo-ci
mkdir -p $CIREPO
git clone https://opendev.org/openstack/tripleo-ci $CIREPO

if [ -f $CIREPO/scripts/$(hostname)/$(hostname).sh ] ; then
    bash $CIREPO/scripts/$(hostname)/$(hostname).sh
fi
if [ -f $CIREPO/scripts/$(hostname)/$(hostname).pp ] ; then
    puppet apply $CIREPO/scripts/$(hostname)/$(hostname).pp
fi

View File

@@ -1,590 +0,0 @@
#!/bin/bash
set -eux
set -o pipefail
cd
# This sets all the environment variables for undercloud and overcloud installation
source $TRIPLEO_ROOT/tripleo-ci/deploy.env
source $TRIPLEO_ROOT/tripleo-ci/scripts/metrics.bash
source $TRIPLEO_ROOT/tripleo-ci/scripts/common_functions.sh
# Prevent python from buffering stdout, so timestamps are set at appropriate times
export PYTHONUNBUFFERED=true
export DIB_DISTRIBUTION_MIRROR=$NODEPOOL_CENTOS_MIRROR
export STABLE_RELEASE=${STABLE_RELEASE:-""}
# the TLS everywhere job requires the undercloud to have a domain set so it can
# enroll to FreeIPA
if [ $CA_SERVER == 1 ] ; then
    # This is needed since we use scripts that are located both in t-h-t and
    # tripleo-common for setting up our test CA.
    sudo yum install -yq \
        openstack-tripleo-heat-templates \
        openstack-tripleo-common \
        ipa-client \
        python-novajoin

    export TRIPLEO_DOMAIN=ooo.test
    export CA_SERVER_HOSTNAME=ipa.$TRIPLEO_DOMAIN
    export CA_ADMIN_PASS=$(uuidgen)
    export CA_DIR_MANAGER_PASS=$(uuidgen)
    export CA_SECRET=$(uuidgen)
    export UNDERCLOUD_FQDN=undercloud.$TRIPLEO_DOMAIN
    # We can access the CA server through this address for bootstrapping
    # purposes.
    export CA_SERVER_PRIVATE_IP=$(jq -r '.extra_nodes[0].ips.private[0].addr' ~/instackenv.json)
    # Address that will be used for the provisioning interface. The undercloud
    # and the overcloud nodes should have access to this.
    export CA_SERVER_IP="192.168.24.250"
    export CA_SERVER_CIDR="${CA_SERVER_IP}/24"

    echo "$CA_SERVER_PRIVATE_IP $CA_SERVER_HOSTNAME" | sudo tee -a /etc/hosts

    cat <<EOF >~/freeipa-setup.env
export Hostname=$CA_SERVER_HOSTNAME
export FreeIPAIP=$CA_SERVER_IP
export AdminPassword=$CA_ADMIN_PASS
export DirectoryManagerPassword=$CA_DIR_MANAGER_PASS
export HostsSecret=$CA_SECRET
export UndercloudFQDN=$UNDERCLOUD_FQDN
export ProvisioningCIDR=$CA_SERVER_CIDR
export UsingNovajoin=1
EOF

    # Set undercloud FQDN
    sudo hostnamectl set-hostname --static $UNDERCLOUD_FQDN

    # Copy CA env file and installation script
    scp $SSH_OPTIONS ~/freeipa-setup.env centos@$CA_SERVER_PRIVATE_IP:/tmp/freeipa-setup.env
    scp $SSH_OPTIONS /usr/share/openstack-tripleo-heat-templates/ci/scripts/freeipa_setup.sh centos@$CA_SERVER_PRIVATE_IP:~/freeipa_setup.sh

    # Set up CA
    ssh $SSH_OPTIONS -tt centos@$CA_SERVER_PRIVATE_IP "sudo bash ~/freeipa_setup.sh"

    # enroll to CA
    sudo /usr/libexec/novajoin-ipa-setup \
        --principal admin \
        --password $CA_ADMIN_PASS \
        --server $CA_SERVER_HOSTNAME \
        --realm $(echo $TRIPLEO_DOMAIN | awk '{print toupper($0)}') \
        --domain $TRIPLEO_DOMAIN \
        --hostname $UNDERCLOUD_FQDN \
        --otp-file /tmp/ipa-otp.txt \
        --precreate

    cat <<EOF >$TRIPLEO_ROOT/cloud-names.yaml
parameter_defaults:
  CloudDomain: $TRIPLEO_DOMAIN
  CloudName: overcloud.$TRIPLEO_DOMAIN
  CloudNameInternal: overcloud.internalapi.$TRIPLEO_DOMAIN
  CloudNameStorage: overcloud.storage.$TRIPLEO_DOMAIN
  CloudNameStorageManagement: overcloud.storagemgmt.$TRIPLEO_DOMAIN
  CloudNameCtlplane: overcloud.ctlplane.$TRIPLEO_DOMAIN
EOF
fi
cat <<EOF >$HOME/undercloud-hieradata-override.yaml
ironic::drivers::deploy::http_port: 3816
EOF
echo '[DEFAULT]' > ~/undercloud.conf
echo "hieradata_override = $HOME/undercloud-hieradata-override.yaml" >> ~/undercloud.conf
cat <<EOF >>~/undercloud.conf
network_cidr = 192.168.24.0/24
local_ip = 192.168.24.1/24
network_gateway = 192.168.24.1
undercloud_public_vip = 192.168.24.2
undercloud_admin_vip = 192.168.24.3
masquerade_network = 192.168.24.0/24
dhcp_start = 192.168.24.5
dhcp_end = 192.168.24.30
inspection_iprange = 192.168.24.100,192.168.24.120
EOF
if [ $UNDERCLOUD_SSL == 1 ] ; then
    echo 'generate_service_certificate = True' >> ~/undercloud.conf
fi

if [ $UNDERCLOUD_TELEMETRY == 0 ] ; then
    echo 'enable_telemetry = False' >> ~/undercloud.conf
    echo 'enable_legacy_ceilometer_api = false' >> ~/undercloud.conf
fi

if [ $UNDERCLOUD_UI == 0 ] ; then
    echo 'enable_ui = False' >> ~/undercloud.conf
fi

if [ $UNDERCLOUD_VALIDATIONS == 0 ] ; then
    echo 'enable_validations = False' >> ~/undercloud.conf
fi

if [ $RUN_TEMPEST_TESTS != 1 ] ; then
    echo 'enable_tempest = False' >> ~/undercloud.conf
fi

if [ $CA_SERVER == 1 ] ; then
    echo 'enable_novajoin = True' >> ~/undercloud.conf
    echo "undercloud_hostname = $UNDERCLOUD_FQDN" >> ~/undercloud.conf
    echo "ipa_otp = $(cat /tmp/ipa-otp.txt)" >> ~/undercloud.conf
    echo "undercloud_nameservers = $CA_SERVER_IP" >> ~/undercloud.conf
    echo "overcloud_domain_name = $TRIPLEO_DOMAIN" >> ~/undercloud.conf
    echo "nova::api::vendordata_dynamic_connect_timeout: 20" >> ~/undercloud-hieradata-override.yaml
    echo "nova::api::vendordata_dynamic_read_timeout: 20" >> ~/undercloud-hieradata-override.yaml
    # NOTE(jaosorior): the DNSServers from the overcloud need to point to the
    # CA so the domain can be discovered.
    sed -i 's/\(DnsServers: \).*/\1["'$CA_SERVER_IP'", "8.8.8.8"]/' \
        $TRIPLEO_ROOT/tripleo-ci/test-environments/network-templates/network-environment.yaml
    sed -i 's/\(DnsServers: \).*/\1["'$CA_SERVER_IP'", "8.8.8.8"]/' \
        $TRIPLEO_ROOT/tripleo-ci/test-environments/net-iso.yaml
    # Use FreeIPA as nameserver
    echo -e "nameserver 192.168.24.250\nnameserver 8.8.8.8" | sudo tee /etc/resolv.conf
fi

if [ $UNDERCLOUD_HEAT_CONVERGENCE == 1 ] ; then
    cat <<EOF >>$HOME/undercloud-hieradata-override.yaml
heat::engine::convergence_engine: true
EOF
fi

# TODO: fix this in instack-undercloud
sudo mkdir -p /etc/puppet/hieradata

if [ "$OSINFRA" = 1 ]; then
    echo "net_config_override = $TRIPLEO_ROOT/tripleo-ci/undercloud-configs/net-config-multinode.json.template" >> ~/undercloud.conf
fi

# If we're testing an undercloud upgrade, remove the ci repo, since we don't
# want to consume the package being tested until we actually do the upgrade.
if [ "$UNDERCLOUD_MAJOR_UPGRADE" == 1 ] ; then
    sudo rm -f /etc/yum.repos.d/delorean-ci.repo
fi
echo "INFO: Check /var/log/undercloud_install.txt for undercloud install output"
echo "INFO: This file can be found in logs/undercloud.tar.xz in the directory containing console.log"
start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.undercloud.install.seconds"
$TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --undercloud 2>&1 | awk '{ print strftime("%Y-%m-%d %H:%M:%S.000"), "|", $0; fflush(); }' | sudo dd of=/var/log/undercloud_install.txt || (tail -n 50 /var/log/undercloud_install.txt && false)
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.undercloud.install.seconds"
# FreeIPA contains an LDAP server, so we're gonna use that to set up a keystone
# domain that reads from that ldap server.
if [ $CA_SERVER == 1 ] ; then
    export LDAP_DOMAIN_NAME=freeipadomain
    export LDAP_READER_INIT_PASS=$(uuidgen)
    export LDAP_READER_PASS=$(uuidgen)
    export DEMO_USER_INIT_PASS=$(uuidgen)
    export DEMO_USER_PASS=$(uuidgen)

    echo $CA_ADMIN_PASS | kinit admin
    echo "$LDAP_READER_INIT_PASS" | ipa user-add keystone --cn="keystone user" \
        --first="keystone" --last="user" --password
    echo "$DEMO_USER_INIT_PASS" | ipa user-add demo --cn="demo user" \
        --first="demo" --last="user" --password
    kdestroy -A

    # Reset password. Since kerberos prompts for the password to be reset on
    # first usage.
    echo -e "$LDAP_READER_INIT_PASS\n$LDAP_READER_PASS\n$LDAP_READER_PASS" | kinit keystone
    kdestroy -A
    echo -e "$DEMO_USER_INIT_PASS\n$DEMO_USER_PASS\n$DEMO_USER_PASS" | kinit demo
    kdestroy -A

    export LDAP_SUFFIX=$(echo $TRIPLEO_DOMAIN | sed -e 's/^/dc=/' -e 's/\./,dc=/g')

    # Create LDAP configuration in heat environment
    cat <<EOF >$TRIPLEO_ROOT/keystone-ldap.yaml
parameter_defaults:
  KeystoneLDAPDomainEnable: true
  KeystoneLDAPBackendConfigs:
    $LDAP_DOMAIN_NAME:
      url: ldaps://$CA_SERVER_HOSTNAME
      user: uid=keystone,cn=users,cn=accounts,$LDAP_SUFFIX
      password: $LDAP_READER_PASS
      suffix: $LDAP_SUFFIX
      user_tree_dn: cn=users,cn=accounts,$LDAP_SUFFIX
      user_objectclass: inetOrgPerson
      user_id_attribute: uid
      user_name_attribute: uid
      user_mail_attribute: mail
      user_allow_create: false
      user_allow_update: false
      user_allow_delete: false
      group_tree_dn: cn=groups,cn=accounts,$LDAP_SUFFIX
      group_objectclass: groupOfNames
      group_id_attribute: cn
      group_name_attribute: cn
      group_member_attribute: member
      group_desc_attribute: description
      group_allow_create: false
      group_allow_update: false
      group_allow_delete: false
      user_enabled_attribute: nsAccountLock
      user_enabled_default: False
      user_enabled_invert: true
EOF
fi
if [ "$OVB" = 1 ]; then
# eth1 is on the provisioning netwrok and doesn't have dhcp, so we need to set its MTU manually.
sudo ip link set dev eth1 up
sudo ip link set dev eth1 mtu 1400
echo -e "\ndhcp-option-force=26,1400" | sudo tee -a /etc/dnsmasq-ironic.conf
sudo systemctl restart 'neutron-*'
# The undercloud install is creating file in ~/.cache as root
# change them back so we can build overcloud images
sudo chown -R $USER ~/.cache || true
# check the power status of the last IPMI device we have details for
# this ensures the BMC is ready and sanity tests that its working
PMADDR=$(jq '.nodes[length-1].pm_addr' < ~/instackenv.json | tr '"' ' ')
$TRIPLEO_ROOT/tripleo-ci/scripts/wait_for -d 10 -l 40 -- ipmitool -I lanplus -H $PMADDR -U admin -P password power status
fi
if [ $INTROSPECT == 1 ] ; then
# I'm removing most of the nodes in the env to speed up discovery
# This could be in jq but I don't know how
# Only do this for jobs that use introspection, as it makes the likelihood
# of hitting https://bugs.launchpad.net/tripleo/+bug/1341420 much higher
python -c "import simplejson ; d = simplejson.loads(open(\"instackenv.json\").read()) ; del d[\"nodes\"][$NODECOUNT:] ; print simplejson.dumps(d)" > instackenv_reduced.json
mv instackenv_reduced.json instackenv.json
# Lower the timeout for introspection to decrease failure time
# It should not take more than 10 minutes with IPA ramdisk and no extra collectors
sudo sed -i '2itimeout = 600' /etc/ironic-inspector/inspector.conf
sudo systemctl restart openstack-ironic-inspector
fi
if [ $NETISO_V4 -eq 1 ] || [ $NETISO_V6 -eq 1 ]; then
    # Update our floating range to use a 10. /24
    export FLOATING_IP_CIDR=${FLOATING_IP_CIDR:-"10.0.0.0/24"}
    export FLOATING_IP_START=${FLOATING_IP_START:-"10.0.0.100"}
    export FLOATING_IP_END=${FLOATING_IP_END:-"10.0.0.200"}
    export EXTERNAL_NETWORK_GATEWAY=${EXTERNAL_NETWORK_GATEWAY:-"10.0.0.1"}

    # Make our undercloud act as the external gateway
    # OVB uses eth2 as the "external" network
    # NOTE: seed uses eth0 for the local network.
    cat >> /tmp/eth2.cfg <<EOF_CAT
network_config:
- type: interface
  name: eth2
  use_dhcp: false
  addresses:
  - ip_netmask: 10.0.0.1/24
  - ip_netmask: 2001:db8:fd00:1000::1/64
EOF_CAT
    sudo os-net-config -c /tmp/eth2.cfg -v
fi
if [ "$OSINFRA" = "0" ]; then
# Our ci underclouds don't have enough RAM to allow us to use a tmpfs
export DIB_NO_TMPFS=1
# No point waiting for a grub prompt in ci
export DIB_GRUB_TIMEOUT=0
# Override the default repositories set by tripleo.sh, to add the delorean-ci repository
export OVERCLOUD_IMAGES_DIB_YUM_REPO_CONF=$(ls /etc/yum.repos.d/delorean*)
# Directing the output of this command to a file as its extreemly verbose
echo "INFO: Check /var/log/image_build.txt for image build output"
echo "INFO: This file can be found in logs/undercloud.tar.xz in the directory containing console.log"
start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.images.seconds"
$TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-images 2>&1 | awk '{ print strftime("%Y-%m-%d %H:%M:%S.000"), "|", $0; fflush(); }' | sudo dd of=/var/log/image_build.txt || (tail -n 50 /var/log/image_build.txt && false)
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.images.seconds"
OVERCLOUD_IMAGE_MB=$(du -ms overcloud-full.qcow2 | cut -f 1)
record_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.image.size_mb" "$OVERCLOUD_IMAGE_MB"
start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.register.nodes.seconds"
if [ $INTROSPECT == 1 ]; then
export INTROSPECT_NODES=1
fi
$TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --register-nodes
# We don't want to keep this set for further calls to tripleo.sh
unset INTROSPECT_NODES
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.register.nodes.seconds"
if [ $INTROSPECT == 1 ] ; then
start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
$TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --introspect-nodes
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.introspect.seconds"
fi
if [ $PREDICTABLE_PLACEMENT == 1 ]; then
source ~/stackrc
NODE_ID_0=$(ironic node-list | grep available | head -n 1 | tail -n 1 | awk '{print $2}')
NODE_ID_1=$(ironic node-list | grep available | head -n 2 | tail -n 1 | awk '{print $2}')
NODE_ID_2=$(ironic node-list | grep available | head -n 3 | tail -n 1 | awk '{print $2}')
NODE_ID_3=$(ironic node-list | grep available | head -n 4 | tail -n 1 | awk '{print $2}')
ironic node-update $NODE_ID_0 replace properties/capabilities='node:controller-0,boot_option:local'
ironic node-update $NODE_ID_1 replace properties/capabilities='node:controller-1,boot_option:local'
ironic node-update $NODE_ID_2 replace properties/capabilities='node:controller-2,boot_option:local'
ironic node-update $NODE_ID_3 replace properties/capabilities='node:compute-0,boot_option:local'
fi
sleep 60
fi
if [ -n "${OVERCLOUD_UPDATE_ARGS:-}" ] ; then
# Reinstall openstack-tripleo-heat-templates from delorean-current.
# Since we're testing updates, we want to remove any version we may have
# installed from the delorean-ci repo and install from delorean-current,
# or just delorean in the case of stable branches.
sudo rpm -ev --nodeps openstack-tripleo-heat-templates
sudo yum -y --disablerepo=* --enablerepo=delorean,delorean-current install openstack-tripleo-heat-templates
fi
if [ "$MULTINODE" = "1" ]; then
# Start the script that will configure os-collect-config on the subnodes
source ~/stackrc
if [ "$OVERCLOUD_MAJOR_UPGRADE" == 1 ] ; then
# Download the previous release openstack-tripleo-heat-templates to a directory
# we then deploy this and later upgrade to the default --templates location
# FIXME - we should make the tht-compat package work here instead
OLD_THT=$(curl https://trunk.rdoproject.org/centos7-$UPGRADE_RELEASE/current/ | grep "openstack-tripleo-heat-templates" | grep "noarch.rpm" | grep -v "tripleo-heat-templates-compat" | sed "s/^.*>openstack-tripleo-heat-templates/openstack-tripleo-heat-templates/" | cut -d "<" -f1)
echo "Downloading https://trunk.rdoproject.org/centos7-$UPGRADE_RELEASE/current/$OLD_THT"
rm -fr $TRIPLEO_ROOT/$UPGRADE_RELEASE/*
mkdir -p $TRIPLEO_ROOT/$UPGRADE_RELEASE
curl -o $TRIPLEO_ROOT/$UPGRADE_RELEASE/$OLD_THT https://trunk.rdoproject.org/centos7-$UPGRADE_RELEASE/current/$OLD_THT
pushd $TRIPLEO_ROOT/$UPGRADE_RELEASE
rpm2cpio openstack-tripleo-heat-templates-*.rpm | cpio -ivd
popd
# Backup current deploy args:
UPGRADE_OVERCLOUD_DEPLOY_ARGS=$OVERCLOUD_DEPLOY_ARGS
# Rewrite all template paths to ensure paths match the
# --templates location
TEMPLATE_PATH="/usr/share/openstack-tripleo-heat-templates"
ENV_PATH="-e $TEMPLATE_PATH"
STABLE_TEMPLATE_PATH="$TRIPLEO_ROOT/$UPGRADE_RELEASE/usr/share/openstack-tripleo-heat-templates"
STABLE_ENV_PATH="-e $STABLE_TEMPLATE_PATH"
echo "SHDEBUG OVERCLOUD_DEPLOY_ARGS BEFORE=$OVERCLOUD_DEPLOY_ARGS"
UPGRADE_OVERCLOUD_DEPLOY_ARGS=${UPGRADE_OVERCLOUD_DEPLOY_ARGS//$STABLE_ENV_PATH/$ENV_PATH}
OVERCLOUD_DEPLOY_ARGS=${OVERCLOUD_DEPLOY_ARGS//$ENV_PATH/$STABLE_ENV_PATH}
echo "UPGRADE_OVERCLOUD_DEPLOY_ARGS=$UPGRADE_OVERCLOUD_DEPLOY_ARGS"
echo "OVERCLOUD_DEPLOY_ARGS=$OVERCLOUD_DEPLOY_ARGS"
# Set deploy args for stable deployment:
export OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS --templates $STABLE_TEMPLATE_PATH -e $STABLE_TEMPLATE_PATH/environments/deployed-server-environment.yaml -e $STABLE_TEMPLATE_PATH/environments/services/sahara.yaml"
if [ ! -z $UPGRADE_ENV ]; then
export OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS -e $TRIPLEO_ROOT/$UPGRADE_RELEASE/$UPGRADE_ENV"
fi
echo_vars_to_deploy_env
$TRIPLEO_ROOT/$UPGRADE_RELEASE/usr/share/openstack-tripleo-heat-templates/deployed-server/scripts/get-occ-config.sh 2>&1 | sudo dd of=/var/log/deployed-server-os-collect-config.log &
else
/usr/share/openstack-tripleo-heat-templates/deployed-server/scripts/get-occ-config.sh 2>&1 | sudo dd of=/var/log/deployed-server-os-collect-config.log &
fi
# Create dummy overcloud-full image since there is no way (yet) to disable
# this constraint in the heat templates
if ! openstack image show overcloud-full; then
qemu-img create -f qcow2 overcloud-full.qcow2 1G
glance image-create \
--container-format bare \
--disk-format qcow2 \
--name overcloud-full \
--file overcloud-full.qcow2
fi
fi
if [ $OVERCLOUD == 1 ] ; then
    source ~/stackrc
    start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.deploy.seconds"
    http_proxy= $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-deploy ${TRIPLEO_SH_ARGS:-}
    stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.deploy.seconds"
    # Add hosts to /etc/hosts
    openstack stack output show overcloud HostsEntry -f value -c output_value | sudo tee -a /etc/hosts
fi

if [ $UNDERCLOUD_IDEMPOTENT == 1 ]; then
    echo "INFO: Check /var/log/undercloud_install_idempotent.txt for undercloud install output"
    echo "INFO: This file can be found in logs/undercloud.tar.xz in the directory containing console.log"
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --undercloud 2>&1 | sudo dd of=/var/log/undercloud_install_idempotent.txt || (tail -n 50 /var/log/undercloud_install_idempotent.txt && false)
fi

if [ -n "${OVERCLOUD_UPDATE_ARGS:-}" ] ; then
    # Reinstall openstack-tripleo-heat-templates, this will pick up the version
    # from the delorean-ci repo if the patch being tested is from
    # tripleo-heat-templates, otherwise it will just reinstall from
    # delorean-current.
    sudo rpm -ev --nodeps openstack-tripleo-heat-templates
    sudo yum -y install openstack-tripleo-heat-templates

    start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.update.seconds"
    http_proxy= $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-update ${TRIPLEO_SH_ARGS:-}
    stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.update.seconds"
fi
if [ "$MULTINODE" == 0 ] && [ "$OVERCLOUD" == 1 ] ; then
# Sanity test we deployed what we said we would
source ~/stackrc
[ "$NODECOUNT" != $(nova list | grep ACTIVE | wc -l | cut -f1 -d " ") ] && echo "Wrong number of nodes deployed" && exit 1
if [ $PREDICTABLE_PLACEMENT == 1 ]; then
# Verify our public VIP is the one we specified
grep -q 10.0.0.9 ~/overcloudrc || (echo "Wrong public vip deployed " && exit 1)
# Verify our specified hostnames were used
INSTANCE_ID_0=$(nova list | grep controller-0-tripleo-ci-a-foo | awk '{print $2}')
INSTANCE_ID_1=$(nova list | grep controller-1-tripleo-ci-b-bar | awk '{print $2}')
INSTANCE_ID_2=$(nova list | grep controller-2-tripleo-ci-c-baz | awk '{print $2}')
INSTANCE_ID_3=$(nova list | grep compute-0-tripleo-ci-a-test | awk '{print $2}')
# Verify the correct ironic nodes were used
echo "Verifying predictable placement configuration was honored."
ironic node-list | grep $INSTANCE_ID_0 | grep -q $NODE_ID_0 || (echo "$INSTANCE_ID_0 not deployed to node $NODE_ID_0" && exit 1)
ironic node-list | grep $INSTANCE_ID_1 | grep -q $NODE_ID_1 || (echo "$INSTANCE_ID_1 not deployed to node $NODE_ID_1" && exit 1)
ironic node-list | grep $INSTANCE_ID_2 | grep -q $NODE_ID_2 || (echo "$INSTANCE_ID_2 not deployed to node $NODE_ID_2" && exit 1)
ironic node-list | grep $INSTANCE_ID_3 | grep -q $NODE_ID_3 || (echo "$INSTANCE_ID_3 not deployed to node $NODE_ID_3" && exit 1)
echo "Verified."
fi
if [ $PACEMAKER == 1 ] ; then
# Wait for the pacemaker cluster to settle and all resources to be
# available. heat-{api,engine} are the best candidates since due to the
# constraint ordering they are typically started last. We'll wait up to
# 180s.
start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.settle.seconds"
timeout -k 10 240 ssh $SSH_OPTIONS heat-admin@$(nova list | grep controller-0 | awk '{print $12}' | cut -d'=' -f2) sudo crm_resource -r openstack-heat-api --wait || {
exitcode=$?
echo "crm_resource for openstack-heat-api has failed!"
exit $exitcode
}
timeout -k 10 240 ssh $SSH_OPTIONS heat-admin@$(nova list | grep controller-0 | awk '{print $12}' | cut -d'=' -f2) sudo crm_resource -r openstack-heat-engine --wait|| {
exitcode=$?
echo "crm_resource for openstack-heat-engine has failed!"
exit $exitcode
}
stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.settle.seconds"
fi
fi
if [ -f ~/overcloudrc ]; then
    source ~/overcloudrc
fi

if [ "$OVERCLOUD_MAJOR_UPGRADE" == 1 ] ; then
    # Re-enable the delorean-ci repo, as ZUUL_REFS,
    # and thus the contents of delorean-ci may contain packages
    # we want to test for the current branch on upgrade
    if [ -s /etc/nodepool/sub_nodes_private ]; then
        for ip in $(cat /etc/nodepool/sub_nodes_private); do
            ssh $SSH_OPTIONS -tt -i /etc/nodepool/id_rsa $ip \
                sudo sed -i -e \"s/enabled=0/enabled=1/\" /etc/yum.repos.d/delorean-ci.repo
        done
    fi

    source ~/stackrc

    # Set deploy args for stable deployment:
    if [ ! -z $UPGRADE_ENV ]; then
        export OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS -e $UPGRADE_ENV"
    fi
    # We have to use the backward compatible
    # update-from-deployed-server-$UPGRADE_RELEASE.yaml environment when upgrading from
    # $UPGRADE_RELEASE.
    export OVERCLOUD_DEPLOY_ARGS="$UPGRADE_OVERCLOUD_DEPLOY_ARGS -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/services/sahara.yaml"
    if [ "$UPGRADE_RELEASE" == "newton" ]; then
        export OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS -e /usr/share/openstack-tripleo-heat-templates/environments/updates/update-from-deployed-server-$UPGRADE_RELEASE.yaml"
    fi
    if [ ! -z $UPGRADE_ENV ]; then
        export OVERCLOUD_DEPLOY_ARGS="$OVERCLOUD_DEPLOY_ARGS -e $UPGRADE_ENV"
    fi
    echo_vars_to_deploy_env

    if [ "$MULTINODE" = "1" ]; then
        /usr/share/openstack-tripleo-heat-templates/deployed-server/scripts/get-occ-config.sh 2>&1 | sudo dd of=/var/log/deployed-server-os-collect-config-22.log &
    fi

    # We run basic sanity tests before/after, which includes creating some resources which
    # must survive the upgrade. The upgrade is performed in two steps, even though this
    # is an all-in-one test, as this is close to how a real deployment with computes would
    # be upgraded.
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-sanity --skip-sanitytest-cleanup
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-upgrade
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-sanity --skip-sanitytest-create --skip-sanitytest-cleanup
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-upgrade-converge
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-sanity --skip-sanitytest-create
fi
if [ $CA_SERVER == 1 ] ; then
    source ~/overcloudrc
    # Verify that the domain exists
    openstack domain show $LDAP_DOMAIN_NAME
    # Add admin role to admin user for freeipadomain
    openstack role add admin --domain $LDAP_DOMAIN_NAME --user admin --user-domain Default
    # Verify we can access the users in the given domain
    openstack user list --domain $LDAP_DOMAIN_NAME
    # Add demo project to domain
    openstack project create demo --domain $LDAP_DOMAIN_NAME
    # Add admin role to user for that project
    openstack role add admin --project demo --project-domain $LDAP_DOMAIN_NAME \
        --user demo --user-domain $LDAP_DOMAIN_NAME

    # Create rc file for demo user
    cat <<EOF >~/overcloudrc.demouser
# Clear any old environment that may conflict.
for key in \$( set | awk '{FS="="} /^OS_/ {print \$1}' ); do unset \$key ; done
export OS_USERNAME=demo
export OS_USER_DOMAIN_NAME=$LDAP_DOMAIN_NAME
export OS_PROJECT_DOMAIN_NAME=$LDAP_DOMAIN_NAME
export OS_BAREMETAL_API_VERSION=1.29
export NOVA_VERSION=1.1
export OS_PROJECT_NAME=demo
export OS_PASSWORD=$DEMO_USER_PASS
export OS_NO_CACHE=True
export COMPUTE_API_VERSION=1.1
export no_proxy=,overcloud.$TRIPLEO_DOMAIN,overcloud.ctlplane.$TRIPLEO_DOMAIN,overcloud.$TRIPLEO_DOMAIN,overcloud.ctlplane.$TRIPLEO_DOMAIN
export OS_CLOUDNAME=overcloud
export OS_AUTH_URL=https://overcloud.$TRIPLEO_DOMAIN:13000/v3
export IRONIC_API_VERSION=1.29
export OS_IDENTITY_API_VERSION=3
export OS_AUTH_TYPE=password
export PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available"
EOF

    # Tell tripleo.sh/pingtest to use demouserrc instead of overcloudrc. Note
    # that we don't include the $HOME path prefix on the variable, as this is
    # implicitly added in tripleo.sh
    export ALT_OVERCLOUDRC=overcloudrc.demouser
fi

if [ $RUN_PING_TEST == 1 ] ; then
    start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.ping_test.seconds"
    OVERCLOUD_PINGTEST_OLD_HEATCLIENT=0 $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-pingtest $OVERCLOUD_PINGTEST_ARGS
    stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.ping_test.seconds"
fi

if [ $RUN_TEMPEST_TESTS == 1 ] ; then
    start_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.tempest.seconds"
    export TEMPEST_REGEX='^(?=(.*smoke))(?!('
    export TEMPEST_REGEX="${TEMPEST_REGEX}tempest.scenario.test_volume_boot_pattern" # http://bugzilla.redhat.com/1272289
    export TEMPEST_REGEX="${TEMPEST_REGEX}|tempest.api.identity.*v3" # https://bugzilla.redhat.com/1266947
    export TEMPEST_REGEX="${TEMPEST_REGEX}|.*test_external_network_visibility" # https://bugs.launchpad.net/tripleo/+bug/1577769
    export TEMPEST_REGEX="${TEMPEST_REGEX}|tempest.api.data_processing" # Sahara is not enabled by default and has problem with performance
    export TEMPEST_REGEX="${TEMPEST_REGEX}))"
    bash $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --run-tempest
    stop_metric "tripleo.${STABLE_RELEASE:-master}.${TOCI_JOBTYPE}.overcloud.tempest.seconds"
fi

if [ $TEST_OVERCLOUD_DELETE -eq 1 ] ; then
    source ~/stackrc
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --overcloud-delete
fi
# Upgrade part
if [ "$UNDERCLOUD_MAJOR_UPGRADE" == 1 ] ; then
    # Reset or unset STABLE_RELEASE so that we upgrade to the next major
    # version
    if [ "$STABLE_RELEASE" = "queens" ]; then
        # TODO: switch STABLE_RELEASE to rocky when released
        export STABLE_RELEASE=""
    elif [ "$STABLE_RELEASE" = "pike" ]; then
        export STABLE_RELEASE="queens"
    elif [ "$STABLE_RELEASE" = "ocata" ]; then
        export STABLE_RELEASE="pike"
    elif [ "$STABLE_RELEASE" = "newton" ]; then
        export STABLE_RELEASE="ocata"
    fi
    echo_vars_to_deploy_env
    # Add the delorean ci repo so that we include the package being tested
    layer_ci_repo
    $TRIPLEO_ROOT/tripleo-ci/scripts/tripleo.sh --undercloud-upgrade 2>&1 | awk '{ print strftime("%Y-%m-%d %H:%M:%S.000"), "|", $0; fflush(); }' | sudo dd of=/var/log/undercloud_upgrade.txt || (tail -n 50 /var/log/undercloud_upgrade.txt && false)
fi

View File

@@ -1,77 +0,0 @@
#!/bin/bash
set -eu -o pipefail

function usage {
    echo "Helper script for downloading tripleo-ci jobs logs"
    echo
    echo "Example:"
    echo "getthelogs http://logs.openstack.org/00/123456/7/check/gate-tripleo-ci-foo/d3adbeef"
    echo
    echo "Downloads the logs and starts a shell from the logs root directory"
}

function finish {
    rc=${rc:-$?}
    trap - EXIT
    cd $TDIR/../
    echo "Download job exited ${rc}"
    PS1="JOBLOGS ]\$ " bash --noprofile --norc
}

function get_dirs {
    local drop="\b(etc|ara|ara_oooq|docs|build|stackviz|sudoers.d|config-data|extra)\b"
    local directories=""
    directories=$(curl -s "$1" 2> /dev/null | grep -E "\[DIR" | grep -vE "${drop}" | sed -e "s,.*href=\"\([^\"]*\)\".*,${1}\1,g")
    if [ -n "$directories" ]; then
        for d in $directories; do
            directories="$directories $(get_dirs $d/)"
        done
        echo $directories
    else
        echo ""
    fi
    return 0
}

[[ "${1:--}" =~ ^\s+?- ]] && (usage; exit 1)
type -p wget >/dev/null 2>&1 || ( echo "Please install a wget tool!"; exit 127 )
trap finish EXIT SIGINT SIGTERM

WORKERS=6
BASEURL=${1%/}
SC=$(dirname $BASEURL | grep -o \/ | wc -w)
if [[ $BASEURL =~ 'logs.rdoproject' && SC -le 9 ]] ||\
   [[ $BASEURL =~ 'logs.rdoproject.org/openstack-periodic' && SC -le 5 ]]; then
    console="$BASEURL/console.txt.gz"
elif [[ ! $(basename $BASEURL) == 'logs' && SC -le 7 ]]; then
    console="$BASEURL/job-output.txt.gz"
    BASEURL=${BASEURL}/logs
else
    console=''
fi
TDIR=${BASEURL##*http://}
TDIR=${TDIR##*https://}
TDIR=/tmp/${TDIR}
mkdir -p $TDIR
cd /tmp

echo "Target dir for download: $TDIR"
echo Will download logs from the following URLs:
list_to_get="$console $(get_dirs $BASEURL/)"
for d in $list_to_get; do
    echo $d
done

rm -f wget-jobs.txt
for d in $list_to_get; do
    args="\"-nv -nc --no-use-server-timestamps \
        --accept-regex='\.txt\.gz$|messages$' \
        --reject='index.html*' \
        --recursive -l 10 --domains logs.openstack.org,logs.rdoproject.org \
        --no-parent \
        -erobots=off --wait 0.25 ${d}\""
    echo "${args}" >> wget-jobs.txt
done

cat wget-jobs.txt | sed -n '{p;p}' | shuf > wget-jobs-shuf.txt
cat wget-jobs-shuf.txt | xargs -r -n1 -P ${WORKERS} -I{} sh -c "wget {}"

View File

@@ -1,83 +0,0 @@
#!/bin/bash
export METRICS_START_TIMES=/tmp/metric-start-times
export METRICS_DATA_FILE=/tmp/metrics-data

# Record a metric. If no DTS is provided the current date is used.
function record_metric {
    local METRIC_NAME=$1
    local METRIC_VALUE=$2
    local DTS=${3:-$(date +%s)}
    if [ -z "$METRIC_NAME" -o -z "$METRIC_VALUE" ]; then
        echo "Please specify METRIC_NAME and METRIC_VALUE" >&2
        exit 1
    fi
    echo "$METRIC_NAME:$METRIC_VALUE:$DTS" >> $METRICS_DATA_FILE
}

# Start a time metric by keeping track of a timestamp until stop_metric is
# called. NOTE: time metrics names must be unique.
function start_metric {
    local NAME=$1
    local METRIC_NAME
    local START_TIME
    START_TIME=$(date +%s)
    # we use : as our delimiter so convert to _. Also convert spaces and /'s.
    METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
    if grep -c "^$METRIC_NAME:" $METRICS_START_TIMES &>/dev/null; then
        echo "start_metric has already been called for $NAME" >&2
        exit 1
    fi
    echo "$METRIC_NAME:$START_TIME" >> $METRICS_START_TIMES
}

# Stop a time metric previously started by the start_metric function.
# The total time (in seconds) is calculated and logged to the metrics
# data file. NOTE: the end time is used as the DTS.
function stop_metric {
    local END_TIME
    local LINE
    local METRIC_NAME
    local NAME=$1
    local START_TIME
    local TOTAL_TIME
    METRIC_NAME=$(echo "$1" | sed -e 's|[\ \///:]|_|g')
    END_TIME=$(date +%s)
    if ! grep -c "^$METRIC_NAME" $METRICS_START_TIMES &>/dev/null; then
        echo "Please call start_metric before calling stop_metric for $NAME" >&2
        exit 1
    fi
    LINE=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES)
    START_TIME=$(grep "^$METRIC_NAME:" $METRICS_START_TIMES | cut -d ':' -f '2')
    TOTAL_TIME="$(($END_TIME - $START_TIME))"
    record_metric "$METRIC_NAME" "$TOTAL_TIME" "$END_TIME"
}

function metrics_to_graphite {
    # With a lot of metrics, the trace output gets pretty spammy
    set +x
    echo "Sending metrics to graphite"
    local SERVER=$1
    local PORT=${2:-2003} # default port for graphite data
    local METRIC_NAME
    local METRIC_VAL
    local DTS
    set +e # ignore errors posting metrics results
    for X in $(cat $METRICS_DATA_FILE); do
        METRIC_NAME=$(echo $X | cut -d ":" -f 1)
        METRIC_VAL=$(echo $X | cut -d ":" -f 2)
        DTS=$(echo $X | cut -d ":" -f 3)
        echo "Sending $METRIC_NAME to Graphite"
        echo "$METRIC_NAME $METRIC_VAL $DTS" | nc ${SERVER} ${PORT}
    done
    set -e
    # reset the existing data file and start times
    echo "" > $METRICS_START_TIMES
    echo "" > $METRICS_DATA_FILE
    set -x
}
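A minimal usage sketch for these helpers, assuming the file is sourced from a job script; the metric names and the Graphite host are illustrative:

source scripts/metrics.bash
start_metric "tripleo.master.ha.undercloud.install.seconds"
# ... the work being timed ...
stop_metric "tripleo.master.ha.undercloud.install.seconds"
record_metric "tripleo.master.ha.overcloud.image.size_mb" 1234
metrics_to_graphite graphite.example.com   # port defaults to 2003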

View File

@@ -1,40 +0,0 @@
#!/bin/bash
RELEASE=$1
BUILDS="/var/www/html/builds-${RELEASE}/current-tripleo"
MIRRORURL="https://images.rdoproject.org/${RELEASE}/delorean/current-tripleo"
IMAGES="overcloud-full.tar ironic-python-agent.tar"

function check_new_image {
    local img=$1
    wget ${MIRRORURL}/${img}.md5 -O test_md5 -o /dev/null || {
        echo "File ${MIRRORURL}/${img}.md5 is not present, can NOT continue"
        exit 1
    }
    diff -q test_md5 ${img}.md5 >/dev/null
}

function update_images {
    for img in $IMAGES; do
        wget ${MIRRORURL}/${img} -O ${img}-${RELEASE}
        wget ${MIRRORURL}/${img}.md5 -O ${img}-${RELEASE}.md5
        down_md5="$(cat ${img}-${RELEASE}.md5 | awk {'print $1'})"
        real_md5="$(md5sum ${img}-${RELEASE} | awk {'print $1'})"
        if [[ "$down_md5" == "$real_md5" ]]; then
            mv -f ${img}-${RELEASE} ${img}
            mv -f ${img}-${RELEASE}.md5 ${img}.md5
        else
            echo "md5 doesn't match, image download was broken!"
            echo "Calculated md5 is $real_md5 and downloaded is $down_md5"
            rm -f "${img}-${RELEASE}"
            rm -f "${img}-${RELEASE}.md5"
        fi
    done
    wget ${MIRRORURL}/delorean_hash.txt -O delorean_hash.txt -o /dev/null
}

mkdir -p $BUILDS
pushd $BUILDS
check_new_image overcloud-full.tar && echo "${RELEASE} images are up to date" || update_images
rm -f test_md5
popd
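Example invocation, matching how the cron entries in the mirror-server manifest below call it:

./mirror-images.sh master | tee /var/log/images_update-master.log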

View File

@@ -1,77 +0,0 @@
Exec { path => [ "/bin/", "/sbin/" ] }

package{"wget": }
package{"python34": }

# The git repositories are created in an unconfined context
# TODO: fix this
exec{"setenforce 0":}

vcsrepo {"/opt/stack/tripleo-ci":
  source   => "https://opendev.org/openstack/tripleo-ci",
  provider => git,
  ensure   => latest,
}

file { "/etc/sysconfig/network-scripts/ifcfg-eth1":
  ensure  => "present",
  content => "DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no\nNM_CONTROLLED=no",
} ~> exec{"ifrestart":
  command => "ifdown eth1 ; ifup eth1",
}

class { "apache":
} ->
file {"/var/www/cgi-bin/upload.cgi":
  ensure => "link",
  target => "/opt/stack/tripleo-ci/scripts/mirror-server/upload.cgi",
} ->
file {"/var/www/html/builds":
  ensure => "directory",
  owner  => "apache",
}

file { '/var/www/html/builds-master':
  ensure => 'link',
  target => '/var/www/html/builds',
}

file {"/var/www/html/builds-pike":
  ensure => "directory",
  owner  => "apache",
}

file {"/var/www/html/builds-ocata":
  ensure => "directory",
  owner  => "apache",
}

file {"/var/www/html/builds-newton":
  ensure => "directory",
  owner  => "apache",
}

cron {"refresh-server":
  command => "timeout 20m puppet apply /opt/stack/tripleo-ci/scripts/mirror-server/mirror-server.pp",
  minute  => "*/30"
}

cron {"mirror-images-master":
  command => "timeout 60m /opt/stack/tripleo-ci/scripts/mirror-server/mirror-images.sh master | tee /var/log/images_update-master.log",
  hour    => "2",
  minute  => "0"
}

cron {"mirror-images-queens":
  command => "timeout 60m /opt/stack/tripleo-ci/scripts/mirror-server/mirror-images.sh queens | tee /var/log/images_update-queens.log",
  hour    => "2",
  minute  => "0"
}

cron {"mirror-images-pike":
  command => "timeout 60m /opt/stack/tripleo-ci/scripts/mirror-server/mirror-images.sh pike | tee /var/log/images_update-pike.log",
  hour    => "2",
  minute  => "0"
}

cron {"mirror-images-ocata":
  command => "timeout 60m /opt/stack/tripleo-ci/scripts/mirror-server/mirror-images.sh ocata | tee /var/log/images_update-ocata.log",
  hour    => "2",
  minute  => "0"
}

cron {"mirror-images-newton":
  command => "timeout 60m /opt/stack/tripleo-ci/scripts/mirror-server/mirror-images.sh newton | tee /var/log/images_update-newton.log",
  hour    => "2",
  minute  => "0"
}

View File

@@ -1,155 +0,0 @@
#!/usr/bin/env python
import gzip
import logging
import os
import re
import sys

import requests
from requests import ConnectionError
from requests.exceptions import Timeout

logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger('scraper')
log.setLevel(logging.DEBUG)

dlrn_re = re.compile(
    r'((?P<p1>[0-9a-z]{2})/(?P<p2>[0-9a-z]{2})/(?P=p1)(?P=p2)[0-9a-z_]+)')


class config(object):
    WEB_TIMEOUT = (3.05, 1)
    runs = 4  # parse jobs from last 4 runs
    jobs = [
        'periodic-tripleo-ci-centos-7-scenario001-multinode-oooq',
        'periodic-tripleo-ci-centos-7-scenario002-multinode-oooq',
        'periodic-tripleo-ci-centos-7-scenario003-multinode-oooq',
        'periodic-tripleo-ci-centos-7-scenario004-multinode-oooq',
    ]
    api_url = 'http://health.openstack.org/runs/key/build_name/%s/recent'
    cache_file = "/tmp/cached_results_for_multinode_jobs.gz"
    # Only master now
    base_dir = '/var/www/html/builds/'


class Web(object):
    """Download web page

    Web class for downloading web page
    """

    def __init__(self, url):
        self.url = url

    def get(self):
        """Get web file

        :return: request obj
        """
        log.debug("GET %s", self.url)
        try:
            req = requests.get(self.url, timeout=config.WEB_TIMEOUT)
        except ConnectionError:
            log.error("Connection error when retrieving %s", self.url)
            return None
        except Timeout:
            log.error("Timeout reached when retrieving %s", self.url)
            return None
        except Exception as e:
            log.error("Unknown error when retrieving %s: %s", self.url, str(e))
            return None
        if int(req.status_code) != 200:
            log.warn("Page %s got status %s", self.url, req.status_code)
        return req


def last_runs(job, limit=1):
    web = Web(config.api_url % job)
    data = web.get()
    # Web.get() returns None on failure, so guard before touching data.ok
    if data is not None and data.ok:
        try:
            return data.json()[:limit]
        except ValueError as e:
            log.error("Failed to get JSON from %s:%s", config.api_url % job, e)
    else:
        log.error("Failed to get API data for %s", config.api_url % job)
    return []


def extract_dlrn(url):
    repo_url = url + "/logs/undercloud/etc/yum.repos.d/delorean.repo.txt.gz"
    web = Web(repo_url)
    req = web.get()
    if req is None or not req.ok:
        log.debug("Trying to download repo file again")
        web = Web(repo_url)
        req = web.get()
    if req is None or not req.ok:
        log.error("Failed to retrieve repo file: %s", repo_url)
        return None
    else:
        for line in req.content.split("\n"):
            if dlrn_re.search(line):
                return dlrn_re.search(line).group(1)
        log.error("Failed to find DLRN trunk hash in the file %s", repo_url)
        return None


def check_cached_result(link):
    if os.path.exists(config.cache_file):
        with gzip.open(config.cache_file, "rb") as f:
            for line in f:
                if link in line:
                    return line.split("=")[1].strip()
    return None


def add_to_cache(link, dlrn):
    with gzip.open(config.cache_file, "ab") as f:
        f.write(link + "=" + dlrn + "\n")


def process_job(run):
    link = run['link']
    result = run['status'] == 'success'
    dlrn = check_cached_result(link)
    if not dlrn:
        dlrn = extract_dlrn(link)
        if dlrn:
            add_to_cache(link, dlrn)
    return dlrn, result


def found(dlrn):
    if not dlrn:
        return False
    metadata = os.path.join(config.base_dir, dlrn, "metadata.txt")
    return os.path.exists(metadata)


def add_job_to_metadata(dlrn, job):
    path = os.path.join(config.base_dir, dlrn, "metadata.txt")
    success = job + "=SUCCESS"
    with open(path, "r") as f:
        if success in f.read():
            return
    with open(path, "a") as f:
        f.write(success + "\n")


def main():
    jobs = sys.argv[1:] or config.jobs
    for job in jobs:
        log.debug("Working on job %s", job)
        for run in last_runs(job, config.runs):
            log.debug("Checking run from %s and link %s",
                      run["start_date"], run["link"])
            dlrn_hash, passed = process_job(run)
            log.debug("Extracted DLRN=%s passed=%s",
                      str(dlrn_hash), str(passed))
            if passed and found(dlrn_hash):
                log.debug("Adding success to metadata of %s", dlrn_hash)
                add_job_to_metadata(dlrn_hash, job)


if __name__ == '__main__':
    main()
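Illustrative usage, assuming the file is saved as scraper.py; with no arguments it falls back to the job list in config.jobs:

python scraper.py periodic-tripleo-ci-centos-7-scenario001-multinode-oooq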

View File

@@ -1,64 +0,0 @@
#!/bin/bash
set -ex
# Here be the promote script
# 1. Find all metadata files newer then the one currently promoted
# 2. If any of them have all the jobs reported back that we're interested in then promote it
# o Bumb current-tripleo on the dlrn server
# o Bump current-tripleo on this server
# ./promote.sh release linkname promote-jobname test-jobname [test-jobname] ...
RELEASE=$1
LINKNAME=$2
PROMOTE_JOBNAME=$3
BASEDIR=/var/www/html/builds-$RELEASE
CURRENT=$BASEDIR/$LINKNAME
CURRENT_META=$CURRENT/metadata.txt
JOB_URL=https://ci.centos.org/job/$PROMOTE_JOBNAME/buildWithParameters
shift
shift
shift
# Working with relative paths is easier as we need to set relative links on the dlrn server
pushd $BASEDIR
mkdir -p $CURRENT
if [ -f $CURRENT_META ] ; then
DIRS2TEST=$(find . -newer $CURRENT_META -name metadata.txt | xargs --no-run-if-empty ls -t)
else
# We haven't had a successful promote yet, check the last 7 days for a success
DIRS2TEST=$(find . -mtime -7 -name metadata.txt | xargs --no-run-if-empty ls -t)
fi
[ -z "$DIRS2TEST" ] && exit 0
for DIR in $DIRS2TEST ; do
OK=0
for JOB in $@ ; do
grep "$JOB=SUCCESS" $DIR || OK=1
done
[ $OK == 1 ] && continue
DIR=$(dirname $DIR)
#(trown) Do not echo the curl command so we can keep the RDO_PROMOTE_TOKEN
# relatively secret. The token only provides access to promote the current-tripleo
# symlink, and is easy to change, but better to not advertise it in the logs.
set +x
source ~/.promoterc
curl $JOB_URL?token=$RDO_PROMOTE_TOKEN\&tripleo_dlrn_promote_hash=$(basename $DIR)
set -x
ln -snf $DIR $CURRENT
break
done
# Remove any files older than 1 day that aren't one of the current pins
find */*/* -type f -name metadata.txt -mtime +0 \
-not -samefile $LINKNAME/metadata.txt | \
xargs --no-run-if-empty dirname | \
xargs --no-run-if-empty -t rm -rf
# Remove all empty nested directories
find . -type d -empty -delete
popd

View File

@ -1,80 +0,0 @@
#!/bin/python3.4
from builtins import FileExistsError
import cgi
import fcntl
import os
import shutil
import sys
import tempfile
basedir = "/var/www/html/"
print("Content-Type: text/html\n")
def saveform(form, storagedir):
for key in form.keys():
entry = form[key]
if not isinstance(entry.value, str):
# Ensure no directory entries sneak in
filename = os.path.split(entry.filename)[1]
filename = os.path.join(storagedir, filename)
if os.path.exists(filename):
print("already received %s" % entry.filename)
continue
fp = tempfile.NamedTemporaryFile(delete=False)
count = 1
while count > 0:
data = entry.file.read(1024 * 16)
count = fp.write(data)
fp.close()
shutil.move(fp.name, filename)
else:
line = "%s=%s\n" % (entry.name, entry.value)
fp = open(os.path.join(storagedir, "metadata.txt"), "a+")
fp.seek(0)  # in Python 3, "a+" positions at EOF; rewind so the duplicate check reads the whole file
if line not in fp.read():
fp.write(line)
fp.close()
def run():
if not os.environ.get("REMOTE_ADDR", "").startswith("192.168."):
print("File uploads only allowed from the tripleo test network")
return 1
form = cgi.FieldStorage()
try:
repohash = form["repohash"].value
except KeyError:
print("repohash missing")
return 1
try:
branch_dir = form["folder"].value
except KeyError:
print("upload folder missing")
return 1
storagedir = os.path.abspath(os.path.join(basedir, branch_dir, repohash))
if basedir not in storagedir:
print("incorrect hash")
return 1
try:
os.makedirs(storagedir)
except FileExistsError:
pass
fd = os.open("/tmp/lock", os.O_WRONLY | os.O_CREAT)
fcntl.lockf(fd, fcntl.LOCK_EX)
try:
saveform(form, storagedir)
finally:
fcntl.lockf(fd, fcntl.LOCK_UN)
os.close(fd)
sys.exit(run())

View File

@ -1,60 +0,0 @@
#!/bin/bash
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -eu
set -o pipefail
SCRIPT_NAME=$(basename $0)
SCRIPT_HOME=$(dirname $0)
function show_options {
echo "Usage: $SCRIPT_NAME"
echo
echo "Create a random password."
echo
echo "This outputs a random password."
echo
echo "The password is made by taking a uuid and passing it though sha1sum."
echo "We may change this in future to gain more entropy."
echo
exit $1
}
TEMP=`getopt -o h -n $SCRIPT_NAME -- "$@"`
if [ $? != 0 ]; then
echo "Terminating..." >&2;
exit 1;
fi
# Note the quotes around `$TEMP': they are essential!
eval set -- "$TEMP"
while true ; do
case "$1" in
-h) show_options 0;;
--) shift ; break ;;
*) echo "Error: unsupported option $1." ; exit 1 ;;
esac
done
EXTRA=${1:-""}
if [ -n "$EXTRA" ]; then
show_options 1
fi
uuidgen | sha1sum | awk '{print $1}'

View File

@ -1,84 +0,0 @@
#!/bin/bash
set -ex
export PATH=$PATH:scripts
source $1
# Script to deploy the base infrastructure required to create the ovb-common and ovb-testenv stacks
# Parts of this script could have been a Heat stack, but not all of it
# We can't use Heat to create the flavors because the Heat resource can't give them a name
nova flavor-show bmc || nova flavor-create bmc auto 512 20 1
nova flavor-show baremetal || nova flavor-create baremetal auto 8192 41 4
nova flavor-show undercloud || nova flavor-create undercloud auto 8192 41 4
# Remove the flavors that provide the most disk space. The disks on rh2 are small and we've overcommitted
# disk space, so this helps protect against a single instance filling the disk on a compute node
nova flavor-delete m1.large || true
nova flavor-delete m1.xlarge || true
glance image-show 'CentOS-7-x86_64-GenericCloud' || \
glance image-create --progress --name 'CentOS-7-x86_64-GenericCloud' --is-public true --disk-format qcow2 --container-format bare \
--copy-from http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
glance image-show 'ipxe-boot' || \
glance image-create --name ipxe-boot --is-public true --disk-format qcow2 --property os_shutdown_timeout=5 --container-format bare \
--copy-from https://opendev.org/openstack/openstack-virtual-baremetal/raw/branch/master/ipxe/ipxe-boot.qcow2
# Create a pool of floating IPs
neutron net-show public || neutron net-create public --router:external=True
neutron subnet-show public_subnet || neutron subnet-create --name public_subnet --enable_dhcp=False --allocation_pool start=$PUBLIC_IP_FLOATING_START,end=$PUBLIC_IP_FLOATING_END --gateway $PUBLIC_IP_GATWAY public $PUBLIC_IP_NET
# Create a shared private network
neutron net-show private || neutron net-create --shared private
neutron subnet-show private_subnet || neutron subnet-create --name private_subnet --gateway 192.168.100.1 --allocation-pool start=192.168.100.2,end=192.168.103.254 --dns-nameserver 8.8.8.8 private 192.168.100.0/22
# Give outside access to the private network
if ! neutron router-show private_router ; then
neutron router-create private_router
neutron router-gateway-set private_router public
neutron router-interface-add private_router private_subnet
fi
# Keys to be used in the infrastructure
nova keypair-show tripleo-cd-admins || nova keypair-add --pub-key scripts/tripleo-cd-admins tripleo-cd-admins
# Create a new project/user whose creds will be injected into the te-broker for creating heat stacks
./scripts/assert-user -n openstack-nodepool -t openstack-nodepool -u openstack-nodepool -e openstack-nodepool@noreply.org || true
NODEPOOLUSERID=$(openstack user show openstack-nodepool | awk '$2=="id" {print $4}')
NODEPOOLPROJECTID=$(openstack project show openstack-nodepool | awk '$2=="id" {print $4}')
nova quota-update --instances 9999 --cores 9999 --ram $QUOTA_RAM --floating-ips $QUOTA_FIPS $NODEPOOLPROJECTID
nova quota-update --instances 9999 --cores 9999 --ram $QUOTA_RAM --floating-ips $QUOTA_FIPS --user $NODEPOOLUSERID $NODEPOOLPROJECTID
neutron quota-update --network $QUOTA_NETS --subnet $QUOTA_NETS --port $QUOTA_PORTS --floatingip $QUOTA_FIPS --tenant-id $NODEPOOLPROJECTID
touch ~/nodepoolrc
chmod 600 ~/nodepoolrc
echo -e "export OS_USERNAME=openstack-nodepool\nexport OS_TENANT_NAME=openstack-nodepool" > ~/nodepoolrc
echo "export OS_AUTH_URL=$OS_AUTH_URL" >> ~/nodepoolrc
set +x
PASSWORD=$(grep openstack-nodepool os-asserted-users | awk '{print $2}')
echo "export OS_PASSWORD=$PASSWORD" >> ~/nodepoolrc
set -x
source ~/nodepoolrc
nova keypair-show tripleo-cd-admins || nova keypair-add --pub-key scripts/tripleo-cd-admins tripleo-cd-admins
# And finally some servers we need
nova show te-broker || nova boot --flavor m1.medium --image "CentOS-7-x86_64-GenericCloud" --key-name tripleo-cd-admins --nic net-name=private,v4-fixed-ip=$TEBROKERIP --user-data scripts/deploy-server.sh --file "/etc/nodepoolrc=$HOME/nodepoolrc" te-broker
nova show mirror-server || nova boot --flavor m1.medium --image "CentOS-7-x86_64-GenericCloud" --key-name tripleo-cd-admins --nic net-name=private,v4-fixed-ip=$MIRRORIP --user-data scripts/deploy-server.sh mirror-server
nova show proxy-server || nova boot --flavor m1.medium --image "CentOS-7-x86_64-GenericCloud" --key-name tripleo-cd-admins --nic net-name=private,v4-fixed-ip=$PROXYIP --user-data scripts/deploy-server.sh proxy-server
if ! nova image-show bmc-template ; then
nova keypair-add --pub-key ~/.ssh/id_rsa.pub undercloud
nova boot --flavor bmc --image "CentOS-7-x86_64-GenericCloud" --key-name undercloud --user-data scripts/deploy-server.sh bmc-template
FLOATINGIP=$(nova floating-ip-create $EXTNET | grep public | awk '{print $4}')
nova floating-ip-associate bmc-template $FLOATINGIP
while ! ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=2 centos@$FLOATINGIP ls /var/tmp/ready ; do
sleep 10
done
nova image-create --poll bmc-template bmc-template
nova delete bmc-template
nova keypair-delete undercloud
fi

View File

@ -1,21 +0,0 @@
Exec { path => [ "/bin/", "/sbin/" ] }
vcsrepo {"/opt/stack/tripleo-ci":
source => "https://opendev.org/openstack/tripleo-ci",
provider => git,
ensure => latest,
}
cron {"refresh-server":
command => "timeout 20m puppet apply /opt/stack/tripleo-ci/scripts/proxy-server/proxy-server.pp",
minute => "*/30"
}
package{"squid": } ->
file {"/etc/squid/squid.conf":
source => "/opt/stack/tripleo-ci/scripts/proxy-server/squid.conf",
} ~>
service {"squid":
ensure => "running",
enable => true,
}

View File

@ -1,78 +0,0 @@
#
# Recommended minimum configuration:
#
# Example rule allowing access from your local networks.
# Adapt to list your (internal) IP networks from where browsing
# should be allowed
acl localnet src 10.0.0.0/8 # RFC1918 possible internal network
acl localnet src 172.16.0.0/12 # RFC1918 possible internal network
acl localnet src 192.168.0.0/16 # RFC1918 possible internal network
acl localnet src fc00::/7 # RFC 4193 local private network range
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
acl SSL_ports port 443
acl Safe_ports port 80 # http
acl Safe_ports port 21 # ftp
acl Safe_ports port 443 # https
acl Safe_ports port 70 # gopher
acl Safe_ports port 210 # wais
acl Safe_ports port 1025-65535 # unregistered ports
acl Safe_ports port 280 # http-mgmt
acl Safe_ports port 488 # gss-http
acl Safe_ports port 591 # filemaker
acl Safe_ports port 777 # multiling http
acl CONNECT method CONNECT
#
# Recommended minimum Access Permission configuration:
#
# Deny requests to certain unsafe ports
http_access deny !Safe_ports
# Deny CONNECT to other than secure SSL ports
http_access deny CONNECT !SSL_ports
# Only allow cachemgr access from localhost
http_access allow localhost manager
http_access deny manager
# We strongly recommend the following be uncommented to protect innocent
# web applications running on the proxy server who think the only
# one who can access services on "localhost" is a local user
#http_access deny to_localhost
#
# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
#
# Example rule allowing access from your local networks.
# Adapt localnet in the ACL section to list your (internal) IP networks
# from where browsing should be allowed
http_access allow localnet
http_access allow localhost
# And finally deny all other access to this proxy
http_access deny all
# Squid normally listens to port 3128
http_port 3128
# stray from the default here so that we can cache cloud images
# (derekh): carried over from rh1; may no longer be needed
maximum_object_size 1024 MB
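# 16 GB on-disk cache, laid out as 16 first-level and 256 second-level directories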
cache_dir aufs /var/spool/squid 16384 16 256
# Leave coredumps in the first cache dir
coredump_dir /var/spool/squid
#
# Add any of your own refresh_pattern entries above these.
#
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
# Never cache repomd.xml in yum repositories as serving an old one
# causes yum installs to fail
refresh_pattern -i repomd.xml$ 0 0% 0
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern . 0 20% 4320

View File

@ -1,91 +0,0 @@
parameter_defaults:
CloudName: ci-overcloud.rh1.tripleo.org
ControllerExtraConfig:
tripleo::loadbalancer::public_virtual_ip: 66.187.229.2
neutron::agents::ml2::ovs::prevent_arp_spoofing: false
# https://bugs.launchpad.net/tripleo/+bug/1590101
# TripleO sets this to 1400; the MTU of most physical networks without jumbo frames is 1500.
# TripleO also forces dhcp-option-force=26,1400, which leaves no overhead room for vxlan.
# We probably shouldn't force this, as neutron automatically subtracts the overlay protocol overhead from global_physnet_mtu.
# TODO: investigate properly
neutron::global_physnet_mtu: 1500
# this is deprecated but takes precedence(?)
neutron::network_device_mtu: 1500
# rh2 disks are small; we're relying on the fact that CI jobs at different stages won't
# ever hit peak disk usage together (also they don't use all that's allocated in the flavor)
nova::scheduler::filter::disk_allocation_ratio: 4
# We have a few compute nodes in rh1 that aren't using/don't have SSDs. Since
# those nodes are using a 1 TB HDD instead, they tend to get preferred by the
# scheduler if we use the normal weighers. In reality, we care mostly about
# evenly distributing the RAM across nodes, so let's only use that weigher.
nova::scheduler::filter::scheduler_weight_classes: nova.scheduler.weights.ram.RAMWeigher
# More than 4 Heat engine workers allows Heat to essentially DoS the other services
# if a lot of jobs come in at once. This is borrowed from
# worker-config-mitaka-and-below.yaml. Once rh1 is on a newer release we should
# be able to just use HeatWorkers.
heat::config::heat_config:
DEFAULT/num_engine_workers:
value: 4
# Limit the workers for these services. More workers doesn't appreciably help our
# capacity, so it's just a waste of memory and CPU.
HeatWorkers: 12
NovaWorkers: 12
NovaComputeExtraConfig:
neutron::agents::ml2::ovs::prevent_arp_spoofing: false
neutron::plugins::ml2::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
neutron::agents::ml2::ovs::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
neutron::global_physnet_mtu: 1500
# this is deprecated but takes precedence(?)
neutron::network_device_mtu: 1500
# Allow file injection so that the nodepool cloud creds can be injected into the te-broker
nova::compute::libvirt::libvirt_inject_partition: -1
# This should be OK if the cloud is exclusively for CI but it might end in tears
nova::compute::libvirt::libvirt_disk_cachemodes:
- file=unsafe
EndpointMap:
AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
KeystoneV3Admin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
KeystoneV3Internal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
KeystoneV3Public: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
resource_registry:
OS::TripleO::NodeTLSData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/tls-cert-inject.yaml

View File

@ -1,22 +0,0 @@
#!/bin/bash
export PUBLIC_IP_NET=66.187.229.0/24
export PUBLIC_IP_GATWAY=66.187.229.1
# TODO: remove the iptables rules on the bastion preventing some traffic
# to some of the IPs at the start of the public range.
export PUBLIC_IP_FLOATING_START=66.187.229.16
export PUBLIC_IP_FLOATING_END=66.187.229.254
# The total number of testenv-workers running, to allow for timeouts
# Should be slightly higher than the max number of CI slaves running.
export TOTALOVBENVS=80
export QUOTA_RAM=3145728
export QUOTA_FIPS=75
export QUOTA_NETS=1000
export QUOTA_PORTS=10000
export TEBROKERIP=192.168.103.254
export MIRRORIP=192.168.103.253
export PROXYIP=192.168.103.252
export http_proxy="http://$PROXYIP:3128/"

View File

@ -1,75 +0,0 @@
parameter_defaults:
CloudName: ci-overcloud.rh2.tripleo.org
ControllerExtraConfig:
tripleo::loadbalancer::public_virtual_ip: 8.43.87.224
neutron::agents::ml2::ovs::prevent_arp_spoofing: false
# https://bugs.launchpad.net/tripleo/+bug/1590101
# TripleO sets this to 1400; the MTU of most physical networks without jumbo frames is 1500.
# TripleO also forces dhcp-option-force=26,1400, which leaves no overhead room for vxlan.
# We probably shouldn't force this, as neutron automatically subtracts the overlay protocol overhead from global_physnet_mtu.
# TODO: investigate properly
neutron::global_physnet_mtu: 1500
# this is deprecated but takes precedence(?)
neutron::network_device_mtu: 1500
# rh2 disks are small; we're relying on the fact that CI jobs at different stages won't
# ever hit peak disk usage together (also they don't use all that's allocated in the flavor)
nova::scheduler::filter::disk_allocation_ratio: 3
NovaComputeExtraConfig:
neutron::agents::ml2::ovs::prevent_arp_spoofing: false
neutron::plugins::ml2::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
neutron::agents::ml2::ovs::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
neutron::global_physnet_mtu: 1500
# this is deprecated but takes precedence(?)
neutron::network_device_mtu: 1500
# Allow file injection so that the nodepool cloud creds can be injected into the te-broker
nova::compute::libvirt::libvirt_inject_partition: -1
# This should be OK if the cloud is exclusively for CI but it might end in tears
nova::compute::libvirt::libvirt_disk_cachemodes:
- file=unsafe
EndpointMap:
AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
KeystoneV3Admin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
KeystoneV3Internal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
KeystoneV3Public: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
resource_registry:
OS::TripleO::NodeTLSData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/tls-cert-inject.yaml

View File

@ -1,19 +0,0 @@
#!/bin/bash
export PUBLIC_IP_NET=8.43.86.0/23
export PUBLIC_IP_GATWAY=8.43.87.254
export PUBLIC_IP_FLOATING_START=8.43.87.225
export PUBLIC_IP_FLOATING_END=8.43.87.253
# The total number of testenv-workers running, to allow for timeouts
# Should be slightly higher than the max number of CI slaves running.
export TOTALOVBENVS=20
export QUOTA_RAM=655360
export QUOTA_FIPS=20
export QUOTA_NETS=25
export QUOTA_PORTS=250
export TEBROKERIP=192.168.103.254
export MIRRORIP=192.168.103.253
export http_proxy="http://192.168.103.252:3128/"

View File

@ -1,52 +0,0 @@
#!/bin/bash
TRIPLEO_OS_FAMILY='unsupported' # Generic OS Family: debian, redhat, suse
TRIPLEO_OS_DISTRO='unsupported' # Specific distro: centos, fedora, rhel,
# opensuse, sles, ubuntu
if [ -f /etc/redhat-release ]; then
TRIPLEO_OS_FAMILY='redhat'
if grep -Eqs 'Red Hat Enterprise Linux' /etc/redhat-release; then
TRIPLEO_OS_DISTRO='rhel'
fi
if grep -Eqs 'Derived from Red Hat Enterprise Linux' /etc/redhat-release; then
TRIPLEO_OS_DISTRO='centos'
fi
if grep -Eqs 'CentOS' /etc/redhat-release; then
TRIPLEO_OS_DISTRO='centos'
fi
if grep -Eqs 'Fedora' /etc/redhat-release; then
TRIPLEO_OS_DISTRO='fedora'
fi
fi
if [ -f /etc/debian_version ]; then
TRIPLEO_OS_FAMILY='debian'
if grep -Eqs 'Ubuntu' /etc/lsb-release; then
TRIPLEO_OS_DISTRO='ubuntu'
fi
if grep -Eqs 'Debian' /etc/os-release; then
TRIPLEO_OS_DISTRO='debian'
fi
fi
function get_os_release {
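# Source os-release in a subshell so its variables don't leak into this script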
(
source /etc/os-release
echo $ID
)
}
if [ -f /etc/os-release ]; then
if [ "$(get_os_release)" = "opensuse" ]; then
TRIPLEO_OS_FAMILY='suse'
TRIPLEO_OS_DISTRO='opensuse'
fi
if [ "$(get_os_release)" = "sles" ]; then
TRIPLEO_OS_FAMILY='suse'
TRIPLEO_OS_DISTRO='sles'
fi
fi
export TRIPLEO_OS_FAMILY
export TRIPLEO_OS_DISTRO

View File

@ -1,126 +0,0 @@
#!/bin/bash
set -ex
ENVNUM=${1:-$(date +%s)}
NODECOUNT=${2:-2}
UCINSTANCE=${3:-}
CREATE_UNDERCLOUD=${4:-""}
SSH_KEY=${5:-""}
# We recognize 3 values for NETISO: none, multi-nic, public-bond
NETISO=${6:-'multi-nic'}
COMPUTE_NODECOUNT=${7:-0}
EXTRA_NODECOUNT=${8:-0}
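# Usage: create-env [ENVNUM] [NODECOUNT] [UCINSTANCE] [CREATE_UNDERCLOUD] [SSH_KEY] [NETISO] [COMPUTE_NODECOUNT] [EXTRA_NODECOUNT]
# (all arguments are optional and default to the values above)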
PROVISIONNET=provision-${ENVNUM}
PUBLICNET=public-${ENVNUM}
ENVFILE=env-${ENVNUM}-base.yaml
COMPUTE_ENVFILE=env-${ENVNUM}-compute.yaml
EXTRA_ENVFILE=env-${ENVNUM}-extra.yaml
ROLE_ARGS=
set +x
source /etc/nodepoolrc
set -x
if [ ! -e /opt/stack/openstack-virtual-baremetal ] ; then
sudo git clone https://opendev.org/openstack/openstack-virtual-baremetal.git /opt/stack/openstack-virtual-baremetal
sudo chown -R $USER /opt/stack/openstack-virtual-baremetal
fi
cd /opt/stack/openstack-virtual-baremetal/
if [ -n "$SSH_KEY" ]; then
echo "$SSH_KEY" > temp-key-$ENVNUM.pub
KEY_NAME="tripleo-ci-key-$ENVNUM"
openstack keypair create --public-key temp-key-$ENVNUM.pub $KEY_NAME
else
KEY_NAME="tripleo-cd-admins"
fi
BUILD_NODES_JSON_EXTRA_ARGS=""
/bin/cp --remove-destination environments/base.yaml $ENVFILE
sed -i -e "s/bmc_image:.*/bmc_image: bmc-template/" $ENVFILE
sed -i -e "s/key_name:.*/key_name: ${KEY_NAME}/" $ENVFILE
sed -i -e "s/node_count:.*/node_count: ${NODECOUNT}/" $ENVFILE
sed -i -e "s/provision_net_shared:.*/provision_net_shared: true/" $ENVFILE
sed -i -e "s/public_net_shared:.*/public_net_shared: true/" $ENVFILE
if [[ $NODEPOOL_PROVIDER == "rdo-cloud-tripleo" ]] || [[ $NODEPOOL_PROVIDER == "vexxhost-nodepool-tripleo" ]]; then
sed -i -e "s/provision_net_shared:.*/provision_net_shared: false/" $ENVFILE
sed -i -e "s/public_net_shared:.*/public_net_shared: false/" $ENVFILE
sed -i -e "s/bmc_flavor:.*/bmc_flavor: ${BMC_FLAVOR}/" $ENVFILE
sed -i -e "s/baremetal_flavor:.*/baremetal_flavor: ${BAREMETAL_FLAVOR}/" $ENVFILE
fi
if [ -n "$CREATE_UNDERCLOUD" ]; then
sed -i -e "s/undercloud_name:.*/undercloud_name: extra-node/" $ENVFILE
sed -i -e "s/undercloud_flavor:.*/undercloud_flavor: ${UNDERCLOUD_FLAVOR}/" $ENVFILE
BUILD_NODES_JSON_EXTRA_ARGS="--add_undercloud"
fi
ENVIRONMENT_ARGS="-e $ENVFILE -e environments/bmc-use-cache.yaml -e environments/undercloud-floating-none.yaml"
if [ $COMPUTE_NODECOUNT -gt 0 ]; then
sed -i -e "s/role:.*/role: control/" $ENVFILE
fi
if [ -z "$CREATE_UNDERCLOUD" ]; then
ENVIRONMENT_ARGS="$ENVIRONMENT_ARGS -e environments/quintupleo-no-undercloud.yaml"
fi
if [ $NETISO == 'multi-nic' ]; then
ENVIRONMENT_ARGS="$ENVIRONMENT_ARGS -e environments/all-networks-port-security.yaml"
elif [ $NETISO == 'public-bond' ]; then
ENVIRONMENT_ARGS="$ENVIRONMENT_ARGS -e environments/all-networks-public-bond-port-security.yaml"
else
ENVIRONMENT_ARGS="$ENVIRONMENT_ARGS -e environments/port-security.yaml"
fi
if [ $COMPUTE_NODECOUNT -gt 0 ]; then
/bin/cp --remove-destination environments/base-role.yaml $COMPUTE_ENVFILE
sed -i -e "s/baremetal_flavor:.*/baremetal_flavor: baremetal-compute/" $COMPUTE_ENVFILE
sed -i -e "s/key_name:.*/key_name: ${KEY_NAME}/" $COMPUTE_ENVFILE
sed -i -e "s/node_count:.*/node_count: ${COMPUTE_NODECOUNT}/" $COMPUTE_ENVFILE
# This is already the default in the template, but just to be safe.
sed -i -e "s/role:.*/role: compute/" $COMPUTE_ENVFILE
ROLE_ARGS="--role $COMPUTE_ENVFILE"
fi
if [ $EXTRA_NODECOUNT -gt 0 ]; then
EXTRA_NODE_KEY_NAME="extra_node_key"
/bin/cp --remove-destination environments/base-extra-node.yaml $EXTRA_ENVFILE
sed -i -e "s/baremetal_flavor:.*/baremetal_flavor: ${EXTRA_NODE_FLAVOR}/" $EXTRA_ENVFILE
sed -i -e "s/key_name:.*/key_name: ${EXTRA_NODE_KEY_NAME}/" $EXTRA_ENVFILE
sed -i -e "s/node_count:.*/node_count: ${EXTRA_NODECOUNT}/" $EXTRA_ENVFILE
sed -i -e "s/baremetal_image:.*/baremetal_image: ${BAREMETAL_IMAGE}/" $EXTRA_ENVFILE
# We changed the path of the environment, so we need to fix the relative
# path in the resource registry too.
sed -i -e "s|../templates/baremetal-ports-extra-node-port-security.yaml|templates/baremetal-ports-extra-node-port-security.yaml|" $EXTRA_ENVFILE
ROLE_ARGS="--role $EXTRA_ENVFILE"
BUILD_NODES_JSON_EXTRA_ARGS="$BUILD_NODES_JSON_EXTRA_ARGS --network_details"
fi
/opt/stack/openstack-virtual-baremetal/bin/deploy.py --name baremetal_${ENVNUM} --quintupleo --id ${ENVNUM} $ENVIRONMENT_ARGS $ROLE_ARGS
while ! heat stack-show baremetal_${ENVNUM} | grep CREATE_COMPLETE ; do
sleep 10
if heat stack-show baremetal_${ENVNUM} | grep CREATE_FAILED ; then
echo "Failed creating OVB stack" > $TE_DATAFILE
heat stack-show baremetal_${ENVNUM} >> $TE_DATAFILE 2>&1
openstack stack failures list --long baremetal_${ENVNUM} >> $TE_DATAFILE 2>&1 || echo 'Failed to list stack failures' >> $TE_DATAFILE
exit 0
fi
done
undercloud_networks="$PROVISIONNET"
nova interface-attach --net-id $(neutron net-show -F id -f value $PROVISIONNET) $UCINSTANCE
if [ $NETISO != 'none' ]; then
nova interface-attach --net-id $(neutron net-show -F id -f value $PUBLICNET) $UCINSTANCE
undercloud_networks="$undercloud_networks $PUBLICNET"
fi
for PORT_SECURITY_NET in $undercloud_networks; do
neutron port-update $(openstack port list --network $PORT_SECURITY_NET --server $UCINSTANCE -f value -c ID) \
--no-security-groups --port-security-enabled=False
done
# This writes out the env file as env-ID.yaml, which overwrites the one we have created
/opt/stack/openstack-virtual-baremetal/bin/build-nodes-json --env env-${ENVNUM}.yaml --network_details \
--nodes_json ${TE_DATAFILE:-~/instackenv.json} $BUILD_NODES_JSON_EXTRA_ARGS

View File

@ -1,81 +0,0 @@
#!/bin/bash
set -xe
ENVNUM=${1:-$(date +%s)}
PROVISIONNET=provision-${ENVNUM}
PUBLICNET=public-${ENVNUM}
ENVFILE=env-${ENVNUM}-base.yaml
COMPUTE_ENVFILE=env-${ENVNUM}-compute.yaml
EXTRA_ENVFILE=env-${ENVNUM}-extra.yaml
rm -f /opt/stack/openstack-virtual-baremetal/$ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/env-${ENVNUM}.yaml
rm -f /opt/stack/openstack-virtual-baremetal/$COMPUTE_ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/$EXTRA_ENVFILE
rm -f /opt/stack/openstack-virtual-baremetal/temp-key-$ENVNUM.pub
set +x
source /etc/nodepoolrc
set -x
# NOTE(bnemec): This function starts the port deletions in the background.
# To ensure they complete before you proceed, you must call "wait" after
# calling this function.
function delete_ports {
local subnetid=${1:-}
if [ -z "$subnetid" ]; then
return
fi
for PORT in $(neutron port-list | grep $subnetid | awk '{print $2}') ; do
neutron port-delete $PORT &
done
}
# Save the end of the bmc log for debugging IPMI connectivity problems
PYTHONIOENCODING='utf-8'
CONSOLE_LOG_PATH=/var/www/html/tebroker/console-logs/
nova console-log bmc-${ENVNUM} | tail -n 100 | awk -v envnum="$ENVNUM" '$0=envnum ": " $0' >> /var/log/bmc-console-logs
# Save all the consoles in the stack to a dedicated directory, stripping out ANSI color codes.
for server in $(openstack server list -f value -c Name | grep baremetal-${ENVNUM}) bmc-$ENVNUM ; do
openstack console log show $server | sed 's/\[[0-9;]*[a-zA-Z]//g' > $CONSOLE_LOG_PATH/$server-console.log || true
done
# Delete the ports that have been attached to the undercloud
SUBNETID=$(neutron subnet-show $PUBLICNET | awk '$2=="id" {print $4}' || echo '')
delete_ports $SUBNETID
SUBNETID=$(neutron subnet-show $PROVISIONNET | awk '$2=="id" {print $4}')
delete_ports $SUBNETID
# Needed to ensure all ports have been deleted before we delete the heat stack
wait
# If there was a keypair for this specific run, delete it.
openstack keypair delete "tripleo-ci-key-$ENVNUM" || true
function delete_stack {
local stackname=$1
# Nothing to do if the specified stack doesn't exist
if ! heat stack-show $stackname; then
return 0
fi
# NOTE(bnemec): I'm periodically seeing the stack-delete fail to connect to
# Heat. It looks like a transient network issue, so let's just retry when it happens.
for i in $(seq 10); do
heat stack-delete -y $stackname && break
sleep 5
done
while heat stack-show $stackname > /dev/null 2>&1 ; do
# If the delete failed, try again
if heat stack-show $stackname | grep DELETE_FAILED ; then
heat stack-delete -y $stackname || true
fi
sleep 20
done
}
# Extra role stacks must be deleted first
delete_stack baremetal_${ENVNUM}-extra
delete_stack baremetal_${ENVNUM}-compute
delete_stack baremetal_${ENVNUM}

View File

@ -1,5 +0,0 @@
[Unit]
Description=Geard daemon
[Service]
ExecStart=/bin/geard -d --keepalive --keepalive-idle 1200 --keepalive-interval 60 --keepalive-count 10

View File

@ -1,26 +0,0 @@
#!/bin/bash
set +x
source /etc/nodepoolrc
# Keep X testenv workers running; each testenv worker exits after processing a single job
BASEPATH=$(realpath $(dirname $0)/../..)
ENVFILE=$BASEPATH/scripts/rh1.env
if [[ $NODEPOOL_PROVIDER == "rdo-cloud-tripleo" ]]; then
ENVFILE=$BASEPATH/scripts/rdocloud.env
elif [[ $NODEPOOL_PROVIDER == "vexxhost-nodepool-tripleo" ]]; then
ENVFILE=$BASEPATH/scripts/vexxhost.env
fi
TENUM=0
while true ; do
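# Each backgrounded testenv-worker below shows up as a running job in this shell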
NUMCURRENTJOBS=$(jobs -p -r | wc -l)
source $ENVFILE
if [ $NUMCURRENTJOBS -lt $TOTALOVBENVS ] ; then
TENUM=$(($TENUM+1))
echo "Starting testenv-worker $TENUM"
python $BASEPATH/scripts/te-broker/testenv-worker --tenum $TENUM $BASEPATH/scripts/te-broker/create-env $BASEPATH/scripts/te-broker/destroy-env &
fi
# Throttle a little so we don't end up hitting the openstack APIs too hard
sleep 10
done

View File

@ -1,27 +0,0 @@
Exec { path => [ "/bin/", "/sbin/" ] }
vcsrepo {"/opt/stack/openstack-virtual-baremetal":
source => "https://opendev.org/openstack/openstack-virtual-baremetal.git",
provider => git,
ensure => latest,
}
vcsrepo {"/opt/stack/tripleo-ci":
source => "https://opendev.org/openstack/tripleo-ci",
provider => git,
ensure => latest,
}
cron {"refresh-server":
command => "timeout 20m puppet apply /opt/stack/tripleo-ci/scripts/te-broker/te-broker.pp",
minute => "*/30"
}
service{"te_workers":
ensure => "running",
enable => true,
}
service{"geard":
ensure => "running",
enable => true,
}

View File

@ -1,13 +0,0 @@
#!/bin/bash
curl -L https://trunk.rdoproject.org/centos7-master/current-tripleo/delorean.repo > /etc/yum.repos.d/delorean.repo
curl -L https://trunk.rdoproject.org/centos7/delorean-deps.repo > /etc/yum.repos.d/delorean-deps.repo
yum install -y python-pip python-heatclient python-neutronclient python-novaclient python-swiftclient python-openstackclient
pip install gear
BASEPATH=$(realpath $(dirname $0))
cp $BASEPATH/geard.service /lib/systemd/system/geard.service
cp $BASEPATH/te_workers.service /lib/systemd/system/te_workers.service

View File

@ -1,5 +0,0 @@
[Unit]
Description=TE Workers
[Service]
ExecStart=/opt/stack/tripleo-ci/scripts/te-broker/start_workers.sh

View File

@ -1,313 +0,0 @@
#!/usr/bin/python
#
# Runs a tripleo-ci test-worker
#
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import json
import logging
import logging.handlers
import os
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import gear
from novaclient import client as novaclient
from novaclient import exceptions
# 100 MB log files
maxBytes = 1024*1024*100
logging.basicConfig(
filename="/var/www/html/tebroker/testenv-worker.log",
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class CallbackClient(gear.Client):
def __init__(self):
super(CallbackClient, self).__init__()
self.event = threading.Event()
def handleWorkComplete(self, packet):
super(CallbackClient, self).handleWorkComplete(packet)
self.event.set()
def handleWorkException(self, packet):
super(CallbackClient, self).handleWorkException(packet)
self.event.set()
def handleWorkFail(self, packet):
super(CallbackClient, self).handleWorkFail(packet)
self.event.set()
def wait(self, timeout=None):
"""Wait for notification of completion, error or failure.
:param timeout: a timeout for the operation in seconds
:type timeout: float
:returns: True if a notification was received, False on timeout
"""
self.event.wait(timeout)
return self.event.is_set()
class TEWorkerThread(threading.Thread):
def __init__(self, geard, num, timeout, scriptfiles):
super(TEWorkerThread, self).__init__()
self.geard = geard
self.timeout = timeout
self.scriptfiles = scriptfiles
self.running = True
self.num = num
self.worker = None
self.ucinstance = None
self.complete_event = None
def run(self):
try:
logger.info('running TE worker')
self.runJob()
except gear.InterruptedError:
logger.info('getJob interrupted...')
except Exception:
logger.exception('Error while running te_worker')
self.running = False
def runJob(self):
self.worker = gear.Worker('testenv-worker-%s' % self.num)
try:
self._add_servers(self.worker, self.geard)
self.worker.waitForServer()
self.worker.registerFunction('lockenv')
logger.info('Getting new job...')
job = self.worker.getJob()
logger.info('Received job : %s', job.arguments)
arguments = json.loads(job.arguments)
call_back = arguments["callback_name"]
self.ucinstance = arguments["ucinstance"]
job_timeout = int(arguments.get("timeout", self.timeout))
# Once this Job is called we call back to the client to run its
# commands while this environment is locked
self._run_callback(job_timeout, call_back, arguments)
job.sendWorkComplete("")
finally:
self.worker.shutdown()
def _add_servers(self, client, servers):
for server in servers.split(','):
server = server.rsplit(':', 1)
if len(server) == 1:
server.append('4730')
client.addServer(server[0], int(server[1]))
def _run_callback(self, timeout, callback_name, arguments):
client = CallbackClient()
self.complete_event = client.event
self._add_servers(client, self.geard)
client.waitForServer()
try:
with tempfile.NamedTemporaryFile('r') as fp:
os.environ["TE_DATAFILE"] = fp.name
logger.info(
subprocess.check_output([
self.scriptfiles[0],
self.num,
arguments.get("envsize", "2"),
arguments.get("ucinstance", ""),
arguments.get("create_undercloud", ""),
arguments.get("ssh_key", ""),
arguments.get("net_iso", "multi-nic"),
arguments.get("compute_envsize", "0"),
arguments.get("extra_nodes", "0"),
],
stderr=subprocess.STDOUT))
clientdata = fp.read()
except subprocess.CalledProcessError as e:
logger.error(e.output)
clientdata = "Couldn't retrieve env"
cb_job = gear.Job(callback_name, clientdata)
client.submitJob(cb_job)
# Wait for 30 seconds, then test the status of the job
if not client.wait(30):
# Request the job status from the broker
cb_job.connection.sendPacket(gear.Packet(gear.constants.REQ,
gear.constants.GET_STATUS,
cb_job.handle))
# Let a little time pass for the STATUS_RES to return. If we're in
# here we've already waited 30 seconds, so another 10 won't make much
# difference
time.sleep(10)
if not cb_job.running:
logger.error("No sign of the Callback job starting,"
"assuming its no longer present")
clientdata = subprocess.check_output(
[self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
logger.info(clientdata)
client.shutdown()
return
# We timeout after the configured timeout - the 40 second sleep that we
# perform during initial handshaking. Note that after this timeout we
# offer the environment to other test clients, but the prior client's
# credentials are still valid, so very confusing errors can occur if we
# were ever to timeout without the client timing out first.
client.wait(timeout - 40)
if cb_job.failure:
logger.error("The Job appears to have failed.")
elif not cb_job.complete:
logger.error("No sign of Job completing, Freeing environment.")
else:
logger.info('Returned from Job : %s', cb_job.data)
try:
clientdata = subprocess.check_output(
[self.scriptfiles[1], self.num], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger.error(e.output)
raise
logger.info(clientdata)
client.shutdown()
def _get_auth_values_from_rc():
"""Read auth details from /etc/nodepoolrc
:returns: A dict containing the following keys: user, tenant, auth_url
and password.
"""
values = {}
with open('/etc/nodepoolrc') as rc:
for line in rc.readlines():
parts = line.split('=', 1)
if 'OS_USERNAME' in parts[0]:
values['user'] = parts[1]
elif 'OS_TENANT' in parts[0]:
values['tenant'] = parts[1]
elif 'OS_AUTH_URL' in parts[0]:
values['auth_url'] = parts[1]
elif 'OS_PASSWORD' in parts[0]:
values['password'] = parts[1]
return {k: v.rstrip() for k, v in values.items()}
def _get_nova_client():
auth_values = _get_auth_values_from_rc()
nclient = novaclient.Client(2,
auth_values['user'],
auth_values['password'],
project_name=auth_values['tenant'],
auth_url=auth_values['auth_url']
)
return nclient
def _check_instance_alive(nclient, instance, event):
"""Check that instance still exists in Nova
Attempt to get the server specified by instance. If the server is not
found, set the client event to indicate the job has gone away and we
should clean up the testenv.
instance will be None if the worker has not yet been assigned to a
Jenkins slave, and we should do nothing in that case.
:param nclient: A novaclient instance
:param instance: The UUID of the instance to check
:param event: The gear client event to set if the instance has gone away.
"""
if instance:
try:
nclient.servers.get(instance)
except exceptions.NotFound:
# There is a very brief period of time where instance could be set
# and event not. It's unlikely to happen, but let's be safe.
if event:
event.set()
logger.info('Job instance "%s" went away.', instance)
def main(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Registers a test environment with a gearman broker, the '
'registered job "lockenv" then holds the environment in a '
'"locked" state while it calls back to the client. The '
"client's job is provided with data (contents of datafile)"
)
parser.add_argument(
'scriptfiles',
nargs=2,
help='Paths to the create and destroy scripts whose output is provided to the client')
parser.add_argument('--timeout', '-t', type=int, default=10800,
help='The maximum number of seconds to hold the '
'testenv for, can be overridden by the client.')
parser.add_argument('--tenum', '-n', default=uuid.uuid4().hex,
help='A unique identifier identifying this env on '
'this host.')
parser.add_argument('--geard', '-b', default='127.0.0.1:4730',
help='A comma separated list of gearman brokers to '
'connect to.')
parser.add_argument('--debug', '-d', action='store_true',
help='Set to debug mode.')
opts = parser.parse_args(args)
global logger
logger = logging.getLogger('testenv-worker-' + opts.tenum)
logger.addHandler(logging.handlers.RotatingFileHandler(
"/var/www/html/tebroker/testenv-worker.log",
maxBytes=maxBytes,
backupCount=5))
logger.setLevel(logging.INFO)
logger.removeHandler(logger.handlers[0])
if opts.debug:
logger.setLevel(logging.DEBUG)
logger.info('Starting test-env worker with data %r', opts.scriptfiles)
te_worker = TEWorkerThread(
opts.geard,
opts.tenum,
opts.timeout,
opts.scriptfiles)
te_worker.start()
counter = 0
nclient = _get_nova_client()
while te_worker.running:
counter += 1
# Only check for instance existence once per minute to avoid DoS'ing
# the controller
if counter % 60 == 0:
_check_instance_alive(nclient, te_worker.ucinstance,
te_worker.complete_event)
time.sleep(1)
if __name__ == '__main__':
main()

View File

@ -1,17 +0,0 @@
#!/bin/bash
function set_env {
export TO_BUILD=$1
echo $TO_BUILD
exit
}
# If we are uploading to the cache then we shouldn't use it
[[ "$PERIODIC" == 1 ]] && set_env "true"
# The updates job already takes a long time, always use cache for it
[[ "$TOCI_JOBTYPE" =~ updates ]] && set_env "false"
# There are some projects that require building images
for PROJFULLREF in ${ZUUL_CHANGES//^/ }; do
PROJ=${PROJFULLREF%%:*};
PROJ=${PROJ##*/};
[[ "$PROJ" =~ diskimage-builder|tripleo-image-elements|tripleo-puppet-elements|instack-undercloud|python-tripleoclient|tripleo-common ]] && set_env "true"
done
set_env "false"

View File

@ -1,8 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfZ1CezNQCbPJsShmpF3F2VnhofEwoertmObne8U8tJ4Zi6kiV7wErIqPAXBGNthzJs8V3pgfiXP2yexgQz4eqd7BsdGovmBXV+bbHMrC6IG2E337pIGJ+W9Mb5nvWFjxmWEr+G/dG3AWZor1lak21ZSJkbioF0EA7r6y8nY1N75nIAuZEqFhciDAP8uenSYTb3EDC1vsrigkHiZeC4B0atj0NpqbaQWRx0D0Y7MSt5wgwm0DNk5C3u6zEKJz60JYRBIhYCtoK5BUMkssD9/ysc3JR0ctw0jkfCv1zsbp5ah6IZ15aYo2Ze3OYZCfbnqoH+zOppH5YvOisUMoHy24n derekh@laptop
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA+yNMzUrQXa0EOfv+WJtfmLO1WdoOaD47G9qwllSUc4GPRkYzkTNdxcEPrR3XBR94ctOeWOHZ/w7ymhvwK5LLsoNBK+WgRz/mg8oHcii2GoL0fNojdwUMyFMIJxJT+iwjF/omyhyrW/aLAztAKRO7BdOkNlXMAAcMxKzQtFqdZm09ghoImu3BPYUTyDKHMp+t0P1d7mkHdd719oDfMf+5miHxQeJZJCWAsGwroN7k8a46rvezDHEygBsDAF2ZpS2iGMABos/vTp1oyHkCgCqc3rM0OoKqcKB5iQ9Qaqi5ung08BXP/PHfVynXzdGMjTh4w+6jiMw7Dx2GrQIJsDolKQ== dan.prince@dovetail
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSP0DZRAwaTAvHk7mHlLfSwVq6QCRqKn8mE6nwW1UzBmTzKdq9pK5XPqEAQgUKoarl+M+QhCNrBaNKpUqPF1dH76S0+2k2HARrxubTlXsQ9UDQQHQZxGjsrYW9sZ/F7yh4Yac7HW4pZANumyAxt0yKE0BLTZX9JojaiBn7bMzw1i5BS6qXIyH7oohd3YThxkpMCqP4O6W6wX90FSDYPtbSaZ1Q+9hzNkS29bXcsoy6uwTixkfedsCgkLb2wa9jcDHCely94Tn/oR+JjT9OQ19Tq8p/rjL8lullIrkHsEEsQ/4sIlB6441DgbeLtQAPPA7pyw50KfBCyTfHQZWPsacN jslagle@redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDv9l/F0vq7nzAT5hdrBfqDv0rD76nHNn+siS6s5gaFyAuvXJG808pqFk5bJLbRdRIA1/cLxIQeB+bB7IjeTS7Afbz/baAOPTtoumwEU8wLPzR7IyTg60R4o7pKOJG2cP45s3TGODsYt5eEAr96EGp9ayyanfuJZZf2wQWdNp1+vQXain8WHv9KIKI5XmcKI80x8RBWV86OKKsmbqV4yYxAkuLitq4h3Bhw3LP+VOxaqApevnpt7fcrvn8QR3XMsLKNZsJhT9r1qeLEZisundZPN+0EuiC7seu5zAuCBcKjRrBo7Ime8TYn5sjz9DTMcWvY3xHF2DZN2YdVxp4O8/iD bnemec@localhost.localdomain
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDhi/BqsZibuAPiUjJe7b3Dqe5nyI7BckOwfGwJYg436+bFQMoR/7RKmtPe+ISVQ04lwIriIPwKGaSHj5mbEe4LsCLZ5jAUHxvWfgHitqS5ln295zU7vp1z28o7e6LQNplgExyqQlxUPdOU48tmlz93F6szSYkNYvZnhzMn9syrajC74qPuKsmHTeYFLEcxesb7/u+BtxCk8WdjYTb//sk038NEtIsNhrGtAOV3WcDpXnA5mNMpUfeoQ4yiN9LqtreXr7Zeo587LV3T2QL+huAE0J7EuCzHAKk6TIzJqjLidg0SYwZZwfbxgviU66QLkeyzh9oiovwskelvOQCBFq3 sshnaidm@redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYt5FqMTz91Mbctj7wWg2tAkzwMDwDFtvw0l/6SGPqV+w84SxM1sRmCm1iGdjCk7Rhy3493yRMrA6RT02yTQnXyXG5xC9stspWku9GPNNXyg83SvC/iz53E5SWwYQISmgBK+dYNwzjiN8C8ohxmT8elV1ElckgGvzTOk80KygUzpf+KOfezQcSXZWxBbYsK/8FamPBoWGLCByv+zVX+dSjNgraqdGZDlXns+NiZAeEHeBwKTufFpN//1xm4lG+ah4g5oqaXNf1M7LApPSSm4r5VdFp0+S5SbcPocu+ztwttstnLI0fgJ5XUyqUJM0fZbaj1qkhFeG7bCi/75XIjnkp emilien@redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDMZI3iVVe8UO6NdHJrGUwglumu4Tgz0yBzQky5aJtuFBfPLAd0LKz2dCq91oXUZ1EprKJgxU9cB2ydFcd2baHDstsJDf0Rg3wvZdwtM8g7q6xsOE/uhwn192ZQR2EK6HlfjUoNTYJvRsuTHJ9u+xpAsLjau6I/A7aE9U7yNhJNfiGBagEdelvKujSyxdWq9ZBMFb5HKLg346OaD0xHJFS/574mKlCZkq991i8pf8nGheIkJ1XtRWCtbc8v0vKccgxVlHUxFegRTsrKIHnQfm0JHnpLaf24yrXEJhnwYHqKH5fYN6IeReV3bq0A08vH4O/O5+BJRM0J/WXhXOTvjy6pZlLyjF7KOTpESz0EZYuWgvRC2+QHTwXlAtlILnjx8N8Al0pVwz/IJ0bdZzS7+usaZvka350aCaHyRXtuwtydd1lOkz34nbJAEzsCmeaS8JH/EWfD5BuaqaZRMs2GkMEK3Ey9JVOGGb4wiHDOk3f+IQHMMz9y7Cv17jeQ+CBgFUqb1mACjg2fXnyDJCcA7EXFB+ksI4slg0rei37DOXSDe8lFhwtMum9UQf8HpYJkepbMRa3OvdibrXeGCb3+SoAE0EKLkzqKSK+UfNA2hX8a1IKsTDGLF8b91AS2NaYUISHCRCLNhwEGeDAjjkIhxy3K94nTTHbHRrRhnVvZeDDC4Q== josorior@redhat.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiJ2p/mxivnTas4Dw5d8mBs+KJMP/qk3HAQ7DPx0kcVUXPL1lgD7TowtdpOn5tyjsGg73/b+SzmrUyRdwsUHmTXL7H8pgdar+fYwCbNmmzhToCnB2GpMqQ1W1F6icJt1d0QXeUvJMGQz/X9p9X9I5LciFEzfA04sFTn+nUXyhdB3Tt/wHFEI4xXjIvbK7wx4QMGRDDJJGT3KbmhKOSWljILgPB54i10wg3xJLfW8fwiF2J160j6wsHztuJLT/v8ZhanEq/euNQ6vEApAvbVHpEg5aPO3x9/7ZFZ5K4TN0YGJ22ffkYvaaauNc5sdvTyV5j9jg5xJtdyVcxyBxc66Of gcerami@redpanda

View File

@ -1,181 +0,0 @@
#!/bin/bash
#
# Copyright 2013 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e # exit on the first non-zero status
set -u # exit on unset variables
set -o pipefail
SCRIPT_NAME=$(basename $0)
function show_options {
EXITVAL=${1:-1}
echo "Usage: $SCRIPT_NAME [-h] [-w TIMEOUT] [-l LOOP_COUNT] [-f FAIL_MATCH] [-s SUCCESS_MATCH] --delay SLEEP_TIME -- COMMAND"
echo
echo "Waits for a command to fail, succeed, or timeout."
echo
echo "Options:"
echo " -h,--help -- this help"
echo " -w,--walltime TIMEOUT -- Timeout after TIMEOUT seconds."
echo " -l,--looptimeout LOOP_COUNT -- Timeout after checking COMMAND LOOP_COUNT times."
echo " -d,--delay SLEEP_TIME -- Seconds to sleep between checks of COMMAND."
echo " -s,--success-match -- Output that indicates a success."
echo " -f,--fail-match -- Output that indicates a short-circuit failure."
echo
echo "Execute the command in a loop until it succeeds, a timeout is reached, or"
echo "a short-circuit failure occurs. Between each check of the command sleep for"
echo "the number of seconds specified by SLEEP_TIME."
echo
echo "Examples:"
echo " wait_for -w 300 --delay 10 -- ping -c 1 192.0.2.2"
echo " wait_for -w 10 --delay 1 -- ls file_we_are_waiting_for"
echo " wait_for -w 30 --delay 3 -- date \| grep 8"
echo " wait_for -w 300 --delay 10 --fail-match CREATE_FAILED -- openstack stack show undercloud"
echo " wait_for -w 300 --delay 10 --success-match CREATE_COMPLETE -- openstack stack show undercloud"
exit $EXITVAL
}
USE_WALLTIME=
TIMEOUT=
DELAY=
if [ -n "${SUCCESSFUL_MATCH_OUTPUT:-}" ]; then
echo "DEPRECATION WARNING: Using env vars for specifying SUCCESSFUL_MATCH_OUTPUT is deprecated."
fi
SUCCESSFUL_MATCH_OUTPUT=${SUCCESSFUL_MATCH_OUTPUT:-""}
if [ -n "${FAIL_MATCH_OUTPUT:-}" ]; then
echo "DEPRECATION WARNING: Using env vars for specifying FAIL_MATCH_OUTPUT is deprecated."
fi
FAIL_MATCH_OUTPUT=${FAIL_MATCH_OUTPUT:-""}
USE_ARGPARSE=0
# We have to support positional arguments for backwards compat
if [ -n "$1" -a "${1:0:1}" == "-" ]; then
USE_ARGPARSE=1
else
echo "DEPRECATION WARNING: Using positional arguments for wait_for is deprecated."
fi
if [ $USE_ARGPARSE -eq 1 ]; then
set +e
TEMP=$(getopt -o h,w:,l:,d:,s:,f: -l help,walltime:,looptimeout:,delay:,success-match:,fail-match: -n $SCRIPT_NAME -- "$@")
if [ $? != 0 ]; then
show_options;
fi
set -e
# Note the quotes around `$TEMP': they are essential!
eval set -- "$TEMP"
while true ; do
case "$1" in
-h) show_options 0;;
--help) show_options 0;;
-w|--walltime) [ -n "$USE_WALLTIME" ] && show_options
USE_WALLTIME=1
TIMEOUT="$2"
shift 2
;;
-l|--looptimeout) [ -n "$USE_WALLTIME" ] && show_options
USE_WALLTIME=0
TIMEOUT="$2"
shift 2
;;
-d|--delay) DELAY="$2"; shift 2;;
-s|--success-match) SUCCESSFUL_MATCH_OUTPUT="$2"; shift 2;;
-f|--fail-match) FAIL_MATCH_OUTPUT="$2"; shift 2;;
--) shift ; break ;;
esac
done
else
TIMEOUT=${1:-""}
DELAY=${2:-""}
USE_WALLTIME=0
shift 2 || true
fi
COMMAND="$@"
if [ -z "$TIMEOUT" -o -z "$DELAY" -o -z "$COMMAND" ]; then
show_options
fi
ENDTIME=$(($(date +%s) + $TIMEOUT))
TIME_REMAINING=0
function update_time_remaining {
CUR_TIME="$(date +%s)"
TIME_REMAINING=$(($ENDTIME - $CUR_TIME))
}
OUTPUT=
function check_cmd {
STATUS=0
OUTPUT=$(eval $COMMAND 2>&1) || STATUS=$?
if [[ -n "$SUCCESSFUL_MATCH_OUTPUT" ]] \
&& [[ $OUTPUT =~ $SUCCESSFUL_MATCH_OUTPUT ]]; then
exit 0
elif [[ -n "$FAIL_MATCH_OUTPUT" ]] \
&& [[ $OUTPUT =~ $FAIL_MATCH_OUTPUT ]]; then
echo "Command output matched '$FAIL_MATCH_OUTPUT'. Here is the complete output failure:"
echo "${OUTPUT}"
echo "Exiting ..."
exit 1
elif [[ -z "$SUCCESSFUL_MATCH_OUTPUT" ]] && [[ $STATUS -eq 0 ]]; then
# The command successfully completed and we aren't testing against
# its output, so we have finished waiting.
exit 0
fi
}
i=0
while [ $USE_WALLTIME -eq 1 -o $i -lt $TIMEOUT ]; do
if [ $USE_WALLTIME -eq 1 ]; then
update_time_remaining
if [ $TIME_REMAINING -le 0 ]; then
break
fi
else
i=$((i + 1))
fi
check_cmd
if [ $USE_WALLTIME -eq 1 ]; then
update_time_remaining
if [ $TIME_REMAINING -lt $DELAY ]; then
if [ $TIME_REMAINING -gt 0 ]; then
sleep $TIME_REMAINING
check_cmd
fi
else
sleep $DELAY
fi
else
sleep $DELAY
fi
done
if [ $USE_WALLTIME -eq 1 ]; then
SECONDS=$TIMEOUT
else
SECONDS=$((TIMEOUT * DELAY))
fi
printf 'Timing out after %d seconds:\nCOMMAND=%s\nOUTPUT=%s\n' \
"$SECONDS" "$COMMAND" "$OUTPUT"
exit 1

View File

@ -1,2 +0,0 @@
parameter_defaults:
CephPoolDefaultSize: 1

View File

@ -1,56 +0,0 @@
parameter_defaults:
# Because we don't have DNS, the certificate relies on the VIP
# being consistent.
PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000::18'}]
SSLCertificate: |
-----BEGIN CERTIFICATE-----
MIIDOTCCAiECAQEwDQYJKoZIhvcNAQELBQAwYjELMAkGA1UEBhMCWFgxFTATBgNV
BAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEe
MBwGA1UEAwwVMjAwMTpkYjg6ZmQwMDoxMDAwOjoxMB4XDTE2MTIxMzEyMTkwOVoX
DTI2MTIxMTEyMTkwOVowYzELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQg
Q2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEfMB0GA1UEAwwWMjAw
MTpkYjg6ZmQwMDoxMDAwOjoxODCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBALdc3oejFTrAt410pHbwPdTcXfeTQxLXbqFNu/KU90bN97rfP3Tbqk9KYZwk
SCbCS2O2Sh4e+t0ktdIl7x5Tq4jm8LaMGsGJUl4wzTEx1nbn1nk5mmPkjieiAxnH
3BXqYzYnlZatgZH8grBOGAYBylyqdDDuqGmERygWASCXqc9qV5IMNN6Bhfd+IxuA
mRC3lQOxq/Ng/Ilq8S1uBxUtZNbad9rgQv9N1azRsb06rbpSnyJTW6fsD9ybEiHF
H4iX/krUxLaTQ0aT/gGU/jWrSF0IBLyhZ1V6FkcKrLLfJkz/29Zzir+Qu2MutzwO
TnwKPxZwWG0aIzEypzGApWlP7QECAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAZWNb
WIs2yXdiDsLdp+WjxSy+GmTnrR7f6P9tD5EA9Xu0OmDHLoLFgfplrs0BoCaXBMvV
5aeG2SR54NaSr7gW5Fo9eQS7ljTEAAjV2m/NlFL/hlYBARMz/0WXuMqtEY+0Lw6J
o/x5UnwKgtidbYB7ffcJaO+Zx5S467ByIAZbQU0khrx5CedO6TZYczYL4n0FtBGF
xaaW3HDPgVlI+X/BYPDsRwpPlcjV8AN3fWpxWniKswg/HRYt2JEktROmIoOW2CMh
YvTD7NjziZB+DtsZaajrXyYjowuDo/K80o28bhfa+Ft/cnxOF0mbfcQhUQu+/TdG
+XeEbwvOXUCpWqvkxQ==
-----END CERTIFICATE-----
SSLIntermediateCertificate: ''
SSLKey: |
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAt1zeh6MVOsC3jXSkdvA91Nxd95NDEtduoU278pT3Rs33ut8/
dNuqT0phnCRIJsJLY7ZKHh763SS10iXvHlOriObwtowawYlSXjDNMTHWdufWeTma
Y+SOJ6IDGcfcFepjNieVlq2BkfyCsE4YBgHKXKp0MO6oaYRHKBYBIJepz2pXkgw0
3oGF934jG4CZELeVA7Gr82D8iWrxLW4HFS1k1tp32uBC/03VrNGxvTqtulKfIlNb
p+wP3JsSIcUfiJf+StTEtpNDRpP+AZT+NatIXQgEvKFnVXoWRwqsst8mTP/b1nOK
v5C7Yy63PA5OfAo/FnBYbRojMTKnMYClaU/tAQIDAQABAoIBAEHY8PUGpMYPlRsQ
a0tnJN0/MeOJl8m5Ybo9fyQzSMW9i5wpNdGSXQvSa1E2Wk7I8Wfie6VrfK1T1JOe
GcvrQiuMu57im20rElBO+DEVc64JvaSVAhOjbUVgUfq9IhUgsfYtqvOMZX52jrwf
n1Lw66nxcH5uBj62SRNlK4x3ynx4WLbUF9159lpjOMyU67JqwVxl6yL8JS1XZ0TB
Zmyhtr7DgzR0RUjPaZ3Bc31z5HWswHaj0rISbgtFtI0L31YTu8hABZbTPY8hVt9S
Jfk5wt2GreOZQG57kWi+B0+hQTytIY8c7Qq1UlwJBvGahtfBBr1j4/0SnE33NMdq
o2JduNECgYEA7MZW1wnpgOZabFcg4m1vPzTleg3wYdQfOp+gRMFoRi/wef2j87mb
O9lZ3vjozko2Pu4cpC9UznGOMfVkz2g32lxGmIOYy7BX/ncaWUDGGCQeYyAiI/Ct
RVJDCFWbkDqIXENqzkBySMQT3YFJwtCGaW36NgkrAqTJfCOKWvwqpy0CgYEAxkBL
chEu036ocCn2DBOFjkKKizvIH0xxKEweXqKmyhX1cievszweMhYXbMrJp+cNl4Wx
f0/lpdd6pwl3SvXShJa47DK1U3sNhNu2Hm55nBctAyr3znQaxp6HLpDLiJtM+dhA
su2sTKUB0mFkzv+FzxVkVbMVIPSfLWW0pc63AaUCgYB6Bv5adVFSjWWs0HqyN5RO
DwT1kpmA0cIGhJ4k0UjcY3icZoxI4C+Q+MPIlp/lMXojlLROKdabTwrp1H7QJfYj
I8t+btQgphGzo86AkVGGv5apn7X4r+GdCHhjozs5afE9G8G/6pb7yZIzZp6NOMsN
KxD5KGByTFtLD0YO/kc9QQKBgANfdqkjJ7PLdNwrqpb8AgqwkSDbTIVTzJ0N5npf
tAy+IOXnTPL0s+LQQ2fDb3je0nlgeeublWGj799FpvTsh79fvRevqSwZbNp0zn1o
DcOdFVP3eC3YBdiZO92L8xqI4d9iUX950bOgHw7QP2HSMRmb2JAostRTtR2VnGQx
xyVNAoGBALBxncp/mQT8mq9WtQqpr/q1DzfPB0P+VXp00AD19pNRbskr2gLqAcX6
Azhecqwpt3roctmTBPIgrfAexC4e+MzaYpGngP/nov2urd9F+0egVCxzzVwea0FK
MpNCwF2T0lKDjeQETLOyrnyrSMkOWPk+eyWd2nipK48n8utE44mw
-----END RSA PRIVATE KEY-----
resource_registry:
OS::TripleO::NodeTLSData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/tls-cert-inject.yaml

View File

@ -1,59 +0,0 @@
parameter_defaults:
# Because we don't have DNS, the certificate relies on the VIP
# being consistent.
ControlFixedIPs: [{'ip_address':'192.168.24.6'}]
SSLCertificate: |
-----BEGIN CERTIFICATE-----
MIIDhTCCAm2gAwIBAgIJAI8wrqY4AYnnMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxFTATBgNVBAMMDDE5Mi4xNjguMjQuNjAeFw0xNjExMDkxNTIx
NDhaFw0xNzExMDkxNTIxNDhaMFkxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZh
dWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxFTATBgNVBAMM
DDE5Mi4xNjguMjQuNjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOo1
4ECMx3cWSIIxo6wmvLINZyyUqdhEp3yEZT4KM38wDui+Sw52uVhd2WzyM6ywkFDz
ePs4X64P5KbbOJz5k5mR/4QzGDREmF9vvmzIiQ1URVxOAxGvNoc2LwHDJyYBoIF0
s6uRwXfL6zyxc4sYsWVejDmk1JyHFcf1pSTg3pz6wsfhgnG3J/zW3kGK85TwqF3a
Fz34yNSJF9y7muRjg0K0hW392sU2xgAkjYrpk6mKweWDcEa3MU38RMrOfEw0ORGJ
MYJZjGDN7Fah12k95sUZ8Xbjnp476i63PBVpB+/rQtGsZK7AFWy/MtBRPfvNkpm+
DqAOip2Y4WG08rgEuqsCAwEAAaNQME4wHQYDVR0OBBYEFHNIz4VamTAoAR176ZU5
3zGPZSR1MB8GA1UdIwQYMBaAFHNIz4VamTAoAR176ZU53zGPZSR1MAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAOU2MShnCtj3F1EqoCsx84x+fPajjy/i
Rbd+FIcGKOOBlUxLNMLQeJyB8utui1uXa0LWuun4Vc0JLdri7qSXfbOqgEi/Dkj7
fvLPACSxLzBdHr4atIEP+WkPAL31obcuKshm0TxaMui4ymR6gIMem/uHR/rg5l6L
I6Y9hNS/A2k6HohhbIVRuT0abxmDZ9RNjJxxm0RydMwxMTtzJGJrc/t/SznFpKRf
FQ2CFh3O5w14mJaKJZPuOM8g8q/PVHSZkMO7A+2BoxDKjF/P2fPvGPQSV3AoYleu
72dAUied9SXghzgyx5BYCJTwRL9poSG39okq+/zl4c+JVyFxweamlSI=
-----END CERTIFICATE-----
SSLIntermediateCertificate: ''
SSLKey: |
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA6jXgQIzHdxZIgjGjrCa8sg1nLJSp2ESnfIRlPgozfzAO6L5L
Dna5WF3ZbPIzrLCQUPN4+zhfrg/kpts4nPmTmZH/hDMYNESYX2++bMiJDVRFXE4D
Ea82hzYvAcMnJgGggXSzq5HBd8vrPLFzixixZV6MOaTUnIcVx/WlJODenPrCx+GC
cbcn/NbeQYrzlPCoXdoXPfjI1IkX3Lua5GODQrSFbf3axTbGACSNiumTqYrB5YNw
RrcxTfxEys58TDQ5EYkxglmMYM3sVqHXaT3mxRnxduOenjvqLrc8FWkH7+tC0axk
rsAVbL8y0FE9+82Smb4OoA6KnZjhYbTyuAS6qwIDAQABAoIBAGrmhmHQH+bqI9RR
Bey4jZ4TYglGHbPZujyqTW+gxlqCO/vdh3Klm/mKdIg2kIF84ZIhXxIDbCCNnobO
PqvfepA4xFC55Y1N1eKRpXAPpAaSdUopTVtQBup2P8RlJ8RAL9uGwc9Iqu6cA9Xj
BOLzNV0YZ5aT9nsvaTConWUUwmcRagVrmpHCS1ZeSksBfkLLH9Ujwx+WSiRjetZe
2AznoFcq86LC5lXNWekkoFveejXk5grYfQnmjEsZbrgafeyFs15wJiMmMgJP6IiX
XbtPhAmF3j/JkRRO/SGW44bUm+MyyiiCkTMf64nReZBIRM2k0ahkjIaCtPI+Of3n
WLnODkECgYEA/3nLux8EW3RxtBlE3JntoqTBzawGGnxQXvbH/m3ZJOAB3AQiFMyC
J5YRKvr/pq0D0zCIbPqzU4yl5s2snMlwu6V6FLb3BhkaXTB50ym58O0COBhKF1SY
d8NTh8q+XDxejA6pIDx1aabstYNJQJEvq79UERX1OIu0wD74k8gfiaECgYEA6rDo
xXisEBf+3zppDCtLKYcv7Dkt5a8ziLnG8344TW8AhFeQrLo+BbuSBjT3urZxhT89
0Z0QeIG6GvuBJGdfSJDWaOh6z6uiugM22Fj5ToX6XyBztgzq9IHLZwOSrRKDgJYK
whvCWd+bmk56nffYvrsSgnkDzS3otu+8ImdrmMsCgYAyWloaTnm4YNIMUjKtXDVn
Khv8opwtWjUpEnDGp/X1e6VCLRpeiRRaIwzNMN10h4NKe7VX7W2s2PrV69HKsTVj
9rCCJrG3ZPk6RtSsFvxMDRqz3a20vXqzmC79g6tTWJ618inJbcEiisD64Z5teHAx
5Kr4FyDW/Uop3f92eSomIQKBgQCRRbagXaTsRV4v4aEatkXNS/AfIfnGHiAhNxXu
rSe+zsIcKXPVZ2dy9vtZujnKKpq0Z0Ql1zpVzb9camgzF4XEjlsaDyXOHKkayViO
rOyNbaNgdN8LRo29kgZNGsoVNcuOsdGw+6vfI+nWp1OBKuTQk03O04fF3vAZJOZo
yWmx5wKBgQDlcuxMQxrXg7Nd/+kRlgK8Ffmx7PDQEnTVWkU8JtoA29gJDNFbI9k9
dMVSnAUQ/3DZ33RvWB9cVHHT2LZhmBXBTLqBecgZ8x7HSfRSA8tC4lwWFFMUq/yW
ki6qyklexL30AjbARxdUI0efGk/ko9TzTPIwT6tVXMue4Dp0IsrhWw==
-----END RSA PRIVATE KEY-----
resource_registry:
OS::TripleO::NodeTLSData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/tls-cert-inject.yaml
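A minimal sketch (not part of the deleted files) of why the VIP is pinned above: the subject CN of the self-signed certificate is the VIP address itself, so TLS validation only works while the VIP stays 192.168.24.6. It assumes the "cryptography" Python package and that the PEM block above has been saved to a hypothetical cert.pem.
import pickle  # unused here; see the ui-settings.pickle sketch later in this change
from cryptography import x509
from cryptography.x509.oid import NameOID

# cert.pem is a hypothetical file holding the PEM certificate shown above
with open("cert.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

cn = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
print(cn)                    # expected: 192.168.24.6, the pinned VIP
print(cert.not_valid_after)  # short-lived self-signed test certificate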

View File

@ -1,2 +0,0 @@
parameter_defaults:
EnablePackageInstall: true

View File

@ -1,6 +0,0 @@
parameter_defaults:
HostnameMap:
overcloud-controller-0: controller-0-tripleo-ci-a-foo
overcloud-controller-1: controller-1-tripleo-ci-b-bar
overcloud-controller-2: controller-2-tripleo-ci-c-baz
overcloud-novacompute-0: compute-0-tripleo-ci-a-test

View File

@ -1,26 +0,0 @@
parameter_defaults:
CAMap:
self-signed-cert:
content: |
-----BEGIN CERTIFICATE-----
MIIDlzCCAn+gAwIBAgIJAMi8i1B5OWQCMA0GCSqGSIb3DQEBCwUAMGIxCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxHjAcBgNVBAMMFTIwMDE6ZGI4OmZkMDA6MTAwMDo6MTAeFw0x
NjEyMTMxMjE4MDlaFw0yNjEyMTExMjE4MDlaMGIxCzAJBgNVBAYTAlhYMRUwEwYD
VQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQx
HjAcBgNVBAMMFTIwMDE6ZGI4OmZkMDA6MTAwMDo6MTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAKgpCyl3kJKDWBWJ2rLRaLNysJ1UENa/9YO5Ssw46Vob
hHuxHIydHrht/6RJOcBD5sd6qZemyPu0D/EKYRzRbQxxI6EJ9jYaUJJl+3O79el2
ZYrkYuj0mKvdXNo4Esgs8O/EWqA7tyJWAsxr+5A3VzafekOaNhTbL2QiaUEs+iKn
hghNIpgQzzoysYiVk3JERzz8fV0ADk9P2oYN21ZX2dKxIKC5h17fwXFPVanf88Tr
0R3Zgxx/ipw+n6oV8WT3n0YSFMZmjOIjUczJqIedLlcPJZ1eiwFnBdTH6CG1d+fA
I/gM4M/g2OCY6uyyEZkYJ/ruS5rZc2uawthZPXuCnS8CAwEAAaNQME4wHQYDVR0O
BBYEFE9jNM+rReAuIerBa5yxWf4mUz4/MB8GA1UdIwQYMBaAFE9jNM+rReAuIerB
a5yxWf4mUz4/MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAPGqxml
NJJuofnkaOXtFnZbYqho4fdSpCiVg8gB9ywH65BaUJOPIXPimeML1Bo1nyqBb3jX
6LOONKqf74NfJQ/HmBu2AfTMbDXtbLwt7mOCYNvEKCTmvnVJaJATn5r2L1mRnPdD
drStjHxqaKgJDOIo/khaQsDHaFwo8bV4VTrZ2X7ml6y5RjDPoSimiWt8J2/G3wKr
unPv1a7O1M0EsbrqF4RkxEfh9dVWWgRrMpnbPW3LqrXDZ0wdI33DC4vvTi1LZmH/
LbeRLQFMnzZWI5rctYAUFSfe/Ra4G4cMyg0xaZk3PqZPQRr0aA2SCphfuZ/TatUE
a4eXTxLMxeR9Hcs=
-----END CERTIFICATE-----

View File

@ -1,25 +0,0 @@
parameter_defaults:
CAMap:
self-signed-cert:
content: |
-----BEGIN CERTIFICATE-----
MIIDhTCCAm2gAwIBAgIJAI8wrqY4AYnnMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxFTATBgNVBAMMDDE5Mi4xNjguMjQuNjAeFw0xNjExMDkxNTIx
NDhaFw0xNzExMDkxNTIxNDhaMFkxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZh
dWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxFTATBgNVBAMM
DDE5Mi4xNjguMjQuNjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOo1
4ECMx3cWSIIxo6wmvLINZyyUqdhEp3yEZT4KM38wDui+Sw52uVhd2WzyM6ywkFDz
ePs4X64P5KbbOJz5k5mR/4QzGDREmF9vvmzIiQ1URVxOAxGvNoc2LwHDJyYBoIF0
s6uRwXfL6zyxc4sYsWVejDmk1JyHFcf1pSTg3pz6wsfhgnG3J/zW3kGK85TwqF3a
Fz34yNSJF9y7muRjg0K0hW392sU2xgAkjYrpk6mKweWDcEa3MU38RMrOfEw0ORGJ
MYJZjGDN7Fah12k95sUZ8Xbjnp476i63PBVpB+/rQtGsZK7AFWy/MtBRPfvNkpm+
DqAOip2Y4WG08rgEuqsCAwEAAaNQME4wHQYDVR0OBBYEFHNIz4VamTAoAR176ZU5
3zGPZSR1MB8GA1UdIwQYMBaAFHNIz4VamTAoAR176ZU53zGPZSR1MAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAOU2MShnCtj3F1EqoCsx84x+fPajjy/i
Rbd+FIcGKOOBlUxLNMLQeJyB8utui1uXa0LWuun4Vc0JLdri7qSXfbOqgEi/Dkj7
fvLPACSxLzBdHr4atIEP+WkPAL31obcuKshm0TxaMui4ymR6gIMem/uHR/rg5l6L
I6Y9hNS/A2k6HohhbIVRuT0abxmDZ9RNjJxxm0RydMwxMTtzJGJrc/t/SznFpKRf
FQ2CFh3O5w14mJaKJZPuOM8g8q/PVHSZkMO7A+2BoxDKjF/P2fPvGPQSV3AoYleu
72dAUied9SXghzgyx5BYCJTwRL9poSG39okq+/zl4c+JVyFxweamlSI=
-----END CERTIFICATE-----

View File

@ -1,26 +0,0 @@
parameter_defaults:
SSLRootCertificate: |
-----BEGIN CERTIFICATE-----
MIIDlzCCAn+gAwIBAgIJAMi8i1B5OWQCMA0GCSqGSIb3DQEBCwUAMGIxCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxHjAcBgNVBAMMFTIwMDE6ZGI4OmZkMDA6MTAwMDo6MTAeFw0x
NjEyMTMxMjE4MDlaFw0yNjEyMTExMjE4MDlaMGIxCzAJBgNVBAYTAlhYMRUwEwYD
VQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQx
HjAcBgNVBAMMFTIwMDE6ZGI4OmZkMDA6MTAwMDo6MTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAKgpCyl3kJKDWBWJ2rLRaLNysJ1UENa/9YO5Ssw46Vob
hHuxHIydHrht/6RJOcBD5sd6qZemyPu0D/EKYRzRbQxxI6EJ9jYaUJJl+3O79el2
ZYrkYuj0mKvdXNo4Esgs8O/EWqA7tyJWAsxr+5A3VzafekOaNhTbL2QiaUEs+iKn
hghNIpgQzzoysYiVk3JERzz8fV0ADk9P2oYN21ZX2dKxIKC5h17fwXFPVanf88Tr
0R3Zgxx/ipw+n6oV8WT3n0YSFMZmjOIjUczJqIedLlcPJZ1eiwFnBdTH6CG1d+fA
I/gM4M/g2OCY6uyyEZkYJ/ruS5rZc2uawthZPXuCnS8CAwEAAaNQME4wHQYDVR0O
BBYEFE9jNM+rReAuIerBa5yxWf4mUz4/MB8GA1UdIwQYMBaAFE9jNM+rReAuIerB
a5yxWf4mUz4/MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAPGqxml
NJJuofnkaOXtFnZbYqho4fdSpCiVg8gB9ywH65BaUJOPIXPimeML1Bo1nyqBb3jX
6LOONKqf74NfJQ/HmBu2AfTMbDXtbLwt7mOCYNvEKCTmvnVJaJATn5r2L1mRnPdD
drStjHxqaKgJDOIo/khaQsDHaFwo8bV4VTrZ2X7ml6y5RjDPoSimiWt8J2/G3wKr
unPv1a7O1M0EsbrqF4RkxEfh9dVWWgRrMpnbPW3LqrXDZ0wdI33DC4vvTi1LZmH/
LbeRLQFMnzZWI5rctYAUFSfe/Ra4G4cMyg0xaZk3PqZPQRr0aA2SCphfuZ/TatUE
a4eXTxLMxeR9Hcs=
-----END CERTIFICATE-----
resource_registry:
OS::TripleO::NodeTLSCAData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/ca-inject.yaml

View File

@ -1,26 +0,0 @@
parameter_defaults:
SSLRootCertificate: |
-----BEGIN CERTIFICATE-----
MIIDhTCCAm2gAwIBAgIJAI8wrqY4AYnnMA0GCSqGSIb3DQEBCwUAMFkxCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxFTATBgNVBAMMDDE5Mi4xNjguMjQuNjAeFw0xNjExMDkxNTIx
NDhaFw0xNzExMDkxNTIxNDhaMFkxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZh
dWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxFTATBgNVBAMM
DDE5Mi4xNjguMjQuNjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOo1
4ECMx3cWSIIxo6wmvLINZyyUqdhEp3yEZT4KM38wDui+Sw52uVhd2WzyM6ywkFDz
ePs4X64P5KbbOJz5k5mR/4QzGDREmF9vvmzIiQ1URVxOAxGvNoc2LwHDJyYBoIF0
s6uRwXfL6zyxc4sYsWVejDmk1JyHFcf1pSTg3pz6wsfhgnG3J/zW3kGK85TwqF3a
Fz34yNSJF9y7muRjg0K0hW392sU2xgAkjYrpk6mKweWDcEa3MU38RMrOfEw0ORGJ
MYJZjGDN7Fah12k95sUZ8Xbjnp476i63PBVpB+/rQtGsZK7AFWy/MtBRPfvNkpm+
DqAOip2Y4WG08rgEuqsCAwEAAaNQME4wHQYDVR0OBBYEFHNIz4VamTAoAR176ZU5
3zGPZSR1MB8GA1UdIwQYMBaAFHNIz4VamTAoAR176ZU53zGPZSR1MAwGA1UdEwQF
MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAOU2MShnCtj3F1EqoCsx84x+fPajjy/i
Rbd+FIcGKOOBlUxLNMLQeJyB8utui1uXa0LWuun4Vc0JLdri7qSXfbOqgEi/Dkj7
fvLPACSxLzBdHr4atIEP+WkPAL31obcuKshm0TxaMui4ymR6gIMem/uHR/rg5l6L
I6Y9hNS/A2k6HohhbIVRuT0abxmDZ9RNjJxxm0RydMwxMTtzJGJrc/t/SznFpKRf
FQ2CFh3O5w14mJaKJZPuOM8g8q/PVHSZkMO7A+2BoxDKjF/P2fPvGPQSV3AoYleu
72dAUied9SXghzgyx5BYCJTwRL9poSG39okq+/zl4c+JVyFxweamlSI=
-----END CERTIFICATE-----
resource_registry:
OS::TripleO::NodeTLSCAData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/ca-inject.yaml

View File

@ -1,84 +0,0 @@
resource_registry:
OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external_from_pool.yaml
OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_from_pool.yaml
OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_from_pool.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_from_pool.yaml
OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant_from_pool.yaml
OS::TripleO::Compute::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_from_pool.yaml
OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_from_pool.yaml
OS::TripleO::Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_from_pool.yaml
OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant_from_pool.yaml
OS::TripleO::CephStorage::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::CephStorage::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_from_pool.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_from_pool.yaml
OS::TripleO::CephStorage::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_from_pool.yaml
OS::TripleO::SwiftStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_from_pool.yaml
OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_from_pool.yaml
OS::TripleO::SwiftStorage::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api_from_pool.yaml
OS::TripleO::BlockStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_from_pool.yaml
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt_from_pool.yaml
OS::TripleO::BlockStorage::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Network::Ports::NetVipMap: /usr/share/openstack-tripleo-heat-templates/network/ports/net_vip_map_external.yaml
OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Network::Ports::RedisVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/from_service.yaml
parameter_defaults:
ControlPlaneIP: 192.168.24.200
ExternalNetworkVip: 10.0.0.9
InternalApiNetworkVip: 172.17.0.9
StorageNetworkVip: 172.18.0.9
StorageMgmtNetworkVip: 172.19.0.9
ServiceVips:
redis: 172.17.0.8
ControllerIPs:
# Each controller gets one IP from each list below, in order: the first
# controller takes the first IP (see the sketch after this file)
external:
- 10.0.0.251
- 10.0.0.252
- 10.0.0.253
internal_api:
- 172.17.0.251
- 172.17.0.252
- 172.17.0.253
storage:
- 172.18.0.251
- 172.18.0.252
- 172.18.0.253
storage_mgmt:
- 172.19.0.251
- 172.19.0.252
- 172.19.0.253
tenant:
- 172.16.0.251
- 172.16.0.252
- 172.16.0.253
NovaComputeIPs:
# Each compute gets one IP from each list below, in order: first compute, first IP
internal_api:
- 172.17.0.249
storage:
- 172.18.0.249
storage_mgmt:
- 172.19.0.249
tenant:
- 172.16.0.249
CephStorageIPs:
# Each ceph node gets one IP from each list below, in order: first node, first IP
storage:
- 172.18.0.248
storage_mgmt:
- 172.19.0.248
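A minimal Python sketch (not TripleO code; sample data copied from the ControllerIPs lists above) of the predictable-IP assignment the comments describe: node N of a role receives element N of each per-network list.
controller_ips = {
    # sample data from the ControllerIPs lists above
    "external":     ["10.0.0.251", "10.0.0.252", "10.0.0.253"],
    "internal_api": ["172.17.0.251", "172.17.0.252", "172.17.0.253"],
}

def ip_for(node_index, network, pools=controller_ips):
    # first node -> first IP, second node -> second IP, and so on
    return pools[network][node_index]

assert ip_for(0, "external") == "10.0.0.251"
assert ip_for(2, "internal_api") == "172.17.0.253"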

View File

@ -1,22 +0,0 @@
Generated Network Isolation Templates
-------------------------------------
These templates were generated by the UI tool at
https://github.com/cybertron/tripleo-scripts#net-iso-genpy
ui-settings.pickle is specific to the tool. TripleO does not use it when
deploying with these templates, but it is required to load the templates
back into the UI. Note that the UI only reads this file, so changes made
by hand to the templates will not be reflected in the UI.
The network-isolation.yaml file needs to reference the port files shipped
with tripleo-heat-templates, so by default the tool generates those paths
assuming network-isolation.yaml will be copied into the environments/
directory of tripleo-heat-templates.
If these templates are at ~/generated-templates and a local copy of
tripleo-heat-templates is at ~/tht (making changes to the packaged
tripleo-heat-templates tree is not recommended), an example deployment
looks like this:
cp ~/generated-templates/network-isolation.yaml ~/tht/environments/generated-network-isolation.yaml
openstack overcloud deploy --templates ~/tht -e ~/tht/environments/generated-network-isolation.yaml -e ~/generated-templates/network-environment.yaml
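A minimal sketch for inspecting ui-settings.pickle; the protocol-0 dump of this file appears later in this change, and only the UI tool consumes it, so loading it merely shows the saved network settings.
import pickle
import pprint

# protocol-0 pickles are ASCII, but open in binary mode for pickle.load
with open("ui-settings.pickle", "rb") as f:
    settings = pickle.load(f)

# e.g. {'route': '192.168.24.1', 'mask': 24, 'ec2': '192.168.24.1'}
pprint.pprint(settings["global_data"]["control"])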

View File

@ -1,25 +0,0 @@
resource_registry:
OS::TripleO::BlockStorage::Net::SoftwareConfig: nic-configs/cinder-storage.yaml
OS::TripleO::Compute::Net::SoftwareConfig: nic-configs/compute.yaml
OS::TripleO::Controller::Net::SoftwareConfig: nic-configs/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: nic-configs/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: nic-configs/ceph-storage.yaml
parameter_defaults:
ControlPlaneSubnetCidr: '24'
ControlPlaneDefaultRoute: 192.168.24.1
EC2MetadataIp: 192.168.24.1
ExternalNetCidr: 2001:db8:fd00:1000::/64
ExternalAllocationPools: [{"start": "2001:db8:fd00:1000::10", "end": "2001:db8:fd00:1000:ffff:ffff:ffff:fffe"}]
ExternalInterfaceDefaultRoute: 2001:db8:fd00:1000::1
NeutronExternalNetworkBridge: "''"
InternalApiNetCidr: fd00:fd00:fd00:2000::/64
InternalApiAllocationPools: [{"start": "fd00:fd00:fd00:2000::10", "end": "fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe"}]
StorageNetCidr: fd00:fd00:fd00:3000::/64
StorageAllocationPools: [{"start": "fd00:fd00:fd00:3000::10", "end": "fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe"}]
StorageMgmtNetCidr: fd00:fd00:fd00:4000::/64
StorageMgmtAllocationPools: [{"start": "fd00:fd00:fd00:4000::10", "end": "fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe"}]
TenantNetCidr: 172.16.0.0/24
TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}]
DnsServers: ["8.8.8.8", "8.8.4.4"]

View File

@ -1,36 +0,0 @@
resource_registry:
# Redis
OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
# External
OS::TripleO::Network::External: ../network/external_v6.yaml
OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_v6.yaml
# InternalApi
OS::TripleO::Network::InternalApi: ../network/internal_api_v6.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml
# Storage
OS::TripleO::Network::Storage: ../network/storage_v6.yaml
OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_v6.yaml
OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_v6.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_v6.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt_v6.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
# Tenant
OS::TripleO::Network::Tenant: ../network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: ../network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
parameter_defaults:
CephIPv6: True
CorosyncIPv6: True
MongoDbIPv6: True
NovaIPv6: True
RabbitIPv6: True
MemcachedIPv6: True

View File

@ -1,120 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config:
- type: interface
name: nic1
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask:
list_join:
- /
- - {get_param: ControlPlaneIp}
- {get_param: ControlPlaneSubnetCidr}
routes:
- default: true
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ControlPlaneDefaultRoute}
- ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
- type: interface
name: nic4
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageIpSubnet}
- type: interface
name: nic5
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageMgmtIpSubnet}
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}
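The os_net_config blocks in these nic-config templates are consumed by os-net-config after Heat substitutes the parameters. A minimal sketch (sample values assumed, not the actual Heat engine) of what the list_join on nic1 above produces:
control_plane_ip = "192.168.24.10"  # assumed sample value for ControlPlaneIp
control_plane_cidr = "24"           # ControlPlaneSubnetCidr default

# equivalent of Heat's {list_join: ['/', [ControlPlaneIp, ControlPlaneSubnetCidr]]}
ip_netmask = "/".join([control_plane_ip, control_plane_cidr])
print(ip_netmask)  # 192.168.24.10/24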

View File

@ -1,92 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config: []
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}

View File

@ -1,131 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config:
- type: interface
name: nic1
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask:
list_join:
- /
- - {get_param: ControlPlaneIp}
- {get_param: ControlPlaneSubnetCidr}
routes:
- default: true
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ControlPlaneDefaultRoute}
- ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
- type: interface
name: nic3
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: InternalApiIpSubnet}
- type: interface
name: nic4
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageIpSubnet}
- type: ovs_bridge
name: br-tenant
dns_servers: {get_param: DnsServers}
use_dhcp: false
addresses:
- ip_netmask: {get_param: TenantIpSubnet}
members:
- type: interface
name: nic6
mtu: 1350
primary: true
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}

View File

@ -1,152 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config:
- type: interface
name: nic1
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask:
list_join:
- /
- - {get_param: ControlPlaneIp}
- {get_param: ControlPlaneSubnetCidr}
routes:
- default: true
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ControlPlaneDefaultRoute}
- ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
- type: ovs_bridge
name: br-ex
dns_servers: {get_param: DnsServers}
use_dhcp: false
addresses:
- ip_netmask: {get_param: ExternalIpSubnet}
routes:
- default: true
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ExternalInterfaceDefaultRoute}
members:
- type: interface
name: nic2
mtu: 1350
primary: true
- type: interface
name: nic3
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: InternalApiIpSubnet}
- type: interface
name: nic4
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageIpSubnet}
- type: interface
name: nic5
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- type: ovs_bridge
name: br-tenant
dns_servers: {get_param: DnsServers}
use_dhcp: false
addresses:
- ip_netmask: {get_param: TenantIpSubnet}
members:
- type: interface
name: nic6
mtu: 1350
primary: true
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}

View File

@ -1,92 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config: []
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}

View File

@ -1,729 +0,0 @@
(dp0
S'global_data'
p1
(dp2
S'control'
p3
(dp4
S'route'
p5
V192.168.24.1
p6
sS'mask'
p7
I24
sS'ec2'
p8
V192.168.24.1
p9
ssS'major'
p10
I1
sS'management'
p11
(dp12
S'start'
p13
V172.20.0.10
p14
sS'cidr'
p15
V172.20.0.0/24
p16
sS'vlan'
p17
I6
sS'end'
p18
V172.20.0.250
p19
ssS'dns2'
p20
V8.8.4.4
p21
sS'dns1'
p22
V8.8.8.8
p23
sS'storage'
p24
(dp25
g13
Vfd00:fd00:fd00:3000::10
p26
sg15
Vfd00:fd00:fd00:3000::/64
p27
sg17
I3
sg18
Vfd00:fd00:fd00:3000:ffff:ffff:ffff:fffe
p28
ssS'auto_routes'
p29
I00
sS'bond_options'
p30
V
p31
sS'external'
p32
(dp33
S'bridge'
p34
V''
p35
sg18
V2001:db8:fd00:1000:ffff:ffff:ffff:fffe
p36
sg17
I1
sg13
V2001:db8:fd00:1000::10
p37
sg15
V2001:db8:fd00:1000::/64
p38
sS'gateway'
p39
V2001:db8:fd00:1000::1
p40
ssS'internal_api'
p41
(dp42
g13
Vfd00:fd00:fd00:2000::10
p43
sg15
Vfd00:fd00:fd00:2000::/64
p44
sg17
I2
sg18
Vfd00:fd00:fd00:2000:ffff:ffff:ffff:fffe
p45
ssS'ipv6'
p46
I01
sS'storage_mgmt'
p47
(dp48
g13
Vfd00:fd00:fd00:4000::10
p49
sg15
Vfd00:fd00:fd00:4000::/64
p50
sg17
I4
sg18
Vfd00:fd00:fd00:4000:ffff:ffff:ffff:fffe
p51
ssS'minor'
p52
I2
sS'tenant'
p53
(dp54
g13
V172.16.0.10
p55
sg15
V172.16.0.0/24
p56
sg17
I5
sg18
V172.16.0.250
p57
sssS'data'
p58
(dp59
S'cinder-storage.yaml'
p60
(lp61
sS'ceph-storage.yaml'
p62
(lp63
(dp64
Vaddresses
p65
(lp66
sVnetwork
p67
VControlPlane
p68
sVprimary
p69
I01
sVmtu
p70
I1350
sS'members'
p71
(lp72
(dp73
Vip_netmask
p74
V0.0.0.0/0
p75
sVname
p76
VRoute
p77
sVdefault
p78
I01
sVnext_hop
p79
V{get_param: ControlPlaneDefaultRoute}
p80
sg71
(lp81
sVtype
p82
Vroute
p83
sasVroutes
p84
(lp85
sVuse_dhcp
p86
I00
sVtype
p87
Vinterface
p88
sVname
p89
Vnic1
p90
sa(dp91
Vaddresses
p92
(lp93
sVnetwork
p94
VStorage
p95
sVprimary
p96
I01
sVmtu
p97
I1350
sg71
(lp98
sVroutes
p99
(lp100
sVuse_dhcp
p101
I00
sVtype
p102
Vinterface
p103
sVname
p104
Vnic4
p105
sa(dp106
Vaddresses
p107
(lp108
sVnetwork
p109
VStorageMgmt
p110
sVprimary
p111
I01
sVmtu
p112
I1350
sg71
(lp113
sVroutes
p114
(lp115
sVuse_dhcp
p116
I00
sVtype
p117
Vinterface
p118
sVname
p119
Vnic5
p120
sasS'controller.yaml'
p121
(lp122
(dp123
Vaddresses
p124
(lp125
sVnetwork
p126
VControlPlane
p127
sVprimary
p128
I01
sVmtu
p129
I1350
sg71
(lp130
(dp131
Vip_netmask
p132
V0.0.0.0/0
p133
sVname
p134
VRoute
p135
sVdefault
p136
I01
sVnext_hop
p137
V{get_param: ControlPlaneDefaultRoute}
p138
sg71
(lp139
sVtype
p140
Vroute
p141
sasVroutes
p142
(lp143
sVuse_dhcp
p144
I00
sVtype
p145
Vinterface
p146
sVname
p147
Vnic1
p148
sa(dp149
Vdns_servers
p150
V{get_param: DnsServers}
p151
sVaddresses
p152
(lp153
sVnetwork
p154
VExternal
p155
sVmtu
p156
I-1
sg71
(lp157
(dp158
Vaddresses
p159
(lp160
sVnetwork
p161
VNone
p162
sVprimary
p163
I01
sVmtu
p164
I1350
sg71
(lp165
sVroutes
p166
(lp167
sVuse_dhcp
p168
I00
sVtype
p169
Vinterface
p170
sVname
p171
Vnic2
p172
sa(dp173
Vip_netmask
p174
V0.0.0.0/0
p175
sVname
p176
VRoute
p177
sVdefault
p178
I01
sVnext_hop
p179
V{get_param: ExternalInterfaceDefaultRoute}
p180
sg71
(lp181
sVtype
p182
Vroute
p183
sasVroutes
p184
(lp185
sVuse_dhcp
p186
I00
sVtype
p187
Vovs_bridge
p188
sVname
p189
Vbr-ex
p190
sa(dp191
Vaddresses
p192
(lp193
sVnetwork
p194
VInternalApi
p195
sVprimary
p196
I01
sVmtu
p197
I1350
sg71
(lp198
sVroutes
p199
(lp200
sVuse_dhcp
p201
I00
sVtype
p202
Vinterface
p203
sVname
p204
Vnic3
p205
sa(dp206
Vaddresses
p207
(lp208
sVnetwork
p209
VStorage
p210
sVprimary
p211
I01
sVmtu
p212
I1350
sg71
(lp213
sVroutes
p214
(lp215
sVuse_dhcp
p216
I00
sVtype
p217
Vinterface
p218
sVname
p219
Vnic4
p220
sa(dp221
Vaddresses
p222
(lp223
sVnetwork
p224
VStorageMgmt
p225
sVprimary
p226
I01
sVmtu
p227
I1350
sg71
(lp228
sVroutes
p229
(lp230
sVuse_dhcp
p231
I00
sVtype
p232
Vinterface
p233
sVname
p234
Vnic5
p235
sa(dp236
Vdns_servers
p237
V{get_param: DnsServers}
p238
sVaddresses
p239
(lp240
sVnetwork
p241
VTenant
p242
sVmtu
p243
I-1
sg71
(lp244
(dp245
Vaddresses
p246
(lp247
sVnetwork
p248
VNone
p249
sVprimary
p250
I01
sVmtu
p251
I1350
sg71
(lp252
sVroutes
p253
(lp254
sVuse_dhcp
p255
I00
sVtype
p256
Vinterface
p257
sVname
p258
Vnic6
p259
sasVroutes
p260
(lp261
sVuse_dhcp
p262
I00
sVtype
p263
Vovs_bridge
p264
sVname
p265
Vbr-tenant
p266
sasS'swift-storage.yaml'
p267
(lp268
sS'compute.yaml'
p269
(lp270
(dp271
Vaddresses
p272
(lp273
sVnetwork
p274
VControlPlane
p275
sVprimary
p276
I01
sVmtu
p277
I1350
sg71
(lp278
(dp279
Vip_netmask
p280
V0.0.0.0/0
p281
sVname
p282
VRoute
p283
sVdefault
p284
I01
sVnext_hop
p285
V{get_param: ControlPlaneDefaultRoute}
p286
sg71
(lp287
sVtype
p288
Vroute
p289
sasVroutes
p290
(lp291
sVuse_dhcp
p292
I00
sVtype
p293
Vinterface
p294
sVname
p295
Vnic1
p296
sa(dp297
Vaddresses
p298
(lp299
sVnetwork
p300
VInternalApi
p301
sVprimary
p302
I01
sVmtu
p303
I1350
sg71
(lp304
sVroutes
p305
(lp306
sVuse_dhcp
p307
I00
sVtype
p308
Vinterface
p309
sVname
p310
Vnic3
p311
sa(dp312
Vaddresses
p313
(lp314
sVnetwork
p315
VStorage
p316
sVprimary
p317
I01
sVmtu
p318
I1350
sg71
(lp319
sVroutes
p320
(lp321
sVuse_dhcp
p322
I00
sVtype
p323
Vinterface
p324
sVname
p325
Vnic4
p326
sa(dp327
Vdns_servers
p328
V{get_param: DnsServers}
p329
sVaddresses
p330
(lp331
sVnetwork
p332
VTenant
p333
sVmtu
p334
I-1
sg71
(lp335
(dp336
Vaddresses
p337
(lp338
sVnetwork
p339
VNone
p340
sVprimary
p341
I01
sVmtu
p342
I1350
sg71
(lp343
sVroutes
p344
(lp345
sVuse_dhcp
p346
I00
sVtype
p347
Vinterface
p348
sVname
p349
Vnic6
p350
sasVroutes
p351
(lp352
sVuse_dhcp
p353
I00
sVtype
p354
Vovs_bridge
p355
sVname
p356
Vbr-tenant
p357
sass.

View File

@ -1,6 +0,0 @@
---
parameter_defaults:
ControlPlaneSubnetCidr: "24"
ControlPlaneDefaultRoute: 192.168.24.1
EC2MetadataIp: 192.168.24.1
DnsServers: ["8.8.8.8", "8.8.4.4"]

View File

@ -1,23 +0,0 @@
Generated Network Isolation Templates
-------------------------------------
These templates were generated by the UI tool at
https://github.com/cybertron/tripleo-scripts#net-iso-genpy
ui-settings.pickle is specific to the tool. TripleO does not use it when
deploying with these templates, but it is required to load the templates
back into the UI. Note that the UI only reads this file, so changes made
by hand to the templates will not be reflected in the UI.
The network-isolation.yaml file needs to reference the port files shipped
with tripleo-heat-templates, so by default the tool generates those paths
assuming network-isolation.yaml will be copied into the environments/
directory of tripleo-heat-templates.
If the standard tripleo-heat-templates are in use, the
network-isolation-absolute.yaml file can be used instead; it has
hard-coded references to the port files in
/usr/share/openstack-tripleo-heat-templates.
If the generated network isolation templates are at ~/generated-templates,
an example deployment command looks like:
openstack overcloud deploy --templates -e ~/generated-templates/network-isolation-absolute.yaml -e ~/generated-templates/network-environment.yaml
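The tool ships the absolute variant pre-generated; this minimal sketch only illustrates the relationship between the two files, rewriting the relative "../network/..." registry paths to the packaged tree (compare the relative and absolute registry files below).
tht = "/usr/share/openstack-tripleo-heat-templates"

# rewrite the relative registry paths to the packaged location
with open("network-isolation.yaml") as src, \
        open("network-isolation-absolute.yaml", "w") as dst:
    for line in src:
        dst.write(line.replace("../network/", tht + "/network/"))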

View File

@ -1,26 +0,0 @@
resource_registry:
OS::TripleO::BlockStorage::Net::SoftwareConfig: nic-configs/cinder-storage.yaml
OS::TripleO::Compute::Net::SoftwareConfig: nic-configs/compute.yaml
OS::TripleO::Controller::Net::SoftwareConfig: nic-configs/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: nic-configs/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: nic-configs/ceph-storage.yaml
parameter_defaults:
ControlPlaneSubnetCidr: '24'
ControlPlaneDefaultRoute: 192.168.24.1
EC2MetadataIp: 192.168.24.1
ExternalNetCidr: 10.0.0.0/24
ExternalAllocationPools: [{"start": "10.0.0.10", "end": "10.0.0.50"}]
ExternalInterfaceDefaultRoute: 10.0.0.1
PublicVirtualFixedIPs: [{ "ip_address": "10.0.0.5" }]
NeutronExternalNetworkBridge: "''"
InternalApiNetCidr: 172.17.0.0/24
InternalApiAllocationPools: [{"start": "172.17.0.10", "end": "172.17.0.250"}]
StorageNetCidr: 172.18.0.0/24
StorageAllocationPools: [{"start": "172.18.0.10", "end": "172.18.0.250"}]
StorageMgmtNetCidr: 172.19.0.0/24
StorageMgmtAllocationPools: [{"start": "172.19.0.10", "end": "172.19.0.250"}]
TenantNetCidr: 172.16.0.0/24
TenantAllocationPools: [{"start": "172.16.0.10", "end": "172.16.0.250"}]
DnsServers: ["8.8.8.8", "8.8.4.4"]

View File

@ -1,29 +0,0 @@
resource_registry:
# Redis
OS::TripleO::Network::Ports::RedisVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/vip.yaml
# External
OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml
OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
# InternalApi
OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/internal_api.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
# Storage
OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/storage.yaml
OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/storage_mgmt.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage_mgmt.yaml
# Tenant
OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml

View File

@ -1,29 +0,0 @@
resource_registry:
# Redis
OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
# External
OS::TripleO::Network::External: ../network/external.yaml
OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
# InternalApi
OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
# Storage
OS::TripleO::Network::Storage: ../network/storage.yaml
OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
# StorageMgmt
OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
# Tenant
OS::TripleO::Network::Tenant: ../network/tenant.yaml
OS::TripleO::Network::Ports::TenantVipPort: ../network/ports/tenant.yaml
OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml

View File

@ -1,119 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config:
- type: interface
name: nic1
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask:
list_join:
- /
- - {get_param: ControlPlaneIp}
- {get_param: ControlPlaneSubnetCidr}
routes:
- ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
- default: true
next_hop: {get_param: ControlPlaneDefaultRoute}
- type: interface
name: nic4
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageIpSubnet}
- type: interface
name: nic5
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageMgmtIpSubnet}
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}

View File

@ -1,92 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config: []
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}

View File

@ -1,136 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config:
- type: interface
name: nic1
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask:
list_join:
- /
- - {get_param: ControlPlaneIp}
- {get_param: ControlPlaneSubnetCidr}
routes:
- ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
- default: true
next_hop: {get_param: ControlPlaneDefaultRoute}
- type: interface
name: nic3
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: InternalApiIpSubnet}
- type: interface
name: nic4
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageIpSubnet}
- type: ovs_bridge
name: br-tenant
dns_servers: {get_param: DnsServers}
use_dhcp: false
addresses:
- ip_netmask: {get_param: TenantIpSubnet}
members:
- type: interface
name: nic6
mtu: 1350
primary: true
- type: interface
name: nic5
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageMgmtIpSubnet}
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}

View File

@ -1,148 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config:
- type: interface
name: nic1
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask:
list_join:
- /
- - {get_param: ControlPlaneIp}
- {get_param: ControlPlaneSubnetCidr}
routes:
- ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
- type: ovs_bridge
name: br-ex
dns_servers: {get_param: DnsServers}
use_dhcp: false
addresses:
- ip_netmask: {get_param: ExternalIpSubnet}
routes:
- ip_netmask: 0.0.0.0/0
next_hop: {get_param: ExternalInterfaceDefaultRoute}
members:
- type: interface
name: nic2
mtu: 1350
primary: true
- type: interface
name: nic3
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: InternalApiIpSubnet}
- type: interface
name: nic4
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageIpSubnet}
- type: interface
name: nic5
mtu: 1350
use_dhcp: false
addresses:
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- type: ovs_bridge
name: br-tenant
dns_servers: {get_param: DnsServers}
use_dhcp: false
addresses:
- ip_netmask: {get_param: TenantIpSubnet}
members:
- type: interface
name: nic6
mtu: 1350
primary: true
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}
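The group: os-apply-config setting means Heat delivers the os_net_config map to the node as structured metadata, where a node-side hook hands it to os-net-config. A hedged sketch of that hand-off; the file path and direct invocation here are illustrative assumptions, since the real wiring lives in the image elements:

# Hedged sketch of the node-side hand-off; path and payload are illustrative.
import json
import subprocess

payload = {"network_config": []}  # the os_net_config map delivered by Heat
with open("/etc/os-net-config/config.json", "w") as f:
    json.dump(payload, f)

# os-net-config reads the config file and applies the interface layout.
subprocess.run(["os-net-config", "-c", "/etc/os-net-config/config.json"],
               check=True)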


@ -1,92 +0,0 @@
heat_template_version: 2015-04-30
parameters:
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
ManagementIpSubnet: # Only populated when including environments/network-management.yaml
default: ''
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ManagementNetworkVlanID:
default: 60
description: Vlan ID for the management network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
resources:
OsNetConfigImpl:
properties:
config:
os_net_config:
network_config: []
group: os-apply-config
type: OS::Heat::StructuredConfig
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}


@ -1,671 +0,0 @@
(dp0
S'global_data'
p1
(dp2
S'control'
p3
(dp4
S'route'
p5
V192.168.24.1
p6
sS'mask'
p7
I24
sS'ec2'
p8
V192.168.24.1
p9
ssS'major'
p10
I1
sS'management'
p11
(dp12
S'start'
p13
V172.20.0.10
p14
sS'cidr'
p15
V172.20.0.0/24
p16
sS'vlan'
p17
I6
sS'end'
p18
V172.20.0.250
p19
ssS'dns2'
p20
V8.8.4.4
p21
sS'dns1'
p22
V8.8.8.8
p23
sS'storage'
p24
(dp25
g13
V172.18.0.10
p26
sg15
V172.18.0.0/24
p27
sg17
I3
sg18
V172.18.0.250
p28
ssS'auto_routes'
p29
I01
sS'bond_options'
p30
V
p31
sS'external'
p32
(dp33
S'bridge'
p34
V''
p35
sg18
V10.0.0.50
p36
sg17
I1
sg13
V10.0.0.10
p37
sg15
V10.0.0.0/24
p38
sS'gateway'
p39
V10.0.0.1
p40
ssS'internal_api'
p41
(dp42
g13
V172.17.0.10
p43
sg15
V172.17.0.0/24
p44
sg17
I2
sg18
V172.17.0.250
p45
ssS'ipv6'
p46
I00
sS'storage_mgmt'
p47
(dp48
g13
V172.19.0.10
p49
sg15
V172.19.0.0/24
p50
sg17
I4
sg18
V172.19.0.250
p51
ssS'minor'
p52
I2
sS'tenant'
p53
(dp54
g13
V172.16.0.10
p55
sg15
V172.16.0.0/24
p56
sg17
I5
sg18
V172.16.0.250
p57
sssS'data'
p58
(dp59
S'cinder-storage.yaml'
p60
(lp61
sS'ceph-storage.yaml'
p62
(lp63
(dp64
Vaddresses
p65
(lp66
sVnetwork
p67
VControlPlane
p68
sVprimary
p69
I01
sVmtu
p70
I1350
sS'members'
p71
(lp72
sVroutes
p73
(lp74
sVuse_dhcp
p75
I00
sVtype
p76
Vinterface
p77
sVname
p78
Vnic1
p79
sa(dp80
Vaddresses
p81
(lp82
sVnetwork
p83
VStorage
p84
sVprimary
p85
I01
sVmtu
p86
I1350
sg71
(lp87
sVroutes
p88
(lp89
sVuse_dhcp
p90
I00
sVtype
p91
Vinterface
p92
sVname
p93
Vnic4
p94
sa(dp95
Vaddresses
p96
(lp97
sVnetwork
p98
VStorageMgmt
p99
sVprimary
p100
I01
sVmtu
p101
I1350
sg71
(lp102
sVroutes
p103
(lp104
sVuse_dhcp
p105
I00
sVtype
p106
Vinterface
p107
sVname
p108
Vnic5
p109
sasS'controller.yaml'
p110
(lp111
(dp112
Vaddresses
p113
(lp114
sVnetwork
p115
VControlPlane
p116
sVprimary
p117
I01
sVmtu
p118
I1350
sg71
(lp119
sVroutes
p120
(lp121
sVuse_dhcp
p122
I00
sVtype
p123
Vinterface
p124
sVname
p125
Vnic1
p126
sa(dp127
Vdns_servers
p128
V{get_param: DnsServers}
p129
sVaddresses
p130
(lp131
sVnetwork
p132
VExternal
p133
sVmtu
p134
I-1
sg71
(lp135
(dp136
Vaddresses
p137
(lp138
sVnetwork
p139
VNone
p140
sVprimary
p141
I01
sVmtu
p142
I1350
sg71
(lp143
sVroutes
p144
(lp145
sVuse_dhcp
p146
I00
sVtype
p147
Vinterface
p148
sVname
p149
Vnic2
p150
sasVroutes
p151
(lp152
sVuse_dhcp
p153
I00
sVtype
p154
Vovs_bridge
p155
sVname
p156
Vbr-ex
p157
sa(dp158
Vaddresses
p159
(lp160
sVnetwork
p161
VInternalApi
p162
sVprimary
p163
I01
sVmtu
p164
I1350
sg71
(lp165
sVroutes
p166
(lp167
sVuse_dhcp
p168
I00
sVtype
p169
Vinterface
p170
sVname
p171
Vnic3
p172
sa(dp173
Vaddresses
p174
(lp175
sVnetwork
p176
VStorage
p177
sVprimary
p178
I01
sVmtu
p179
I1350
sg71
(lp180
sVroutes
p181
(lp182
sVuse_dhcp
p183
I00
sVtype
p184
Vinterface
p185
sVname
p186
Vnic4
p187
sa(dp188
Vaddresses
p189
(lp190
sVnetwork
p191
VStorageMgmt
p192
sVprimary
p193
I01
sVmtu
p194
I1350
sg71
(lp195
sVroutes
p196
(lp197
sVuse_dhcp
p198
I00
sVtype
p199
Vinterface
p200
sVname
p201
Vnic5
p202
sa(dp203
Vdns_servers
p204
V{get_param: DnsServers}
p205
sVaddresses
p206
(lp207
sVnetwork
p208
VTenant
p209
sVmtu
p210
I-1
sg71
(lp211
(dp212
Vaddresses
p213
(lp214
sVnetwork
p215
VNone
p216
sVprimary
p217
I01
sVmtu
p218
I1350
sg71
(lp219
sVroutes
p220
(lp221
sVuse_dhcp
p222
I00
sVtype
p223
Vinterface
p224
sVname
p225
Vnic6
p226
sasVroutes
p227
(lp228
sVuse_dhcp
p229
I00
sVtype
p230
Vovs_bridge
p231
sVname
p232
Vbr-tenant
p233
sasS'swift-storage.yaml'
p234
(lp235
sS'compute.yaml'
p236
(lp237
(dp238
Vaddresses
p239
(lp240
sVnetwork
p241
VControlPlane
p242
sVprimary
p243
I01
sVmtu
p244
I1350
sg71
(lp245
sVroutes
p246
(lp247
sVuse_dhcp
p248
I00
sVtype
p249
Vinterface
p250
sVname
p251
Vnic1
p252
sa(dp253
Vaddresses
p254
(lp255
sVnetwork
p256
VInternalApi
p257
sVprimary
p258
I01
sVmtu
p259
I1350
sg71
(lp260
sVroutes
p261
(lp262
sVuse_dhcp
p263
I00
sVtype
p264
Vinterface
p265
sVname
p266
Vnic3
p267
sa(dp268
Vaddresses
p269
(lp270
sVnetwork
p271
VStorage
p272
sVprimary
p273
I01
sVmtu
p274
I1350
sg71
(lp275
sVroutes
p276
(lp277
sVuse_dhcp
p278
I00
sVtype
p279
Vinterface
p280
sVname
p281
Vnic4
p282
sa(dp283
Vdns_servers
p284
V{get_param: DnsServers}
p285
sVaddresses
p286
(lp287
sVnetwork
p288
VTenant
p289
sVmtu
p290
I-1
sg71
(lp291
(dp292
Vaddresses
p293
(lp294
sVnetwork
p295
VNone
p296
sVprimary
p297
I01
sVmtu
p298
I1350
sg71
(lp299
sVroutes
p300
(lp301
sVuse_dhcp
p302
I00
sVtype
p303
Vinterface
p304
sVname
p305
Vnic6
p306
sasVroutes
p307
(lp308
sVuse_dhcp
p309
I00
sVtype
p310
Vovs_bridge
p311
sVname
p312
Vbr-tenant
p313
sa(dp314
Vaddresses
p315
(lp316
sVnetwork
p317
VStorageMgmt
p318
sVprimary
p319
I01
sVmtu
p320
I1350
sg71
(lp321
sVroutes
p322
(lp323
sVuse_dhcp
p324
I00
sVtype
p325
Vinterface
p326
sVname
p327
Vnic5
p328
sass.
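The blob above is a protocol-0 Python pickle describing the test environment: a global_data map (control-plane route/mask, per-network CIDRs and VLANs) plus per-role NIC layouts keyed by template filename under data. A minimal inspection sketch, assuming the blob is saved as env.pickle (hypothetical name):

# Minimal inspection sketch; the filename is a placeholder. Only unpickle
# data you trust -- pickle.load can execute arbitrary code.
import pickle
from pprint import pprint

with open("env.pickle", "rb") as f:
    env = pickle.load(f)

pprint(env["global_data"]["control"])  # ctlplane route, mask, EC2 metadata IP
pprint(sorted(env["data"]))            # controller.yaml, compute.yaml, ...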


@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDlzCCAn+gAwIBAgIJAMi8i1B5OWQCMA0GCSqGSIb3DQEBCwUAMGIxCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxHjAcBgNVBAMMFTIwMDE6ZGI4OmZkMDA6MTAwMDo6MTAeFw0x
NjEyMTMxMjE4MDlaFw0yNjEyMTExMjE4MDlaMGIxCzAJBgNVBAYTAlhYMRUwEwYD
VQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQx
HjAcBgNVBAMMFTIwMDE6ZGI4OmZkMDA6MTAwMDo6MTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAKgpCyl3kJKDWBWJ2rLRaLNysJ1UENa/9YO5Ssw46Vob
hHuxHIydHrht/6RJOcBD5sd6qZemyPu0D/EKYRzRbQxxI6EJ9jYaUJJl+3O79el2
ZYrkYuj0mKvdXNo4Esgs8O/EWqA7tyJWAsxr+5A3VzafekOaNhTbL2QiaUEs+iKn
hghNIpgQzzoysYiVk3JERzz8fV0ADk9P2oYN21ZX2dKxIKC5h17fwXFPVanf88Tr
0R3Zgxx/ipw+n6oV8WT3n0YSFMZmjOIjUczJqIedLlcPJZ1eiwFnBdTH6CG1d+fA
I/gM4M/g2OCY6uyyEZkYJ/ruS5rZc2uawthZPXuCnS8CAwEAAaNQME4wHQYDVR0O
BBYEFE9jNM+rReAuIerBa5yxWf4mUz4/MB8GA1UdIwQYMBaAFE9jNM+rReAuIerB
a5yxWf4mUz4/MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAPGqxml
NJJuofnkaOXtFnZbYqho4fdSpCiVg8gB9ywH65BaUJOPIXPimeML1Bo1nyqBb3jX
6LOONKqf74NfJQ/HmBu2AfTMbDXtbLwt7mOCYNvEKCTmvnVJaJATn5r2L1mRnPdD
drStjHxqaKgJDOIo/khaQsDHaFwo8bV4VTrZ2X7ml6y5RjDPoSimiWt8J2/G3wKr
unPv1a7O1M0EsbrqF4RkxEfh9dVWWgRrMpnbPW3LqrXDZ0wdI33DC4vvTi1LZmH/
LbeRLQFMnzZWI5rctYAUFSfe/Ra4G4cMyg0xaZk3PqZPQRr0aA2SCphfuZ/TatUE
a4eXTxLMxeR9Hcs=
-----END CERTIFICATE-----
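The certificate above is a self-signed test artifact; its subject CN appears to be the IPv6 documentation address 2001:db8:fd00:1000::1. A hedged sketch for inspecting it with the cryptography package (the filename is a placeholder):

# Hedged inspection sketch; requires the `cryptography` package and assumes
# the PEM above is saved as test-ca.crt (placeholder name).
from cryptography import x509

with open("test-ca.crt", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

print(cert.subject.rfc4514_string())  # issuer == subject: self-signed test CA
print(cert.not_valid_before, cert.not_valid_after)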


@ -1,6 +0,0 @@
---
parameter_defaults:
ControllerSchedulerHints:
'capabilities:node': 'controller-%index%'
NovaComputeSchedulerHints:
'capabilities:node': 'compute-%index%'
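These hints only match nodes whose Ironic capabilities carry the corresponding tag ('%index%' expands to 0, 1, ... per role instance). A hedged sketch of the companion tagging step; the node name is a placeholder and the command form follows the usual TripleO predictable-placement pattern:

# Hedged companion step: tag an Ironic node so the 'controller-%index%'
# scheduler hint can match it. The node name is a placeholder.
import subprocess

subprocess.run([
    "openstack", "baremetal", "node", "set", "controller-node-0",
    "--property", "capabilities=node:controller-0,boot_option:local",
], check=True)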


@ -1,4 +0,0 @@
# Only a Controller role, since we're doing all-in-one testing via the multinode job
- name: Controller
  CountDefault: 1
  # Note: services are defined in multinode_major_upgrade.yaml


@ -1,15 +0,0 @@
"network_config": [
{
"type": "ovs_bridge",
"name": "br-ctlplane",
"ovs_extra": [
"br-set-external-id br-ctlplane bridge-id br-ctlplane"
],
"addresses": [
{
"ip_netmask": "{{PUBLIC_INTERFACE_IP}}"
}
],
"mtu": {{LOCAL_MTU}}
}
]
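The {{PUBLIC_INTERFACE_IP}} and {{LOCAL_MTU}} tokens are filled in before the snippet reaches os-net-config; note the file body is a bare key/value pair, not a complete JSON object. A minimal rendering sketch in which the substitution mechanism, filename, and values are all assumptions:

# Minimal rendering sketch; filename and values are placeholders, and the
# fragment is wrapped in braces because the file is a bare key/value pair.
import json

template = open("net-config.json.template").read()  # hypothetical filename
rendered = (template
            .replace("{{PUBLIC_INTERFACE_IP}}", "192.168.24.1/24")
            .replace("{{LOCAL_MTU}}", "1500"))
config = json.loads("{" + rendered + "}")
print(config["network_config"][0]["name"])  # -> br-ctlplane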