Implement local scripts for all gate tests

This patch implements the following:
 - scripts-library.sh which includes commonly used functions, variables
   and other preparation commands for all other scripts
 - bootstrap-ansible.sh which only deploys a selected version of ansible
   and ensures that any other requirements are prepared on the
   deployment host
 - bootstrap-aio.sh which runs all host preparation actions for an
   all-in-one build
 - gate-check-lint.sh which runs a lint and syntax check
 - gate-check-commit.sh which runs all actions required for a gate
   commit check, utilising the other scripts where required
 - run-smoke-test.sh which runs tempest from inside the utility container
 - run-playbooks.sh which runs the playbooks
 - the existing conf.d/swift.yml is renamed to be an example
   configuration - the example configurations can be used as
   documentation
 - etc/network/interfaces.d/aio_interfaces.cfg,
   etc/rpc_deploy/conf.d/swift.yml and
   etc/rpc_deploy/rpc_user_config.yml are now configurations used for
   the AIO deployment
 - a workaround for https://bugs.launchpad.net/bugs/1244589 to ensure
   that DHCP checksums are implemented by the host which is required for
   the smoke tests to work
 - the removal of the rpc heat templates as they're unusable in their
   current state
 - setting MAX_RETRIES to 0, ensuring that any failures cause an
   immediate commit check failure in the gate - this prevents the
   masking of failures by retry attempts

DocImpact
Co-Authored-By: Kevin Carter <kevin.carter@rackspace.com>
Closes-Bug: #1415883
Closes-Bug: #1417999
Closes-Bug: #1419807
Change-Id: I95242d48ad0fb055f16510803c8aa14dc183ac17
This commit is contained in:
Jesse Pretorius 2015-02-04 15:51:06 +00:00
parent b964ecbc3e
commit 6b12bf3663
18 changed files with 1392 additions and 1127 deletions

View File

@ -0,0 +1,45 @@
## Required network bridges: br-mgmt, br-vxlan, br-vlan and br-storage.
## All bridges use the host .100 address of their /22 range for the AIO build.
auto br-mgmt
iface br-mgmt inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
# No physical ports are attached; container veth interfaces are added to
# the bridge at runtime.
bridge_ports none
address 172.29.236.100
netmask 255.255.252.0
auto br-vxlan
iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports none
address 172.29.240.100
netmask 255.255.252.0
# To ensure ssh checksum is correct
up /sbin/iptables -A POSTROUTING -t mangle -p tcp --dport 22 -j CHECKSUM --checksum-fill
down /sbin/iptables -D POSTROUTING -t mangle -p tcp --dport 22 -j CHECKSUM --checksum-fill
# To ensure dhcp checksum is correct
up /sbin/iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM --checksum-fill
down /sbin/iptables -D POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM --checksum-fill
# To provide internet connectivity to instances
# NOTE(review): assumes eth0 is the host's external interface — confirm on
# the target image.
up /sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
down /sbin/iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
auto br-vlan
iface br-vlan inet manual
bridge_stp off
bridge_waitport 0
bridge_fd 0
# No physical ports are attached; this bridge carries flat/vlan provider
# networks and deliberately has no IP address (inet manual).
bridge_ports none
auto br-storage
iface br-storage inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports none
address 172.29.244.100
netmask 255.255.252.0

View File

@ -1,294 +1,22 @@
--- ---
# Copyright 2015, Rackspace US, Inc. global_overrides:
# swift:
# Licensed under the Apache License, Version 2.0 (the "License"); part_power: 8
# you may not use this file except in compliance with the License. storage_network: 'br-storage'
# You may obtain a copy of the License at replication_network: 'br-storage'
# drives:
# http://www.apache.org/licenses/LICENSE-2.0 - name: swift1.img
# - name: swift2.img
# Unless required by applicable law or agreed to in writing, software - name: swift3.img
# distributed under the License is distributed on an "AS IS" BASIS, mount_point: /srv
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. storage_policies:
# See the License for the specific language governing permissions and - policy:
# limitations under the License. name: default
# index: 0
# Overview default: True
# ======== swift-proxy_hosts:
# aio1:
# This file contains the configuration for the Object Storage (swift) ip: 172.29.236.100
# service. Only enable these options for environments that contain the swift_hosts:
# Object Storage service. For more information on Object Storage options, aio1:
# see the documentation at ip: 172.29.236.100
#
# http://docs.openstack.org/developer/swift/index.html
#
# The hierarchical structure of this file supports overrides for most
# options. However, for typical deployments, defining options closest
# to the top level provides a functional configuration.
#
# Configuration reference
# =======================
#
# Level: swift (required)
# Contains global options.
#
# Option: storage_network (optional, string)
# Network to use for object storage operations. Defaults to the management
# network if omitted.
# If using a storage network, specify the network bridge containing it,
# typically 'br-storage'.
#
# Option: repl_network (optional, string)
# Network to use for object replication operations. Defaults to the
# 'storage_network' value if omitted.
# If using a replication network, specify the network bridge containing it,
# typically 'br-repl'.
#
# Option: part_power (required, integer)
# Partition power. Immutable without rebuilding the rings.
# Applies to all rings unless overridden at the 'account' or 'container'
# levels or within a policy under the 'storage_policies' level.
#
# Option: repl_number (optional, integer)
# Number of replicas for each partition. Defaults to 3.
# Applies to all rings unless overridden at the 'account' or 'container'
# levels or within a policy under the 'storage_policies' level.
#
# Option: min_part_hours (optional, integer)
# Minimum time in hours between multiple moves of the same partition.
# Defaults to 1.
# Applies to all rings unless overridden at the 'account' or 'container'
# levels or within a policy under the 'storage_policies' level.
#
# Option: region (optional, integer)
# Region of a disk. Defaults to 1.
# Applies to all disks in all hosts unless overridden deeper in the
# structure.
#
# Option: zone (optional, integer)
# Zone of a disk. Defaults to 0.
# Applies to all disks in all hosts unless overridden deeper in the
# structure.
#
# Option: weight (optional, integer)
# Weight of a disk. Defaults to 100.
# Applies to all disks in all hosts unless overridden deeper in the
# structure.
#
# Option: mount_point (required, string)
# Top-level directory for mount points of disks. Defaults to /mnt.
# Applies to all hosts unless overridden deeper in the structure.
#
# Level: drives (required)
# Contains the mount points of disks.
# Applies to all hosts unless overridden deeper in the structure.
#
# Option: name (required, string)
# Mount point of a disk. Use one entry for each disk.
# Applies to all hosts unless overridden deeper in the structure.
#
# The following example shows disks mounted at /mnt/sda and /mnt/sdb
# on all storage hosts:
# mount_point: /mnt
# drives:
# - name: sda
# - name: sdb
#
# Level: account (optional)
# Contains 'min_part_hours' and 'repl_number' options specific to the
# account ring.
#
# Level: container (optional)
# Contains 'min_part_hours' and 'repl_number' options specific to the
# container ring.
#
# Level: storage_policies (required)
# Contains storage policies. Minimum one policy. One policy must include
# the 'index: 0' and 'default: True' options.
#
# Level: policy (required)
# Contains a storage policy.
#
# Option: name (required, string)
# Policy name.
#
# Option: index (required, integer)
# Policy index. One policy must include this option with a '0'
# value.
#
# Option: default (optional, boolean)
# Defines the default policy. One policy must include this option
# with a 'True' value.
#
# Option: deprecated (optional, boolean)
# Defines a deprecated policy.
#
# Note: The following levels and options override any values higher
# in the structure and generally apply to advanced deployments.
#
# Option: repl_number (optional, integer)
# Number of replicas of each partition in this policy.
#
# Option: min_part_hours (optional, integer)
# Minimum time between multiple moves of the same partition in this
# policy.
#
# Level: swift_proxy-hosts (required)
# Contains definitions for proxy hosts.
#
# Level: <value> (optional, string)
# Name of a proxy host. Typical deployments require at least three
# proxy hosts.
#
# Option: ip (required, string)
# IP address of the host.
#
# Level: swift_hosts (required)
# Contains definitions for storage hosts.
#
# Level: <value> (required, string)
# Name of a storage host. Typical deployments require at least three
# storage hosts.
#
# Option: ip (required, string)
# IP address of the host.
#
# Note: The following levels and options override any values higher
# in the structure and generally apply to advanced deployments.
#
# Level: container_vars (optional)
# Contains options specific to this host.
#
# Level: swift_vars (optional)
# Contains swift options specific to this host.
#
# Option: region (optional, integer)
# Region of all disks in this host.
#
# Option: zone (optional, integer)
# Zone of all disks in this host.
#
# Option: weight (optional, integer)
# Weight of all disks in this host.
#
# Level: groups (optional)
# Contains groups specific to this host.
# The following example shows a storage host with the account ring,
# container ring, and 'silver' storage policy:
# groups:
# - account
# - container
# - silver
#
# Level: drives (optional)
# Contains the mount points of disks specific to this host.
#
# Level or option: name (optional, string)
# Mount point of a disk specific to this host. Use one entry for
# each disk. Functions as a level for disks that contain additional
# options.
#
# Option: region (optional, integer)
# Region of a disk in this host.
#
# Option: zone (optional, integer)
# Zone of a disk in this host.
#
# Option: weight (optional, integer)
# Weight of a disk in this host.
#
# Level: groups (optional)
# Contains groups for a disk in this host.
# The following example shows a disk with the account ring,
# container ring, and 'silver' storage policy:
# groups:
# - account
# - container
# - silver
# Default (example) configuration
# ===============================
# Global options
# global_overrides:
# swift:
# storage_network: 'br-storage'
# replication_network: 'br-repl'
# part_power: 8
# repl_number: 3
# min_part_hours: 1
# region: 1
# zone: 0
# weight: 100
# mount_point: /mnt
# drives:
# - name: sdc
# - name: sdd
# - name: sde
# - name: sdf
# account:
# container:
# storage_policies:
# - policy:
# name: gold
# index: 0
# default: True
# - policy:
# name: silver
# index: 1
# repl_number: 3
# deprecated: True
# Proxy hosts
# swift-proxy_hosts:
# infra-node1:
# ip: 192.0.2.1
# infra-node2:
# ip: 192.0.2.2
# infra-node3:
# ip: 192.0.2.3
# Storage hosts
#
# The first three hosts contain options for typical deployments. Hosts
# four and five show options for more advanced deployments.
# swift_hosts:
# swift-node1:
# ip: 192.0.2.4
# swift-node2:
# ip: 192.0.2.5
# swift-node3:
# ip: 192.0.2.6
# swift-node4:
# ip: 192.0.2.7
# container_vars:
# swift_vars:
# zone: 3
# swift-node5:
# ip: 192.0.2.8
# container_vars:
# swift_vars:
# storage_ip: 198.51.100.8
# repl_ip: 203.0.113.8
# region: 3
# zone: 4
# weight: 200
# groups:
# - account
# - container
# - silver
# drives:
# - name: sdb
# storage_ip: 198.51.100.9
# repl_ip: 203.0.113.9
# weight: 75
# groups:
# - gold
# - name: sdc
# - name: sdd
# - name: sde
# - name: sdf

View File

@ -0,0 +1,294 @@
---
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Overview
# ========
#
# This file contains the configuration for the Object Storage (swift)
# service. Only enable these options for environments that contain the
# Object Storage service. For more information on Object Storage options,
# see the documentation at
#
# http://docs.openstack.org/developer/swift/index.html
#
# The hierarchical structure of this file supports overrides for most
# options. However, for typical deployments, defining options closest
# to the top level provides a functional configuration.
#
# Configuration reference
# =======================
#
# Level: swift (required)
# Contains global options.
#
# Option: storage_network (optional, string)
# Network to use for object storage operations. Defaults to the management
# network if omitted.
# If using a storage network, specify the network bridge containing it,
# typically 'br-storage'.
#
# Option: repl_network (optional, string)
# Network to use for object replication operations. Defaults to the
# 'storage_network' value if omitted.
# If using a replication network, specify the network bridge containing it,
# typically 'br-repl'.
#
# Option: part_power (required, integer)
# Partition power. Immutable without rebuilding the rings.
# Applies to all rings unless overridden at the 'account' or 'container'
# levels or within a policy under the 'storage_policies' level.
#
# Option: repl_number (optional, integer)
# Number of replicas for each partition. Defaults to 3.
# Applies to all rings unless overridden at the 'account' or 'container'
# levels or within a policy under the 'storage_policies' level.
#
# Option: min_part_hours (optional, integer)
# Minimum time in hours between multiple moves of the same partition.
# Defaults to 1.
# Applies to all rings unless overridden at the 'account' or 'container'
# levels or within a policy under the 'storage_policies' level.
#
# Option: region (optional, integer)
# Region of a disk. Defaults to 1.
# Applies to all disks in all hosts unless overridden deeper in the
# structure.
#
# Option: zone (optional, integer)
# Zone of a disk. Defaults to 0.
# Applies to all disks in all hosts unless overridden deeper in the
# structure.
#
# Option: weight (optional, integer)
# Weight of a disk. Defaults to 100.
# Applies to all disks in all hosts unless overridden deeper in the
# structure.
#
# Option: mount_point (required, string)
# Top-level directory for mount points of disks. Defaults to /mnt.
# Applies to all hosts unless overridden deeper in the structure.
#
# Level: drives (required)
# Contains the mount points of disks.
# Applies to all hosts unless overridden deeper in the structure.
#
# Option: name (required, string)
# Mount point of a disk. Use one entry for each disk.
# Applies to all hosts unless overridden deeper in the structure.
#
# The following example shows disks mounted at /mnt/sda and /mnt/sdb
# on all storage hosts:
# mount_point: /mnt
# drives:
# - name: sda
# - name: sdb
#
# Level: account (optional)
# Contains 'min_part_hours' and 'repl_number' options specific to the
# account ring.
#
# Level: container (optional)
# Contains 'min_part_hours' and 'repl_number' options specific to the
# container ring.
#
# Level: storage_policies (required)
# Contains storage policies. Minimum one policy. One policy must include
# the 'index: 0' and 'default: True' options.
#
# Level: policy (required)
# Contains a storage policy.
#
# Option: name (required, string)
# Policy name.
#
# Option: index (required, integer)
# Policy index. One policy must include this option with a '0'
# value.
#
# Option: default (optional, boolean)
# Defines the default policy. One policy must include this option
# with a 'True' value.
#
# Option: deprecated (optional, boolean)
# Defines a deprecated policy.
#
# Note: The following levels and options override any values higher
# in the structure and generally apply to advanced deployments.
#
# Option: repl_number (optional, integer)
# Number of replicas of each partition in this policy.
#
# Option: min_part_hours (optional, integer)
# Minimum time between multiple moves of the same partition in this
# policy.
#
# Level: swift_proxy-hosts (required)
# Contains definitions for proxy hosts.
#
# Level: <value> (optional, string)
# Name of a proxy host. Typical deployments require at least three
# proxy hosts.
#
# Option: ip (required, string)
# IP address of the host.
#
# Level: swift_hosts (required)
# Contains definitions for storage hosts.
#
# Level: <value> (required, string)
# Name of a storage host. Typical deployments require at least three
# storage hosts.
#
# Option: ip (required, string)
# IP address of the host.
#
# Note: The following levels and options override any values higher
# in the structure and generally apply to advanced deployments.
#
# Level: container_vars (optional)
# Contains options specific to this host.
#
# Level: swift_vars (optional)
# Contains swift options specific to this host.
#
# Option: region (optional, integer)
# Region of all disks in this host.
#
# Option: zone (optional, integer)
# Zone of all disks in this host.
#
# Option: weight (optional, integer)
# Weight of all disks in this host.
#
# Level: groups (optional)
# Contains groups specific to this host.
# The following example shows a storage host with the account ring,
# container ring, and 'silver' storage policy:
# groups:
# - account
# - container
# - silver
#
# Level: drives (optional)
# Contains the mount points of disks specific to this host.
#
# Level or option: name (optional, string)
# Mount point of a disk specific to this host. Use one entry for
# each disk. Functions as a level for disks that contain additional
# options.
#
# Option: region (optional, integer)
# Region of a disk in this host.
#
# Option: zone (optional, integer)
# Zone of a disk in this host.
#
# Option: weight (optional, integer)
# Weight of a disk in this host.
#
# Level: groups (optional)
# Contains groups for a disk in this host.
# The following example shows a disk with the account ring,
# container ring, and 'silver' storage policy:
# groups:
# - account
# - container
# - silver
# Default (example) configuration
# ===============================
# Global options
# global_overrides:
# swift:
# storage_network: 'br-storage'
# replication_network: 'br-repl'
# part_power: 8
# repl_number: 3
# min_part_hours: 1
# region: 1
# zone: 0
# weight: 100
# mount_point: /mnt
# drives:
# - name: sdc
# - name: sdd
# - name: sde
# - name: sdf
# account:
# container:
# storage_policies:
# - policy:
# name: gold
# index: 0
# default: True
# - policy:
# name: silver
# index: 1
# repl_number: 3
# deprecated: True
# Proxy hosts
# swift-proxy_hosts:
# infra-node1:
# ip: 192.0.2.1
# infra-node2:
# ip: 192.0.2.2
# infra-node3:
# ip: 192.0.2.3
# Storage hosts
#
# The first three hosts contain options for typical deployments. Hosts
# four and five show options for more advanced deployments.
# swift_hosts:
# swift-node1:
# ip: 192.0.2.4
# swift-node2:
# ip: 192.0.2.5
# swift-node3:
# ip: 192.0.2.6
# swift-node4:
# ip: 192.0.2.7
# container_vars:
# swift_vars:
# zone: 3
# swift-node5:
# ip: 192.0.2.8
# container_vars:
# swift_vars:
# storage_ip: 198.51.100.8
# repl_ip: 203.0.113.8
# region: 3
# zone: 4
# weight: 200
# groups:
# - account
# - container
# - silver
# drives:
# - name: sdb
# storage_ip: 198.51.100.9
# repl_ip: 203.0.113.9
# weight: 75
# groups:
# - gold
# - name: sdc
# - name: sdd
# - name: sde
# - name: sdf

View File

@ -1,191 +1,91 @@
--- ---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is the md5 of the environment file
# this will ensure consistency when deploying.
environment_version: 3511a43b8e4cc39af4beaaa852b5f917 environment_version: 3511a43b8e4cc39af4beaaa852b5f917
# User defined container networks in CIDR notation. The inventory generator
# assigns IP addresses to network interfaces inside containers from these
# ranges.
cidr_networks: cidr_networks:
# Management (same range as br-mgmt on the target hosts)
container: 172.29.236.0/22 container: 172.29.236.0/22
# Service (optional, same range as br-snet on the target hosts)
snet: 172.29.248.0/22
# Tunnel endpoints for VXLAN tenant networks
# (same range as br-vxlan on the target hosts)
tunnel: 172.29.240.0/22 tunnel: 172.29.240.0/22
# Storage (same range as br-storage on the target hosts)
storage: 172.29.244.0/22 storage: 172.29.244.0/22
# User defined list of consumed IP addresses that may intersect
# with the provided CIDR.
used_ips: used_ips:
- 172.29.236.1,172.29.236.50 - 172.29.236.1,172.29.236.50
- 172.29.244.1,172.29.244.50 - 172.29.244.1,172.29.244.50
# As a user you can define anything that you may wish to "globally"
# override from within the rpc_deploy configuration file. Anything
# specified here will take precedence over anything else any where.
global_overrides: global_overrides:
# Size of cinder volumes container. Default is 5GB. internal_lb_vip_address: 172.29.236.100
# Space must be added for cinder to have enough space to convert images.
# For example, to be able to convert 100GB images, set this value to 105.
#cinder_volume_lv_size_gb: 105GB
# Internal Management vip address
internal_lb_vip_address: 172.29.236.1
# External DMZ VIP address
external_lb_vip_address: 192.168.1.1 external_lb_vip_address: 192.168.1.1
# Name of load balancer
lb_name: lb_name_in_core
# Bridged interface to use with tunnel type networks
tunnel_bridge: "br-vxlan" tunnel_bridge: "br-vxlan"
# Bridged interface to build containers with
management_bridge: "br-mgmt" management_bridge: "br-mgmt"
# Define your Add on container networks.
# group_binds: bind a provided network to a particular group
# container_bridge: instructs inventory where a bridge is plugged
# into on the host side of a veth pair
# container_interface: interface name within a container
# ip_from_q: name of a cidr to pull an IP address from
# type: Networks must have a type. types are: ["raw", "vxlan", "flat", "vlan"]
# range: Optional value used in "vxlan" and "vlan" type networks
# net_name: Optional value used in mapping network names used in neutron ml2
# You must have a management network.
provider_networks: provider_networks:
- network: - network:
container_bridge: "br-mgmt"
container_interface: "eth1"
type: "raw"
ip_from_q: "container"
group_binds: group_binds:
- all_containers - all_containers
- hosts - hosts
type: "raw"
container_bridge: "br-mgmt"
container_interface: "eth1"
ip_from_q: "container"
- network: - network:
group_binds:
- glance_api
- cinder_api
- cinder_volume
- nova_compute
# If you are using the storage network for swift_proxy add it to the group_binds
# - swift_proxy
type: "raw"
container_bridge: "br-storage"
container_interface: "eth2"
ip_from_q: "storage"
- network:
group_binds:
- glance_api
- nova_compute
- neutron_linuxbridge_agent
type: "raw"
container_bridge: "br-snet"
container_interface: "eth3"
ip_from_q: "snet"
- network:
group_binds:
- neutron_linuxbridge_agent
container_bridge: "br-vxlan" container_bridge: "br-vxlan"
container_interface: "eth10" container_interface: "eth10"
ip_from_q: "tunnel"
type: "vxlan" type: "vxlan"
ip_from_q: "tunnel"
range: "1:1000" range: "1:1000"
net_name: "vxlan" net_name: "vxlan"
- network:
group_binds: group_binds:
- neutron_linuxbridge_agent - neutron_linuxbridge_agent
- network:
container_bridge: "br-vlan" container_bridge: "br-vlan"
container_interface: "eth11" container_interface: "eth11"
type: "flat" type: "flat"
net_name: "vlan" net_name: "flat"
- network:
group_binds: group_binds:
- neutron_linuxbridge_agent - neutron_linuxbridge_agent
- network:
container_bridge: "br-vlan" container_bridge: "br-vlan"
container_interface: "eth11" container_interface: "eth11"
type: "vlan" type: "vlan"
range: "1:1" range: "1:1"
net_name: "vlan" net_name: "vlan"
group_binds:
- neutron_linuxbridge_agent
- network:
container_bridge: "br-storage"
container_interface: "eth2"
type: "raw"
ip_from_q: "storage"
group_binds:
- glance_api
- cinder_api
- cinder_volume
- nova_compute
# - swift_proxy
# User defined Infrastructure Hosts, this should be a required group
infra_hosts: infra_hosts:
infra1: aio1:
ip: 172.29.236.100 ip: 172.29.236.100
## You can override the maas_filesystem_monitors here
# container_vars:
# maas_filesystem_overrides:
# - filesystem: /
# threshold: 85.0
# - filesystem: /boot
# threshold: 90.0
infra2:
ip: 172.29.236.101
infra3:
ip: 172.29.236.102
# User defined Compute Hosts, this should be a required group
compute_hosts: compute_hosts:
compute1: aio1:
ip: 172.29.236.103 ip: 172.29.236.100
# User defined Storage Hosts, this should be a required group
storage_hosts: storage_hosts:
cinder1: aio1:
ip: 172.29.236.104 ip: 172.29.236.100
# "container_vars" can be set outside of all other options as
# host specific optional variables.
container_vars: container_vars:
# If you would like to define a cinder availablility zone this can
# be done with the namespaced variable.
cinder_storage_availability_zone: cinderAZ_1
# When creating more than ONE availablity zone you should define a
# sane default for the system to use when schedulng volume creation.
cinder_default_availability_zone: cinderAZ_1
# In this example we are defining what cinder volumes are
# on a given host.
cinder_backends: cinder_backends:
# if the "limit_container_types" argument is set, within
# the top level key of the provided option the inventory
# process will perform a string match on the container name with
# the value found within the "limit_container_types" argument.
# If any part of the string found within the container
# name the options are appended as host_vars inside of inventory.
limit_container_types: cinder_volume limit_container_types: cinder_volume
lvm: lvm:
volume_group: cinder-volumes volume_group: cinder-volumes
volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver
volume_backend_name: LVM_iSCSI volume_backend_name: LVM_iSCSI
cinder2:
ip: 172.29.236.105
container_vars:
cinder_storage_availability_zone: cinderAZ_2
cinder_default_availability_zone: cinderAZ_1
cinder_backends:
limit_container_types: cinder_volume
lvm_ssd:
volume_group: cinder-volumes
volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver
volume_backend_name: LVM_iSCSI_SSD
# User defined Logging Hosts, this should be a required group
log_hosts: log_hosts:
logger1: aio1:
ip: 172.29.236.107 ip: 172.29.236.100
# User defined Networking Hosts, this should be a required group
network_hosts: network_hosts:
network1: aio1:
ip: 172.29.236.108 ip: 172.29.236.100
haproxy_hosts:
aio1:
ip: 172.29.236.100

View File

@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTICE:
# This script is purpose built to resolve an issue within neutron
# where packet checksums are being dropped.
# Launchpad issue:
# https://bugs.launchpad.net/bugs/1244589
#
# Open review:
# https://review.openstack.org/#/c/148718/
#
# TODO(cloudnull) remove this script once the bug is fixed.
#
# Idempotently adds a mangle-table CHECKSUM rule for DHCP replies
# (udp dport 68) for both the ipv4 and ipv6 firewalls, skipping either
# stack when its iptables binary is not installed.

# Iptables path, used for ipv4 firewall. Empty when the binary is absent;
# 'command -v' is the portable replacement for 'which'.
IPTABLES=$(command -v iptables)
if [ -n "${IPTABLES}" ]; then
  # Only add the rule when no equivalent CHECKSUM rule exists yet; the awk
  # filter matches the rule fields as rendered by 'iptables -t mangle -nL'.
  if [ -z "$(${IPTABLES} -t mangle -nL | awk '$4 == "0.0.0.0/0" && $5 == "0.0.0.0/0" && $9 == "fill"')" ]; then
    "${IPTABLES}" -A POSTROUTING \
      -t mangle \
      -p udp \
      --dport 68 \
      -j CHECKSUM \
      --checksum-fill
  fi
fi

# Ip6tables path, used for ipv6 firewall. Same idempotency check as above,
# with field positions shifted for the ipv6 listing format.
IP6TABLES=$(command -v ip6tables)
if [ -n "${IP6TABLES}" ]; then
  if [ -z "$(${IP6TABLES} -t mangle -nL | awk '$3 == "::/0" && $4 == "::/0" && $8 == "fill"')" ]; then
    "${IP6TABLES}" -A POSTROUTING \
      -t mangle \
      -p udp \
      --dport 68 \
      -j CHECKSUM \
      --checksum-fill
  fi
fi

View File

@ -84,3 +84,24 @@
notify: Restart os service notify: Restart os service
tags: tags:
- neutron_config - neutron_config
- name: Drop iptables checksum fix
copy:
src: "post-up-checksum-rules.sh"
dest: "/etc/network/if-up.d/post-up-checksum-rules.sh"
owner: "root"
group: "root"
mode: "0755"
when: >
inventory_hostname in groups['neutron_linuxbridge_agent']
tags:
- neutron_config
- neutron_checksum_fix
- name: Run iptables checksum fix
command: /etc/network/if-up.d/post-up-checksum-rules.sh
when: >
inventory_hostname in groups['neutron_linuxbridge_agent']
tags:
- neutron_config
- neutron_checksum_fix

148
scripts/bootstrap-aio.sh Executable file
View File

@ -0,0 +1,148 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Prepare a host for an all-in-one (AIO) build: install base packages,
# optionally flush iptables, enable root ssh login, create loopback-backed
# storage for swap/cinder/swift and bring up the AIO network bridges.
# Helper functions (info_block, get_instance_info, ssh_key_create,
# configure_diskspace, loopback_create) are defined in scripts-library.sh;
# their exact behavior is not visible here.
## Shell Opts ----------------------------------------------------------------
# -e: abort on error; -u: error on unset vars; -v: echo script lines;
# +x: command tracing off until the library is sourced.
set -e -u -v +x
## Vars
# Both toggles are environment-overridable; they default to "yes".
DEPLOY_SWIFT=${DEPLOY_SWIFT:-"yes"}
FLUSH_IPTABLES=${FLUSH_IPTABLES:-"yes"}
## Functions -----------------------------------------------------------------
# If info_block is not yet defined this command fails, which triggers the
# sourcing of the shared library that provides it (guard against re-sourcing).
info_block "Checking for required libraries." || source $(dirname ${0})/scripts-library.sh
## Main ----------------------------------------------------------------------
# Enable logging of all commands executed
set -x
# update the package cache and install required packages
apt-get update && apt-get install -y \
python-dev \
python2.7 \
build-essential \
curl \
git-core \
ipython \
tmux \
vim \
vlan \
bridge-utils \
lvm2 \
xfsprogs \
linux-image-extra-$(uname -r)
# output diagnostic information
# NOTE(review): get_instance_info appears to disable tracing internally,
# hence the re-enable with 'set -x' — confirm in scripts-library.sh.
get_instance_info && set -x
if [ "${FLUSH_IPTABLES}" == "yes" ]; then
# Flush all the iptables rules set by openstack-infra
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
fi
# Ensure newline at end of file (missing on Rackspace public cloud Trusty image)
if ! cat -E /etc/ssh/sshd_config | tail -1 | grep -q "\$$"; then
echo >> /etc/ssh/sshd_config
fi
# Ensure that sshd permits root login, or ansible won't be able to connect
if grep -q "^PermitRootLogin" /etc/ssh/sshd_config; then
sed -i 's/^PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
else
echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config
fi
# create /opt if it doesn't already exist
if [ ! -d "/opt" ];then
mkdir /opt
fi
# create /etc/rc.local if it doesn't already exist
if [ ! -f "/etc/rc.local" ];then
touch /etc/rc.local
chmod +x /etc/rc.local
fi
# ensure that the ssh key exists and is an authorized_key
ssh_key_create
# prepare the storage appropriately
configure_diskspace
# build the loopback drive for swap to use
# NOTE(review): loopback_create args look like (path, size, allocation,
# usage) — confirm against its definition in scripts-library.sh.
loopback_create /opt/swap.img 1024M thick swap
# Ensure swap will be used on the host
sysctl -w vm.swappiness=10 | tee -a /etc/sysctl.conf
# build the loopback drive for cinder to use
# but only if the cinder-volumes vg doesn't already exist
if ! vgs cinder-volumes > /dev/null 2>&1; then
CINDER="cinder.img"
loopback_create /opt/${CINDER} 10G thin rc
# Resolve the loop device losetup attached to the cinder image.
CINDER_DEVICE=$(losetup -a | awk -F: "/${CINDER}/ {print \$1}")
pvcreate ${CINDER_DEVICE}
pvscan
vgcreate cinder-volumes ${CINDER_DEVICE}
fi
# build the loopback drives for swift to use
if [ "${DEPLOY_SWIFT}" == "yes" ]; then
for SWIFT in swift1.img swift2.img swift3.img; do
loopback_create /opt/${SWIFT} 10G thin none
# Add the fstab entry once, then format and mount only if not yet mounted.
if ! grep -q "^/opt/${SWIFT}" /etc/fstab; then
echo "/opt/${SWIFT} /srv/${SWIFT} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
fi
if ! mount | grep -q "^/opt/${SWIFT}"; then
mkfs.xfs -f /opt/${SWIFT}
mkdir -p /srv/${SWIFT}
mount /srv/${SWIFT}
fi
done
fi
# copy the required interfaces configuration file into place
# (source path is relative to the repository checkout, target is absolute)
IFACE_CFG_SOURCE="etc/network/interfaces.d/aio_interfaces.cfg"
IFACE_CFG_TARGET="/${IFACE_CFG_SOURCE}"
cp ${IFACE_CFG_SOURCE} ${IFACE_CFG_TARGET}
# Ensure the network source is in place
if ! grep -q "^source /etc/network/interfaces.d/\*.cfg$" /etc/network/interfaces; then
echo -e "\nsource /etc/network/interfaces.d/*.cfg" | tee -a /etc/network/interfaces
fi
# Set base DNS to google, ensuring consistent DNS in different environments
echo -e 'nameserver 8.8.8.8\nnameserver 8.8.4.4' | tee /etc/resolv.conf
# Bring up the new interfaces
# '|| true' keeps the loop going (and the script alive under set -e) when an
# interface is already up or fails to come up.
for iface in $(awk '/^iface/ {print $2}' ${IFACE_CFG_TARGET}); do
/sbin/ifup $iface || true
done
# output an updated set of diagnostic information
get_instance_info
# Final message
info_block "The system has been prepared for an all-in-one build."

111
scripts/bootstrap-ansible.sh Executable file
View File

@ -0,0 +1,111 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
# Exit on error/unset variables and echo script lines (-v); command
# tracing (-x) is toggled explicitly further down
set -e -u -v +x
## Variables -----------------------------------------------------------------
ANSIBLE_DEPLOY_METHOD="pip"
ANSIBLE_GIT_REPO="https://github.com/ansible/ansible"
ANSIBLE_GIT_RELEASE="${ANSIBLE_GIT_RELEASE:-1.6.10}"
ANSIBLE_WORKING_DIR="/opt/ansible_v${ANSIBLE_GIT_RELEASE}"
GET_PIP_URL="${GET_PIP_URL:-https://mirror.rackspace.com/rackspaceprivatecloud/downloads/get-pip.py}"
## Functions -----------------------------------------------------------------
# If info_block is not yet defined the first command fails, which pulls
# in the shared function library from this script's directory
info_block "Checking for required libraries." || source $(dirname ${0})/scripts-library.sh
## Main ----------------------------------------------------------------------
# Enable logging of all commands executed
set -x
# Install the base packages
apt-get update && apt-get -y install git python-all python-dev curl
# Install pip if it is not already available
if [ ! "$(which pip)" ];then
curl ${GET_PIP_URL} > /opt/get-pip.py
# Prefer python2 explicitly, falling back to the default python
python2 /opt/get-pip.py || python /opt/get-pip.py
fi
if [ "${ANSIBLE_DEPLOY_METHOD}" == "git" ]; then
# If the working directory exists remove it
if [ -d "${ANSIBLE_WORKING_DIR}" ];then
rm -rf "${ANSIBLE_WORKING_DIR}"
fi
# Clone down the base ansible source
git clone "${ANSIBLE_GIT_REPO}" "${ANSIBLE_WORKING_DIR}"
pushd "${ANSIBLE_WORKING_DIR}"
git checkout "v${ANSIBLE_GIT_RELEASE}"
git submodule update --init --recursive
popd
# Install requirements if there are any
if [ -f "${ANSIBLE_WORKING_DIR}/requirements.txt" ];then
pip2 install -r "${ANSIBLE_WORKING_DIR}/requirements.txt" || pip install -r "${ANSIBLE_WORKING_DIR}/requirements.txt"
fi
# Install ansible
pip2 install "${ANSIBLE_WORKING_DIR}" || pip install "${ANSIBLE_WORKING_DIR}"
else
# Use pip to install ansible
pip install ansible==${ANSIBLE_GIT_RELEASE}
fi
# Disable command tracing before printing the status banner
set +x
info_block "Ansible is now bootstrapped and ready for use."
# Create openstack ansible wrapper tool. The heredoc below is written
# verbatim to /usr/local/bin/openstack-ansible; escaped \$ expansions
# are evaluated when the wrapper runs, not here.
cat > /usr/local/bin/openstack-ansible <<EOF
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
# Openstack wrapper tool to ease the use of ansible with multiple variable files.
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Discover the variable files.
VAR1="\$(for i in \$(ls /etc/*_deploy/*user_*.yml); do echo -ne "-e @\$i "; done)"
# Provide information on the discovered variables.
echo -e "\n--- [ Variable files ] ---\n \"\${VAR1}\""
# Run the ansible playbook command.
\$(which ansible-playbook) \${VAR1} \$@
EOF
set -x
# Ensure wrapper tool is executable
chmod +x /usr/local/bin/openstack-ansible
# Disable logging of all commands executed
set +x
info_block "The openstack-ansible convenience script has been created."

116
scripts/gate-check-commit.sh Executable file
View File

@ -0,0 +1,116 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Shell Opts ----------------------------------------------------------------
set -e -u -v +x
## Variables -----------------------------------------------------------------
ADMIN_PASSWORD=${ADMIN_PASSWORD:-"secrete"}
BOOTSTRAP_ANSIBLE=${BOOTSTRAP_ANSIBLE:-"yes"}
BOOTSTRAP_AIO=${BOOTSTRAP_AIO:-"yes"}
DEPLOY_SWIFT=${DEPLOY_SWIFT:-"yes"}
DEPLOY_TEMPEST=${DEPLOY_TEMPEST:-"no"}
RUN_PLAYBOOKS=${RUN_PLAYBOOKS:-"yes"}
RUN_TEMPEST=${RUN_TEMPEST:-"no"}
CONFIG_PREFIX=${CONFIG_PREFIX:-"rpc"}
PLAYBOOK_DIRECTORY=${PLAYBOOK_DIRECTORY:-"${CONFIG_PREFIX}_deployment"}
ANSIBLE_PARAMETERS=${ANSIBLE_PARAMETERS:-"--forks 10 -vvvv"}
## Functions -----------------------------------------------------------------
# If info_block is not yet defined the first command fails, which pulls
# in the shared function library from this script's directory
info_block "Checking for required libraries." || source $(dirname ${0})/scripts-library.sh
## Main ----------------------------------------------------------------------
# ensure that the current kernel can support vxlan
if ! modprobe vxlan; then
MINIMUM_KERNEL_VERSION=$(awk '/required_kernel/ {print $2}' ${PLAYBOOK_DIRECTORY}/inventory/group_vars/all.yml)
info_block "A minimum kernel version of ${MINIMUM_KERNEL_VERSION} is required for vxlan support."
exit 1
fi
# Get initial host information and reset verbosity
set +x && get_instance_info && set -x
# Bootstrap ansible if required; sourced rather than executed so any
# variables it sets remain in scope here
if [ "${BOOTSTRAP_ANSIBLE}" == "yes" ]; then
source $(dirname ${0})/bootstrap-ansible.sh
fi
# Bootstrap an AIO setup if required
if [ "${BOOTSTRAP_AIO}" == "yes" ]; then
source $(dirname ${0})/bootstrap-aio.sh
fi
# Get updated host information and reset verbosity
set +x && get_instance_info && set -x
# Install requirements (try pip2 first, fall back to pip)
pip2 install -r requirements.txt || pip install -r requirements.txt
# Copy the base etc files; skipped entirely when a previous run (or the
# deployer) has already created the target directory
if [ ! -d "/etc/${CONFIG_PREFIX}_deploy" ];then
cp -R etc/${CONFIG_PREFIX}_deploy /etc/
# Generate the passwords
USER_VARS_PATH="/etc/${CONFIG_PREFIX}_deploy/user_variables.yml"
# Adjust any defaults to suit the AIO
# commented lines are removed by pw-token gen, so this substitution must
# happen prior.
sed -i "s/# nova_virt_type:.*/nova_virt_type: qemu/" ${USER_VARS_PATH}
./scripts/pw-token-gen.py --file ${USER_VARS_PATH}
# change the generated passwords for the OpenStack (admin) and Kibana (kibana) accounts
sed -i "s/keystone_auth_admin_password:.*/keystone_auth_admin_password: ${ADMIN_PASSWORD}/" ${USER_VARS_PATH}
sed -i "s/kibana_password:.*/kibana_password: ${ADMIN_PASSWORD}/" ${USER_VARS_PATH}
if [ "${DEPLOY_SWIFT}" == "yes" ]; then
# ensure that glance is configured to use swift as its backing store
sed -i "s/glance_default_store:.*/glance_default_store: swift/" ${USER_VARS_PATH}
sed -i "s/glance_swift_store_auth_address:.*/glance_swift_store_auth_address: '{{ auth_identity_uri }}'/" ${USER_VARS_PATH}
sed -i "s/glance_swift_store_container:.*/glance_swift_store_container: glance_images/" ${USER_VARS_PATH}
sed -i "s/glance_swift_store_key:.*/glance_swift_store_key: '{{ glance_service_password }}'/" ${USER_VARS_PATH}
sed -i "s/glance_swift_store_region:.*/glance_swift_store_region: RegionOne/" ${USER_VARS_PATH}
sed -i "s/glance_swift_store_user:.*/glance_swift_store_user: 'service:glance'/" ${USER_VARS_PATH}
fi
if [ "${BOOTSTRAP_AIO}" == "yes" ]; then
# adjust the default user configuration for the AIO
USER_CONFIG_PATH="/etc/${CONFIG_PREFIX}_deploy/${CONFIG_PREFIX}_user_config.yml"
ENV_CONFIG_PATH="/etc/${CONFIG_PREFIX}_deploy/${CONFIG_PREFIX}_environment.yml"
# record the environment file's md5 so later drift is detectable
sed -i "s/environment_version: .*/environment_version: $(md5sum ${ENV_CONFIG_PATH} | awk '{print $1}')/" ${USER_CONFIG_PATH}
# use eth0's global IPv4 address as the external load balancer VIP
SERVER_IP_ADDRESS="$(ip -o -4 addr show dev eth0 | awk -F '[ /]+' '/global/ {print $4}')"
sed -i "s/external_lb_vip_address: .*/external_lb_vip_address: ${SERVER_IP_ADDRESS}/" ${USER_CONFIG_PATH}
if [ "${DEPLOY_SWIFT}" == "yes" ]; then
# add the swift proxy host network provider map
sed -i 's/# - swift_proxy/- swift_proxy/' ${USER_CONFIG_PATH}
fi
fi
fi
# Run the ansible playbooks if required
if [ "${RUN_PLAYBOOKS}" == "yes" ]; then
source $(dirname ${0})/run-playbooks.sh
fi
# Run the tempest tests if required
# NOTE(review): this sources run-tempest.sh while the commit message
# refers to run-smoke-test.sh -- confirm the intended script name
if [ "${RUN_TEMPEST}" == "yes" ]; then
source $(dirname ${0})/run-tempest.sh
fi

73
scripts/gate-check-lint.sh Executable file
View File

@ -0,0 +1,73 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Shell Opts ----------------------------------------------------------------
set -e -u -v +x
## Variables -----------------------------------------------------------------
BOOTSTRAP_ANSIBLE=${BOOTSTRAP_ANSIBLE:-"yes"}
PLAYBOOK_PATH=${PLAYBOOK_PATH:-"rpc_deployment/playbooks"}
## Functions -----------------------------------------------------------------
# If info_block is not yet defined the first command fails, which pulls
# in the shared function library from this script's directory
info_block "Checking for required libraries." || source $(dirname ${0})/scripts-library.sh
## Main ----------------------------------------------------------------------
# Enable logging of all commands executed
set -x
# Bootstrap ansible if required
if [ "${BOOTSTRAP_ANSIBLE}" == "yes" ]; then
source $(dirname ${0})/bootstrap-ansible.sh
fi
# Check that both pip and pip2 are available
if ! ( which pip > /dev/null && which pip2 > /dev/null ); then
info_block "ERROR: Please install pip before proceeding."
exit 1
fi
# Check whether ansible-playbook is available
if ! which ansible-playbook > /dev/null; then
info_block "ERROR: Please install ansible before proceeding."
exit 1
fi
# Install the development requirements, falling back to just
# ansible-lint when no dev-requirements file is present
if [ -f dev-requirements.txt ]; then
pip2 install -r dev-requirements.txt || pip install -r dev-requirements.txt
else
pip2 install ansible-lint || pip install ansible-lint
fi
# Create a minimal localhost-only inventory for the sanity checks
echo -e '[all]\nlocalhost ansible_connection=local' | tee local_only_inventory
# Do a basic syntax check on all playbooks and roles; two playbooks
# are excluded as they are not stand-alone runnable
info_block "Running Syntax Check"
ansible-playbook -i local_only_inventory --syntax-check \
$(find ${PLAYBOOK_PATH} -type f -name "*.yml" \
! -name "os-service-config-update.yml" \
! -name "host-network-setup.yml")
# Perform a lint check on all playbooks and roles
info_block "Running Lint Check"
ansible-lint --version
ansible-lint $(find ${PLAYBOOK_PATH} -type f -name "*.yml" \
! -name "os-service-config-update.yml" \
! -name "host-network-setup.yml")

View File

@ -1,14 +0,0 @@
#!/usr/bin/env bash
# Destroy all containers in a lab environment. The lab name may be
# passed as the first argument and defaults to uklab16_20.
set -e -u -v -x
LAB=${1:-"uklab16_20"}
# Go to rpc_deployment directory
pushd ../rpc_deployment
# Delete all containers by running the destroy playbook against the
# 'all' group, using the lab's inventory, host_vars and overrides
ansible-playbook -i inventory/${LAB}.yml \
-e dinv=inventory/host_vars/${LAB}.yml \
-e @inventory/overrides/${LAB}.yml \
-e group=all \
setup/destroy-containers.yml
popd

View File

@ -1,488 +1,9 @@
#!/usr/bin/env bash #!/bin/bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a placeholder until we change the macro for the
# os-ansible-deployment gate check script to use the
# newly split script set.
## Shell Opts ----------------------------------------------------------------
set -e -u -v -x set -e -u -v -x
## Vars source $(dirname ${0})/gate-check-commit.sh
FROZEN_REPO_URL=${FROZEN_REPO_URL:-"https://mirror.rackspace.com/rackspaceprivatecloud"}
MAX_RETRIES=${MAX_RETRIES:-5}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-"secrete"}
DEPLOY_SWIFT=${DEPLOY_SWIFT:-"yes"}
## Functions -----------------------------------------------------------------
# Print a diagnostic snapshot of the instance to stdout for the gate
# logs: memory, disks, mounts, LVM state, kernel, networking and
# (where available) CPU details and Xen provider data.
function get_instance_info(){
free -mt
df -h
mount
lsblk
fdisk -l /dev/xv* /dev/sd* /dev/vd*
uname -a
pvs
vgs
lvs
# lscpu is not present on all images, so only run it when found
which lscpu && lscpu
ip a
ip r
tracepath 8.8.8.8 -m 5
# On Xen-based hosts read the provider identity; the trailing ||:
# keeps a missing xenstore-read from aborting the script under set -e
which xenstore-read && xenstore-read vm-data/provider_data/provider ||:
}
function configure_hp_diskspace(){
# hp instances arrive with a 470GB drive (vdb) mounted at /mnt
# this function repurposes that for the lxc vg then creates a
# 50GB lv for /opt
mount |grep "/dev/vdb on /mnt" || return 0 # skip if not on hp
umount /mnt
# -ff -y forces PV creation even if vdb carries a stale LVM label
pvcreate -ff -y /dev/vdb
vgcreate lxc /dev/vdb
lvcreate -n opt -L50g lxc
mkfs.ext4 /dev/lxc/opt
mount /dev/lxc/opt /opt
# log the new disk layout for the gate logs
get_instance_info
}
# Generate a passphrase-less RSA keypair for root so ansible can ssh
# to the host without prompting.
key_create() {
  ssh-keygen -N '' -f /root/.ssh/id_rsa -t rsa
}
# Retry a command (passed as "$@") up to MAX_RETRIES times, exiting the
# whole script with status 1 if it never succeeds. Used to paper over
# transient failures (network blips, mirror errors) in gate runs.
# Globals:   MAX_RETRIES (read) - maximum number of attempts
# Arguments: the command and its arguments to run
# Returns:   0 on success; exits 1 after MAX_RETRIES failures
function successerator() {
  # Temporarily allow failures so attempts can be counted and retried.
  set +e
  local attempt=0
  local result=1
  while [ ${attempt} -lt ${MAX_RETRIES} ]; do
    attempt=$((attempt + 1))
    # Quote "$@" so arguments containing whitespace survive intact.
    "$@"
    result=$?
    # Stop as soon as the command succeeds. Checking the saved status
    # (instead of re-testing $? in the loop condition) fixes the
    # original bug where a success on the final attempt was still
    # reported as "Hit maximum number of retries" and exited 1.
    if [ ${result} -eq 0 ]; then
      break
    fi
  done
  if [ ${result} -ne 0 ]; then
    echo "Hit maximum number of retries, giving up..."
    exit 1
  fi
  set -e
}
# Run one deployment playbook (path relative to playbooks/) with the
# shared user variables, retrying via successerator on failure.
function install_bits() {
successerator ansible-playbook -e @/etc/rpc_deploy/user_variables.yml \
playbooks/$@
}
# Create a sparse loopback file and attach it to the next free loop
# device, skipping both steps if the file is already attached.
# Arguments: $1 - path of the backing file; $2 - size (dd seek syntax)
function loopback_create() {
LOOP_FILENAME=${1}
LOOP_FILESIZE=${2}
# losetup -a lists attachments as "/dev/loopN: ... (<file>)"
if ! losetup -a | grep "(${LOOP_FILENAME})$" > /dev/null; then
LOOP_DEVICE=$(losetup -f)
# count=0 with seek creates a sparse file of the requested size
dd if=/dev/zero of=${LOOP_FILENAME} bs=1 count=0 seek=${LOOP_FILESIZE}
losetup ${LOOP_DEVICE} ${LOOP_FILENAME}
fi
}
## Main ----------------------------------------------------------------------
# update the package cache and install required packages
apt-get update
apt-get install -y python-dev \
python2.7 \
build-essential \
curl \
git-core \
ipython \
tmux \
vim \
vlan \
bridge-utils \
lvm2 \
xfsprogs \
linux-image-extra-$(uname -r)
get_instance_info
# Flush all the iptables rules set by openstack-infra
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
# Ensure newline at end of file (missing on Rackspace public cloud Trusty image)
if ! cat -E /etc/ssh/sshd_config | tail -1 | grep -q "\$$"; then
echo >> /etc/ssh/sshd_config
fi
# Ensure that sshd permits root login, or ansible won't be able to connect
if grep "^PermitRootLogin" /etc/ssh/sshd_config > /dev/null; then
sed -i 's/^PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
else
echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config
fi
# ensure that the current kernel can support vxlan
if ! modprobe vxlan; then
MINIMUM_KERNEL_VERSION=$(awk '/required_kernel/ {print $2}' rpc_deployment/inventory/group_vars/all.yml)
echo "A minimum kernel version of ${MINIMUM_KERNEL_VERSION} is required for vxlan support."
echo "This build will not work without it."
exit 1
fi
# create /opt if it doesn't already exist
if [ ! -d "/opt" ];then
mkdir /opt
fi
configure_hp_diskspace
# create /etc/rc.local if it doesn't already exist
if [ ! -f "/etc/rc.local" ];then
touch /etc/rc.local
chmod +x /etc/rc.local
fi
# Make the system key used for bootstrapping self
if [ ! -d /root/.ssh ];then
mkdir -p /root/.ssh
chmod 700 /root/.ssh
fi
pushd /root/.ssh/
if [ ! -f "id_rsa" ];then
key_create
fi
if [ ! -f "id_rsa.pub" ];then
rm "id_rsa"
key_create
fi
KEYENTRY=$(cat id_rsa.pub)
if [ ! "$(grep \"$KEYENTRY\" authorized_keys)" ];then
echo "$KEYENTRY" | tee -a authorized_keys
fi
popd
# build the loopback drive for swap to use
if [ ! "$(swapon -s | grep -v Filename)" ]; then
dd if=/dev/zero of=/opt/swap.img bs=512M count=1
mkswap /opt/swap.img
echo '/opt/swap.img none swap loop 0 0' >> /etc/fstab
swapon -a
fi
# build the loopback drive for cinder to use
CINDER="cinder.img"
loopback_create /opt/${CINDER} 1000G
CINDER_DEVICE=$(losetup -a | awk -F: "/${CINDER}/ {print \$1}")
if ! pvs ${CINDER_DEVICE} > /dev/null; then
pvcreate ${CINDER_DEVICE}
pvscan
fi
if ! vgs cinder-volumes > /dev/null; then
vgcreate cinder-volumes ${CINDER_DEVICE}
fi
# ensure that the cinder loopback is enabled after reboot
if ! grep ${CINDER} /etc/rc.local; then
sed -i "\$i losetup \$(losetup -f) /opt/${CINDER}" /etc/rc.local
fi
if [ "${DEPLOY_SWIFT}" == "yes" ]; then
# build the loopback drives for swift to use
for SWIFT in swift1.img swift2.img swift3.img; do
loopback_create /opt/${SWIFT} 1000G
SWIFT_DEVICE=$(losetup -a | awk -F: "/${SWIFT}/ {print \$1}")
if ! grep "${SWIFT}" /etc/fstab > /dev/null; then
echo "/opt/${SWIFT} /srv/${SWIFT} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
fi
if ! grep "${SWIFT}" /proc/mounts > /dev/null; then
mkfs.xfs -f ${SWIFT_DEVICE}
mkdir -p /srv/${SWIFT}
mount /srv/${SWIFT}
fi
done
fi
# Copy the gate's repo to the expected location
mkdir -p /opt/ansible-lxc-rpc
cp -R * /opt/ansible-lxc-rpc
pushd /opt/ansible-lxc-rpc
# Copy the base etc files
if [ -d "/etc/rpc_deploy" ];then
rm -rf "/etc/rpc_deploy"
fi
cp -R /opt/ansible-lxc-rpc/etc/rpc_deploy /etc/
# Install pip
curl ${FROZEN_REPO_URL}/downloads/get-pip.py | python
# Install requirements
pip install -r /opt/ansible-lxc-rpc/requirements.txt
# Generate the passwords
/opt/ansible-lxc-rpc/scripts/pw-token-gen.py --file /etc/rpc_deploy/user_variables.yml
popd
# change the generated passwords for the OpenStack (admin) and Kibana (kibana) accounts
sed -i "s/keystone_auth_admin_password:.*/keystone_auth_admin_password: ${ADMIN_PASSWORD}/" /etc/rpc_deploy/user_variables.yml
sed -i "s/kibana_password:.*/kibana_password: ${ADMIN_PASSWORD}/" /etc/rpc_deploy/user_variables.yml
if [ "${DEPLOY_SWIFT}" == "yes" ]; then
# ensure that glance is configured to use swift
sed -i "s/glance_default_store:.*/glance_default_store: swift/" /etc/rpc_deploy/user_variables.yml
sed -i "s/glance_swift_store_auth_address:.*/glance_swift_store_auth_address: '{{ auth_identity_uri }}'/" /etc/rpc_deploy/user_variables.yml
sed -i "s/glance_swift_store_container:.*/glance_swift_store_container: glance_images/" /etc/rpc_deploy/user_variables.yml
sed -i "s/glance_swift_store_key:.*/glance_swift_store_key: '{{ glance_service_password }}'/" /etc/rpc_deploy/user_variables.yml
sed -i "s/glance_swift_store_region:.*/glance_swift_store_region: RegionOne/" /etc/rpc_deploy/user_variables.yml
sed -i "s/glance_swift_store_user:.*/glance_swift_store_user: 'service:glance'/" /etc/rpc_deploy/user_variables.yml
fi
# build the required user configuration
cat > /etc/rpc_deploy/rpc_user_config.yml <<EOF
---
environment_version: $(md5sum /etc/rpc_deploy/rpc_environment.yml | awk '{print $1}')
cidr_networks:
container: 172.29.236.0/22
tunnel: 172.29.240.0/22
storage: 172.29.244.0/22
used_ips:
- 172.29.236.1,172.29.236.50
- 172.29.244.1,172.29.244.50
global_overrides:
rpc_repo_url: ${FROZEN_REPO_URL}
internal_lb_vip_address: 172.29.236.100
external_lb_vip_address: $(ip -o -4 addr show dev eth0 | awk -F '[ /]+' '/global/ {print $4}')
tunnel_bridge: "br-vxlan"
management_bridge: "br-mgmt"
provider_networks:
- network:
container_bridge: "br-mgmt"
container_interface: "eth1"
ip_from_q: "container"
type: "raw"
group_binds:
- all_containers
- hosts
- network:
container_bridge: "br-vxlan"
container_interface: "eth10"
ip_from_q: "tunnel"
type: "vxlan"
range: "1:1000"
net_name: "vxlan"
group_binds:
- neutron_linuxbridge_agent
- network:
container_bridge: "br-vlan"
container_interface: "eth11"
type: "flat"
net_name: "vlan"
group_binds:
- neutron_linuxbridge_agent
- network:
container_bridge: "br-vlan"
container_interface: "eth11"
type: "vlan"
range: "1:1"
net_name: "vlan"
group_binds:
- neutron_linuxbridge_agent
- network:
container_bridge: "br-storage"
container_interface: "eth2"
ip_from_q: "storage"
type: "raw"
group_binds:
- glance_api
- cinder_api
- cinder_volume
- nova_compute
EOF
if [ "${DEPLOY_SWIFT}" == "yes" ]; then
# add the swift bits
cat >> /etc/rpc_deploy/rpc_user_config.yml <<EOF
- swift_proxy
EOF
cat > /etc/rpc_deploy/conf.d/swift.yml <<EOF
---
global_overrides:
swift:
part_power: 8
storage_network: 'br-storage'
replication_network: 'br-storage'
drives:
- name: swift1.img
- name: swift2.img
- name: swift3.img
mount_point: /srv
storage_policies:
- policy:
name: default
index: 0
default: True
swift-proxy_hosts:
aio1:
ip: 172.29.236.100
swift_hosts:
aio1:
ip: 172.29.236.100
EOF
fi
cat >> /etc/rpc_deploy/rpc_user_config.yml <<EOF
infra_hosts:
aio1:
ip: 172.29.236.100
compute_hosts:
aio1:
ip: 172.29.236.100
storage_hosts:
aio1:
ip: 172.29.236.100
container_vars:
cinder_backends:
limit_container_types: cinder_volume
lvm:
volume_group: cinder-volumes
volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver
volume_backend_name: LVM_iSCSI
log_hosts:
aio1:
ip: 172.29.236.100
network_hosts:
aio1:
ip: 172.29.236.100
haproxy_hosts:
aio1:
ip: 172.29.236.100
EOF
cat > /etc/network/interfaces.d/aio-bridges.cfg <<EOF
## Required network bridges; br-vlan, br-vxlan, br-mgmt.
auto br-mgmt
iface br-mgmt inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
# Notice the bridge port is the vlan tagged interface
bridge_ports none
address 172.29.236.100
netmask 255.255.252.0
auto br-vxlan
iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports none
address 172.29.240.100
netmask 255.255.252.0
auto br-vlan
iface br-vlan inet manual
bridge_stp off
bridge_waitport 0
bridge_fd 0
# Notice this bridge port is an Untagged host interface
bridge_ports none
auto br-storage
iface br-storage inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports none
address 172.29.244.100
netmask 255.255.252.0
EOF
# Ensure the network source is in place
if [ ! "$(grep -Rni '^source\ /etc/network/interfaces.d/\*.cfg' /etc/network/interfaces)" ]; then
echo "source /etc/network/interfaces.d/*.cfg" | tee -a /etc/network/interfaces
fi
# Bring up the new interfaces
for i in br-storage br-vlan br-vxlan br-mgmt; do
/sbin/ifup $i || true
done
# Export the home directory just in case it's not set
export HOME="/root"
pushd /opt/ansible-lxc-rpc/rpc_deployment
# Install all host bits
install_bits setup/host-setup.yml
# Install haproxy for dev purposes only
install_bits infrastructure/haproxy-install.yml
# Install all of the infra bits
install_bits infrastructure/memcached-install.yml
install_bits infrastructure/galera-install.yml
install_bits infrastructure/rabbit-install.yml
install_bits infrastructure/rsyslog-install.yml
install_bits infrastructure/elasticsearch-install.yml
install_bits infrastructure/logstash-install.yml
install_bits infrastructure/kibana-install.yml
install_bits infrastructure/es2unix-install.yml
install_bits infrastructure/rsyslog-config.yml
# install all of the Openstack Bits
if [ -f playbooks/openstack/openstack-common.yml ]; then
# cater for 9.x.x release (icehouse)
install_bits openstack/openstack-common.yml
fi
if [ -f playbooks/openstack/keystone-all.yml ]; then
# cater for 10.x.x release (juno) onwards
install_bits openstack/keystone-all.yml
else
# cater for 9.x.x release (icehouse)
install_bits openstack/keystone.yml
install_bits openstack/keystone-add-all-services.yml
fi
if [ "${DEPLOY_SWIFT}" == "yes" ]; then
install_bits openstack/swift-all.yml
fi
install_bits openstack/glance-all.yml
install_bits openstack/heat-all.yml
install_bits openstack/nova-all.yml
install_bits openstack/neutron-all.yml
install_bits openstack/cinder-all.yml
install_bits openstack/horizon-all.yml
if [ -f playbooks/openstack/utility-all.yml ]; then
# cater for 10.x.x release (juno) onwards
install_bits openstack/utility-all.yml
else
# cater for 9.x.x release (icehouse)
install_bits openstack/utility.yml
fi
if [ -f playbooks/openstack/rpc-support-all.yml ]; then
# cater for 10.x.x release (juno) onwards
install_bits openstack/rpc-support-all.yml
else
# cater for 9.x.x release (icehouse)
install_bits openstack/rpc-support.yml
fi
# Stop rsyslog container(s)
for i in $(lxc-ls | grep "rsyslog"); do
lxc-stop -k -n $i; lxc-start -d -n $i
done
# Reconfigure Rsyslog
install_bits infrastructure/rsyslog-config.yml
popd
get_instance_info

View File

@ -1,91 +0,0 @@
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
heat_template_version: 2013-05-23
description: Heat template to deploy Rackspace Private Cloud v9
parameters:
ssh_key_name:
type: string
description: Name of a Key Pair to enable SSH access to the instance
image_name:
type: string
description: Name of image to use for server
flavor_name:
type: string
description: Name Flavor to use for server
server_name:
type: string
default: RPCv9.0.0-AIO
description: The Instance Name
install_script_url:
type: string
default: https://raw.githubusercontent.com/stackforge/os-ansible-deployment/master/scripts/cloudserver-aio.sh
description: The aio script installation URL
frozen_repo_url:
type: string
default: https://mirror.rackspace.com/rackspaceprivatecloud
description: URL of the frozen package repository
repo_url:
type: string
default: https://github.com/stackforge/os-ansible-deployment.git
description: The repository URL
repo_branch:
type: string
default: master
description: The repository branch
net_id:
type: string
description: ID of Neutron network into which servers get deployed
sec_group:
type: string
description: Name of the security group
outputs:
RPCAIO_public_ip:
description: The public IP address of the newly configured Server.
value: { get_attr: [ RPCAIO, first_address ] }
RPCAIO_password:
description: The password for all the things.
value: secrete
resources:
RPCAIO_port:
type: OS::Neutron::Port
properties:
network_id: { get_param: net_id }
security_groups: [{ get_param: sec_group }]
RPCAIO:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor_name }
image: { get_param: image_name }
name: { get_param: server_name }
key_name: { get_param: ssh_key_name }
networks:
- port: { get_resource: RPCAIO_port }
user_data:
str_replace:
params:
"%install_script_url%": { get_param: install_script_url }
"%repo_url%": { get_param: repo_url }
"%repo_branch%": { get_param: repo_branch }
"%frozen_repo_url%": { get_param: frozen_repo_url }
template: |
#!/usr/bin/env bash
export REPO_URL="%repo_url%"
export REPO_BRANCH="%repo_branch%"
export FROZEN_REPO_URL="%frozen_repo_url%"
apt-get update
apt-get -y install wget
pushd /opt
bash <(wget -O- "%install_script_url%")
popd

View File

@ -1,99 +0,0 @@
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
heat_template_version: 2013-05-23
description: Heat template to deploy Rackspace Private Cloud v9
parameters:
ssh_key_name:
type: string
description: Name of a Key Pair to enable SSH access to the instance
image_name:
type: string
description: Name of image to use for server
default: Ubuntu 12.04 LTS (Precise Pangolin) (PVHVM)
constraints:
- allowed_values:
- Ubuntu 12.04 LTS (Precise Pangolin) (PVHVM)
- Ubuntu 12.04 LTS (Precise Pangolin)
description: Must be a supported operating system.
flavor_name:
type: string
description: Name Flavor to use for server
default: 8 GB Performance
constraints:
- allowed_values:
- 8 GB General Purpose v1
- 15 GB General Purpose v1
- 30 GB General Purpose v1
- 15 GB I/O v1
- 30 GB I/O v1
- 8 GB Standard Instance
- 15 GB Standard Instance
- 30 GB Standard Instance
- 8 GB Performance
- 15 GB Performance
- 30 GB Performance
description: Must be a valid flavor.
server_name:
type: string
default: RPCv9.0.0-AIO
description: The Instance Name
install_script_url:
type: string
default: https://raw.githubusercontent.com/stackforge/os-ansible-deployment/master/scripts/cloudserver-aio.sh
description: The aio script installation URL
frozen_repo_url:
type: string
default: https://mirror.rackspace.com/rackspaceprivatecloud
description: URL of the frozen package repository
repo_url:
type: string
default: https://github.com/stackforge/os-ansible-deployment.git
description: The repository URL
repo_branch:
type: string
default: master
description: The repository branch
outputs:
RPCAIO_public_ip:
description: The public IP address of the newly configured Server.
value: { get_attr: [ RPCAIO, first_address ] }
RPCAIO_password:
description: The password for all the things.
value: secrete
resources:
RPCAIO:
type: "Rackspace::Cloud::Server"
properties:
flavor: { get_param: flavor_name }
image: { get_param: image_name }
name: { get_param: server_name }
key_name: { get_param: ssh_key_name }
user_data:
str_replace:
params:
"%install_script_url%": { get_param: install_script_url }
"%repo_url%": { get_param: repo_url }
"%repo_branch%": { get_param: repo_branch }
"%frozen_repo_url%": { get_param: frozen_repo_url }
template: |
#!/usr/bin/env bash
export REPO_URL="%repo_url%"
export REPO_BRANCH="%repo_branch%"
export FROZEN_REPO_URL="%frozen_repo_url%"
apt-get update
apt-get -y install wget
pushd /opt
bash <(wget -O- "%install_script_url%")
popd

View File

@ -12,11 +12,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
set -e -u -v -x
REPO_URL=${REPO_URL:-"https://github.com/stackforge/os-ansible-deployment.git"} ## Shell Opts ----------------------------------------------------------------
REPO_BRANCH=${REPO_BRANCH:-"master"}
WORKING_FOLDER=${WORKING_FOLDER:-"/opt/stackforge/os-ansible-deployment"} set -e -u -v +x
## Variables -----------------------------------------------------------------
export REPO_URL=${REPO_URL:-"https://github.com/stackforge/os-ansible-deployment.git"}
export REPO_BRANCH=${REPO_BRANCH:-"master"}
export WORKING_FOLDER=${WORKING_FOLDER:-"/opt/stackforge/os-ansible-deployment"}
export ANSIBLE_PARAMETERS=${ANSIBLE_ANSIBLE_PARAMETERS:-"--forks 10"}
## Main ----------------------------------------------------------------------
# set verbosity
set -x
# install git so that we can fetch the repo # install git so that we can fetch the repo
apt-get update && apt-get install -y git apt-get update && apt-get install -y git
@ -24,19 +35,31 @@ apt-get update && apt-get install -y git
# fetch the repo # fetch the repo
git clone -b ${REPO_BRANCH} ${REPO_URL} ${WORKING_FOLDER}/ git clone -b ${REPO_BRANCH} ${REPO_URL} ${WORKING_FOLDER}/
# put an motd in place to help the user know how to restart galera after reboot # run the same aio build script that is used in the OpenStack CI pipeline
# Run the same AIO build script that is used in the OpenStack CI pipeline.
# ${WORKING_FOLDER} is quoted so a path containing whitespace cannot be
# word-split by the shell.
cd "${WORKING_FOLDER}"
bash scripts/gate-check-commit.sh
# put a motd in place to help the user know what stuff is accessible once the build is complete
cat > /etc/update-motd.d/20-openstack<< EOF cat > /etc/update-motd.d/20-openstack<< EOF
#!/usr/bin/env bash #!/usr/bin/env bash
echo "" echo ""
echo "############ ansible-os-deployment all-in-one build #############" echo "############ os-ansible-deployment all-in-one build #############"
echo ""
echo " OpenStack Services are now listening on $(ip -o -4 addr show dev eth0 | awk -F '[ /]+' '/global/ {print $4}')"
echo ""
EOF
chmod +x /etc/update-motd.d/20-openstack
# put an motd in place to help the user know how to restart galera after reboot
cat > /etc/update-motd.d/21-galera<< EOF
#!/usr/bin/env bash
echo ""
echo "If this server has been rebooted, you will need to re-bootstrap" echo "If this server has been rebooted, you will need to re-bootstrap"
echo "Galera to get the cluster operational. To do this execute:" echo "Galera to get the cluster operational. To do this execute:"
echo "" echo ""
echo "cd /opt/ansible-lxc-rpc/rpc_deployment" echo "cd /opt/ansible-lxc-rpc/rpc_deployment"
echo "ansible-playbook -e @/etc/rpc_deploy/user_variables.yml playbooks/infrastructure/galera-startup.yml" echo "ansible-playbook -e @/etc/rpc_deploy/user_variables.yml playbooks/infrastructure/galera-startup.yml"
echo ""
EOF EOF
chmod +x /etc/update-motd.d/20-openstack chmod +x /etc/update-motd.d/21-galera
# run the same aio build script that is used in the OpenStack CI pipeline
cd ${WORKING_FOLDER}
./scripts/os-ansible-aio-check.sh

116
scripts/run-playbooks.sh Executable file
View File

@ -0,0 +1,116 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Shell Opts ----------------------------------------------------------------
# -e: exit on any command failure; -u: error on unset variables;
# -v: echo script lines as they are read; +x: do not trace expanded commands.
set -e -u -v +x
## Variables -----------------------------------------------------------------
# Each DEPLOY_* toggle ("yes"/"no") controls whether the corresponding group
# of playbooks below is run; all may be overridden from the environment.
DEPLOY_HOST=${DEPLOY_HOST:-"yes"}
DEPLOY_LB=${DEPLOY_LB:-"yes"}
DEPLOY_INFRASTRUCTURE=${DEPLOY_INFRASTRUCTURE:-"yes"}
DEPLOY_LOGGING=${DEPLOY_LOGGING:-"yes"}
DEPLOY_OPENSTACK=${DEPLOY_OPENSTACK:-"yes"}
DEPLOY_SWIFT=${DEPLOY_SWIFT:-"yes"}
DEPLOY_RPC_SUPPORT=${DEPLOY_RPC_SUPPORT:-"yes"}
# Tempest is opt-in; it is deployed separately in the gate.
DEPLOY_TEMPEST=${DEPLOY_TEMPEST:-"no"}
# Extra arguments appended to every openstack-ansible invocation.
ANSIBLE_PARAMETERS=${ANSIBLE_PARAMETERS:-"--forks 10"}
# Directory (relative to the repo root) that contains the playbooks.
PLAYBOOK_DIRECTORY=${PLAYBOOK_DIRECTORY:-"rpc_deployment"}
## Functions -----------------------------------------------------------------

# Source the common helpers (successerator, info_block, ...) unless they are
# already present in this shell: info_block only exists once the library has
# been sourced, so a failing call triggers the source. The dirname expansion
# is quoted so a checkout path containing spaces cannot break the source.
info_block "Checking for required libraries." || source "$(dirname "${0}")/scripts-library.sh"

# Run a single playbook through successerator so transient failures can be
# retried (subject to MAX_RETRIES).
# Arguments:
#   $1 - playbook path relative to the playbooks/ directory
# Note: ${ANSIBLE_PARAMETERS} is deliberately unquoted so it word-splits
# into separate ansible arguments.
function install_bits() {
  successerator openstack-ansible ${ANSIBLE_PARAMETERS} "playbooks/$1"
}
## Main ----------------------------------------------------------------------
# Initiate the deployment: run each playbook group in dependency order
# (host setup, load balancer, infrastructure, OpenStack services, RPC
# support, then the final rsyslog wiring). Each group is guarded by its
# DEPLOY_* toggle defined above; install_bits aborts the run on failure.
pushd ${PLAYBOOK_DIRECTORY}
  if [ "${DEPLOY_HOST}" == "yes" ]; then
    # Install all host bits
    install_bits setup/host-setup.yml
  fi
  if [ "${DEPLOY_LB}" == "yes" ]; then
    # Install haproxy for dev purposes only
    install_bits infrastructure/haproxy-install.yml
  fi
  if [ "${DEPLOY_INFRASTRUCTURE}" == "yes" ]; then
    # Install all of the infra bits
    install_bits infrastructure/memcached-install.yml
    install_bits infrastructure/galera-install.yml
    install_bits infrastructure/rabbit-install.yml
    if [ "${DEPLOY_LOGGING}" == "yes" ]; then
      # The log-aggregation stack is only deployed when logging is enabled.
      install_bits infrastructure/rsyslog-install.yml
      install_bits infrastructure/elasticsearch-install.yml
      install_bits infrastructure/logstash-install.yml
      install_bits infrastructure/kibana-install.yml
      install_bits infrastructure/es2unix-install.yml
    fi
  fi
  if [ "${DEPLOY_OPENSTACK}" == "yes" ]; then
    # install all of the OpenStack Bits
    # The -f checks below select the playbook layout of the release being
    # deployed: 9.x.x (icehouse) and 10.x.x (juno) name them differently.
    if [ -f playbooks/openstack/openstack-common.yml ]; then
      # cater for 9.x.x release (icehouse)
      install_bits openstack/openstack-common.yml
    fi
    if [ -f playbooks/openstack/keystone-all.yml ]; then
      # cater for 10.x.x release (juno) onwards
      install_bits openstack/keystone-all.yml
    else
      # cater for 9.x.x release (icehouse)
      install_bits openstack/keystone.yml
      install_bits openstack/keystone-add-all-services.yml
    fi
    if [ "${DEPLOY_SWIFT}" == "yes" ]; then
      install_bits openstack/swift-all.yml
    fi
    install_bits openstack/glance-all.yml
    install_bits openstack/heat-all.yml
    install_bits openstack/nova-all.yml
    install_bits openstack/neutron-all.yml
    install_bits openstack/cinder-all.yml
    install_bits openstack/horizon-all.yml
    if [ -f playbooks/openstack/utility-all.yml ]; then
      # cater for 10.x.x release (juno) onwards
      install_bits openstack/utility-all.yml
    elif [ -f playbooks/openstack/utility.yml ]; then
      # cater for 9.x.x release (icehouse)
      install_bits openstack/utility.yml
    fi
    if [ "${DEPLOY_TEMPEST}" == "yes" ]; then
      # Deploy tempest
      install_bits openstack/tempest.yml
    fi
  fi
  if [ "${DEPLOY_RPC_SUPPORT}" == "yes" ]; then
    if [ -f playbooks/openstack/rpc-support-all.yml ]; then
      # cater for 10.x.x release (juno) onwards
      install_bits openstack/rpc-support-all.yml
    elif [ -f playbooks/openstack/rpc-support.yml ]; then
      # cater for 9.x.x release (icehouse)
      install_bits openstack/rpc-support.yml
    fi
  fi
  # Runs last so every service deployed above is wired into logging.
  if [ "${DEPLOY_INFRASTRUCTURE}" == "yes" ] && [ "${DEPLOY_LOGGING}" == "yes" ]; then
    # Configure Rsyslog
    install_bits infrastructure/rsyslog-config.yml
  fi
popd

61
scripts/run-tempest.sh Executable file
View File

@ -0,0 +1,61 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Shell Opts ----------------------------------------------------------------

# -e: exit on any command failure; -u: error on unset variables;
# -v: echo script lines as they are read; +x: do not trace expanded commands.
set -e -u -v +x

## Variables -----------------------------------------------------------------

# Script (deployed into the utility container by the tempest playbook)
# that actually executes the tempest run.
TEMPEST_SCRIPT_PATH=${TEMPEST_SCRIPT_PATH:-/root/rpc_tempest_gate.sh}
# Argument(s) handed to the tempest script (selects the test set to run).
TEMPEST_SCRIPT_PARAMETERS=${TEMPEST_SCRIPT_PARAMETERS:-commit_aio}
CONFIG_PREFIX=${CONFIG_PREFIX:-"rpc"}
PLAYBOOK_DIRECTORY_PARENT=${PLAYBOOK_DIRECTORY_PARENT:-"${CONFIG_PREFIX}_deployment"}

## Functions -----------------------------------------------------------------

# Source the common helpers unless already present: info_block only exists
# once scripts-library.sh has been sourced, so a failing call triggers it.
info_block "Checking for required libraries." || source "$(dirname "${0}")/scripts-library.sh"

## Main ----------------------------------------------------------------------

# Check that ansible has been installed ('command -v' is the portable,
# builtin replacement for 'which').
if ! command -v ansible > /dev/null 2>&1; then
  info_block "ERROR: Please ensure that ansible is installed."
  exit 1
fi

# Check that we are in the root path of the cloned repo: any one of these
# markers missing means we are in the wrong directory. (The original used
# 'test -a', which only failed when all three markers were missing.)
if [ ! -d "etc" ] || [ ! -d "scripts" ] || [ ! -f "requirements.txt" ]; then
  info_block "ERROR: Please execute this script from the root directory of the cloned source code."
  exit 1
fi

pushd "${PLAYBOOK_DIRECTORY_PARENT}"
  # Check that there are utility containers
  if ! ansible 'utility[0]' --list-hosts; then
    info_block "ERROR: No utility containers have been deployed in your environment."
    exit 99
  fi
  # Check that the utility container already has the required tempest script deployed
  if ! ansible 'utility[0]' -m shell -a "ls -al ${TEMPEST_SCRIPT_PATH}"; then
    info_block "ERROR: Please execute the 'os-tempest-install.yml' playbook prior to this script."
    exit 99
  fi
  # Execute the tempest tests
  info_block "Executing tempest tests"
  ansible 'utility[0]' -m shell -a "${TEMPEST_SCRIPT_PATH} ${TEMPEST_SCRIPT_PARAMETERS}"
popd

260
scripts/scripts-library.sh Executable file
View File

@ -0,0 +1,260 @@
#!/usr/bin/env bash
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Variables -----------------------------------------------------------------
# Divider used by the banner helpers below; its length fixes the width of
# every printed banner.
LINE='-----------------------------------------------------------------------'
# Epoch second at which the calling script started (used by exit_state).
STARTTIME=${STARTTIME:-"$(date +%s)"}
# Accumulator of per-operation timing lines; printed by exit_state.
REPORT_DATA=""
# Extra attempts successerator makes after a failure. 0 means any failure
# is immediately fatal (required for accurate gate results).
MAX_RETRIES=${MAX_RETRIES:-0}
# Export known paths
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Override the current HOME directory
export HOME="/root"
## Functions -----------------------------------------------------------------
# Render a single banner line: the message wrapped in "- [ ... ] -" and
# padded with the divider character out to the width of ${LINE}.
function print_info() {
  set +x
  PROC_NAME="- [ $@ ] -"
  local pad="${LINE:${#PROC_NAME}}"
  printf '\n%s%s\n' "${PROC_NAME}" "${pad}"
}
# Surround a message (rendered via print_info) with a full-width divider
# line above and below it.
function info_block(){
  set +x
  printf '%s\n' "${LINE}"
  print_info "$@"
  printf '%s\n' "${LINE}"
}
# Print the accumulated run-time report, the total elapsed time and a
# final build-status banner, then terminate the script.
# Globals:
#   REPORT_DATA (read) - timing lines collected by report_success
#   STARTTIME   (read) - epoch second the run started
# Arguments:
#   $1 - exit status: 0 prints the success banner, anything else the
#        failure banner; the value is propagated as the exit code
function exit_state() {
  set +x
  info_block "Run time reports"
  echo -e "${REPORT_DATA}"
  # $((...)) replaces the deprecated $[...] arithmetic syntax.
  TOTALSECONDS="$(($(date +%s) - STARTTIME))"
  info_block "Run Time = ${TOTALSECONDS} seconds || $((TOTALSECONDS / 60)) minutes"
  if [ "${1}" == 0 ];then
    info_block "Status: Build Success"
  else
    info_block "Status: Build Failure"
  fi
  exit ${1}
}
# Dump host diagnostics, report the supplied error details and terminate
# the run with a failure status via exit_state.
function exit_fail() {
  set +x
  local details="$*"
  get_instance_info
  info_block "Error Info - ${details}"
  exit_state 1
}
# Output diagnostic information about the host: identity, memory, storage,
# LVM, CPU, kernel, containers, firewall and network state. Called by
# exit_fail so gate failures carry enough context to debug after the
# instance is gone.
function get_instance_info() {
  set +x
  info_block 'Path'
  echo ${PATH}
  info_block 'Current User'
  whoami
  info_block 'Home Directory'
  echo ${HOME}
  info_block 'Available Memory'
  free -mt
  info_block 'Available Disk Space'
  df -h
  info_block 'Mounted Devices'
  mount
  info_block 'Block Devices'
  lsblk -i
  info_block 'Block Devices Information'
  blkid
  info_block 'Block Device Partitions'
  for blk_dev in $(lsblk -nrdo NAME,TYPE | awk '/disk/ {print $1}'); do
    # Ignoring errors for the below command is important as sometimes
    # the block device in question is unpartitioned or has an invalid
    # partition. In this case, parted returns 'unrecognised disk label'
    # and the bash script exits due to the -e environment setting.
    parted /dev/$blk_dev print || true
  done
  info_block 'PV Information'
  pvs
  info_block 'VG Information'
  vgs
  info_block 'LV Information'
  lvs
  info_block 'Contents of /etc/fstab'
  cat /etc/fstab
  info_block 'CPU Information'
  # 'which' guards commands that may be missing on minimal images.
  which lscpu && lscpu
  info_block 'Kernel Information'
  uname -a
  info_block 'Container Information'
  which lxc-ls && lxc-ls --fancy
  info_block 'Firewall Information'
  iptables -vnL
  iptables -t nat -vnL
  iptables -t mangle -vnL
  info_block 'Network Devices'
  ip a
  info_block 'Network Routes'
  ip r
  info_block 'Trace Path from google'
  tracepath 8.8.8.8 -m 5
  info_block 'XEN Server Information'
  # '||:' ensures a missing xenstore does not abort the dump under set -e.
  which xenstore-read && xenstore-read vm-data/provider_data/provider ||:
}
# Used to retry a process that may fail due to transient issues.
# Runs the given command up to MAX_RETRIES+1 times; records the run time
# via report_success on success, aborts the run via exit_fail otherwise.
# Arguments:
#   $@ - the command (and its arguments) to execute
function successerator() {
  # Disable errexit/tracing so a failed attempt does not kill the script.
  set +e +x
  # Get the time that the method was started.
  OP_START_TIME="$(date +%s)"
  MAX_ATTEMPTS=$((${MAX_RETRIES}+1))
  for ATTEMPT in $(seq ${MAX_ATTEMPTS}); do
    # "$@" preserves the caller's argument boundaries (the unquoted form
    # re-split arguments containing whitespace); the command is forwarded
    # to report_success so the report names the operation (the original
    # passed nothing, leaving the report's operation field empty).
    "$@" && { report_success "$@"; set -e; return 0; }
  done
  # NOTE: errexit is re-enabled on the success path above; the original
  # placed 'set -e -x' after exit_fail, where it was unreachable.
  exit_fail "Hit maximum number of retries, giving up..."
}
# Report success: append a timing entry for the just-completed operation
# to the run report and print its duration.
# Globals:
#   OP_START_TIME (read)  - epoch second the operation started
#   ATTEMPT       (read)  - attempt number that succeeded
#   REPORT_DATA   (write) - accumulator later printed by exit_state
# Arguments:
#   $@ - description of the operation (typically the command that ran)
function report_success() {
  # $((...)) replaces the deprecated $[...] arithmetic syntax.
  OP_TOTAL_SECONDS="$(($(date +%s) - OP_START_TIME))"
  REPORT_OUTPUT="${OP_TOTAL_SECONDS} seconds"
  REPORT_DATA+="- Operation: [ $@ ]\t${REPORT_OUTPUT}\tNumber of Attempts [ ${ATTEMPT} ]\n"
  print_info "Run Time = ${REPORT_OUTPUT}"
}
# Ensure a root ssh keypair exists and that its public key is present in
# the local authorized_keys, enabling passwordless ssh to localhost.
function ssh_key_create(){
  # Ensure that the ssh key exists and is an authorized_key
  key_path="${HOME}/.ssh"
  key_file="${key_path}/id_rsa"

  # Ensure that the .ssh directory exists and has the right mode
  if [ ! -d "${key_path}" ]; then
    mkdir -p "${key_path}"
    chmod 700 "${key_path}"
  fi

  # (Re)generate the keypair when either half of it is missing.
  if [ ! -f "${key_file}" ] || [ ! -f "${key_file}.pub" ]; then
    rm -f "${key_file}"*
    ssh-keygen -t rsa -f "${key_file}" -N ''
  fi

  # Ensure that the public key is included in the authorized_keys
  # for the default root directory and the current home directory.
  # -F matches the key literally rather than as a regex (key material
  # contains regex metacharacters such as '+' and '/'); stderr is
  # silenced for the expected case where authorized_keys does not exist
  # yet (the failed grep then triggers the append).
  key_content=$(cat "${key_file}.pub")
  if ! grep -q -F "${key_content}" "${key_path}/authorized_keys" 2>/dev/null; then
    echo "${key_content}" | tee -a "${key_path}/authorized_keys"
  fi
}
# Repurpose a secondary block device for the build: when a non-root disk
# of at least 60GB exists and the cinder-volumes VG is absent, the disk is
# (destructively) repartitioned into a cinder-volumes VG plus a 50G LV
# formatted and mounted at /var/lib/lxc for container storage.
function configure_diskspace(){
  # If there are any block devices available other than the one
  # used for the root disk, repurpose it for our needs.

  # the disk we use needs to have at least 60GB of space
  min_disk_size_b=$((60 * 1024 * 1024 * 1024))

  # Match only secondary disks (e.g. sdb..sdz, vdb..), never the first disk.
  blk_devices=$(lsblk -nrdo NAME,TYPE | awk '/d[b-z]+ disk/ {print $1}')
  for blk_dev in ${blk_devices}; do
    # only do this if the cinder-volumes vg doesn't already exist
    if ! vgs cinder-volumes > /dev/null 2>&1; then
      blk_dev_size_b=$(lsblk -nrdbo NAME,TYPE,SIZE | awk "/^${blk_dev} disk/ {print \$3}")
      if [ "${blk_dev_size_b}" -gt "${min_disk_size_b}" ]; then
        # dismount any mount points on the device
        mount_points=$(awk "/^\/dev\/${blk_dev}[0-9]* / {print \$2}" /proc/mounts)
        for mount_point in ${mount_points}; do
          umount ${mount_point}
        done
        #add a vg for cinder volumes
        parted --script /dev/${blk_dev} mklabel gpt
        parted --align optimal --script /dev/${blk_dev} mkpart cinder 0% 100%
        pvcreate -ff -y /dev/${blk_dev}1
        vgcreate cinder-volumes /dev/${blk_dev}1
        # add an lv for lxc to use
        # it does not use it's own vg to ensure that the container disk usage
        # is thin-provisioned in the simplest way as openstack-infra instances
        # do not have enough disk space to handle thick-provisioned containers
        lvcreate -n lxc -L50g cinder-volumes
        # prepare the file system and mount it
        mkfs.ext4 /dev/cinder-volumes/lxc
        mkdir -p /var/lib/lxc
        mount /dev/cinder-volumes/lxc /var/lib/lxc
      fi
    fi
  done
}
# Create a loopback file and optionally attach it.
# Arguments:
#   $1 - path of the loopback file
#   $2 - size (passed to 'truncate -s' or 'dd bs=')
#   $3 - allocation type: "thin" (sparse file) or "thick" (preallocated)
#   $4 - attach method: "swap", "rc" (losetup now and at boot) or "none"
function loopback_create() {
  LOOP_FILENAME=${1}
  LOOP_FILESIZE=${2}
  LOOP_FILE_TYPE=${3}     # thin, thick
  LOOP_MOUNT_METHOD=${4}  # swap, rc, none
  if [ ! -f "${LOOP_FILENAME}" ]; then
    if [ "${LOOP_FILE_TYPE}" = "thin" ]; then
      # Sparse file: blocks are only allocated as they are written.
      truncate -s "${LOOP_FILESIZE}" "${LOOP_FILENAME}"
    elif [ "${LOOP_FILE_TYPE}" = "thick" ]; then
      # Fully allocate the file up front.
      dd if=/dev/zero of="${LOOP_FILENAME}" bs="${LOOP_FILESIZE}" count=1
    fi
  fi
  if [ "${LOOP_MOUNT_METHOD}" = "rc" ]; then
    # Attach the file to the next free loop device if not yet attached.
    if ! losetup -a | grep -q "(${LOOP_FILENAME})$"; then
      LOOP_DEVICE=$(losetup -f)
      losetup "${LOOP_DEVICE}" "${LOOP_FILENAME}"
    fi
    # Re-attach at boot by inserting a losetup line before rc.local's
    # final 'exit' line.
    if ! grep -q "${LOOP_FILENAME}" /etc/rc.local; then
      sed -i "\$i losetup \$(losetup -f) ${LOOP_FILENAME}" /etc/rc.local
    fi
  fi
  if [ "${LOOP_MOUNT_METHOD}" = "swap" ]; then
    # Record the swap file in fstab BEFORE activation: 'swapon -a' only
    # activates swap areas listed in fstab, so the original order (swapon
    # first, fstab entry afterwards) never enabled the swap on first run.
    if ! grep -q "^${LOOP_FILENAME} " /etc/fstab; then
      echo "${LOOP_FILENAME} none swap loop 0 0" >> /etc/fstab
    fi
    if ! swapon -s | grep -q "${LOOP_FILENAME}"; then
      mkswap "${LOOP_FILENAME}"
      swapon -a
    fi
  fi
}
# Exit if the script is not being run as root.
if [ "$(whoami)" != "root" ]; then
  info_block "This script must be run as root."
  exit 1
fi

# Check that we are in the root path of the cloned repo: any one of these
# markers missing means we are in the wrong directory. (The original used
# 'test -a', which only failed when all three markers were missing.)
if [ ! -d "etc" ] || [ ! -d "scripts" ] || [ ! -f "requirements.txt" ]; then
  info_block "ERROR: Please execute this script from the root directory of the cloned source code."
  exit 1
fi

# Trap all Death Signals and Errors.
# Single quotes are essential here: ${LINENO} and $? must be expanded when
# the trap fires, not when it is defined (the original double-quoted form
# baked in the definition-site line number and a status of 0).
trap 'exit_fail ${LINENO} $? "Received STOP Signal"' SIGHUP SIGINT SIGTERM
trap 'exit_fail ${LINENO} $?' ERR