* added heat template for use with RPC9.0.0 and RAX

* added heat template for use with RPC9.0.0 and OpenStack
* rewrote the AIO script.
Kevin Carter 2014-09-18 14:20:39 -05:00
parent c94213055c
commit f17d16ed02
3 changed files with 586 additions and 128 deletions


@@ -12,40 +12,95 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
# Assumptions:
# This assumes that the lab environment will be set up using a Rackspace
# Cloud Server built on the Rackspace Public Cloud. The lab will attempt to
# create the Volume Groups it needs as it creates your containers. If
# /dev/xvde is not available, all containers will be created on the local
# file system. If /dev/xvde is available it WILL BE NUKED and partitioned
# for the environment. Once the partitioning is done the lab will create all
# of the required containers, as well as anything else that may be needed
# prior to running the installation.
# If using LVM you should have NO LESS than 100 GB of consumable space on
# the /dev/xvde device; if you have less than 100 GB the installation
# will fail.
# THIS IS NOT FOR PRODUCTION USE, NOR WILL IT EVER BE. This is a simple
# lab setup tool that lets you quickly build an ALL IN ONE environment
# for development purposes.
set -e -u -v -x
LAB_NAME=${LAB_NAME:-ansible-lxc-rpc-inventory}
LAB_LV_DEVICE=${LAB_LV_DEVICE:-/dev/xvde}
LAB_BRIDGE_INTERFACE=${LAB_BRIDGE_INTERFACE:-br-mgmt}
LAB_MAIN_INTERFACE=${LAB_MAIN_INTERFACE:-eth0}
REPO_URL=${REPO_URL:-"https://github.com/rcbops/ansible-lxc-rpc.git"}
REPO_BRANCH=${REPO_BRANCH:-"master"}
FROZEN_REPO_URL=${FROZEN_REPO_URL:-"http://rpc-slushee.rackspace.com"}
MAX_RETRIES=${MAX_RETRIES:-5}
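# Illustrative only: the defaults above can be overridden from the
# environment before this script is run, for example (hypothetical values):
#   export LAB_LV_DEVICE="/dev/xvdb"
#   export REPO_BRANCH="9.0.0"
#   bash cloudserver-aio.sh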
apt-get update
apt-get install -y python-dev \
python2.7 \
build-essential \
curl \
git-core \
ipython \
tmux \
vim \
vlan \
bridge-utils \
lvm2 \
linux-image-extra-$(uname -r)
function key_create(){
ssh-keygen -t rsa -f /root/.ssh/id_rsa -N ''
}
# Used to retry a process that may fail due to transient issues.
function successerator() {
    set +e
    RETRY=0
    # Set the initial return value to failure
    false
    while [ $? -ne 0 -a ${RETRY} -lt ${MAX_RETRIES} ];do
        RETRY=$((${RETRY}+1))
        # Quote the arguments so words containing spaces survive intact
        "$@"
    done
    # Test the exit status of the final attempt so that a success on the
    # last retry is not reported as a failure.
    if [ $? -ne 0 ];then
        echo "Hit maximum number of retries, giving up..."
        exit 1
    fi
    set -e
}
function install_bits() {
successerator ansible-playbook -e @/etc/rpc_deploy/user_variables.yml \
playbooks/$@
}
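# Illustrative only: successerator can wrap any command that may need to be
# retried, and install_bits wraps a single playbook run, for example:
#   successerator apt-get install -y lvm2
#   install_bits setup/host-setup.yml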
if [ ! -d "/opt" ];then
mkdir /opt
fi
if [ ! "$(swapon -s | grep -v Filename)" ];then
cat > /opt/swap.sh <<EOF
#!/usr/bin/env bash
if [ ! "\$(swapon -s | grep -v Filename)" ];then
SWAPFILE="/tmp/SwapFile"
if [ -f "\${SWAPFILE}" ];then
swapoff -a
rm \${SWAPFILE}
fi
dd if=/dev/zero of=\${SWAPFILE} bs=1M count=512
mkswap \${SWAPFILE}
swapon \${SWAPFILE}
fi
EOF
chmod +x /opt/swap.sh
/opt/swap.sh
fi
if [ -f "/opt/swap.sh" ];then
if [ ! -f "/etc/rc.local" ];then
touch /etc/rc.local
fi
if [ "$(grep 'exit 0' /etc/rc.local)" ];then
sed -i '/exit\ 0/ s/^/#\ /' /etc/rc.local
fi
if [ ! "$(grep 'swap.sh' /etc/rc.local)" ];then
echo "/opt/swap.sh" | tee -a /etc/rc.local
fi
chmod +x /etc/rc.local
fi
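# Illustrative check (not run automatically): the swap file created above
# can be confirmed with:
#   swapon -s
#   free -m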
# Make the system key used for the host to bootstrap itself
pushd /root/.ssh/
if [ ! -f "id_rsa" ];then
@@ -63,36 +118,6 @@ if [ ! "$(grep \"$KEYENTRY\" authorized_keys)" ];then
fi
popd
# Install base System Packages
apt-get update && apt-get install -y python-dev \
build-essential \
curl \
git-core \
ipython \
tmux \
vim
apt-get -y upgrade
# If Ephemeral disk is detected carve it up as LVM
if [ -e "${LAB_LV_DEVICE}" ];then
    SPACE=$(parted -s ${LAB_LV_DEVICE} p | awk '/Disk/ {print $3}' | grep -o '[0-9]\+')
ENOUGH_SPACE=$(python -c "o=\"$SPACE\".split('.')[0]; print(int(o) > 100)")
if [ "$ENOUGH_SPACE" == True ];then
apt-get update && apt-get install -y lvm2
if [ ! "$(echo C | parted ${LAB_LV_DEVICE} p | grep gpt)" ];then
parted -s ${LAB_LV_DEVICE} mktable gpt
parted -s ${LAB_LV_DEVICE} mkpart lvm 0% 90%
parted -s ${LAB_LV_DEVICE} mkpart lvm 90% 100%
fi
if [ ! "$(pvs | grep '/dev/xvde1')" ];then
pvcreate ${LAB_LV_DEVICE}1
vgcreate lxc ${LAB_LV_DEVICE}1
fi
if [ ! "$(pvs | grep '/dev/xvde2')" ];then
pvcreate ${LAB_LV_DEVICE}2
vgcreate cinder-volumes ${LAB_LV_DEVICE}2
fi
else
CINDER="/opt/cinder.img"
if [ ! "$(losetup -a | grep /opt/cinder.img)" ];then
LOOP=$(losetup -f)
@@ -102,78 +127,342 @@ if [ -e "${LAB_LV_DEVICE}" ];then
vgcreate cinder-volumes ${LOOP}
pvscan
fi
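# Illustrative check (not run automatically): the physical volumes and
# volume groups created above can be listed with:
#   pvs
#   vgs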
# Get the source
if [ -d "/opt/ansible-lxc-rpc" ];then
rm -rf "/opt/ansible-lxc-rpc"
fi
git clone "${REPO_URL}" "/opt/ansible-lxc-rpc"
pushd /opt/ansible-lxc-rpc
git checkout "${REPO_BRANCH}"
# Copy the base etc files
if [ -d "/etc/rpc_deploy" ];then
rm -rf "/etc/rpc_deploy"
fi
cp -R /opt/ansible-lxc-rpc/etc/rpc_deploy /etc/
# Install pip
curl ${FROZEN_REPO_URL}/downloads/get-pip.py | python
# Install requirements
pip install -r /opt/ansible-lxc-rpc/requirements.txt
# Generate the passwords
/opt/ansible-lxc-rpc/scripts/pw-token-gen.py --file /etc/rpc_deploy/user_variables.yml
popd
# Get modern pip
curl https://bootstrap.pypa.io/get-pip.py | python
cat > /etc/rpc_deploy/user_variables.yml <<EOF
---
rpc_repo_url: ${FROZEN_REPO_URL}
required_kernel: 3.13.0-30-generic
## Rackspace Cloud Details
rackspace_cloud_auth_url: https://identity.api.rackspacecloud.com/v2.0
rackspace_cloud_tenant_id: SomeTenantID
# The cloudfiles_tenant_id is the long MossoCloudFS tenant ID, only required when Cloud Files is used as the Glance backend
rackspace_cloudfiles_tenant_id: SomeTenantID
rackspace_cloud_username: SomeUserName
rackspace_cloud_password: SomeUsersPassword
rackspace_cloud_api_key: SomeAPIKey
## Rabbit Options
rabbitmq_password: secrete
rabbitmq_cookie_token: secrete
## Tokens
memcached_encryption_key: secrete
## Container default user
container_openstack_password: secrete
## Galera Options
mysql_root_password: secrete
mysql_debian_sys_maint_password: secrete
## Keystone Options
keystone_container_mysql_password: secrete
keystone_auth_admin_token: secrete
keystone_auth_admin_password: secrete
keystone_service_password: secrete
## Cinder Options
cinder_container_mysql_password: secrete
cinder_service_password: secrete
cinder_v2_service_password: secrete
# Set glance_default_store to "swift" if using Cloud Files or a swift backend
glance_default_store: file
glance_container_mysql_password: secrete
glance_service_password: secrete
glance_swift_store_auth_address: "{{ rackspace_cloud_auth_url }}"
glance_swift_store_user: "{{ rackspace_cloudfiles_tenant_id }}:{{ rackspace_cloud_username }}"
glance_swift_store_key: "{{ rackspace_cloud_password }}"
glance_swift_store_container: SomeContainerName
glance_swift_store_region: SomeRegion
glance_swift_store_endpoint_type: internalURL
glance_notification_driver: noop
## Heat Options
heat_stack_domain_admin_password: secrete
heat_container_mysql_password: secrete
### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
heat_auth_encryption_key: 12345678901234567890123456789012
### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
heat_service_password: secrete
heat_cfn_service_password: secrete
## Horizon Options
horizon_container_mysql_password: secrete
## MaaS Options
maas_auth_method: password
maas_auth_url: "{{ rackspace_cloud_auth_url }}"
maas_username: "{{ rackspace_cloud_username }}"
maas_api_key: "{{ rackspace_cloud_api_key }}"
maas_auth_token: some_token
maas_api_url: https://monitoring.api.rackspacecloud.com/v1.0/{{ rackspace_cloud_tenant_id }}
maas_notification_plan: npTechnicalContactsEmail
maas_agent_token: some_token
maas_target_alias: public0_v4
maas_scheme: https
# Override the scheme for a specific service's remote monitor by specifying it here, e.g.:
# maas_nova_scheme: http
maas_keystone_user: maas
maas_keystone_password: secrete
# Check this number of times before registering state change
maas_alarm_local_consecutive_count: 3
maas_alarm_remote_consecutive_count: 1
# Timeout must be less than period
maas_check_period: 60
maas_check_timeout: 30
maas_monitoring_zones:
- mzdfw
- mziad
- mzord
- mzlon
- mzhkg
maas_repo_version: v9.0.0
## Neutron Options
neutron_container_mysql_password: secrete
neutron_service_password: secrete
## Nova Options
nova_virt_type: qemu
nova_container_mysql_password: secrete
nova_metadata_proxy_secret: secrete
nova_ec2_service_password: secrete
nova_service_password: secrete
nova_v3_service_password: secrete
nova_s3_service_password: secrete
## RPC Support
rpc_support_holland_password: secrete
## Kibana Options
kibana_password: secrete
EOF
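# Illustrative only: heat_auth_encryption_key above must be exactly 32
# characters long; a suitable value can be generated with, for example:
#   openssl rand -hex 16    # 16 random bytes encoded as 32 hex characters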
# Install ansible
pip install ansible==1.6.6
# Get our playbooks
if [ ! -d /opt/ansible-lxc-rpc ]; then
git clone https://github.com/rcbops/ansible-lxc-rpc /opt/ansible-lxc-rpc
fi
# Get the eth0 IP address
MAINADDR="$(ip route show dev ${LAB_MAIN_INTERFACE} | awk '{print $7}' | tail -n 1)"
# Get the eth2 CIDR
VIPADDR="$(ip route show dev ${LAB_BRIDGE_INTERFACE} | awk '{print $7}' | tail -n 1)"
cp -R /opt/ansible-lxc-rpc/etc/rpc_deploy /etc/rpc_deploy
cat > /etc/rpc_deploy/rpc_user_config.yml <<EOF
---
# This is the md5 of the environment file
environment_version: $(md5sum /etc/rpc_deploy/rpc_environment.yml | awk '{print $1}')
# User defined CIDR used for containers
cidr: $VIPADDR/24
# User defined Infrastructure Hosts
cidr_networks:
# Cidr used in the Management network
container: 172.29.236.0/22
# Cidr used in the Service network
snet: 172.29.248.0/22
# Cidr used in the VM network
tunnel: 172.29.240.0/22
# Cidr used in the Storage network
storage: 172.29.244.0/22
used_ips:
- 172.29.236.1,172.29.236.50
- 172.29.244.1,172.29.244.50
global_overrides:
rpc_repo_url: ${FROZEN_REPO_URL}
# Internal Management vip address
internal_lb_vip_address: 172.29.236.100
# External DMZ VIP address
external_lb_vip_address: 10.200.200.146
# Bridged interface to use with tunnel type networks
tunnel_bridge: "br-vxlan"
# Bridged interface to build containers with
management_bridge: "br-mgmt"
# Define your Add on container networks.
provider_networks:
- network:
group_binds:
- all_containers
- hosts
type: "raw"
container_bridge: "br-mgmt"
container_interface: "eth1"
ip_from_q: "container"
- network:
group_binds:
- glance_api
- cinder_api
- cinder_volume
- nova_compute
type: "raw"
container_bridge: "br-storage"
container_interface: "eth2"
ip_from_q: "storage"
- network:
group_binds:
- glance_api
- nova_compute
- neutron_linuxbridge_agent
type: "raw"
container_bridge: "br-snet"
container_interface: "eth3"
ip_from_q: "snet"
- network:
group_binds:
- neutron_linuxbridge_agent
container_bridge: "br-vxlan"
container_interface: "eth10"
ip_from_q: "tunnel"
type: "vxlan"
range: "1:1000"
net_name: "vxlan"
- network:
group_binds:
- neutron_linuxbridge_agent
container_bridge: "br-vlan"
container_interface: "eth11"
type: "flat"
net_name: "vlan"
- network:
group_binds:
- neutron_linuxbridge_agent
container_bridge: "br-vlan"
container_interface: "eth11"
type: "vlan"
range: "1:1"
net_name: "vlan"
# Name of load balancer
lb_name: lb_name_in_core
# User defined Infrastructure Hosts, this should be a required group
infra_hosts:
  aio1:
    ip: 172.29.236.100
# User defined Compute Hosts, this should be a required group
compute_hosts:
  aio1:
    ip: 172.29.236.100
# User defined Storage Hosts, this should be a required group
storage_hosts:
  aio1:
    ip: 172.29.236.100
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        lvm:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMISCSIDriver
          volume_backend_name: LVM_iSCSI
# User defined Logging Hosts, this should be a required group
log_hosts:
  aio1:
    ip: 172.29.236.100
# User defined Networking Hosts, this should be a required group
network_hosts:
  aio1:
    ip: 172.29.236.100
## Other hosts can be added whenever needed.
haproxy_hosts:
  aio1:
    ip: 172.29.236.100
EOF
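# Illustrative only: additional hosts can be appended to any group in the
# file written above, for example a second (hypothetical) compute host:
#   compute_hosts:
#     aio1:
#       ip: 172.29.236.100
#     compute2:
#       ip: 172.29.236.101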
sed -i "s/internal_lb_vip_address:.*/internal_lb_vip_address: ${VIPADDR}/" /opt/ansible-lxc-rpc/rpc_deployment/vars/user_variables.yml
# Install all the things
pushd /opt/ansible-lxc-rpc
# Ensure that the scripts python requirements are installed
pip install -r requirements.txt
cat > /etc/network/interfaces.d/aio-bridges.cfg <<EOF
## Required network bridges: br-mgmt, br-vxlan, br-vlan, br-storage, and br-snet.
auto br-mgmt
iface br-mgmt inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
    # No physical bridge port in this AIO; on real hardware this would be the VLAN-tagged interface
bridge_ports none
address 172.29.236.100
netmask 255.255.252.0
auto br-vxlan
iface br-vxlan inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports none
address 172.29.240.100
netmask 255.255.252.0
auto br-vlan
iface br-vlan inet manual
bridge_stp off
bridge_waitport 0
bridge_fd 0
    # No physical bridge port in this AIO; on real hardware this would be an untagged host interface
bridge_ports none
auto br-storage
iface br-storage inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports none
address 172.29.244.100
netmask 255.255.252.0
auto br-snet
iface br-snet inet static
bridge_stp off
bridge_waitport 0
bridge_fd 0
bridge_ports none
# Notice there is NO physical interface in this bridge!
address 172.29.248.100
netmask 255.255.252.0
EOF
# Ensure the network source is in place
if [ ! "$(grep -Rni '^source\ /etc/network/interfaces.d/\*.cfg' /etc/network/interfaces)" ]; then
echo "source /etc/network/interfaces.d/*.cfg" | tee -a /etc/network/interfaces
fi
# Bring up the new interfaces
for i in br-snet br-storage br-vlan br-vxlan br-mgmt; do
/sbin/ifup $i || true
done
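# Illustrative check (not run automatically): once the interfaces are up the
# bridges can be inspected with, for example:
#   brctl show
#   ip addr show br-mgmt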
# Export the home directory just in case it's not set
export HOME="/root"
pushd /opt/ansible-lxc-rpc/rpc_deployment
# Base Setup
ansible-playbook -e @/opt/ansible-lxc-rpc/rpc_deployment/vars/user_variables.yml playbooks/setup/all-the-setup-things.yml
# Infrastructure Setup
ansible-playbook -e @/opt/ansible-lxc-rpc/rpc_deployment/vars/user_variables.yml playbooks/infrastructure/all-the-infrastructure-things.yml
# Openstack Service Setup
ansible-playbook -e @/opt/ansible-lxc-rpc/rpc_deployment/vars/user_variables.yml playbooks/openstack/all-the-openstack-things.yml
popd
# Install all host bits
install_bits setup/host-setup.yml
# Install haproxy for dev purposes only
install_bits infrastructure/haproxy-install.yml
# Install all of the infra bits
install_bits infrastructure/infrastructure-setup.yml
# Install all of the OpenStack bits
install_bits openstack/openstack-common.yml
install_bits openstack/keystone.yml
install_bits openstack/keystone-add-all-services.yml
install_bits openstack/glance-all.yml
install_bits openstack/heat-all.yml
install_bits openstack/nova-all.yml
install_bits openstack/neutron-all.yml
install_bits openstack/cinder-all.yml
install_bits openstack/horizon-all.yml
install_bits openstack/utility.yml
install_bits openstack/rpc-support.yml
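# Illustrative only: if any single playbook above fails it can be re-run on
# its own once the underlying issue is resolved, for example:
#   install_bits openstack/nova-all.yml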
# Restart the rsyslog container(s)
for i in $(lxc-ls | grep "rsyslog"); do
lxc-stop -k -n $i; lxc-start -d -n $i
done
# Reconfigure Rsyslog
install_bits infrastructure/rsyslog-config.yml
popd
if [ ! "$(dpkg -l | grep linux-image-extra-3.13.0-35-generic)" ];then
apt-get install -y linux-image-extra-3.13.0-35-generic
rm /etc/update-motd.d/*
cat > /etc/update-motd.d/00-rpc-notice<< EOF
#!/usr/bin/env bash
echo ""
echo "############ RPC DEPLOYMENT #############"
echo "A new kernel was installed on this system. you will"
echo "need to re-bootstrap Galera to get the cluster operataional."
echo "from the /opt/ansible-lxc-rpc/rpc_deployment directory execute:"
echo ""
echo "ansible-playbook -e @/etc/rpc_deploy/user_variables.yml playbooks/infrastructure/galera-startup.yml"
EOF
chmod +x /etc/update-motd.d/00-rpc-notice
shutdown -r now
fi


@@ -0,0 +1,91 @@
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
heat_template_version: 2013-05-23
description: Heat template to deploy Rackspace Private Cloud v9
parameters:
ssh_key_name:
type: string
description: Name of a Key Pair to enable SSH access to the instance
image_name:
type: string
description: Name of image to use for server
flavor_name:
type: string
    description: Name of the flavor to use for the server
server_name:
type: string
default: RPCv9.0.0-AIO
description: The Instance Name
install_script_url:
type: string
default: https://raw.githubusercontent.com/rcbops/ansible-lxc-rpc/master/scripts/cloudserver-aio.sh
    description: The AIO script installation URL
frozen_repo_url:
type: string
default: http://rpc-slushee.rackspace.com
    description: URL to the frozen repository
repo_url:
type: string
default: https://github.com/rcbops/ansible-lxc-rpc.git
description: The repository URL
repo_branch:
type: string
default: master
description: The repository branch
net_id:
type: string
description: ID of Neutron network into which servers get deployed
sec_group:
type: string
description: Name of the security group
outputs:
RPCAIO_public_ip:
description: The public IP address of the newly configured Server.
value: { get_attr: [ RPCAIO, first_address ] }
RPCAIO_password:
description: The password for all the things.
value: secrete
resources:
RPCAIO_port:
type: OS::Neutron::Port
properties:
network_id: { get_param: net_id }
security_groups: [{ get_param: sec_group }]
RPCAIO:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor_name }
image: { get_param: image_name }
name: { get_param: server_name }
key_name: { get_param: ssh_key_name }
networks:
- port: { get_resource: RPCAIO_port }
user_data:
str_replace:
params:
"%install_script_url%": { get_param: install_script_url }
"%repo_url%": { get_param: repo_url }
"%repo_branch%": { get_param: repo_branch }
"%frozen_repo_url%": { get_param: frozen_repo_url }
template: |
#!/usr/bin/env bash
export REPO_URL="%repo_url%"
export REPO_BRANCH="%repo_branch%"
export FROZEN_REPO_URL="%frozen_repo_url%"
apt-get update
apt-get -y install wget
pushd /opt
bash <(wget -O- "%install_script_url%")
popd

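# Illustrative only (not part of the template above): assuming the
# python-heatclient CLI is installed and authenticated, a stack could be
# launched from this template with something like the following, where the
# file name, key, image, flavor, network ID, and security group values are
# hypothetical placeholders:
#   heat stack-create rpc-aio \
#     -f rpc-openstack-aio.yml \
#     -P "ssh_key_name=mykey;image_name=Ubuntu 14.04 LTS;flavor_name=m1.large;net_id=NET_UUID;sec_group=default"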

@@ -0,0 +1,78 @@
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
heat_template_version: 2013-05-23
description: Heat template to deploy Rackspace Private Cloud v9
parameters:
ssh_key_name:
type: string
description: Name of a Key Pair to enable SSH access to the instance
image_name:
type: string
description: Name of image to use for server
flavor_name:
type: string
    description: Name of the flavor to use for the server
server_name:
type: string
default: RPCv9.0.0-AIO
description: The Instance Name
install_script_url:
type: string
default: https://raw.githubusercontent.com/rcbops/ansible-lxc-rpc/master/scripts/cloudserver-aio.sh
    description: The AIO script installation URL
frozen_repo_url:
type: string
default: http://rpc-slushee.rackspace.com
    description: URL to the frozen repository
repo_url:
type: string
default: https://github.com/rcbops/ansible-lxc-rpc.git
description: The repository URL
repo_branch:
type: string
default: master
description: The repository branch
outputs:
RPCAIO_public_ip:
description: The public IP address of the newly configured Server.
value: { get_attr: [ RPCAIO, first_address ] }
RPCAIO_password:
description: The password for all the things.
value: secrete
resources:
RPCAIO:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor_name }
image: { get_param: image_name }
name: { get_param: server_name }
key_name: { get_param: ssh_key_name }
user_data:
str_replace:
params:
"%install_script_url%": { get_param: install_script_url }
"%repo_url%": { get_param: repo_url }
"%repo_branch%": { get_param: repo_branch }
"%frozen_repo_url%": { get_param: frozen_repo_url }
template: |
#!/usr/bin/env bash
export REPO_URL="%repo_url%"
export REPO_BRANCH="%repo_branch%"
export FROZEN_REPO_URL="%frozen_repo_url%"
apt-get update
apt-get -y install wget
pushd /opt
bash <(wget -O- "%install_script_url%")
popd
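# Illustrative only (not part of the template above): on the Rackspace Public
# Cloud this template could be launched the same way, minus the Neutron
# network and security group parameters; the stack name, file name, key,
# image, and flavor below are hypothetical placeholders:
#   heat stack-create rpc-aio \
#     -f rpc-rax-aio.yml \
#     -P "ssh_key_name=mykey;image_name=Ubuntu 14.04 LTS (Trusty Tahr);flavor_name=8 GB Performance"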