Allow deploying secondary worker and API nodes

This commit introduces a new parameter, "OCTAVIA_NODE", which defaults to
"standalone". Only the main (for HA) or standalone nodes create shared
resources such as the service accounts, the management network, and the
amphora image.

We also introduce a multinode Vagrant configuration.

A follow-up patch will introduce an haproxy instance in front of the Octavia
API endpoints.

Change-Id: I835ee7e0e925232b55f3c14eeed98aeb0f0ce463
Miguel Angel Ajo 2016-06-02 16:08:23 -04:00
parent a7ac7ad1cb
commit b47fcb2834
13 changed files with 363 additions and 55 deletions
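
For illustration, a minimal localrc sketch for a node that runs only the
secondary Octavia services; the values mirror the multinode sample
configurations added below:

    [[local|localrc]]
    enable_plugin octavia https://git.openstack.org/openstack/octavia
    enable_service octavia o-api o-cw o-hm o-hk
    OCTAVIA_NODE=second
    OCTAVIA_CONTROLLER_IP_PORT_LIST=192.168.0.3:5555,192.168.0.4:5555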


@@ -119,8 +119,10 @@ function octavia_configure {
     # Used to communicate with the amphora over the mgmt network, may differ from amp_ssh_key in a real deployment.
     iniset $OCTAVIA_CONF haproxy_amphora key_path ${OCTAVIA_AMP_SSH_KEY_PATH}
-    recreate_database_mysql octavia
-    octavia-db-manage upgrade head
+    if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then
+        recreate_database_mysql octavia
+        octavia-db-manage upgrade head
+    fi

     if [[ -a $OCTAVIA_CERTS_DIR ]] ; then
         rm -rf $OCTAVIA_CERTS_DIR
@@ -145,7 +147,8 @@ function octavia_configure {
 }

 function create_mgmt_network_interface {
-    id_and_mac=$(neutron port-create --name octavia-health-manager-listen-port --security-group lb-mgmt-sec-grp --device-owner Octavia:health-mgr --binding:host_id=$(hostname) lb-mgmt-net | awk '/ id | mac_address / {print $4}')
+    id_and_mac=$(neutron port-create --name octavia-health-manager-$OCTAVIA_NODE-listen-port --security-group lb-mgmt-sec-grp --device-owner Octavia:health-mgr --binding:host_id=$(hostname) lb-mgmt-net | awk '/ id | mac_address / {print $4}')
     id_and_mac=($id_and_mac)
     MGMT_PORT_ID=${id_and_mac[0]}
     MGMT_PORT_MAC=${id_and_mac[1]}
@@ -153,7 +156,13 @@ function create_mgmt_network_interface {
     sudo ovs-vsctl -- --may-exist add-port ${OVS_BRIDGE:-br-int} o-hm0 -- set Interface o-hm0 type=internal -- set Interface o-hm0 external-ids:iface-status=active -- set Interface o-hm0 external-ids:attached-mac=$MGMT_PORT_MAC -- set Interface o-hm0 external-ids:iface-id=$MGMT_PORT_ID
     sudo ip link set dev o-hm0 address $MGMT_PORT_MAC
     sudo dhclient -v o-hm0 -cf $OCTAVIA_DHCLIENT_CONF
-    iniset $OCTAVIA_CONF health_manager controller_ip_port_list $MGMT_PORT_IP:$OCTAVIA_HM_LISTEN_PORT
+    if [ $OCTAVIA_CONTROLLER_IP_PORT_LIST == 'auto' ] ; then
+        iniset $OCTAVIA_CONF health_manager controller_ip_port_list $MGMT_PORT_IP:$OCTAVIA_HM_LISTEN_PORT
+    else
+        iniset $OCTAVIA_CONF health_manager controller_ip_port_list $OCTAVIA_CONTROLLER_IP_PORT_LIST
+    fi
     iniset $OCTAVIA_CONF health_manager bind_ip $MGMT_PORT_IP
     iniset $OCTAVIA_CONF health_manager bind_port $OCTAVIA_HM_LISTEN_PORT
 }
@@ -172,7 +181,6 @@ function build_mgmt_network {
     OCTAVIA_MGMT_SEC_GRP_ID=$(nova secgroup-list | awk ' / lb-mgmt-sec-grp / {print $2}')
     iniset ${OCTAVIA_CONF} controller_worker amp_secgroup_list ${OCTAVIA_MGMT_SEC_GRP_ID}
-    create_mgmt_network_interface
 }

 function configure_octavia_tempest {
@@ -194,32 +202,49 @@ function octavia_start {
     # Several steps in this function would more logically be in the configure function, but
     # we need nova, glance, and neutron to be running.
-    nova keypair-add --pub-key ${OCTAVIA_AMP_SSH_KEY_PATH}.pub ${OCTAVIA_AMP_SSH_KEY_NAME}
-    if ! [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then
-        build_octavia_worker_image
+    if [ $OCTAVIA_NODE != 'main' ] && [ $OCTAVIA_NODE != 'standalone' ] ; then
+        # without the other services enabled apparently we don't have
+        # credentials at this point
+        TOP_DIR=$(cd $(dirname "$0") && pwd)
+        source ${TOP_DIR}/openrc admin admin
     fi
-    OCTAVIA_AMP_IMAGE_ID=$(glance image-list | grep ${OCTAVIA_AMP_IMAGE_NAME} | awk '{print $2}')
-    if [ -n "$OCTAVIA_AMP_IMAGE_ID" ]; then
-        glance image-tag-update ${OCTAVIA_AMP_IMAGE_ID} ${OCTAVIA_AMP_IMAGE_TAG}
+    if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then
+        # things that should only happen on the ha main node / or once
+        nova keypair-add --pub-key ${OCTAVIA_AMP_SSH_KEY_PATH}.pub ${OCTAVIA_AMP_SSH_KEY_NAME}
+        if ! [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then
+            build_octavia_worker_image
+        fi
+        OCTAVIA_AMP_IMAGE_ID=$(glance image-list | grep ${OCTAVIA_AMP_IMAGE_NAME} | awk '{print $2}')
+        if [ -n "$OCTAVIA_AMP_IMAGE_ID" ]; then
+            glance image-tag-update ${OCTAVIA_AMP_IMAGE_ID} ${OCTAVIA_AMP_IMAGE_TAG}
+        fi
+        create_amphora_flavor
+        # Create a management network.
+        build_mgmt_network
+        create_octavia_accounts
+        # Adds service and endpoint
+        if is_service_enabled tempest; then
+            configure_octavia_tempest ${OCTAVIA_AMP_NETWORK_ID}
+        fi
+    else
+        OCTAVIA_AMP_IMAGE_ID=$(glance image-list | grep ${OCTAVIA_AMP_IMAGE_NAME} | awk '{print $2}')
     fi
+    create_mgmt_network_interface
     iniset $OCTAVIA_CONF controller_worker amp_image_tag ${OCTAVIA_AMP_IMAGE_TAG}
-    create_amphora_flavor
-    # Create a management network.
-    build_mgmt_network
     OCTAVIA_AMP_NETWORK_ID=$(neutron net-list | awk '/ lb-mgmt-net / {print $2}')
     iniset $OCTAVIA_CONF controller_worker amp_boot_network_list ${OCTAVIA_AMP_NETWORK_ID}
-    if is_service_enabled tempest; then
-        configure_octavia_tempest ${OCTAVIA_AMP_NETWORK_ID}
-    fi
-    # Adds service and endpoint
-    create_octavia_accounts
     run_process $OCTAVIA_API "$OCTAVIA_API_BINARY $OCTAVIA_API_ARGS"
     run_process $OCTAVIA_CONSUMER "$OCTAVIA_CONSUMER_BINARY $OCTAVIA_CONSUMER_ARGS"
@@ -236,7 +261,7 @@ function octavia_stop {
     [ ! -z "$pids" ] && sudo kill $pids
 }

 function octavia_configure_common {
-    if is_service_enabled $OCTAVIA_SERVICE; then
+    if is_service_enabled $OCTAVIA_SERVICE && { [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ]; } ; then
         inicomment $NEUTRON_LBAAS_CONF service_providers service_provider
         iniadd $NEUTRON_LBAAS_CONF service_providers service_provider $OCTAVIA_SERVICE_PROVIDER
     fi
@@ -258,32 +283,35 @@ function octavia_cleanup {
     if [ ${OCTAVIA_AMP_SSH_KEY_PATH}x != x ] ; then
         rm -f ${OCTAVIA_AMP_SSH_KEY_PATH} ${OCTAVIA_AMP_SSH_KEY_PATH}.pub
     fi
-    if [ ${OCTAVIA_AMP_SSH_KEY_NAME}x != x ] ; then
-        nova keypair-delete ${OCTAVIA_AMP_SSH_KEY_NAME}
+    if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then
+        if [ ${OCTAVIA_AMP_SSH_KEY_NAME}x != x ] ; then
+            nova keypair-delete ${OCTAVIA_AMP_SSH_KEY_NAME}
+        fi
     fi
 }

 # check for service enabled
 if is_service_enabled $OCTAVIA; then
+    if [ $OCTAVIA_NODE == 'main' ] || [ $OCTAVIA_NODE == 'standalone' ] ; then # main-ha node stuff only
-    if ! is_service_enabled $Q_SVC || ! is_service_enabled $LBAAS_V2; then
-        die "The neutron $Q_SVC and $LBAAS_V2 services must be enabled to use $OCTAVIA"
-    fi
+        if ! is_service_enabled $Q_SVC || ! is_service_enabled $LBAAS_V2; then
+            die "The neutron $Q_SVC and $LBAAS_V2 services must be enabled to use $OCTAVIA"
+        fi

-    # Check if an amphora image is already loaded
-    AMPHORA_IMAGE_NAME=$(glance image-list | awk '/ amphora-x64-haproxy / {print $4}')
-    export AMPHORA_IMAGE_NAME
+        # Check if an amphora image is already loaded
+        AMPHORA_IMAGE_NAME=$(glance image-list | awk '/ amphora-x64-haproxy / {print $4}')
+        export AMPHORA_IMAGE_NAME

-    if [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then
-        echo "Found DISABLE_AMP_IMAGE_BUILD == True"
-        echo "Skipping amphora image build"
-    fi
+        if [ "$DISABLE_AMP_IMAGE_BUILD" == 'True' ]; then
+            echo "Found DISABLE_AMP_IMAGE_BUILD == True"
+            echo "Skipping amphora image build"
+        fi

-    if [ "$AMPHORA_IMAGE_NAME" == 'amphora-x64-haproxy' ]; then
-        echo "Found existing amphora image: $AMPHORA_IMAGE_NAME"
-        echo "Skipping amphora image build"
-        DISABLE_AMP_IMAGE_BUILD=True
-        export DISABLE_AMP_IMAGE_BUILD
+        if [ "$AMPHORA_IMAGE_NAME" == 'amphora-x64-haproxy' ]; then
+            echo "Found existing amphora image: $AMPHORA_IMAGE_NAME"
+            echo "Skipping amphora image build"
+            DISABLE_AMP_IMAGE_BUILD=True
+            export DISABLE_AMP_IMAGE_BUILD
+        fi
     fi

     if [[ "$1" == "stack" && "$2" == "install" ]]; then


@@ -1,5 +1,5 @@
 This file describes how to use Vagrant (http://www.vagrantup.com) to
-create a devstack virtual machine that contains two nova instances
+create a devstack virtual environment that contains two nova instances
 running a simple web server and a working Neutron LBaaS Version 2 load
 balancer backed by Octavia.
@@ -10,28 +10,74 @@ the package appropriate for your system. On Ubuntu, simply type:

     sudo apt-get install vagrant

 2) copy 'Vagrantfile' from this directory to any appropriate directory
    and run 'vagrant up':

     mkdir $HOME/lbaas-octavia-vagrant # or any other appropriate directory
-    cp Vagrantfile *.sh $HOME/lbaas-octavia-vagrant
-    cd $HOME/lbaas-octavia-vagrant
+    cp -rfp . $HOME/lbaas-octavia-vagrant

+3) Continue either with the single node deployment (6GB RAM minimum), or with
+   the multinode deployment (12GB RAM minimum).

+Single node deployment
+~~~~~~~~~~~~~~~~~~~~~~

+1) Create and deploy the environment VM:

+    cd $HOME/lbaas-octavia-vagrant/singlenode
     vagrant up

 Alternatively, you can specify the number of vcpus or memory:

     VM_CPUS=4 VM_MEMORY=8192 vagrant up

-3) Wait for the vagrant VM to boot and install, typically 20-30 minutes
+2) Wait for the vagrant VM to boot and install, typically 20-30 minutes

-4) SSH into the vagrant box
+3) SSH into the vagrant box

     vagrant ssh

-5) Determine the loadbalancer IP:
+4) Continue with the common section below.

+Multinode
+~~~~~~~~~

+This will create an environment where the Octavia services are replicated
+across two nodes. In front of the Octavia API, an haproxy instance distributes
+traffic between both API servers and provides failure tolerance (an example
+haproxy frontend is sketched after this README). Please note that the
+database is a single MySQL instance, with no clustering.

+1) Create and deploy the environment VMs:

+    cd $HOME/lbaas-octavia-vagrant/multinode
+    vagrant up main

+2) Wait for the main node to be deployed, and then start the second node:

+    vagrant up second

+3) Log in to the main node and run local-manual.sh now that everything is
+   deployed:

+    vagrant ssh main
+    cd devstack
+    ./local-manual.sh
+    logout

+4) SSH into any of the vagrant boxes:

+    vagrant ssh main
+    vagrant ssh second

+5) Continue with the common section below.

+Common to multinode and single node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

+1) Determine the loadbalancer IP:

     source openrc admin admin
     neutron lbaas-loadbalancer-show lb1 | grep vip_address

-6) make HTTP requests to test your load balancer:
+2) Make HTTP requests to test your load balancer:

     curl <LB_IP>
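
The haproxy frontend for the Octavia API is delivered by the follow-up patch
mentioned in the commit message; it is not part of this change. As a rough,
hypothetical sketch of what such a frontend could look like, assuming the
sample node addresses 192.168.42.10/11 and the default Octavia API port 9876
(the frontend port 80 is an arbitrary choice for illustration):

    # hypothetical haproxy.cfg fragment, not part of this patch
    frontend octavia-api
        mode http
        bind 0.0.0.0:80
        default_backend octavia-api-nodes

    backend octavia-api-nodes
        mode http
        balance roundrobin
        server main   192.168.42.10:9876 check
        server second 192.168.42.11:9876 check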

devstack/samples/multinode/Vagrantfile (new executable file, 50 lines)

@@ -0,0 +1,50 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

require '../providers.rb'

Vagrant.configure(2) do |config|

  config.vm.define 'main' do |main|
    configure_providers(main.vm)
    main.vm.network "private_network", ip:"192.168.42.10"
    main.vm.hostname = "main"
    main.vm.provision "shell", privileged: false, inline: <<-SHELL
      #!/usr/bin/env bash
      set -e
      sudo apt-get update
      sudo apt-get -y upgrade
      sudo apt-get -y install git
      git clone https://git.openstack.org/openstack-dev/devstack
      cp /vagrant/local.conf ~/devstack
      cp /vagrant/local.sh ~/devstack/local-manual.sh
      cp /vagrant/webserver.sh ~/devstack
      cd ~/devstack
      ./stack.sh
    SHELL
  end

  config.vm.define 'second' do |second|
    configure_providers(second.vm)
    second.vm.network "private_network", ip:"192.168.42.11"
    second.vm.hostname = "second"
    second.vm.provision "shell", privileged: false, inline: <<-SHELL
      #!/usr/bin/env bash
      set -e
      sudo apt-get update
      sudo apt-get -y upgrade
      sudo apt-get -y install git
      git clone https://git.openstack.org/openstack-dev/devstack
      cp /vagrant/local-2.conf ~/devstack/local.conf
      cd ~/devstack
      ./stack.sh
    SHELL
  end
end


@@ -0,0 +1,73 @@
[[local|localrc]]
# The name of the RECLONE environment variable is a bit misleading. It doesn't actually
# reclone repositories, rather it uses git fetch to make sure the repos are current.
RECLONE=True
# Load the external LBaaS plugin.
enable_plugin octavia https://git.openstack.org/openstack/octavia
LIBS_FROM_GIT+=python-neutronclient
DATABASE_PASSWORD=password
ADMIN_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
RABBIT_PASSWORD=password
# Enable Logging
LOGFILE=$DEST/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
SCREEN_LOGDIR=$DEST/logs
ENABLED_SERVICES=
# Nova
enable_service n-cpu
# Neutron
enable_service neutron
enable_service q-agt
# LBaaS V2 and Octavia
enable_service octavia
enable_service o-api
enable_service o-cw
enable_service o-hm
enable_service o-hk
#NOTE(mangelajo): there are possibly bugs in the housekeeper that need to be
# addressed to make it fully stateless. As per @lingxian, the housekeeper
# could create more spare amphorae than needed when run on parallel nodes.
OCTAVIA_USE_PREGENERATED_CERTS=True
OCTAVIA_USE_PREGENERATED_SSH_KEY=True
OCTAVIA_CONTROLLER_IP_PORT_LIST=192.168.0.3:5555,192.168.0.4:5555
OCTAVIA_NODE=second
# we are not enabling the mysql service here, but this is necessary
# to get the connection string constructed
DATABASE_TYPE=mysql
Q_PLUGIN=ml2
Q_ML2_TENANT_NETWORK_TYPE=vxlan
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
LOGFILE=$DEST/logs/stack.sh.log
# Old log files are automatically removed after 7 days to keep things neat. Change
# the number of days by setting ``LOGDAYS``.
LOGDAYS=2
HOST_IP=192.168.42.11
SERVICE_HOST=192.168.42.10
MULTI_HOST=1
Q_HOST=$SERVICE_HOST
MYSQL_HOST=$SERVICE_HOST
RABBIT_HOST=$SERVICE_HOST
GLANCE_HOSTPORT=$SERVICE_HOST:9292
NOVA_VNC_ENABLED=True
NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"


@@ -0,0 +1,84 @@
[[local|localrc]]
# The name of the RECLONE environment variable is a bit misleading. It doesn't actually
# reclone repositories, rather it uses git fetch to make sure the repos are current.
RECLONE=True
# Load the external LBaaS plugin.
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
enable_plugin neutron-lbaas-dashboard https://git.openstack.org/openstack/neutron-lbaas-dashboard
enable_plugin octavia https://git.openstack.org/openstack/octavia
enable_plugin barbican https://git.openstack.org/openstack/barbican
LIBS_FROM_GIT+=python-neutronclient
DATABASE_PASSWORD=password
ADMIN_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
RABBIT_PASSWORD=password
# Enable Logging
LOGFILE=$DEST/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
SCREEN_LOGDIR=$DEST/logs
# Pre-requisites
enable_service rabbit
enable_service mysql
enable_service key
# Horizon
enable_service horizon
# Nova
enable_service n-api
enable_service n-crt
enable_service n-cpu
enable_service n-cond
enable_service n-sch
# Glance
enable_service g-api
enable_service g-reg
# Neutron
enable_service q-svc
enable_service q-agt
enable_service q-dhcp
enable_service q-l3
enable_service q-meta
# Cinder
enable_service c-api
enable_service c-vol
enable_service c-sch
# LBaaS V2 and Octavia
enable_service q-lbaasv2
enable_service octavia
enable_service o-cw
enable_service o-hm
enable_service o-hk
enable_service o-api
OCTAVIA_USE_PREGENERATED_CERTS=True
OCTAVIA_USE_PREGENERATED_SSH_KEY=True
OCTAVIA_CONTROLLER_IP_PORT_LIST=192.168.0.3:5555,192.168.0.4:5555
OCTAVIA_NODE=main
Q_PLUGIN=ml2
Q_ML2_TENANT_NETWORK_TYPE=vxlan
IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
LOGFILE=$DEST/logs/stack.sh.log
# Old log files are automatically removed after 7 days to keep things neat. Change
# the number of days by setting ``LOGDAYS``.
LOGDAYS=2
HOST_IP=192.168.42.10
MULTI_HOST=1


@@ -0,0 +1 @@
../singlenode/local.sh


@@ -0,0 +1 @@
../singlenode/webserver.sh


@@ -0,0 +1,22 @@
# defaults
VM_MEMORY = ENV['VM_MEMORY'] || "8192"
VM_CPUS = ENV['VM_CPUS'] || "1"

def configure_providers(vm)
  vm.provider "virtualbox" do |vb, config|
    config.vm.box = "ubuntu/trusty64"
    vb.gui = true
    vb.memory = VM_MEMORY
    vb.cpus = VM_CPUS
  end

  vm.provider "libvirt" do |lb, config|
    config.vm.box = "celebdor/trusty64"
    config.vm.synced_folder './', '/vagrant', type: 'rsync'
    lb.nested = true
    lb.memory = VM_MEMORY
    lb.cpus = VM_CPUS
    lb.suspend_mode = 'managedsave'
  end
end


@@ -42,12 +42,6 @@ OCTAVIA_AMP_SSH_KEY_TYPE=${OCTAVIA_SSH_KEY_TYPE:-"rsa"}
 OCTAVIA_AMP_SSH_KEY_PATH=${OCTAVIA_SSH_KEY_PATH:-${OCTAVIA_SSH_DIR}/octavia_ssh_key}
 OCTAVIA_AMP_SSH_KEY_NAME=${OCTAVIA_AMP_SSH_KEY_NAME:-"octavia_ssh_key"}
-OCTAVIA_USE_PREGENERATED_SSH_KEY=${OCTAVIA_USE_PREGENERATED_SSH_KEY:-"False"}
-OCTAVIA_PREGENERATED_SSH_KEY_PATH=${OCTAVIA_PREGENERATED_SSH_KEY_PATH:-"${OCTAVIA_DIR}/devstack/pregenerated/ssh-keys/octavia_ssh_key"}
-OCTAVIA_USE_PREGENERATED_CERTS=${OCTAVIA_USE_PREGENERATED_CERTS:-"False"}
-OCTAVIA_PREGENERATED_CERTS_DIR=${OCTAVIA_PREGENERATED_CERTS_DIR:-"${OCTAVIA_DIR}/devstack/pregenerated/certs"}

 OCTAVIA_AMP_FLAVOR_ID=${OCTAVIA_AMP_FLAVOR_ID:-"10"}
 OCTAVIA_AMP_IMAGE_NAME=${OCTAVIA_AMP_IMAGE_NAME:-"amphora-x64-haproxy"}
 OCTAVIA_AMP_IMAGE_FILE=${OCTAVIA_AMP_IMAGE_FILE:-${OCTAVIA_DIR}/diskimage-create/${OCTAVIA_AMP_IMAGE_NAME}.qcow2}

@@ -80,3 +74,12 @@ NEUTRON_LBAAS_CONF=$NEUTRON_CONF_DIR/neutron_lbaas.conf
 OCTAVIA_SERVICE_PROVIDER=${OCTAVIA_SERVICE_PROVIDER:-"LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default"}
 Q_SVC=${Q_SVC:-"q-svc"}
 LBAAS_V2=${LBAAS_V2:-"q-lbaasv2"}
+
+# HA-deployment related settings
+OCTAVIA_USE_PREGENERATED_SSH_KEY=${OCTAVIA_USE_PREGENERATED_SSH_KEY:-"False"}
+OCTAVIA_PREGENERATED_SSH_KEY_PATH=${OCTAVIA_PREGENERATED_SSH_KEY_PATH:-"${OCTAVIA_DIR}/devstack/pregenerated/ssh-keys/octavia_ssh_key"}
+OCTAVIA_USE_PREGENERATED_CERTS=${OCTAVIA_USE_PREGENERATED_CERTS:-"False"}
+OCTAVIA_PREGENERATED_CERTS_DIR=${OCTAVIA_PREGENERATED_CERTS_DIR:-"${OCTAVIA_DIR}/devstack/pregenerated/certs"}
+
+OCTAVIA_NODE=${OCTAVIA_NODE:-"standalone"}
+OCTAVIA_CONTROLLER_IP_PORT_LIST=${OCTAVIA_CONTROLLER_IP_PORT_LIST:-"auto"}
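
To make the new defaults concrete: when OCTAVIA_CONTROLLER_IP_PORT_LIST is left
at "auto", create_mgmt_network_interface writes only the local o-hm0 address
into the health-manager configuration, while an explicit list (as in the
multinode samples above) gives every node the full set of health-manager
endpoints. A sketch of the resulting [health_manager] section on the first
node, where the bind_ip value is an assumed lb-mgmt-net address obtained via
DHCP on o-hm0:

    [health_manager]
    controller_ip_port_list = 192.168.0.3:5555,192.168.0.4:5555
    bind_ip = 192.168.0.3
    bind_port = 5555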