NFP (contrib) - Controller Image Build Scripts

This changeset contains the following:
(1) NFP controller image builder tool
    This supports both devstack and Red Hat RPM installations.
(2) NFP setup script to configure NFP on an RPM-installed setup.
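For illustration, a minimal usage sketch of the RPM setup script, driving the command-line options it defines (the script filename used here is hypothetical; it is not shown in this change):
    # hypothetical filename for the NFP setup script added by this change
    sudo python nfp_setup.py --build-controller-vm --image-build-cache-dir $HOME/.cache/image-create
    sudo python nfp_setup.py --enable-orchestrator   # write and restart the nfp_orchestrator/nfp_config_orch services
    sudo python nfp_setup.py --enable-proxy          # write and restart the nfp_proxy/nfp_proxy_agent services
    sudo python nfp_setup.py --create-resources      # create the default NFP GBP resources and heat stack
    sudo python nfp_setup.py --launch-controller --controller-path <path-to-configurator-qcow2>
    sudo python nfp_setup.py --clean-up              # remove the configurator VM and NFP resources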

Change-Id: I074d713ad94286240fdae1887b0d4acd28f66144
Implements: blueprint gbp-network-services-framework
This commit is contained in:
Rajendra Machani 2016-08-02 20:09:57 +05:30
parent 05c119d3a3
commit 380519eb46
38 changed files with 1654 additions and 318 deletions

View File

@ -24,7 +24,8 @@ NFPSERVICE_DIR=$DEST/gbp
NEUTRON_CONF_DIR=/etc/neutron
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
NFP_CONF_DIR=/etc/nfp
DISKIMAGE_CREATE_DIR=$NFPSERVICE_DIR/gbpservice/tests/contrib/diskimage-create
DISKIMAGE_CREATE_DIR=$NFPSERVICE_DIR/gbpservice/contrib/nfp/tools/image_builder
NEUTRON_SRC_BRANCH_FOR_NFP_CONTROLLER=stable/mitaka
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@ -196,9 +197,9 @@ function create_nfp_gbp_resources {
gbp l3policy-create\
--ip-version 4\
--proxy-ip-pool=192.169.0.0/24\
--ip-pool 120.0.0.0/24\
--subnet-prefix-length 24\
--ip-pool 172.16.0.0/16\
--subnet-prefix-length 20\
--proxy-ip-pool=172.17.0.0/16\
service_management
gbp l2policy-create\
@ -244,7 +245,12 @@ function create_nfp_image {
if [[ $NFP_DEVSTACK_MODE = base ]]; then
RefConfiguratorQcow2ImageName=reference_configurator_image
echo "Building Image: $RefConfiguratorQcow2ImageName"
sudo python $DISKIMAGE_CREATE_DIR/disk_image_create.py $DISKIMAGE_CREATE_DIR/ref_configurator_conf.json
sudo python -c\
'from gbpservice.contrib.nfp.tools.image_builder import disk_image_create as DIB;\
DIB.cur_dir = "'$DISKIMAGE_CREATE_DIR'";\
DIB.conf["ubuntu_release"] = {"release": "wily"};\
DIB.conf["dib"] = {"image_size": 3, "elements": ["nfp-reference-configurator", "dhcp-all-interfaces", "devuser"], "offline": True, "cache_dir": "'$HOME'/.cache/image-create"};\
DIB.dib()'
RefConfiguratorQcow2Image=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path)
echo "Uploading Image: $RefConfiguratorQcow2ImageName"
glance image-create --name $RefConfiguratorQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $RefConfiguratorQcow2Image
@ -255,7 +261,26 @@ function create_nfp_image {
create_port_for_vm $ConfiguratorQcow2ImageName $ConfiguratorInstanceName
if [[ $ConfiguratorQcow2Image = build ]]; then
echo "Building Image: $ConfiguratorQcow2ImageName"
sudo python $DISKIMAGE_CREATE_DIR/disk_image_create.py $DISKIMAGE_CREATE_DIR/configurator_conf.json $GBPSERVICE_BRANCH
# Prepare source for configurator
git clone -b $NEUTRON_SRC_BRANCH_FOR_NFP_CONTROLLER https://github.com/openstack/neutron-lib.git
cp -r neutron-lib/neutron_lib $DISKIMAGE_CREATE_DIR/neutron_lib
rm -rf neutron-lib
git_clone $GBPSERVICE_REPO $DEVSTACK_DIR/group-based-policy $GBPSERVICE_BRANCH
cp -r $DEVSTACK_DIR/group-based-policy/gbpservice $DISKIMAGE_CREATE_DIR/gbpservice
rm -rf $DEVSTACK_DIR/group-based-policy
git clone -b $NEUTRON_SRC_BRANCH_FOR_NFP_CONTROLLER https://github.com/openstack/neutron.git
cp -r neutron/neutron $DISKIMAGE_CREATE_DIR/neutron
rm -rf neutron
git clone -b $NEUTRON_SRC_BRANCH_FOR_NFP_CONTROLLER https://github.com/openstack/neutron-lbaas.git
cp -r neutron-lbaas/neutron_lbaas $DISKIMAGE_CREATE_DIR/neutron_lbaas
rm -rf neutron-lbaas
sudo python -c\
'from gbpservice.contrib.nfp.tools.image_builder import disk_image_create as DIB;\
DIB.cur_dir = "'$DISKIMAGE_CREATE_DIR'";\
DIB.conf["ubuntu_release"] = {"release": "trusty"};\
DIB.conf["dib"] = {"image_size": 10, "elements": ["configurator"], "offline": True, "cache_dir": "'$HOME'/.cache/image-create"};\
DIB.dib()'
rm -rf $DISKIMAGE_CREATE_DIR/neutron_lib $DISKIMAGE_CREATE_DIR/gbpservice $DISKIMAGE_CREATE_DIR/neutron $DISKIMAGE_CREATE_DIR/neutron_lbaas
ConfiguratorQcow2Image=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path)
fi
echo "Uploading Image: $ConfiguratorQcow2ImageName"
@ -279,7 +304,7 @@ function create_nfp_image {
function configure_configurator_user_data {
CUR_DIR=$PWD
sudo rm -rf /opt/configurator_user_data
sudo cp -r $NFPSERVICE_DIR/devstack/exercises/nfp_service/user-data/configurator_user_data /opt/.
sudo cp -r $DISKIMAGE_CREATE_DIR/configurator_user_data /opt/.
cd /opt
sudo rm -rf my.key my.key.pub
sudo ssh-keygen -t rsa -N "" -f my.key

View File

@ -35,8 +35,6 @@ function nfp_configure_neutron {
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_user "neutron"
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_tenant_name "service"
iniset $NEUTRON_CONF group_policy_implicit_policy default_ip_pool "11.0.0.0/8"
iniset $NEUTRON_CONF group_policy_implicit_policy default_proxy_ip_pool "192.169.0.0/16"
iniset $NEUTRON_CONF group_policy_implicit_policy default_external_segment_name "default"
iniset $NEUTRON_CONF nfp_node_driver is_service_admin_owned "True"
iniset $NEUTRON_CONF nfp_node_driver svc_management_ptg_name "svc_management_ptg"

View File

@ -1,71 +0,0 @@
FROM ubuntu:14.04
RUN apt-get -y update --fix-missing
# dependencies
RUN apt-get -y --force-yes install vim\
python2.7\
python-pip\
python2.7-dev\
build-essential libssl-dev libffi-dev\
libyaml-dev\
python-ipaddr\
git\
rabbitmq-server\
unzip
# python dependencies
RUN pip install python-keystoneclient \
oslo.config==3.6.0 \
oslo.log==2.4.0 \
oslo.messaging==4.2.0 \
oslo.db==4.4.0 \
oslo.policy \
iptools \
cryptography \
pecan==1.0.4 \
amqp==1.4.9 \
wsme
# haproxy lbaasv2 dependencies
RUN sudo apt-get -y --force-yes install python-dev
RUN sudo pip install "octavia<0.8"
RUN git clone -b stable/mitaka --single-branch https://github.com/openstack/neutron-lbaas.git neutron-lbaas
RUN sudo pip install -r /neutron-lbaas/requirements.txt --allow-all-external
RUN cur_dir=$PWD
RUN cd /neutron-lbaas && sudo python setup.py install
RUN cd $cur_dir
RUN git clone https://github.com/kevinsteves/pan-python.git pan-python
RUN cur_dir=$PWD
RUN cd /pan-python && sudo ./setup.py install
RUN cd $cur_dir
# Cache buster
ADD https://www.random.org/strings/?num=10&len=8&digits=on&upperalpha=on&loweralpha=on&unique=on&format=plain&rnd=new cache-buster
RUN git clone -b stable/mitaka --single-branch https://github.com/openstack/neutron-lib.git neutron_lib
RUN cp -r /neutron_lib/neutron_lib /usr/local/lib/python2.7/dist-packages/
RUN git clone -b stable/mitaka --single-branch https://github.com/openstack/neutron.git neutron
RUN cp -r /neutron/neutron /usr/local/lib/python2.7/dist-packages/
RUN git clone https://github.com/openstack/group-based-policy.git group-based-policy
RUN cd /group-based-policy && git fetch https://git.openstack.org/openstack/group-based-policy GIT-BRANCH-NAME && git checkout FETCH_HEAD
RUN cp -r /group-based-policy/gbpservice /usr/local/lib/python2.7/dist-packages/
RUN cp /group-based-policy/gbpservice/nfp/bin/nfp /usr/bin/
RUN chmod +x /usr/bin/nfp
RUN cp /group-based-policy/gbpservice/contrib/nfp/bin/nfp_configurator.ini /etc/
RUN cp /group-based-policy/gbpservice/contrib/nfp/bin/policy.json /etc/
RUN mkdir -p /var/log/nfp
RUN touch /var/log/nfp/nfp_configurator.log
RUN touch /var/log/nfp/nfp_pecan.log
RUN sed -i "/version = version.encode('utf-8')/a\ version = '1.8.0'" /usr/local/lib/python2.7/dist-packages/pbr/packaging.py
RUN apt-get -y --force-yes install screen
COPY ./run.sh /run.sh
RUN chmod +777 /run.sh
EXPOSE 8070:8080
EXPOSE 5672:5672
ENV HOME /root
CMD ["/run.sh"]

View File

View File

@ -0,0 +1,58 @@
FROM ubuntu:14.04
RUN apt-get -y update --fix-missing
# dependencies
RUN apt-get -y --force-yes install \
python2.7\
python-pip\
python2.7-dev\
build-essential libssl-dev libffi-dev\
libyaml-dev\
python-ipaddr\
rabbitmq-server\
python-dev \
screen
# python dependencies
RUN pip install \
python-keystoneclient \
oslo.config==3.6.0 \
oslo.log==2.4.0 \
oslo.messaging==4.2.0 \
oslo.db==4.4.0 \
oslo.policy \
iptools \
cryptography \
pecan==1.0.4 \
amqp==1.4.9 \
wsme \
"octavia<0.8"
# copy local src to docker image
COPY ./neutron_lib /usr/local/lib/python2.7/dist-packages/neutron_lib
COPY ./gbpservice /usr/local/lib/python2.7/dist-packages/gbpservice
COPY ./neutron /usr/local/lib/python2.7/dist-packages/neutron
COPY ./neutron_lbaas /usr/local/lib/python2.7/dist-packages/neutron_lbaas
COPY ./configurator_run.sh /configurator_run.sh
# Cache buster
ADD https://www.random.org/strings/?num=10&len=8&digits=on&upperalpha=on&loweralpha=on&unique=on&format=plain&rnd=new cache-buster
# clean-up
RUN rm -rf /var/lib/apt/lists/* && apt-get autoremove -y
RUN cp /usr/local/lib/python2.7/dist-packages/gbpservice/nfp/bin/nfp /usr/bin/
RUN chmod +x /usr/bin/nfp
RUN cp /usr/local/lib/python2.7/dist-packages/gbpservice/contrib/nfp/bin/nfp_configurator.ini /etc/
RUN cp /usr/local/lib/python2.7/dist-packages/gbpservice/contrib/nfp/bin/policy.json /etc/
RUN mkdir -p /var/log/nfp
RUN touch /var/log/nfp/nfp_configurator.log
RUN touch /var/log/nfp/nfp_pecan.log
RUN sed -i "/version = version.encode('utf-8')/a\ version = '1.8.0'" /usr/local/lib/python2.7/dist-packages/pbr/packaging.py
RUN chmod +555 /configurator_run.sh
EXPOSE 8070:8080
EXPOSE 5672:5672
ENV HOME /root
CMD ["/configurator_run.sh"]

View File

@ -0,0 +1,147 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_serialization import jsonutils
import os
import subprocess
import sys
conf = {}
cur_dir = ''
docker_build_dir = None
def parse_json(j_file):
global conf
with open(j_file) as json_data:
conf = jsonutils.load(json_data)
return
def create_configurator_docker():
docker_images = cur_dir + '/docker-images/'
docker_images = os.path.realpath(docker_images)
# create a docker image
os.chdir(docker_build_dir)
# build configurator docker
docker_args = ['docker', 'build', '-t', 'configurator-docker', '.']
ret = subprocess.call(docker_args)
if(ret):
print("Failed to build docker image [configurator-docker]")
return -1
if not os.path.isdir(docker_images):
os.mkdir(docker_images)
os.chdir(docker_images)
del(docker_args)
# save the docker image
docker_args = ['docker', 'save', '-o', 'configurator-docker',
'configurator-docker']
ret = subprocess.call(docker_args)
if(ret):
print("Failed to save docker image [configurator-docker]")
return -1
# set environment variable, needed by 'extra-data.d'
os.environ['DOCKER_IMAGES'] = docker_images
return 0
def dib():
global docker_build_dir
dib = conf['dib']
elems = cur_dir + '/elements/'
# set the elements path in environment variable
os.environ['ELEMENTS_PATH'] = elems
# set the Ubuntu Release for the build in environment variable
os.environ['DIB_RELEASE'] = conf['ubuntu_release']['release']
# basic elements
dib_args = ['disk-image-create', 'base', 'vm', 'ubuntu']
image_name = conf['ubuntu_release']['release']
# element for creating configurator image
if 'nfp-reference-configurator' in dib['elements']:
image_name = 'nfp_reference_service'
service_dir = "%s/../../../../tests/contrib/nfp_service/" % cur_dir
service_dir = os.path.realpath(service_dir)
pecan_dir = "%s/../../../../nfp/" % cur_dir
pecan_dir = os.path.realpath(pecan_dir)
os.environ['PECAN_GIT_PATH'] = pecan_dir
os.environ['SERVICE_GIT_PATH'] = service_dir
if 'devuser' in dib['elements']:
os.environ['DIB_DEV_USER_USERNAME'] = 'ubuntu'
os.environ['DIB_DEV_USER_SHELL'] = '/bin/bash'
os.environ['SSH_RSS_KEY'] = (
"%s/%s" % (cur_dir, image_name))
os.environ['DIB_DEV_USER_AUTHORIZED_KEYS'] = (
"%s.pub" % os.environ['SSH_RSS_KEY'])
elif 'configurator' in dib['elements']:
if not docker_build_dir:
docker_build_dir = cur_dir
if(create_configurator_docker()):
return (False, None)
# for bigger size images
if "--no-tmpfs" not in dib_args:
dib_args.append('--no-tmpfs')
# append docker-opt element
if "docker-opt" not in dib_args:
dib_args.append("docker-opt")
for element in dib['elements']:
image_name = image_name + '_' + element
dib_args.append(element)
# offline mode, assuming the image cache (tar) already exists
if(dib['offline']):
dib_args.append('--offline')
# set the image build cache dir
dib_args.append('--image-cache')
dib_args.append(dib['cache_dir'])
# set image size
dib_args.append('--image-size')
dib_args.append(str(dib['image_size']))
timestamp = datetime.datetime.now().strftime('%I%M%p-%d-%m-%Y')
image_name = image_name + '_' + timestamp
dib_args.append('-o')
if 'nfp-reference-configurator' in dib['elements']:
image_name = 'nfp_reference_service'
dib_args.append(str(image_name))
os.chdir(cur_dir)
out_dir = 'output'
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
os.chdir(out_dir)
print("DIB-ARGS: ", dib_args)
ret = subprocess.call(dib_args)
if not ret:
output_path = os.path.realpath('./')
print("Output path: ", output_path)
output_image = output_path + '/' + image_name + '.qcow2'
print("Image location: %s" % output_image)
with open("%s/last_built_image_path" % output_path, "w") as f:
f.write(output_image)
return (True, output_image)
return (False, None)

View File

@ -0,0 +1,15 @@
#!/bin/sh
# configure_configurator_user_data() - Configure Configurator user data
function configure_configurator_user_data {
rm -rf ssh_key ssh_key.pub
ssh-keygen -t rsa -N "" -f ssh_key
value=`cat ssh_key.pub`
sed -i "8 i\ -\ $value" configurator_user_data
sed -i '9d' configurator_user_data
}
configure_configurator_user_data

View File

@ -1,5 +1,5 @@
Visibility
----------
configurator
------------
These are the custom elements defined for adding the configurator
docker image inside the cloud image.
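As a rough illustration (the timestamp suffix appended by the script is omitted, and paths are abbreviated), the dib() helper in disk_image_create.py expands this element into a disk-image-create invocation along these lines:
    # approximate command assembled by dib() when only the 'configurator' element is selected
    export ELEMENTS_PATH=<image_builder>/elements
    export DIB_RELEASE=trusty
    export DOCKER_IMAGES=<image_builder>/docker-images
    disk-image-create base vm ubuntu --no-tmpfs docker-opt configurator \
        --offline --image-cache $HOME/.cache/image-create --image-size 10 \
        -o trusty_configurator_<timestamp>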

View File

@ -0,0 +1,11 @@
#!/bin/bash
set -eu
# create docker directory
sudo mkdir -p ${TMP_MOUNT_PATH}/usr/share/docker
# create 'images' directory to hold docker images
sudo mkdir -p ${TMP_MOUNT_PATH}/usr/share/docker/images
# copy the saved configurator docker image into the VM
sudo cp -L ${DOCKER_IMAGES}/configurator-docker ${TMP_MOUNT_PATH}/usr/share/docker/images/

View File

@ -0,0 +1,4 @@
#!/bin/bash
set -eux
docker load -i /usr/share/docker/images/configurator-docker

View File

@ -0,0 +1,5 @@
#!/bin/bash
set -eux
# start docker on every boot
sed -i '2i docker start configurator' /etc/rc.local

View File

@ -0,0 +1,5 @@
#!/bin/bash
set -eux
# install docker engine
wget -qO- https://get.docker.com/ | bash

View File

@ -0,0 +1,8 @@
#!/bin/bash
set -eux
# start docker and load the configurator image
docker daemon --raw-logs &
# sleep for docker to come up
sleep 10

View File

@ -1,6 +1,5 @@
#!/bin/bash
set -eux
# since we already did a docker load, we can delete docker images
rm -rf /usr/share/configurator/docker/images/
rm -rf /usr/share/docker/images/

View File

@ -0,0 +1,598 @@
#!/usr/bin/python
import argparse
import sys
import os
import shutil
import subprocess
import ConfigParser
import commands
import time
import platform
from image_builder import disk_image_create as DIB
# Defines
TEMP_WORK_DIR = "tmp"
CONFIG = ConfigParser.ConfigParser()
NEUTRON_CONF = "/etc/neutron/neutron.conf"
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
CONFIGURATOR_USER_DATA = FILE_PATH + "/image_builder/configurator_user_data"
TEMPLATES_PATH = FILE_PATH + "/templates/gbp_resources.yaml"
# global values
# these src_dirs will be copied from the host into the docker image; these
# directories are assumed to be present in src_path
src_dirs = ["gbpservice", "neutron", "neutron_lbaas", "neutron_lib"]
# create a temp directory for copying srcs
dst_dir = "/tmp/controller_docker_build/"
parser = argparse.ArgumentParser()
parser.add_argument('--build-controller-vm', action='store_true',
dest='build_controller_vm',
default=False, help='enable building controller vm')
parser.add_argument('--image-build-cache-dir', type=str,
help=('directory path where trusty image tar.gz'
' can be found for building controller vm'))
parser.add_argument('--enable-orchestrator', action='store_true',
dest='enable_orchestrator',
default=False,
help='enable creating orchestrator systemctl file')
parser.add_argument('--enable-proxy', action='store_true',
dest='enable_proxy',
default=False,
help='enable creating proxy systemctl file')
parser.add_argument('--create-resources', action='store_true',
dest='create_resources',
default=False,
help='enable creating nfp required resources')
parser.add_argument('--launch-controller', action='store_true',
dest='launch_controller',
default=False, help='enable to launch controller vm')
parser.add_argument('--clean-up', action='store_true', dest='clean_up_nfp',
default=False,
help='enable to clean up nfp services and resources')
parser.add_argument('--controller-path', type=str, dest='controller_path',
help='path to the controller image')
args = parser.parse_args()
def get_src_dirs():
print("Getting source dirs for copying inside the docker image")
# get the operating system type
(os_type, os_version, os_release) = platform.dist()
if os_type in ['Ubuntu']:
src_path = "/usr/lib/python2.7/dist-packages/"
elif os_type in ['centos', 'redhat']:
src_path = "/usr/lib/python2.7/site-packages/"
else:
print("ERROR: Unsupported Operating System(%s)" % os_type)
return 1
for src_dir in src_dirs:
to_copy = src_path + src_dir
if not os.path.isdir(to_copy):
print("ERROR: directory not found: ", to_copy)
return 1
# create a tmp directory for creating configurator docker
subprocess.call(["rm", "-rf", dst_dir])
os.mkdir(dst_dir)
dockerfile = DIB.cur_dir + "/Dockerfile"
run_sh = DIB.cur_dir + "/configurator_run.sh"
# these src_dirs will be copied from host to inside docker image
for src_dir in src_dirs:
to_copy = src_path + src_dir
if(subprocess.call(["cp", "-r", to_copy, dst_dir])):
print("ERROR: failed to copy %s to ./ directory" % to_copy)
return 1
subprocess.call(["cp", dockerfile, dst_dir])
subprocess.call(["cp", run_sh, dst_dir])
DIB.docker_build_dir = dst_dir
return 0
def clean_src_dirs():
subprocess.call(["rm", "-rf", dst_dir])
def update_user_data():
os.chdir(DIB.cur_dir)
print("Updating user_data with fresh ssh key")
subprocess.call(["bash", "edit_user_data.sh"])
return
def build_configuration_vm():
cur_dir = os.path.dirname(__file__)
cur_dir = os.path.realpath(cur_dir)
if not cur_dir:
# if script is executed from current dir, get abs path
cur_dir = os.path.realpath('./')
# update dib current working dir
DIB.cur_dir = cur_dir + '/image_builder'
if(get_src_dirs()):
return
# update configurator user_data with a fresh rsa ssh keypair
update_user_data()
# set the cache dir where trusty tar.gz will be present
if args.image_build_cache_dir:
cache_dir = args.image_build_cache_dir
else:
cache_dir = os.environ.get('HOME', '-1') + '/.cache/image-create'
# create a configuration dictionary needed by DIB
DIB.conf['ubuntu_release'] = {'release': 'trusty'}
DIB.conf['dib'] = {"image_size": 10, "elements": ["configurator"],
"offline": True, "cache_dir": cache_dir}
# Build configurator VM
(ret, image) = DIB.dib()
if not ret:
print("ERROR: Failed to create Configurator VM")
else:
print("SUCCESS, created Configurator VM: ", image)
# clean up the src_dirs copied into PWD
clean_src_dirs()
os.chdir(cur_dir)
return
def restart_nfp_orchestrator():
try:
subprocess.call(["systemctl", "daemon-reload"])
subprocess.call(["service", "nfp_orchestrator", "restart"])
except Exception as error:
print("Error restarting nfp_orchestrator service")
print(error)
sys.exit(1)
def restart_nfp_config_orch():
try:
subprocess.call(["systemctl", "daemon-reload"])
subprocess.call(["service", "nfp_config_orch", "restart"])
except Exception as error:
print("Error restarting nfp_orchestrator service")
print(error)
sys.exit(1)
def restart_nfp_proxy():
try:
subprocess.call(["systemctl", "daemon-reload"])
subprocess.call(["service", "nfp_proxy", "restart"])
except Exception as error:
print("Error restarting nfp_proxy service")
print(error)
sys.exit(1)
def restart_nfp_proxy_agent():
try:
subprocess.call(["systemctl", "daemon-reload"])
subprocess.call(["service", "nfp_proxy_agent", "restart"])
except Exception as error:
print("Error restarting nfp_proxy_agent service")
print(error)
sys.exit(1)
def create_orchestrator_ctl():
"""
create nfp orchestrator systemctl service file
"""
if not os.path.exists("/var/log/nfp"):
os.makedirs("/var/log/nfp")
os.system("chown neutron:neutron /var/log/nfp")
if not os.path.exists(TEMP_WORK_DIR):
os.makedirs(TEMP_WORK_DIR)
orch_ctl_file = TEMP_WORK_DIR + "/nfp_orchestrator.service"
try:
file = open(orch_ctl_file, 'w+')
except:
print("Error creating " + orch_ctl_file + " file")
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Orchestrator\n")
file.write("After=syslog.target network.target\n\n[Service]")
file.write("\nUser=neutron\nExecStart=/usr/bin/nfp --config-file ")
file.write(" /etc/neutron/neutron.conf --config-file ")
file.write(" /etc/neutron/plugins/ml2/ml2_conf.ini ")
file.write(" --config-file /etc/nfp/nfp_orchestrator.ini ")
file.write("--log-file /var/log/nfp/nfp_orchestrator.log\n\n")
file.write("[Install]\nWantedBy=multi-user.target")
file.close()
if os.path.exists("/usr/lib/systemd/system"):
shutil.copy(orch_ctl_file, "/usr/lib/systemd/system/")
else:
print("Error: /usr/lib/systemd/system not present")
sys.exit(1)
orch_config_file = TEMP_WORK_DIR + "/nfp_config_orch.service"
try:
file = open(orch_config_file, 'w+')
except:
print("Error creating " + orch_ctl_file + " file")
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Config Orchestrator")
file.write("\nAfter=syslog.target network.target")
file.write("\n\n[Service]\nType=simple\nUser=neutron")
file.write("\nExecStart=/usr/bin/nfp"
" --config-file /etc/nfp/nfp_config_orch.ini")
file.write(" --config-file /etc/neutron/neutron.conf"
" --log-file /var/log/nfp/nfp_config_orch.log")
file.write("\n\n[Install]\nWantedBy=multi-user.target")
file.close()
if os.path.exists("/usr/lib/systemd/system"):
shutil.copy(orch_config_file, "/usr/lib/systemd/system/")
else:
print("Error: /usr/lib/systemd/system not present")
sys.exit(1)
try:
shutil.rmtree(TEMP_WORK_DIR)
except:
print("Error: Cleaning up the temp directory")
sys.exit(1)
def create_nfp_namespace_file():
"""
create the nfp_namespace helper script used by the nfp proxy
"""
if not os.path.exists(TEMP_WORK_DIR):
os.makedirs(TEMP_WORK_DIR)
proxy_tool_file = TEMP_WORK_DIR + "/nfp_namespace"
try:
filepx = open(proxy_tool_file, 'w+')
except:
print("Error creating " + proxy_tool_file + " file")
sys.exit(1)
filepx.write("#!/usr/bin/bash\n")
filepx.write("\nNOVA_CONF=/etc/nova/nova.conf\nNOVA_SESSION=neutron")
filepx.write("\n\nget_openstack_creds () {")
filepx.write("\n\tAUTH_URI=`crudini --get $NOVA_CONF $NOVA_SESSION"
" admin_auth_url`")
filepx.write("\n\tADMIN_USER=`crudini --get $NOVA_CONF $NOVA_SESSION"
" admin_username`")
filepx.write("\n\tADMIN_PASSWD=`crudini --get $NOVA_CONF $NOVA_SESSION"
" admin_password`")
filepx.write("\n\tADMIN_TENANT_NAME=`crudini --get $NOVA_CONF"
" $NOVA_SESSION admin_tenant_name`")
filepx.write("\n\texport OS_USERNAME=$ADMIN_USER")
filepx.write("\n\texport OS_TENANT_NAME=$ADMIN_TENANT_NAME")
filepx.write("\n\texport OS_PASSWORD=$ADMIN_PASSWD")
filepx.write("\n\texport OS_AUTH_URL=$AUTH_URI\n\n}")
filepx.write("\n\nfunction namespace_delete {\n\tget_openstack_creds")
filepx.write("\n\n\tproxyPortId=`neutron port-list | ")
filepx.write("grep pt_nfp_proxy_pt | awk '{print $2}'`")
filepx.write("\n\ttapName=\"tap${proxyPortId:0:11}\"\n\n"
"\t#Deletion namespace")
filepx.write("\n\tNFP_P=`ip netns | grep \"nfp-proxy\"`")
filepx.write("\n\tif [ ${#NFP_P} -ne 0 ]; then\n\t\t"
"ip netns delete nfp-proxy")
filepx.write("\n\t\techo \"namespace removed\"\n\tfi")
filepx.write("\n\n\t#pt1 port removing from ovs")
filepx.write("\n\tPORT=`ovs-vsctl show | grep \"$tapName\"`")
filepx.write("\n\tif [ ${#PORT} -ne 0 ]; then")
filepx.write("\n\t\tovs-vsctl del-port br-int $tapName")
filepx.write("\n\t\techo \"ovs port is removed\"")
filepx.write("\n\tfi\n\tpkill nfp_proxy")
filepx.write("\n\n\tgbp pt-delete nfp_proxy_pt")
filepx.write("\n\n\techo \"nfp-proxy cleaning success.... \"\n\n}")
filepx.write("\n\nfunction namespace_create {\n\n\tget_openstack_creds")
filepx.write("\n\tSERVICE_MGMT_GROUP=\"svc_management_ptg\"")
filepx.write("\n\tcidr=\"/24\"")
filepx.write("\n\techo \"Creating new namespace nfp-proxy....\"")
filepx.write("\n\n\t#new namespace with name proxy")
filepx.write("\n\tNFP_P=`ip netns add nfp-proxy`")
filepx.write("\n\tif [ ${#NFP_P} -eq 0 ]; then")
filepx.write("\n\t\techo \"New namepace nfp-proxt create\"")
filepx.write("\n\telse\n\t\techo \"nfp-proxy creation failed\"\n\t\t"
"exit 0")
filepx.write("\n\tfi\n\n\t# create nfp_proxy pt")
filepx.write("\n\tgbp pt-create --policy-target-group $SERVICE_MGMT_GROUP"
" nfp_proxy_pt")
filepx.write("\n\n\t# Get the nfp_proxy_pt port id, mac address")
filepx.write("\n\tproxyPortId=`neutron port-list | grep pt_nfp_proxy_pt"
" | awk '{print $2}'`")
filepx.write("\n\tproxyMacAddr=`neutron port-list | grep pt_nfp_proxy_pt"
" | awk '{print $6}'`")
filepx.write("\n\tproxyPortIp=`neutron port-list | grep pt_nfp_proxy_pt"
" | awk '{print $11}' | sed 's/^\"\(.*\)\"}$/\\1/'`")
filepx.write("\n\ttapName=\"tap${proxyPortId:0:11}\"")
filepx.write("\n\tnew_ip_cidr=\"$proxyPortIp/24\"")
filepx.write("\n\tproxyBrd=`ipcalc -4 $proxyPortIp -m 255.255.255.0 -b"
" | grep BROADCAST | awk -F '=' '{print $2}'`")
filepx.write("\n\n\t# Create a tap interface and add it"
" to the ovs bridge br-int")
filepx.write("\n\tovs-vsctl add-port br-int $tapName -- set Interface"
" $tapName type=internal")
filepx.write(" external_ids:iface-id=$proxyPortId"
" external_ids:iface-status=active"
" external_ids:attached-mac=$proxyMacAddr")
filepx.write("\n\n\t# Add the tap interface to proxy\n\t"
"ip link set $tapName netns nfp-proxy")
filepx.write("\n\n\t# Get the link up\n\tip netns exec nfp-proxy"
" ip link set $tapName up")
filepx.write("\n\n\t# set the mac address on the tap interface\n\t"
"ip netns exec nfp-proxy"
" ip link set $tapName address $proxyMacAddr")
filepx.write("\n\n\t# assign ip address to the proxy tap interface")
filepx.write("\n\tip netns exec nfp-proxy ip -4 addr add"
" $new_ip_cidr scope global dev $tapName brd $proxyBrd")
filepx.write("\n\n\t# Update the neutron port with the host id binding")
filepx.write("\n\tneutron port-update $proxyPortId"
" --binding:host_id=`hostname`")
filepx.write("\n\n\tPING=`ip netns exec nfp-proxy"
" ping $1 -q -c 2 > /dev/null`")
filepx.write("\n\tif [ ${#PING} -eq 0 ]\n\tthen")
filepx.write("\n\t\techo \"nfp-proxy namespcace creation success and"
" reaching to $1\"")
filepx.write("\n\telse\n\t\techo \"Fails reaching to $1\"")
filepx.write("\n\tfi\n\n\tip netns exec nfp-proxy /usr/bin/nfp_proxy")
filepx.write(" --config-file=/etc/nfp/nfp_proxy.ini"
" --log-file /var/log/nfp/nfp_proxy.log")
filepx.write("\n}")
filepx.close()
if os.path.exists("/usr/lib/python2.7/site-packages/gbpservice/nfp/"
"tools/"):
shutil.copy(proxy_tool_file,
"/usr/lib/python2.7/site-packages/gbpservice/nfp/tools/")
else:
os.makedirs("/usr/lib/python2.7/site-packages/gbpservice/nfp/tools")
shutil.copy(proxy_tool_file, "/usr/lib/python2.7/site-packages/gbpservice/nfp/tools/")
try:
shutil.rmtree(TEMP_WORK_DIR)
except:
print("Error: Cleaning up the temp directory")
sys.exit(1)
def create_proxy_ctl():
"""
create nfp proxy systemctl service file
"""
if not os.path.exists("/var/log/nfp"):
os.makedirs("/var/log/nfp")
if not os.path.exists(TEMP_WORK_DIR):
os.makedirs(TEMP_WORK_DIR)
proxy_sup_file = TEMP_WORK_DIR + "/nfpproxy_startup"
try:
filepx = open(proxy_sup_file, 'w+')
except:
print("Error creating " + proxy_sup_file + " file")
sys.exit(1)
filepx.write("#!/usr/bin/sh\nNFP_PROXY_AGENT_INI=/etc/nfp/nfp_proxy.ini")
filepx.write("\nCONFIGURATOR_IP=`crudini --get $NFP_PROXY_AGENT_INI"
" NFP_CONTROLLER rest_server_address`\n")
filepx.write(". /usr/lib/python2.7/site-packages/gbpservice/nfp/tools/"
"nfp_namespace;")
filepx.write("namespace_delete ;namespace_create $CONFIGURATOR_IP")
filepx.close()
proxy_ctl_file = TEMP_WORK_DIR + "/nfp_proxy.service"
try:
file = open(proxy_ctl_file, 'w+')
except:
print("Error creating " + proxy_ctl_file + " file")
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Proxy\n")
file.write("After=syslog.target network.target\n\n")
file.write("\n[Service]\nUser=root\nExecStart=/usr/bin/nfpproxy_startup")
file.write("\nRestart=on-abort")
file.write("\n\n[Install]\nWantedBy=multi-user.target")
file.close()
if os.path.exists("/usr/lib/systemd/system"):
shutil.copy(proxy_ctl_file, "/usr/lib/systemd/system/")
else:
print("error: /usr/lib/systemd/system not present")
sys.exit(1)
if os.path.exists("/usr/bin"):
shutil.copy(proxy_sup_file, "/usr/bin/")
os.system("chmod +x /usr/bin/nfpproxy_startup")
else:
print("error: /usr/bin not present")
sys.exit(1)
try:
shutil.rmtree(TEMP_WORK_DIR)
except:
print("Error: Cleaning up the temp directory")
sys.exit(1)
def create_proxy_agent_ctl():
"""
create nfp proxy agent systemctl service file
"""
if not os.path.exists(TEMP_WORK_DIR):
os.makedirs(TEMP_WORK_DIR)
proxy_ctl_file = TEMP_WORK_DIR + "/nfp_proxy_agent.service"
try:
file = open(proxy_ctl_file, 'w+')
except:
print("Error creating " + proxy_ctl_file + " file")
sys.exit(1)
file.write("[Unit]\nDescription=One Convergence NFP Proxy Agent")
file.write("\nAfter=syslog.target network.target\n")
file.write("\n[Service]\nUser=root")
file.write("\nExecStart=/usr/bin/nfp "
"--config-file /etc/neutron/neutron.conf ")
file.write("--config-file /etc/nfp/nfp_proxy_agent.ini ")
file.write("--log-file /var/log/nfp/nfp_proxy_agent.log\n")
file.write("\n[Install]\nWantedBy=multi-user.target\n")
file.close()
if os.path.exists("/usr/lib/systemd/system"):
shutil.copy(proxy_ctl_file, "/usr/lib/systemd/system/")
else:
print("error: /usr/lib/systemd/system not present")
sys.exit(1)
try:
shutil.rmtree(TEMP_WORK_DIR)
except:
print("Error: Cleaning up the temp directory")
sys.exit(1)
def get_openstack_creds():
CONFIG.read(NEUTRON_CONF)
AUTH_URI = CONFIG.get('keystone_authtoken', 'auth_uri')
AUTH_USER = CONFIG.get('keystone_authtoken', 'admin_user')
AUTH_PASSWORD = CONFIG.get('keystone_authtoken', 'admin_password')
AUTH_TENANT_NAME = CONFIG.get('keystone_authtoken', 'admin_tenant_name')
os.environ["OS_USERNAME"] = AUTH_USER
os.environ["OS_TENANT_NAME"] = AUTH_TENANT_NAME
os.environ["OS_PASSWORD"] = AUTH_PASSWORD
os.environ["OS_AUTH_URL"] = AUTH_URI
def create_nfp_resources():
"""
create nfp resources
"""
get_openstack_creds()
os.system("gbp l3policy-create default-nfp --ip-pool 172.16.0.0/16"
" --subnet-prefix-length 20 --proxy-ip-pool=172.17.0.0/16")
l3policy_Id = commands.getstatusoutput(
"gbp l3policy-list | grep '\sdefault-nfp\s' | awk '{print $2}'")[1]
os.system("gbp l2policy-create --l3-policy " +
l3policy_Id + " svc_management_ptg")
l2policy_Id = commands.getstatusoutput(
"gbp l2policy-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
os.system("gbp group-create svc_management_ptg --service_management True"
" --l2-policy " + l2policy_Id)
# Create GBP Resources Heat stack
os.system("heat stack-create --template-file " + TEMPLATES_PATH +
" gbp_services_stack")
def launch_configurator():
get_openstack_creds()
if os.path.isfile(args.controller_path):
os.system("glance image-create --name configurator"
" --disk-format qcow2 --container-format bare"
" --visibility public --file " + args.controller_path)
else:
print("Error " + args.controller_path + " does not exist")
sys.exit(1)
Port_id = commands.getstatusoutput(
"gbp policy-target-create --policy-target-group svc_management_ptg"
" configuratorVM_instance | grep port_id | awk '{print $4}'")[1]
Image_id = commands.getstatusoutput(
"glance image-list | grep configurator |awk '{print $2}'")[1]
if Image_id and Port_id:
os.system("nova boot --flavor m1.medium --image " +
Image_id + " --user-data " + CONFIGURATOR_USER_DATA +
" --nic port-id=" + Port_id + " configuratorVM_instance")
else:
if not Port_id:
print("Error unable to create the controller port id")
else:
print("Erro unable to get configurator image info")
sys.exit(1)
def clean_up():
"""
clean up nfp resources
"""
get_openstack_creds()
InstanceId = commands.getstatusoutput(
"nova list | grep configuratorVM_instance | awk '{print $2}'")[1]
if InstanceId:
os.system("nova delete " + InstanceId)
time.sleep(10)
PolicyTargetId = commands.getstatusoutput(
"gbp policy-target-list | grep configuratorVM_instance"
" | awk '{print $2}'")[1]
if PolicyTargetId:
os.system("gbp policy-target-delete " + PolicyTargetId)
ImageId = commands.getstatusoutput(
"glance image-list | grep configurator | awk '{print $2}'")[1]
if ImageId:
os.system("glance image-delete " + ImageId)
ServiceMGMTId = commands.getstatusoutput(
"gbp group-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
if ServiceMGMTId:
SvcGroupId = commands.getstatusoutput(
"gbp group-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
l2policyId = commands.getstatusoutput(
"gbp l2policy-list | grep '\ssvc_management_ptg\s'"
" | awk '{print $2}'")[1]
l3policyId = commands.getstatusoutput(
"gbp l3policy-list | grep '\sdefault-nfp\s'"
" | awk '{print $2}'")[1]
os.system("gbp group-delete " + SvcGroupId)
os.system("gbp l2policy-delete " + l2policyId)
os.system("gbp l3policy-delete " + l3policyId)
HeatId = commands.getstatusoutput(
"heat stack-list | grep '\sgbp_services_stack\s'"
" | awk '{print $2}'")[1]
if HeatId:
os.system("heat stack-delete gbp_services_stack")
def main():
if args.build_controller_vm:
build_configuration_vm()
elif args.enable_orchestrator:
create_orchestrator_ctl()
restart_nfp_orchestrator()
restart_nfp_config_orch()
elif args.enable_proxy:
create_nfp_namespace_file()
create_proxy_ctl()
restart_nfp_proxy()
create_proxy_agent_ctl()
restart_nfp_proxy_agent()
elif args.create_resources:
create_nfp_resources()
elif args.launch_controller:
if args.controller_path:
launch_configurator()
else:
parser.print_help()
elif args.clean_up_nfp:
clean_up()
else:
parser.print_help()
if __name__ == '__main__':
main()

View File

@ -0,0 +1,54 @@
{
"heat_template_version": "2013-05-23",
"description": "Configuration for Firewall service",
"resources": {
"Firewall": {
"type": "OS::Neutron::Firewall",
"properties": {
"admin_state_up": true,
"firewall_policy_id": {
"get_resource": "Firewall_Policy"
},
"name": "FWaaS",
"description": "Firewll Resource"
}
},
"Firewall_Policy": {
"type": "OS::Neutron::FirewallPolicy",
"properties": {
"description": "firewall policy Resource",
"audited": false,
"firewall_rules": [
{
"get_resource": "Rule_1"
},
{
"get_resource": "Rule_2"
}
],
"name": "FW_policy"
}
},
"Rule_1": {
"type": "OS::Neutron::FirewallRule",
"properties": {
"protocol": "tcp",
"name": "fw-tcp-rule",
"enabled": true,
"destination_port": "80",
"action": "allow",
"description": "fw config to allow tcp"
}
},
"Rule_2": {
"type": "OS::Neutron::FirewallRule",
"properties": {
"protocol": "icmp",
"name": "fw-icmp-rule",
"enabled": true,
"action": "allow",
"description": "fw config to allow icmp"
}
}
}
}

View File

@ -0,0 +1,546 @@
heat_template_version: 2014-10-16
resources:
HTTP-REDIRECT-FW-LB:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: HTTP-REDIRECT-FW-LB
enabled: true
policy_classifier_id: {get_resource: HTTP}
policy_actions: [{get_resource: REDIRECT-FW-LB}]
shared: true
HTTP-REDIRECT-LB:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: HTTP-REDIRECT-LB
enabled: true
policy_classifier_id: {get_resource: HTTP}
policy_actions: [{get_resource: REDIRECT-LB}]
shared: true
MySQL-REDIRECT-FW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: MySQL-REDIRECT-FW
enabled: true
policy_classifier_id: {get_resource: MySQL}
policy_actions: [{get_resource: REDIRECT-FW}]
shared: true
ANY-REDIRECT-VPN-FW-LB:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: ANY-REDIRECT-VPN-FW-LB
enabled: true
policy_classifier_id: {get_resource: ANY }
policy_actions: [{get_resource: REDIRECT-VPN-FW-LB }]
shared: true
HTTP-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: HTTP-ALLOW
enabled: true
policy_classifier_id: {get_resource: HTTP}
policy_actions: [{get_resource: ALLOW}]
shared: true
HTTPS-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: HTTPS-ALLOW
enabled: true
policy_classifier_id: {get_resource: HTTPS}
policy_actions: [{get_resource: ALLOW}]
shared: true
SYSLOG-UDP-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: SYSLOG-UDP-ALLOW
enabled: true
policy_classifier_id: {get_resource: SYSLOG-UDP }
policy_actions: [{get_resource: ALLOW}]
shared: true
ICMP-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: ICMP-ALLOW
enabled: true
policy_classifier_id: {get_resource: ICMP}
policy_actions: [{get_resource: ALLOW}]
shared: true
SSH-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: SSH-ALLOW
enabled: true
policy_classifier_id: {get_resource: SSH}
policy_actions: [{get_resource: ALLOW}]
shared: true
SNMP-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: SNMP-ALLOW
enabled: true
policy_classifier_id: {get_resource: SNMP}
policy_actions: [{get_resource: ALLOW}]
shared: true
ANY-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: ANY-ALLOW
enabled: true
policy_classifier_id: {get_resource: ANY }
policy_actions: [{get_resource: ALLOW}]
shared: true
TCP-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: TCP-ALLOW
enabled: true
policy_classifier_id: {get_resource: ANY-TCP }
policy_actions: [{get_resource: ALLOW}]
shared: true
UDP-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: UDP-ALLOW
enabled: true
policy_classifier_id: {get_resource: ANY-UDP }
policy_actions: [{get_resource: ALLOW}]
shared: true
KEYSTONE-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: KEYSTONE-ALLOW
enabled: true
policy_classifier_id: {get_resource: KEYSTONE }
policy_actions: [{get_resource: ALLOW}]
shared: true
KEYSTONE-ADMIN-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: KEYSTONE-ADMIN-ALLOW
enabled: true
policy_classifier_id: {get_resource: KEYSTONE-ADMIN }
policy_actions: [{get_resource: ALLOW}]
shared: true
NEUTRON-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: NEUTRON-ALLOW
enabled: true
policy_classifier_id: {get_resource: NEUTRON }
policy_actions: [{get_resource: ALLOW}]
shared: true
NOVA-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: NOVA-ALLOW
enabled: true
policy_classifier_id: {get_resource: NOVA }
policy_actions: [{get_resource: ALLOW}]
shared: true
CEILOMETER-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: CEILOMETER-ALLOW
enabled: true
policy_classifier_id: {get_resource: CEILOMETER }
policy_actions: [{get_resource: ALLOW}]
shared: true
MySQL-ALLOW:
type: OS::GroupBasedPolicy::PolicyRule
properties:
name: MySQL-ALLOW
enabled: true
policy_classifier_id: {get_resource: MySQL }
policy_actions: [{get_resource: ALLOW}]
shared: true
ICMP:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: ICMP
protocol: icmp
direction: bi
shared: true
SSH:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: SSH
protocol: tcp
port_range: 22
direction: in
shared: true
HTTP:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: HTTP
protocol: tcp
port_range: 80
direction: in
shared: true
HTTPS:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: HTTPS
protocol: tcp
port_range: 443
direction: in
shared: true
SNMP:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: SNMP
protocol: udp
port_range: 161:162
direction: bi
shared: true
SYSLOG-UDP:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: SYSLOG-UDP
protocol: udp
port_range: 514
direction: bi
shared: true
ANY:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: ANY-TRAFFIC
direction: bi
shared: true
ANY-TCP:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: ANY-TCP
protocol: tcp
direction: in
shared: true
ANY-UDP:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: ANY-UDP
protocol: udp
direction: bi
shared: true
MySQL:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: MySQL
protocol: tcp
port_range: 3306
direction: in
shared: true
NEUTRON:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: NEUTRON
protocol: tcp
port_range: 9696
direction: out
shared: true
NOVA:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: NOVA
protocol: tcp
port_range: 8774
direction: out
shared: true
CEILOMETER:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: CEILOMETER
protocol: tcp
port_range: 8777
direction: out
shared: true
KEYSTONE:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: KEYSTONE
protocol: tcp
port_range: 5000
direction: out
shared: true
KEYSTONE-ADMIN:
type: OS::GroupBasedPolicy::PolicyClassifier
properties:
name: KEYSTONE-ADMIN
protocol: tcp
port_range: 35357
direction: out
shared: true
ALLOW:
type: OS::GroupBasedPolicy::PolicyAction
properties:
name: ALLOW
action_type: allow
shared: true
REDIRECT-LB:
type: OS::GroupBasedPolicy::PolicyAction
properties:
name: REDIRECT-LB
action_type: redirect
action_value: {get_resource: SPEC-LB}
shared: true
REDIRECT-FW:
type: OS::GroupBasedPolicy::PolicyAction
properties:
name: REDIRECT-FW
action_type: redirect
action_value: {get_resource: SPEC-FW}
shared: true
REDIRECT-FW-LB:
type: OS::GroupBasedPolicy::PolicyAction
properties:
name: REDIRECT-FW-LB
action_type: redirect
action_value: {get_resource: SPEC-FW-LB}
shared: true
REDIRECT-VPN-FW-LB:
type: OS::GroupBasedPolicy::PolicyAction
properties:
name: REDIRECT-VPN-FW-LB
action_type: redirect
action_value: {get_resource: SPEC-VPN-FW-LB }
shared: true
SPEC-LB:
type: OS::GroupBasedPolicy::ServiceChainSpec
properties:
name: LB
nodes: [{get_resource: NODE-LB}]
shared: true
SPEC-FW:
type: OS::GroupBasedPolicy::ServiceChainSpec
properties:
name: FW
nodes: [{get_resource: NODE-FW}]
shared: true
SPEC-FW-LB:
type: OS::GroupBasedPolicy::ServiceChainSpec
properties:
name: FW-LB
nodes:
- { get_resource: NODE-FW }
- { get_resource: NODE-LB }
shared: true
SPEC-VPN-FW-LB:
type: OS::GroupBasedPolicy::ServiceChainSpec
properties:
name: VPN-FW-LB
nodes:
- { get_resource: NODE-VPN }
- { get_resource: NODE-FW }
- { get_resource: NODE-LB }
shared: true
SPEC-VPN-FW-LB-MANAGED:
type: OS::GroupBasedPolicy::ServiceChainSpec
properties:
name: SPEC-VPN-FW-LB-MANAGED
nodes:
- { get_resource: NODE-VPN-ASAv }
- { get_resource: NODE-FW-ASAv }
- { get_resource: NODE-LB-F5 }
shared: true
SPEC-ASAvFW-LB:
type: OS::GroupBasedPolicy::ServiceChainSpec
properties:
name: SPEC-ASAvFW-LB-MANAGED
nodes:
- { get_resource: NODE-FW-ASAv }
- { get_resource: NODE-LB-F5 }
shared: true
NODE-FW:
type: OS::GroupBasedPolicy::ServiceChainNode
properties:
name: FW
service_profile_id: {get_resource: PROFILE-FW}
config: { get_file: fw.template }
shared: True
NODE-LB:
type: OS::GroupBasedPolicy::ServiceChainNode
properties:
name: LB
service_profile_id: {get_resource: PROFILE-LB}
config: { get_file: lb.template }
shared: true
NODE-VPN:
type: OS::GroupBasedPolicy::ServiceChainNode
properties:
name: VPN
service_profile_id: {get_resource: PROFILE-VPN}
config: { get_file: vpn.template }
shared: true
NODE-LB-F5:
type: OS::GroupBasedPolicy::ServiceChainNode
properties:
name: LB-F5
service_profile_id: {get_resource: PROFILE-LB-F5}
config: { get_file: lb.template }
shared: true
NODE-FW-ASAv:
type: OS::GroupBasedPolicy::ServiceChainNode
properties:
name: FW-ASAv
service_profile_id: {get_resource: PROFILE-FW-ASAV}
config: { get_file: fw.template }
shared: True
NODE-VPN-ASAv:
type: OS::GroupBasedPolicy::ServiceChainNode
properties:
name: VPN-ASAv
service_profile_id: {get_resource: PROFILE-VPN-ASAV}
config: { get_file: vpn.template }
shared: True
NODE-FW-NEXTGEN:
type: OS::GroupBasedPolicy::ServiceChainNode
properties:
name: FW-NEXTGEN
service_profile_id: {get_resource: PROFILE-FW-PALOALTO}
config: { get_file: fw.template }
shared: True
PROFILE-FW:
type: OS::GroupBasedPolicy::ServiceProfile
properties:
name: FW
vendor: NFP
service_type: FIREWALL
insertion_mode: l3
service_flavor: service_vendor=vyos,device_type=nova
shared: true
PROFILE-VPN:
type: OS::GroupBasedPolicy::ServiceProfile
properties:
name: VPN
vendor: NFP
service_type: VPN
insertion_mode: l3
service_flavor: service_vendor=vyos,device_type=nova
shared: true
PROFILE-LB:
type: OS::GroupBasedPolicy::ServiceProfile
properties:
name: LB
vendor: NFP
service_type: LOADBALANCER
insertion_mode: l3
service_flavor: service_vendor=haproxy,device_type=nova
shared: true
PROFILE-FW-ASAV:
type: OS::GroupBasedPolicy::ServiceProfile
properties:
name: FW-ASAV
vendor: NFP
service_type: FIREWALL
insertion_mode: l3
service_flavor: service_vendor=asav,device_type=nova
shared: true
PROFILE-VPN-ASAV:
type: OS::GroupBasedPolicy::ServiceProfile
properties:
name: VPN-ASAV
vendor: NFP
service_type: VPN
insertion_mode: l3
service_flavor: service_vendor=asav,device_type=nova
shared: true
PROFILE-FW-PALOALTO:
type: OS::GroupBasedPolicy::ServiceProfile
properties:
name: FW-PALOALTO
vendor: NFP
service_type: FIREWALL
insertion_mode: l3
service_flavor: service_vendor=paloalto,device_type=nova
shared: true
PROFILE-LB-F5:
type: OS::GroupBasedPolicy::ServiceProfile
properties:
name: LB-F5
vendor: NFP
service_type: LOADBALANCER
insertion_mode: l3
service_flavor: service_vendor=f5,device_type=nova
shared: true
LBVIP-IP-POLICY:
type: OS::GroupBasedPolicy::NetworkServicePolicy
properties:
name: LBVIP-IP-POLICY
network_service_params:
- type: ip_single
name: vip_ip
value: self_subnet
shared: True
FIP-POLICY:
type: OS::GroupBasedPolicy::NetworkServicePolicy
properties:
name: FIP-POLICY
network_service_params:
- type: ip_pool
name: fip
value: nat_pool
shared: True

View File

@ -0,0 +1,65 @@
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description": "Template to test Haproxy Loadbalacer service",
"Parameters": {
"Subnet": {
"Description": "Pool Subnet CIDR, on which VIP port should be created",
"Type": "String"
},
"vip_ip": {
"Description": "VIP IP Address",
"Type": "String"
},
"service_chain_metadata": {
"Description": "sc metadata",
"Type": "String"
}
},
"Resources" : {
"HttpHM": {
"Type": "OS::Neutron::HealthMonitor",
"Properties": {
"admin_state_up": true,
"delay": 20,
"expected_codes": "200",
"http_method": "GET",
"max_retries": 3,
"timeout": 10,
"type": "HTTP",
"url_path": "/"
}
},
"HaproxyPool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"admin_state_up": true,
"description": "Haproxy pool from teplate",
"lb_method": "ROUND_ROBIN",
"monitors": [{"Ref":"HttpHM"}],
"name": "Haproxy pool",
"provider": "loadbalancer",
"protocol": "HTTP",
"subnet_id": {"Ref":"Subnet"},
"vip": {
"subnet": {"Ref":"Subnet"},
"address": {"Ref":"vip_ip"},
"name": "Haproxy vip",
"description": {"Ref":"service_chain_metadata"},
"protocol_port": 80,
"connection_limit": -1,
"admin_state_up": true
}
}
},
"HaproxyLb": {
"Type": "OS::Neutron::LoadBalancer",
"Properties": {
"pool_id": {"Ref":"HaproxyPool"},
"protocol_port": 80
}
}
}
}

View File

@ -0,0 +1,93 @@
{
"description": "Creates new vpn service - ike + ipsec + vpn service + site-site connection(s)",
"heat_template_version": "2013-05-23",
"parameters": {
"RouterId": {
"description": "Router ID",
"type": "string"
},
"ServiceDescription": {
"description": "fip;tunnel_local-cidr",
"type": "string"
},
"Subnet": {
"description": "Subnet id on which vpn service is launched",
"type": "string"
}
},
"resources": {
"IKEPolicy": {
"properties": {
"auth_algorithm": "sha1",
"encryption_algorithm": "3des",
"ike_version": "v1",
"lifetime": {
"units": "seconds",
"value": 3600
},
"name": "IKEPolicy",
"pfs": "group5",
"phase1_negotiation_mode": "main"
},
"type": "OS::Neutron::IKEPolicy"
},
"IPsecPolicy": {
"properties": {
"auth_algorithm": "sha1",
"encapsulation_mode": "tunnel",
"encryption_algorithm": "3des",
"lifetime": {
"units": "seconds",
"value": 3600
},
"name": "IPsecPolicy",
"pfs": "group5",
"transform_protocol": "esp"
},
"type": "OS::Neutron::IPsecPolicy"
},
"VPNService": {
"properties": {
"admin_state_up": "true",
"description": {
"get_param": "ServiceDescription"
},
"name": "VPNService",
"router_id": {
"get_param": "RouterId"
},
"subnet_id": {
"get_param": "Subnet"
}
},
"type": "OS::Neutron::VPNService"
},
"site_to_site_connection1": {
"properties": {
"admin_state_up": "true",
"dpd": {
"actions": "hold",
"interval": 30,
"timeout": 120
},
"ikepolicy_id": {
"get_resource": "IKEPolicy"
},
"initiator": "bi-directional",
"ipsecpolicy_id": {
"get_resource": "IPsecPolicy"
},
"mtu": 1500,
"name": "site_to_site_connection1",
"peer_address": "192.168.102.117",
"peer_cidrs": ["19.0.0.0/24"],
"peer_id": "19.0.0.3",
"psk": "secret",
"vpnservice_id": {
"get_resource": "VPNService"
}
},
"type": "OS::Neutron::IPsecSiteConnection"
}
}
}

View File

@ -1,17 +1,22 @@
#!/bin/bash
NEW_BASE="$BASE/new"
DISK_IMAGE_DIR=$NEW_BASE/group-based-policy/gbpservice/tests/contrib
DISKIMAGE_CREATE_DIR=$NEW_BASE/group-based-policy/gbpservice/contrib/nfp/tools/image_builder/
function prepare_nfp_image_builder {
#setup_develop $NFPSERVICE_DIR
sudo -H -E pip install -r $DISK_IMAGE_DIR/diskimage-create/requirements.txt
sudo -H -E pip install -r $DISKIMAGE_CREATE_DIR/requirements.txt
sudo apt-get install -y --force-yes qemu-utils
}
function create_nfp_image {
TOP_DIR=$1
sudo python $DISK_IMAGE_DIR/diskimage-create/disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/ref_configurator_conf.json
BUILT_IMAGE_PATH=$(cat $DISK_IMAGE_DIR/diskimage-create/output/last_built_image_path)
sudo python -c\
'from gbpservice.contrib.nfp.tools.image_builder import disk_image_create as DIB;\
DIB.cur_dir = "'$DISKIMAGE_CREATE_DIR'";\
DIB.conf["ubuntu_release"] = {"release": "wily"};\
DIB.conf["dib"] = {"image_size": 3, "elements": ["nfp-reference-configurator", "dhcp-all-interfaces", "devuser"], "offline": True, "cache_dir": "'$HOME'/.cache/image-create"};\
DIB.dib()'
BUILT_IMAGE_PATH=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path)
upload_image file://$BUILT_IMAGE_PATH
openstack --os-cloud=devstack-admin flavor create --ram 512 --disk 3 --vcpus 1 m1.nfp-tiny
@ -136,7 +141,7 @@ function namespace_create {
function create_nfp_gbp_resources {
TOP_DIR=$1
source $TOP_DIR/openrc neutron service
IMAGE_PATH=$(cat $DISK_IMAGE_DIR/diskimage-create/output/last_built_image_path)
IMAGE_PATH=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path)
IMAGE_NAME=`basename "$IMAGE_PATH"`
IMAGE_NAME_FLAT="${IMAGE_NAME%.*}"
FLAVOR=m1.nfp-tiny

View File

@ -1,13 +0,0 @@
{
"dib":
{
"image_size_in_GB": 3,
"cache_path": "~/.cache/image-create",
"elements": ["configurator", "root-passwd"],
"root_password": "config123"
},
"ubuntu_release":
{
"release": "trusty"
}
}

View File

@ -1,163 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_serialization import jsonutils
import subprocess
import sys
conf = []
cur_dir = ''
def parse_json(j_file):
global conf
with open(j_file) as json_data:
conf = jsonutils.load(json_data)
return
def set_nfp_git_branch(nfp_branch_name, configurator_dir):
Dockerfile_path = configurator_dir + '/Dockerfile'
cmd = "sudo sed -i \"s/GIT-BRANCH-NAME/%s/g\" %s" % (
nfp_branch_name.replace('/', '\/'), Dockerfile_path)
os.system(cmd)
def create_configurator_docker(nfp_branch_name):
configurator_dir = "%s/../../../contrib/nfp/configurator" % cur_dir
docker_images = "%s/output/docker_images/" % cur_dir
if not os.path.exists(docker_images):
os.makedirs(docker_images)
# create a docker image
os.chdir(configurator_dir)
set_nfp_git_branch(nfp_branch_name, configurator_dir)
docker_args = ['docker', 'build', '-t', 'configurator-docker', '.']
ret = subprocess.call(docker_args)
if(ret):
print("Failed to build docker image [configurator-docker]")
return -1
os.chdir(docker_images)
del(docker_args)
# save the docker image
docker_args = ['docker', 'save', '-o', 'configurator-docker',
'configurator-docker']
ret = subprocess.call(docker_args)
if(ret):
print("Failed to save docker image [configurator-docker]")
return -1
# set environment variable, needed by 'extra-data.d'
os.environ['DOCKER_IMAGES_PATH'] = docker_images
return 0
def dib(nfp_branch_name):
dib = conf['dib']
elems = "%s/elements/" % cur_dir
# set the elements path in environment variable
os.environ['ELEMENTS_PATH'] = elems
# set the Ubuntu Release for the build in environment variable
os.environ['DIB_RELEASE'] = conf['ubuntu_release']['release']
# basic elements
dib_args = ['disk-image-create', 'base', 'vm', 'ubuntu']
# configures elements
for element in dib['elements']:
dib_args.append(element)
# root login enabled, set password environment variable
if element == 'root-passwd':
os.environ['DIB_PASSWORD'] = dib['root_password']
elif element == 'devuser':
os.environ['DIB_DEV_USER_USERNAME'] = 'ubuntu'
os.environ['DIB_DEV_USER_SHELL'] = '/bin/bash'
elif element == 'nfp-reference-configurator':
image_name = 'nfp_reference_service'
service_dir = "%s/../nfp_service/" % cur_dir
pecan_dir = os.path.abspath(os.path.join(cur_dir,
'../../../nfp'))
service_dir = os.path.realpath(service_dir)
pecan_dir = os.path.realpath(pecan_dir)
os.environ['PECAN_GIT_PATH'] = pecan_dir
os.environ['SERVICE_GIT_PATH'] = service_dir
if 'devuser' in dib['elements']:
os.environ['SSH_RSS_KEY'] = (
"%s/output/%s" % (cur_dir, image_name))
os.environ['DIB_DEV_USER_AUTHORIZED_KEYS'] = (
"%s.pub" % os.environ['SSH_RSS_KEY'])
elif element == 'configurator':
image_name = 'configurator'
create_configurator_docker(nfp_branch_name)
# for bigger size images
dib_args.append('--no-tmpfs')
# offline mode, assuming the image cache (tar) already exists
dib_args.append('--offline')
cache_path = dib['cache_path'].replace('~', os.environ.get('HOME', '-1'))
dib_args.append('--image-cache')
dib_args.append(cache_path)
dib_args.append('--image-size')
dib_args.append(str(dib['image_size_in_GB']))
dib_args.append('-o')
dib_args.append(str(image_name))
os.chdir(cur_dir)
out_dir = 'output'
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
os.chdir(out_dir)
print("DIB-ARGS: %r" % dib_args)
ret = subprocess.call(dib_args)
if not ret:
image_path = "%s/output/%s.qcow2" % (cur_dir, image_name)
print("Image location: %s" % image_path)
with open("%s/output/last_built_image_path" % cur_dir, "w") as f:
f.write(image_path)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("ERROR: Invalid Usage")
print("Usage:\n\t%s <json config file> [NFP_BRANCH_NAME]"
% sys.argv[0])
print("\twhere: <json config file> contains all the configuration")
print("\tand NFP_BRANCH_NAME is the string, and is optional.")
exit()
# save PWD
cur_dir = os.path.dirname(__file__)
cur_dir = os.path.realpath(cur_dir)
if not cur_dir:
# if script is executed from current dir, get abs path
cur_dir = os.path.realpath('./')
# parse args from json file
parse_json(sys.argv[1])
elements = conf['dib']['elements']
nfp_branch_name = sys.argv[2] if len(sys.argv) == 3 else None
if 'configurator' in elements and nfp_branch_name is None:
print("ERROR: You have to pass NFP_BRANCH_NAME.")
exit()
# run Disk Image Builder to create VM image
dib(nfp_branch_name)

View File

@ -1,8 +0,0 @@
#!/bin/bash
set -eux
# create 'images' directory to hold docker images
sudo mkdir -p ${TMP_MOUNT_PATH}/usr/share/configurator/docker/images
#copy docker images to VM
sudo cp -L ${DOCKER_IMAGES_PATH}/configurator-docker ${TMP_MOUNT_PATH}/usr/share/configurator/docker/images/

View File

@ -1,12 +0,0 @@
#!/bin/bash
set -eux
apt-get -y --force-yes install apt-transport-https ca-certificates
apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-$DIB_RELEASE main" >> /etc/apt/sources.list.d/docker.list
apt-get update
apt-get purge lxc-docker
apt-get -y --force-yes install linux-image-extra-$(uname -r)
apt-get -y --force-yes install docker-engine
sed -i '2i docker start configurator' /etc/rc.local

View File

@ -1,10 +0,0 @@
#!/bin/bash
set -eux
# start docker for loading the configurator image
docker daemon --raw-logs &
# wait for docker daemon to come up
sleep 10
# preload the docker image inside the VM
docker load -i /usr/share/configurator/docker/images/configurator-docker

View File

@ -1,11 +0,0 @@
#!/bin/bash
if [ -z "$DIB_PASSWORD" ]; then
echo "Error during setup password for root"
exit 1
fi
sed -i "s/disable_root: true/disable_root: false/" /etc/cloud/cloud.cfg
install-packages augeas-tools openssh-server openssh-client
augtool -s set /files/etc/ssh/sshd_config/PasswordAuthentication yes
augtool -s set /files/etc/ssh/sshd_config/PermitRootLogin yes
augtool -s set /files/etc/ssh/ssh_config/PasswordAuthentication yes
echo -e "$DIB_PASSWORD\n$DIB_PASSWORD\n" | passwd

View File

@ -1,12 +0,0 @@
{
"dib":
{
"image_size_in_GB": 3,
"cache_path": "~/.cache/image-create",
"elements": ["nfp-reference-configurator", "dhcp-all-interfaces", "devuser"]
},
"ubuntu_release":
{
"release": "wily"
}
}