tacker/devstack/lib/tacker
Yasufumi Ogawa 500d2eb780 Change tosca-parser and heat-translator install
With devstack, the stable versions of tosca-parser and heat-translator are
installed via pip as listed in `requirements.txt`. However, devstack is
usually used for deploying a development environment, where it is often
preferable to get the latest revisions, and there are actually not many
cases that require the stable releases.

This update includes several workarounds as listed below, but most of them
are not needed once the two packages have their own devstack scripts. So,
we should remove the workarounds at that point, as described in the TODOs
added in this update.

NOTE: As described below, we should regenerate upper-constraints.txt with
the following command and upload it to tacker's repo every time [1] is
updated.

  $ bash devstack/lib/download_upper_consts

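For reference, a minimal sketch of what such a refresh step could look
like, assuming the helper simply fetches [1] and drops the two packages
(the actual script may differ):

  $ curl -sSf https://releases.openstack.org/constraints/upper/master \
      | grep -v -e '^tosca-parser=' -e '^heat-translator=' \
      > upper-constraints.txt
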
* Install the packages from git repos cloned in `/opt/stack/`, similar to
  other packages.

* Comment out the entries for the two packages in `requirements.txt` and
  `lower-constraints.txt` because there is no need to install them from
  there; re-enable the entries if you want to install the stable packages.

* To pass the tox jobs, add the files below.

  * upper-constraints.txt: The two packages are removed from the official
    constraints file [1] so that their stable versions are not checked.
    This file is refreshed every time `stack.sh` runs to keep up with the
    official one.

  * requirements-extra.txt: Installs the packages from the git repos (see
    the sketch after this list). Without separating it from
    `requirements.txt`, `tox -e lower-constraints` fails.

* Update the parameters in `tox.ini` to activate the modifications above.

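As an illustration only (the actual entries may differ), the added
`requirements-extra.txt` could point pip at the git repos with editable
VCS requirements such as:

  $ cat requirements-extra.txt
  -e git+https://opendev.org/openstack/tosca-parser#egg=tosca-parser
  -e git+https://opendev.org/openstack/heat-translator#egg=heat-translator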

[1] https://releases.openstack.org/constraints/upper/master

Signed-off-by: Yasufumi Ogawa <yasufum.o@gmail.com>
Change-Id: I776137215ea06c8ac54612d13a663349c2e7db7d
2020-12-14 21:32:37 +00:00


#!/bin/bash
#
# lib/tacker
# functions - functions specific to tacker
#
# Dependencies:
#
# ``functions`` file
# ``DEST`` must be defined
# ``STACK_USER`` must be defined
#
# ``stack.sh`` calls the entry points in this order:
#
# - install_tacker
# - configure_tacker
# - create_tacker_accounts
# - init_tacker
# - start_tacker
# - tacker_horizon_install
# - tacker_create_initial_network
#
# ``unstack.sh`` calls the entry points in this order:
#
# - stop_tacker
# - cleanup_tacker

# Tacker
# ---------------

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

# Set up default directories
GITREPO["tacker-horizon"]=${TACKERHORIZON_REPO:-${GIT_BASE}/openstack/tacker-horizon.git}
GITBRANCH["tacker-horizon"]=${TACKERHORIZON_BRANCH:-master}
GITDIR["tacker-horizon"]=$DEST/tacker-horizon
TACKER_DIR=$DEST/tacker
TACKER_AUTH_CACHE_DIR=${TACKER_AUTH_CACHE_DIR:-/var/cache/tacker}
# Support entry points installation of console scripts
if [[ -d $TACKER_DIR/bin/tacker-server ]]; then
    TACKER_BIN_DIR=$TACKER_DIR/bin
else
    TACKER_BIN_DIR=$(get_python_exec_prefix)
fi
TACKER_CONF_DIR=/etc/tacker
TACKER_CONF=$TACKER_CONF_DIR/tacker.conf
TACKER_DATA_DIR=${TACKER_DATA_DIR:=$DATA_DIR/tacker}
# Default name for Tacker database
TACKER_DB_NAME=${TACKER_DB_NAME:-tacker}
# Default Tacker Port
TACKER_PORT=${TACKER_PORT:-9890}
# Default Tacker Host
TACKER_HOST=${TACKER_HOST:-$SERVICE_HOST}
# Default protocol
TACKER_PROTOCOL=${TACKER_PROTOCOL:-$SERVICE_PROTOCOL}
# Default admin username
TACKER_ADMIN_USERNAME=${TACKER_ADMIN_USERNAME:-tacker}
# Default auth strategy
TACKER_AUTH_STRATEGY=${TACKER_AUTH_STRATEGY:-keystone}
TACKER_USE_ROOTWRAP=${TACKER_USE_ROOTWRAP:-True}
TACKER_RR_CONF_FILE=$TACKER_CONF_DIR/rootwrap.conf
if [[ "$TACKER_USE_ROOTWRAP" == "False" ]]; then
    TACKER_RR_COMMAND="sudo"
else
    TACKER_ROOTWRAP=$(get_rootwrap_location tacker)
    TACKER_RR_COMMAND="sudo $TACKER_ROOTWRAP $TACKER_RR_CONF_FILE"
fi
TACKER_NOVA_URL=${TACKER_NOVA_URL:-http://127.0.0.1:8774/v2}
TACKER_NOVA_CA_CERTIFICATES_FILE=${TACKER_NOVA_CA_CERTIFICATES_FILE:-}
TACKER_NOVA_API_INSECURE=${TACKER_NOVA_API_INSECURE:-False}
HEAT_CONF_DIR=/etc/heat
CEILOMETER_CONF_DIR=/etc/ceilometer
source ${TACKER_DIR}/tacker/tests/contrib/post_test_hook_lib.sh
VNF_PACKAGE_CSAR_PATH=${VNF_PACKAGE_CSAR_PATH:=$TACKER_DATA_DIR/vnfpackage}
FILESYSTEM_STORE_DATA_DIR=${FILESYSTEM_STORE_DATA_DIR:=$TACKER_DATA_DIR/csar_files}
GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:=file}

# Functions
# ---------

# Test if any Tacker services are enabled
# is_tacker_enabled
function is_tacker_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"tacker" ]] && return 0
    return 1
}

# create_tacker_cache_dir() - Part of the _tacker_setup_keystone() process
function create_tacker_cache_dir {
    # Create cache dir
    sudo install -d -o $STACK_USER $TACKER_AUTH_CACHE_DIR
    rm -f $TACKER_AUTH_CACHE_DIR/*
}

# create_tacker_accounts() - Set up common required tacker accounts
#
# Tenant               User       Roles
# ------------------------------------------------------------------
# service              tacker     admin        # if enabled
#
# Migrated from keystone_data.sh
function create_tacker_accounts {
    if is_service_enabled tacker; then
        create_service_user "tacker"
        get_or_create_role "advsvc"
        create_service_user "tacker" "advsvc"
        create_service_user "tacker" "admin"

        local tacker_service=$(get_or_create_service "tacker" \
            "nfv-orchestration" "Tacker NFV Orchestration Service")
        get_or_create_endpoint $tacker_service \
            "$REGION_NAME" \
            "$TACKER_PROTOCOL://$SERVICE_HOST:$TACKER_PORT/" \
            "$TACKER_PROTOCOL://$SERVICE_HOST:$TACKER_PORT/" \
            "$TACKER_PROTOCOL://$SERVICE_HOST:$TACKER_PORT/"
    fi
}

# stack.sh entry points
# ---------------------

# init_tacker() - Initialize databases, etc.
function init_tacker {
    recreate_database $TACKER_DB_NAME

    # Run Tacker db migrations
    $TACKER_BIN_DIR/tacker-db-manage --config-file $TACKER_CONF upgrade head
}

# install_tacker() - Collect source and prepare
function install_tacker {
    setup_develop $TACKER_DIR
}

# start_tacker() - Start the tacker-server and tacker-conductor systemd services
function start_tacker {
    local cfg_file_options="--config-file $TACKER_CONF"
    local service_port=$TACKER_PORT
    local service_protocol=$TACKER_PROTOCOL

    # Start the Tacker service
    sudo cp $TACKER_DIR/etc/systemd/system/tacker.service $SYSTEMD_DIR/devstack@tacker.service
    iniset -sudo $SYSTEMD_DIR/devstack@tacker.service "Service" "User" $STACK_USER
    iniset -sudo $SYSTEMD_DIR/devstack@tacker.service "Service" "ExecStart" "$TACKER_BIN_DIR/tacker-server $cfg_file_options"
    sudo systemctl enable devstack@tacker.service
    sudo systemctl restart devstack@tacker.service

    # Start the Tacker conductor service
    sudo cp $TACKER_DIR/etc/systemd/system/tacker-conductor.service $SYSTEMD_DIR/devstack@tacker-conductor.service
    iniset -sudo $SYSTEMD_DIR/devstack@tacker-conductor.service "Service" "User" $STACK_USER
    iniset -sudo $SYSTEMD_DIR/devstack@tacker-conductor.service "Service" "ExecStart" "$TACKER_BIN_DIR/tacker-conductor $cfg_file_options"
    sudo systemctl enable devstack@tacker-conductor.service
    sudo systemctl restart devstack@tacker-conductor.service

    echo "Waiting for Tacker to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $service_protocol://$TACKER_HOST:$service_port; do sleep 1; done"; then
        die $LINENO "Tacker did not start"
    fi
}

# stop_tacker() - Stop running processes (non-screen)
function stop_tacker {
    stop_process tacker
    stop_process tacker-conductor
}

# cleanup_tacker() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_tacker {
    sudo rm -rf $TACKER_AUTH_CACHE_DIR
    sudo rm -rf $VNF_PACKAGE_CSAR_PATH
    sudo rm -rf $FILESYSTEM_STORE_DATA_DIR
}

function _create_tacker_conf_dir {
    # Put config files in ``TACKER_CONF_DIR`` for everyone to find
    sudo install -d -o $STACK_USER $TACKER_CONF_DIR
}

function _create_tacker_data_dirs {
    # Create tacker data folder
    sudo install -d -o $STACK_USER $TACKER_DATA_DIR
    # Create vnf package csar extract path folder
    sudo install -d -o $STACK_USER ${VNF_PACKAGE_CSAR_PATH}
    # Create folder for storing csar files in the glance store.
    sudo install -d -o $STACK_USER ${FILESYSTEM_STORE_DATA_DIR}
}

# configure_tacker()
# Set common config for the tacker server and agents.
function configure_tacker {
    _create_tacker_conf_dir

    cd $TACKER_DIR
    ./tools/generate_config_file_sample.sh
    cd -
    cp $TACKER_DIR/etc/tacker/tacker.conf.sample $TACKER_CONF

    iniset_rpc_backend tacker $TACKER_CONF

    iniset $TACKER_CONF database connection `database_connection_url $TACKER_DB_NAME`
    iniset $TACKER_CONF DEFAULT state_path $TACKER_DATA_DIR
    iniset $TACKER_CONF DEFAULT use_syslog $SYSLOG
    iniset $TACKER_CONF vnf_package vnf_package_csar_path "$VNF_PACKAGE_CSAR_PATH"
    iniset $TACKER_CONF glance_store filesystem_store_datadir "$FILESYSTEM_STORE_DATA_DIR"
    iniset $TACKER_CONF glance_store default_backend "$GLANCE_DEFAULT_BACKEND"

    # Create the required data folders
    _create_tacker_data_dirs

    # Format logging
    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
        setup_colorized_logging $TACKER_CONF DEFAULT project_id
    else
        # Show user_name and project_name by default like in nova
        iniset $TACKER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
    fi

    # server
    TACKER_API_PASTE_FILE=$TACKER_CONF_DIR/api-paste.ini
    cp $TACKER_DIR/etc/tacker/api-paste.ini $TACKER_API_PASTE_FILE

    iniset $TACKER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
    iniset $TACKER_CONF DEFAULT auth_strategy $TACKER_AUTH_STRATEGY
    _tacker_setup_keystone $TACKER_CONF keystone_authtoken

    if [[ "${TACKER_MODE}" == "all" ]]; then
        iniset "/$Q_PLUGIN_CONF_FILE" ml2_type_flat flat_networks $PUBLIC_PHYSICAL_NETWORK,$MGMT_PHYS_NET
        iniset "/$Q_PLUGIN_CONF_FILE" ovs bridge_mappings $PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE,$MGMT_PHYS_NET:$BR_MGMT

        # Experimental settings for monitor alarm auth settings,
        # Will be changed according to new implementation.
        iniset $TACKER_CONF alarm_auth username admin
        iniset $TACKER_CONF alarm_auth password "$ADMIN_PASSWORD"
        iniset $TACKER_CONF alarm_auth project_name admin
        iniset $TACKER_CONF alarm_auth url http://$SERVICE_HOST:5000/v3

        echo "Creating bridge"
        sudo ovs-vsctl --may-exist add-br ${BR_MGMT}
    fi

    if [[ "${USE_BARBICAN}" == "True" ]]; then
        iniset $TACKER_CONF vim_keys use_barbican True
    fi

    _tacker_setup_rootwrap
}

# Utility Functions
# -----------------

# _tacker_deploy_rootwrap_filters() - deploy rootwrap filters to $TACKER_CONF_ROOTWRAP_D (owned by root).
function _tacker_deploy_rootwrap_filters {
    local srcdir=$1
    sudo install -d -o root -m 755 $TACKER_CONF_ROOTWRAP_D
    sudo install -o root -m 644 $srcdir/etc/tacker/rootwrap.d/* $TACKER_CONF_ROOTWRAP_D/
}

# _tacker_setup_rootwrap() - configure Tacker's rootwrap
function _tacker_setup_rootwrap {
    if [[ "$TACKER_USE_ROOTWRAP" == "False" ]]; then
        return
    fi

    # Wipe any existing ``rootwrap.d`` files first
    TACKER_CONF_ROOTWRAP_D=$TACKER_CONF_DIR/rootwrap.d
    if [[ -d $TACKER_CONF_ROOTWRAP_D ]]; then
        sudo rm -rf $TACKER_CONF_ROOTWRAP_D
    fi

    _tacker_deploy_rootwrap_filters $TACKER_DIR

    sudo install -o root -g root -m 644 $TACKER_DIR/etc/tacker/rootwrap.conf $TACKER_RR_CONF_FILE
    sudo sed -e "s:^filters_path=.*$:filters_path=$TACKER_CONF_ROOTWRAP_D:" -i $TACKER_RR_CONF_FILE

    # Specify ``rootwrap.conf`` as first parameter to tacker-rootwrap
    ROOTWRAP_SUDOER_CMD="$TACKER_ROOTWRAP $TACKER_RR_CONF_FILE *"

    # Set up the rootwrap sudoers for tacker
    TEMPFILE=`mktemp`
    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
    chmod 0440 $TEMPFILE
    sudo chown root:root $TEMPFILE
    sudo mv $TEMPFILE /etc/sudoers.d/tacker-rootwrap

    # Update the root_helper
    iniset $TACKER_CONF agent root_helper "$TACKER_RR_COMMAND"
}

# Configures keystone integration for tacker service and agents
function _tacker_setup_keystone {
    local conf_file=$1
    local section=$2
    local use_auth_url=$3

    # Configures keystone for metadata_agent
    # metadata_agent needs auth_url to communicate with keystone
    if [[ "$use_auth_url" == "True" ]]; then
        iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI
    fi

    create_tacker_cache_dir
    configure_auth_token_middleware $conf_file $TACKER_ADMIN_USERNAME $TACKER_AUTH_CACHE_DIR $section
}

# tacker_horizon_install() - Install and enable the tacker-horizon dashboard
function tacker_horizon_install {
    git_clone_by_name "tacker-horizon"
    setup_dev_lib "tacker-horizon"
    sudo cp $DEST/tacker-horizon/tacker_horizon/enabled/* $DEST/horizon/openstack_dashboard/enabled/
    # make sure NFV's dashboard static files get loaded when deploying
    $PYTHON $DEST/tacker-horizon/manage.py collectstatic --noinput
    echo yes | $PYTHON $DEST/tacker-horizon/manage.py compress
    restart_apache_server
}

# tacker_horizon_uninstall() - Remove the tacker-horizon dashboard
function tacker_horizon_uninstall {
    sudo rm -f $DEST/horizon/openstack_dashboard/enabled/_80_nfv.py
    restart_apache_server
}

# openstack_image_create() - Create a public raw image in Glance and print its ID
function openstack_image_create {
    image=$1
    disk_format=raw
    container_format=bare
    image_name=$2

    openstack --os-cloud=devstack-admin image create $image_name --public --container-format=$container_format --disk-format $disk_format --file ${image}
    openstack image show $image_name -f value -c id
}

# tacker_check_and_download_images() - Download sample images and register
# them in Glance if they are not registered yet
function tacker_check_and_download_images {
    local image_url
    image_url[0]="http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img"
    # Customized OpenWRT 15.05.1 image that fixes continuously respawning
    # OpenWRT-based VNFs.
    image_url[1]="$TACKER_DIR/samples/images/openwrt-x86-kvm_guest-combined-ext4.img.gz"

    local image_fname image_name glance_name
    local gz_pattern="\.gz$"
    local length=${#image_url[@]}
    local index=0
    while [ $index -lt $length ]; do
        image_fname=`basename "${image_url[$index]}"`
        glance_name=${image_fname%.*}
        if [[ $glance_name =~ "openwrt" ]]; then
            glance_name="OpenWRT"
        fi
        image_name=`openstack image list | grep "$glance_name" | awk '{print $4}'`
        if [[ $image_name == "" ]]; then
            if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
                if [[ -f ${image_url[$index]} ]]; then
                    cp ${image_url[$index]} $FILES/$image_fname
                else
                    {
                        wget --progress=dot:giga -c ${image_url[$index]} -O $FILES/$image_fname
                    } || {
                        echo "WARNING: download image ${image_url[$index]} failed"
                        index=$(($index+1))
                        continue
                    }
                fi
                if [[ $image_fname =~ $gz_pattern ]]; then
                    new_image_fname=${image_fname%.*}
                    gunzip -c $FILES/$image_fname > $FILES/$new_image_fname
                    image_fname=$new_image_fname
                fi
            fi
            {
                openstack_image_create $FILES/$image_fname $glance_name
            } || {
                echo "ERROR: tacker image create for $image_fname failed"
                image_fname=$image_fname"*"
                sudo rm -rf $FILES/$image_fname
                exit 1
            }
        fi
        index=$(($index+1))
    done
}

function tacker_create_initial_network {
    # create necessary networks
    # prepare network
    echo "Deleting networks"
    for net in ${NET_MGMT} ${NET0} ${NET1}; do
        for i in $(openstack network list | awk "/${net}/{print \$2}"); do
            openstack network delete $i
        done
    done

    echo "Creating networks"
    NET_MGMT_ID=$(openstack network create --provider-network-type flat --provider-physical-network ${MGMT_PHYS_NET} --share ${NET_MGMT} | awk '/ id /{print $4}')
    SUBNET_MGMT_ID=$(openstack subnet create ${SUBNET_MGMT} --ip-version 4 --gateway ${NETWORK_GATEWAY_MGMT} --network ${NET_MGMT_ID} --subnet-range ${FIXED_RANGE_MGMT} | awk '/ id /{print $4}')
    NET0_ID=$(openstack network create --share ${NET0} | awk '/ id /{print $4}')
    SUBNET0_ID=$(openstack subnet create ${SUBNET0} --ip-version 4 --gateway ${NETWORK_GATEWAY0} --network ${NET0_ID} --subnet-range ${FIXED_RANGE0} | awk '/ id /{print $4}')
    NET1_ID=$(openstack network create --share ${NET1} | awk '/ id /{print $4}')
    SUBNET1_ID=$(openstack subnet create ${SUBNET1} --ip-version 4 --gateway ${NETWORK_GATEWAY1} --network ${NET1_ID} --subnet-range ${FIXED_RANGE1} | awk '/ id /{print $4}')

    echo "Assign ip address to BR_MGMT"
    sudo ip link set ${BR_MGMT} up
    sudo ip -4 address flush dev ${BR_MGMT}
    sudo ip address add ${NETWORK_GATEWAY_MGMT_IP} dev ${BR_MGMT}
}

# tacker_register_default_vim() - Prepare the project, user and OpenStack
# resources used for registering the default VIM
function tacker_register_default_vim {
    # Note: These must be the same as in tacker/tests/etc/samples/local-vim.yaml
    # and devstack/lib/tacker/vim_config.yaml
    DEFAULT_VIM_PROJECT_NAME="nfv"
    DEFAULT_VIM_USER="nfv_user"
    DEFAULT_VIM_PASSWORD="devstack"

    echo "Create NFV VIM project $DEFAULT_VIM_PROJECT_NAME ..."
    get_or_create_project $DEFAULT_VIM_PROJECT_NAME
    echo "Create NFV VIM user $DEFAULT_VIM_USER ..."
    get_or_create_user $DEFAULT_VIM_USER $DEFAULT_VIM_PASSWORD
    get_or_add_user_project_role admin $DEFAULT_VIM_USER $DEFAULT_VIM_PROJECT_NAME
    # get_or_add_user_project_role advsvc $DEFAULT_VIM_USER $DEFAULT_VIM_PROJECT_NAME

    echo "Set register default VIM sh which is called by ansible role..."
    cp $TACKER_DIR/devstack/vim_config.yaml $TACKER_DATA_DIR
    VIM_CONFIG_FILE="$TACKER_DATA_DIR/vim_config.yaml"
    sed -e "s|^auth_url:.*$|auth_url: \'${KEYSTONE_SERVICE_URI}\'|" -i $VIM_CONFIG_FILE
    echo "The content of VIM config file $VIM_CONFIG_FILE :"
    cat $VIM_CONFIG_FILE

    # TODO(tpatil): Remove installing python-tackerclient using pip once
    # a solution is available to install it using requirements.
    pip_install python-tackerclient

    echo "Set up target openstack vim resources ..."
    openstack --os-cloud=devstack-admin quota set \
        --class --instances -1 --cores -1 --ram -1 default
    openstack --os-cloud=devstack-admin \
        quota set --ports -1 $DEFAULT_VIM_PROJECT_NAME
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD keypair create userKey
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD \
        security group create \
        --description "tacker functest security group" test_secgrp
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD \
        security group rule create \
        --ingress --protocol icmp test_secgrp
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD \
        security group rule create \
        --ingress --protocol tcp --dst-port 22 test_secgrp
}

function modify_heat_flavor_policy_rule {
    local policy_file=$HEAT_CONF_DIR/policy.yaml
    touch $policy_file
    # Allow non-admin projects with 'admin' roles to create flavors in Heat
    echo '"resource_types:OS::Nova::Flavor": "role:admin"' >> $policy_file
}

function configure_maintenance_event_types {
    local event_definitions_file=$CEILOMETER_CONF_DIR/event_definitions.yaml
    local maintenance_events_file=$TACKER_DIR/etc/ceilometer/maintenance_event_types.yaml

    echo "Configure maintenance event types to $event_definitions_file"
    cat $maintenance_events_file >> $event_definitions_file
}

# Install a pip package from a local git repository.
function install_package_local_repo {
    # Name of the package, such as `tosca-parser`.
    local pkg_name=$1
    # (optional) URL of the git repo; required if the package is not under
    # `https://opendev.org/openstack/`.
    local git_url=$2

    local repo_dir=$DEST/$pkg_name

    if [[ $git_url == "" ]]; then
        # Expect the repo is under opendev
        local git_url=https://opendev.org/openstack/$pkg_name
    fi

    if [ ! -d $repo_dir ]; then
        git clone $git_url $repo_dir
    fi

    # Install the local package with `pip install -e local_dir`.
    setup_develop $repo_dir
}
