tacker/devstack/lib/tacker
Shivam Shukla db7aaec746 Fix Tacker CI Failures from Tosca-Parser, TackerClient, and Neutron/Setuptools
Tacker CI jobs have been failing due to multiple issues:
   1. DevStack builds failed with Neutron logger errors and a setuptools
      incompatibility when GLOBAL_VENV=False.
   2. python-tackerclient was always installed from PyPI instead of Git
      due to missing installation function in Tacker's DevStack plugin.
   3. Stricter relationship validation in tosca-parser v2.13.0 caused
      template validation errors.

This patch updates sample test data for Tosca-parser validation,
enables Git installation for python-tackerclient, and adjusts job
configurations (Neutron driver and GLOBAL_VENV) to resolve the
Neutron and setuptools related errors.
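
For reference, the Git installation path is driven by DevStack's
LIBS_FROM_GIT mechanism. A minimal local.conf sketch (illustrative;
the repo and branch variables match the defaults defined in the
plugin below):

    [[local|localrc]]
    LIBS_FROM_GIT=python-tackerclient
    TACKERCLIENT_REPO=https://opendev.org/openstack/python-tackerclient.git
    TACKERCLIENT_BRANCH=master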

Closes-Bug: https://bugs.launchpad.net/tacker/+bug/2118586
Closes-Bug: https://bugs.launchpad.net/tacker/+bug/2119435
Closes-Bug: https://bugs.launchpad.net/tacker/+bug/2119433
Change-Id: If9bb32267c3006bfda53053a5a2b4d59e62e1c2b
Signed-off-by: Shivam Shukla <shivam.shukla3@india.nec.com>
2025-08-29 07:23:42 +00:00


#!/bin/bash
#
# lib/tacker
# functions - functions specific to tacker
# Dependencies:
# ``functions`` file
# ``DEST`` must be defined
# ``STACK_USER`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
# - install_tacker
# - configure_tacker
# - create_tacker_accounts
# - create_keystone_endpoint
# - init_tacker
# - start_tacker
# - tacker_horizon_install
# - tacker_create_initial_network
#
# ``unstack.sh`` calls the entry points in this order:
#
# - stop_tacker
# - cleanup_tacker
# Tacker
# ---------------
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Defaults
# --------
# Set up default directories
GITREPO["tacker-horizon"]=${TACKERHORIZON_REPO:-${GIT_BASE}/openstack/tacker-horizon.git}
GITBRANCH["tacker-horizon"]=${TACKERHORIZON_BRANCH:-master}
GITDIR["tacker-horizon"]=$DEST/tacker-horizon
GITREPO["python-tackerclient"]=${TACKERCLIENT_REPO:-${GIT_BASE}/openstack/python-tackerclient.git}
GITBRANCH["python-tackerclient"]=${TACKERCLIENT_BRANCH:-master}
GITDIR["python-tackerclient"]=$DEST/python-tackerclient
TACKER_DIR=$DEST/tacker
# Support entry points installation of console scripts
if [[ -x $TACKER_DIR/bin/tacker-server ]]; then
    TACKER_BIN_DIR=$TACKER_DIR/bin
else
    TACKER_BIN_DIR=$(get_python_exec_prefix)
fi
TACKER_CONF_DIR=/etc/tacker
TACKER_CONF=$TACKER_CONF_DIR/tacker.conf
TACKER_DATA_DIR=${TACKER_DATA_DIR:=$DATA_DIR/tacker}
# Default name for Tacker database
TACKER_DB_NAME=${TACKER_DB_NAME:-tacker}
# Default Tacker Port
TACKER_PORT=${TACKER_PORT:-9890}
# Default Tacker Host
TACKER_HOST=${TACKER_HOST:-$SERVICE_HOST}
# Default protocol
TACKER_PROTOCOL=${TACKER_PROTOCOL:-$SERVICE_PROTOCOL}
# Default admin username
TACKER_ADMIN_USERNAME=${TACKER_ADMIN_USERNAME:-tacker}
# Default auth strategy
TACKER_AUTH_STRATEGY=${TACKER_AUTH_STRATEGY:-keystone}
TACKER_NOVA_URL=${TACKER_NOVA_URL:-http://127.0.0.1:8774/v2}
TACKER_NOVA_CA_CERTIFICATES_FILE=${TACKER_NOVA_CA_CERTIFICATES_FILE:-}
TACKER_NOVA_API_INSECURE=${TACKER_NOVA_API_INSECURE:-False}
VNF_PACKAGE_CSAR_PATH=${VNF_PACKAGE_CSAR_PATH:=$TACKER_DATA_DIR/vnfpackage}
FILESYSTEM_STORE_DATA_DIR=${FILESYSTEM_STORE_DATA_DIR:=$TACKER_DATA_DIR/csar_files}
GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:=file}
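# NOTE: the defaults above follow the usual DevStack ``${VAR:-default}``
# pattern, so any of them can be overridden from local.conf, e.g.
# (illustrative values):
#   TACKER_PORT=9890
#   TACKER_HOST=$SERVICE_HOST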
# Functions
# ---------
# install_tackerclient - Collect source and prepare
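# NOTE: ``use_library_from_git`` only succeeds when the library name is
# listed in ``LIBS_FROM_GIT``, so the Git (rather than PyPI) install is
# enabled with e.g. ``LIBS_FROM_GIT=python-tackerclient`` in local.conf.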
function install_tackerclient {
    if use_library_from_git "python-tackerclient"; then
        git_clone_by_name "python-tackerclient"
        setup_dev_lib "python-tackerclient"
    fi
}
# function install_db_client_mysql() - ensure mysql client is installed
function install_db_client_mysql {
    if is_oraclelinux; then
        install_package mysql-community-client
    elif is_fedora || is_suse; then
        install_package mariadb-client
    elif is_ubuntu; then
        install_package mysql-client
    else
        exit_distro_not_supported "mysql client installation"
    fi
}
# function install_db_client_postgresql() - ensure postgresql client is installed
function install_db_client_postgresql {
    if is_ubuntu || is_fedora || is_suse; then
        install_package postgresql-client
    else
        exit_distro_not_supported "postgresql client installation"
    fi
}
# create_tacker_accounts() - Set up common required tacker accounts
# Tenant               User       Roles
# ------------------------------------------------------------------
# service              tacker     admin      # if enabled
# Migrated from keystone_data.sh
function create_tacker_accounts {
    if is_service_enabled tacker; then
        create_service_user "tacker"
        get_or_create_role "advsvc"
        create_service_user "tacker" "advsvc"
        create_service_user "tacker" "admin"

        local tacker_service=$(get_or_create_service "tacker" \
            "nfv-orchestration" "Tacker NFV Orchestration Service")
        get_or_create_endpoint $tacker_service \
            "$REGION_NAME" \
            "$TACKER_PROTOCOL://$TACKER_HOST:$TACKER_PORT/" \
            "$TACKER_PROTOCOL://$TACKER_HOST:$TACKER_PORT/" \
            "$TACKER_PROTOCOL://$TACKER_HOST:$TACKER_PORT/"
    fi
}
# create_keystone_endpoint() - create admin endpoint for keystone
function create_keystone_endpoint {
    KEYSTONE_SERVICE=$(get_or_create_service "keystone" \
        "identity" "Keystone Identity Service")
    get_or_create_endpoint $KEYSTONE_SERVICE \
        "$REGION_NAME" \
        "${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity" \
        "${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity" \
        "${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity"
}
# stack.sh entry points
# ---------------------
# init_tacker() - Initialize databases, etc.
function init_tacker {
    # Install the db client in case the db service is disabled (enabling it
    # would have installed the cli commands already).
    install_db_client_$DATABASE_TYPE
    recreate_database $TACKER_DB_NAME
    # Run Tacker db migrations
    $TACKER_BIN_DIR/tacker-db-manage --config-file $TACKER_CONF upgrade head
}
# install_tacker() - Collect source and prepare
function install_tacker {
    setup_develop $TACKER_DIR
}
function start_tacker {
    local cfg_file_options="--config-file $TACKER_CONF"
    local service_port=$TACKER_PORT
    local service_protocol=$TACKER_PROTOCOL

    # Start the Tacker service
    sudo cp $TACKER_DIR/etc/systemd/system/tacker.service $SYSTEMD_DIR/devstack@tacker.service
    iniset -sudo $SYSTEMD_DIR/devstack@tacker.service "Service" "User" $STACK_USER
    iniset -sudo $SYSTEMD_DIR/devstack@tacker.service "Service" "ExecStart" "$TACKER_BIN_DIR/tacker-server $cfg_file_options"
    sudo systemctl enable devstack@tacker.service
    sudo systemctl restart devstack@tacker.service

    # Start the Tacker conductor service
    sudo cp $TACKER_DIR/etc/systemd/system/tacker-conductor.service $SYSTEMD_DIR/devstack@tacker-conductor.service
    iniset -sudo $SYSTEMD_DIR/devstack@tacker-conductor.service "Service" "User" $STACK_USER
    iniset -sudo $SYSTEMD_DIR/devstack@tacker-conductor.service "Service" "ExecStart" "$TACKER_BIN_DIR/tacker-conductor $cfg_file_options"
    sudo systemctl enable devstack@tacker-conductor.service
    sudo systemctl restart devstack@tacker-conductor.service

    echo "Waiting for Tacker to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $service_protocol://$TACKER_HOST:$service_port; do sleep 1; done"; then
        die $LINENO "Tacker did not start"
    fi
}
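# NOTE: once started, the units can be inspected with standard systemd
# tooling, e.g. ``sudo systemctl status devstack@tacker.service`` or
# ``sudo journalctl -u devstack@tacker-conductor.service`` (illustrative
# commands; this script does not run them).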
# stop_tacker() - Stop running processes (non-screen)
function stop_tacker {
    stop_process tacker
    stop_process tacker-conductor
}
# cleanup_tacker() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_tacker {
    sudo rm -rf $VNF_PACKAGE_CSAR_PATH
    sudo rm -rf $FILESYSTEM_STORE_DATA_DIR
}
function _create_tacker_conf_dir {
    # Put config files in ``TACKER_CONF_DIR`` for everyone to find
    sudo install -d -o $STACK_USER $TACKER_CONF_DIR
}
function _create_tacker_data_dirs {
    # Create tacker data folder
    sudo install -d -o $STACK_USER $TACKER_DATA_DIR
    # Create vnf package csar extract path folder
    sudo install -d -o $STACK_USER ${VNF_PACKAGE_CSAR_PATH}
    # Create folder for storing csar files in the glance store.
    sudo install -d -o $STACK_USER ${FILESYSTEM_STORE_DATA_DIR}
}
# configure_tacker()
# Set common config for all tacker server and agents.
function configure_tacker {
    _create_tacker_conf_dir

    cd $TACKER_DIR
    ./tools/generate_config_file_sample.sh
    cd -

    cp $TACKER_DIR/etc/tacker/tacker.conf.sample $TACKER_CONF
    cp $TACKER_DIR/etc/tacker/prometheus-plugin.yaml $TACKER_CONF_DIR/prometheus-plugin.yaml

    iniset_rpc_backend tacker $TACKER_CONF

    iniset $TACKER_CONF database connection $(database_connection_url $TACKER_DB_NAME)
    iniset $TACKER_CONF DEFAULT state_path $TACKER_DATA_DIR
    iniset $TACKER_CONF DEFAULT use_syslog $SYSLOG
    iniset $TACKER_CONF vnf_package vnf_package_csar_path "$VNF_PACKAGE_CSAR_PATH"
    iniset $TACKER_CONF glance_store filesystem_store_datadir "$FILESYSTEM_STORE_DATA_DIR"
    iniset $TACKER_CONF glance_store default_backend "$GLANCE_DEFAULT_BACKEND"

    # Create the required data folders
    _create_tacker_data_dirs

    # Format logging
    setup_logging $TACKER_CONF

    # server
    TACKER_API_PASTE_FILE=$TACKER_CONF_DIR/api-paste.ini
    cp $TACKER_DIR/etc/tacker/api-paste.ini $TACKER_API_PASTE_FILE

    iniset $TACKER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
    iniset $TACKER_CONF DEFAULT auth_strategy $TACKER_AUTH_STRATEGY
    _tacker_setup_keystone $TACKER_CONF keystone_authtoken

    if [[ "${TACKER_MODE}" == "all" || "${IS_ZUUL_FT}" == "True" ]]; then
        iniset "/$Q_PLUGIN_CONF_FILE" ml2_type_flat flat_networks $PUBLIC_PHYSICAL_NETWORK,$MGMT_PHYS_NET
        iniset "/$Q_PLUGIN_CONF_FILE" ovs bridge_mappings $PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE,$MGMT_PHYS_NET:$BR_MGMT
        echo "Creating bridge"
        sudo ovs-vsctl --may-exist add-br ${BR_MGMT}
    fi

    if [[ "${USE_BARBICAN}" == "True" ]]; then
        iniset $TACKER_CONF vim_keys use_barbican True
    fi
}
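# NOTE: TACKER_MODE, IS_ZUUL_FT, USE_BARBICAN and the Q_PLUGIN_CONF_FILE /
# bridge variables referenced above are not defaulted in this file; they
# are expected to come from the plugin settings or the job definition.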
# Utility Functions
#------------------
# Configures keystone integration for tacker service and agents
function _tacker_setup_keystone {
    local conf_file=$1
    local section=$2
    local use_auth_url=$3

    # Configures keystone for metadata_agent
    # metadata_agent needs auth_url to communicate with keystone
    if [[ "$use_auth_url" == "True" ]]; then
        iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI
    fi

    configure_keystone_authtoken_middleware $conf_file $TACKER_ADMIN_USERNAME $section
}
function tacker_horizon_install {
    git_clone_by_name "tacker-horizon"
    setup_dev_lib "tacker-horizon"
    sudo cp $DEST/tacker-horizon/tacker_horizon/enabled/* $DEST/horizon/openstack_dashboard/enabled/
    # make sure NFV's dashboard static files get loaded when deploying
    $PYTHON $DEST/tacker-horizon/manage.py collectstatic --noinput
    echo yes | $PYTHON $DEST/tacker-horizon/manage.py compress
    restart_apache_server
}
function tacker_horizon_uninstall {
    sudo rm -f $DEST/horizon/openstack_dashboard/enabled/_80_nfv.py
    restart_apache_server
}
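# openstack_image_create() - Register an image in Glance and print its ID
# Usage: openstack_image_create <image-file> <image-name> <disk-format>
# e.g.:  openstack_image_create $FILES/cirros-0.5.2-x86_64-disk.img \
#            cirros-0.5.2-x86_64-disk qcow2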
function openstack_image_create {
    image=$1
    container_format=bare
    image_name=$2
    disk_format=$3
    openstack --os-cloud=devstack-admin image create $image_name --public --container-format=$container_format --disk-format $disk_format --file ${image}
    openstack image show $image_name -f value -c id
}
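# tacker_check_and_download_images() - Fetch the test images (currently only
# cirros) into $FILES, decompress ``.gz`` archives when needed, and register
# any image that is not already present in Glance.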
function tacker_check_and_download_images {
    local image_url
    image_url[0]="http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img"

    local image_fname image_name glance_name
    local gz_pattern="\.gz$"
    local length=${#image_url[@]}
    local index=0

    install_package jq
    while [ $index -lt $length ]
    do
        image_fname=$(basename "${image_url[$index]}")
        glance_name=${image_fname%.*}
        image_name=$(openstack image list | grep "$glance_name" | awk '{print $4}')
        if [[ $image_name == "" ]]; then
            if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
                if [[ -f ${image_url[$index]} ]]; then
                    cp ${image_url[$index]} $FILES/$image_fname
                else
                    {
                        wget --progress=dot:giga -c ${image_url[$index]} -O $FILES/$image_fname
                    } || {
                        echo "WARNING: download image ${image_url[$index]} failed"
                        index=$(($index+1))
                        continue
                    }
                fi
                if [[ $image_fname =~ $gz_pattern ]]; then
                    new_image_fname=${image_fname%.*}
                    gunzip -c $FILES/$image_fname > $FILES/$new_image_fname
                    image_fname=$new_image_fname
                fi
            fi
            {
                disk_format=$(qemu-img info --output=json $FILES/$image_fname | jq -r '.format')
                openstack_image_create $FILES/$image_fname $glance_name $disk_format
            } || {
                echo "ERROR: tacker image create for $image_fname failed"
                image_fname=$image_fname"*"
                sudo rm -rf $FILES/$image_fname
                exit 1
            }
        fi
        index=$(($index+1))
    done
}
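# tacker_create_initial_network() - Recreate the management and test networks
# (NET_MGMT, NET0, NET1) used by the functional tests, then bring up the
# management bridge and assign it the gateway address.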
function tacker_create_initial_network {
    # create necessary networks
    # prepare network
    echo "Deleting networks"
    for net in ${NET_MGMT} ${NET0} ${NET1}
    do
        for i in $(openstack network list | awk "/${net}/{print \$2}")
        do
            openstack network delete $i
        done
    done

    echo "Creating networks"
    NET_MGMT_ID=$(openstack network create --provider-network-type flat --provider-physical-network ${MGMT_PHYS_NET} --share ${NET_MGMT} | awk '/ id /{print $4}')
    SUBNET_MGMT_ID=$(openstack subnet create ${SUBNET_MGMT} --ip-version 4 --gateway ${NETWORK_GATEWAY_MGMT} --network ${NET_MGMT_ID} --subnet-range ${FIXED_RANGE_MGMT} | awk '/ id /{print $4}')
    NET0_ID=$(openstack network create --share ${NET0} | awk '/ id /{print $4}')
    SUBNET0_ID=$(openstack subnet create ${SUBNET0} --ip-version 4 --gateway ${NETWORK_GATEWAY0} --network ${NET0_ID} --subnet-range ${FIXED_RANGE0} | awk '/ id /{print $4}')
    NET1_ID=$(openstack network create --share ${NET1} | awk '/ id /{print $4}')
    SUBNET1_ID=$(openstack subnet create ${SUBNET1} --ip-version 4 --gateway ${NETWORK_GATEWAY1} --network ${NET1_ID} --subnet-range ${FIXED_RANGE1} | awk '/ id /{print $4}')

    echo "Assign ip address to BR_MGMT"
    sudo ip link set ${BR_MGMT} up
    sudo ip -4 address flush dev ${BR_MGMT}
    sudo ip address add ${NETWORK_GATEWAY_MGMT_IP} dev ${BR_MGMT}
}
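# tacker_setup_default_vim_resources() - Create the project and user that the
# default VIM config refers to, then pre-create the quota, keypair and
# security group resources used by the functional tests.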
function tacker_setup_default_vim_resources {
    # Note: These must be the same as in samples/tests/etc/samples/local-vim.yaml
    # and devstack/vim_config.yaml
    DEFAULT_VIM_PROJECT_NAME="nfv"
    DEFAULT_VIM_USER="nfv_user"
    DEFAULT_VIM_PASSWORD="devstack"

    echo "Create NFV VIM project $DEFAULT_VIM_PROJECT_NAME ..."
    get_or_create_project $DEFAULT_VIM_PROJECT_NAME
    echo "Create NFV VIM user $DEFAULT_VIM_USER ..."
    get_or_create_user $DEFAULT_VIM_USER $DEFAULT_VIM_PASSWORD default
    get_or_add_user_project_role admin $DEFAULT_VIM_USER $DEFAULT_VIM_PROJECT_NAME
    # get_or_add_user_project_role advsvc $DEFAULT_VIM_USER $DEFAULT_VIM_PROJECT_NAME

    echo "Prepare the config file used when registering the default VIM from the ansible role..."
    cp $TACKER_DIR/devstack/vim_config.yaml $TACKER_DATA_DIR
    VIM_CONFIG_FILE="$TACKER_DATA_DIR/vim_config.yaml"
    sed -e "s|^auth_url:.*$|auth_url: \'${KEYSTONE_SERVICE_URI}\'|" -i $VIM_CONFIG_FILE
    echo "The content of VIM config file $VIM_CONFIG_FILE :"
    cat $VIM_CONFIG_FILE

    echo "Set up target openstack vim resources ..."
    openstack --os-cloud=devstack-admin quota set \
        --default --instances -1 --cores -1 --ram -1 default
    openstack --os-cloud=devstack-admin \
        quota set --ports -1 $DEFAULT_VIM_PROJECT_NAME --force
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD --os-cloud "" keypair create userKey
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD --os-cloud "" \
        security group create \
        --description "tacker functest security group" test_secgrp
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD --os-cloud "" \
        security group rule create \
        --ingress --protocol icmp test_secgrp
    openstack --os-region-name $REGION_NAME --os-project-name $DEFAULT_VIM_PROJECT_NAME \
        --os-user-domain-id default --os-username $DEFAULT_VIM_USER \
        --os-project-domain-id default --os-auth-url $KEYSTONE_SERVICE_URI \
        --os-password $DEFAULT_VIM_PASSWORD --os-cloud "" \
        security group rule create \
        --ingress --protocol tcp --dst-port 22 test_secgrp
}
function setup_k8s_service {
    # These kernel modules and configurations are required to build
    # a kubernetes cluster and communicate between pods.
    cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
    sudo modprobe overlay
    sudo modprobe br_netfilter

    cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
    sudo sysctl --system
    # NOTE: When creating a k8s environment with devstack-plugin-container
    # and deploying a Pod, the following error occurred - `network: failed
    # to set bridge addr: "cni0" already has an IP address different from
    # 10.x.x.x` - and the Pod failed to be deployed. As a fix, delete the
    # related interfaces and restart the services.
    local flannel_path="https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml"
    if ip link show cni0 > /dev/null 2>&1; then
        sudo ip link set cni0 down
        sudo ip link delete cni0
    fi
    if ip link show flannel.1 > /dev/null 2>&1; then
        sudo ip link set flannel.1 down
        sudo ip link delete flannel.1
    fi
    sudo systemctl restart kubelet
    kubectl delete pod -n kube-system \
        $(kubectl get pod -n kube-system --no-headers -o custom-columns=":metadata.name" |
        grep coredns | tr -s '\n' ' ')
    kubectl delete -f $flannel_path
    kubectl apply -f $flannel_path
}