Add documentation for running v1 FT locally

This patch adds documentation for running v1 FT locally on a multi-node
system similar to Zuul. The purpose of this change is to encourage
users to test Tacker v1 features locally, since most v1 features are
no longer under active development and are planned to be moved to
periodic jobs or dropped from Zuul tests.

Five test scenarios are supported in this change as a first step.

  * tacker-ft-legacy-vim
  * tacker-ft-v1-vnfpkgm
  * tacker-ft-v1-k8s
  * tacker-ft-v1-tosca-vnflcm
  * tacker-ft-v1-userdata-vnflcm

It also provides sample DevStack configuration files
(`local.conf.HOSTNAME`) and setup scripts.

Implements: blueprint reduce-ft-time
Co-Authored-By: Ai Hamano <ai.hamano@ntt-at.co.jp>
Co-Authored-By: Yasufumi Ogawa <yasufum.o@gmail.com>
Change-Id: Id0debea403ccb9490da14b1a77c89ff7b42ee178
ssunaga
2024-12-19 08:10:29 +09:00
committed by Yasufumi Ogawa
parent 2789e93eb8
commit f2383969da
16 changed files with 1818 additions and 14 deletions

devstack/multi-nodes/k8s/local.conf.controller

@@ -0,0 +1,99 @@
# Using k8s
# local.conf.controller
# NOTE: Items that are used in local.conf on Zuul but are not required
# locally have been commented out.
[[local|localrc]]
disable_all_services
enable_service c-api
#disable_service c-bak
enable_service c-sch
enable_service c-vol
enable_service cinder
#disable_service coredns
enable_service etcd3
enable_service g-api
enable_service g-reg
#disable_service horizon
enable_service key
enable_service mysql
enable_service n-api
enable_service n-api-meta
enable_service n-cond
#disable_service n-cpu
enable_service n-novnc
enable_service n-sch
enable_service neutron
enable_service ovn-controller
enable_service ovn-northd
enable_service ovs-vswitchd
enable_service ovsdb-server
enable_service placement-api
enable_service placement-client
enable_service q-ovn-metadata-agent
enable_service q-qos
enable_service q-svc
enable_service rabbit
#disable_service s-account
#disable_service s-container
#disable_service s-object
#disable_service s-proxy
#disable_service swift
#disable_service tempest
#disable_service tls-proxy
ADMIN_PASSWORD="secretadmin"
#CELLSV2_SETUP="singleconductor"
DATABASE_PASSWORD="secretdatabase"
#DATABASE_TYPE="mysql"
DEBUG_LIBVIRT_COREDUMPS="True"
#DEVSTACK_PARALLEL="True"
ENABLE_SYSCTL_MEM_TUNING="True"
ENABLE_SYSCTL_NET_TUNING="True"
ENABLE_ZSWAP="True"
#ERROR_ON_CLONE="True"
ETCD_USE_RAMDISK="True"
FIXED_RANGE="10.1.0.0/20"
FLOATING_RANGE="192.168.56.0/24"
GLOBAL_VENV="False"
HOST_IP="192.168.56.21"
IPV4_ADDRS_SAFE_TO_USE="10.1.0.0/20"
#IS_ZUUL_FT="True"
#KEYSTONE_SERVICE_HOST="10.209.133.219"
L2_AGENT_EXTENSIONS="qos"
LIBVIRT_TYPE="qemu"
LOGFILE="/opt/stack/logs/devstacklog.txt"
#LOG_COLOR="False"
MYSQL_HOST="192.168.56.21"
NETWORK_GATEWAY="10.1.0.1"
NOVA_LIBVIRT_TB_CACHE_SIZE="128"
NOVA_VNC_ENABLED="True"
NOVNC_FROM_PACKAGE="True"
OVN_DBS_LOG_LEVEL="dbg"
OVN_L3_CREATE_PUBLIC_NETWORK="True"
OVS_BRIDGE_MAPPINGS="public:br-ex,mgmtphysnet0:br-infra"
PHYSICAL_NETWORK="mgmtphysnet0"
PUBLIC_BRIDGE_MTU="1430"
PUBLIC_NETWORK_GATEWAY="192.168.56.1"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
Q_SERVICE_PLUGIN_CLASSES="ovn-router,neutron.services.qos.qos_plugin.QoSPlugin,qos"
RABBIT_PASSWORD="secretrabbit"
SERVICE_HOST="192.168.56.21"
SERVICE_PASSWORD="secretservice"
#SWIFT_HASH="1234123412341234"
#SWIFT_REPLICAS="1"
#SWIFT_START_ALL_SERVICES="False"
#TACKER_HOST="10.208.192.130"
#TACKER_MODE="standalone"
USE_PYTHON3="True"
#VERBOSE="True"
VERBOSE_NO_TIMESTAMP="True"
LIBS_FROM_GIT=barbican,cinder,devstack,devstack-plugin-container,glance,heat,horizon,keystone,neutron,nova,os-test-images,placement,python-barbicanclient,python-tackerclient,requirements,swift,tacker,tacker-horizon
enable_plugin barbican https://opendev.org/openstack/barbican
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin neutron https://opendev.org/openstack/neutron
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
global_physnet_mtu = 1430
[[post-config|$NEUTRON_DHCP_CONF]]
[DEFAULT]
enable_isolated_metadata = True

devstack/multi-nodes/k8s/local.conf.controller-k8s

@@ -0,0 +1,62 @@
# Using k8s
# local.conf.controller-k8s
# NOTE: Items that are used in local.conf on Zuul but are not required
# locally have been commented out.
[[local|localrc]]
disable_all_services
enable_service container
#disable_service etcd3
enable_service k8s-master
enable_service ovn-controller
enable_service ovn-northd
enable_service ovs-vswitchd
enable_service ovsdb-server
enable_service q-ovn-metadata-agent
ADMIN_PASSWORD="secretadmin"
#CELLSV2_SETUP="singleconductor"
CONTAINER_ENGINE="crio"
CRIO_VERSION="1.30.5"
DATABASE_HOST="192.168.56.21"
DATABASE_PASSWORD="secretdatabase"
DATABASE_TYPE="mysql"
ENABLE_CHASSIS_AS_GW="False"
ENABLE_SYSCTL_MEM_TUNING="True"
ENABLE_SYSCTL_NET_TUNING="True"
ENABLE_ZSWAP="True"
#ERROR_ON_CLONE="True"
FIXED_RANGE="10.1.0.0/20"
FLOATING_RANGE="192.168.56.0/24"
#GLANCE_HOSTPORT="10.209.133.219:9292"
GLOBAL_VENV="False"
HOST_IP="192.168.56.23"
IPV4_ADDRS_SAFE_TO_USE="10.1.0.0/20"
#IS_ZUUL_FT="True"
K8S_API_SERVER_IP="192.168.56.23"
K8S_TOKEN="9agf12.zsu5uh2m4pzt3qba"
K8S_VERSION="1.30.5"
#KEYSTONE_SERVICE_HOST="10.209.133.219"
LIBVIRT_TYPE="qemu"
LOGFILE="/opt/stack/logs/devstacklog.txt"
#LOG_COLOR="False"
MYSQL_HOST="192.168.56.21"
NETWORK_GATEWAY="10.1.0.1"
NOVA_LIBVIRT_TB_CACHE_SIZE="128"
NOVA_VNC_ENABLED="True"
NOVNC_FROM_PACKAGE="True"
OVS_BRIDGE_MAPPINGS="public:br-ex,mgmtphysnet0:br-infra"
PHYSICAL_NETWORK="mgmtphysnet0"
PUBLIC_BRIDGE_MTU="1430"
PUBLIC_NETWORK_GATEWAY="192.168.56.1"
#Q_HOST="10.209.133.219"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
RABBIT_HOST="192.168.56.21"
RABBIT_PASSWORD="secretrabbit"
SERVICE_HOST="192.168.56.21"
SERVICE_PASSWORD="secretservice"
#TACKER_HOST="10.208.192.130"
#TACKER_MODE="standalone"
USE_PYTHON3="True"
#VERBOSE="True"
VERBOSE_NO_TIMESTAMP="True"
LIBS_FROM_GIT=barbican,cinder,devstack,devstack-plugin-container,glance,heat,horizon,keystone,neutron,nova,os-test-images,placement,python-barbicanclient,python-tackerclient,requirements,swift,tacker,tacker-horizon
enable_plugin devstack-plugin-container https://opendev.org/openstack/devstack-plugin-container

devstack/multi-nodes/k8s/local.conf.controller-tacker

@@ -0,0 +1,68 @@
# Using k8s
# local.conf.controller-tacker
# NOTE: Items that are used in local.conf on Zuul but are not required
# locally have been commented out.
[[local|localrc]]
disable_all_services
enable_service ovn-controller
enable_service ovn-northd
enable_service ovs-vswitchd
enable_service ovsdb-server
enable_service q-ovn-metadata-agent
enable_service tacker
enable_service tacker-conductor
ADMIN_PASSWORD="secretadmin"
#CELLSV2_SETUP="singleconductor"
#CONTAINER_ENGINE="crio"
#CRIO_VERSION="1.30.5"
DATABASE_HOST="192.168.56.21"
DATABASE_PASSWORD="secretdatabase"
DATABASE_TYPE="mysql"
ENABLE_CHASSIS_AS_GW="False"
ENABLE_SYSCTL_MEM_TUNING="True"
ENABLE_SYSCTL_NET_TUNING="True"
ENABLE_ZSWAP="True"
#ERROR_ON_CLONE="True"
FIXED_RANGE="10.1.0.0/20"
FLOATING_RANGE="192.168.56.0/24"
#GLANCE_HOSTPORT="10.209.133.219:9292"
GLOBAL_VENV="False"
HOST_IP="192.168.56.22"
IPV4_ADDRS_SAFE_TO_USE="10.1.0.0/20"
IS_ZUUL_FT="True"
#K8S_API_SERVER_IP="10.209.0.153"
#K8S_TOKEN="9agf12.zsu5uh2m4pzt3qba"
#K8S_VERSION="1.30.5"
#KEYSTONE_SERVICE_HOST="10.209.133.219"
LIBVIRT_TYPE="qemu"
LOGFILE="/opt/stack/logs/devstacklog.txt"
#LOG_COLOR="False"
MYSQL_HOST="192.168.56.21"
NETWORK_GATEWAY="10.1.0.1"
NOVA_LIBVIRT_TB_CACHE_SIZE="128"
NOVA_VNC_ENABLED="True"
NOVNC_FROM_PACKAGE="True"
OVS_BRIDGE_MAPPINGS="public:br-ex,mgmtphysnet0:br-infra"
PHYSICAL_NETWORK="mgmtphysnet0"
PUBLIC_BRIDGE_MTU="1430"
PUBLIC_NETWORK_GATEWAY="192.168.56.1"
#Q_HOST="10.209.133.219"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
RABBIT_HOST="192.168.56.21"
RABBIT_PASSWORD="secretrabbit"
SERVICE_HOST="192.168.56.21"
SERVICE_PASSWORD="secretservice"
TACKER_HOST="192.168.56.22"
TACKER_MODE="standalone"
#USE_PYTHON3="True"
#VERBOSE="True"
VERBOSE_NO_TIMESTAMP="True"
LIBS_FROM_GIT=barbican,cinder,devstack,devstack-plugin-container,glance,heat,horizon,keystone,neutron,nova,os-test-images,placement,python-barbicanclient,python-tackerclient,requirements,swift,tacker,tacker-horizon
enable_plugin tacker https://opendev.org/openstack/tacker
[[post-config|$TACKER_CONF]]
[database]
max_pool_size = 0
[kubernetes_vim]
stack_retries = 120

devstack/multi-nodes/openstack/local.conf.compute1

@@ -0,0 +1,68 @@
# Not using k8s
# local.conf.compute1
# NOTE: Items that are used in local.conf on Zuul but are not required
# locally have been commented out.
[[local|localrc]]
disable_all_services
enable_service c-bak
enable_service c-vol
#disable_service dstat
enable_service file_tracker
#disable_service horizon
enable_service memory_tracker
enable_service n-cpu
enable_service openstack-cli-server
enable_service ovn-controller
enable_service ovs-vswitchd
enable_service ovsdb-server
enable_service placement-client
enable_service q-ovn-metadata-agent
#disable_service tempest
#disable_service tls-proxy
ADMIN_PASSWORD="secretadmin"
#CELLSV2_SETUP="singleconductor"
DATABASE_HOST="192.168.56.11"
DATABASE_PASSWORD="secretdatabase"
DATABASE_TYPE="mysql"
ENABLE_CHASSIS_AS_GW="False"
ENABLE_SYSCTL_MEM_TUNING="True"
ENABLE_SYSCTL_NET_TUNING="True"
ENABLE_ZSWAP="True"
#ERROR_ON_CLONE="True"
FIXED_RANGE="10.1.0.0/20"
FLOATING_RANGE="192.168.56.0/24"
#GLANCE_HOSTPORT="192.168.56.11:9292"
GNOCCHI_SERVICE_HOST="192.168.56.12"
HOST_IP="192.168.56.13"
IPV4_ADDRS_SAFE_TO_USE="10.1.0.0/20"
#IS_ZUUL_FT="True"
#KEYSTONE_SERVICE_HOST="192.168.56.11"
L2_AGENT_EXTENSIONS="qos"
LIBVIRT_TYPE="qemu"
LOGFILE="/opt/stack/logs/devstacklog.txt"
#LOG_COLOR="False"
MYSQL_HOST="192.168.56.11"
NETWORK_GATEWAY="10.1.0.1"
NOVA_LIBVIRT_TB_CACHE_SIZE="128"
NOVA_VNC_ENABLED="True"
NOVNC_FROM_PACKAGE="True"
OVS_BRIDGE_MAPPINGS="public:br-ex,mgmtphysnet0:br-infra"
PHYSICAL_NETWORK="mgmtphysnet0"
PUBLIC_BRIDGE_MTU="1430"
PUBLIC_NETWORK_GATEWAY="192.168.56.1"
#Q_HOST="192.168.56.11"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
Q_SERVICE_PLUGIN_CLASSES="qos,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,neutron.services.qos.qos_plugin.QoSPlugin,ovn-router"
RABBIT_HOST="192.168.56.11"
RABBIT_PASSWORD="secretrabbit"
SERVICE_HOST="192.168.56.11"
SERVICE_PASSWORD="secretservice"
#TACKER_HOST="192.168.56.12"
#TACKER_MODE="standalone"
#VERBOSE="True"
VERBOSE_NO_TIMESTAMP="True"
LIBS_FROM_GIT=aodh,barbican,cinder,devstack,glance,heat,horizon,keystone,networking-sfc,neutron,nova,os-test-images,placement,python-barbicanclient,python-tackerclient,requirements,swift,tacker,tacker-horizon
[[post-config|$NOVA_CONF]]
[DEFAULT]
vif_plugging_is_fatal = False

devstack/multi-nodes/openstack/local.conf.compute2

@@ -0,0 +1,68 @@
# Not using k8s
# local.conf.compute2
# NOTE: Items that are used in local.conf on Zuul but are not required
# locally have been commented out.
[[local|localrc]]
disable_all_services
enable_service c-bak
enable_service c-vol
#disable_service dstat
enable_service file_tracker
#disable_service horizon
enable_service memory_tracker
enable_service n-cpu
enable_service openstack-cli-server
enable_service ovn-controller
enable_service ovs-vswitchd
enable_service ovsdb-server
enable_service placement-client
enable_service q-ovn-metadata-agent
#disable_service tempest
#disable_service tls-proxy
ADMIN_PASSWORD="secretadmin"
#CELLSV2_SETUP="singleconductor"
DATABASE_HOST="192.168.56.11"
DATABASE_PASSWORD="secretdatabase"
DATABASE_TYPE="mysql"
ENABLE_CHASSIS_AS_GW="False"
ENABLE_SYSCTL_MEM_TUNING="True"
ENABLE_SYSCTL_NET_TUNING="True"
ENABLE_ZSWAP="True"
#ERROR_ON_CLONE="True"
FIXED_RANGE="10.1.0.0/20"
FLOATING_RANGE="192.168.56.0/24"
#GLANCE_HOSTPORT="192.168.56.11:9292"
GNOCCHI_SERVICE_HOST="192.168.56.12"
HOST_IP="192.168.56.14"
IPV4_ADDRS_SAFE_TO_USE="10.1.0.0/20"
#IS_ZUUL_FT="True"
#KEYSTONE_SERVICE_HOST="192.168.56.11"
L2_AGENT_EXTENSIONS="qos"
LIBVIRT_TYPE="qemu"
LOGFILE="/opt/stack/logs/devstacklog.txt"
#LOG_COLOR="False"
MYSQL_HOST="192.168.56.11"
NETWORK_GATEWAY="10.1.0.1"
NOVA_LIBVIRT_TB_CACHE_SIZE="128"
NOVA_VNC_ENABLED="True"
NOVNC_FROM_PACKAGE="True"
OVS_BRIDGE_MAPPINGS="public:br-ex,mgmtphysnet0:br-infra"
PHYSICAL_NETWORK="mgmtphysnet0"
PUBLIC_BRIDGE_MTU="1430"
PUBLIC_NETWORK_GATEWAY="192.168.56.1"
#Q_HOST="192.168.56.11"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
Q_SERVICE_PLUGIN_CLASSES="qos,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,neutron.services.qos.qos_plugin.QoSPlugin,ovn-router"
RABBIT_HOST="192.168.56.11"
RABBIT_PASSWORD="secretrabbit"
SERVICE_HOST="192.168.56.11"
SERVICE_PASSWORD="secretservice"
#TACKER_HOST="192.168.56.12"
#TACKER_MODE="standalone"
#VERBOSE="True"
VERBOSE_NO_TIMESTAMP="True"
LIBS_FROM_GIT=aodh,barbican,cinder,devstack,glance,heat,horizon,keystone,networking-sfc,neutron,nova,os-test-images,placement,python-barbicanclient,python-tackerclient,requirements,swift,tacker,tacker-horizon
[[post-config|$NOVA_CONF]]
[DEFAULT]
vif_plugging_is_fatal = False

devstack/multi-nodes/openstack/local.conf.controller

@@ -0,0 +1,93 @@
# Not using k8s
# local.conf.controller
# NOTE: Items that are used in local.conf on Zuul but are not required
# locally have been commented out.
[[local|localrc]]
disable_all_services
enable_service c-api
#disable_service c-bak
enable_service c-sch
enable_service c-vol
enable_service cinder
enable_service g-api
#disable_service horizon
enable_service key
enable_service mysql
enable_service n-api
enable_service n-api-meta
enable_service n-cond
#disable_service n-cpu
enable_service n-novnc
enable_service n-sch
enable_service ovn-controller
enable_service ovn-northd
enable_service ovs-vswitchd
enable_service ovsdb-server
enable_service placement-api
enable_service q-ovn-metadata-agent
enable_service q-qos
enable_service q-svc
enable_service rabbit
#disable_service s-account
#disable_service s-container
#disable_service s-object
#disable_service s-proxy
#disable_service swift
#disable_service tempest
#disable_service tls-proxy
ADMIN_PASSWORD="secretadmin"
#CELLSV2_SETUP="singleconductor"
DATABASE_PASSWORD="secretdatabase"
#DATABASE_TYPE="mysql"
DEBUG_LIBVIRT_COREDUMPS="True"
#DEVSTACK_PARALLEL="True"
ENABLE_SYSCTL_MEM_TUNING="True"
ENABLE_SYSCTL_NET_TUNING="True"
ENABLE_ZSWAP="True"
#ERROR_ON_CLONE="True"
FIXED_RANGE="10.1.0.0/20"
FLOATING_RANGE="192.168.56.0/24"
GNOCCHI_SERVICE_HOST="192.168.56.12"
HOST_IP="192.168.56.11"
IPV4_ADDRS_SAFE_TO_USE="10.1.0.0/20"
#KEYSTONE_SERVICE_HOST="192.168.56.11"
L2_AGENT_EXTENSIONS="qos"
LIBVIRT_TYPE="qemu"
LOGFILE="/opt/stack/logs/devstacklog.txt"
#LOG_COLOR="False"
MYSQL_HOST="192.168.56.11"
NETWORK_GATEWAY="10.1.0.1"
NOVA_LIBVIRT_TB_CACHE_SIZE="128"
NOVA_VNC_ENABLED="True"
NOVNC_FROM_PACKAGE="True"
OVN_DBS_LOG_LEVEL="dbg"
OVN_L3_CREATE_PUBLIC_NETWORK="True"
OVS_BRIDGE_MAPPINGS="public:br-ex,mgmtphysnet0:br-infra"
PHYSICAL_NETWORK="mgmtphysnet0"
PUBLIC_BRIDGE_MTU="1430"
PUBLIC_NETWORK_GATEWAY="192.168.56.1"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
Q_SERVICE_PLUGIN_CLASSES="ovn-router,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,neutron.services.qos.qos_plugin.QoSPlugin,qos"
RABBIT_PASSWORD="secretrabbit"
SERVICE_HOST="192.168.56.11"
SERVICE_PASSWORD="secretservice"
#SWIFT_HASH="1234123412341234"
#SWIFT_REPLICAS="1"
#SWIFT_START_ALL_SERVICES="False"
#VERBOSE="True"
VERBOSE_NO_TIMESTAMP="True"
LIBS_FROM_GIT=aodh,barbican,cinder,devstack,glance,heat,horizon,keystone,networking-sfc,neutron,nova,os-test-images,placement,python-barbicanclient,python-tackerclient,requirements,swift,tacker,tacker-horizon
enable_plugin aodh https://opendev.org/openstack/aodh
enable_plugin barbican https://opendev.org/openstack/barbican
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc
[[post-config|$CINDER_CONF]]
[lvmdriver-1]
image_volume_cache_enabled = False
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
client_socket_timeout = 60
[[post-config|$NEUTRON_DHCP_CONF]]
[DEFAULT]
enable_isolated_metadata = True

devstack/multi-nodes/openstack/local.conf.controller-tacker

@@ -0,0 +1,63 @@
# Not using k8s
# local.conf.controller-tacker
# NOTE: Items that are used in local.conf on Zuul but are not required
# locally have been commented out.
[[local|localrc]]
disable_all_services
enable_service ovn-controller
enable_service ovs-vswitchd
enable_service ovsdb-server
enable_service q-ovn-metadata-agent
enable_service tacker
enable_service tacker-conductor
ADMIN_PASSWORD="secretadmin"
#CELLSV2_SETUP="singleconductor"
DATABASE_HOST="192.168.56.11"
DATABASE_PASSWORD="secretdatabase"
DATABASE_TYPE="mysql"
ENABLE_CHASSIS_AS_GW="False"
ENABLE_SYSCTL_MEM_TUNING="True"
ENABLE_SYSCTL_NET_TUNING="True"
ENABLE_ZSWAP="True"
#ERROR_ON_CLONE="True"
FIXED_RANGE="10.1.0.0/20"
FLOATING_RANGE="192.168.56.0/24"
#GLANCE_HOSTPORT="192.168.56.11:9292"
GNOCCHI_SERVICE_HOST="192.168.56.12"
HOST_IP="192.168.56.12"
IPV4_ADDRS_SAFE_TO_USE="10.1.0.0/20"
IS_ZUUL_FT="True"
#KEYSTONE_SERVICE_HOST="192.168.56.11"
L2_AGENT_EXTENSIONS="qos"
LIBVIRT_TYPE="qemu"
LOGFILE="/opt/stack/logs/devstacklog.txt"
#LOG_COLOR="False"
MYSQL_HOST="192.168.56.11"
NETWORK_GATEWAY="10.1.0.1"
NOVA_LIBVIRT_TB_CACHE_SIZE="128"
NOVA_VNC_ENABLED="True"
NOVNC_FROM_PACKAGE="True"
OVS_BRIDGE_MAPPINGS="public:br-ex,mgmtphysnet0:br-infra"
PHYSICAL_NETWORK="mgmtphysnet0"
PUBLIC_BRIDGE_MTU="1430"
PUBLIC_NETWORK_GATEWAY="192.168.56.1"
#Q_HOST="192.168.56.11"
Q_ML2_PLUGIN_MECHANISM_DRIVERS="ovn,logger"
Q_SERVICE_PLUGIN_CLASSES="qos,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,neutron.services.qos.qos_plugin.QoSPlugin,ovn-router"
RABBIT_HOST="192.168.56.11"
RABBIT_PASSWORD="secretrabbit"
SERVICE_HOST="192.168.56.11"
SERVICE_PASSWORD="secretservice"
TACKER_HOST="192.168.56.12"
TACKER_MODE="standalone"
#VERBOSE="True"
VERBOSE_NO_TIMESTAMP="True"
LIBS_FROM_GIT=aodh,barbican,cinder,devstack,glance,heat,horizon,keystone,networking-sfc,neutron,nova,os-test-images,placement,python-barbicanclient,python-tackerclient,requirements,swift,tacker,tacker-horizon
enable_plugin tacker https://opendev.org/openstack/tacker
[[post-config|$TACKER_CONF]]
[database]
max_pool_size = 0
[openstack_vim]
stack_retries = 120

doc/source/contributor/tacker_functional_test.rst

@@ -15,16 +15,16 @@ tacker/tests/functional.
Writing a testcase:
===================
A testcase is written by declaring a class name derived from
class base.BaseTackerTest. BaseTackerTest is class declared in
tacker/tests/functional/vnfd/base.py.
tacker/tests/functional/base.py.
A testcase body typically looks as below:
.. code-block:: python
class vnfClassName(base.BaseTackerTest):
class testClassName(base.BaseTackerTest):
def test_create_delete(self):
//setup
//Testcase operations
@@ -33,7 +33,7 @@ A testcase body typically looks as below:
//cleanup
In above example test class 'vnfClassName' is derived from
In above example test class 'testClassName' is derived from
base.BaseTackerTest. Testcases typically has sections to setup, test, validate
results and finally cleanup.
@@ -56,7 +56,6 @@ create/delete/list vnfd/vnf once given the necessary parameters.
Verify tackerclient/v1_0/client.py for all the tacker related apis supported.
Important guidelines to follow:
===============================
@@ -66,6 +65,7 @@ Important guidelines to follow:
pip install -r test-requirements.txt
* It is important that the test case executed leaves the
system in the same state it was prior to test case execution
and not leave any stale data on system as this might affect
@@ -84,7 +84,6 @@ Important guidelines to follow:
tox -e pep8
Execution of testcase:
======================
@@ -92,14 +91,7 @@ Execution of testcase:
tacker service and endpoint, creates "nfv_user" and "nfv" project,
and registers default VIM with the created user and project.
* From tacker directory, all function testcases can be executed using
following commands:
.. code-block:: console
tox -e functional
* Or from tacker directory, specific testcases can be executed using
* From tacker directory, specific testcases can be executed using
following commands:
.. code-block:: console
@@ -107,6 +99,308 @@ Execution of testcase:
tox -e functional tacker.tests.functional.xxx.yyy.<testcase>
Multi-node configuration for testing
------------------------------------
To match the Zuul environment that runs the functional tests, install
the Tacker server via DevStack in a multi-node configuration.
.. note::
Not all functional tests require a multi-node configuration.
Many tests can be run in an all-in-one mode.
See :doc:`/install/devstack` for installing all-in-one mode.
The steps to create an environment with a multi-node configuration
similar to Zuul (controller/controller-tacker/compute1/compute2) are as
follows.
Here is a sample case that does not use k8s.
Create four virtual machine (VM) environments, each with an IP address
and host name, for example (a name-resolution sketch follows the list):
- 192.168.56.11 controller
- 192.168.56.12 controller-tacker
- 192.168.56.13 compute1
- 192.168.56.14 compute2
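If the hosts cannot already resolve each other's names, one simple
approach (an assumption of this guide, not a DevStack requirement) is
to append the mappings to ``/etc/hosts`` on every VM; the setup
scripts later reach hosts such as ``controller-tacker`` by name. The
same applies to the k8s case below with its own addresses.
.. code-block:: console
$ cat <<EOF | sudo tee -a /etc/hosts
192.168.56.11 controller
192.168.56.12 controller-tacker
192.168.56.13 compute1
192.168.56.14 compute2
EOF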
Of the above four hosts, DevStack on the controller host must be built
first. Once completed, proceed with the remaining three hosts:
controller-tacker, compute1, and compute2.
The order of building DevStack on these three hosts is not important,
and you can build them simultaneously if desired.
To build DevStack on each host, run the script ./stack.sh.
Here is a sample case that uses k8s.
Create three virtual machine (VM) environments, each with an IP address
and host name, for example:
- 192.168.56.21 controller
- 192.168.56.22 controller-tacker
- 192.168.56.23 controller-k8s
Of the above three hosts, DevStack on the controller host must be built
first. Once completed, proceed with the remaining two hosts:
controller-tacker and controller-k8s.
The order of building DevStack on these two hosts is not important,
and you can build them simultaneously if desired.
To build DevStack on each host, run the script ./stack.sh.
For the recommended machine specs and details on the OS and Linux
distribution to use, see `Devstack`_ or :doc:`/install/devstack`.
For not using k8s
^^^^^^^^^^^^^^^^^
#. Preparation
* Prepare four VMs that meet the following criteria
.. list-table::
:widths: 60 150
:header-rows: 1
* - Criteria
- Recommended
* - CPU
- 4 cores or more
* - RAM
- 16 GB or more
* - Storage
- 32 GB or more
* Create stack user on each VM
.. code-block:: console
$ sudo useradd -s /bin/bash -d /opt/stack -m stack
$ sudo chmod +x /opt/stack
$ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
* Download devstack on each VM
.. code-block:: console
$ git clone https://opendev.org/openstack/devstack
#. Create controller node
* Create the following local.conf on controller node
.. code-block:: console
$ cd devstack
$ vi local.conf
.. literalinclude:: ../../../devstack/multi-nodes/openstack/local.conf.controller
:language: ini
* Execute installation script
.. code-block:: console
$ ./stack.sh
#. Create controller-tacker node
* Create the following local.conf on controller-tacker node
.. code-block:: console
$ cd devstack
$ vi local.conf
.. literalinclude:: ../../../devstack/multi-nodes/openstack/local.conf.controller-tacker
:language: ini
* Execute installation script
.. code-block:: console
$ ./stack.sh
#. Create compute1 node
* Create the following local.conf on compute1 node
.. code-block:: console
$ cd devstack
$ vi local.conf
.. literalinclude:: ../../../devstack/multi-nodes/openstack/local.conf.compute1
:language: ini
* Execute installation script
.. code-block:: console
$ ./stack.sh
#. Create compute2 node
* Create the following local.conf on compute2 node
.. code-block:: console
$ cd devstack
$ vi local.conf
.. literalinclude:: ../../../devstack/multi-nodes/openstack/local.conf.compute2
:language: ini
* Execute installation script
.. code-block:: console
$ ./stack.sh
For using k8s
^^^^^^^^^^^^^
#. Preparation
* Prepare three VMs that meet the following criteria
.. list-table::
:widths: 60 150
:header-rows: 1
* - Criteria
- Recommended
* - CPU
- 4 cores or more
* - RAM
- 16 GB or more
* - Storage
- 32 GB or more
* Create stack user on each VM
.. code-block:: console
$ sudo useradd -s /bin/bash -d /opt/stack -m stack
$ sudo chmod +x /opt/stack
$ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
* Download devstack on each VM
.. code-block:: console
$ git clone https://opendev.org/openstack/devstack
#. Create controller node
* Create the following local.conf on controller node
.. code-block:: console
$ cd devstack
$ vi local.conf
.. literalinclude:: ../../../devstack/multi-nodes/k8s/local.conf.controller
:language: ini
* Execute installation script
.. code-block:: console
$ ./stack.sh
#. Create controller-tacker node
* Create the following local.conf on controller-tacker node
.. code-block:: console
$ cd devstack
$ vi local.conf
.. literalinclude:: ../../../devstack/multi-nodes/k8s/local.conf.controller-tacker
:language: ini
* Execute installation script
.. code-block:: console
$ ./stack.sh
#. Create controller-k8s node
* Create the following local.conf on controller-k8s node
.. code-block:: console
$ cd devstack
$ vi local.conf
.. literalinclude:: ../../../devstack/multi-nodes/k8s/local.conf.controller-k8s
:language: ini
* Execute installation script
.. code-block:: console
$ ./stack.sh
.. note::
Pre-settings may be required to install Kubernetes.
See the `Kubernetes documentation`_ for the target version for details.
For example, the following settings are required for Kubernetes 1.30.5.
.. code-block:: console
$ sudo modprobe overlay
$ sudo modprobe br_netfilter
$ cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward=1
EOF
$ sudo sysctl --system
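To verify that the modules and sysctl values are in effect, an
optional quick check is:
.. code-block:: console
$ lsmod | grep -E 'overlay|br_netfilter'
$ sudo sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward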
Settings
^^^^^^^^
Several settings are required to run the functional tests (FT).
We provide shell script files that implement those settings.
See :doc:`/reference/script_ft_v1` for how to use them.
Running these shell script files completes the setup for the following
functional tests (a sample invocation follows the list):
* tacker-ft-legacy-vim
* tacker-ft-v1-vnfpkgm
* tacker-ft-v1-k8s
* tacker-ft-v1-tosca-vnflcm
* tacker-ft-v1-userdata-vnflcm
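Once the setup scripts have completed, a suite can be run from the
tacker directory, typically on the host where the tacker repository is
checked out (controller-tacker in the layout above), using the
placeholder path convention from the execution section above; check
``tacker/tests/functional`` for the actual module layout.
.. code-block:: console
$ cd /opt/stack/tacker
$ tox -e functional tacker.tests.functional.xxx.yyy.<testcase>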
Committing testcase and opening a review:
=========================================
@@ -114,7 +408,13 @@ Committing testcase and opening a review:
review using below guidelines:
https://docs.openstack.org/infra/manual/developers.html
Sample testcase:
================
* Check the sample tests under the following directory:
https://opendev.org/openstack/tacker/src/branch/master/tacker/tests/functional/
.. _Devstack: https://docs.openstack.org/devstack/latest/
.. _Kubernetes documentation:
https://kubernetes.io/docs/setup/production-environment/container-runtimes/#install-and-configure-prerequisites

doc/source/install/devstack.rst

@@ -184,6 +184,14 @@ So the first step of installing tacker is to clone Devstack and prepare your
:language: ini
.. note::
Standalone mode is used in Zuul environments that run FT.
For more information about FT, see
:doc:`/contributor/tacker_functional_test` or the local.conf
used in each Zuul environment.
#. Execute installation script
After saving the ``local.conf``, we can run ``stack.sh`` in the terminal

doc/source/reference/index.rst

@@ -26,3 +26,4 @@ Tools
vim_config
gen_vnf_pkg
script_ft_v1

doc/source/reference/script_ft_v1.rst

@@ -0,0 +1,630 @@
==========================================
Scripts for Setting Up v1 Functional Tests
==========================================
We provide sample setup scripts for running Tacker v1 Functional Tests (FT).
They are intended to help you run the tests in your local environment.
.. note::
The content of this document has been confirmed to work
using Ubuntu 22.04, Kubernetes 1.30.5 and Helm 3.15.4.
Target Tests
~~~~~~~~~~~~
Not all v1 functional tests are supported; the scripts cover the
following:
* tacker-ft-legacy-vim
* tacker-ft-v1-vnfpkgm
* tacker-ft-v1-k8s
* tacker-ft-v1-tosca-vnflcm
* tacker-ft-v1-userdata-vnflcm
Files
~~~~~
.. code-block::
tools/doc_samples/setting_ft/
|
+--- openstack/
| openstack-controller.sh
| openstack-controller-tacker.sh
|
\--- kubernetes/
kube-controller.sh
kube-controller-k8s.sh
kube-controller-tacker.sh
* openstack/{openstack-controller.sh,openstack-controller-tacker.sh}:
These are the scripts to run when not using k8s.
Store them on each of the virtual machine (VM) hosts controller and
controller-tacker that you have created, and execute them on each host
in order.
* kubernetes/
{kube-controller.sh,kube-controller-k8s.sh,kube-controller-tacker.sh}:
These are the scripts to run when using k8s.
Store them on each of the VM hosts controller, controller-k8s, and
controller-tacker, and execute them on each host in order; one way to
copy them over is sketched below.
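As a sketch of one way to distribute the files (the ``stack`` user and
home-directory destination are assumptions, not requirements of the
scripts; the openstack/ scripts are copied analogously):
.. code-block:: console
$ scp tools/doc_samples/setting_ft/kubernetes/kube-controller.sh stack@controller:~/
$ scp tools/doc_samples/setting_ft/kubernetes/kube-controller-k8s.sh stack@controller-k8s:~/
$ scp tools/doc_samples/setting_ft/kubernetes/kube-controller-tacker.sh stack@controller-tacker:~/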
Usage
~~~~~
Here is how to use the provided shell scripts.
Run them only after ./stack.sh, which you ran to build DevStack, has
finished successfully on each host.
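On a default systemd-based DevStack deployment, one optional sanity
check (assuming the standard ``devstack@*`` unit naming) is to list
the running DevStack services before proceeding:
.. code-block:: console
$ systemctl list-units "devstack@*" --state=running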
Perform the following steps:
#. Edit files (if necessary)
Edit these shell script files as needed for your environment.
.. code-block:: console
$ vi openstack-controller.sh
$ vi openstack-controller-tacker.sh
$ vi kube-controller.sh
$ vi kube-controller-k8s.sh
$ vi kube-controller-tacker.sh
#. Grant execution rights to each shell script file
Grant execution rights to the shell script files provided here as follows:
.. code-block:: console
$ chmod +x openstack-controller.sh
$ chmod +x openstack-controller-tacker.sh
$ chmod +x kube-controller.sh
$ chmod +x kube-controller-k8s.sh
$ chmod +x kube-controller-tacker.sh
#. Run the shell script files
Run the shell script files provided here as follows.
The command prompt (e.g. ``stack@controller:~$``) indicates the user
name and host name on which to run each command.
Follow Step (a) if you're not using Kubernetes (k8s), or Step (b)
if you're using k8s.
In each case, execute the shell script files in the following order:
Step (a) not using k8s
.. code-block:: console
stack@controller:~$ ./openstack-controller.sh
stack@controller-tacker:~$ ./openstack-controller-tacker.sh
Output example:
.. code-block:: console
stack@controller:~$ ./openstack-controller.sh
d02ebf6e-9b4b-474f-9eb4-6492454653d4
Manager "ptcp:6640:127.0.0.1"
is_connected: true
Bridge br-ex
Port eth1
Interface eth1
Port br-ex
Interface br-ex
type: internal
Bridge br-int
fail_mode: secure
datapath_type: system
Port ovn-0d4c53-0
Interface ovn-0d4c53-0
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.56.12"}
Port ovn-19aa8a-0
Interface ovn-19aa8a-0
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.56.14"}
Port br-int
Interface br-int
type: internal
Port ovn-b5aa08-0
Interface ovn-b5aa08-0
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.56.13"}
ovs_version: "2.17.9"
mysql: [Warning] Using a password on the command line interface can be insecure.
host hypervisor_hostname mapped uuid
compute1 compute1 0 36fa9820-f25d-4ee9-8ec6-348c61230367
compute2 compute2 0 52cb3474-aaba-4168-bcbe-d5eb2ec9c2d2
INFO dbcounter [None req-fa994509-fb86-4112-a675-88f62d29f404 None None] Registered counter for database nova_api
DEBUG dbcounter [-] [102425] Writer thread running {{(pid=102425) stat_writer /opt/stack/data/venv/lib/python3.10/site-packages/dbcounter.py:102}}
INFO dbcounter [None req-fa994509-fb86-4112-a675-88f62d29f404 None None] Registered counter for database nova_cell1
DEBUG dbcounter [-] [102425] Writer thread running {{(pid=102425) stat_writer /opt/stack/data/venv/lib/python3.10/site-packages/dbcounter.py:102}}
mysql: [Warning] Using a password on the command line interface can be insecure.
host hypervisor_hostname mapped uuid
compute1 compute1 1 36fa9820-f25d-4ee9-8ec6-348c61230367
compute2 compute2 1 52cb3474-aaba-4168-bcbe-d5eb2ec9c2d2
.. code-block:: console
stack@controller-tacker:~$ ./openstack-controller-tacker.sh
+----------------+-----------------------------------------------------+
| Field | Value |
+----------------+-----------------------------------------------------+
| auth_cred | { |
| | "username": "nfv_user", |
| | "user_domain_name": "Default", |
| | "cert_verify": "False", |
| | "project_id": null, |
| | "project_name": "nfv", |
| | "project_domain_name": "Default", |
| | "auth_url": "http://192.168.56.11/identity/v3", |
| | "key_type": "barbican_key", |
| | "secret_uuid": "***", |
| | "password": "***" |
| | } |
| auth_url | http://192.168.56.11/identity/v3 |
| created_at | 2024-12-20 02:50:33.307091 |
| description | Default VIM |
| extra | |
| id | aef62040-8bbf-42a6-ae67-41ecb176b676 |
| is_default | True |
| name | VIM0 |
| placement_attr | { |
| | "regions": [ |
| | "RegionOne" |
| | ] |
| | } |
| project_id | d43072cade474f6183fafe62a723964a |
| status | ACTIVE |
| type | openstack |
| updated_at | None |
| vim_project | { |
| | "name": "nfv", |
| | "project_domain_name": "Default" |
| | } |
+----------------+-----------------------------------------------------+
Step (b) using k8s
.. code-block:: console
stack@controller:~$ ./kube-controller.sh
stack@controller-k8s:~$ ./kube-controller-k8s.sh
stack@controller-tacker:~$ ./kube-controller-tacker.sh
Output example:
.. code-block:: console
stack@controller:~$ ./kube-controller.sh
d2ecc874-7e67-4de0-acc6-a91c85a3db3d
Manager "ptcp:6640:127.0.0.1"
is_connected: true
Bridge br-int
fail_mode: secure
datapath_type: system
Port ovn-965252-0
Interface ovn-965252-0
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.56.23"}
Port br-int
Interface br-int
type: internal
Port ovn-947be9-0
Interface ovn-947be9-0
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.56.22"}
Bridge br-ex
Port br-ex
Interface br-ex
type: internal
Port eth1
Interface eth1
ovs_version: "2.17.9"
.. code-block:: console
stack@controller-k8s:~$ ./kube-controller-k8s.sh
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
link/ether 08:00:27:c8:98:64 brd ff:ff:ff:ff:ff:ff
altname enp0s3
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
link/ether 08:00:27:fe:b8:4b brd ff:ff:ff:ff:ff:ff
altname enp0s8
4: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether ce:a5:37:75:58:27 brd ff:ff:ff:ff:ff:ff
5: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 1a:b4:9a:5c:f7:f3 brd ff:ff:ff:ff:ff:ff
7: veth8a11ff95@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default
link/ether 4e:9f:3b:b9:1a:54 brd ff:ff:ff:ff:ff:ff link-netns 6516b4bd-db04-404d-ae04-c82203f4cd86
8: veth76da22e3@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default
link/ether 02:e6:c1:b1:42:57 brd ff:ff:ff:ff:ff:ff link-netns 9d7ff2fb-21c1-457e-9fa1-a7b3e8e87176
9: genev_sys_6081: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 65000 qdisc noqueue master ovs-system state UNKNOWN mode DEFAULT group default qlen 1000
link/ether 5e:44:4d:87:83:7f brd ff:ff:ff:ff:ff:ff
10: br-ex: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 86:af:dc:f3:fe:4d brd ff:ff:ff:ff:ff:ff
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel pod/kube-flannel-ds-cv57g 1/1 Running 0 38d
kube-system pod/coredns-55cb58b774-9qmrm 1/1 Running 0 38d
kube-system pod/coredns-55cb58b774-tn9pq 1/1 Running 0 38d
kube-system pod/kube-apiserver-controller-k8s 1/1 Running 5 (17m ago) 38d
kube-system pod/kube-controller-manager-controller-k8s 1/1 Running 2 (21m ago) 38d
kube-system pod/kube-proxy-9t2rz 1/1 Running 0 38d
kube-system pod/kube-scheduler-controller-k8s 1/1 Running 2 (21m ago) 38d
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 38d
kube-system service/kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 38d
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-flannel daemonset.apps/kube-flannel-ds 1 1 1 1 1 <none> 38d
kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 38d
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system deployment.apps/coredns 2/2 2 2 38d
NAMESPACE NAME DESIRED CURRENT READY AGE
kube-system replicaset.apps/coredns-55cb58b774 2 2 2 38d
pod "coredns-55cb58b774-9qmrm" deleted
pod "coredns-55cb58b774-tn9pq" deleted
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel pod/kube-flannel-ds-cv57g 1/1 Running 0 38d
kube-system pod/coredns-55cb58b774-6dllm 1/1 Running 0 7s
kube-system pod/coredns-55cb58b774-xmkqq 0/1 Running 0 7s
kube-system pod/kube-apiserver-controller-k8s 1/1 Running 5 (17m ago) 38d
kube-system pod/kube-controller-manager-controller-k8s 1/1 Running 2 (21m ago) 38d
kube-system pod/kube-proxy-9t2rz 1/1 Running 0 38d
kube-system pod/kube-scheduler-controller-k8s 1/1 Running 2 (21m ago) 38d
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 38d
kube-system service/kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 38d
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-flannel daemonset.apps/kube-flannel-ds 1 1 1 1 1 <none> 38d
kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 38d
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system deployment.apps/coredns 1/2 2 1 38d
NAMESPACE NAME DESIRED CURRENT READY AGE
kube-system replicaset.apps/coredns-55cb58b774 2 2 1 38d
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
The following NEW packages will be installed:
sshpass
0 upgraded, 1 newly installed, 0 to remove and 55 not upgraded.
Need to get 11.7 kB of archives.
After this operation, 35.8 kB of additional disk space will be used.
Get:1 http://us.archive.ubuntu.com/ubuntu jammy/universe amd64 sshpass amd64 1.09-1 [11.7 kB]
Fetched 11.7 kB in 2s (5,856 B/s)
Selecting previously unselected package sshpass.
(Reading database ... 79969 files and directories currently installed.)
Preparing to unpack .../sshpass_1.09-1_amd64.deb ...
Unpacking sshpass (1.09-1) ...
Setting up sshpass (1.09-1) ...
Processing triggers for man-db (2.10.2-1) ...
Scanning processes...
Scanning candidates...
Scanning linux images...
Running kernel seems to be up-to-date.
Restarting services...
/etc/needrestart/restart.d/systemd-manager
systemctl restart packagekit.service polkit.service ssh.service systemd-networkd.service systemd-resolved.service systemd-timesyncd.service systemd-udevd.service udisks2.service
Service restarts being deferred:
/etc/needrestart/restart.d/dbus.service
systemctl restart networkd-dispatcher.service
systemctl restart systemd-logind.service
systemctl restart user@1000.service
No containers need to be restarted.
No user sessions are running outdated binaries.
No VM guests are running outdated hypervisor (qemu) binaries on this host.
Warning: Permanently added 'controller-tacker' (ED25519) to the list of known hosts.
Adding user `helm' ...
Adding new group `helm' (1002) ...
Adding new user `helm' (1002) with group `helm' ...
Creating home directory `/home/helm' ...
Copying files from `/etc/skel' ...
total 16
drwxr-xr-x 2 root root 4096 Nov 11 10:32 .
drwxr-x--- 3 helm helm 4096 Dec 20 04:43 ..
-rw------- 1 stack stack 5653 Nov 11 10:32 config
total 16
drwxr-xr-x 2 helm helm 4096 Nov 11 10:32 .
drwxr-x--- 3 helm helm 4096 Dec 20 04:43 ..
-rw------- 1 helm helm 5653 Nov 11 10:32 config
total 4
drwxr-xr-x 2 helm helm 4096 Dec 20 04:43 helm
--- /etc/ssh/sshd_config_bk 2024-07-23 18:04:13.103999238 +0000
+++ /etc/ssh/sshd_config 2024-12-20 04:43:09.287879199 +0000
@@ -54,7 +54,7 @@
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
-#PasswordAuthentication yes
+PasswordAuthentication yes
#PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 15.8M 100 15.8M 0 0 9656k 0 0:00:01 0:00:01 --:--:-- 9658k
linux-amd64/
linux-amd64/helm
linux-amd64/LICENSE
linux-amd64/README.md
version.BuildInfo{Version:"v3.15.4", GitCommit:"fa9efb07d9d8debbb4306d72af76a383895aa8c4", GitTreeState:"clean", GoVersion:"go1.22.6"}
.. code-block:: console
stack@controller-tacker:~$ ./kube-controller-tacker.sh
+----------------+-----------------------------------------------------+
| Field | Value |
+----------------+-----------------------------------------------------+
| auth_cred | { |
| | "username": "nfv_user", |
| | "user_domain_name": "Default", |
| | "cert_verify": "False", |
| | "project_id": null, |
| | "project_name": "nfv", |
| | "project_domain_name": "Default", |
| | "auth_url": "http://192.168.56.21/identity/v3", |
| | "key_type": "barbican_key", |
| | "secret_uuid": "***", |
| | "password": "***" |
| | } |
| auth_url | http://192.168.56.21/identity/v3 |
| created_at | 2024-12-20 09:36:53.346748 |
| description | Default VIM |
| extra | |
| id | 76bf55a1-7df9-4d0b-999a-9febd074dc6f |
| is_default | True |
| name | VIM0 |
| placement_attr | { |
| | "regions": [ |
| | "RegionOne" |
| | ] |
| | } |
| project_id | 89047a7c599f44978802b1330fecc646 |
| status | ACTIVE |
| type | openstack |
| updated_at | None |
| vim_project | { |
| | "name": "nfv", |
| | "project_domain_name": "Default" |
| | } |
+----------------+-----------------------------------------------------+
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 138 100 138 0 0 500 0 --:--:-- --:--:-- --:--:-- 500
100 49.0M 100 49.0M 0 0 31.9M 0 0:00:01 0:00:01 --:--:-- 46.5M
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 138 100 138 0 0 512 0 --:--:-- --:--:-- --:--:-- 513
100 64 100 64 0 0 164 0 --:--:-- --:--:-- --:--:-- 164
kubectl: OK
Client Version: v1.30.5
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
total 8
-rw------- 1 stack stack 5653 Nov 11 10:32 config
Kubernetes control plane is running at https://192.168.56.23:6443
CoreDNS is running at https://192.168.56.23:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
Config for Kubernetes VIM 'tacker/samples/tests/etc/samples/local-k8s-vim.yaml' generated.
NAME TYPE DATA AGE
default-token-k8svim kubernetes.io/service-account-token 3 1s
--- tacker/samples/tests/etc/samples/local-k8s-vim.yaml_bk 2024-11-11 02:46:00.096741454 +0000
+++ tacker/samples/tests/etc/samples/local-k8s-vim.yaml 2024-12-20 09:36:57.433035278 +0000
@@ -1,5 +1,24 @@
-auth_url: "https://127.0.0.1:6443"
-bearer_token: "secret_token"
+auth_url: "https://192.168.56.23:6443"
+bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IkItQ1FuM2FCcmNDaF9uRzNTd05ETWFtbFFhVWgtbmZwaExLY0dUeFRPRE0ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tazhzdmltIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImRlZmF1bHQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5YWFmNWJlYi02MTIzLTQyYWItYTE3Ni04ODUxZWJkNGFkOTAiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkZWZhdWx0In0.qSxCrtCjtVG1AbyeDuXpkxrenskrSPLx9pnLhNyL5Bgckis97ILaqSjf4IbUL0myqQUKET9smlNxXm1Hjk7bmjL5TBUMNJiewywuOXZkQhF3xqJWmdcl_9bPWcYp0D4olHbtPNpgImbRLn_ZfzymdqtYx6I-SRUCKQunkAGq4dxOM9wLQ3VPLja1li9lDeU6NXgkX7XGO8rA2m1Q0tPzINVNanN-z0Rut0XdWzEhepDwo_MyLnLdhg4oC5gbfNqbUwwqkDDV3Pt6c6_d1vXohDeS5VJETrTZG16qbDY5Ah8YPeiayfLseuznk3rui3lYUWvHZvO4J_ZCUV1LZ7zcOQ"
+ssl_ca_cert: "-----BEGIN CERTIFICATE-----
+MIIDBTCCAe2gAwIBAgIIWX6AGYfkbaYwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
+AxMKa3ViZXJuZXRlczAeFw0yNDExMTExMDI1MzhaFw0zNDExMDkxMDMwMzhaMBUx
+EzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC+jwt4uPT7uyx6DlWrJ7OnnfQFKKfPJ/rHEOiVpV57qG6JW9rCnYzXZ0i/
+eEVDXtQnQ/NZ2VXPY0UZI30Ew+w99z+Eh/m/MCsyTOq5YUuN3/5NQ4NsXc8VBHSm
+yoelJLw2hPwmzNsgDouZqtvIURFuwxL4tc1/UeH51sj4cw4l6yJcRC0I2llYxF8Q
+znTaOWeQ5LuaxoHOFb01wENFacoRNgcNoFB7oVeb5h+c0hM+cHqeRdQVc96VQDxa
+ynqIzdJ+whDmzEif5RK2R7LWNLXLQlEIUkGnOg+iaLdXPbGKzS38o5mZqRheXVHD
+nFb5ZeOQ1oqPStQJCz7cNMMkS983AgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTqeh0oly+huPQfzIMaslJesN+CsjAV
+BgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQBrpAL5oE6b
+Dw/di4gowUfv5boTpHmbpRxXhA/MBL5THTV0rR7hkdt3O+j2wsoGWrbuSkyfBhUi
+AVp3V98+qNmKiLKKYlugCTCUK3J0uHewWdlCY+voKiBR0oMdzMGqbApqZ7GFPIVJ
+ORycUf3R8Gg07BeMzrXNM4AylRu8jsfwa/xCLCLg4ueNwHxQYHlA77vmj+2tTb8K
+mmkaAGRaIZrzH+Y/Dg7whAKtym7S5TxutXqWa3mRL/2M2kwP+Y3RdhXqvAFlmytK
+eHFOJSeuYYa1kLTiCMknLAcwd6XLA7CyWiS1FJmSHGp5eIlCUku4oV7IhaMb6Fgp
+mRmUryUhgyKs
+-----END CERTIFICATE-----"
project_name: "default"
-ssl_ca_cert: None
type: "kubernetes"
+
Config for Kubernetes VIM 'tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml' generated.
--- tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml_bk 2024-11-11 02:46:00.096741454 +0000
+++ tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml 2024-12-20 09:36:58.733045070 +0000
@@ -1,7 +1,25 @@
-auth_url: "https://127.0.0.1:6443"
-bearer_token: "secret_token"
+auth_url: "https://192.168.56.23:6443"
+bearer_token: "eyJhbGciOiJSUzI1NiIsImtpZCI6IkItQ1FuM2FCcmNDaF9uRzNTd05ETWFtbFFhVWgtbmZwaExLY0dUeFRPRE0ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tazhzdmltIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImRlZmF1bHQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5YWFmNWJlYi02MTIzLTQyYWItYTE3Ni04ODUxZWJkNGFkOTAiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkZWZhdWx0In0.qSxCrtCjtVG1AbyeDuXpkxrenskrSPLx9pnLhNyL5Bgckis97ILaqSjf4IbUL0myqQUKET9smlNxXm1Hjk7bmjL5TBUMNJiewywuOXZkQhF3xqJWmdcl_9bPWcYp0D4olHbtPNpgImbRLn_ZfzymdqtYx6I-SRUCKQunkAGq4dxOM9wLQ3VPLja1li9lDeU6NXgkX7XGO8rA2m1Q0tPzINVNanN-z0Rut0XdWzEhepDwo_MyLnLdhg4oC5gbfNqbUwwqkDDV3Pt6c6_d1vXohDeS5VJETrTZG16qbDY5Ah8YPeiayfLseuznk3rui3lYUWvHZvO4J_ZCUV1LZ7zcOQ"
+ssl_ca_cert: "-----BEGIN CERTIFICATE-----
+MIIDBTCCAe2gAwIBAgIIWX6AGYfkbaYwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
+AxMKa3ViZXJuZXRlczAeFw0yNDExMTExMDI1MzhaFw0zNDExMDkxMDMwMzhaMBUx
+EzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC+jwt4uPT7uyx6DlWrJ7OnnfQFKKfPJ/rHEOiVpV57qG6JW9rCnYzXZ0i/
+eEVDXtQnQ/NZ2VXPY0UZI30Ew+w99z+Eh/m/MCsyTOq5YUuN3/5NQ4NsXc8VBHSm
+yoelJLw2hPwmzNsgDouZqtvIURFuwxL4tc1/UeH51sj4cw4l6yJcRC0I2llYxF8Q
+znTaOWeQ5LuaxoHOFb01wENFacoRNgcNoFB7oVeb5h+c0hM+cHqeRdQVc96VQDxa
+ynqIzdJ+whDmzEif5RK2R7LWNLXLQlEIUkGnOg+iaLdXPbGKzS38o5mZqRheXVHD
+nFb5ZeOQ1oqPStQJCz7cNMMkS983AgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTqeh0oly+huPQfzIMaslJesN+CsjAV
+BgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQBrpAL5oE6b
+Dw/di4gowUfv5boTpHmbpRxXhA/MBL5THTV0rR7hkdt3O+j2wsoGWrbuSkyfBhUi
+AVp3V98+qNmKiLKKYlugCTCUK3J0uHewWdlCY+voKiBR0oMdzMGqbApqZ7GFPIVJ
+ORycUf3R8Gg07BeMzrXNM4AylRu8jsfwa/xCLCLg4ueNwHxQYHlA77vmj+2tTb8K
+mmkaAGRaIZrzH+Y/Dg7whAKtym7S5TxutXqWa3mRL/2M2kwP+Y3RdhXqvAFlmytK
+eHFOJSeuYYa1kLTiCMknLAcwd6XLA7CyWiS1FJmSHGp5eIlCUku4oV7IhaMb6Fgp
+mRmUryUhgyKs
+-----END CERTIFICATE-----"
project_name: "default"
-ssl_ca_cert: None
type: "kubernetes"
extra:
- use_helm: true
\ No newline at end of file
+ use_helm: true
+--------------------------------------+------+----------------------------------+-----------+------------+--------+
| ID | Name | Tenant_id | Type | Is Default | Status |
+--------------------------------------+------+----------------------------------+-----------+------------+--------+
| 76bf55a1-7df9-4d0b-999a-9febd074dc6f | VIM0 | 89047a7c599f44978802b1330fecc646 | openstack | True | ACTIVE |
+--------------------------------------+------+----------------------------------+-----------+------------+--------+
+----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| auth_cred | { |
| | "bearer_token": "***", |
| | "ssl_ca_cert": "b'gAAAAABnZTrA9T2bRK905WuX6oZxoIcorZEsX_St5bu-iKqepORVUseveibpN5NLeMDj5K8n3bTV6VFegWuoBK467CZ8re-mGEFfGXMFjhGF5kjDAf_Ec- |
| | EqrC5c4s1qNW7oaklGL1lNg6yDvbDPhGp_N79pyfn9bMbocEh_tBK_CCOythaJ1QudoObqbXglmgTY45xH_-h4WzZWd0TTC-p-ESd5BOlCLM- |
| | uCHunD1SN9Ext6dy3vsfU6mVMDNaSiEgHMUe0zpiuOBJd0ld-U1NtXRmbmTw_Stg66Gx8AVLEIDxmqFmsAjzK- |
| | XW62L3N2NqXJ0WBc_E0VmSpnvXOvLR1cpNkCL08ZPqJ5jZonriTFoEId9V2e1UFQrQBnigiwvGEH8_GQ4mZI1LxIqzQLpUwkd_jPtzsCTpdRnFeec6YmJms2JCoIWrNOQOeGwpXXqSRIVk9LqqzMQ5pBhx7LH-ODwJy8 |
| | GLHc2cEoy2OiyZ4jfhkhBnBzK99QqFWGTkWAoOfbCAxSswnQQNXJPZDB8rZ_tBowvUGAHh1WaIz3c5nArKEM2ynpB_naii6KmsGTP7cA3Vh0uF5DAn3vDk1W_sjt93edzUT9k2sHpwSvqcLkJep3HibGeFKxO72AljgE |
| | UOUAX0ap63x3Hf5- |
| | 1HuZrRyWWBE7Je4QoDVE_vcGVQlVeTC5BihADUPHzhRc1S8FbtGGg5WALV65c7HdsvSRzXtzN4_qEBz_0aD7BcFBXSoXimk3er8DT96zH6MADc62Z_4vnHglwV_jpRkfk1HuMpwCtobRuh5T6RX9tQ9Bbldx3G8gOoMz |
| | mhcdwDOX8G5ILd_UdArwS9_5Bxm7T9nNfTTiadmHj7saYPe3uQim0BTuqcPxQOieXvukmz7ge29HLJBOZ8DrwRQX8xnXIzf5AezaGzpWV61ADa8VlGei62cbJa3fM1rxboB_YVETfjjReNqT84n8s7sSy2KrjcqOXJA7 |
| | cwi_Bg1z1zXzd2Dp3bmqJzFYuIcHc0errA4GajrtyppmMxIteZeNB8ai6Kwc9Zi2zra4nh7r3Ybbn_zR9Hg4Zb0RYD9BdRQAb4qJTK1zFA5bgCGMrWCaZb- |
| | e9UBrCXo5_BkGPg9Ow0emifG2fCkB0qLN7yAuoMl34xuBs7v6ZkA0TSRTh2Mdg5fnNUPsAXH32xJ0fDkiKA9pcR9dkBbG04flDqZpy2niV19PF2JYHo- |
| | 1Zej591qKwEan_tpGDOzArFDNAFYrAkScFhCIzlE53MCsq99n-ETLMYMTZRZtbAWcP8BQRerbEaZsRBUw6YsqI9MLKeTaiAZz8ZVt_JKwSIVqs-Mlx9jlcE- |
| | NsPNMFQSPl8WqEJlCvAI_HWOhang59N0UasjcQTw44H6lVXzQB8CfNBea1uQS4dDm43zITaScto2wwccLyTSg9RAAwneWOuDDaLPNu0vQKf5IJ5eD_w-fbH-U- |
| | Vzuw2RyNCfbOaTnqzb66nR8JEqQ8P64TkXAgkl2K6y_yXYIxEd2SkGjMSq3mTnx6SNbLpcwY7DsT9v0iNJEyemB8078EWZOaZr1_WqlH8uEA=='", |
| | "auth_url": "https://192.168.56.23:6443", |
| | "username": "None", |
| | "key_type": "barbican_key", |
| | "secret_uuid": "***" |
| | } |
| auth_url | https://192.168.56.23:6443 |
| created_at | 2024-12-20 09:37:05.618109 |
| description | Kubernetes VIM |
| extra | helm_info={'masternode_ip':['192.168.56.23'],'masternode_username':'helm','masternode_password':'helm_password'} |
| id | adf0cca6-8d5d-4e92-9e21-a5638ddf5113 |
| is_default | False |
| name | vim-kubernetes |
| placement_attr | { |
| | "regions": [ |
| | "default", |
| | "kube-flannel", |
| | "kube-node-lease", |
| | "kube-public", |
| | "kube-system" |
| | ] |
| | } |
| project_id | 89047a7c599f44978802b1330fecc646 |
| status | ACTIVE |
| type | kubernetes |
| updated_at | None |
| vim_project | { |
| | "name": "default" |
| | } |
+----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| auth_cred | { |
| | "bearer_token": "***", |
| | "ssl_ca_cert": "b'gAAAAABnZTrD-FN762CKIgk_kmZym19PDCeTd9Bh8rXTwdHS_p5lKGg0aGGNdJmdSZwmpWI4HwrJxZeq_LCaritEqF4_HukQs1Z7jq5G_Zj9b- |
| | JztptO530v6_LjrKVJmnYQb8Jupsx01Z52gfkSbfaBmThAE4SbmpOBBpHfdcBwUFpHe3OAIzl9GFG2wWNteVrZ- |
| | TLhFro9YwlM8oh9kQKVOHEapiA8SFrIGPpOn1XcWN6t__KbqDrU2nrK0rDry0jCsPuRZ2MPIrKjCczphoA2MmgDSiEO-CEfdLHfegULpKYfAEgxRr-CpNYJCcRNHIJvrEzxZHiBHLD38q0w7XsyAdaTNLn6Z6p-S-nm_ |
| | rucwA3lwz7EaVxGBImsoO2XRgydLRsZy5M60- |
| | zjWhiiIVtsVQBF9U6WXHl432okRZxLG0TIQIiwiZMh1S9cGaCsiPkGbYZcl176Li3pfA1ERbzAdlK7Fro8tKwEzc3qy0Rs9aMMP5VbHCGNISLtQHVOiQfedbykuQqKje9ILs7QyIHbtPj_zxe1o0XYfE8Y0ALc1jUBfR |
| | 3F9M7VSun-Q9XYdUuuejGNtkGAZTaLTZVuwZiLRVg4hyNN1Qz09Qgm- |
| | 9Qnhq6ygpThX2ytcQeds0zwdC3VQ9tsN1dMzjz_xR49QwwlIYYL0d_gMMuQnwnSbW5YEB3qznCNxDdZ2tG_EGBQE7T5UybCQU7UzcwCxpXnh6-m1aA5aoY5EryAhaVWukQ9Iv- |
| | jjyzGVh50gMnO8UJBjF4N0JH-7fzRsqxGxOpm7NTpBURMzrdFtq4wqDbZ_KGXWL_rAhRN7rlkizvm2-4JDRhEjndHHnN41AJbj8zEM5_u_ufbPklv6Sy6hQ70j8ojVz8Bqxqv5RF39NPiT-kgVJsMqkrX0C7_yvkva9O |
| | V9SxorgdhyksyhPFUgCVraLdXVJY95UKsQeA_GpTTQJ0CryD6OWU0BhUAN5SvqARr4zElA_TAvjaKxr4v7fVFddT0v2DcncG2OhOe6k82svwPVvvhA8avHLgTHOdl_qSPDrv9AWguBom0wqQex_EgcsLdwrBFMI2uJqe |
| | inn1ISd-Lg6JKcYfrC9klVWSw9XNRn9jM_fhd3SfttzSId6NPm5y_rSJlE_aE6UmlbMBRJzM0_zaFuI0IYzu-_If63ADCB9gN9b1XTlCgb64VWKucse_aahftvTi73arcBegUKu- |
| | KScpZ9BIFyQHcrPiR3uAeLHxn_wXv2-5Nhxw35IMZzGBgael1N8bBaSEsgAGLfl2kNjt9j1O1XryDdmiqYmPaMyqq1M02CpAHoI7AIUKvv3-4ULHj7yT3MYoe0SFVZ7J_iKHl0wZKm- |
| | qmP8CRL34hQbzs89pkCIrYKmo2KxcmcAdmYdBuQiVhwWqW4VDuA64wB0IP-QIQTVrtmikcFYH8huT85m-rU5230f2MiamQMZ01ADV1PMu8uJf- |
| | ASgqfaesWeC61Of4nhbIZ5Wm1Rp0Ln2Y45CmiM5V5DbtXsHeYhkwT8KjEO9LvJ7WNLlYyuRMFO6Xwh8bEjE78H91RAKjgQQurL65svtLxA=='", |
| | "auth_url": "https://192.168.56.23:6443", |
| | "username": "None", |
| | "key_type": "barbican_key", |
| | "secret_uuid": "***" |
| | } |
| auth_url | https://192.168.56.23:6443 |
| created_at | 2024-12-20 09:37:08.136510 |
| description | Kubernetes VIM |
| extra | use_helm=True |
| id | 4d843bcc-af0b-42ab-86dd-dd710905a3c2 |
| is_default | False |
| name | vim-kubernetes-helm |
| placement_attr | { |
| | "regions": [ |
| | "default", |
| | "kube-flannel", |
| | "kube-node-lease", |
| | "kube-public", |
| | "kube-system" |
| | ] |
| | } |
| project_id | 89047a7c599f44978802b1330fecc646 |
| status | ACTIVE |
| type | kubernetes |
| updated_at | None |
| vim_project | { |
| | "name": "default" |
| | } |
+----------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+--------------------------------------+---------------------+----------------------------------+------------+------------+--------+
| ID | Name | Tenant_id | Type | Is Default | Status |
+--------------------------------------+---------------------+----------------------------------+------------+------------+--------+
| 4d843bcc-af0b-42ab-86dd-dd710905a3c2 | vim-kubernetes-helm | 89047a7c599f44978802b1330fecc646 | kubernetes | False | ACTIVE |
| 76bf55a1-7df9-4d0b-999a-9febd074dc6f | VIM0 | 89047a7c599f44978802b1330fecc646 | openstack | True | ACTIVE |
| adf0cca6-8d5d-4e92-9e21-a5638ddf5113 | vim-kubernetes | 89047a7c599f44978802b1330fecc646 | kubernetes | False | ACTIVE |
+--------------------------------------+---------------------+----------------------------------+------------+------------+--------+
constants.py container_update_mgmt.py __init__.py __pycache__ vnflcm_abstract_driver.py vnflcm_noop.py
--- /opt/stack/tacker/setup.cfg_bk 2024-11-11 02:46:00.132741905 +0000
+++ /opt/stack/tacker/setup.cfg 2024-12-20 09:37:11.401141579 +0000
@@ -63,6 +63,7 @@
tacker.tacker.mgmt.drivers =
noop = tacker.vnfm.mgmt_drivers.noop:VnfMgmtNoop
vnflcm_noop = tacker.vnfm.mgmt_drivers.vnflcm_noop:VnflcmMgmtNoop
+ mgmt-container-update = tacker.vnfm.mgmt_drivers.container_update_mgmt:ContainerUpdateMgmtDriver
oslo.config.opts =
tacker.auth = tacker.auth:config_opts
tacker.common.config = tacker.common.config:config_opts
--- /etc/tacker/tacker.conf_bk 2024-11-11 03:11:18.252006525 +0000
+++ /etc/tacker/tacker.conf 2024-12-20 09:37:11.781144499 +0000
@@ -3059,6 +3059,7 @@
# MGMT driver to communicate with Hosting VNF/logical service instance tacker
# plugin will use (list value)
#vnflcm_mgmt_driver = vnflcm_noop
+vnflcm_mgmt_driver = vnflcm_noop,mgmt-container-update
#
# From tacker.vnfm.plugin
...
copying tacker/tests/var/ca.crt -> build/lib/tacker/tests/var
copying tacker/tests/var/certandkey.pem -> build/lib/tacker/tests/var
copying tacker/tests/var/certificate.crt -> build/lib/tacker/tests/var
copying tacker/tests/var/privatekey.key -> build/lib/tacker/tests/var

tools/doc_samples/setting_ft/kubernetes/kube-controller-k8s.sh

@@ -0,0 +1,82 @@
#!/bin/sh
# kube-controller-k8s.sh
cd "${HOME}" || exit
### Add values to unqualified-search-registries in the file registries.conf
if [ -f "/etc/containers/registries.conf" ]
then
sudo cp -p /etc/containers/registries.conf /etc/containers/registries.conf_bk
sudo sed -i "s/unqualified-search-registries\ =\ \[\"docker\.io\",\ \"quay\.io\"\]/unqualified-search-registries\ =\ \[\"docker\.io\",\ \"k8s\.gcr\.io\",\ \"quay\.io\",\ \"celebdor\"\]/" \
/etc/containers/registries.conf
else
echo "the file /etc/containers/registries.conf is not exist."
fi
### Removing ip link and restarting kubelet
# kubectl get all -A
# ip link
sudo ip link set cni0 down
sudo ip link set flannel.1 down
# ip link
sudo ip link delete cni0
sudo ip link delete flannel.1
ip link
sudo systemctl restart kubelet
kubectl get all -A
### Restart coredns
kubectl delete pod -n kube-system $(kubectl get pod -n kube-system --no-headers -o custom-columns=":metadata.name" | grep coredns | tr -s '\n' ' ')
kubectl get all -A
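### (Optional sketch) Wait for the restarted coredns pods to become Ready
### again; the label k8s-app=kube-dns is the upstream default and may differ.
# kubectl -n kube-system wait --for=condition=Ready pod -l k8s-app=kube-dns --timeout=120s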
### Transfer the file .kube/config from this host controller-k8s to the host controller-tacker.
### Install the tool sshpass so that scp can run without an interactive password prompt.
sudo apt-get -y install sshpass
sshpass -p "vagrant" scp -po "StrictHostKeyChecking no" .kube/config vagrant@controller-tacker:/tmp/kubeconfig
### Create and configure user helm.
sudo adduser --disabled-password --gecos "" "helm"
echo "helm:helm_password" | sudo chpasswd
sudo sh -c "cat >> /etc/sudoers.d/50_helm_sh" << EOF
helm ALL=(root) NOPASSWD:ALL
EOF
if [ -d "/home/helm" ]
then
sudo cp -pr .kube /home/helm/.
sudo ls -la /home/helm/.kube
sudo chown -R helm:helm /home/helm/.kube
else
echo "the directory /home/helm is not exist."
fi
sudo ls -la /home/helm/.kube
sudo mkdir -p /var/tacker/helm
sudo chmod 755 /var/tacker/helm
sudo chown helm:helm /var/tacker/helm
ls -l /var/tacker/.
if [ -f "/etc/ssh/sshd_config" ]
then
sudo cp -p /etc/ssh/sshd_config /etc/ssh/sshd_config_bk
sudo sh -c "sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config"
diff -u /etc/ssh/sshd_config_bk /etc/ssh/sshd_config
else
echo "the file /etc/ssh/sshd_config is not exist."
fi
sudo systemctl restart sshd
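### (Optional sketch) Print the effective sshd configuration to confirm that
### password authentication is now enabled (requires root).
# sudo sshd -T | grep -i passwordauthentication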
### Install the Helm package.
curl -O https://get.helm.sh/helm-v3.15.4-linux-amd64.tar.gz
if [ -f "helm-v3.15.4-linux-amd64.tar.gz" ]
then
tar -zxvf helm-v3.15.4-linux-amd64.tar.gz
else
echo "the file helm-v3.15.4-linux-amd64.tar.gz is not exist."
fi
if [ -f "linux-amd64/helm" ]
then
sudo mv linux-amd64/helm /usr/local/bin/helm
else
echo "the file linux-amd64/helm is not exist."
fi
helm version
# echo "End shell script ${0}"


@@ -0,0 +1,182 @@
#!/bin/sh
# kube-controller-tacker.sh
CONTROLLER_IP_ADDRESS="192.168.56.21"
OS_AUTH_URL="http://${CONTROLLER_IP_ADDRESS}/identity"
CONTROLLER_K8S_IP_ADDRESS="192.168.56.23"
### Change the auth_url IP address in the file local-vim.yaml
cd "${HOME}" || exit
if [ -f "tacker/samples/tests/etc/samples/local-vim.yaml" ]
then
cp -p tacker/samples/tests/etc/samples/local-vim.yaml tacker/samples/tests/etc/samples/local-vim.yaml_bk
sed -i "s/auth_url:\ http:\/\/127.0.0.1\/identity/auth_url:\ http:\/\/${CONTROLLER_IP_ADDRESS}\/identity/" tacker/samples/tests/etc/samples/local-vim.yaml
else
echo "the file tacker/samples/tests/etc/samples/local-vim.yaml is not exist."
fi
### Register the default VIM
if [ -d "${HOME}/tacker" ]
then
cd "${HOME}/tacker" || exit
else
echo "the directory ${HOME}/tacker is not exist."
fi
openstack vim register \
--os-username nfv_user \
--os-project-name nfv \
--os-password devstack \
--os-auth-url "${OS_AUTH_URL}" \
--os-project-domain-name Default \
--os-user-domain-name Default \
--is-default \
--description "Default VIM" \
--config-file /opt/stack/tacker/samples/tests/etc/samples/local-vim.yaml \
VIM0
cd "${HOME}" || exit
### Install the tool kubectl on this host controller-tacker.
curl -LO https://dl.k8s.io/release/v1.30.5/bin/linux/amd64/kubectl
curl -LO "https://dl.k8s.io/release/v1.30.5/bin/linux/amd64/kubectl.sha256"
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
kubectl version --client
mkdir .kube
if [ -f "/tmp/kubeconfig" ]
then
sudo mv /tmp/kubeconfig .kube/config
sudo chown stack:stack .kube/config
sudo chmod 600 .kube/config
else
echo "the file /tmp/kubeconfig is not exist."
fi
ls -l .kube
kubectl cluster-info
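### (Optional sketch) A quick readiness check of the cluster nodes before
### registering the Kubernetes VIM.
# kubectl get nodes -o wide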
### Register the Secret and modify the file local-k8s-vim.yaml using the tool gen_vim_config.sh.
# kubectl get secret
if [ -f "tacker/samples/tests/etc/samples/local-k8s-vim.yaml" ] && \
[ -f "tacker/tools/gen_vim_config.sh" ]
then
cp -p tacker/samples/tests/etc/samples/local-k8s-vim.yaml tacker/samples/tests/etc/samples/local-k8s-vim.yaml_bk
bash tacker/tools/gen_vim_config.sh -p default -t k8s -e https://${CONTROLLER_K8S_IP_ADDRESS}:6443 --k8s-use-cert -o tacker/samples/tests/etc/samples/local-k8s-vim.yaml
kubectl get secret
diff -u tacker/samples/tests/etc/samples/local-k8s-vim.yaml_bk tacker/samples/tests/etc/samples/local-k8s-vim.yaml
# cat tacker/samples/tests/etc/samples/local-k8s-vim.yaml
else
echo "the file tacker/samples/tests/etc/samples/local-k8s-vim.yaml is not exist."
echo "or the file tacker/tools/gen_vim_config.sh is not exist."
fi
### Add the extra configuration to the file local-k8s-vim.yaml.
if [ -f "tacker/samples/tests/etc/samples/local-k8s-vim.yaml" ]
then
cp -p tacker/samples/tests/etc/samples/local-k8s-vim.yaml tacker/samples/tests/etc/samples/local-k8s-vim.yaml_bk2
cat << EOF >> "tacker/samples/tests/etc/samples/local-k8s-vim.yaml"
extra:
helm_info: "{'masternode_ip':['${CONTROLLER_K8S_IP_ADDRESS}'],'masternode_username':'helm','masternode_password':'helm_password'}"
EOF
# cat tacker/samples/tests/etc/samples/local-k8s-vim.yaml
else
echo "the file tacker/samples/tests/etc/samples/local-k8s-vim.yaml is not exist."
fi
### Modify the file local-k8s-vim-helm.yaml using the tool gen_vim_config.sh.
if [ -f "tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml" ] && \
[ -f "tacker/tools/gen_vim_config.sh" ]
then
cp -p tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml_bk
bash tacker/tools/gen_vim_config.sh -p default -t k8s -e https://${CONTROLLER_K8S_IP_ADDRESS}:6443 --k8s-use-cert --k8s-use-helm -o tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml
diff -u tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml_bk tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml
# cat tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml
else
echo "the file tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml is not exist."
echo "or the file tacker/tools/gen_vim_config.sh is not exist."
fi
### Register the Kubernetes VIM
openstack vim list \
--os-username nfv_user \
--os-project-name nfv \
--os-password devstack \
--os-auth-url "${OS_AUTH_URL}" \
--os-project-domain-name Default \
--os-user-domain-name Default
openstack vim register \
--os-username nfv_user \
--os-project-name nfv \
--os-password devstack \
--os-auth-url "${OS_AUTH_URL}" \
--os-project-domain-name Default \
--os-user-domain-name Default \
--description "Kubernetes VIM" \
--config-file tacker/samples/tests/etc/samples/local-k8s-vim.yaml \
vim-kubernetes
openstack vim register \
--os-username nfv_user \
--os-project-name nfv \
--os-password devstack \
--os-auth-url "${OS_AUTH_URL}" \
--os-project-domain-name Default \
--os-user-domain-name Default \
--description "Kubernetes VIM" \
--config-file tacker/samples/tests/etc/samples/local-k8s-vim-helm.yaml \
vim-kubernetes-helm
openstack vim list \
--os-username nfv_user \
--os-project-name nfv \
--os-password devstack \
--os-auth-url "${OS_AUTH_URL}" \
--os-project-domain-name Default \
--os-user-domain-name Default
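### (Optional sketch) Inspect one of the VIMs registered above in detail; the
### name vim-kubernetes-helm matches the registration command above, and the
### output should resemble the sample table shown earlier in this document.
# openstack vim show vim-kubernetes-helm \
#     --os-username nfv_user --os-project-name nfv --os-password devstack \
#     --os-auth-url "${OS_AUTH_URL}" \
#     --os-project-domain-name Default --os-user-domain-name Default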
### Set up the MgmtDriver.
if [ -d "/opt/stack/tacker/tacker/vnfm/mgmt_drivers" ] && \
[ -f "/opt/stack/tacker/samples/mgmt_driver/kubernetes/container_update/container_update_mgmt.py" ]
then
# ls /opt/stack/tacker/tacker/vnfm/mgmt_drivers
cp -p /opt/stack/tacker/samples/mgmt_driver/kubernetes/container_update/container_update_mgmt.py /opt/stack/tacker/tacker/vnfm/mgmt_drivers/.
ls /opt/stack/tacker/tacker/vnfm/mgmt_drivers
sudo chown stack:stack /opt/stack/tacker/tacker/vnfm/mgmt_drivers/container_update_mgmt.py
else
echo "the directory /opt/stack/tacker/tacker/vnfm/mgmt_drivers is not exist."
echo "or the file /opt/stack/tacker/samples/mgmt_driver/kubernetes/container_update/container_update_mgmt.py is not exist."
fi
if [ -f "/opt/stack/tacker/setup.cfg" ]
then
cp -p /opt/stack/tacker/setup.cfg /opt/stack/tacker/setup.cfg_bk
sudo sed -i "/VnflcmMgmtNoop/a \ \ \ \ mgmt-container-update = \
tacker.vnfm.mgmt_drivers.container_update_mgmt:ContainerUpdateMgmtDriver" \
/opt/stack/tacker/setup.cfg
diff -u /opt/stack/tacker/setup.cfg_bk /opt/stack/tacker/setup.cfg
else
echo "the file /opt/stack/tacker/setup.cfg is not exist."
fi
if [ -f "/etc/tacker/tacker.conf" ]
then
cp -p /etc/tacker/tacker.conf /etc/tacker/tacker.conf_bk
sudo sed -i "/vnflcm_mgmt_driver = vnflcm_noop/a vnflcm_mgmt_driver = \
vnflcm_noop,mgmt-container-update" /etc/tacker/tacker.conf
diff -u /etc/tacker/tacker.conf_bk /etc/tacker/tacker.conf
else
echo "the file /etc/tacker/tacker.conf is not exist."
fi
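### (Optional sketch) Confirm the active MGMT driver setting after the edit.
# grep -n "^vnflcm_mgmt_driver" /etc/tacker/tacker.conf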
if [ -d "/opt/stack/tacker" ]
then
cd "/opt/stack/tacker" || exit
sudo python3 setup.py build
sudo chown -R stack:stack /opt/stack/tacker/
else
echo "the directory /opt/stack/tacker is not exist."
fi
sudo systemctl restart devstack@tacker-conductor
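### (Optional sketch) Confirm the conductor restarted cleanly; the unit name
### assumes the standard DevStack systemd naming used above.
# systemctl --no-pager status devstack@tacker-conductor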
cd "${HOME}" || exit
# echo "End shell script ${0}"


@@ -0,0 +1,9 @@
#!/bin/sh
# kube-controller.sh
### Open Virtual Switch (OVS)
# sudo ovs-vsctl show
sudo ovs-vsctl add-port br-ex eth1
sudo ovs-vsctl show
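### (Optional sketch) Confirm eth1 is now attached to br-ex; both names are
### taken from the add-port command above.
# sudo ovs-vsctl list-ports br-ex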
# echo "End shell script ${0}"


@@ -0,0 +1,35 @@
#!/bin/sh
# openstack-controller-tacker.sh
CONTROLLER_IP_ADDRESS="192.168.56.11"
OS_AUTH_URL="http://${CONTROLLER_IP_ADDRESS}/identity"
### Change the auth_url IP address in the file local-vim.yaml
if [ -f "tacker/samples/tests/etc/samples/local-vim.yaml" ]
then
cp -p tacker/samples/tests/etc/samples/local-vim.yaml tacker/samples/tests/etc/samples/local-vim.yaml_bk
sed -i "s/auth_url:\ http:\/\/127.0.0.1\/identity/auth_url:\ http:\/\/${CONTROLLER_IP_ADDRESS}\/identity/" tacker/samples/tests/etc/samples/local-vim.yaml
else
echo "the file tacker/samples/tests/etc/samples/local-vim.yaml is not exist."
fi
### Register the default VIM
if [ -d "${HOME}/tacker" ]
then
cd "${HOME}/tacker" || exit
else
echo "the directory ${HOME}/tacker is not exist."
fi
openstack vim register \
--os-username nfv_user \
--os-project-name nfv \
--os-password devstack \
--os-auth-url "${OS_AUTH_URL}" \
--os-project-domain-name Default \
--os-user-domain-name Default \
--is-default \
--description "Default VIM" \
--config-file /opt/stack/tacker/samples/tests/etc/samples/local-vim.yaml \
VIM0
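### (Optional sketch) Confirm the registration with the same credentials.
# openstack vim list \
#     --os-username nfv_user --os-project-name nfv --os-password devstack \
#     --os-auth-url "${OS_AUTH_URL}" \
#     --os-project-domain-name Default --os-user-domain-name Default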
# echo "End shell script ${0}"


@@ -0,0 +1,36 @@
#!/bin/sh
# openstack-controller.sh
DATABASE_PASSWORD="secretdatabase"
### Open Virtual Switch (OVS)
sudo ovs-vsctl add-port br-ex eth1
sudo ovs-vsctl show
### Multi-node setup: discover compute nodes recorded in the MySQL cell database
cd "${HOME}" || exit
mysql -p --password="${DATABASE_PASSWORD}" << END_OF_INPUT_1
use nova_cell1
select host,hypervisor_hostname, mapped, uuid from compute_nodes;
quit
END_OF_INPUT_1
/bin/rm -f "./settings-compute-nodes.sh"
cat << EOF > "./settings-compute-nodes.sh"
#!/bin/sh
${SHELL} --rcfile /dev/fd/3 3<< END_OF_SHELL
source data/venv/bin/activate
nova-manage cell_v2 discover_hosts
mysql -p --password="${DATABASE_PASSWORD}" << END_OF_INPUT_2
use nova_cell1
select host,hypervisor_hostname, mapped, uuid from compute_nodes;
quit
END_OF_INPUT_2
deactivate
exit
END_OF_SHELL
EOF
# source "./settings-compute-nodes.sh"
. "./settings-compute-nodes.sh"
/bin/rm -f "./settings-compute-nodes.sh"
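### (Optional sketch) List the hosts now mapped to cell1; nova-manage must run
### inside the same DevStack venv sourced in the generated script above, and
### the venv path data/venv is an assumption carried over from that script.
# . data/venv/bin/activate && nova-manage cell_v2 list_hosts && deactivate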
# echo "End shell script ${0}"