diff --git a/.zuul.yaml b/.zuul.yaml
index c46c3ea0e..4b836b54f 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -63,10 +63,10 @@
- compute1
- job:
- name: tacker-functional-devstack-multinode-legacy
+ name: tacker-functional-devstack-multinode-sol
parent: devstack
description: |
- Base multinodes job for devstack-based functional tests
+ Multinodes job for SOL devstack-based functional tests
nodeset: openstack-4-nodes-jammy
pre-run: playbooks/devstack/pre.yaml
run: playbooks/devstack/run.yaml
@@ -82,18 +82,14 @@
- openstack/nova
- openstack/placement
- openstack/aodh
- - openstack/blazar
- - openstack/blazar-nova
- openstack/horizon
- openstack/barbican
- openstack/heat
- openstack/networking-sfc
- openstack/python-barbicanclient
- - openstack/python-blazarclient
- openstack/python-tackerclient
- openstack/tacker
- openstack/tacker-horizon
- - x/fenix
vars:
devstack_localrc:
CELLSV2_SETUP: singleconductor
@@ -135,8 +131,6 @@
networking-sfc: https://opendev.org/openstack/networking-sfc
aodh: https://opendev.org/openstack/aodh
barbican: https://opendev.org/openstack/barbican
- blazar: https://opendev.org/openstack/blazar
- fenix: https://opendev.org/x/fenix
devstack_services:
# Core services enabled for this branch.
# This list replaces the test-matrix.
@@ -194,7 +188,7 @@
# Tacker services
tacker: true
tacker-conductor: true
- tox_envlist: dsvm-functional-legacy
+ tox_envlist: dsvm-functional-sol
group-vars:
compute:
# Since a VirtualInterfaceCreateException occurs during a test,
@@ -218,15 +212,6 @@
TACKER_HOST: "{{ hostvars['controller-tacker']['nodepool']['private_ipv4'] }}"
TACKER_MODE: standalone
IS_ZUUL_FT: True
- # NOTES:
- # - Without this ugly inline template, we would have to overwrite devstack_localrc
- # as a whole in some way. However keeping up with parent jobs' definitions would
- # be too costly. E.g., ADMIN_PASSWORD, NETWORK_GATEWAY, etc. Too many variables.
- # - The reason we set CEILOMETER_BACKEND=none for compute nodes is that otherwise
- # gnocchi setup would run on every compute nodes (esp. multiple asynchronous calls
- # of recreate_database() would be disastrous). Unused api servers would also be
- # deployed on each compute node.
- CEILOMETER_BACKEND: "{% if 'compute' in group_names %}none{% else %}gnocchi{% endif %}"
Q_SERVICE_PLUGIN_CLASSES: "qos,\
networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,\
neutron.services.qos.qos_plugin.QoSPlugin,\
@@ -246,15 +231,6 @@
# Neutron services
q-ovn-metadata-agent: true
-- job:
- name: tacker-functional-devstack-multinode-sol
- parent: tacker-functional-devstack-multinode-legacy
- description: |
- Multinodes job for SOL devstack-based functional tests
- host-vars:
- controller-tacker:
- tox_envlist: dsvm-functional-sol
-
- job:
name: tacker-functional-devstack-multinode-sol-v2
parent: tacker-functional-devstack-multinode-sol
@@ -369,7 +345,6 @@
- openstack/octavia
- openstack/placement
- openstack/python-barbicanclient
- - openstack/python-blazarclient
- openstack/python-octaviaclient
- openstack/python-tackerclient
- openstack/tacker
@@ -561,15 +536,6 @@
vars:
setup_multi_tenant: true
-- job:
- name: tacker-functional-devstack-multinode-libs-master
- parent: tacker-functional-devstack-multinode-legacy
- description: |
- devstack-based functional tests with libs from the master branch
- required-projects:
- - openstack/heat-translator
- - openstack/tosca-parser
-
- job:
name: tacker-functional-devstack-multinode-sol-kubernetes-v2
parent: tacker-functional-devstack-multinode-sol-kubernetes
@@ -721,7 +687,7 @@
- job:
name: tacker-functional-devstack-enhanced-policy-sol
- parent: tacker-functional-devstack-multinode-legacy
+ parent: tacker-functional-devstack-multinode-sol
description: |
Enhanced policy job for SOL devstack-based functional tests
host-vars:
@@ -753,7 +719,7 @@
- job:
name: tacker-functional-devstack-multinode-sol-terraform-v2
- parent: tacker-functional-devstack-multinode-legacy
+ parent: tacker-functional-devstack-multinode-sol
description: |
Multinodes job for SOL Terraform devstack-based functional tests
attempts: 1
@@ -770,7 +736,7 @@
- job:
name: tacker-compliance-devstack-multinode-sol
- parent: tacker-functional-devstack-multinode-legacy
+ parent: tacker-functional-devstack-multinode-sol
description: |
Multinodes job for SOL devstack-based compliance tests
host-vars:
@@ -787,15 +753,9 @@
- release-notes-jobs-python3
check:
jobs:
- - tacker-functional-devstack-multinode-legacy:
- # TODO(ueha): Remove the job after Legacy APIs obsolete
- voting: false
- tacker-functional-devstack-multinode-sol
- tacker-functional-devstack-multinode-sol-separated-nfvo
- tacker-functional-devstack-multinode-sol-kubernetes
- - tacker-functional-devstack-multinode-libs-master:
- # TODO(ueha): Remove the job after Legacy APIs obsolete
- voting: false
- tacker-functional-devstack-multinode-sol-v2
- tacker-functional-devstack-multinode-sol-separated-nfvo-v2
- tacker-functional-devstack-multinode-sol-v2-individual-vnfc-mgmt
diff --git a/contrib/tacker-config/ns-clean.sh b/contrib/tacker-config/ns-clean.sh
deleted file mode 100755
index dc67b1073..000000000
--- a/contrib/tacker-config/ns-clean.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-echo "Deleting network service NS1, NS2"
-for ns in NS1 NS2; do
- network_service_id=$(openstack ns list | grep $ns | awk '{print $2}')
- if [ -n "$network_service_id" ]; then
- openstack ns delete $network_service_id
- fi
-done
-
-sleep 5
-
-echo "Deleting network service descriptor NSD-template"
-nsd_id=$(openstack ns descriptor list | grep NSD-template | awk '{print $2}')
-if [ -n "$nsd_id" ]; then
- openstack ns descriptor delete $nsd_id
-fi
-
-echo "Deleting vnf descriptors"
-for vnfd_name in sample-tosca-vnfd1 sample-tosca-vnfd2 sample-vnfd1 sample-vnfd2; do
- vnfd_id=$(openstack vnf descriptor list | grep $vnfd_name | awk '{print $2}')
- if [ -n "$vnfd_id" ]; then
- openstack vnf descriptor delete $vnfd_id
- fi
-done
-
-echo "Deleting http_client and http_server"
-for server_name in http_client http_server; do
- server_id=$(openstack server list | grep $server_name | awk '{print $2}')
- if [ -n "$server_id" ]; then
- openstack server delete $server_id
- fi
-done
-
-sleep 10
-
-echo "Deleting VIM0"
-vim_id=$(openstack vim list | grep VIM0 | awk '{print $2}')
-if [ -n "$vim_id" ]; then
- openstack vim delete $vim_id
-fi
diff --git a/contrib/tacker-config/ns-config.sh b/contrib/tacker-config/ns-config.sh
deleted file mode 100755
index 33be00214..000000000
--- a/contrib/tacker-config/ns-config.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-network_name='net0'
-network_id=$(openstack network list | grep $network_name | awk '{print $2}')
-if [ -z "$network_id" ]; then
- echo "Creating network net0"
- openstack network create $network_name --provider-network-type=vxlan --provider-segment 1005
- openstack subnet create --network $network_name --subnet-range 10.0.10.0/24 subnet-test
- network_id=$(openstack network list | grep $network_name | awk '{print $2}')
-fi
-
-echo "Creating HTTP client"
-openstack server create --flavor m1.tiny --image cirros-0.5.2-x86_64-disk --nic net-id=$network_id http_client
-echo "Creating HTTP server"
-openstack server create --flavor m1.tiny --image cirros-0.5.2-x86_64-disk --nic net-id=$network_id http_server
-
-sleep 15
-
-client_ip=$(openstack server list | grep http_client | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
-network_source_port_id=$(openstack port list | grep $client_ip | awk '{print $2}')
-ip_dst=$(openstack server list | grep http_server | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
-network_dest_port_id=$(openstack port list | grep $ip_dst | awk '{print $2}')
-
-echo "Creating/ Updating ns_param.yaml file"
-cat > ../../samples/tosca-templates/vnffg-nsd/ns_param.yaml << EOL
-nsd:
- vl1_name: net_mgmt
- vl2_name: net0
- net_src_port_id: ${network_source_port_id}
- ip_dest_prefix: ${ip_dst}/24
- net_dst_port_id: ${network_dest_port_id}
-EOL
-
-vim_default=$(openstack vim list | grep openstack | awk '{print $10}')
-if [ "$vim_default" != "True" ]; then
- echo "Creating default VIM"
- cat > ./vim_config.yaml << EOL
-auth_url: $OS_AUTH_URL
-username: $OS_USERNAME
-password: $OS_PASSWORD
-project_name: $OS_PROJECT_NAME
-project_domain_name: $OS_PROJECT_DOMAIN_ID
-user_domain_name: $OS_USER_DOMAIN_ID
-EOL
- openstack vim register --config-file vim_config.yaml --is-default VIM0
- rm ./vim_config.yaml
-fi
-
diff --git a/contrib/tacker-config/vnffg-clean.sh b/contrib/tacker-config/vnffg-clean.sh
deleted file mode 100755
index 2b1867fc2..000000000
--- a/contrib/tacker-config/vnffg-clean.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-echo "Deleting VNF forwarding graph VNFFG1"
-for vnffg in VNFFG1; do
- vnffg_id=$(openstack vnf graph list | grep $vnffg | awk '{print $2}')
- if [ -n "$vnffg_id" ]; then
- openstack vnf graph delete $vnffg_id
- fi
-done
-
-sleep 5
-
-echo "Deleting VNFs"
-for vnf_name in VNF1 VNF2; do
- vnf_id=$(openstack vnf list | grep $vnf_name | awk '{print $2}')
- if [ -n "$vnf_id" ]; then
- openstack vnf delete $vnf_id
- fi
-done
-
-echo "Deleting VNF descriptors"
-for vnfd_name in VNFD1 VNFD2; do
- vnfd_id=$(openstack vnf descriptor list | grep $vnfd_name | awk '{print $2}')
- if [ -n "$vnfd_id" ]; then
- openstack vnf descriptor delete $vnfd_id
- fi
-done
-
-echo "Deleting http_client and http_server"
-for server_name in http_client http_server; do
- server_id=$(openstack server list | grep $server_name | awk '{print $2}')
- if [ -n "$server_id" ]; then
- openstack server delete $server_id
- fi
-done
-
-sleep 5
-
-echo "Deleting VIM0"
-vim_id=$(openstack vim list | grep VIM0 | awk '{print $2}')
-if [ -n "$vim_id" ]; then
- openstack vim delete $vim_id
-fi
-
diff --git a/contrib/tacker-config/vnffg-config.sh b/contrib/tacker-config/vnffg-config.sh
deleted file mode 100755
index 0c521e9ff..000000000
--- a/contrib/tacker-config/vnffg-config.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-network_name='net0'
-network_id=$(openstack network list | grep $network_name | awk '{print $2}')
-if [ -z "$network_id" ]; then
- echo "Creating network net0"
- openstack network create $network_name --provider-network-type=vxlan --provider-segment 1005
- openstack subnet create --network $network_name --subnet-range 10.0.10.0/24 subnet-test
- network_id=$(openstack network list | grep $network_name | awk '{print $2}')
-fi
-
-echo "Creating HTTP client"
-openstack server create --flavor m1.tiny --image cirros-0.5.2-x86_64-disk --nic net-id=$network_id http_client
-echo "Creating HTTP server"
-openstack server create --flavor m1.tiny --image cirros-0.5.2-x86_64-disk --nic net-id=$network_id http_server
-
-sleep 15
-
-ip_src=$(openstack server list | grep http_client | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
-network_source_port_id=$(openstack port list | grep $ip_src | awk '{print $2}')
-ip_dst=$(openstack server list | grep http_server | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
-network_dest_port_id=$(openstack port list | grep $ip_dst | awk '{print $2}')
-
-echo "Creating/ Updating ns_param.yaml file"
-cat > ../../samples/tosca-templates/vnffgd/vnffg-param-file.yaml << EOL
-net_src_port_id: ${network_source_port_id}
-ip_dst_pre: ${ip_dst}/24
-net_dst_port_id: ${network_dest_port_id}
-dst_port_range: 80-80
-EOL
-
-vim_default=$(openstack vim list | grep openstack | awk '{print $10}')
-if [ "$vim_default" != "True" ]; then
- echo "Creating default VIM"
- cat > ./vim_config.yaml << EOL
-auth_url: $OS_AUTH_URL
-username: $OS_USERNAME
-password: $OS_PASSWORD
-project_name: $OS_PROJECT_NAME
-project_domain_name: $OS_PROJECT_DOMAIN_ID
-user_domain_name: $OS_USER_DOMAIN_ID
-EOL
- openstack vim register --config-file vim_config.yaml --is-default VIM0
- rm ./vim_config.yaml
-fi
-
-echo "Create VNF1 and VNF2"
-openstack vnf descriptor create --vnfd-file ../../samples/tosca-templates/vnffgd/tosca-vnffg-vnfd1.yaml VNFD1
-openstack vnf create --vnfd-name VNFD1 --vim-name VIM0 VNF1
-openstack vnf descriptor create --vnfd-file ../../samples/tosca-templates/vnffgd/tosca-vnffg-vnfd2.yaml VNFD2
-openstack vnf create --vnfd-name VNFD2 --vim-name VIM0 VNF2
-
diff --git a/devstack/lib/tacker b/devstack/lib/tacker
index 5dc6c0f20..d4dc9e6b8 100644
--- a/devstack/lib/tacker
+++ b/devstack/lib/tacker
@@ -81,8 +81,6 @@ TACKER_NOVA_URL=${TACKER_NOVA_URL:-http://127.0.0.1:8774/v2}
TACKER_NOVA_CA_CERTIFICATES_FILE=${TACKER_NOVA_CA_CERTIFICATES_FILE:-}
TACKER_NOVA_API_INSECURE=${TACKER_NOVA_API_INSECURE:-False}
-CEILOMETER_CONF_DIR=/etc/ceilometer
-
VNF_PACKAGE_CSAR_PATH=${VNF_PACKAGE_CSAR_PATH:=$TACKER_DATA_DIR/vnfpackage}
FILESYSTEM_STORE_DATA_DIR=${FILESYSTEM_STORE_DATA_DIR:=$TACKER_DATA_DIR/csar_files}
GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:=file}
@@ -268,12 +266,6 @@ function configure_tacker {
iniset "/$Q_PLUGIN_CONF_FILE" ml2_type_flat flat_networks $PUBLIC_PHYSICAL_NETWORK,$MGMT_PHYS_NET
iniset "/$Q_PLUGIN_CONF_FILE" ovs bridge_mappings $PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE,$MGMT_PHYS_NET:$BR_MGMT
- # Experimental settings for monitor alarm auth settings,
- # Will be changed according to new implementation.
- iniset $TACKER_CONF alarm_auth username admin
- iniset $TACKER_CONF alarm_auth password "$ADMIN_PASSWORD"
- iniset $TACKER_CONF alarm_auth project_name admin
-
echo "Creating bridge"
sudo ovs-vsctl --may-exist add-br ${BR_MGMT}
fi
@@ -365,9 +357,6 @@ function openstack_image_create {
function tacker_check_and_download_images {
local image_url
image_url[0]="http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img"
- # Customized image of OpenWRT 15.05.1 that can fix the continuously
- # respawning OpenWRT-based VNFs.
- image_url[1]="$TACKER_DIR/samples/images/openwrt-x86-kvm_guest-combined-ext4.img.gz"
local image_fname image_name glance_name
local gz_pattern="\.gz$"
@@ -379,9 +368,6 @@ function tacker_check_and_download_images {
do
image_fname=`basename "${image_url[$index]}"`
glance_name=${image_fname%.*}
- if [[ $glance_name =~ "openwrt" ]]; then
- glance_name="OpenWRT"
- fi
image_name=`openstack image list | grep "$glance_name" | awk '{print $4}'`
if [[ $image_name == "" ]]; then
if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
@@ -495,11 +481,3 @@ function tacker_setup_default_vim_resources {
--ingress --protocol tcp --dst-port 22 test_secgrp
}
-
-function configure_maintenance_event_types {
- local event_definitions_file=$CEILOMETER_CONF_DIR/event_definitions.yaml
- local maintenance_events_file=$TACKER_DIR/etc/ceilometer/maintenance_event_types.yaml
-
- echo "Configure maintenance event types to $event_definitions_file"
- cat $maintenance_events_file >> $event_definitions_file
-}
diff --git a/devstack/local.conf.example b/devstack/local.conf.example
index a46341c4c..65c01cfe6 100644
--- a/devstack/local.conf.example
+++ b/devstack/local.conf.example
@@ -39,18 +39,9 @@ enable_plugin heat https://opendev.org/openstack/heat master
enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc master
enable_plugin barbican https://opendev.org/openstack/barbican master
-# Ceilometer
-#CEILOMETER_PIPELINE_INTERVAL=300
-CEILOMETER_EVENT_ALARM=True
-enable_plugin ceilometer https://opendev.org/openstack/ceilometer master
+# Aodh
enable_plugin aodh https://opendev.org/openstack/aodh master
-# Blazar
-enable_plugin blazar https://github.com/openstack/blazar.git master
-
-# Fenix
-enable_plugin fenix https://opendev.org/x/fenix.git master
-
# Tacker
enable_plugin tacker https://opendev.org/openstack/tacker master
diff --git a/devstack/local.conf.kubernetes b/devstack/local.conf.kubernetes
index c5386a9b8..ea3996244 100644
--- a/devstack/local.conf.kubernetes
+++ b/devstack/local.conf.kubernetes
@@ -40,17 +40,9 @@ enable_plugin heat https://opendev.org/openstack/heat master
enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc master
enable_plugin barbican https://opendev.org/openstack/barbican master
-# Ceilometer
-#CEILOMETER_PIPELINE_INTERVAL=300
-enable_plugin ceilometer https://opendev.org/openstack/ceilometer master
+# Aodh
enable_plugin aodh https://opendev.org/openstack/aodh master
-# Blazar
-enable_plugin blazar https://github.com/openstack/blazar.git master
-
-# Fenix
-enable_plugin fenix https://opendev.org/x/fenix.git master
-
# Tacker
enable_plugin tacker https://opendev.org/openstack/tacker master
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 28eb5012a..170919b07 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -49,11 +49,6 @@ if is_service_enabled tacker; then
tacker_check_and_download_images
echo_summary "Setup default VIM resources"
tacker_setup_default_vim_resources
-
- if is_service_enabled ceilometer; then
- echo_summary "Configure maintenance event types"
- configure_maintenance_event_types
- fi
fi
fi
diff --git a/doc/source/_images/tacker-design-etsi.png b/doc/source/_images/tacker-design-etsi.png
deleted file mode 100644
index 740e8d74a..000000000
Binary files a/doc/source/_images/tacker-design-etsi.png and /dev/null differ
diff --git a/doc/source/_images/tacker-design-etsi.pu b/doc/source/_images/tacker-design-etsi.pu
index f6b3f41b7..5b489ec7d 100644
--- a/doc/source/_images/tacker-design-etsi.pu
+++ b/doc/source/_images/tacker-design-etsi.pu
@@ -11,13 +11,9 @@ frame "tacker-server" {
}
frame "tacker-conductor" {
- component "mgmt-driver" as mgmt {
+ component "mgmt-driver" {
'component Noop as noop
- component OpenWRT as wrt
- }
- component "monitor-driver" as monitor {
- component Ping as ping
- component Zabbix as zabbix
+ component "Vnflcm Mgmt Driver" as mgmt
}
component conductor {
component "Conductor Server" as cond
@@ -64,18 +60,5 @@ lcm_driver <-down-> ks_driver
ks_driver <-down-> ks
lcm_driver <-[hidden]down-> mgmt
-lcm_driver <-[hidden]down-> monitor
-'cond <-down-> mgmt
-'cond <-down-> monitor
-/'
-'cond <-right-> noop
-cond <-right-> wrt
-wrt <-down-> vm
-cond <-down-> ping
-ping <-down-> vm
-cond <-down-> zabbix
-zabbix <-down-> vm
-mgmt <-[hidden]down-> monitor
-'/
@enduml
\ No newline at end of file
diff --git a/doc/source/_images/tacker-design-etsi.svg b/doc/source/_images/tacker-design-etsi.svg
new file mode 100644
index 000000000..0216dacc1
--- /dev/null
+++ b/doc/source/_images/tacker-design-etsi.svg
@@ -0,0 +1,173 @@
+
\ No newline at end of file
diff --git a/doc/source/_images/tacker-design-legacy.png b/doc/source/_images/tacker-design-legacy.png
deleted file mode 100644
index 5c7bbe2f2..000000000
Binary files a/doc/source/_images/tacker-design-legacy.png and /dev/null differ
diff --git a/doc/source/_images/tacker-design-legacy.pu b/doc/source/_images/tacker-design-legacy.pu
index e083936ee..a29a73414 100644
--- a/doc/source/_images/tacker-design-legacy.pu
+++ b/doc/source/_images/tacker-design-legacy.pu
@@ -13,28 +13,10 @@ frame "tacker-server" {
component "NFVOPlugin" as nfvo_plugin
'cond <-down-> vnfm_plugin
}
- component "mgmt-driver" as mgmt {
- 'component Noop as noop
- component OpenWRT as wrt
- }
- component "monitor-driver" as monitor {
- component Ping as ping
- component Zabbix as zabbix
- }
- component "infra-driver" as infra {
- component "OpenStack Driver" as os_driver
- component "Kubernetes Driver" as ks_driver
- }
component "vim-driver" as vim {
component "OpenStack Driver" as os_vim
component "Kubernetes Driver" as ks_vim
}
- component "policy-action-driver" as policy {
- component "AutoHeal" as autoheal
- component "AutoScale" as autoscale
- component "Respawn" as respawn
- component "Log" as log
- }
}
frame "tacker-conductor" as cond {
@@ -62,7 +44,6 @@ cloud "NFV infrastructures" as nfv_infra {
}
'# Relationships
-client <-down-> vnfm
client <-down-> nfvo
vnfm <-down-> vnfm_plugin
@@ -71,33 +52,12 @@ nfvo <-down-> nfvo_plugin
vnfm_plugin <-down-> db_vnfm
nfvo_plugin <-down-> db_nfvo
-vnfm_plugin <-down-> os_driver
-vnfm_plugin <-down-> ks_driver
-
-vnfm_plugin <-down-> policy
-vnfm_plugin <-down-> mgmt
-vnfm_plugin <-down-> monitor
-/'
-vnfm_plugin <-down-> autoheal
-vnfm_plugin <-down-> autoscale
-vnfm_plugin <-down-> respawn
-vnfm_plugin <-down-> log
-vnfm_plugin <-down-> wrt
-vnfm_plugin <-down-> ping
-vnfm_plugin <-down-> zabbix
-'/
-
nfvo_plugin <-down-> os_vim
nfvo_plugin <-down-> ks_vim
-os_driver <-down-> os
-ks_driver <-down-> ks
-
os_vim <-down-> os
ks_vim <-down-> ks
cond <-[hidden]left-> server
-mgmt <-[hidden]down-> monitor
-monitor <-[hidden]down-> nfv_infra
@enduml
\ No newline at end of file
diff --git a/doc/source/_images/tacker-design-legacy.svg b/doc/source/_images/tacker-design-legacy.svg
new file mode 100644
index 000000000..a6ea4e937
--- /dev/null
+++ b/doc/source/_images/tacker-design-legacy.svg
@@ -0,0 +1,168 @@
+
\ No newline at end of file
diff --git a/doc/source/_images/tacker-design.png b/doc/source/_images/tacker-design.png
deleted file mode 100644
index 74705683f..000000000
Binary files a/doc/source/_images/tacker-design.png and /dev/null differ
diff --git a/doc/source/admin/external_oauth2_usage_guide.rst b/doc/source/admin/external_oauth2_usage_guide.rst
index dcfd9fc91..2412199f5 100644
--- a/doc/source/admin/external_oauth2_usage_guide.rst
+++ b/doc/source/admin/external_oauth2_usage_guide.rst
@@ -47,7 +47,7 @@ see `Middleware Architecture`_.
$ vi /etc/tacker/api-paste.ini
[composite:tackerapi_v1_0]
- keystone = request_id catch_errors alarm_receiver external_oauth2_token keystonecontext extensions tackerapiapp_v1_0
+ keystone = request_id catch_errors external_oauth2_token keystonecontext extensions tackerapiapp_v1_0
[composite:vnfpkgmapi_v1]
keystone = request_id catch_errors external_oauth2_token keystonecontext vnfpkgmapp_v1
diff --git a/doc/source/admin/oauth2_usage_guide.rst b/doc/source/admin/oauth2_usage_guide.rst
index 2d5ba93f0..fb77d0a4a 100644
--- a/doc/source/admin/oauth2_usage_guide.rst
+++ b/doc/source/admin/oauth2_usage_guide.rst
@@ -136,8 +136,8 @@ identity, see `Middleware Architecture`_.
$ vi /etc/tacker/api-paste.ini
[composite:tackerapi_v1_0]
- #keystone = request_id catch_errors alarm_receiver authtoken keystonecontext extensions tackerapiapp_v1_0
- keystone = request_id catch_errors alarm_receiver oauth2token keystonecontext extensions tackerapiapp_v1_0
+ #keystone = request_id catch_errors authtoken keystonecontext extensions tackerapiapp_v1_0
+ keystone = request_id catch_errors oauth2token keystonecontext extensions tackerapiapp_v1_0
[composite:vnfpkgmapi_v1]
#keystone = request_id catch_errors authtoken keystonecontext vnfpkgmapp_v1
diff --git a/doc/source/install/manual_installation.rst b/doc/source/install/manual_installation.rst
index e52f170f3..62a40c62d 100644
--- a/doc/source/install/manual_installation.rst
+++ b/doc/source/install/manual_installation.rst
@@ -233,10 +233,6 @@ Installing Tacker Server
[database]
connection = mysql+pymysql://tacker:@:3306/tacker?charset=utf8
- ...
-
- [tacker]
- monitor_driver = ping,http_ping
#. Copy the ``tacker.conf`` to ``/etc/tacker/`` directory.
diff --git a/doc/source/user/architecture.rst b/doc/source/user/architecture.rst
index b2c7ab631..47aa77782 100644
--- a/doc/source/user/architecture.rst
+++ b/doc/source/user/architecture.rst
@@ -58,7 +58,7 @@ ETSI NFV-SOL Tacker Implementation
Tacker ETSI NFV-SOL based implementation is described as the following:
-.. figure:: ../_images/tacker-design-etsi.png
+.. figure:: ../_images/tacker-design-etsi.svg
:figwidth: 700 px
:align: left
:width: 700 px
@@ -77,12 +77,8 @@ executed in tacker-server with DB queries. The others are redirected to
infra-driver to execute the actual logics for control and management of
virtualised resources.
-Tacker also provides configuring and monitoring system for VNF. The
-mgmt-driver or monitor-driver can be called by `Conductor Server`. In Ussuri
-release, OpenWRT for mgmt-driver and Ping/Zabbix for monitor-driver are
-available.
-
-.. TODO(yoshito-ito): add ActionDriver after the implementation.
+Tacker also provides a configuration system for VNF. The mgmt-driver can be
+called by `Conductor Server`.
.. note:: VIM related operations such as "Register VIM" and "Update VIM" are
not defined in ETSI NFV-SOL. Users may need to use legacy Tacker.
@@ -92,18 +88,20 @@ Legacy Tacker Implementation
Legacy Tacker implementation is described as the following:
-.. figure:: ../_images/tacker-design-legacy.png
+.. figure:: ../_images/tacker-design-legacy.svg
:figwidth: 800 px
:align: left
:width: 800 px
When a REST API call is sent to tacker-server, VNFM and NFVO plugins handle
-the request and execute connected methods in each plugin. The each plugin
-(NFVOPlugin or VNFMPlugin) invokes required driver methods such as
-mgmt-driver, monitor-driver, infra-driver, and vim-driver.
+the request and execute connected methods in each plugin. The NFVO plugin
+invokes required vim-driver methods.
-.. TODO(yoshito-ito): check the new fenix driver to add here.
+.. note:: Legacy API features other than the VIM feature have been deprecated,
+          so only Nfvo receives API requests from the tacker-client. Vnfm and
+          VNFMPlugin remain because they are used by the VNF LCM API V1.
.. _NFV-SOL002 : https://portal.etsi.org/webapp/WorkProgram/Report_WorkItem.asp?WKI_ID=49492
.. _NFV-SOL003 : https://portal.etsi.org/webapp/WorkProgram/Report_WorkItem.asp?WKI_ID=49506
.. _NFV-SOL005 : https://portal.etsi.org/webapp/WorkProgram/Report_WorkItem.asp?WKI_ID=50935
+
diff --git a/doc/source/user/mgmt_driver_deploy_k8s_usage_guide.rst b/doc/source/user/mgmt_driver_deploy_k8s_usage_guide.rst
index d12071b27..c6ec266b2 100644
--- a/doc/source/user/mgmt_driver_deploy_k8s_usage_guide.rst
+++ b/doc/source/user/mgmt_driver_deploy_k8s_usage_guide.rst
@@ -370,7 +370,6 @@ The sample script (``kubernetes_mgmt.py``) uses the
...
tacker.tacker.mgmt.drivers =
noop = tacker.vnfm.mgmt_drivers.noop:VnfMgmtNoop
- openwrt = tacker.vnfm.mgmt_drivers.openwrt.openwrt:VnfMgmtOpenWRT
vnflcm_noop = tacker.vnfm.mgmt_drivers.vnflcm_noop:VnflcmMgmtNoop
mgmt-drivers-kubernetes = tacker.vnfm.mgmt_drivers.kubernetes_mgmt:KubernetesMgmtDriver
...
diff --git a/doc/source/user/mgmt_driver_for_ansible_driver_usage_guide.rst b/doc/source/user/mgmt_driver_for_ansible_driver_usage_guide.rst
index 286d7bd32..ba140dcd6 100644
--- a/doc/source/user/mgmt_driver_for_ansible_driver_usage_guide.rst
+++ b/doc/source/user/mgmt_driver_for_ansible_driver_usage_guide.rst
@@ -344,7 +344,6 @@ of the tacker.
...
tacker.tacker.mgmt.drivers =
noop = tacker.vnfm.mgmt_drivers.noop:VnfMgmtNoop
- openwrt = tacker.vnfm.mgmt_drivers.openwrt.openwrt:VnfMgmtOpenWRT
vnflcm_noop = tacker.vnfm.mgmt_drivers.vnflcm_noop:VnflcmMgmtNoop
ansible_driver = tacker.vnfm.mgmt_drivers.ansible.ansible:DeviceMgmtAnsible
diff --git a/etc/ceilometer/maintenance_event_types.yaml b/etc/ceilometer/maintenance_event_types.yaml
deleted file mode 100644
index c60cc97ad..000000000
--- a/etc/ceilometer/maintenance_event_types.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-- event_type: 'maintenance.scheduled'
- traits:
- service:
- fields: payload.service
- allowed_actions:
- fields: payload.allowed_actions
- instance_ids:
- fields: payload.instance_ids
- reply_url:
- fields: payload.reply_url
- state:
- fields: payload.state
- session_id:
- fields: payload.session_id
- actions_at:
- fields: payload.actions_at
- type: datetime
- project_id:
- fields: payload.project_id
- reply_at:
- fields: payload.reply_at
- type: datetime
- metadata:
- fields: payload.metadata
-- event_type: 'maintenance.host'
- traits:
- host:
- fields: payload.host
- project_id:
- fields: payload.project_id
- session_id:
- fields: payload.session_id
- state:
- fields: payload.state
diff --git a/etc/config-generator.conf b/etc/config-generator.conf
index deebb3abb..ad8102de1 100644
--- a/etc/config-generator.conf
+++ b/etc/config-generator.conf
@@ -11,7 +11,6 @@ namespace = oslo.policy
namespace = oslo.reports
namespace = oslo.service.periodic_tasks
namespace = oslo.service.service
-namespace = tacker.alarm_receiver
namespace = tacker.auth
namespace = tacker.common.config
namespace = tacker.common.ext_oauth2_auth
@@ -21,18 +20,12 @@ namespace = tacker.keymgr
namespace = tacker.nfvo.drivers.vim.kubernetes_driver
namespace = tacker.nfvo.drivers.vim.openstack_driver
namespace = tacker.nfvo.nfvo_plugin
-namespace = tacker.plugins.fenix
namespace = tacker.service
namespace = tacker.sol_refactored.common.config
namespace = tacker.vnflcm.vnflcm_driver
namespace = tacker.vnfm.infra_drivers.kubernetes.kubernetes_driver
namespace = tacker.vnfm.infra_drivers.openstack.openstack
namespace = tacker.vnfm.infra_drivers.openstack.translate_template
-namespace = tacker.vnfm.mgmt_drivers.openwrt.openwrt
-namespace = tacker.vnfm.monitor
-namespace = tacker.vnfm.monitor_drivers.ceilometer.ceilometer
-namespace = tacker.vnfm.monitor_drivers.http_ping.http_ping
-namespace = tacker.vnfm.monitor_drivers.ping.ping
namespace = tacker.vnfm.nfvo_client
namespace = tacker.vnfm.plugin
namespace = tacker.wsgi
\ No newline at end of file
diff --git a/etc/tacker/api-paste.ini b/etc/tacker/api-paste.ini
index 595ec4710..87862c82a 100644
--- a/etc/tacker/api-paste.ini
+++ b/etc/tacker/api-paste.ini
@@ -18,7 +18,7 @@ use = egg:Paste#urlmap
[composite:tackerapi_v1_0]
use = call:tacker.auth:pipeline_factory
noauth = request_id catch_errors extensions tackerapiapp_v1_0
-keystone = request_id catch_errors alarm_receiver authtoken keystonecontext extensions tackerapiapp_v1_0
+keystone = request_id catch_errors authtoken keystonecontext extensions tackerapiapp_v1_0
[composite:vnfpkgmapi_v1]
use = call:tacker.auth:pipeline_factory
@@ -56,9 +56,6 @@ paste.filter_factory = oslo_middleware:RequestId.factory
[filter:catch_errors]
paste.filter_factory = oslo_middleware:CatchErrors.factory
-[filter:alarm_receiver]
-paste.filter_factory = tacker.alarm_receiver:AlarmReceiver.factory
-
[filter:keystonecontext]
paste.filter_factory = tacker.auth:TackerKeystoneContext.factory
diff --git a/releasenotes/notes/obsolete-legacy-apis-excluding-vim-3ed84bc4845cd6d6.yaml b/releasenotes/notes/obsolete-legacy-apis-excluding-vim-3ed84bc4845cd6d6.yaml
new file mode 100644
index 000000000..378fded19
--- /dev/null
+++ b/releasenotes/notes/obsolete-legacy-apis-excluding-vim-3ed84bc4845cd6d6.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+    Legacy APIs, excluding the VIM feature, are now obsolete.
diff --git a/roles/ensure-db-cli-installed/tasks/main.yaml b/roles/ensure-db-cli-installed/tasks/main.yaml
index 54f89293b..d416741bd 100644
--- a/roles/ensure-db-cli-installed/tasks/main.yaml
+++ b/roles/ensure-db-cli-installed/tasks/main.yaml
@@ -1,13 +1,3 @@
-# Currently the plugins which implicitly require db cli to be
-# pre-installed when the database is remote (i.e., MYSQL_HOST
-# != host-where-it-is-enabled) is only ceilometer. So it'd be
-# more accurate to set the outermost 'when' condition like:
-#
-# when:
-# - devstack_plugins['ceilometer'] | default(false)
-# - devstack_localrc['DATABASE_TYPE'] is defined
-#
-# Either is fine so let's dispense with it to keep it simple.
- block:
- name: install mysql client if needed
include_tasks: mysql.yaml
diff --git a/samples/images/openwrt-x86-kvm_guest-combined-ext4.img.gz b/samples/images/openwrt-x86-kvm_guest-combined-ext4.img.gz
deleted file mode 100644
index 18eb892b0..000000000
Binary files a/samples/images/openwrt-x86-kvm_guest-combined-ext4.img.gz and /dev/null differ
diff --git a/samples/mistral/workflows/create_vnf.yaml b/samples/mistral/workflows/create_vnf.yaml
deleted file mode 100644
index 755743dea..000000000
--- a/samples/mistral/workflows/create_vnf.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-version: '2.0'
-
-std.create_vnf:
- type: direct
-
- description: |
- Create a VNF and waits till VNF is active.
-
- input:
- - body
- output:
- vnf_id: <% $.vnf_id %>
- vim_id: <% $.vim_id %>
- mgmt_ip_address: <% $.mgmt_ip_address %>
- status: <% $.status %>
-
- task-defaults:
- on-error:
- - delete_vnf
-
- tasks:
- create_vnf:
- description: Request to create a VNF.
- action: tacker.create_vnf body=<% $.body %>
- input:
- body: <% $.body %>
- publish:
- vnf_id: <% task(create_vnf).result.vnf.id %>
- vim_id: <% task(create_vnf).result.vnf.vim_id %>
- mgmt_ip_address: <% task(create_vnf).result.vnf.mgmt_ip_address %>
- status: <% task(create_vnf).result.vnf.status %>
- on-success:
- - wait_vnf_active
-
- wait_vnf_active:
- description: Waits till VNF is ACTIVE.
- action: tacker.show_vnf vnf=<% $.vnf_id %>
- retry:
- count: 10
- delay: 10
- break-on: <% $.status = 'ACTIVE' %>
- break-on: <% $.status = 'ERROR' %>
- continue-on: <% $.status = 'PENDING_CREATE' %>
- publish:
- mgmt_ip_address: <% task(wait_vnf_active).result.vnf.mgmt_ip_address %>
- status: <% task(wait_vnf_active).result.vnf.status %>
- on-success:
- - delete_vnf: <% $.status = 'ERROR' %>
-
- delete_vnf:
- description: Request to delete a VNF.
- action: tacker.delete_vnf vnf=<% $.vnf_id %>
diff --git a/samples/mistral/workflows/create_vnfd.yaml b/samples/mistral/workflows/create_vnfd.yaml
deleted file mode 100644
index 36e727ac5..000000000
--- a/samples/mistral/workflows/create_vnfd.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-version: '2.0'
-
-std.create_vnfd:
- type: direct
-
- description: |
- Create a VNFD.
-
- input:
- - body
- output:
- vnfd_id: <% $.vnfd_id %>
-
- tasks:
- create_vnfd:
- description: Request to create a VNFD.
- action: tacker.create_vnfd body=<% $.body %>
- input:
- body: <% $.body %>
- publish:
- vnfd_id: <% task(create_vnfd).result.vnfd.id %>
diff --git a/samples/mistral/workflows/delete_vnf.yaml b/samples/mistral/workflows/delete_vnf.yaml
deleted file mode 100644
index 4be2a13cc..000000000
--- a/samples/mistral/workflows/delete_vnf.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-version: '2.0'
-
-std.delete_vnf:
- type: direct
-
- description: |
- Delete a VNF.
-
- input:
- - vnf_id
-
- tasks:
- delete_vnf:
- description: Request to delete a VNF.
- action: tacker.delete_vnf vnf=<% $.vnf_id %>
diff --git a/samples/mistral/workflows/delete_vnfd.yaml b/samples/mistral/workflows/delete_vnfd.yaml
deleted file mode 100644
index 45197ce58..000000000
--- a/samples/mistral/workflows/delete_vnfd.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-version: '2.0'
-
-std.delete_vnfd:
- type: direct
-
- description: |
- Delete a VNFD.
-
- input:
- - vnfd_id
-
- tasks:
- delete_vnfd:
- description: Request to delete a VNFD.
- action: tacker.delete_vnfd vnfd=<% $.vnfd_id %>
diff --git a/samples/mistral/workflows/input/create_vnf.json b/samples/mistral/workflows/input/create_vnf.json
deleted file mode 100644
index e69e69b09..000000000
--- a/samples/mistral/workflows/input/create_vnf.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "body": {
- "vnf": {
- "attributes": {},
- "vim_id": "",
- "description": "Sample for tacker.create_vnf action",
- "vnfd_id": "dda99d4c-f24d-4550-b104-0958fef427b3",
- "name": "tacker-create-vnf"
- }
- }
-}
diff --git a/samples/mistral/workflows/input/create_vnfd.json b/samples/mistral/workflows/input/create_vnfd.json
deleted file mode 100644
index 26625ffbd..000000000
--- a/samples/mistral/workflows/input/create_vnfd.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "body":{
- "vnfd":{
- "attributes":{
- "vnfd":"tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0\n\ndescription: Demo example\n\nmetadata:\n template_name: sample-tosca-vnfd\n\ntopology_template:\n node_templates:\n VDU1:\n type: tosca.nodes.nfv.VDU.Tacker\n properties:\n image: cirros-0.5.2-x86_64-disk\n flavor: m1.tiny\n availability_zone: nova\n mgmt_driver: noop\n config: |\n param0: key1\n param1: key2\n\n CP1:\n type: tosca.nodes.nfv.CP.Tacker\n properties:\n management: true\n anti_spoofing_protection: false\n requirements:\n - virtualLink:\n node: VL1\n - virtualBinding:\n node: VDU1\n\n CP2:\n type: tosca.nodes.nfv.CP.Tacker\n properties:\n anti_spoofing_protection: false\n requirements:\n - virtualLink:\n node: VL2\n - virtualBinding:\n node: VDU1\n\n CP3:\n type: tosca.nodes.nfv.CP.Tacker\n properties:\n anti_spoofing_protection: false\n requirements:\n - virtualLink:\n node: VL3\n - virtualBinding:\n node: VDU1\n\n VL1:\n type: tosca.nodes.nfv.VL\n properties:\n network_name: net_mgmt\n vendor: Tacker\n\n VL2:\n type: tosca.nodes.nfv.VL\n properties:\n network_name: net0\n vendor: Tacker\n\n VL3:\n type: tosca.nodes.nfv.VL\n properties:\n network_name: net1\n vendor: Tacker\n"
- },
- "name":"tacker-create-vnfd"
- }
- }
-}
diff --git a/samples/mistral/workflows/input/delete_vnf.json b/samples/mistral/workflows/input/delete_vnf.json
deleted file mode 100644
index 4241b2142..000000000
--- a/samples/mistral/workflows/input/delete_vnf.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "vnf_id": "d7606ee7-053a-4064-bb67-501ac704a6ed"
-}
diff --git a/samples/mistral/workflows/input/delete_vnfd.json b/samples/mistral/workflows/input/delete_vnfd.json
deleted file mode 100644
index 036f1e5e7..000000000
--- a/samples/mistral/workflows/input/delete_vnfd.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "vnfd_id": "ad78e292-6b0b-47b5-80f4-3abe9e9c7e12"
-}
diff --git a/samples/tests/etc/samples/install_vnfc.sh b/samples/tests/etc/samples/install_vnfc.sh
deleted file mode 100644
index 250f3679e..000000000
--- a/samples/tests/etc/samples/install_vnfc.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-echo "Successfully installed VNFC" > /tacker
diff --git a/samples/tests/etc/samples/ns-vnffg-param.yaml b/samples/tests/etc/samples/ns-vnffg-param.yaml
deleted file mode 100644
index 1c9661be5..000000000
--- a/samples/tests/etc/samples/ns-vnffg-param.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-nsd:
- vl1_name: net_mgmt
- vl2_name: net0
- net_src_port_id: c0a40f9c-d229-40b7-871f-55131cf17783
- ip_dest_prefix: 10.10.0.11/24
diff --git a/samples/tests/etc/samples/sample-tosca-alarm-respawn.yaml b/samples/tests/etc/samples/sample-tosca-alarm-respawn.yaml
deleted file mode 100644
index 36859e9fe..000000000
--- a/samples/tests/etc/samples/sample-tosca-alarm-respawn.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: VDU1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_respawning:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: VDU1
- action: [respawn]
diff --git a/samples/tests/etc/samples/sample-tosca-alarm-scale.yaml b/samples/tests/etc/samples/sample-tosca-alarm-scale.yaml
deleted file mode 100644
index 468da5e74..000000000
--- a/samples/tests/etc/samples/sample-tosca-alarm-scale.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: SG1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- properties:
- targets: [VDU1]
- increment: 1
- cooldown: 60
- min_instances: 1
- max_instances: 3
- default_instances: 2
-
- - vdu_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_scaling_out:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: SG1
- action: [SP1]
-
- vdu_hcpu_usage_scaling_in:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 10
- constraint: utilization less_than 10%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: lt
- metadata: SG1
- action: [SP1]
diff --git a/samples/tests/etc/samples/sample-tosca-scale-all.yaml b/samples/tests/etc/samples/sample-tosca-scale-all.yaml
deleted file mode 100644
index 941468a23..000000000
--- a/samples/tests/etc/samples/sample-tosca-scale-all.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: sample-tosca-vnfd-scaling
-
-metadata:
- template_name: sample-tosca-vnfd-scaling
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- properties:
- targets: [VDU1]
- increment: 1
- cooldown: 60
- min_instances: 1
- max_instances: 3
- default_instances: 2
diff --git a/samples/tests/etc/samples/sample-tosca-vnf-artifacts-image-values.yaml b/samples/tests/etc/samples/sample-tosca-vnf-artifacts-image-values.yaml
deleted file mode 100644
index f57d0dcc6..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnf-artifacts-image-values.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- image_source: 'http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img',
- flavor: 'm1.tiny',
- zone: 'nova',
- network: 'net_mgmt',
- management: 'true',
- pkt_in_network: 'net0',
- pkt_out_network: 'net1',
- vendor: 'tacker'
-}
diff --git a/samples/tests/etc/samples/sample-tosca-vnf-update-values.yaml b/samples/tests/etc/samples/sample-tosca-vnf-update-values.yaml
deleted file mode 100644
index b66848cdc..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnf-update-values.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-flavor: 'm1.small'
-}
diff --git a/samples/tests/etc/samples/sample-tosca-vnf-values.yaml b/samples/tests/etc/samples/sample-tosca-vnf-values.yaml
deleted file mode 100644
index b24c67b94..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnf-values.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- image_name: 'cirros-0.5.2-x86_64-disk',
- flavor: 'm1.tiny',
- zone: 'nova',
- network: 'net_mgmt',
- management: 'true',
- pkt_in_network: 'net0',
- pkt_out_network: 'net1',
- vendor: 'tacker'
-}
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-anti-affinity-multi-vdu.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-anti-affinity-multi-vdu.yaml
deleted file mode 100644
index b809fef5f..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-anti-affinity-multi-vdu.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: anti-affinity-vdu-insufficient-comp-nodes
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: anti-affinity-vdu-insufficient-comp-nodes
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VDU3:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: anti-affinity-vdu-insufficient-comp-nodes
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP31:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU3
-
- CP32:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU3
-
- CP33:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU3
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: anti-affinity
- strict: true
- description: Apply anti-affinity placement policy to the application servers
- targets: [ VDU1, VDU2, VDU3 ]
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-block-storage.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-block-storage.yaml
deleted file mode 100644
index 729e64665..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-block-storage.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- name: test-vdu-block-storage
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- name: test-cp
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VB1:
- type: tosca.nodes.BlockStorage.Tacker
- properties:
- size: 1 GB
- image: cirros-0.5.2-x86_64-disk
-
- CB1:
- type: tosca.nodes.BlockStorageAttachment
- properties:
- location: /dev/vdb
- requirements:
- - virtualBinding:
- node: VDU1
- - virtualAttachment:
- node: VB1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-existing-block-storage.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-existing-block-storage.yaml
deleted file mode 100644
index e371fc83b..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-existing-block-storage.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- inputs:
- my_vol:
- default: 0dbf28ba-d0b7-4369-99ce-7a3c31dc996f
- description: volume id
- type: string
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- name: test-vdu-block-storage
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- name: test-cp
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VB1:
- type: tosca.nodes.BlockStorage.Tacker
- properties:
- volume_id: my_vol
-
- CB1:
- type: tosca.nodes.BlockStorageAttachment
- properties:
- location: /dev/vdb
- requirements:
- - virtualBinding:
- node: VDU1
- - virtualAttachment:
- node: VB1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-flavor.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-flavor.yaml
deleted file mode 100644
index 812ff685a..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-flavor.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1_flavor_func:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- disk_size: 1 GB
- mem_size: 512 MB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1_flavor_func
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1_flavor_func
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1_flavor_func
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-image.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-image.yaml
deleted file mode 100644
index e1c83abb6..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-image.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example with auto image creation
-
-metadata:
- template_name: sample-tosca-vnfd-image
-
-topology_template:
- node_templates:
- VDU1_image_func:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- artifacts:
- VNFImage_image_func:
- type: tosca.artifacts.Deployment.Image.VM
- file: http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1_image_func
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1_image_func
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1_image_func
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-instance-reservation.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-instance-reservation.yaml
deleted file mode 100644
index d50ae0a9d..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-instance-reservation.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNF TOSCA template with flavor input parameters
-
-metadata:
- template_name: sample-tosca-vnfd-instance-reservation
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: { get_input: flavor }
- reservation_metadata:
- resource_type: {get_input: resource_type}
- id: { get_input: server_group_id }
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
-
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
-
- policies:
- - RSV:
- type: tosca.policies.tacker.Reservation
- reservation:
- start_actions: [SP_RSV]
- before_end_actions: [SP_RSV]
- end_actions: [noop]
- properties:
- lease_id: { get_input: lease_id }
- - SP_RSV:
- type: tosca.policies.tacker.Scaling
- properties:
- increment: 2
- cooldown: 60
- min_instances: 0
- max_instances: 2
- default_instances: 0
- targets: [VDU1]
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-large-template.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-large-template.yaml
deleted file mode 100644
index 2d25493d2..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-large-template.yaml
+++ /dev/null
@@ -1,137 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd-large-template
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
- echo "my hostname is `hostname`" > /tmp/hostname
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-maintenance.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-maintenance.yaml
deleted file mode 100644
index 0c9ef9047..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-maintenance.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Maintenance VNF with Fenix
-
-metadata:
- template_name: tosca-vnfd-maintenance
-
-topology_template:
- node_templates:
- VDU1:
- capabilities:
- nfv_compute:
- properties:
- disk_size: 15 GB
- mem_size: 2048 MB
- num_cpus: 2
- properties:
- availability_zone: nova
- image: cirros-0.5.2-x86_64-disk
- maintenance: true
- mgmt_driver: noop
- type: tosca.nodes.nfv.VDU.Tacker
-
- CP11:
- properties:
- anti_spoofing_protection: false
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
- type: tosca.nodes.nfv.CP.Tacker
-
- VL1:
- properties:
- network_name: net_mgmt
- vendor: Tacker
- type: tosca.nodes.nfv.VL
- policies:
- - SP1:
- properties:
- cooldown: 120
- default_instances: 3
- increment: 1
- max_instances: 3
- min_instances: 1
- targets:
- - VDU1
- type: tosca.policies.tacker.Scaling
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-monitor.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-monitor.yaml
deleted file mode 100644
index affa52584..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-monitor.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNFD With Ping Monitor
-
-metadata:
- template_name: sample-tosca-vnfd-monitor
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- monitoring_delay: 45
- timeout: 2
- config_drive: true
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /tmp/test.txt
- sleep 90
- sudo ifdown eth0
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu-monitoring-vdu-autoheal.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu-monitoring-vdu-autoheal.yaml
deleted file mode 100644
index a1832cad3..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu-monitoring-vdu-autoheal.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 1
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: vdu_autoheal
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 1
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: vdu_autoheal
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /home/cirros/diskinfo
- sleep 90
- sudo ifdown eth0
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu-monitoring.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu-monitoring.yaml
deleted file mode 100644
index b53ad9ae4..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu-monitoring.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Multi VDU monitoring example
-
-metadata:
- template_name: sample-tosca-multi-vdu-monitoring
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- monitoring_delay: 45
- timeout: 2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: True
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- monitoring_delay: 45
- timeout: 2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VDU3:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- monitoring_delay: 45
- timeout: 2
- config_drive: true
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /home/cirros/diskinfo
- sleep 90
- sudo ifdown eth0
-
- CP31:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU3
-
- CP32:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU3
-
- CP33:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU3
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu.yaml
deleted file mode 100644
index cd8d4cc6c..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-multi-vdu.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VDU3:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP31:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU3
-
- CP32:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU3
-
- CP33:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU3
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-no-monitor.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-no-monitor.yaml
deleted file mode 100644
index dad85b1ca..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-no-monitor.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNFD With no Monitor
-
-metadata:
- template_name: sample-tosca-vnfd-no-monitor
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: TACKER
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-param-artifacts-image.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-param-artifacts-image.yaml
deleted file mode 100644
index d0ff7c109..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-param-artifacts-image.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNF TOSCA template with input parameters
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- inputs:
- image_source:
- type: string
- description: Image source
-
- flavor:
- type: string
- description: Flavor Information
-
- zone:
- type: string
- description: Zone Information
-
- network:
- type: string
- description: mgmt network
-
- management:
- type: string
- description: management network
-
- pkt_in_network:
- type: string
- description: In network
-
- pkt_out_network:
- type: string
- description: Out network
-
- vendor:
- type: string
- description: Vendor information
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: {get_input: flavor}
- availability_zone: { get_input: zone }
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- artifacts:
- VNFImage:
- type: tosca.artifacts.Deployment.Image.VM
- file: { get_input: image_source }
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: { get_input: management }
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: network }
- vendor: {get_input: vendor}
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: pkt_in_network }
- vendor: {get_input: vendor}
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: pkt_out_network }
- vendor: {get_input: vendor}
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-param.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-param.yaml
deleted file mode 100644
index 4f3a8ef0e..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-param.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNF TOSCA template with input parameters
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- inputs:
- image_name:
- type: string
- description: Image Name
-
- flavor:
- type: string
- description: Flavor Information
-
- zone:
- type: string
- description: Zone Information
-
- network:
- type: string
- description: mgmt network
-
- management:
- type: string
- description: management network
-
- pkt_in_network:
- type: string
- description: In network
-
- pkt_out_network:
- type: string
- description: Out network
-
- vendor:
- type: string
- description: Vendor information
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: { get_input: image_name}
- flavor: {get_input: flavor}
- availability_zone: { get_input: zone }
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: { get_input: management }
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: network }
- vendor: {get_input: vendor}
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: pkt_in_network }
- vendor: {get_input: vendor}
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: pkt_out_network }
- vendor: {get_input: vendor}
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-affinity.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-affinity.yaml
deleted file mode 100644
index b333aff0c..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-affinity.yaml
+++ /dev/null
@@ -1,120 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: affinity-vdu
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: affinity-vdu
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: affinity
- strict: true
- description: Apply affinity placement policy to the application servers
- targets: [ VDU1, VDU2 ]
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-anti-affinity.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-anti-affinity.yaml
deleted file mode 100644
index 2016887bc..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-anti-affinity.yaml
+++ /dev/null
@@ -1,120 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: anti-affinity-vdu-multi-comp-nodes
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: anti-affinity-vdu-multi-comp-nodes
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: anti-affinity
- strict: true
- description: Apply anti-affinity placement policy to the application servers
- targets: [ VDU1, VDU2 ]
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-invalid.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-invalid.yaml
deleted file mode 100644
index 504ece5a6..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-placement-policy-invalid.yaml
+++ /dev/null
@@ -1,163 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: invalid-placement-policy-vdu
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: invalid-placement-policy-vdu
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VDU3:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: invalid-placement-policy-vdu
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP31:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU3
-
- CP32:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU3
-
- CP33:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU3
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: invalid
- strict: true
- description: Apply invalid placement policy to the application servers
- targets: [ VDU1, VDU2, VDU3 ]
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-single-vdu-monitoring-vdu-autoheal.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-single-vdu-monitoring-vdu-autoheal.yaml
deleted file mode 100644
index 29b86d260..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-single-vdu-monitoring-vdu-autoheal.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 1
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: vdu_autoheal
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /home/cirros/diskinfo
- sleep 90
- sudo ifdown eth0
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd-static-ip.yaml b/samples/tests/etc/samples/sample-tosca-vnfd-static-ip.yaml
deleted file mode 100644
index e105b065e..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd-static-ip.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNFD with predefined properties.
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- ip_address: 192.168.120.225
- anti_spoofing_protection: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: true
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample-tosca-vnfd.yaml b/samples/tests/etc/samples/sample-tosca-vnfd.yaml
deleted file mode 100644
index bfac29868..000000000
--- a/samples/tests/etc/samples/sample-tosca-vnfd.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- inputs:
- vdu-name:
- type: string
- description: Vdu name
- default: test-vdu
- cp-name:
- type: string
- description: Cp name
- default: test-cp
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- name: {get_input : vdu-name}
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- key_name: userKey
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- name: {get_input : cp-name}
- management: true
- anti_spoofing_protection: true
- security_groups:
- - test_secgrp
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tests/etc/samples/sample_tosca_assign_floatingip_to_vdu.yaml b/samples/tests/etc/samples/sample_tosca_assign_floatingip_to_vdu.yaml
deleted file mode 100644
index 4c90e3356..000000000
--- a/samples/tests/etc/samples/sample_tosca_assign_floatingip_to_vdu.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example Floating IP - Allocate one IP from floating network and attach to CP.
-
-metadata:
- template_name: sample-tosca-vnfd-test-fip-with-floating-network
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 1
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- FIP1:
- type: tosca.nodes.network.FloatingIP
- properties:
- floating_network: public
- requirements:
- - link:
- node: CP1
\ No newline at end of file
diff --git a/samples/tests/etc/samples/sample_tosca_vnfc.yaml b/samples/tests/etc/samples/sample_tosca_vnfc.yaml
deleted file mode 100644
index daa9695dc..000000000
--- a/samples/tests/etc/samples/sample_tosca_vnfc.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-metadata:
- template_name: sample-tosca-vnfd-for-vnfc
-
-topology_template:
- node_templates:
- firewall_vnfc:
- type: tosca.nodes.nfv.VNFC.Tacker
- requirements:
- - host: VDU1
- interfaces:
- Standard:
- create: install_vnfc.sh
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.small
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- artifacts:
- fedora:
- type: tosca.artifacts.Deployment.Image.VM
- file: https://github.com/bharaththiruveedula/dotfiles/raw/master/fedora-sw.qcow2
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: private
- vendor: Tacker
diff --git a/samples/tests/etc/samples/test-ns-nsd.yaml b/samples/tests/etc/samples/test-ns-nsd.yaml
deleted file mode 100644
index 425b36d0d..000000000
--- a/samples/tests/etc/samples/test-ns-nsd.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-imports:
- - test-ns-vnfd1
- - test-ns-vnfd2
-
-topology_template:
- inputs:
- vl1_name:
- type: string
- description: name of VL1 virtuallink
- default: net_mgmt
- vl2_name:
- type: string
- description: name of VL2 virtuallink
- default: net0
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
- requirements:
- - virtualLink1: VL1
- - virtualLink2: VL2
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl1_name}
- vendor: tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl2_name}
- vendor: tacker
-
diff --git a/samples/tests/etc/samples/test-ns-vnfd1.yaml b/samples/tests/etc/samples/test-ns-vnfd1.yaml
deleted file mode 100644
index 777ef576d..000000000
--- a/samples/tests/etc/samples/test-ns-vnfd1.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-node_types:
- tosca.nodes.nfv.VNF1:
- requirements:
- - virtualLink1:
- type: tosca.nodes.nfv.VL
- required: true
- - virtualLink2:
- type: tosca.nodes.nfv.VL
- required: true
- capabilities:
- forwader1:
- type: tosca.capabilities.nfv.Forwarder
- forwader2:
- type: tosca.capabilities.nfv.Forwarder
-
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF1
- requirements:
- virtualLink1: [CP11, virtualLink]
- virtualLink2: [CP14, virtualLink]
- capabilities:
- forwarder1: [CP11, forwarder]
- forwarder2: [CP14, forwarder]
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP14:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tests/etc/samples/test-ns-vnfd2.yaml b/samples/tests/etc/samples/test-ns-vnfd2.yaml
deleted file mode 100644
index f75115cbb..000000000
--- a/samples/tests/etc/samples/test-ns-vnfd2.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-node_types:
- tosca.nodes.nfv.VNF2:
- capabilities:
- forwarder1:
- type: tosca.capabilities.nfv.Forwarder
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF2
- capabilities:
- forwarder1: [CP21, forwarder]
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tests/etc/samples/test-nsd-vnfd1.yaml b/samples/tests/etc/samples/test-nsd-vnfd1.yaml
deleted file mode 100644
index 777ef576d..000000000
--- a/samples/tests/etc/samples/test-nsd-vnfd1.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-node_types:
- tosca.nodes.nfv.VNF1:
- requirements:
- - virtualLink1:
- type: tosca.nodes.nfv.VL
- required: true
- - virtualLink2:
- type: tosca.nodes.nfv.VL
- required: true
- capabilities:
- forwader1:
- type: tosca.capabilities.nfv.Forwarder
- forwader2:
- type: tosca.capabilities.nfv.Forwarder
-
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF1
- requirements:
- virtualLink1: [CP11, virtualLink]
- virtualLink2: [CP14, virtualLink]
- capabilities:
- forwarder1: [CP11, forwarder]
- forwarder2: [CP14, forwarder]
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP14:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tests/etc/samples/test-nsd-vnfd2.yaml b/samples/tests/etc/samples/test-nsd-vnfd2.yaml
deleted file mode 100644
index f75115cbb..000000000
--- a/samples/tests/etc/samples/test-nsd-vnfd2.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-node_types:
- tosca.nodes.nfv.VNF2:
- capabilities:
- forwarder1:
- type: tosca.capabilities.nfv.Forwarder
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF2
- capabilities:
- forwarder1: [CP21, forwarder]
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tests/etc/samples/test-nsd.yaml b/samples/tests/etc/samples/test-nsd.yaml
deleted file mode 100644
index 82a93b9eb..000000000
--- a/samples/tests/etc/samples/test-nsd.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-imports:
- - test-nsd-vnfd1
- - test-nsd-vnfd2
-
-topology_template:
- inputs:
- vl1_name:
- type: string
- description: name of VL1 virtuallink
- default: net_mgmt
- vl2_name:
- type: string
- description: name of VL2 virtuallink
- default: net0
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
- requirements:
- - virtualLink1: VL1
- - virtualLink2: VL2
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl1_name}
- vendor: tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl2_name}
- vendor: tacker
-
diff --git a/samples/tests/etc/samples/tosca-ns-vnffg-vnfd1-sample.yaml b/samples/tests/etc/samples/tosca-ns-vnffg-vnfd1-sample.yaml
deleted file mode 100644
index 80d3b748a..000000000
--- a/samples/tests/etc/samples/tosca-ns-vnffg-vnfd1-sample.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VirtualLinks of CP11 and CP22 will be provided by NS descriptor
-node_types:
- tosca.nodes.nfv.VNF1:
- requirements:
- - virtualLink1:
- type: tosca.nodes.nfv.VL
- required: true
- capabilities:
- forwader1:
- type: tosca.capabilities.nfv.Forwarder
-
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF1
- requirements:
- virtualLink1: [CP11, virtualLink]
- capabilities:
- forwarder1: [CP11, forwarder]
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo 1 > /proc/sys/net/ipv4/ip_forward
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
diff --git a/samples/tests/etc/samples/tosca-ns-vnffg-vnfd2-sample.yaml b/samples/tests/etc/samples/tosca-ns-vnffg-vnfd2-sample.yaml
deleted file mode 100644
index a0dd69717..000000000
--- a/samples/tests/etc/samples/tosca-ns-vnffg-vnfd2-sample.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-node_types:
- tosca.nodes.nfv.VNF2:
- capabilities:
- forwarder1:
- type: tosca.capabilities.nfv.Forwarder
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF2
- capabilities:
- forwarder1: [CP21, forwarder]
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo 1 > /proc/sys/net/ipv4/ip_forward
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tests/etc/samples/tosca-ns-vnffg.yaml b/samples/tests/etc/samples/tosca-ns-vnffg.yaml
deleted file mode 100644
index 45facb483..000000000
--- a/samples/tests/etc/samples/tosca-ns-vnffg.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Import VNFDs(already on-boarded) with input parameters
-imports:
- - sample-vnfd1
- - sample-vnfd2
-
-topology_template:
- inputs:
- vl1_name:
- type: string
- description: name of VL1 virtuallink
- default: net_mgmt
- vl2_name:
- type: string
- description: name of VL2 virtuallink
- default: net0
- net_src_port_id:
- type: string
- description: neutron port id of source port
- ip_dest_prefix:
- type: string
- description: IP prefix of destination port
-
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
- requirements:
- - virtualLink1: VL1
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl1_name}
- vendor: tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl2_name}
- vendor: tacker
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path inside ns (src_port->CP12->CP22->dst_port)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: {get_input: ip_dest_prefix}
- path:
- - forwarder: sample-vnfd1
- capability: CP12
- - forwarder: sample-vnfd2
- capability: CP22
-
- Forwarding_path2:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path inside ns (src_port->CP12->dst_port)
- properties:
- id: 52
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 8080-8080
- ip_proto: 6
- ip_dst_prefix: {get_input: ip_dest_prefix}
- path:
- - forwarder: sample-vnfd1
- capability: CP12
-
- groups:
-
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL1, VL2]
- connection_point: [CP12, CP22]
- constituent_vnfs: [sample-vnfd1, sample-vnfd2]
- members: [Forwarding_path1]
-
- VNFFG2:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP12]
- constituent_vnfs: [sample-vnfd1]
- members: [Forwarding_path2]
-
diff --git a/samples/tosca-templates/nsd/ns_param.yaml b/samples/tosca-templates/nsd/ns_param.yaml
deleted file mode 100644
index 97c8cf418..000000000
--- a/samples/tosca-templates/nsd/ns_param.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-nsd:
- vl1_name: net_mgmt
- vl2_name: net0
diff --git a/samples/tosca-templates/nsd/sample-tosca-nsd.yaml b/samples/tosca-templates/nsd/sample-tosca-nsd.yaml
deleted file mode 100644
index 73c16eb81..000000000
--- a/samples/tosca-templates/nsd/sample-tosca-nsd.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Import VNFDs(already on-boarded) with input parameters
-imports:
- - sample-tosca-vnfd1
- - sample-tosca-vnfd2
-
-topology_template:
- inputs:
- vl1_name:
- type: string
- description: name of VL1 virtuallink
- default: net_mgmt
- vl2_name:
- type: string
- description: name of VL2 virtuallink
- default: net0
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
- requirements:
- - virtualLink1: VL1
- - virtualLink2: VL2
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl1_name}
- vendor: tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl2_name}
- vendor: tacker
-
diff --git a/samples/tosca-templates/nsd/sample-tosca-vnfd1.yaml b/samples/tosca-templates/nsd/sample-tosca-vnfd1.yaml
deleted file mode 100644
index e6b036a8b..000000000
--- a/samples/tosca-templates/nsd/sample-tosca-vnfd1.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VirtualLinks of CP11 and CP22 will be provided by NS descriptor
-node_types:
- tosca.nodes.nfv.VNF1:
- requirements:
- - virtualLink1:
- type: tosca.nodes.nfv.VL
- required: true
- - virtualLink2:
- type: tosca.nodes.nfv.VL
- required: true
- capabilities:
- forwader1:
- type: tosca.capabilities.nfv.Forwarder
- forwader2:
- type: tosca.capabilities.nfv.Forwarder
-
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF1
- requirements:
- virtualLink1: [CP11, virtualLink]
- virtualLink2: [CP14, virtualLink]
- capabilities:
- forwarder1: [CP11, forwarder]
- forwarder2: [CP14, forwarder]
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP14:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tosca-templates/nsd/sample-tosca-vnfd2.yaml b/samples/tosca-templates/nsd/sample-tosca-vnfd2.yaml
deleted file mode 100644
index f75115cbb..000000000
--- a/samples/tosca-templates/nsd/sample-tosca-vnfd2.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-node_types:
- tosca.nodes.nfv.VNF2:
- capabilities:
- forwarder1:
- type: tosca.capabilities.nfv.Forwarder
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF2
- capabilities:
- forwarder1: [CP21, forwarder]
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tosca-templates/nsd/sample2-tosca-nsd.yaml b/samples/tosca-templates/nsd/sample2-tosca-nsd.yaml
deleted file mode 100644
index 3d1fbc28e..000000000
--- a/samples/tosca-templates/nsd/sample2-tosca-nsd.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Import VNDs(already on-boarded) without param
-imports:
- - sample2-tosca-vnfd1
- - sample2-tosca-vnfd2
-
-topology_template:
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
diff --git a/samples/tosca-templates/nsd/sample2-tosca-vnfd1.yaml b/samples/tosca-templates/nsd/sample2-tosca-vnfd1.yaml
deleted file mode 100644
index 7c453d706..000000000
--- a/samples/tosca-templates/nsd/sample2-tosca-vnfd1.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo Sample VNFD1
-node_types:
- tosca.nodes.nfv.VNF1:
- requirements:
- - virtualLink1:
- type: tosca.nodes.nfv.VL
- required: true
- - virtualLink2:
- type: tosca.nodes.nfv.VL
- required: true
-
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF1
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 0
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 0
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP14:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tosca-templates/nsd/sample2-tosca-vnfd2.yaml b/samples/tosca-templates/nsd/sample2-tosca-vnfd2.yaml
deleted file mode 100644
index 594b85c54..000000000
--- a/samples/tosca-templates/nsd/sample2-tosca-vnfd2.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example VNFD2
-node_types:
- tosca.nodes.nfv.VNF2:
- requirements:
- - virtualLink1:
- type: tosca.nodes.nfv.VL
- required: true
- - virtualLink2:
- type: tosca.nodes.nfv.VL
- required: true
-
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF2
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 0
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 0
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP14:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tosca-templates/vnfd/test_tosca_vnfc.yaml b/samples/tosca-templates/vnfd/test_tosca_vnfc.yaml
deleted file mode 100644
index 87828fe22..000000000
--- a/samples/tosca-templates/vnfd/test_tosca_vnfc.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-metadata:
- template_name: sample-tosca-vnfd-for-vnfc
-
-topology_template:
- node_templates:
- firewall_vnfc:
- type: tosca.nodes.nfv.VNFC.Tacker
- requirements:
- - host: VDU1
- interfaces:
- Standard:
- create: install_vnfc.sh
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: fedora-software-config
- flavor: m1.small
- mgmt_driver: noop
- key_name: stack_key
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/test_tosca_vnfc_multiple_servers.yaml b/samples/tosca-templates/vnfd/test_tosca_vnfc_multiple_servers.yaml
deleted file mode 100644
index 161177e97..000000000
--- a/samples/tosca-templates/vnfd/test_tosca_vnfc_multiple_servers.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-metadata:
- template_name: sample-tosca-vnfd-for-vnfc
-
-topology_template:
- node_templates:
- firewall_vnfc:
- type: tosca.nodes.nfv.VNFC.Tacker
- requirements:
- - host: VDU1
- - host: VDU2
- interfaces:
- Standard:
- create: /home/bharatht/install_vnfc.sh
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: fedora-software-config
- flavor: m1.small
- mgmt_driver: noop
- key_name: stack_key
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: fedora-software-config
- flavor: m1.small
- mgmt_driver: noop
- key_name: stack_key
- config: |
- param0: key1
- param1: key2
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: private
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-config-openwrt-dnsmasq.yaml b/samples/tosca-templates/vnfd/tosca-config-openwrt-dnsmasq.yaml
deleted file mode 100644
index 7d883eb2c..000000000
--- a/samples/tosca-templates/vnfd/tosca-config-openwrt-dnsmasq.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-vdus:
- VDU1:
- config:
- dhcp: |
- package dhcp
-
- config dnsmasq
- option domainneeded '1'
- option boguspriv '1'
- option filterwin2k '0'
- option localise_queries '1'
- option rebind_protection '1'
- option rebind_localhost '1'
- option local '/lan/'
- option domain 'lan'
- option expandhosts '1'
- option nonegcache '0'
- option authoritative '1'
- option readethers '1'
- option leasefile '/tmp/dhcp.leases'
- option resolvfile '/tmp/resolv.conf.auto'
- option localservice '1'
-
- config dhcp 'lan'
- option interface 'lan'
- option start '100'
- option limit '150'
- option leasetime '12h'
- option dhcpv6 'server'
- option ra 'server'
- option ra_management '1'
-
- config 'domain'
- option 'name' 'www.facebook.com'
- option 'ip' '1.2.3.4'
-
- config 'domain'
- option 'name' 'www.google.com'
- option 'ip' '192.168.1.140'
diff --git a/samples/tosca-templates/vnfd/tosca-config-openwrt-firewall.yaml b/samples/tosca-templates/vnfd/tosca-config-openwrt-firewall.yaml
deleted file mode 100644
index 42f071be8..000000000
--- a/samples/tosca-templates/vnfd/tosca-config-openwrt-firewall.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-vdus:
- VDU1:
- config:
- firewall: |
- package firewall
-
- config defaults
- option syn_flood '1'
- option input 'ACCEPT'
- option output 'ACCEPT'
- option forward 'REJECT'
-
- config zone
- option name 'lan'
- list network 'lan'
- option input 'ACCEPT'
- option output 'ACCEPT'
- option forward 'ACCEPT'
-
- config zone
- option name 'wan'
- list network 'wan'
- list network 'wan6'
- option input 'REJECT'
- option output 'ACCEPT'
- option forward 'REJECT'
- option masq '1'
- option mtu_fix '1'
-
- config forwarding
- option src 'lan'
- option dest 'wan'
-
- config rule
- option name 'Allow-DHCP-Renew'
- option src 'wan'
- option proto 'udp'
- option dest_port '68'
- option target 'ACCEPT'
- option family 'ipv4'
-
- config rule
- option name 'Allow-Ping'
- option src 'wan'
- option proto 'icmp'
- option icmp_type 'echo-request'
- option family 'ipv4'
- option target 'ACCEPT'
-
- config rule
- option name 'Allow-IGMP'
- option src 'wan'
- option proto 'igmp'
- option family 'ipv4'
- option target 'ACCEPT'
-
- config rule
- option name 'Allow-DHCPv6'
- option src 'wan'
- option proto 'udp'
- option src_ip 'fe80::/10'
- option src_port '547'
- option dest_ip 'fe80::/10'
- option dest_port '546'
- option family 'ipv6'
- option target 'ACCEPT'
-
- config rule
- option name 'Allow-MLD'
- option src 'wan'
- option proto 'icmp'
- option src_ip 'fe80::/10'
- list icmp_type '130/0'
- list icmp_type '131/0'
- list icmp_type '132/0'
- list icmp_type '143/0'
- option family 'ipv6'
- option target 'ACCEPT'
-
- config rule
- option name 'Allow-ICMPv6-Input'
- option src 'wan'
- option proto 'icmp'
- list icmp_type 'echo-request'
- list icmp_type 'echo-reply'
- list icmp_type 'destination-unreachable'
- list icmp_type 'packet-too-big'
- list icmp_type 'time-exceeded'
- list icmp_type 'bad-header'
- list icmp_type 'unknown-header-type'
- list icmp_type 'router-solicitation'
- list icmp_type 'neighbour-solicitation'
- list icmp_type 'router-advertisement'
- list icmp_type 'neighbour-advertisement'
- option limit '190/sec'
- option family 'ipv6'
- option target 'REJECT'
diff --git a/samples/tosca-templates/vnfd/tosca-config-openwrt-qos.yaml b/samples/tosca-templates/vnfd/tosca-config-openwrt-qos.yaml
deleted file mode 100644
index b8058b623..000000000
--- a/samples/tosca-templates/vnfd/tosca-config-openwrt-qos.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-vdus:
- VDU1:
- config:
- qos: |
- package qos
-
- config interface 'wan'
- option classgroup 'Default'
- option upload '1000'
- option download '16000'
- option enabled '1'
-
- config classify
- option target 'Priority'
- option ports '22,53'
- option comment 'ssh, dns'
-
- config classify
- option target 'Normal'
- option proto 'tcp'
- option ports '20,21,25,80,110,443,993,995'
- option comment 'ftp, smtp, http(s), imap'
-
- config classify
- option target 'Express'
- option ports '5190'
- option comment 'AOL, iChat, ICQ'
-
- config default
- option target 'Express'
- option proto 'udp'
- option pktsize '-500'
-
- config reclassify
- option target 'Priority'
- option proto 'icmp'
-
- config default
- option target 'Bulk'
- option portrange '1024-65535'
-
- config classgroup 'Default'
- option classes 'Priority Express Normal Bulk'
- option default 'Normal'
-
- config class 'Priority'
- option packetsize '400'
- option avgrate '10'
- option priority '20'
-
- config class 'Priority_down'
- option packetsize '1000'
- option avgrate '10'
-
- config class 'Express'
- option packetsize '1000'
- option avgrate '50'
- option priority '10'
-
- config class 'Normal'
- option packetsize '1500'
- option packetdelay '100'
- option avgrate '10'
- option priority '5'
-
- config class 'Normal_down'
- option avgrate '20'
-
- config class 'Bulk'
- option avgrate '1'
- option packetdelay '200'
diff --git a/samples/tosca-templates/vnfd/tosca-config-openwrt-vrouter.yaml b/samples/tosca-templates/vnfd/tosca-config-openwrt-vrouter.yaml
deleted file mode 100644
index 71ef41828..000000000
--- a/samples/tosca-templates/vnfd/tosca-config-openwrt-vrouter.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-vdus:
- VDU1:
- config:
- network: |
- package network
-
- config interface 'loopback'
- option ifname 'lo'
- option proto 'static'
- option ipaddr '127.0.0.1'
- option netmask '255.0.0.0'
-
- config interface 'net_mgmt'
- option ifname 'eth0'
- option proto 'dhcp'
-
- config interface 'net0'
- option ifname 'eth1'
- option proto 'dhcp'
-
- config interface 'net1'
- option ifname 'eth2'
- option proto 'dhcp'
-
- firewall: |
- package firewall
-
- config defaults
- option syn_flood '1'
- option input 'ACCEPT'
- option output 'ACCEPT'
- option forward 'ACCEPT'
-
- config zone
- option name 'net_mgmt'
- option network 'net_mgmt'
- option input 'ACCEPT'
- option output 'ACCEPT'
- option forward 'REJECT'
-
- config zone
- option name 'lan'
- list network 'net0 net1'
- option input 'ACCEPT'
- option output 'ACCEPT'
- option forward 'ACCEPT'
diff --git a/samples/tosca-templates/vnfd/tosca-placement-policy-anti-affinity.yaml b/samples/tosca-templates/vnfd/tosca-placement-policy-anti-affinity.yaml
deleted file mode 100644
index 7d8ca8235..000000000
--- a/samples/tosca-templates/vnfd/tosca-placement-policy-anti-affinity.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: anti-affinity
- strict: true
- description: Apply anti-affinity placement policy to the application servers
- targets: [ VDU1, VDU2 ]
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-alarm-multi-actions.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-alarm-multi-actions.yaml
deleted file mode 100644
index 1c015444b..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-alarm-multi-actions.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: VDU1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_respawning:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- comparison_operator: gt
- resource_type: instance
- metadata: VDU1
- action: [respawn, log]
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-alarm-respawn.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-alarm-respawn.yaml
deleted file mode 100644
index 2f67e86b3..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-alarm-respawn.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: VDU1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_respawning:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 300
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: VDU1
- action: [respawn]
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-alarm-scale.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-alarm-scale.yaml
deleted file mode 100644
index d77387940..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-alarm-scale.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: SG1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- properties:
- targets: [VDU1]
- increment: 1
- cooldown: 120
- min_instances: 1
- max_instances: 3
- default_instances: 1
-
- - vdu_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_scaling_out:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 80
- constraint: utilization greater_than 80%
- granularity: 60
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: SG1
- action: [SP1]
-
- vdu_lcpu_usage_scaling_in:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 10
- constraint: utilization less_than 10%
- granularity: 60
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: lt
- metadata: SG1
- action: [SP1]
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-block-attach.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-block-attach.yaml
deleted file mode 100644
index 00c3c1db7..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-block-attach.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VB1:
- type: tosca.nodes.BlockStorage.Tacker
- properties:
- size: 1 GB
- image: cirros-0.5.2-x86_64-disk
-
- CB1:
- type: tosca.nodes.BlockStorageAttachment
- properties:
- location: /dev/vdb
- requirements:
- - virtualBinding:
- node: VDU1
- - virtualAttachment:
- node: VB1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-containerized-two-containers.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-containerized-two-containers.yaml
deleted file mode 100644
index dc3c663b8..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-containerized-two-containers.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: A sample containerized VNF with two containers per VDU
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- namespace: default
- mapping_ports:
- - "80:80"
- - "88:88"
- service_type: NodePort
- vnfcs:
- front_end:
- num_cpus: 0.5
- mem_size: 512 MB
- image: nginx
- ports:
- - "80"
- rss_reader:
- num_cpus: 0.5
- mem_size: 512 MB
- image: nickchase/rss-php-nginx:v1
- ports:
- - "88"
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- targets: [VDU1]
- properties:
- min_instances: 1
- max_instances: 3
- target_cpu_utilization_percentage: 40
- default_instances: 1 # required parameter but ignored for cnf
- increment: 1 # required parameter but ignored for cnf
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-containerized.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-containerized.yaml
deleted file mode 100644
index a98d45635..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-containerized.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: A sample containerized VNF with one container per VDU
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- namespace: default
- mapping_ports:
- - "80:80"
- labels:
- - "app: webserver"
- service_type: ClusterIP
- vnfcs:
- web_server:
- num_cpus: 0.2
- mem_size: 100 MB
- image: k8s.gcr.io/hpa-example
- ports:
- - "80"
- config: |
- param0: key1
- param1: key2
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- targets: [VDU1]
- properties:
- min_instances: 1
- max_instances: 10
- target_cpu_utilization_percentage: 50
- default_instances: 1 # required parameter but ignored for cnf
- increment: 1 # required parameter but ignored for cnf
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-cpu-dedicate.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-cpu-dedicate.yaml
deleted file mode 100644
index 775dec9d5..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-cpu-dedicate.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Dedicated CPU example
-
-metadata:
- template_name: sample-tosca-vnfd-cpu-dedicate
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 10 GB
- mem_size: 2048 MB
- num_cpus: 2
- cpu_allocation:
- cpu_affinity: dedicated
- properties:
- image: cirros-0.5.2-x86_64-disk
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-hello-world.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-hello-world.yaml
deleted file mode 100644
index 548a5c0c2..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-hello-world.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-host-reservation-param-values.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-host-reservation-param-values.yaml
deleted file mode 100644
index 099566686..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-host-reservation-param-values.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- lease_id: '8b01bdf8-a47c-49ea-96f1-3504fccfc9d4',
- resource_type: 'physical_host',
- reservation_id: '707e4f81-aedd-44cd-a445-fd18a47d0228',
-}
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-host-reservation.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-host-reservation.yaml
deleted file mode 100644
index ae0bb8ae5..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-host-reservation.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNF TOSCA template with reservation id input parameters
-
-metadata:
- template_name: sample-tosca-vnfd-host-reservation
-
-topology_template:
- inputs:
- lease_id:
- type: string
- description: lease id
-
- resource_type:
- type: string
- description: reservation resource type
-
- reservation_id:
- type: string
- description: reservation id
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- reservation_metadata:
- resource_type: { get_input: resource_type }
- id: { get_input: reservation_id }
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
-
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
-
- policies:
- - RSV:
- type: tosca.policies.tacker.Reservation
- reservation:
- start_actions: [SP_RSV]
- before_end_actions: [SP_RSV]
- end_actions: [noop]
- properties:
- lease_id: { get_input: lease_id }
- - SP_RSV:
- type: tosca.policies.tacker.Scaling
- properties:
- increment: 2
- cooldown: 120
- min_instances: 0
- max_instances: 2
- default_instances: 0
- targets: [VDU1]
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-http-monitor.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-http-monitor.yaml
deleted file mode 100644
index 0870fd989..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-http-monitor.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 2 GB
- disk_size: 20 GB
- properties:
- image: ubuntu
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: http_ping
- parameters:
- retry: 5
- timeout: 10
- port: 8000
- actions:
- failure: respawn
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-hugepages.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-hugepages.yaml
deleted file mode 100644
index a6765fa7a..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-hugepages.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Huge Pages example
-
-metadata:
- template_name: sample-tosca-vnfd-hugepages
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 10 GB
- mem_size: 2048 MB
- num_cpus: 2
- mem_page_size: large
- properties:
- image: cirros-0.5.2-x86_64-disk
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-image.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-image.yaml
deleted file mode 100644
index 432f6cab8..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-image.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example with auto image creation
-
-metadata:
- template_name: sample-tosca-vnfd-image
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- artifacts:
- VNFImage:
- type: tosca.artifacts.Deployment.Image.VM
- file: http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-instance-reservation-param-values.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-instance-reservation-param-values.yaml
deleted file mode 100644
index 6722c1003..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-instance-reservation-param-values.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- flavor: '707e4f81-aedd-44cd-a445-fd18a47d0228',
- lease_id: '8b01bdf8-a47c-49ea-96f1-3504fccfc9d4',
- resource_type: 'virtual_instance',
- server_group_id: '8b01bdf8-a47c-49ea-96f1-3504fccfc9d4',
-}
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-instance-reservation.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-instance-reservation.yaml
deleted file mode 100644
index 85cd1e477..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-instance-reservation.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNF TOSCA template with flavor input parameters
-
-metadata:
- template_name: sample-tosca-vnfd-instance-reservation
-
-topology_template:
- inputs:
- flavor:
- type: string
- description: Flavor Information
-
- lease_id:
- type: string
- description: lease id
-
- resource_type:
- type: string
- description: reservation resource type
-
- server_group_id:
- type: string
- description: server group id
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: { get_input: flavor }
- reservation_metadata:
- resource_type: { get_input: resource_type }
- id: { get_input: server_group_id }
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
-
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
-
- policies:
- - RSV:
- type: tosca.policies.tacker.Reservation
- reservation:
- start_actions: [SP_RSV]
- before_end_actions: [SP_RSV]
- end_actions: [noop]
- properties:
- lease_id: { get_input: lease_id }
- - SP_RSV:
- type: tosca.policies.tacker.Scaling
- properties:
- increment: 2
- cooldown: 120
- min_instances: 0
- max_instances: 2
- default_instances: 0
- targets: [VDU1]
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-keyname.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-keyname.yaml
deleted file mode 100644
index 113b4ef4d..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-keyname.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example with key_name
-
-metadata:
- template_name: sample-tosca-vnfd-keyname
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- key_name: userKey
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-mac-ip.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-mac-ip.yaml
deleted file mode 100644
index 973d09c97..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-mac-ip.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example with key_name
-
-metadata:
- template_name: sample-tosca-vnfd-keyname
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- key_name: userKey
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- mac_address: 6c:40:08:a0:de:0a
- ip_address: 10.10.1.12
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-monitor-multi-vdu.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-monitor-multi-vdu.yaml
deleted file mode 100644
index 02c959d63..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-monitor-multi-vdu.yaml
+++ /dev/null
@@ -1,193 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Monitoring for multiple vdus
-
-metadata:
- template_name: tosca-vnfd-monitoir-multi-vdu
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: respawn
-
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: respawn
-
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VDU3:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: respawn
-
- CP31:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU3
-
- CP32:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU3
-
- CP33:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU3
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-monitor.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-monitor.yaml
deleted file mode 100644
index b6be423f5..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-monitor.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: respawn
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-monitoring-vdu-autoheal.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-monitoring-vdu-autoheal.yaml
deleted file mode 100644
index 29b86d260..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-monitoring-vdu-autoheal.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 1
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- parameters:
- monitoring_delay: 45
- count: 3
- interval: 1
- timeout: 2
- actions:
- failure: vdu_autoheal
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /home/cirros/diskinfo
- sleep 90
- sudo ifdown eth0
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-multi-vdu.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-multi-vdu.yaml
deleted file mode 100644
index ed2dd5af4..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-multi-vdu.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.medium
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VDU3:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP31:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU3
-
- CP32:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU3
-
- CP33:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU3
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-network.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-network.yaml
deleted file mode 100644
index 0bfe77568..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-network.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo VNFD with custom network details.
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: custom_net0
- vendor: Tacker
- ip_version: 4
- cidr: '20.0.0.0/24'
- start_ip: '20.0.0.50'
- end_ip: '20.0.0.200'
- gateway_ip: '20.0.0.1'
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: custom_net1
- vendor: Tacker
- ip_version: 4
- cidr: '30.0.0.0/24'
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-nova-flavor.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-nova-flavor.yaml
deleted file mode 100644
index 82bb7d757..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-nova-flavor.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-numacount.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-numacount.yaml
deleted file mode 100644
index f55fe3ab7..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-numacount.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: NUMA Node Count Input example
-
-metadata:
- template_name: sample-tosca-vnfd-numacount
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 10 GB
- mem_size: 2048 MB
- num_cpus: 2
- numa_node_count: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-numadefine.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-numadefine.yaml
deleted file mode 100644
index cf4e6e3b1..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-numadefine.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: NUMA Node Define example
-
-metadata:
- template_name: sample-tosca-vnfd-numadefine
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 10 GB
- mem_size: 4096 MB
- num_cpus: 6
- numa_nodes:
- node0:
- id: 0
- vcpus: [0, 1]
- mem_size: 1024
- node1:
- id: 1
- vcpus: [2,3,4,5]
- mem_size: 3072
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-openwrt.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-openwrt.yaml
deleted file mode 100644
index dd78d2062..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-openwrt.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: OpenWRT
- config: |
- param0: key1
- param1: key2
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- parameters:
- count: 3
- interval: 10
- actions:
- failure: respawn
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-param-values.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-param-values.yaml
deleted file mode 100644
index 42675d2cc..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-param-values.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- image_name: 'cirros-0.5.2-x86_64-disk',
- image_source: 'http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img',
- flavor: 'm1.tiny',
- zone: 'nova',
- network: 'net_mgmt',
- management: 'true',
- pkt_in_network: 'net0',
- pkt_out_network: 'net1',
- vendor: 'tacker'
-}
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-scale.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-scale.yaml
deleted file mode 100644
index cd7b8f841..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-scale.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: sample-tosca-vnfd-scaling
-
-metadata:
- template_name: sample-tosca-vnfd-scaling
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- flavor: m1.tiny
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- flavor: m1.tiny
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- properties:
- targets: [VDU1, VDU2]
- increment: 1
- cooldown: 120
- min_instances: 1
- max_instances: 3
- default_instances: 2
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-secgroups.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-secgroups.yaml
deleted file mode 100644
index 05d5b1a3c..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-secgroups.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example with key_name
-
-metadata:
- template_name: sample-tosca-vnfd-secgroups
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- key_name: userKey
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: true
- security_groups:
- - default
- - test_secgrp
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-sriov.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-sriov.yaml
deleted file mode 100644
index f628a228d..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-sriov.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: SR-IOV example
-
-metadata:
- template_name: sample-tosca-vnfd-sriov
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: ubuntu
- flavor: numa-sriov
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- type: sriov
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: sr3010
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-userdata.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-userdata.yaml
deleted file mode 100644
index 33c041e54..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-userdata.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo with user-data
-
-metadata:
- template_name: sample-vnfd-userdata
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- config: |
- param0: key1
- param1: key2
- mgmt_driver: noop
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /home/openwrt/diskinfo
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-vcpu-topology.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-vcpu-topology.yaml
deleted file mode 100644
index 607561ea4..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-vcpu-topology.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: vCPU Topology example
-
-metadata:
- template_name: sample-tosca-vnfd-vcpu-topology
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 80 GB
- mem_size: 4096 MB
- num_cpus: 8
- cpu_allocation:
- socket_count: 2
- thread_count: 2
- core_count: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-vdu-name.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-vdu-name.yaml
deleted file mode 100644
index c040c762e..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-vdu-name.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd-vdu-name
-
-topology_template:
- inputs:
- vdu-name:
- type: string
- description: Vdu name
- default: test-vdu
- cp-name:
- type: string
- description: Cp name
- default: test-cp
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- name: {get_input : vdu-name}
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- name: {get_input : cp-name}
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-vip.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-vip.yaml
deleted file mode 100644
index b931251a9..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-vip.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: |
- Demo example with virtural IP.
- The VCP is used to grab an IP which will be used as an virtual IP as CP1 and CP2.
-
-metadata:
- template_name: sample-tosca-vnfd-vip
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: true
- security_groups:
- - default
- ip_address: 10.10.1.11
- order: 0
- allowed_address_pairs:
- - ip_address: 10.10.1.13
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: true
- security_groups:
- - default
- ip_address: 10.10.1.12
- order: 0
- allowed_address_pairs:
- - ip_address: 10.10.1.13
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- VCP:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: true
- security_groups:
- - default
- ip_address: 10.10.1.13
- requirements:
- - virtualLink:
- node: VL1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-with-params.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-with-params.yaml
deleted file mode 100644
index 0ede65842..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-with-params.yaml
+++ /dev/null
@@ -1,150 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNF TOSCA template with input parameters
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- inputs:
- image_name:
- type: string
- description: Image Name
-
- image_source:
- type: string
- description: Image source
-
- flavor:
- type: string
- description: Flavor Information
-
- zone:
- type: string
- description: Zone Information
-
- network:
- type: string
- description: management network
-
- management:
- type: string
- description: management network
-
- pkt_in_network:
- type: string
- description: In network
-
- pkt_out_network:
- type: string
- description: Out network
-
- vendor:
- type: string
- description: Vendor information
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: { get_input: image_name }
- flavor: { get_input: flavor }
- availability_zone: { get_input: zone }
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: { get_input: management }
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: { get_input: flavor }
- availability_zone: { get_input: zone }
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- artifacts:
- VNFImage:
- type: tosca.artifacts.Deployment.Image.VM
- file: { get_input: image_source }
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: { get_input: management }
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: network }
- vendor: { get_input: vendor }
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: pkt_in_network }
- vendor: { get_input: vendor }
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: { get_input: pkt_out_network }
- vendor: { get_input: vendor }
diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-zabbix-monitor.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-zabbix-monitor.yaml
deleted file mode 100644
index bf51eab33..000000000
--- a/samples/tosca-templates/vnfd/tosca-vnfd-zabbix-monitor.yaml
+++ /dev/null
@@ -1,136 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Monitoring for multiple vdus
-
-metadata:
- template_name: tosca-vnfd-monitoir-multi-vdu
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 2
- mem_size: 2048 MB
- disk_size: 15 GB
- properties:
- name: VDU1
- image: ubuntu16.04
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/bash
- sudo apt-get -y update
- sudo apt-get -y upgrade
- sudo apt-get -y install zabbix-agent
- sudo apt-get -y install apache2
-
- sudo sed -i "2s/.*/`ifconfig [Interface name in VNF] | grep ""\"inet addr:\"""| cut -d: -f2 | awk ""\"{ print $1 }\"""`/g" "/etc/hosts"
- sudo sed -i "s/Bcast/`cat /etc/hostname`/g" "/etc/hosts"
- sudo sed -i "3s/.*/[Zabbix Host IP Address]\tmonitor/g" "/etc/hosts"
- sudo /etc/init.d/networking restart
- sudo echo 'zabbix ALL=NOPASSWD: ALL' >> /etc/sudoers
-
- sudo sed -i "s/# EnableRemoteCommands=0/EnableRemoteCommands=1/" "/etc/zabbix/zabbix_agentd.conf"
- sudo sed -i "s/Server=127.0.0.1/Server=[Zabbix server's IP Address]/" "/etc/zabbix/zabbix_agentd.conf"
- sudo sed -i "s/ServerActive=127.0.0.1/ServerActive=[Zabbix server's IP Address:Port]/" "/etc/zabbix/zabbix_agentd.conf"
- sudo sed -i "s/Hostname=Zabbix server/Hostname=`cat /etc/hostname`/" "/etc/zabbix/zabbix_agentd.conf"
-
- sudo service apache2 restart
- sudo service zabbix-agent restart
- sudo echo 'ubuntu:ubuntu' | chpasswd
- sudo echo 'root:root' | chpasswd
- app_monitoring_policy:
- name: zabbix
- zabbix_username: Admin
- zabbix_password: zabbix
- zabbix_server_ip: 192.168.11.53
- zabbix_server_port: 80
- parameters:
- application:
- app_name: apache2
- app_port: 80
- ssh_username: ubuntu
- ssh_password: ubuntu
- app_status:
- condition: [down]
- actionname: cmd
- cmd-action: sudo service apache2 restart
- app_memory:
- condition: [greater,22]
- actionname: cmd
- cmd-action: sudo service apache2 stop
- OS:
- os_agent_info:
- condition: [down]
- actionname: cmd
- cmd-action: sudo service zabbix-agent restart
- os_proc_value:
- condition: [and less,22]
- actionname: cmd
- cmd-action: sudo reboot
- os_cpu_load:
- condition: [and greater,30]
- actionname: cmd
- cmd-action: sudo reboot
- os_cpu_usage:
- condition: [less,30]
- actionname: cmd
- cmd-action: sudo reboot
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnfd/tosca_vnfd_assign_fip_to_vdu_floating_ip_address.yaml b/samples/tosca-templates/vnfd/tosca_vnfd_assign_fip_to_vdu_floating_ip_address.yaml
deleted file mode 100644
index 2e7a8ffd1..000000000
--- a/samples/tosca-templates/vnfd/tosca_vnfd_assign_fip_to_vdu_floating_ip_address.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example Floating IP - Allocate specified IP from floating network and attach to CP.
-
-metadata:
- template_name: sample-tosca-vnfd-test-fip-with-floating-ip-address
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 1
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
- FIP1:
- type: tosca.nodes.network.FloatingIP
- properties:
- floating_network: public
- floating_ip_address: 192.168.56.154
- requirements:
- - link:
- node: CP1
\ No newline at end of file
diff --git a/samples/tosca-templates/vnfd/tosca_vnfd_assign_fip_to_vdu_floating_network.yaml b/samples/tosca-templates/vnfd/tosca_vnfd_assign_fip_to_vdu_floating_network.yaml
deleted file mode 100644
index d32425821..000000000
--- a/samples/tosca-templates/vnfd/tosca_vnfd_assign_fip_to_vdu_floating_network.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example Floating IP - Allocate one IP from floating network and attach to CP.
-
-metadata:
- template_name: sample-tosca-vnfd-test-fip-with-floating-network
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 1
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
- FIP1:
- type: tosca.nodes.network.FloatingIP
- properties:
- floating_network: public
- requirements:
- - link:
- node: CP1
\ No newline at end of file
diff --git a/samples/tosca-templates/vnffg-nsd/ns_param.yaml b/samples/tosca-templates/vnffg-nsd/ns_param.yaml
deleted file mode 100644
index a9c1686e6..000000000
--- a/samples/tosca-templates/vnffg-nsd/ns_param.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-nsd:
- vl1_name: net_mgmt
- vl2_name: net0
- net_src_port_id: 5610a59a-68d3-431b-aa11-843744e81041
- ip_dest_prefix: 10.10.0.5/24
- net_dst_port_id: 0eed8399-6c51-4eee-b037-0999c93ac898
diff --git a/samples/tosca-templates/vnffg-nsd/tosca-multiple-vnffg-nsd.yaml b/samples/tosca-templates/vnffg-nsd/tosca-multiple-vnffg-nsd.yaml
deleted file mode 100644
index c09ccc8cd..000000000
--- a/samples/tosca-templates/vnffg-nsd/tosca-multiple-vnffg-nsd.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Import VNFDs(already on-boarded) with input parameters
-imports:
- - sample-vnfd1
- - sample-vnfd2
-
-topology_template:
- inputs:
- vl1_name:
- type: string
- description: name of VL1 virtuallink
- default: net_mgmt
- vl2_name:
- type: string
- description: name of VL2 virtuallink
- default: net0
- net_src_port_id:
- type: string
- description: neutron port id of source port
- ip_dest_prefix:
- type: string
- description: IP prefix of destination port
-
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
- requirements:
- - virtualLink1: VL1
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl1_name}
- vendor: tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl2_name}
- vendor: tacker
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path inside ns (src_port->CP12->CP22->dst_port)
- properties:
- id: 51
- symmetrical: true
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- network_dst_port_id: {get_input: net_dst_port_id}
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: {get_input: ip_dest_prefix}
- path:
- - forwarder: sample-vnfd1
- capability: CP12
- - forwarder: sample-vnfd2
- capability: CP22
-
- Forwarding_path2:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path inside ns (src_port->CP12->dst_port)
- properties:
- id: 52
- symmetrical: false
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 8080-8080
- ip_proto: 6
- ip_dst_prefix: {get_input: ip_dest_prefix}
- path:
- - forwarder: sample-vnfd1
- capability: CP12
-
- groups:
-
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL1, VL2]
- connection_point: [CP12, CP22]
- constituent_vnfs: [sample-vnfd1, sample-vnfd2]
- members: [Forwarding_path1]
-
- VNFFG2:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP12]
- constituent_vnfs: [sample-vnfd1]
- members: [Forwarding_path2]
-
diff --git a/samples/tosca-templates/vnffg-nsd/tosca-single-vnffg-nsd.yaml b/samples/tosca-templates/vnffg-nsd/tosca-single-vnffg-nsd.yaml
deleted file mode 100644
index a4be14e52..000000000
--- a/samples/tosca-templates/vnffg-nsd/tosca-single-vnffg-nsd.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Import VNFDs(already on-boarded) with input parameters
-imports:
- - sample-vnfd1
- - sample-vnfd2
-
-topology_template:
- inputs:
- vl1_name:
- type: string
- description: name of VL1 virtuallink
- default: net_mgmt
- vl2_name:
- type: string
- description: name of VL2 virtuallink
- default: net0
- net_src_port_id:
- type: string
- description: neutron port id of source port
- net_dst_port_id:
- type: string
- description: neutron port id of destination port
- ip_dest_prefix:
- type: string
- description: IP prefix of destination port
-
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
- requirements:
- - virtualLink1: VL1
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl1_name}
- vendor: tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl2_name}
- vendor: tacker
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path inside ns (src_port->CP12->CP22->dst_port)
- properties:
- id: 51
- symmetrical: true
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- network_dst_port_id: {get_input: net_dst_port_id}
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: {get_input: ip_dest_prefix}
- path:
- - forwarder: sample-vnfd1
- capability: CP12
- - forwarder: sample-vnfd2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL1, VL2]
- connection_point: [CP12, CP22]
- constituent_vnfs: [sample-vnfd1, sample-vnfd2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffg-nsd/tosca-vnfd1-sample.yaml b/samples/tosca-templates/vnffg-nsd/tosca-vnfd1-sample.yaml
deleted file mode 100644
index 80d3b748a..000000000
--- a/samples/tosca-templates/vnffg-nsd/tosca-vnfd1-sample.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VirtualLinks of CP11 and CP22 will be provided by NS descriptor
-node_types:
- tosca.nodes.nfv.VNF1:
- requirements:
- - virtualLink1:
- type: tosca.nodes.nfv.VL
- required: true
- capabilities:
- forwader1:
- type: tosca.capabilities.nfv.Forwarder
-
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF1
- requirements:
- virtualLink1: [CP11, virtualLink]
- capabilities:
- forwarder1: [CP11, forwarder]
-
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo 1 > /proc/sys/net/ipv4/ip_forward
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
diff --git a/samples/tosca-templates/vnffg-nsd/tosca-vnfd2-sample.yaml b/samples/tosca-templates/vnffg-nsd/tosca-vnfd2-sample.yaml
deleted file mode 100644
index a0dd69717..000000000
--- a/samples/tosca-templates/vnffg-nsd/tosca-vnfd2-sample.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-node_types:
- tosca.nodes.nfv.VNF2:
- capabilities:
- forwarder1:
- type: tosca.capabilities.nfv.Forwarder
-topology_template:
- substitution_mappings:
- node_type: tosca.nodes.nfv.VNF2
- capabilities:
- forwarder1: [CP21, forwarder]
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo 1 > /proc/sys/net/ipv4/ip_forward
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffg-vnfd1.yaml b/samples/tosca-templates/vnffgd/tosca-vnffg-vnfd1.yaml
deleted file mode 100644
index 0f16981e0..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffg-vnfd1.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd1
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo 1 > /proc/sys/net/ipv4/ip_forward
- cat << EOF >> /etc/network/interfaces
- auto eth1
- iface eth1 inet dhcp
- auto eth2
- iface eth2 inet dhcp
- EOF
- ifup eth1
- ifup eth2
-
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL11
- - virtualBinding:
- node: VDU1
-
- CP12:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL12
- - virtualBinding:
- node: VDU1
-
- CP13:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL13
- - virtualBinding:
- node: VDU1
-
- VL11:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL12:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL13:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffg-vnfd2.yaml b/samples/tosca-templates/vnffgd/tosca-vnffg-vnfd2.yaml
deleted file mode 100644
index c01ecc933..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffg-vnfd2.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd1
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo 1 > /proc/sys/net/ipv4/ip_forward
- cat << EOF >> /etc/network/interfaces
- auto eth1
- iface eth1 inet dhcp
- auto eth2
- iface eth2 inet dhcp
- EOF
- ifup eth1
- ifup eth2
-
- CP21:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL21
- - virtualBinding:
- node: VDU1
-
- CP22:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL22
- - virtualBinding:
- node: VDU1
-
- CP23:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL23
- - virtualBinding:
- node: VDU1
-
- VL21:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL22:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- VL23:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-correlation.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-correlation.yaml
deleted file mode 100644
index 0c97a5cc0..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-correlation.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG template
-
-topology_template:
-
- inputs:
- net_src_port_id:
- type: string
- description: Port UUID of source VM.
-
- dst_port_range:
- type: string
- description: Destination port range
-
- ip_dst_pre:
- type: string
- description: Cidr format of destination ip.
-
- net_dst_port_id:
- type: string
- description: Port UUID of dest VM.
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP12->CP22)
- properties:
- id: 51
- correlation: mpls
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: { get_input: net_src_port_id }
- destination_port_range: { get_input: dst_port_range }
- ip_proto: 6
- ip_dst_prefix: { get_input: ip_dst_pre }
- network_dst_port_id: { get_input: net_dst_port_id }
- path:
- - forwarder: VNFD1
- capability: CP12
- sfc_encap: True
- - forwarder: VNFD2
- capability: CP22
- sfc_encap: True
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-multiple-classifiers-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-multiple-classifiers-sample.yaml
deleted file mode 100644
index 3a2f90cf9..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-multiple-classifiers-sample.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path (CP12->CP22)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - network_src_port_id: 640dfd77-c92b-45a3-b8fc-22712de480e1
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- - network_src_port_id: 640dfd77-c92b-45a3-b8fc-22712de480eda
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.2.2/24
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-sample.yaml
deleted file mode 100644
index 129e6ea3a..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-sample.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path (CP12->CP22)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - network_src_port_id: 640dfd77-c92b-45a3-b8fc-22712de480e1
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-symmetrical-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-symmetrical-sample.yaml
deleted file mode 100644
index cdb4eca36..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-legacy-symmetrical-sample.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample symmetrical VNFFG template (network_dst_port_id and ip_dst_prefix must be set)
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path (CP12->CP22)
- properties:
- id: 51
- symmetrical: true
- policy:
- type: ACL
- criteria:
- - network_src_port_id: e8463552-f236-4127-afc2-571f2b1d81bb
- network_dst_port_id: 8bff0400-a7b8-41eb-bbef-d7cff67a7361
- ip_dst_prefix: 10.10.0.9/24
- destination_port_range: 80-1024
- ip_proto: 6
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-multiple-classifiers-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-multiple-classifiers-sample.yaml
deleted file mode 100644
index cabff75d4..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-multiple-classifiers-sample.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP12->CP22)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: 640dfd77-c92b-45a3-b8fc-22712de480e1
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- - name: block_udp
- classifier:
- network_src_port_id: 640dfd77-c92b-45a3-b8fc-22712de480eda
- destination_port_range: 80-1024
- ip_proto: 17
- ip_dst_prefix: 192.168.2.2/24
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-no-classifier-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-no-classifier-sample.yaml
deleted file mode 100644
index 0e4c0b8f1..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-no-classifier-sample.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG template with no flow classifier
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP12->CP22)
- properties:
- id: 51
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
\ No newline at end of file
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-param-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-param-sample.yaml
deleted file mode 100644
index 06eca790f..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-param-sample.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG parameterized template
-
-topology_template:
-
- inputs:
- net_src_port_id:
- type: string
- description: Port UUID of source VM.
-
- dst_port_range:
- type: string
- description: Destination port range
-
- ip_dst_pre:
- type: string
- description: Cidr format of destination ip.
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path (CP12->CP22)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - network_src_port_id: { get_input: net_src_port_id }
- destination_port_range: { get_input: dst_port_range }
- ip_proto: 6
- ip_dst_prefix: { get_input: ip_dst_pre }
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-sample-VNF1.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-sample-VNF1.yaml
deleted file mode 100644
index 0210db3dc..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-sample-VNF1.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path CP12
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: 640dfd77-c92b-45a3-b8fc-22712de480e1
- destination_port_range: 8080-8080
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- path:
- - forwarder: VNFD1
- capability: CP12
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL12]
- connection_point: [CP12]
- constituent_vnfs: [VNFD1]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-sample.yaml
deleted file mode 100644
index 6bd4246fc..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-sample.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP12->CP22)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: 14ad4f29-629f-4b97-8bc8-86e96cb49974
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 10.10.0.5/24
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/tosca-vnffgd-symmetrical-sample.yaml b/samples/tosca-templates/vnffgd/tosca-vnffgd-symmetrical-sample.yaml
deleted file mode 100644
index 353066763..000000000
--- a/samples/tosca-templates/vnffgd/tosca-vnffgd-symmetrical-sample.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample symmetrical VNFFG template (network_dst_port_id and ip_dst_prefix must be set)
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP12->CP22)
- properties:
- id: 51
- symmetrical: true
- policy:
- type: ACL
- criteria:
- - name: block_tcp
- classifier:
- network_src_port_id: e8463552-f236-4127-afc2-571f2b1d81bb
- network_dst_port_id: 8bff0400-a7b8-41eb-bbef-d7cff67a7361
- ip_dst_prefix: 10.10.0.9/24
- destination_port_range: 80-1024
- ip_proto: 6
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/samples/tosca-templates/vnffgd/vnffg-param-file.yaml b/samples/tosca-templates/vnffgd/vnffg-param-file.yaml
deleted file mode 100644
index e0bcec5ff..000000000
--- a/samples/tosca-templates/vnffgd/vnffg-param-file.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-net_src_port_id: 640dfd77-c92b-45a3-b8fc-22712de480e1
-dst_port_range: 80-1024
-ip_dst_pre: 192.168.1.2/24
diff --git a/setup.cfg b/setup.cfg
index 478387b84..e0b848c62 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -62,23 +62,8 @@ tacker.tacker.vnfm.drivers =
kubernetes = tacker.vnfm.infra_drivers.kubernetes.kubernetes_driver:Kubernetes
tacker.tacker.mgmt.drivers =
noop = tacker.vnfm.mgmt_drivers.noop:VnfMgmtNoop
- openwrt = tacker.vnfm.mgmt_drivers.openwrt.openwrt:VnfMgmtOpenWRT
vnflcm_noop = tacker.vnfm.mgmt_drivers.vnflcm_noop:VnflcmMgmtNoop
-tacker.tacker.monitor.drivers =
- ping = tacker.vnfm.monitor_drivers.ping.ping:VNFMonitorPing
- http_ping = tacker.vnfm.monitor_drivers.http_ping.http_ping:VNFMonitorHTTPPing
-tacker.tacker.app_monitor.drivers =
- zabbix = tacker.vnfm.monitor_drivers.zabbix.zabbix:VNFMonitorZabbix
-tacker.tacker.alarm_monitor.drivers =
- ceilometer = tacker.vnfm.monitor_drivers.ceilometer.ceilometer:VNFMonitorCeilometer
-tacker.tacker.policy.actions =
- autoscaling = tacker.vnfm.policy_actions.autoscaling.autoscaling:VNFActionAutoscaling
- respawn = tacker.vnfm.policy_actions.respawn.respawn:VNFActionRespawn
- log = tacker.vnfm.policy_actions.log.log:VNFActionLog
- log_and_kill = tacker.vnfm.policy_actions.log.log:VNFActionLogAndKill
- vdu_autoheal = tacker.vnfm.policy_actions.vdu_autoheal.vdu_autoheal:VNFActionVduAutoheal
oslo.config.opts =
- tacker.alarm_receiver = tacker.alarm_receiver:config_opts
tacker.auth = tacker.auth:config_opts
tacker.common.config = tacker.common.config:config_opts
tacker.common.ext_oauth2_auth = tacker.common.ext_oauth2_auth:config_opts
@@ -88,18 +73,12 @@ oslo.config.opts =
tacker.nfvo.drivers.vim.kubernetes_driver = tacker.nfvo.drivers.vim.kubernetes_driver:config_opts
tacker.nfvo.drivers.vim.openstack_driver = tacker.nfvo.drivers.vim.openstack_driver:config_opts
tacker.nfvo.nfvo_plugin = tacker.nfvo.nfvo_plugin:config_opts
- tacker.plugins.fenix = tacker.plugins.fenix:config_opts
tacker.service = tacker.service:config_opts
tacker.sol_refactored.common.config = tacker.sol_refactored.common.config:config_opts
tacker.vnflcm.vnflcm_driver = tacker.vnflcm.vnflcm_driver:config_opts
tacker.vnfm.infra_drivers.kubernetes.kubernetes_driver = tacker.vnfm.infra_drivers.kubernetes.kubernetes_driver:config_opts
tacker.vnfm.infra_drivers.openstack.openstack = tacker.vnfm.infra_drivers.openstack.openstack:config_opts
tacker.vnfm.infra_drivers.openstack.translate_template = tacker.vnfm.infra_drivers.openstack.translate_template:config_opts
- tacker.vnfm.mgmt_drivers.openwrt.openwrt = tacker.vnfm.mgmt_drivers.openwrt.openwrt:config_opts
- tacker.vnfm.monitor = tacker.vnfm.monitor:config_opts
- tacker.vnfm.monitor_drivers.ceilometer.ceilometer = tacker.vnfm.monitor_drivers.ceilometer.ceilometer:config_opts
- tacker.vnfm.monitor_drivers.http_ping.http_ping = tacker.vnfm.monitor_drivers.http_ping.http_ping:config_opts
- tacker.vnfm.monitor_drivers.ping.ping = tacker.vnfm.monitor_drivers.ping.ping:config_opts
tacker.vnfm.nfvo_client = tacker.vnfm.nfvo_client:config_opts
tacker.vnfm.plugin = tacker.vnfm.plugin:config_opts
tacker.wsgi = tacker.wsgi:config_opts
diff --git a/tacker/alarm_receiver.py b/tacker/alarm_receiver.py
deleted file mode 100644
index 6a104ee84..000000000
--- a/tacker/alarm_receiver.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from urllib import parse
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-
-from tacker._i18n import _
-from tacker.vnfm.monitor_drivers.token import Token
-from tacker import wsgi
-# check alarm url with db --> move to plugin
-
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
- cfg.StrOpt('username', default='admin',
- help=_('User name for alarm monitoring')),
- cfg.StrOpt('password', default='devstack',
- help=_('Password for alarm monitoring')),
- cfg.StrOpt('project_name', default='admin',
- help=_('Project name for alarm monitoring')),
- cfg.StrOpt('user_domain_name', default='Default',
- help=_('User domain name for alarm monitoring')),
- cfg.StrOpt('project_domain_name', default='Default',
- help=_('Project domain name for alarm monitoring')),
-]
-
-cfg.CONF.register_opts(OPTS, 'alarm_auth')
-
-
-def config_opts():
- return [('alarm_auth', OPTS)]
-
-
-class AlarmReceiver(wsgi.Middleware):
- def process_request(self, req):
- if req.method != 'POST':
- return
- url = req.url
- if not self.handle_url(url):
- return
- prefix, info, params = self.handle_url(req.url)
- resource = 'trigger' if info[4] != 'maintenance' else 'maintenance'
- redirect = resource + 's'
- auth = cfg.CONF.keystone_authtoken
- alarm_auth = cfg.CONF.alarm_auth
- token = Token(username=alarm_auth.username,
- password=alarm_auth.password,
- project_name=alarm_auth.project_name,
- auth_url=auth.auth_url + '/v3',
- user_domain_name=alarm_auth.user_domain_name,
- project_domain_name=alarm_auth.project_domain_name)
-
- token_identity = token.create_token()
- req.headers['X_AUTH_TOKEN'] = token_identity
- # Change the body request
- if req.body:
- body_dict = dict()
- body_dict[resource] = {}
- body_dict[resource].setdefault('params', {})
- # Update params in the body request
- body_info = jsonutils.loads(req.body)
- body_dict[resource]['params']['credential'] = info[6]
- if resource == 'maintenance':
- body_info.update({
- 'body': self._handle_maintenance_body(body_info)})
- del body_info['reason_data']
- else:
- # Update policy and action
- body_dict[resource]['policy_name'] = info[4]
- body_dict[resource]['action_name'] = info[5]
- body_dict[resource]['params']['data'] = body_info
- req.body = jsonutils.dump_as_bytes(body_dict)
- LOG.debug('Body alarm: %s', req.body)
- # Need to change url because of mandatory
- req.environ['PATH_INFO'] = prefix + redirect
- req.environ['QUERY_STRING'] = ''
- LOG.debug('alarm url in receiver: %s', req.url)
-
- def handle_url(self, url):
- # alarm_url = 'http://host:port/v1.0/vnfs/vnf-uuid/mon-policy-name/action-name/8ef785' # noqa
- parts = parse.urlparse(url)
- p = parts.path.split('/')
- if len(p) != 7:
- return None
-
- if any((p[0] != '', p[2] != 'vnfs')):
- return None
- # decode action name: respawn%25log
- p[5] = parse.unquote(p[5])
- qs = parse.parse_qs(parts.query)
- params = dict((k, v[0]) for k, v in qs.items())
- prefix_url = '/%(collec)s/%(vnf_uuid)s/' % {'collec': p[2],
- 'vnf_uuid': p[3]}
- return prefix_url, p, params
-
- def _handle_maintenance_body(self, body_info):
- body = {}
- traits_list = body_info['reason_data']['event']['traits']
- if type(traits_list) is not list:
- return
- for key, t_type, val in traits_list:
- if t_type == 1 and val and (val[0] == '[' or val[0] == '{'):
- body[key] = eval(val)
- else:
- body[key] = val
- return body
diff --git a/tacker/api/v1/base.py b/tacker/api/v1/base.py
index 9aef532fa..83788b2b4 100644
--- a/tacker/api/v1/base.py
+++ b/tacker/api/v1/base.py
@@ -567,20 +567,6 @@ class Controller(object):
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
- # skip validating vnfd_id when vnfd_template is specified to
- # create vnf
- if (resource == 'vnf') and ('vnfd_template' in body['vnf'])\
- and (attr == "vnfd_id") and is_create:
- continue
- # skip validating vnffgd_id when vnffgd_template is provided
- if ((resource == 'vnffg')
- and ('vnffgd_template' in body['vnffg'])
- and (attr == 'vnffgd_id') and is_create):
- continue
- # skip validating nsd_id when nsd_template is provided
- if (resource == 'ns') and ('nsd_template' in body['ns'])\
- and (attr == 'nsd_id') and is_create:
- continue
res = attributes.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
diff --git a/tacker/common/csar_utils.py b/tacker/common/csar_utils.py
index a2db6a6fe..9d2db07b0 100644
--- a/tacker/common/csar_utils.py
+++ b/tacker/common/csar_utils.py
@@ -32,7 +32,6 @@ from toscaparser.tosca_template import ToscaTemplate
from tacker.common import exceptions
import tacker.conf
-from tacker.extensions import vnfm
from tacker.tosca import utils as toscautils
@@ -202,23 +201,11 @@ def _populate_flavour_data(tosca):
flavour.update(
{'instantiation_levels': instantiation_levels})
- mgmt_driver = None
for template_name, node_tpl in \
tp.tpl.get('node_templates').items():
# check the flavour property in vnf data
_update_flavour_data_from_vnf(
tp.custom_defs, node_tpl, flavour)
- if node_tpl['type'] in CONF.vnf_package.get_lower_list:
- if node_tpl['type'] == "tosca.nodes.nfv.VDU.Tacker":
- # get mgmt_driver
- mgmt_driver_flavour = \
- node_tpl['properties'].get('mgmt_driver')
- if mgmt_driver_flavour:
- if mgmt_driver and \
- mgmt_driver_flavour != mgmt_driver:
- raise vnfm.MultipleMGMTDriversSpecified()
- mgmt_driver = mgmt_driver_flavour
- flavour.update({'mgmt_driver': mgmt_driver})
for template_name, node_tpl in \
tp.tpl.get('node_templates').items():
diff --git a/tacker/common/exceptions.py b/tacker/common/exceptions.py
index 28de2adfd..ed231ef9d 100644
--- a/tacker/common/exceptions.py
+++ b/tacker/common/exceptions.py
@@ -161,50 +161,10 @@ class InvalidContentType(TackerException):
message = _("Invalid content type %(content_type)s")
-class NetworkVlanRangeError(TackerException):
- message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'")
-
- def __init__(self, **kwargs):
- # Convert vlan_range tuple to 'start:end' format for display
- if isinstance(kwargs['vlan_range'], tuple):
- kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range']
- super(NetworkVlanRangeError, self).__init__(**kwargs)
-
-
class DuplicatedExtension(TackerException):
message = _("Found duplicate extension: %(alias)s")
-class MgmtDriverException(TackerException):
- message = _("VNF configuration failed")
-
-
-class AlarmUrlInvalid(BadRequest):
- message = _("Invalid alarm url for VNF %(vnf_id)s")
-
-
-class TriggerNotFound(NotFound):
- message = _("Trigger %(trigger_name)s does not exist for VNF %(vnf_id)s")
-
-
-class VnfPolicyNotFound(NotFound):
- message = _("Policy %(policy)s does not exist for VNF %(vnf_id)s")
-
-
-class VnfPolicyActionInvalid(BadRequest):
- message = _("Invalid action %(action)s for policy %(policy)s, "
- "should be one of %(valid_actions)s")
-
-
-class VnfPolicyTypeInvalid(BadRequest):
- message = _("Invalid type %(type)s for policy %(policy)s, "
- "should be one of %(valid_types)s")
-
-
-class DuplicateResourceName(TackerException):
- message = _("%(resource)s with name %(name)s already exists")
-
-
class DuplicateEntity(Conflict):
message = _("%(_type)s already exist with given %(entry)s")
diff --git a/tacker/common/utils.py b/tacker/common/utils.py
index 2f58a3b30..7eb8854ca 100644
--- a/tacker/common/utils.py
+++ b/tacker/common/utils.py
@@ -24,12 +24,10 @@ import inspect
import logging as std_logging
import math
import os
-import random
import re
import signal
import socket
from stevedore import driver
-import string
import sys
from eventlet.green import subprocess
@@ -253,14 +251,6 @@ def deep_update(orig_dict, new_dict):
orig_dict[key] = value
-def generate_resource_name(resource, prefix='tmpl'):
- return prefix + '-' \
- + ''.join(random.SystemRandom().choice(
- string.ascii_lowercase + string.digits)
- for _ in range(16)) \
- + '-' + resource
-
-
def get_auth_url_v3(auth_url):
if re.match('.+v3/?$', auth_url) is not None:
return auth_url
diff --git a/tacker/conf/vnf_package.py b/tacker/conf/vnf_package.py
index 3ab752035..66bf1be48 100644
--- a/tacker/conf/vnf_package.py
+++ b/tacker/conf/vnf_package.py
@@ -69,7 +69,7 @@ Related options:
help=_("Exclude node from node_template")),
cfg.ListOpt('get_lower_list',
- default=['tosca.nodes.nfv.VNF', 'tosca.nodes.nfv.VDU.Tacker'],
+ default=['tosca.nodes.nfv.VNF'],
help=_("List of types to get from lower-vnfd")),
cfg.ListOpt('del_input_list',
diff --git a/tacker/db/migration/models/head.py b/tacker/db/migration/models/head.py
index 8ac1e2bea..245919150 100644
--- a/tacker/db/migration/models/head.py
+++ b/tacker/db/migration/models/head.py
@@ -23,7 +23,5 @@ Based on this comparison database can be healed with healing migration.
from tacker.db import model_base # noqa
from tacker.db.nfvo import nfvo_db # noqa
-from tacker.db.nfvo import ns_db # noqa
-from tacker.db.nfvo import vnffg_db # noqa
from tacker.db.vnfm import vnfm_db # noqa
from tacker.sol_refactored.db.sqlalchemy import models # noqa
diff --git a/tacker/db/nfvo/nfvo_db_plugin.py b/tacker/db/nfvo/nfvo_db_plugin.py
index 3c5ddd94d..563763ee5 100644
--- a/tacker/db/nfvo/nfvo_db_plugin.py
+++ b/tacker/db/nfvo/nfvo_db_plugin.py
@@ -20,14 +20,11 @@ from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import sql
from tacker.common import exceptions
-from tacker.db.common_services import common_services_db_plugin
from tacker.db import db_base
from tacker.db.nfvo import nfvo_db
from tacker.db.vnfm import vnfm_db
from tacker.extensions import nfvo
from tacker import manager
-from tacker.plugins.common import constants
-
VIM_ATTRIBUTES = ('id', 'type', 'tenant_id', 'name', 'description',
'placement_attr', 'shared', 'is_default',
@@ -40,7 +37,6 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
def __init__(self):
super(NfvoPluginDb, self).__init__()
- self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
@property
def _core_plugin(self):
@@ -117,15 +113,6 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
entry=e.columns)
vim_dict = self._make_vim_dict(vim_db)
- # TODO(hiromu): Remove Event table
- # NOTE(hiromu): "REGISTERED" in res_state is a workaround to delete
- # the status field from the Vim table.
- self._cos_db_plg.create_event(
- context, res_id=vim_dict['id'],
- res_type=constants.RES_TYPE_VIM,
- res_state='REGISTERED',
- evt_type=constants.RES_EVT_CREATE,
- tstamp=vim_dict['created_at'])
return vim_dict
def delete_vim(self, context, vim_id, soft_delete=True):
@@ -133,16 +120,6 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
vim_db = self._get_resource(context, nfvo_db.Vim, vim_id)
if soft_delete:
vim_db.update({'deleted_at': timeutils.utcnow()})
-
- # TODO(hiromu): Remove Event table
- # NOTE(hiromu): "REGISTERED" in res_state is a workaround to
- # delete the status field from the Vim table.
- self._cos_db_plg.create_event(
- context, res_id=vim_db['id'],
- res_type=constants.RES_TYPE_VIM,
- res_state='REGISTERED',
- evt_type=constants.RES_EVT_DELETE,
- tstamp=vim_db[constants.RES_EVT_DELETED_FLD])
else:
context.session.query(nfvo_db.VimAuth).filter_by(
vim_id=vim_id).delete()
@@ -192,16 +169,6 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
vim_project})
vim_db.update({'updated_at': timeutils.utcnow()})
- # TODO(hiromu): Remove Event table
- # NOTE(hiromu): "REGISTERED" in res_state is a workaround to delete
- # the status field from the Vim table.
- self._cos_db_plg.create_event(
- context, res_id=vim_db['id'],
- res_type=constants.RES_TYPE_VIM,
- res_state='REGISTERED',
- evt_type=constants.RES_EVT_UPDATE,
- tstamp=vim_db[constants.RES_EVT_UPDATED_FLD])
-
return self.get_vim(context, vim_id)
def _validate_default_vim(self, context, vim, vim_id=None):
diff --git a/tacker/db/nfvo/ns_db.py b/tacker/db/nfvo/ns_db.py
deleted file mode 100644
index eb777d9ab..000000000
--- a/tacker/db/nfvo/ns_db.py
+++ /dev/null
@@ -1,386 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from datetime import datetime
-
-from oslo_db.exception import DBDuplicateEntry
-from oslo_log import log as logging
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc as orm_exc
-from sqlalchemy import schema
-
-from tacker.common import exceptions
-from tacker.db.common_services import common_services_db_plugin
-from tacker.db import db_base
-from tacker.db import model_base
-from tacker.db import models_v1
-from tacker.db import types
-from tacker.extensions import nfvo
-from tacker.extensions.nfvo_plugins import network_service
-from tacker.plugins.common import constants
-
-LOG = logging.getLogger(__name__)
-_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
-_ACTIVE_UPDATE_ERROR_DEAD = (
- constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
- constants.PENDING_DELETE, constants.ERROR, constants.DEAD)
-CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)
-
-
-###########################################################################
-# db tables
-
-class NSD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
- models_v1.Audit):
- """Represents NSD to create NS."""
-
- __tablename__ = 'nsd'
- # Descriptive name
- name = sa.Column(sa.String(255), nullable=False)
- description = sa.Column(sa.Text)
- vnfds = sa.Column(types.Json, nullable=True)
-
- # Nsd template source - onboarded
- template_source = sa.Column(sa.String(255), server_default='onboarded')
-
- # (key, value) pair to spin up
- attributes = orm.relationship('NSDAttribute',
- backref='nsd')
-
- __table_args__ = (
- schema.UniqueConstraint(
- "tenant_id",
- "name",
- name="uniq_nsd0tenant_id0name"),
- )
-
-
-class NSDAttribute(model_base.BASE, models_v1.HasId):
- """Represents attributes necessary for creation of ns in (key, value) pair
-
- """
-
- __tablename__ = 'nsd_attribute'
- nsd_id = sa.Column(types.Uuid, sa.ForeignKey('nsd.id'),
- nullable=False)
- key = sa.Column(sa.String(255), nullable=False)
- value = sa.Column(sa.TEXT(65535), nullable=True)
-
-
-class NS(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
- models_v1.Audit):
- """Represents network services that deploys services.
-
- """
-
- __tablename__ = 'ns'
- nsd_id = sa.Column(types.Uuid, sa.ForeignKey('nsd.id'))
- nsd = orm.relationship('NSD')
-
- name = sa.Column(sa.String(255), nullable=False)
- description = sa.Column(sa.Text, nullable=True)
-
- # Dict of VNF details that network service launches
- vnf_ids = sa.Column(sa.TEXT(65535), nullable=True)
-
- # VNFFG ids
- vnffg_ids = sa.Column(sa.TEXT(65535), nullable=True)
-
- # Dict of mgmt ip addresses that network service launches
- mgmt_ip_addresses = sa.Column(sa.TEXT(65535), nullable=True)
-
- status = sa.Column(sa.String(64), nullable=False)
- vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'), nullable=False)
- error_reason = sa.Column(sa.Text, nullable=True)
-
- __table_args__ = (
- schema.UniqueConstraint(
- "tenant_id",
- "name",
- name="uniq_ns0tenant_id0name"),
- )
-
-
-class NSPluginDb(network_service.NSPluginBase, db_base.CommonDbMixin):
-
- def __init__(self):
- super(NSPluginDb, self).__init__()
- self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
-
- def _get_resource(self, context, model, id):
- try:
- return self._get_by_id(context, model, id)
- except orm_exc.NoResultFound:
- if issubclass(model, NSD):
- raise network_service.NSDNotFound(nsd_id=id)
- if issubclass(model, NS):
- raise network_service.NSNotFound(ns_id=id)
- else:
- raise
-
- def _get_ns_db(self, context, ns_id, current_statuses):
- try:
- ns_db = (
- self._model_query(context, NS).
- filter(NS.id == ns_id).
- filter(NS.status.in_(current_statuses)).
- with_for_update().one())
- except orm_exc.NoResultFound:
- raise network_service.NSNotFound(ns_id=ns_id)
- return ns_db
-
- def _update_ns_db(self, ns_db, new_status):
- ns_db.update({'status': new_status})
- return ns_db
-
- def _make_attributes_dict(self, attributes_db):
- return dict((attr.key, attr.value) for attr in attributes_db)
-
- def _make_nsd_dict(self, nsd, fields=None):
- res = {
- 'attributes': self._make_attributes_dict(nsd['attributes']),
- }
- key_list = ('id', 'tenant_id', 'name', 'description',
- 'created_at', 'updated_at', 'vnfds', 'template_source')
- res.update((key, nsd[key]) for key in key_list)
- return self._fields(res, fields)
-
- def _make_dev_attrs_dict(self, dev_attrs_db):
- return dict((arg.key, arg.value) for arg in dev_attrs_db)
-
- def _make_ns_dict(self, ns_db, fields=None):
- LOG.debug('ns_db %s', ns_db)
- res = {}
- key_list = ('id', 'tenant_id', 'nsd_id', 'name', 'description',
- 'vnf_ids', 'vnffg_ids', 'status', 'mgmt_ip_addresses',
- 'error_reason', 'vim_id', 'created_at', 'updated_at')
- res.update((key, ns_db[key]) for key in key_list)
- return self._fields(res, fields)
-
- def create_nsd(self, context, nsd):
- vnfds = nsd['vnfds']
- nsd = nsd['nsd']
- LOG.debug('nsd %s', nsd)
- tenant_id = self._get_tenant_id_for_create(context, nsd)
- template_source = nsd.get('template_source')
-
- try:
- with context.session.begin(subtransactions=True):
- nsd_id = uuidutils.generate_uuid()
- nsd_db = NSD(
- id=nsd_id,
- tenant_id=tenant_id,
- name=nsd.get('name'),
- vnfds=vnfds,
- description=nsd.get('description'),
- deleted_at=datetime.min,
- template_source=template_source)
- context.session.add(nsd_db)
- for (key, value) in nsd.get('attributes', {}).items():
- attribute_db = NSDAttribute(
- id=uuidutils.generate_uuid(),
- nsd_id=nsd_id,
- key=key,
- value=value)
- context.session.add(attribute_db)
- except DBDuplicateEntry as e:
- raise exceptions.DuplicateEntity(
- _type="nsd",
- entry=e.columns)
- LOG.debug('nsd_db %(nsd_db)s %(attributes)s ',
- {'nsd_db': nsd_db,
- 'attributes': nsd_db.attributes})
- nsd_dict = self._make_nsd_dict(nsd_db)
- LOG.debug('nsd_dict %s', nsd_dict)
- self._cos_db_plg.create_event(
- context, res_id=nsd_dict['id'],
- res_type=constants.RES_TYPE_NSD,
- res_state=constants.RES_EVT_ONBOARDED,
- evt_type=constants.RES_EVT_CREATE,
- tstamp=nsd_dict[constants.RES_EVT_CREATED_FLD])
- return nsd_dict
-
- def delete_nsd(self, context, nsd_id, soft_delete=True):
- with context.session.begin(subtransactions=True):
- nss_db = context.session.query(NS).filter_by(
- nsd_id=nsd_id).first()
- if nss_db is not None and nss_db.deleted_at is None:
- raise nfvo.NSDInUse(nsd_id=nsd_id)
-
- nsd_db = self._get_resource(context, NSD, nsd_id)
- if soft_delete:
- nsd_db.update({'deleted_at': timeutils.utcnow()})
- self._cos_db_plg.create_event(
- context, res_id=nsd_db['id'],
- res_type=constants.RES_TYPE_NSD,
- res_state=constants.RES_EVT_NA_STATE,
- evt_type=constants.RES_EVT_DELETE,
- tstamp=nsd_db[constants.RES_EVT_DELETED_FLD])
- else:
- context.session.query(NSDAttribute).filter_by(
- nsd_id=nsd_id).delete()
- context.session.delete(nsd_db)
-
- def get_nsd(self, context, nsd_id, fields=None):
- nsd_db = self._get_resource(context, NSD, nsd_id)
- return self._make_nsd_dict(nsd_db)
-
- def get_nsds(self, context, filters, fields=None):
- if ('template_source' in filters) and \
- (filters['template_source'][0] == 'all'):
- filters.pop('template_source')
- return self._get_collection(context, NSD,
- self._make_nsd_dict,
- filters=filters, fields=fields)
-
- # reference implementation. needs to be overrided by subclass
- def create_ns(self, context, ns):
- LOG.debug('ns %s', ns)
- ns = ns['ns']
- tenant_id = self._get_tenant_id_for_create(context, ns)
- nsd_id = ns['nsd_id']
- vim_id = ns['vim_id']
- name = ns.get('name')
- ns_id = ns['ns_id']
- description = None
- if 'description' in ns:
- description = ns.get('description')
- try:
- with context.session.begin(subtransactions=True):
- if description is None:
- nsd_db = self._get_resource(context, NSD,
- nsd_id)
- description = nsd_db.description
- ns_db = NS(id=ns_id,
- tenant_id=tenant_id,
- name=name,
- description=description,
- vnf_ids=None,
- vnffg_ids=None,
- status=constants.PENDING_CREATE,
- mgmt_ip_addresses=None,
- nsd_id=nsd_id,
- vim_id=vim_id,
- error_reason=None,
- deleted_at=datetime.min)
- context.session.add(ns_db)
- except DBDuplicateEntry as e:
- raise exceptions.DuplicateEntity(
- _type="ns",
- entry=e.columns)
- evt_details = "NS UUID assigned."
- self._cos_db_plg.create_event(
- context, res_id=ns_id,
- res_type=constants.RES_TYPE_NS,
- res_state=constants.PENDING_CREATE,
- evt_type=constants.RES_EVT_CREATE,
- tstamp=ns_db[constants.RES_EVT_CREATED_FLD],
- details=evt_details)
- return self._make_ns_dict(ns_db)
-
- def create_ns_post(self, context, ns_id, vnfd_dict, vnffgd_templates):
- LOG.debug('ns ID %s', ns_id)
- mgmt_ip_addresses = dict()
- vnf_ids = dict()
- vnffg_ids = dict()
-
- if not vnf_ids:
- vnf_ids = None
- if not mgmt_ip_addresses:
- mgmt_ip_addresses = None
- if not vnffg_ids:
- vnffg_ids = None
- status = constants.ACTIVE
-
- with context.session.begin(subtransactions=True):
- ns_db = self._get_resource(context, NS, ns_id)
- ns_db.update({'vnf_ids': vnf_ids})
- ns_db.update({'vnffg_ids': vnffg_ids})
- ns_db.update({'mgmt_ip_addresses': mgmt_ip_addresses})
- ns_db.update({'status': status})
- ns_db.update({'updated_at': timeutils.utcnow()})
- ns_dict = self._make_ns_dict(ns_db)
-
- self._cos_db_plg.create_event(
- context, res_id=ns_dict['id'],
- res_type=constants.RES_TYPE_NS,
- res_state=constants.RES_EVT_NA_STATE,
- evt_type=constants.RES_EVT_UPDATE,
- tstamp=ns_dict[constants.RES_EVT_UPDATED_FLD])
- return ns_dict
-
- # reference implementation. needs to be overrided by subclass
- def delete_ns_pre(self, context, ns_id, force_delete=False):
- with context.session.begin(subtransactions=True):
- ns_db = self._get_ns_db(
- context, ns_id, _ACTIVE_UPDATE_ERROR_DEAD)
- if not force_delete:
- if (ns_db is not None and ns_db.status in
- [constants.PENDING_DELETE,
- constants.PENDING_CREATE,
- constants.PENDING_UPDATE]):
- raise network_service.NSInUse(ns_id=ns_id)
- ns_db = self._update_ns_db(ns_db, constants.PENDING_DELETE)
- deleted_ns_db = self._make_ns_dict(ns_db)
- self._cos_db_plg.create_event(
- context, res_id=ns_id,
- res_type=constants.RES_TYPE_NS,
- res_state=deleted_ns_db['status'],
- evt_type=constants.RES_EVT_DELETE,
- tstamp=timeutils.utcnow(), details="NS delete initiated")
- return deleted_ns_db
-
- def delete_ns_post(self, context, ns_id, soft_delete=True,
- force_delete=False):
- ns = self.get_ns(context, ns_id)
- nsd_id = ns.get('nsd_id')
- with context.session.begin(subtransactions=True):
- if force_delete:
- query = (
- self._model_query(context, NS).
- filter(NS.id == ns_id))
- else:
- query = (
- self._model_query(context, NS).
- filter(NS.id == ns_id).
- filter(NS.status == constants.PENDING_DELETE))
- if soft_delete:
- deleted_time_stamp = timeutils.utcnow()
- query.update({'deleted_at': deleted_time_stamp})
- self._cos_db_plg.create_event(
- context, res_id=ns_id,
- res_type=constants.RES_TYPE_NS,
- res_state=constants.PENDING_DELETE,
- evt_type=constants.RES_EVT_DELETE,
- tstamp=deleted_time_stamp,
- details="ns Delete Complete")
- else:
- query.delete()
- try:
- template_db = self._get_resource(context, NSD, nsd_id)
- if template_db.get('template_source') == 'inline':
- self.delete_nsd(context, nsd_id)
- except orm_exc.NoResultFound:
- pass
-
- def get_ns(self, context, ns_id, fields=None):
- ns_db = self._get_resource(context, NS, ns_id)
- return self._make_ns_dict(ns_db)
-
- def get_nss(self, context, filters=None, fields=None):
- return self._get_collection(context, NS,
- self._make_ns_dict,
- filters=filters, fields=fields)
diff --git a/tacker/db/nfvo/vnffg_db.py b/tacker/db/nfvo/vnffg_db.py
deleted file mode 100644
index 705cac435..000000000
--- a/tacker/db/nfvo/vnffg_db.py
+++ /dev/null
@@ -1,1416 +0,0 @@
-# Copyright 2016 Red Hat Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_utils import uuidutils
-import random
-import sqlalchemy as sa
-
-from oslo_log import log as logging
-from sqlalchemy import orm
-from sqlalchemy.orm import exc as orm_exc
-from tacker.db import db_base
-from tacker.db import model_base
-from tacker.db import models_v1
-from tacker.db.nfvo.ns_db import NS
-from tacker.db import types
-from tacker.extensions import nfvo
-from tacker.extensions.nfvo_plugins import vnffg
-from tacker import manager
-from tacker.plugins.common import constants
-
-
-LOG = logging.getLogger(__name__)
-_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
-_ACTIVE_UPDATE_ERROR_DEAD = (
- constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
- constants.PENDING_DELETE, constants.ERROR, constants.DEAD)
-_VALID_VNFFG_UPDATE_ATTRIBUTES = ('vnf_mapping',)
-_VALID_SFC_UPDATE_ATTRIBUTES = ('chain', 'symmetrical')
-_VALID_NFP_UPDATE_ATTRIBUTES = ('symmetrical',)
-_VALID_FC_UPDATE_ATTRIBUTES = ()
-MATCH_CRITERIA = (
- 'eth_type', 'eth_src', 'eth_dst', 'vlan_id', 'vlan_pcp', 'mpls_label',
- 'mpls_tc', 'ip_dscp', 'ip_ecn', 'ip_src_prefix', 'ip_dst_prefix',
- 'ip_proto', 'destination_port_range', 'source_port_range',
- 'network_src_port_id', 'network_dst_port_id', 'network_id', 'network_name',
- 'tenant_id', 'icmpv4_type', 'icmpv4_code', 'arp_op', 'arp_spa',
- 'arp_tpa', 'arp_sha', 'arp_tha', 'ipv6_src', 'ipv6_dst', 'ipv6_flabel',
- 'icmpv6_type', 'icmpv6_code', 'ipv6_nd_target', 'ipv6_nd_sll',
- 'ipv6_nd_tll')
-
-MATCH_DB_KEY_LIST = (
- 'eth_type', 'eth_src', 'eth_dst', 'vlan_id', 'vlan_pcp', 'mpls_label',
- 'mpls_tc', 'ip_dscp', 'ip_ecn', 'ip_src_prefix', 'ip_dst_prefix',
- 'ip_proto', 'destination_port_min', 'destination_port_max',
- 'source_port_min', 'source_port_max', 'network_src_port_id',
- 'network_dst_port_id', 'network_id', 'tenant_id', 'icmpv4_type',
- 'icmpv4_code', 'arp_op', 'arp_spa', 'arp_tpa', 'arp_sha', 'arp_tha',
- 'ipv6_src', 'ipv6_dst', 'ipv6_flabel', 'icmpv6_type', 'icmpv6_code',
- 'ipv6_nd_target', 'ipv6_nd_sll', 'ipv6_nd_tll'
-)
-
-CP = 'connection_points'
-
-
-class VnffgTemplate(model_base.BASE, models_v1.HasId, models_v1.HasTenant):
- """Represents template to create a VNF Forwarding Graph."""
-
- # Descriptive name
- name = sa.Column(sa.String(255), nullable=False)
- description = sa.Column(sa.Text)
-
- # Vnffg template
- template = sa.Column(types.Json)
-
- # Vnffgd template source - onboarded
- template_source = sa.Column(sa.String(255), server_default='onboarded')
-
-
-class Vnffg(model_base.BASE, models_v1.HasTenant, models_v1.HasId):
- """VNF Forwarding Graph Data Model"""
-
- name = sa.Column(sa.String(255), nullable=False)
- description = sa.Column(sa.String(255), nullable=True)
-
- # List of associated NFPs
- forwarding_paths = orm.relationship("VnffgNfp", backref="vnffg")
-
- vnffgd_id = sa.Column(types.Uuid, sa.ForeignKey('vnffgtemplates.id'))
- vnffgd = orm.relationship('VnffgTemplate')
-
- status = sa.Column(sa.String(255), nullable=False)
-
- # Mapping of VNFD to VNF instance names
- vnf_mapping = sa.Column(types.Json)
-
- attributes = sa.Column(types.Json)
-
- # Associated Network Service
- ns_id = sa.Column(types.Uuid, sa.ForeignKey('ns.id'), nullable=True)
-
-
-class VnffgNfp(model_base.BASE, models_v1.HasTenant, models_v1.HasId):
- """Network Forwarding Path Data Model"""
-
- name = sa.Column(sa.String(255), nullable=False)
- vnffg_id = sa.Column(types.Uuid, sa.ForeignKey('vnffgs.id'),
- nullable=False)
-
- # List of associated classifiers
- classifiers = orm.relationship('VnffgClassifier', backref='nfp')
- chain = orm.relationship('VnffgChain', backref='nfp',
- uselist=False)
-
- status = sa.Column(sa.String(255), nullable=False)
- path_id = sa.Column(sa.String(255), nullable=True)
-
- # symmetry of forwarding path
- symmetrical = sa.Column(sa.Boolean(), default=False)
-
-
-class VnffgChain(model_base.BASE, models_v1.HasTenant, models_v1.HasId):
- """Service Function Chain Data Model"""
-
- status = sa.Column(sa.String(255), nullable=False)
-
- instance_id = sa.Column(sa.String(255), nullable=True)
-
- # symmetry of forwarding path
- symmetrical = sa.Column(sa.Boolean(), default=False)
-
- # chain
- chain = sa.Column(types.Json)
-
- path_id = sa.Column(sa.String(255), nullable=True)
- nfp_id = sa.Column(types.Uuid, sa.ForeignKey('vnffgnfps.id'))
-
-
-class VnffgClassifier(model_base.BASE, models_v1.HasTenant, models_v1.HasId):
- """VNFFG NFP Classifier Data Model"""
-
- name = sa.Column(sa.String(255), nullable=True)
-
- status = sa.Column(sa.String(255), nullable=False)
-
- instance_id = sa.Column(sa.String(255), nullable=True)
-
- chain_id = sa.Column(types.Uuid, sa.ForeignKey('vnffgchains.id'))
- chain = orm.relationship('VnffgChain', backref='classifier',
- uselist=False, foreign_keys=[chain_id])
- nfp_id = sa.Column(types.Uuid, sa.ForeignKey('vnffgnfps.id'))
- # match criteria
- match = orm.relationship('ACLMatchCriteria')
-
-
-class ACLMatchCriteria(model_base.BASE, models_v1.HasId):
- """Represents ACL match criteria of a classifier."""
-
- vnffgc_id = sa.Column(types.Uuid, sa.ForeignKey('vnffgclassifiers.id'))
- eth_src = sa.Column(sa.String(36), nullable=True)
- eth_dst = sa.Column(sa.String(36), nullable=True)
- eth_type = sa.Column(sa.String(36), nullable=True)
- vlan_id = sa.Column(sa.Integer, nullable=True)
- vlan_pcp = sa.Column(sa.Integer, nullable=True)
- mpls_label = sa.Column(sa.Integer, nullable=True)
- mpls_tc = sa.Column(sa.Integer, nullable=True)
- ip_dscp = sa.Column(sa.Integer, nullable=True)
- ip_ecn = sa.Column(sa.Integer, nullable=True)
- ip_src_prefix = sa.Column(sa.String(36), nullable=True)
- ip_dst_prefix = sa.Column(sa.String(36), nullable=True)
- source_port_min = sa.Column(sa.Integer, nullable=True)
- source_port_max = sa.Column(sa.Integer, nullable=True)
- destination_port_min = sa.Column(sa.Integer, nullable=True)
- destination_port_max = sa.Column(sa.Integer, nullable=True)
- ip_proto = sa.Column(sa.Integer, nullable=True)
- network_id = sa.Column(types.Uuid, nullable=True)
- network_src_port_id = sa.Column(types.Uuid, nullable=True)
- network_dst_port_id = sa.Column(types.Uuid, nullable=True)
- tenant_id = sa.Column(sa.String(64), nullable=True)
- icmpv4_type = sa.Column(sa.Integer, nullable=True)
- icmpv4_code = sa.Column(sa.Integer, nullable=True)
- arp_op = sa.Column(sa.Integer, nullable=True)
- arp_spa = sa.Column(sa.String(36), nullable=True)
- arp_tpa = sa.Column(sa.String(36), nullable=True)
- arp_sha = sa.Column(sa.String(36), nullable=True)
- arp_tha = sa.Column(sa.String(36), nullable=True)
- ipv6_src = sa.Column(sa.String(36), nullable=True)
- ipv6_dst = sa.Column(sa.String(36), nullable=True)
- ipv6_flabel = sa.Column(sa.Integer, nullable=True)
- icmpv6_type = sa.Column(sa.Integer, nullable=True)
- icmpv6_code = sa.Column(sa.Integer, nullable=True)
- ipv6_nd_target = sa.Column(sa.String(36), nullable=True)
- ipv6_nd_sll = sa.Column(sa.String(36), nullable=True)
- ipv6_nd_tll = sa.Column(sa.String(36), nullable=True)
-
-
-class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
-
- def __init__(self):
- super(VnffgPluginDbMixin, self).__init__()
-
- def create_vnffg(self, context, vnffg):
- vnffg_dict = self._create_vnffg_pre(context, vnffg)
- sfc_instance = uuidutils.generate_uuid()
- fc_instance = uuidutils.generate_uuid()
- self._create_vnffg_post(context, sfc_instance,
- fc_instance, vnffg_dict)
- self._create_vnffg_status(context, vnffg_dict)
- return vnffg_dict
-
- def get_vnffg(self, context, vnffg_id, fields=None):
- vnffg_db = self._get_resource(context, Vnffg, vnffg_id)
- return self._make_vnffg_dict(vnffg_db, fields)
-
- def get_vnffgs(self, context, filters=None, fields=None):
- return self._get_collection(context, Vnffg, self._make_vnffg_dict,
- filters=filters, fields=fields)
-
- def update_vnffg(self, context, vnffg_id, vnffg):
- vnffg_dict = self._update_vnffg_pre(context, vnffg_id)
- self._update_vnffg_post(context, vnffg_id, constants.ACTIVE, vnffg)
- return vnffg_dict
-
- def delete_vnffg(self, context, vnffg_id):
- self._delete_vnffg_pre(context, vnffg_id)
- self._delete_vnffg_post(context, vnffg_id, False)
-
- def create_vnffgd(self, context, vnffgd):
- template = vnffgd['vnffgd']
- LOG.debug('template %s', template)
- tenant_id = self._get_tenant_id_for_create(context, template)
- template_source = template.get('template_source')
-
- with context.session.begin(subtransactions=True):
- template_id = uuidutils.generate_uuid()
- template_db = VnffgTemplate(
- id=template_id,
- tenant_id=tenant_id,
- name=template.get('name'),
- description=template.get('description'),
- template=template.get('template'),
- template_source=template_source)
- context.session.add(template_db)
-
- LOG.debug('template_db %(template_db)s',
- {'template_db': template_db})
- return self._make_template_dict(template_db)
-
- def get_vnffgd(self, context, vnffgd_id, fields=None):
- template_db = self._get_resource(context, VnffgTemplate,
- vnffgd_id)
- return self._make_template_dict(template_db, fields)
-
- def get_vnffgds(self, context, filters=None, fields=None):
- if ('template_source' in filters) and \
- (filters['template_source'][0] == 'all'):
- filters.pop('template_source')
- return self._get_collection(context, VnffgTemplate,
- self._make_template_dict,
- filters=filters, fields=fields)
-
- def delete_vnffgd(self, context, vnffgd_id):
- with context.session.begin(subtransactions=True):
- vnffg_db = context.session.query(Vnffg).filter_by(
- vnffgd_id=vnffgd_id).first()
- if vnffg_db is not None:
- raise nfvo.VnffgdInUse(vnffgd_id=vnffgd_id)
-
- template_db = self._get_resource(context, VnffgTemplate,
- vnffgd_id)
- context.session.delete(template_db)
-
- def get_classifier(self, context, classifier_id, fields=None):
- classifier_db = self._get_resource(context, VnffgClassifier,
- classifier_id)
- return self._make_classifier_dict(classifier_db, fields)
-
- def get_classifiers(self, context, filters=None, fields=None):
- return self._get_collection(context, VnffgClassifier,
- self._make_classifier_dict,
- filters=filters, fields=fields)
-
- def create_classifiers_map(self, classifier_ids, instance_ids):
- return {classifier_id: instance_ids[i]
- for i, classifier_id in enumerate(classifier_ids)}
-
- def get_nfp(self, context, nfp_id, fields=None):
- nfp_db = self._get_resource(context, VnffgNfp, nfp_id)
- return self._make_nfp_dict(nfp_db, fields)
-
- def get_nfps(self, context, filters=None, fields=None):
- return self._get_collection(context, VnffgNfp,
- self._make_nfp_dict,
- filters=filters, fields=fields)
-
- def get_sfc(self, context, sfc_id, fields=None):
- chain_db = self._get_resource(context, VnffgChain, sfc_id)
- return self._make_chain_dict(chain_db, fields)
-
- def get_sfcs(self, context, filters=None, fields=None):
- return self._get_collection(context, VnffgChain,
- self._make_chain_dict,
- filters=filters, fields=fields)
-
- def _update_template_params(self, original, paramvalues, param_matched):
- if 'get_input' not in str(original):
- return
- if isinstance(original, dict):
- for key_, value in original.items():
- if isinstance(value, dict) and 'get_input' in value:
- if value['get_input'] in paramvalues:
- original[key_] = paramvalues[value['get_input']]
- param_matched.setdefault(value['get_input'], 0)
- param_matched[value['get_input']] += 1
- else:
- raise nfvo.VnffgTemplateParamParsingException(
- get_input=value['get_input'])
- else:
- self._update_template_params(value,
- paramvalues, param_matched)
- elif isinstance(original, list):
- for element in original:
- self._update_template_params(element,
- paramvalues, param_matched)
-
- def _process_parameterized_template(self, dev_attrs, vnffgd_template):
- param_vattrs_dict = dev_attrs.pop('param_values', None)
- param_matched = {}
- if isinstance(param_vattrs_dict, dict):
- self._update_template_params(vnffgd_template,
- param_vattrs_dict, param_matched)
- else:
- raise nfvo.VnffgParamValueFormatError(
- param_value=param_vattrs_dict)
- for param_key in param_vattrs_dict:
- if param_matched.get(param_key) is None:
- LOG.warning("Param input %s not used.", param_key)
-
- def _parametrize_topology_template(self, vnffg, template_db):
- if vnffg.get('attributes') and \
- vnffg['attributes'].get('param_values'):
- vnffg_param = vnffg['attributes']
- vnffgd_topology_template = \
- template_db.template['vnffgd']['topology_template']
- self._process_parameterized_template(vnffg_param,
- vnffgd_topology_template)
- template_db.template['vnffgd']['topology_template'] = \
- vnffgd_topology_template
-
- # called internally, not by REST API
- def _create_vnffg_pre(self, context, vnffg):
- vnffg = vnffg['vnffg']
- LOG.debug('vnffg %s', vnffg)
- tenant_id = self._get_tenant_id_for_create(context, vnffg)
- name = vnffg.get('name')
- vnffg_id = vnffg.get('id') or uuidutils.generate_uuid()
- template_id = vnffg['vnffgd_id']
- ns_id = vnffg.get('ns_id', None)
- symmetrical_in_temp = self._get_symmetrical_template(context, vnffg)
- symmetrical = symmetrical_in_temp if symmetrical_in_temp is not None \
- else vnffg.get('symmetrical')
-
- with context.session.begin(subtransactions=True):
- template_db = self._get_resource(context, VnffgTemplate,
- template_id)
- LOG.debug('vnffg template %s', template_db)
-
- self._parametrize_topology_template(vnffg, template_db)
-
- vnf_members = self._get_vnffg_property(template_db.template,
- 'constituent_vnfs')
- LOG.debug('Constituent VNFs: %s', vnf_members)
- vnf_mapping = self._get_vnf_mapping(context, vnffg.get(
- 'vnf_mapping'), vnf_members)
- LOG.debug('VNF Mapping: %s', vnf_mapping)
- # create NFP dict
- nfp_dict = self._create_nfp_pre(template_db)
- LOG.debug('NFP: %s', nfp_dict)
- path_id = nfp_dict['path_id']
- try:
- if path_id:
- vnffgNfp_db = (self._model_query(context, VnffgNfp).
- filter(VnffgNfp.path_id == path_id).one())
- raise nfvo.NfpDuplicatePathID(path_id=path_id,
- nfp_name=vnffgNfp_db.name,
- vnffg_name=name)
- except orm_exc.NoResultFound:
- pass
-
- vnffg_db = Vnffg(id=vnffg_id,
- tenant_id=tenant_id,
- name=name,
- description=template_db.description,
- vnf_mapping=vnf_mapping,
- vnffgd_id=template_id,
- ns_id=ns_id,
- attributes=template_db.get('template'),
- status=constants.PENDING_CREATE)
- context.session.add(vnffg_db)
-
- nfp_id = uuidutils.generate_uuid()
- sfc_id = uuidutils.generate_uuid()
-
- classifiers = self._policy_to_acl_criteria(context, template_db,
- nfp_dict['name'],
- vnf_mapping)
- LOG.debug('classifiers %s', classifiers)
-
- classifier_ids = [uuidutils.generate_uuid() for i in classifiers]
-
- nfp_db = VnffgNfp(id=nfp_id, vnffg_id=vnffg_id,
- tenant_id=tenant_id,
- name=nfp_dict['name'],
- status=constants.PENDING_CREATE,
- path_id=path_id,
- symmetrical=symmetrical)
- context.session.add(nfp_db)
-
- chain = self._create_port_chain(context, vnf_mapping, template_db,
- nfp_dict['name'])
- LOG.debug('chain: %s', chain)
- sfc_db = VnffgChain(id=sfc_id,
- tenant_id=tenant_id,
- status=constants.PENDING_CREATE,
- symmetrical=symmetrical,
- chain=chain,
- nfp_id=nfp_id,
- path_id=path_id)
-
- context.session.add(sfc_db)
-
- for i, classifier_id in enumerate(classifier_ids):
-
- sfcc_db = VnffgClassifier(id=classifier_id,
- name=classifiers[i]['name'],
- tenant_id=tenant_id,
- status=constants.PENDING_CREATE,
- nfp_id=nfp_id,
- chain_id=sfc_id)
- context.session.add(sfcc_db)
-
- match_db_table = ACLMatchCriteria(
- id=uuidutils.generate_uuid(),
- vnffgc_id=classifier_id,
- tenant_id=tenant_id,
- **classifiers[i]['match'])
-
- context.session.add(match_db_table)
-
- return self._make_vnffg_dict(vnffg_db)
-
- @staticmethod
- def _create_nfp_pre(template_db):
- template = template_db.template['vnffgd']['topology_template']
- nfp_dict = dict()
- vnffg_name = list(template['groups'].keys())[0]
- # we assume only one NFP for initial implementation
- nfp_dict['name'] = template['groups'][vnffg_name]['members'][0]
- nfp_dict['path_id'] = template['node_templates'][nfp_dict['name']][
- 'properties'].get('id', None)
- # 'path_id' will be updated when creating port chain is done
- return nfp_dict
-
- def _create_port_chain(self, context, vnf_mapping, template_db, nfp_name):
- """Creates a list of physical port ids to represent an ordered chain
-
- :param context: SQL session context
- :param vnf_mapping: dict of VNFD to VNF instance mappings
- :param template_db: VNFFG Descriptor
- :param nfp_name: name of the forwarding path with chain requirements
- :return: list of port chain including vnf name and list of CPs
- """
- chain_list = []
- prev_forwarder = None
- vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
- # Build the list of logical chain representation
- logical_chain = self._get_nfp_attribute(template_db.template,
- nfp_name, 'path')
- # Build physical port chain
- for element in logical_chain:
- if element['forwarder'] not in vnf_mapping:
- raise nfvo.NfpForwarderNotFoundException(vnfd=element[
- 'forwarder'],
- mapping=vnf_mapping)
- # TODO(trozet): validate CP in VNFD has forwarding capability
- # Find VNF resources
- vnf = vnfm_plugin.get_vnf_resources(context,
- vnf_mapping[element[
- 'forwarder']]
- )
- vnf_info = vnfm_plugin.get_vnf(context,
- vnf_mapping[element['forwarder']])
- vnf_cp = None
- for resource in vnf:
- if resource['name'] == element['capability']:
- vnf_cp = resource['id']
- break
- if vnf_cp is None:
- raise nfvo.VnffgCpNotFoundException(cp_id=element[
- 'capability'], vnf_id=vnf_mapping[element['forwarder']])
- # Check if this is a new VNF entry in the chain
- if element['forwarder'] != prev_forwarder:
- chain_list.append(
- {'name': vnf_info['name'],
- CP: [vnf_cp],
- 'sfc_encap': element.get('sfc_encap', True)})
- prev_forwarder = element['forwarder']
- # Must be an egress CP
- else:
- if len(chain_list[-1][CP]) > 1:
- raise nfvo.NfpRequirementsException(vnfd=element[
- 'forwarder'])
- else:
- chain_list[-1][CP].append(vnf_cp)
- return chain_list
-
- @staticmethod
- def _get_vnffg_property(template, vnffg_property):
- template = template['vnffgd']['topology_template']
- vnffg_name = list(template['groups'].keys())[0]
- try:
- return template['groups'][vnffg_name]['properties'][vnffg_property]
- except KeyError:
- raise nfvo.VnffgPropertyNotFoundException(
- vnffg_property=vnffg_property)
-
- @staticmethod
- def _get_nfp_attribute(template, nfp, attribute):
- """Finds any attribute of an NFP described in a template
-
- :param template: VNFFGD template
- :param nfp: name of NFP
- :param attribute: attribute to find
- :return: value of attribute from template
- """
- template = template['vnffgd']['topology_template']
- try:
- attr_val = VnffgPluginDbMixin._search_value(
- template['node_templates'][nfp], attribute)
- if attr_val is None:
- LOG.debug('NFP %(nfp)s, attr %(attr)s',
- {'nfp': template['node_templates'][nfp],
- 'attr': attribute})
- raise nfvo.NfpAttributeNotFoundException(attribute=attribute)
- else:
- return attr_val
- except KeyError:
- raise nfvo.NfpAttributeNotFoundException(attribute=attribute)
-
- @staticmethod
- def _search_value(search_dict, search_key):
- for k, v in search_dict.items():
- if k == search_key:
- return v
- elif isinstance(v, dict):
- val = VnffgPluginDbMixin._search_value(v, search_key)
- if val is not None:
- return val
-
- def _validate_vnfd_in_vnf_mapping(self, vnf_mapping, vnf_members):
- """Validate whether or not the vnf_mapping is valid for update.
-
- In the update_vnnfg procedure we need to know whether or not the
- the vnf_mapping is valid so we can use it to update the chain.
- """
- if not vnf_mapping:
- raise nfvo.VnfMappingNotFoundException()
- else:
- for vnfd, vnf in vnf_mapping.items():
- if vnfd not in vnf_members:
- raise nfvo.VnfMappingNotValidException(vnfd=vnfd)
-
- def _combine_current_and_new_vnf_mapping(self, context,
- new_mapping, old_mapping):
- """Create an updated vnf mapping.
-
- In this function we create an updated vnf mapping which is
- a mix of the vnf_mapping which already exists in database
- and the new mapping that the user passes.
- """
- updated_vnf_mapping = old_mapping.copy()
- updated_vnf_mapping.update(new_mapping)
- return updated_vnf_mapping
-
- def _get_vnf_mapping(self, context, vnf_mapping, vnf_members):
- """Creates/validates a mapping of VNFD names to VNF IDs for NFP.
-
- :param context: SQL session context
- :param vnf_mapping: dict of requested VNFD:VNF_ID mappings
- :param vnf_members: list of constituent VNFs from a VNFFG
- :return: dict of VNFD:VNF_ID mappings
- """
- vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
- new_mapping = dict()
-
- for vnfd in vnf_members:
- # there should only be one ID returned for a unique name
- try:
- vnfd_id = vnfm_plugin.get_vnfds(context, {'name': [vnfd]},
- fields=['id']).pop()['id']
- except Exception:
- raise nfvo.VnffgdVnfdNotFoundException(vnfd_name=vnfd)
- if vnfd_id is None:
- raise nfvo.VnffgdVnfdNotFoundException(vnfd_name=vnfd)
- else:
- # if no VNF mapping, we need to abstractly look for instances
- # that match VNFD
- if vnf_mapping is None or vnfd not in vnf_mapping:
- # find suitable VNFs from vnfd_id
- LOG.debug('Searching VNFS with id %s', vnfd_id)
- vnf_list = vnfm_plugin.get_vnfs(context,
- {'vnfd_id': [vnfd_id]},
- fields=['id'])
- if len(vnf_list) == 0:
- raise nfvo.VnffgInvalidMappingException(vnfd_name=vnfd)
- else:
- LOG.debug('Matching VNFs found %s', vnf_list)
- vnf_list = [vnf['id'] for vnf in vnf_list]
- if len(vnf_list) > 1:
- new_mapping[vnfd] = random.choice(vnf_list)
- else:
- new_mapping[vnfd] = vnf_list[0]
- # if VNF mapping, validate instances exist and match the VNFD
- else:
- vnf_vnfd = vnfm_plugin.get_vnf(context, vnf_mapping[vnfd],
- fields=['vnfd_id'])
- if vnf_vnfd is not None:
- vnf_vnfd_id = vnf_vnfd['vnfd_id']
- else:
- raise nfvo.VnffgInvalidMappingException(vnfd_name=vnfd)
- if vnfd_id != vnf_vnfd_id:
- raise nfvo.VnffgInvalidMappingException(vnfd_name=vnfd)
- else:
- new_mapping[vnfd] = vnf_mapping.pop(vnfd)
- self._validate_vim(context, new_mapping.values())
- return new_mapping
-
- def _validate_vim(self, context, vnfs):
- """Validates all VNFs are in the same VIM
-
- :param context: SQL Session Context
- :param vnfs: List of VNF instance IDs
- :return: None
- """
- LOG.debug('validating vim for vnfs %s', vnfs)
- vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
- vim_id = None
- for vnf in vnfs:
- vnf_dict = vnfm_plugin.get_vnf(context, vnf)
- if vim_id is None:
- vim_id = vnf_dict['vim_id']
- elif vnf_dict['vim_id'] != vim_id:
- raise nfvo.VnffgVimMappingException(vnf_id=vnf, vim_id=vim_id)
-
- def _validate_criteria(self, criteria):
- """Validate whether or not the classifiers are unique.
-
- We define a classifier as unique when at least one
- key-value pair is different from another classifier.
- """
- if not criteria:
- raise nfvo.NfpPolicyCriteriaIndexError()
- elif len(criteria) == 1:
- pass
- else:
- for index, dict_one in enumerate(criteria):
- if index != (len(criteria) - 1):
- for dict_two in criteria[(index + 1):]:
- if dict_one == dict_two:
- raise nfvo. \
- NfpDuplicatePolicyCriteria(first_dict=dict_one,
- sec_dict=dict_two)
-
- def _policy_to_acl_criteria(self, context, template_db, nfp_name,
- vnf_mapping):
- template = template_db.template['vnffgd']['topology_template']
- nfp = template['node_templates'][nfp_name]
-
- if 'policy' in nfp['properties']:
- policy = nfp['properties']['policy']
- if 'type' in policy:
- if policy['type'] != 'ACL':
- raise nfvo.NfpPolicyTypeError(type=policy['type'])
-
- if 'criteria' not in policy:
- raise nfvo.NfpPolicyCriteriaError(
- error="Missing criteria in policy")
- validation_list = []
- for item in policy['criteria']:
- if item.get('name') is None:
- LOG.warning('The unnamed classifier approach'
- ' will be deprecated in subsequent'
- ' releases')
- validation_list.append(item)
- else:
- validation_list.append(item['classifier'])
-
- self._validate_criteria(validation_list)
-
- classifiers = []
- for criteria in policy['criteria']:
- match = dict()
- if criteria.get('name') is None:
- criteria_dict = criteria.copy()
- else:
- criteria_dict = criteria['classifier'].copy()
- for key, val in criteria_dict.items():
- if key in MATCH_CRITERIA:
- match.update(self._convert_criteria(context, key, val,
- vnf_mapping))
- else:
- raise nfvo.NfpPolicyCriteriaError(error="Unsupported "
- "criteria: "
- "{}".format(key))
- classifiers.append({'name': criteria.get('name'),
- 'match': match})
- return classifiers
- else:
- return []
-
- def _convert_criteria(self, context, criteria, value, vnf_mapping):
- """Method is used to convert criteria to proper db value from template
-
- :param context: SQL session context
- :param criteria: input criteria name
- :param value: input value
- :param vnf_mapping: mapping of VNFD to VNF instances
- :return: converted dictionary
- """
-
- if criteria.endswith('_range'):
- prefix = criteria[:-6]
- criteria_min = prefix + "_min"
- criteria_max = prefix + "_max"
- try:
- min_val, max_val = value.split('-')
- except ValueError:
- raise nfvo.NfpPolicyCriteriaError(error="Range missing or "
- "incorrect for "
- "{}".format(criteria))
- return {criteria_min: int(min_val), criteria_max: int(max_val)}
-
- elif criteria.endswith('_name'):
- prefix = criteria[:-5]
- vnf_id = list(vnf_mapping.values())[0]
- new_value = self._vim_resource_name_to_id(context, prefix, value,
- vnf_id)
- new_name = prefix + "_id"
- return {new_name: new_value}
-
- else:
- return {criteria: value}
-
- def _vim_resource_name_to_id(self, context, resource, name, vnf_id):
- """Converts a VIM resource name to its ID
-
- :param context: SQL session context
- :param resource: resource type to find (network, subnet, etc)
- :param name: name of the resource to find its ID
- :param vnf_id: A VNF instance ID that is part of the chain to which
- the classifier will apply to
- :return: ID of the resource name
- """
- # this should be overridden with driver call to find ID given name
- # for resource
- return uuidutils.generate_uuid()
-
- # called internally, not by REST API
- # instance_id = None means error on creation
- def _create_vnffg_post(self, context, sfc_instance_id, path_id,
- classifiers_map, vnffg_dict):
- LOG.debug('SFC created instance is %s', sfc_instance_id)
- LOG.debug('Flow Classifiers created instances are %s',
- [classifiers_map[item] for item in classifiers_map])
- nfp_dict = self.get_nfp(context, vnffg_dict['forwarding_paths'])
- sfc_id = nfp_dict['chain_id']
- with context.session.begin(subtransactions=True):
- nfp_query = (self._model_query(context, VnffgNfp).
- filter(VnffgNfp.id == nfp_dict['id']).
- filter(VnffgNfp.status == constants.PENDING_CREATE).
- one())
- nfp_query.update({'path_id': path_id})
- query = (self._model_query(context, VnffgChain).
- filter(VnffgChain.id == sfc_id).
- filter(VnffgChain.status == constants.PENDING_CREATE).
- one())
- query.update({'instance_id': sfc_instance_id, 'path_id': path_id})
- if sfc_instance_id is None:
- query.update({'status': constants.ERROR})
- else:
- query.update({'status': constants.ACTIVE})
- for classifier_id, fc_instance_id in classifiers_map.items():
- query = (self._model_query(context, VnffgClassifier).
- filter(VnffgClassifier.id == classifier_id).
- filter(VnffgClassifier.status ==
- constants.PENDING_CREATE).
- one())
- query.update({'instance_id': fc_instance_id})
-
- if fc_instance_id is None:
- query.update({'status': constants.ERROR})
- else:
- query.update({'status': constants.ACTIVE})
-
- def _create_vnffg_status(self, context, vnffg):
- nfp = self.get_nfp(context, vnffg['forwarding_paths'])
- chain = self.get_sfc(context, nfp['chain_id'])
-
- if chain['status'] == constants.ERROR:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ERROR)
-
- elif chain['status'] == constants.ACTIVE:
- classifiers_active_state = True
- for classifier in [self.get_classifier(context, classifier_id)
- for classifier_id in nfp['classifier_ids']]:
-
- if classifier['status'] == constants.ACTIVE:
- continue
- elif classifier['status'] == constants.ERROR:
- classifiers_active_state = False
- break
-
- if classifiers_active_state:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ACTIVE)
- else:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ERROR)
-
- def _update_all_status(self, context, vnffg_id, nfp_id, status):
- nfp_dict = self.get_nfp(context, nfp_id)
- sfc_id = nfp_dict['chain_id']
-
- with context.session.begin(subtransactions=True):
- for classifier_id in nfp_dict['classifier_ids']:
- query_cls = (self._model_query(context, VnffgClassifier).
- filter(VnffgClassifier.id == classifier_id))
- query_cls.update({'status': status})
- query_chain = (self._model_query(context, VnffgChain).
- filter(VnffgChain.id == sfc_id))
- query_chain.update({'status': status})
- query = (self._model_query(context, Vnffg).
- filter(Vnffg.id == vnffg_id))
- query.update({'status': status})
- nfp_query = (self._model_query(context, VnffgNfp).
- filter(VnffgNfp.id == nfp_id))
- nfp_query.update({'status': status})
-
- def _make_vnffg_dict(self, vnffg_db, fields=None):
- LOG.debug('vnffg_db %s', vnffg_db)
- LOG.debug('vnffg_db nfp %s', vnffg_db.forwarding_paths)
- res = {
- 'forwarding_paths': vnffg_db.forwarding_paths[0]['id']
- }
- key_list = ('id', 'tenant_id', 'name', 'description', 'ns_id',
- 'vnf_mapping', 'status', 'vnffgd_id', 'attributes')
- res.update((key, vnffg_db[key]) for key in key_list)
- return self._fields(res, fields)
-
- def _update_vnffg_status_pre(self, context, vnffg_id):
- vnffg = self.get_vnffg(context, vnffg_id)
- nfp = self.get_nfp(context, vnffg['forwarding_paths'])
- sfc = self.get_sfc(context, nfp['chain_id'])
- classifiers = [self.get_classifier(context, classifier_id) for
- classifier_id in nfp['classifier_ids']]
- with context.session.begin(subtransactions=True):
- vnffg_db = self._get_vnffg_db(context, vnffg['id'], _ACTIVE_UPDATE,
- constants.PENDING_UPDATE)
- self._get_nfp_db(context, nfp['id'], _ACTIVE_UPDATE,
- constants.PENDING_UPDATE)
- self._get_sfc_db(context, sfc['id'], _ACTIVE_UPDATE,
- constants.PENDING_UPDATE)
- for classifier in classifiers:
- self._get_classifier_db(context, classifier['id'],
- _ACTIVE_UPDATE,
- constants.PENDING_UPDATE)
- return self._make_vnffg_dict(vnffg_db)
-
- def _update_vnffg_pre(self, context, vnffg, vnffg_id, vnffg_old):
- vnffg = vnffg['vnffg']
- del vnffg['symmetrical']
- if vnffg.get('vnffgd_template') is None:
- try:
- return self._update_vnffg_without_template(context, vnffg_old,
- vnffg, vnffg_id)
- except (nfvo.VnfMappingNotFoundException,
- nfvo.VnfMappingNotValidException) as e:
- raise e
-
- with context.session.begin(subtransactions=True):
- # Templates
- template_db_new = self._get_resource(context, VnffgTemplate,
- vnffg['vnffgd_id'])
-
- LOG.debug('vnffg new template %s', template_db_new)
-
- template_db_old = self._get_resource(context, VnffgTemplate,
- vnffg_old['vnffgd_id'])
-
- LOG.debug('vnffg old template %s', template_db_old)
-
- self._parametrize_topology_template(vnffg, template_db_new)
-
- # VNF-Members
- vnf_members_new = self._get_vnffg_property(
- template_db_new.template, 'constituent_vnfs')
-
- LOG.debug('New Constituent VNFs: %s', vnf_members_new)
-
- vnf_members_old = self._get_vnffg_property(
- template_db_old.template, 'constituent_vnfs')
-
- LOG.debug('Old Constituent VNFs: %s', vnf_members_old)
-
- if set(vnf_members_new) == set(vnf_members_old):
- if vnffg.get('vnf_mapping') is None:
- final_vnf_mapping = vnffg_old['vnf_mapping']
- else:
- try:
- self._validate_vnfd_in_vnf_mapping(
- vnffg['vnf_mapping'], vnf_members_new)
- except (nfvo.VnfMappingNotFoundException,
- nfvo.VnfMappingNotValidException) as e:
- raise e
- updated_vnf_mapping = \
- self._combine_current_and_new_vnf_mapping(
- context, vnffg['vnf_mapping'],
- vnffg_old['vnf_mapping'])
-
- final_vnf_mapping = self._get_vnf_mapping(
- context, updated_vnf_mapping, vnf_members_new)
- else:
- final_vnf_mapping = self._get_vnf_mapping(context, vnffg.get(
- 'vnf_mapping'),
- vnf_members_new)
-
- LOG.debug('VNF Mapping: %s', final_vnf_mapping)
- # Update the vnffg with the new template.
- query_vnffg = (self._model_query(context, Vnffg).
- filter(Vnffg.id == vnffg_old['id']).
- filter(Vnffg.status == constants.PENDING_UPDATE))
- query_vnffg.update({'vnf_mapping': final_vnf_mapping,
- 'vnffgd_id': vnffg['vnffgd_id'],
- 'description': template_db_new.description,
- 'attributes': template_db_new.get('template')})
-
- # Delete the old_vnffgd_template if template_source is 'inline'
- if template_db_old.template_source == 'inline':
- self.delete_vnffgd(context, vnffg_old['vnffgd_id'])
-
- # update NFP
- nfp_dict_old = self.get_nfp(context, vnffg_old['forwarding_paths'])
-
- LOG.debug('Current NFP: %s', nfp_dict_old)
-
- nfp_dict_new = self._update_nfp_pre(template_db_new, nfp_dict_old)
-
- LOG.debug('New NFP: %s', nfp_dict_new)
-
- query_nfp = (self._model_query(context, VnffgNfp).
- filter(VnffgNfp.id == nfp_dict_old['id']).
- filter(VnffgNfp.status == constants.PENDING_UPDATE))
- query_nfp.update(nfp_dict_new)
-
- # update chain
- chain_old = self.get_sfc(context, nfp_dict_old['chain_id'])
- LOG.debug('Current chain: %s', chain_old)
- chain_new = self._create_port_chain(context, final_vnf_mapping,
- template_db_new,
- nfp_dict_new['name'])
- LOG.debug('New chain: %s', chain_new)
- # to check if it is updated
- update_chain = self._set_updated_chain(chain_old['chain'],
- chain_new)
-
- if update_chain:
- query_chain = (self._model_query(context, VnffgChain).
- filter(VnffgChain.id == chain_old['id']).
- filter(VnffgChain.status == constants.
- PENDING_UPDATE))
- query_chain.update({'chain': chain_new,
- 'path_id': nfp_dict_new['path_id']})
-
- # update classifiers
- classifiers_old = []
- for classifier_id in nfp_dict_old['classifier_ids']:
- classifiers_old.append(self.
- get_classifier(context,
- classifier_id,
- fields=['name', 'match', 'id']))
- classifiers_new = self._policy_to_acl_criteria(context,
- template_db_new,
- nfp_dict_new['name'],
- final_vnf_mapping)
- try:
- classifiers_update, classifiers_delete = \
- self._find_classifiers_to_update(classifiers_old,
- classifiers_new)
- except nfvo.UpdateVnffgException as e:
- raise e
- for clsfr in classifiers_update:
- if clsfr.get('id'):
- for item in MATCH_DB_KEY_LIST:
- if clsfr['match'].get(item) is None:
- clsfr['match'][item] = None
- query_match = (self._model_query(context,
- ACLMatchCriteria).
- filter(ACLMatchCriteria.vnffgc_id == clsfr['id']))
- query_match.update(clsfr['match'])
- else:
- classifier_id = uuidutils.generate_uuid()
- sfcc_db = VnffgClassifier(id=classifier_id,
- name=clsfr['name'],
- tenant_id=vnffg_old['tenant_id'],
- status=constants.PENDING_CREATE,
- nfp_id=nfp_dict_old['id'],
- chain_id=chain_old['id'])
- context.session.add(sfcc_db)
-
- match_db = ACLMatchCriteria(
- id=uuidutils.generate_uuid(),
- vnffgc_id=classifier_id,
- **clsfr['match'])
- context.session.add(match_db)
- for clsfr in classifiers_delete:
- query_clsfr = (self._model_query(context, VnffgClassifier).
- filter(VnffgClassifier.id == clsfr['id']).
- filter(VnffgClassifier.status == constants.
- PENDING_UPDATE))
- query_clsfr.update({'status': constants.PENDING_DELETE})
-
- return self.get_vnffg(context, vnffg_id)
-
- def _find_classifiers_to_update(self, current_classifiers,
- new_classifiers):
- update_classifiers = []
- delete_classifiers = []
- names_list = []
- for new_clsfr in new_classifiers:
- found_name = False
- if new_clsfr['name'] is None:
- LOG.error('VNFFG update requires named classifiers')
- raise nfvo.UpdateVnffgException(
- message="Failed to update VNFFG")
- for cur_clsfr in current_classifiers:
- if cur_clsfr['name'] == new_clsfr['name']:
- new_clsfr['id'] = cur_clsfr['id']
- names_list.append(new_clsfr['name'])
- update_classifiers.append(new_clsfr)
- found_name = True
- break
- if not found_name:
- names_list.append(new_clsfr['name'])
- update_classifiers.append(new_clsfr)
- for cur_clsfr in current_classifiers:
- if cur_clsfr['name'] not in names_list:
- delete_classifiers.append(cur_clsfr)
- return update_classifiers, delete_classifiers
-
- def _set_updated_chain(self, current_chain, new_chain):
- if len(current_chain) != len(new_chain):
- return True
- else:
- for i, item in enumerate(current_chain):
- cp_vnf = new_chain[i]
- if (cp_vnf['name'] == item['name'] and
- cp_vnf['connection_points'] == item[
- 'connection_points']):
- continue
- else:
- return True
- return False
-
- def _update_vnffg_without_template(self, context, old_vnffg, new_vnffg,
- vnffg_id):
-
- template_db = self._get_resource(context, VnffgTemplate,
- old_vnffg['vnffgd_id'])
- vnfd_members = self._get_vnffg_property(template_db.template,
- 'constituent_vnfs')
- nfp = self.get_nfp(context, old_vnffg['forwarding_paths'])
-
- chain_dict = self.get_sfc(context, nfp['chain_id'])
- try:
- self._validate_vnfd_in_vnf_mapping(new_vnffg.get('vnf_mapping'),
- vnfd_members)
- except (nfvo.VnfMappingNotFoundException,
- nfvo.VnfMappingNotValidException) as e:
- raise e
-
- combined_vnf_mapping = self._combine_current_and_new_vnf_mapping(
- context, new_vnffg['vnf_mapping'], old_vnffg['vnf_mapping'])
-
- new_vnffg['vnf_mapping'] = self._get_vnf_mapping(context,
- combined_vnf_mapping,
- vnfd_members)
- new_chain = self._create_port_chain(context,
- new_vnffg['vnf_mapping'],
- template_db,
- nfp['name'])
-
- LOG.debug('chain update: %s', new_chain)
-
- query_vnffg = (self._model_query(context, Vnffg).
- filter(Vnffg.id == old_vnffg['id']).
- filter(Vnffg.status == constants.PENDING_UPDATE))
- query_vnffg.update({'vnf_mapping': new_vnffg['vnf_mapping']})
-
- query_chain = (self._model_query(context, VnffgChain).
- filter(VnffgChain.id == chain_dict['id']).
- filter(VnffgChain.status == constants.
- PENDING_UPDATE))
- query_chain.update({'chain': new_chain})
-
- return self.get_vnffg(context, vnffg_id)
-
- def _update_nfp_pre(self, template_db, nfp_dict_old):
- template_new = template_db.template['vnffgd']['topology_template']
- nfp_dict_new = dict()
- vnffg_name = list(template_new['groups'].keys())[0]
- nfp_dict_new['name'] = template_new['groups'][vnffg_name]['members'][0]
- nfp_dict_new['path_id'] = template_new['node_templates'][nfp_dict_new[
- 'name']]['properties'].get('id')
-
- if not nfp_dict_new['path_id']:
- nfp_dict_new['path_id'] = nfp_dict_old['path_id']
- return nfp_dict_new
-
- def _update_vnffg_post(self, context, n_sfc_chain_id,
- classifiers_map, vnffg_dict):
- """Updates the status and the n-sfc instance_ids in the db
-
- :param context: SQL Session Context
- :param n_sfc_chain_id: Id of port-chain in n-sfc side
- :param classifiers_map: classifier and instance Ids map
- :param vnffg_dict: vnffg dictionary
- :return: None
- """
- nfp_dict = self.get_nfp(context, vnffg_dict['forwarding_paths'])
- sfc_id = nfp_dict['chain_id']
- with context.session.begin(subtransactions=True):
- query_chain = (self._model_query(context, VnffgChain).
- filter(VnffgChain.id == sfc_id).
- filter(VnffgChain.status == constants.PENDING_UPDATE).one())
- if n_sfc_chain_id is None:
- query_chain.update({'status': constants.ERROR})
- else:
- query_chain.update({'status': constants.ACTIVE})
- for clsfr_id in nfp_dict['classifier_ids']:
- query_clsfr = (self._model_query(context, VnffgClassifier).
- filter(VnffgClassifier.id == clsfr_id))
- if classifiers_map.get(clsfr_id):
- query_clsfr.update({
- 'instance_id': classifiers_map[clsfr_id]})
- if classifiers_map[clsfr_id]:
- query_clsfr.update({'status': constants.ACTIVE})
- else:
- query_clsfr.update({'status': constants.ERROR})
- else:
- # Deletion of unused Match criterias which are
- # not longer required due to the update classifier
- # procedure.
- query_match = (
- self._model_query(context, ACLMatchCriteria).
- filter(ACLMatchCriteria.vnffgc_id == clsfr_id))
- query_match.delete()
- query_clsfr.delete()
-
- def _update_vnffg_status_post(self, context, vnffg, error=False,
- db_state=constants.ERROR):
-
- nfp = self.get_nfp(context, vnffg['forwarding_paths'])
- chain = self.get_sfc(context, nfp['chain_id'])
-
- if error:
- if db_state == constants.ACTIVE:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ACTIVE)
- else:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ERROR)
- else:
- if chain['status'] == constants.ERROR:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ERROR)
- elif chain['status'] == constants.ACTIVE:
- classifiers_active_state = True
- for classifier in [self.get_classifier(context, classifier_id)
- for classifier_id in nfp['classifier_ids']]:
- if classifier['status'] == constants.ACTIVE:
- continue
- elif classifier['status'] == constants.ERROR:
- classifiers_active_state = False
- break
- if classifiers_active_state:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ACTIVE)
- else:
- self._update_all_status(context, vnffg['id'], nfp['id'],
- constants.ERROR)
-
- def _get_vnffg_db(self, context, vnffg_id, current_statuses, new_status):
- try:
- vnffg_db = (
- self._model_query(context, Vnffg).
- filter(Vnffg.id == vnffg_id).
- filter(Vnffg.status.in_(current_statuses)).
- with_for_update().one())
- except orm_exc.NoResultFound:
- raise nfvo.VnffgNotFoundException(vnffg_id=vnffg_id)
- if vnffg_db.status == constants.PENDING_UPDATE:
- raise nfvo.VnffgInUse(vnffg_id=vnffg_id)
- vnffg_db.update({'status': new_status})
- return vnffg_db
-
- def _get_nfp_db(self, context, nfp_id, current_statuses, new_status):
- try:
- nfp_db = (
- self._model_query(context, VnffgNfp).
- filter(VnffgNfp.id == nfp_id).
- filter(VnffgNfp.status.in_(current_statuses)).
- with_for_update().one())
- except orm_exc.NoResultFound:
- raise nfvo.NfpNotFoundException(nfp_id=nfp_id)
- if nfp_db.status == constants.PENDING_UPDATE:
- raise nfvo.NfpInUse(nfp_id=nfp_id)
- nfp_db.update({'status': new_status})
- return nfp_db
-
- def _get_sfc_db(self, context, sfc_id, current_statuses, new_status):
- try:
- sfc_db = (
- self._model_query(context, VnffgChain).
- filter(VnffgChain.id == sfc_id).
- filter(VnffgChain.status.in_(current_statuses)).
- with_for_update().one())
- except orm_exc.NoResultFound:
- raise nfvo.SfcNotFoundException(sfc_id=sfc_id)
- if sfc_db.status == constants.PENDING_UPDATE:
- raise nfvo.SfcInUse(sfc_id=sfc_id)
- sfc_db.update({'status': new_status})
- return sfc_db
-
- def _get_classifier_db(self, context, fc_id, current_statuses, new_status):
- try:
- fc_db = (
- self._model_query(context, VnffgClassifier).
- filter(VnffgClassifier.id == fc_id).
- filter(VnffgClassifier.status.in_(current_statuses)).
- with_for_update().one())
- except orm_exc.NoResultFound:
- raise nfvo.ClassifierNotFoundException(fc_id=fc_id)
- if fc_db.status == constants.PENDING_UPDATE:
- raise nfvo.ClassifierInUse(fc_id=fc_id)
- fc_db.update({'status': new_status})
- return fc_db
-
- def _delete_vnffg_pre(self, context, vnffg_id):
- vnffg = self.get_vnffg(context, vnffg_id)
- ns_id = vnffg.get('ns_id')
- if ns_id:
- ns_db = self._get_resource(context, NS, ns_id)
- # If network service is not in pending_delete status,
- # raise error when delete vnffg.
- if ns_db['status'] != constants.PENDING_DELETE:
- raise nfvo.VnffgInUseNS(vnffg_id=vnffg_id,
- ns_id=vnffg.get('ns_id'))
- nfp = self.get_nfp(context, vnffg['forwarding_paths'])
- chain = self.get_sfc(context, nfp['chain_id'])
- classifiers = [self.get_classifier(context, classifier_id)
- for classifier_id in nfp['classifier_ids']]
- with context.session.begin(subtransactions=True):
- vnffg_db = self._get_vnffg_db(
- context, vnffg['id'], _ACTIVE_UPDATE_ERROR_DEAD,
- constants.PENDING_DELETE)
- self._get_nfp_db(context, nfp['id'], _ACTIVE_UPDATE_ERROR_DEAD,
- constants.PENDING_DELETE)
- self._get_sfc_db(context, chain['id'], _ACTIVE_UPDATE_ERROR_DEAD,
- constants.PENDING_DELETE)
- for classifier in classifiers:
- self._get_classifier_db(context, classifier['id'],
- _ACTIVE_UPDATE_ERROR_DEAD,
- constants.PENDING_DELETE)
-
- return self._make_vnffg_dict(vnffg_db)
-
- def _delete_vnffg_post(self, context, vnffg_id, error):
- vnffg = self.get_vnffg(context, vnffg_id)
- nfp = self.get_nfp(context, vnffg['forwarding_paths'])
- chain = self.get_sfc(context, nfp['chain_id'])
- classifiers = [self.get_classifier(context, classifier_id)
- for classifier_id in nfp['classifier_ids']]
- fc_queries = []
- match_queries = []
- with context.session.begin(subtransactions=True):
- vnffg_query = (
- self._model_query(context, Vnffg).
- filter(Vnffg.id == vnffg['id']).
- filter(Vnffg.status == constants.PENDING_DELETE))
- nfp_query = (
- self._model_query(context, VnffgNfp).
- filter(VnffgNfp.id == nfp['id']).
- filter(VnffgNfp.status == constants.PENDING_DELETE))
- sfc_query = (
- self._model_query(context, VnffgChain).
- filter(VnffgChain.id == chain['id']).
- filter(VnffgChain.status == constants.PENDING_DELETE))
- for classifier in classifiers:
- fc_queries.append((
- self._model_query(context, VnffgClassifier).
- filter(VnffgClassifier.id == classifier['id']).
- filter(VnffgClassifier.status ==
- constants.PENDING_DELETE)))
- match_queries.append((
- self._model_query(context, ACLMatchCriteria).
- filter(ACLMatchCriteria.vnffgc_id == classifier['id'])))
- if error:
- vnffg_query.update({'status': constants.ERROR})
- nfp_query.update({'status': constants.ERROR})
- sfc_query.update({'status': constants.ERROR})
- for fc_query in fc_queries:
- fc_query.update({'status': constants.ERROR})
- else:
- for match_query in match_queries:
- match_query.delete()
- for fc_query in fc_queries:
- fc_query.delete()
- sfc_query.delete()
- nfp_query.delete()
- vnffg_query.delete()
-
- vnffgd_id = vnffg.get('vnffgd_id')
- template_db = self._get_resource(context, VnffgTemplate,
- vnffgd_id)
-
- if template_db.get('template_source') == 'inline':
- self.delete_vnffgd(context, vnffgd_id)
-
- def _get_symmetrical_template(self, context, vnffg):
- fp_prop = self._get_fp_properties(context, vnffg)
- return fp_prop.get('symmetrical', False)
-
- def _get_correlation_template(self, context, vnffg):
- fp_prop = self._get_fp_properties(context, vnffg)
- return fp_prop.get('correlation', 'mpls')
-
- def _get_fp_properties(self, context, vnffg):
- vnffgd_topo = None
- if vnffg.get('vnffgd_template'):
- vnffgd_topo = vnffg['vnffgd_template']['topology_template']
- elif vnffg.get('vnffgd_id'):
- vnffgd_template = self.get_vnffgd(context, vnffg.get('vnffgd_id'))
- vnffgd_topo = vnffgd_template['template']['vnffgd'][
- 'topology_template']
- vnffg_name = list(vnffgd_topo['groups'].keys())[0]
- nfp_name = vnffgd_topo['groups'][vnffg_name]['members'][0]
- fp_prop = vnffgd_topo['node_templates'][nfp_name]['properties']
- return fp_prop
-
- def _make_template_dict(self, template, fields=None):
- res = {}
- key_list = ('id', 'tenant_id', 'name', 'description', 'template',
- 'template_source')
- res.update((key, template[key]) for key in key_list)
- return self._fields(res, fields)
-
- def _make_acl_match_dict(self, acl_match_db):
- key_list = MATCH_DB_KEY_LIST
- return {key: entry[key] for key in key_list for entry in acl_match_db
- if entry[key]}
-
- def _make_classifier_dict(self, classifier_db, fields=None):
- LOG.debug('classifier_db %s', classifier_db)
- LOG.debug('classifier_db match %s', classifier_db.match)
- res = {
- 'match': self._make_acl_match_dict(classifier_db.match)
- }
- key_list = ('id', 'name', 'tenant_id', 'instance_id', 'status',
- 'chain_id', 'nfp_id')
- res.update((key, classifier_db[key]) for key in key_list)
- return self._fields(res, fields)
-
- def _make_nfp_dict(self, nfp_db, fields=None):
- LOG.debug('nfp_db %s', nfp_db)
- res = {'chain_id': nfp_db.chain['id'],
- 'classifier_ids': [classifier['id'] for classifier in
- nfp_db.classifiers]}
- key_list = ('name', 'id', 'tenant_id', 'symmetrical', 'status',
- 'path_id', 'vnffg_id')
- res.update((key, nfp_db[key]) for key in key_list)
- return self._fields(res, fields)
-
- def _make_chain_dict(self, chain_db, fields=None):
- LOG.debug('chain_db %s', chain_db)
- res = {}
- key_list = ('id', 'tenant_id', 'symmetrical', 'status', 'chain',
- 'path_id', 'nfp_id', 'instance_id')
- res.update((key, chain_db[key]) for key in key_list)
- return self._fields(res, fields)
-
- def _get_resource(self, context, model, res_id):
- try:
- return self._get_by_id(context, model, res_id)
- except orm_exc.NoResultFound:
- if issubclass(model, Vnffg):
- raise nfvo.VnffgNotFoundException(vnffg_id=res_id)
- elif issubclass(model, VnffgClassifier):
- raise nfvo.ClassifierNotFoundException(classifier_id=res_id)
- if issubclass(model, VnffgTemplate):
- raise nfvo.VnffgdNotFoundException(vnffgd_id=res_id)
- if issubclass(model, VnffgChain):
- raise nfvo.SfcNotFoundException(sfc_id=res_id)
- else:
- raise
diff --git a/tacker/db/vnfm/vnfm_db.py b/tacker/db/vnfm/vnfm_db.py
index 579196228..f5dc94447 100644
--- a/tacker/db/vnfm/vnfm_db.py
+++ b/tacker/db/vnfm/vnfm_db.py
@@ -14,9 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from datetime import datetime
-
-from oslo_db.exception import DBDuplicateEntry
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
@@ -28,17 +25,13 @@ from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import schema
from tacker._i18n import _
-from tacker.api.v1 import attributes
from tacker.common import exceptions
import tacker.conf
-from tacker import context as t_context
-from tacker.db.common_services import common_services_db_plugin
from tacker.db import db_base
from tacker.db.db_sqlalchemy import models
from tacker.db import model_base
from tacker.db import models_v1
from tacker.db.nfvo import nfvo_db # noqa: F401
-from tacker.db.nfvo import ns_db
from tacker.db import types
from tacker.extensions import vnfm
from tacker import manager
@@ -47,12 +40,6 @@ from tacker.plugins.common import constants
CONF = tacker.conf.CONF
LOG = logging.getLogger(__name__)
-_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE,
- constants.PENDING_HEAL)
-_ACTIVE_UPDATE_ERROR_DEAD = (
- constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
- constants.PENDING_SCALE_IN, constants.PENDING_SCALE_OUT, constants.ERROR,
- constants.PENDING_DELETE, constants.DEAD, constants.PENDING_HEAL)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)
@@ -182,13 +169,8 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
def _core_plugin(self):
return manager.TackerManager.get_plugin()
- def subnet_id_to_network_id(self, context, subnet_id):
- subnet = self._core_plugin.get_subnet(context, subnet_id)
- return subnet['network_id']
-
def __init__(self):
super(VNFMPluginDb, self).__init__()
- self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
def _get_resource(self, context, model, id):
try:
@@ -242,152 +224,12 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
res.update((key, vnf_db[key]) for key in key_list)
return self._fields(res, fields)
- @staticmethod
- def _mgmt_driver_name(vnf_dict):
- return vnf_dict['vnfd']['mgmt_driver']
-
- @staticmethod
- def _instance_id(vnf_dict):
- return vnf_dict['instance_id']
-
- def create_vnfd(self, context, vnfd):
- vnfd = vnfd['vnfd']
- LOG.debug('vnfd %s', vnfd)
- tenant_id = self._get_tenant_id_for_create(context, vnfd)
- service_types = vnfd.get('service_types')
- mgmt_driver = vnfd.get('mgmt_driver')
- template_source = vnfd.get("template_source")
-
- if (not attributes.is_attr_set(service_types)):
- LOG.debug('service types unspecified')
- raise vnfm.ServiceTypesNotSpecified()
-
- try:
- with context.session.begin(subtransactions=True):
- vnfd_id = uuidutils.generate_uuid()
- vnfd_db = VNFD(
- id=vnfd_id,
- tenant_id=tenant_id,
- name=vnfd.get('name'),
- description=vnfd.get('description'),
- mgmt_driver=mgmt_driver,
- template_source=template_source,
- deleted_at=datetime.min)
- context.session.add(vnfd_db)
- for (key, value) in vnfd.get('attributes', {}).items():
- attribute_db = VNFDAttribute(
- id=uuidutils.generate_uuid(),
- vnfd_id=vnfd_id,
- key=key,
- value=value)
- context.session.add(attribute_db)
- for service_type in (item['service_type']
- for item in vnfd['service_types']):
- service_type_db = ServiceType(
- id=uuidutils.generate_uuid(),
- tenant_id=tenant_id,
- vnfd_id=vnfd_id,
- service_type=service_type)
- context.session.add(service_type_db)
- except DBDuplicateEntry as e:
- raise exceptions.DuplicateEntity(
- _type="vnfd",
- entry=e.columns)
- LOG.debug('vnfd_db %(vnfd_db)s %(attributes)s ',
- {'vnfd_db': vnfd_db,
- 'attributes': vnfd_db.attributes})
- vnfd_dict = self._make_vnfd_dict(vnfd_db)
- LOG.debug('vnfd_dict %s', vnfd_dict)
- self._cos_db_plg.create_event(
- context, res_id=vnfd_dict['id'],
- res_type=constants.RES_TYPE_VNFD,
- res_state=constants.RES_EVT_ONBOARDED,
- evt_type=constants.RES_EVT_CREATE,
- tstamp=vnfd_dict[constants.RES_EVT_CREATED_FLD])
- return vnfd_dict
-
- def update_vnfd(self, context, vnfd_id,
- vnfd):
- with context.session.begin(subtransactions=True):
- vnfd_db = self._get_resource(context, VNFD,
- vnfd_id)
- vnfd_db.update(vnfd['vnfd'])
- vnfd_db.update({'updated_at': timeutils.utcnow()})
- vnfd_dict = self._make_vnfd_dict(vnfd_db)
- self._cos_db_plg.create_event(
- context, res_id=vnfd_dict['id'],
- res_type=constants.RES_TYPE_VNFD,
- res_state=constants.RES_EVT_NA_STATE,
- evt_type=constants.RES_EVT_UPDATE,
- tstamp=vnfd_dict[constants.RES_EVT_UPDATED_FLD])
- return vnfd_dict
-
- def delete_vnfd(self,
- context,
- vnfd_id,
- soft_delete=True):
- with context.session.begin(subtransactions=True):
- # TODO(yamahata): race. prevent from newly inserting hosting vnf
- # that refers to this vnfd
- vnfs_db = context.session.query(VNF).filter_by(
- vnfd_id=vnfd_id).first()
- if vnfs_db is not None and vnfs_db.deleted_at is None:
- raise vnfm.VNFDInUse(vnfd_id=vnfd_id)
- vnfd_db = self._get_resource(context, VNFD,
- vnfd_id)
- if soft_delete:
- vnfd_db.update({'deleted_at': timeutils.utcnow()})
- self._cos_db_plg.create_event(
- context, res_id=vnfd_db['id'],
- res_type=constants.RES_TYPE_VNFD,
- res_state=constants.RES_EVT_NA_STATE,
- evt_type=constants.RES_EVT_DELETE,
- tstamp=vnfd_db[constants.RES_EVT_DELETED_FLD])
- else:
- context.session.query(ServiceType).filter_by(
- vnfd_id=vnfd_id).delete()
- context.session.query(VNFDAttribute).filter_by(
- vnfd_id=vnfd_id).delete()
- context.session.delete(vnfd_db)
-
def get_vnfd(self, context, vnfd_id, fields=None):
vnfd_db = self._get_resource(context, VNFD, vnfd_id)
if not vnfd_db:
raise exceptions.NotFound(resource='VNFD', name=vnfd_id)
return self._make_vnfd_dict(vnfd_db)
- def get_vnfds(self, context, filters, fields=None):
- if ('template_source' in filters and
- filters['template_source'][0] == 'all'):
- filters.pop('template_source')
- return self._get_collection(context, VNFD,
- self._make_vnfd_dict,
- filters=filters, fields=fields)
-
- def choose_vnfd(self, context, service_type,
- required_attributes=None):
- required_attributes = required_attributes or []
- LOG.debug('required_attributes %s', required_attributes)
- with context.session.begin(subtransactions=True):
- query = (
- context.session.query(VNFD).
- filter(
- sa.exists().
- where(sa.and_(
- VNFD.id == ServiceType.vnfd_id,
- ServiceType.service_type == service_type))))
- for key in required_attributes:
- query = query.filter(
- sa.exists().
- where(sa.and_(
- VNFD.id ==
- VNFDAttribute.vnfd_id,
- VNFDAttribute.key == key)))
- LOG.debug('statements %s', query)
- vnfd_db = query.first()
- if vnfd_db:
- return self._make_vnfd_dict(vnfd_db)
-
def _vnf_attribute_update_or_create(
self, context, vnf_id, key, value):
arg = (self._model_query(context, VNFAttribute).
@@ -401,94 +243,6 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
key=key, value=value)
context.session.add(arg)
- # called internally, not by REST API
- def _create_vnf_pre(self, context, vnf):
- LOG.debug('vnf %s', vnf)
- tenant_id = self._get_tenant_id_for_create(context, vnf)
- vnfd_id = vnf['vnfd_id']
- name = vnf.get('name')
- vnf_id = uuidutils.generate_uuid()
- attributes = vnf.get('attributes', {})
- vim_id = vnf.get('vim_id')
- placement_attr = vnf.get('placement_attr', {})
- try:
- with context.session.begin(subtransactions=True):
- vnfd_db = self._get_resource(context, VNFD,
- vnfd_id)
- vnf_db = VNF(id=vnf_id,
- tenant_id=tenant_id,
- name=name,
- description=vnfd_db.description,
- instance_id=None,
- vnfd_id=vnfd_id,
- vim_id=vim_id,
- placement_attr=placement_attr,
- status=constants.PENDING_CREATE,
- error_reason=None,
- deleted_at=datetime.min)
- context.session.add(vnf_db)
- for key, value in attributes.items():
- arg = VNFAttribute(
- id=uuidutils.generate_uuid(), vnf_id=vnf_id,
- key=key, value=value)
- context.session.add(arg)
- except DBDuplicateEntry as e:
- raise exceptions.DuplicateEntity(
- _type="vnf",
- entry=e.columns)
- evt_details = "VNF UUID assigned."
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=constants.PENDING_CREATE,
- evt_type=constants.RES_EVT_CREATE,
- tstamp=vnf_db[constants.RES_EVT_CREATED_FLD],
- details=evt_details)
- return self._make_vnf_dict(vnf_db)
-
- # called internally, not by REST API
- # intsance_id = None means error on creation
- def _create_vnf_post(self, context, vnf_id, instance_id,
- mgmt_ip_address, vnf_dict):
- LOG.debug('vnf_dict %s', vnf_dict)
- with context.session.begin(subtransactions=True):
- query = (self._model_query(context, VNF).
- filter(VNF.id == vnf_id).
- filter(VNF.status.in_(CREATE_STATES)).
- one())
- query.update({'instance_id': instance_id,
- 'mgmt_ip_address': mgmt_ip_address})
- if instance_id is None or vnf_dict['status'] == constants.ERROR:
- query.update({'status': constants.ERROR})
-
- for (key, value) in vnf_dict['attributes'].items():
- # do not store decrypted vim auth in vnf attr table
- if 'vim_auth' not in key:
- self._vnf_attribute_update_or_create(context, vnf_id,
- key, value)
- evt_details = ("Infra Instance ID created: %s and "
- "Mgmt IP address set: %s") % (instance_id,
- mgmt_ip_address)
- self._cos_db_plg.create_event(
- context, res_id=vnf_dict['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state=vnf_dict['status'],
- evt_type=constants.RES_EVT_CREATE,
- tstamp=timeutils.utcnow(), details=evt_details)
-
- def _create_vnf_status(self, context, vnf_id, new_status):
- with context.session.begin(subtransactions=True):
- query = (self._model_query(context, VNF).
- filter(VNF.id == vnf_id).
- filter(VNF.status.in_(CREATE_STATES)).one())
- query.update({'status': new_status})
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=new_status,
- evt_type=constants.RES_EVT_CREATE,
- tstamp=timeutils.utcnow(), details="VNF creation completed")
-
def _get_vnf_db(self, context, vnf_id, current_statuses):
try:
vnf_db = (
@@ -551,12 +305,6 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
if mgmt_ip_address:
vnf_db.update({'mgmt_ip_address': mgmt_ip_address})
updated_vnf_dict = self._make_vnf_dict(vnf_db)
- self._cos_db_plg.create_event(
- context, res_id=updated_vnf_dict['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state=updated_vnf_dict['status'],
- evt_type=constants.RES_EVT_SCALE,
- tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_scaling_status_err(self,
@@ -572,12 +320,6 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
LOG.error("Failed to revert scale info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_info['id'], "error": e})
- self._cos_db_plg.create_event(
- context, res_id=vnf_info['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state='ERROR',
- evt_type=constants.RES_EVT_SCALE,
- tstamp=timeutils.utcnow())
def _update_vnf_scaling(self,
context,
@@ -602,182 +344,6 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
if 'vim_auth' not in key:
self._vnf_attribute_update_or_create(
context, vnf_info['id'], key, value)
- self._cos_db_plg.create_event(
- context, res_id=vnf_info['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state=status,
- evt_type=constants.RES_EVT_SCALE,
- tstamp=timestamp)
-
- def _update_vnf_pre(self, context, vnf_id, new_status):
- with context.session.begin(subtransactions=True):
- vnf_db = self._update_vnf_status_db(
- context, vnf_id, _ACTIVE_UPDATE, new_status)
- updated_vnf_dict = self._make_vnf_dict(vnf_db)
- if new_status in constants.VNF_STATUS_TO_EVT_TYPES:
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=updated_vnf_dict['status'],
- evt_type=constants.VNF_STATUS_TO_EVT_TYPES[new_status],
- tstamp=timeutils.utcnow())
- return updated_vnf_dict
-
- def _update_vnf_post(self, context, vnf_id, new_status,
- new_vnf_dict, vnf_status, evt_type):
- updated_time_stamp = timeutils.utcnow()
- with context.session.begin(subtransactions=True):
- (self._model_query(context, VNF).
- filter(VNF.id == vnf_id).
- filter(VNF.status == vnf_status).
- update({'status': new_status,
- 'updated_at': updated_time_stamp,
- 'mgmt_ip_address': new_vnf_dict['mgmt_ip_address']}))
-
- dev_attrs = new_vnf_dict.get('attributes', {})
- (context.session.query(VNFAttribute).
- filter(VNFAttribute.vnf_id == vnf_id).
- filter(~VNFAttribute.key.in_(dev_attrs.keys())).
- delete(synchronize_session='fetch'))
-
- for (key, value) in dev_attrs.items():
- if 'vim_auth' not in key:
- self._vnf_attribute_update_or_create(context, vnf_id,
- key, value)
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=new_status,
- evt_type=evt_type,
- tstamp=updated_time_stamp)
-
- def _delete_vnf_pre(self, context, vnf_id, force_delete=False):
- with context.session.begin(subtransactions=True):
-
- nss_db = context.session.query(ns_db.NS).filter(
- ns_db.NS.vnf_ids.like("%" + vnf_id + "%")).first()
-
- if not force_delete:
- # If vnf is deleted by NFVO, then vnf_id would
- # exist in the nss_db otherwise it should be queried from
- # vnf db table.
- if nss_db is not None:
- if nss_db.status not in [constants.PENDING_DELETE,
- constants.ERROR]:
- raise vnfm.VNFInUse(vnf_id=vnf_id)
- else:
- vnf_db = self._get_vnf_db(context, vnf_id,
- _ACTIVE_UPDATE_ERROR_DEAD)
- if (vnf_db is not None and vnf_db.status == constants.
- PENDING_CREATE):
- raise vnfm.VNFInUse(
- message="Operation on PENDING_CREATE VNF is not "
- "permitted.")
-
- vnf_db = self._update_vnf_status_db(
- context, vnf_id, _ACTIVE_UPDATE_ERROR_DEAD,
- constants.PENDING_DELETE, vnf_db=vnf_db)
- else:
- vnf_db = self._update_vnf_status_db_no_check(context,
- vnf_id, _ACTIVE_UPDATE_ERROR_DEAD,
- constants.PENDING_DELETE)
- deleted_vnf_db = self._make_vnf_dict(vnf_db)
- details = "VNF delete initiated" if not force_delete else \
- "VNF force delete initiated"
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=deleted_vnf_db['status'],
- evt_type=constants.RES_EVT_DELETE,
- tstamp=timeutils.utcnow(), details=details)
- return deleted_vnf_db
-
- def _delete_vnf_force(self, context, vnf_id):
- # Check mapping vnf in vnffg_db
- with context.session.begin(subtransactions=True):
- nss_db = context.session.query(ns_db.NS).filter(
- ns_db.NS.vnf_ids.like("%" + vnf_id + "%")).first()
- if nss_db:
- pass
-
- def _delete_vnf_post(self, context, vnf_dict, error,
- soft_delete=True, force_delete=False):
- vnf_id = vnf_dict['id']
- with context.session.begin(subtransactions=True):
- if force_delete:
- query = (
- self._model_query(context, VNF).
- filter(VNF.id == vnf_id))
- else:
- query = (
- self._model_query(context, VNF).
- filter(VNF.id == vnf_id).
- filter(VNF.status == constants.PENDING_DELETE))
- if error:
- query.update({'status': constants.ERROR})
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=constants.ERROR,
- evt_type=constants.RES_EVT_DELETE,
- tstamp=timeutils.utcnow(),
- details="VNF Delete ERROR")
- else:
- if soft_delete:
- deleted_time_stamp = timeutils.utcnow()
- query.update({'deleted_at': deleted_time_stamp})
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=constants.PENDING_DELETE,
- evt_type=constants.RES_EVT_DELETE,
- tstamp=deleted_time_stamp,
- details="VNF Delete Complete")
- else:
- (self._model_query(context, VNFAttribute).
- filter(VNFAttribute.vnf_id == vnf_id).delete())
- query.delete()
-
- # Delete corresponding vnfd
- if vnf_dict['vnfd']['template_source'] == "inline":
- self.delete_vnfd(context, vnf_dict["vnfd_id"])
-
- # reference implementation. needs to be overrided by subclass
- def create_vnf(self, context, vnf):
- vnf_dict = self._create_vnf_pre(context, vnf)
- # start actual creation of hosting vnf.
- # Waiting for completion of creation should be done backgroundly
- # by another thread if it takes a while.
- instance_id = uuidutils.generate_uuid()
- vnf_dict['instance_id'] = instance_id
- self._create_vnf_post(context, vnf_dict['id'], instance_id, None,
- vnf_dict)
- self._create_vnf_status(context, vnf_dict['id'],
- constants.ACTIVE)
- return vnf_dict
-
- # reference implementation. needs to be overrided by subclass
- def update_vnf(self, context, vnf_id, vnf):
- new_status = constants.PENDING_UPDATE
- vnf_dict = self._update_vnf_pre(context, vnf_id, new_status)
- # start actual update of hosting vnf
- # waiting for completion of update should be done backgroundly
- # by another thread if it takes a while
- self._update_vnf_post(context, vnf_id,
- constants.ACTIVE,
- vnf_dict)
- return vnf_dict
-
- # reference implementation. needs to be overrided by subclass
- def delete_vnf(self, context, vnf_id, soft_delete=True):
- vnf_dict = self._delete_vnf_pre(context, vnf_id)
- # start actual deletion of hosting vnf.
- # Waiting for completion of deletion should be done backgroundly
- # by another thread if it takes a while.
- self._delete_vnf_post(context,
- vnf_dict,
- False,
- soft_delete=soft_delete)
def get_vnf(self, context, vnf_id, fields=None):
vnf_db = self._get_resource(context, VNF, vnf_id)
@@ -787,47 +353,6 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
return self._get_collection(context, VNF, self._make_vnf_dict,
filters=filters, fields=fields)
- def set_vnf_error_status_reason(self, context, vnf_id, new_reason):
- with context.session.begin(subtransactions=True):
- (self._model_query(context, VNF).
- filter(VNF.id == vnf_id).
- update({'error_reason': new_reason}))
-
- def _mark_vnf_status(self, vnf_id, exclude_status, new_status):
- context = t_context.get_admin_context()
- with context.session.begin(subtransactions=True):
- try:
- vnf_db = (
- self._model_query(context, VNF).
- filter(VNF.id == vnf_id).
- filter(~VNF.status.in_(exclude_status)).
- with_for_update().one())
- except orm_exc.NoResultFound:
- LOG.error('no vnf found %s', vnf_id)
- return False
-
- vnf_db.update({'status': new_status})
- self._cos_db_plg.create_event(
- context, res_id=vnf_id,
- res_type=constants.RES_TYPE_VNF,
- res_state=new_status,
- evt_type=constants.RES_EVT_MONITOR,
- tstamp=timeutils.utcnow())
- return True
-
- def _mark_vnf_error(self, vnf_id):
- return self._mark_vnf_status(
- vnf_id, [constants.DEAD], constants.ERROR)
-
- def _mark_vnf_dead(self, vnf_id):
- exclude_status = [
- constants.PENDING_CREATE,
- constants.PENDING_UPDATE,
- constants.PENDING_DELETE,
- constants.ERROR]
- return self._mark_vnf_status(
- vnf_id, exclude_status, constants.DEAD)
-
def create_placement_constraint(self, context, placement_obj_list):
context.session.add_all(placement_obj_list)
@@ -876,30 +401,6 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
'resource': placement_obj.resource,
'updated_at': timeutils.utcnow()}))
- def update_vnf_rollback_status_err(self,
- context,
- vnf_info):
- vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
- evt_type = constants.RES_EVT_CREATE
- if vnf_lcm_op_occs.operation == 'SCALE':
- evt_type = constants.RES_EVT_SCALE,
- self._cos_db_plg.create_event(context, res_id=vnf_info['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state='ERROR', evt_type=evt_type,
- tstamp=timeutils.utcnow())
-
- def _update_vnf_rollback_pre(self,
- context,
- vnf_info):
- vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
- evt_type = constants.RES_EVT_CREATE
- if vnf_lcm_op_occs.operation == 'SCALE':
- evt_type = constants.RES_EVT_CREATE
- self._cos_db_plg.create_event(context, res_id=vnf_info['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state='ROLL_BACK', evt_type=evt_type,
- tstamp=timeutils.utcnow())
-
def _update_vnf_rollback(self,
context,
vnf_info,
@@ -920,14 +421,8 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
delete(synchronize_session='fetch'))
vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ']
- evt_type = constants.RES_EVT_CREATE
if vnf_lcm_op_occs.operation == 'SCALE':
for (key, value) in dev_attrs.items():
if 'vim_auth' not in key:
self._vnf_attribute_update_or_create(
context, vnf_info['id'], key, value)
- evt_type = constants.RES_EVT_SCALE
- self._cos_db_plg.create_event(context, res_id=vnf_info['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state=status, evt_type=evt_type,
- tstamp=timestamp)
diff --git a/tacker/extensions/common_services.py b/tacker/extensions/common_services.py
index ba695cb7e..a24f0c017 100644
--- a/tacker/extensions/common_services.py
+++ b/tacker/extensions/common_services.py
@@ -41,54 +41,11 @@ class InputValuesMissing(exceptions.InvalidInput):
message = _("Parameter input values missing for the key '%(key)s'")
-class ParamYAMLInputMissing(exceptions.InvalidInput):
- message = _("Parameter YAML input missing")
-
-
class InvalidFormat(exceptions.InvalidInput):
message = _("Invalid format. '%(error)s'")
-RESOURCE_ATTRIBUTE_MAP = {
-
- 'events': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'resource_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True
- },
- 'resource_type': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True
- },
- 'resource_state': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True
- },
- 'timestamp': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'event_details': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'event_type': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- }
-}
+RESOURCE_ATTRIBUTE_MAP = {}
class Common_services(extensions.ExtensionDescriptor):
diff --git a/tacker/extensions/nfvo.py b/tacker/extensions/nfvo.py
index b5aab7ff5..da2cd1474 100644
--- a/tacker/extensions/nfvo.py
+++ b/tacker/extensions/nfvo.py
@@ -84,202 +84,6 @@ class VimFromVnfNotFoundException(exceptions.NotFound):
message = _('VIM from VNF %(vnf_id)s could not be found')
-class ToscaParserFailed(exceptions.InvalidInput):
- message = _("tosca-parser failed: - %(error_msg_details)s")
-
-
-class VnffgdInvalidTemplate(exceptions.InvalidInput):
- message = _("Invalid VNFFG template input: %(template)s")
-
-
-class VnffgdDuplicateForwarderException(exceptions.InvalidInput):
- message = _("Invalid Forwarding Path contains duplicate forwarder not in "
- "order: %(forwarder)s")
-
-
-class VnffgdDuplicateCPException(exceptions.InvalidInput):
- message = _("Invalid Forwarding Path contains duplicate connection point "
- ": %(cp)s")
-
-
-class VnffgdVnfdNotFoundException(exceptions.NotFound):
- message = _("Specified VNFD %(vnfd_name)s in VNFFGD does not exist. "
- "Please create VNFDs before creating VNFFG")
-
-
-class VnffgdCpNotFoundException(exceptions.NotFound):
- message = _("Specified CP %(cp_id)s could not be found in VNFD "
- "%(vnfd_name)s. Please check VNFD for correct Connection "
- "Point.")
-
-
-class VnffgdCpNoForwardingException(exceptions.TackerException):
- message = _("Specified CP %(cp_id)s in VNFD %(vnfd_name)s "
- "does not have forwarding capability, which is required to be "
- "included in forwarding path")
-
-
-class VnffgdWrongEndpointNumber(exceptions.TackerException):
- message = _("Specified number_of_endpoints %(number)s is not equal to "
- "the number of connection_point %(cps)s")
-
-
-class VnffgdInUse(exceptions.InUse):
- message = _('VNFFGD %(vnffgd_id)s is still in use')
-
-
-class VnffgdNotFoundException(exceptions.NotFound):
- message = _('VNFFG Template %(vnffgd_id)s could not be found')
-
-
-class VnffgCreateFailed(exceptions.TackerException):
- message = _('Creating VNFFG based on %(vnffgd_id)s failed')
-
-
-class VnffgInvalidMappingException(exceptions.TackerException):
- message = _("Matching VNF Instance for VNFD %(vnfd_name)s could not be "
- "found. Please create an instance of this VNFD before "
- "creating/updating VNFFG.")
-
-
-class VnffgParamValueFormatError(exceptions.TackerException):
- message = _("Param values %(param_value)s is not in dict format.")
-
-
-class VnffgTemplateParamParsingException(exceptions.TackerException):
- message = _("Failed to parse VNFFG Template due to "
- "missing input param %(get_input)s.")
-
-
-class VnffgPropertyNotFoundException(exceptions.NotFound):
- message = _('VNFFG Property %(vnffg_property)s could not be found')
-
-
-class VnffgCpNotFoundException(exceptions.NotFound):
- message = _("Specified CP %(cp_id)s could not be found in VNF "
- "%(vnf_id)s.")
-
-
-class VnffgNotFoundException(exceptions.NotFound):
- message = _('VNFFG %(vnffg_id)s could not be found')
-
-
-class VnffgInUse(exceptions.InUse):
- message = _('VNFFG %(vnffg_id)s is still in use')
-
-
-class VnffgVnfNotFoundException(exceptions.NotFound):
- message = _("Specified VNF instance %(vnf_name)s in VNF Mapping could not "
- "be found")
-
-
-class VnffgDeleteFailed(exceptions.TackerException):
- message = _('Deleting VNFFG %(vnffg_id)s failed')
-
-
-class VnffgInUseNS(exceptions.TackerException):
- message = _('VNFFG %(vnffg_id)s belongs to active network service '
- '%(ns_id)s')
-
-
-class NfpAttributeNotFoundException(exceptions.NotFound):
- message = _('NFP attribute %(attribute)s could not be found')
-
-
-class NfpNotFoundException(exceptions.NotFound):
- message = _('NFP %(nfp_id)s could not be found')
-
-
-class NfpInUse(exceptions.InUse):
- message = _('NFP %(nfp_id)s is still in use')
-
-
-class NfpPolicyCriteriaError(exceptions.PolicyCheckError):
- message = _('%(error)s in policy')
-
-
-class NfpPolicyCriteriaIndexError(exceptions.TackerException):
- message = _('Criteria list can not be empty')
-
-
-class NfpDuplicatePolicyCriteria(exceptions.TackerException):
- message = _('The %(first_dict)s and %(sec_dict)s are overlapped')
-
-
-class NfpDuplicatePathID(exceptions.TackerException):
- message = _('The path_id %(path_id)s is overlapped with '
- 'NFP %(nfp_name)s in %(vnffg_name)s')
-
-
-class NfpPolicyTypeError(exceptions.PolicyCheckError):
- message = _('Unsupported Policy Type: %(type)s')
-
-
-class NfpForwarderNotFoundException(exceptions.NotFound):
- message = _('VNFD Forwarder %(vnfd)s not found in VNF Mapping %(mapping)s')
-
-
-class NfpRequirementsException(exceptions.TackerException):
- message = _('VNFD Forwarder %(vnfd)s specified more than twice in '
- 'requirements path')
-
-
-class SfcInUse(exceptions.InUse):
- message = _('SFC %(sfc_id)s is still in use')
-
-
-class SfcNotFoundException(exceptions.NotFound):
- message = _('Service Function Chain %(sfc_id)s could not be found')
-
-
-class ClassifierInUse(exceptions.InUse):
- message = _('Classifier %(classifier_id)s is still in use')
-
-
-class ClassifierNotFoundException(exceptions.NotFound):
- message = _('Classifier %(classifier_id)s could not be found')
-
-
-class VnfMappingNotFoundException(exceptions.NotFound):
- message = _('VNF mapping not found/defined')
-
-
-class VnfMappingNotValidException(exceptions.TackerException):
- message = _('The %(vnfd)s is not found in constituent VNFDs')
-
-
-class NSDInUse(exceptions.InUse):
- message = _('NSD %(nsd_id)s is still in use')
-
-
-class NSInUse(exceptions.InUse):
- message = _('NS %(ns_id)s is still in use')
-
-
-class NoTasksException(exceptions.TackerException):
- message = _('No tasks to run for %(action)s on %(resource)s')
-
-
-class UpdateChainException(exceptions.TackerException):
- message = _("%(message)s")
-
-
-class CreateChainException(exceptions.TackerException):
- message = _("%(message)s")
-
-
-class UpdateClassifierException(exceptions.TackerException):
- message = _("%(message)s")
-
-
-class UpdateVnffgException(exceptions.TackerException):
- message = _("%(message)s")
-
-
-class FlowClassiferCreationFailed(exceptions.TackerException):
- message = _("%(message)s")
-
-
NAME_MAX_LEN = 255
RESOURCE_ATTRIBUTE_MAP = {
@@ -381,454 +185,6 @@ RESOURCE_ATTRIBUTE_MAP = {
'default': None
},
},
-
- 'vnffgds': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True,
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True,
- },
- 'name': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- },
- 'description': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'template': {
- 'allow_post': True,
- 'allow_put': False,
- 'convert_to': attr.convert_none_to_empty_dict,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- 'template_source': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- 'default': 'onboarded'
- }
- },
-
- 'vnffgs': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True
- },
- 'vnffgd_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'default': None
- },
- 'name': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- },
- 'description': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'vnf_mapping': {
- 'allow_post': True,
- 'allow_put': True,
- 'convert_to': attr.convert_none_to_empty_dict,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- 'attributes': {
- 'allow_post': True,
- 'allow_put': True,
- 'convert_to': attr.convert_none_to_empty_dict,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- 'symmetrical': {
- 'allow_post': True,
- 'allow_put': True,
- 'is_visible': True,
- 'validate': {'type:boolean': None},
- 'default': False,
- },
- 'forwarding_paths': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'status': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'vnffgd_template': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- 'ns_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'is_visible': True,
- 'default': None,
- },
- },
-
- 'nfps': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True
- },
- 'tenant_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True
- },
- 'vnffg_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- },
- 'name': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- },
- 'classifier_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- },
- 'chain_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- },
- 'path_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- },
- 'symmetrical': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:boolean': None},
- 'default': False,
- },
- 'status': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- },
- 'sfcs': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True
- },
- 'tenant_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True
- },
- 'nfp_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- },
- 'instance_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- },
- 'chain': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'path_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'symmetrical': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:boolean': None},
- 'default': False,
- },
- 'status': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- },
- 'classifiers': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True
- },
- 'tenant_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True
- },
- 'nfp_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- },
- 'instance_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- },
- 'match': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'chain_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'status': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'name': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- },
- },
-
- 'nsds': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True,
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True,
- },
- 'name': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- },
- 'description': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'created_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'updated_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'attributes': {
- 'allow_post': True,
- 'allow_put': False,
- 'convert_to': attr.convert_none_to_empty_dict,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- 'template_source': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- 'default': 'onboarded'
- },
-
- },
-
- 'nss': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True,
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True,
- },
- 'name': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- },
- 'description': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- 'default': '',
- },
- 'created_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'updated_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'vnf_ids': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'vnffg_ids': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'nsd_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'default': None,
- },
- 'placement_attr': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:dict_or_none': None},
- 'is_visible': True,
- 'default': {},
- },
- 'vim_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'status': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'error_reason': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'attributes': {
- 'allow_post': True,
- 'allow_put': False,
- 'convert_to': attr.convert_none_to_empty_dict,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- 'mgmt_ip_addresses': {
- 'allow_post': False,
- 'allow_put': False,
- 'convert_to': attr.convert_none_to_empty_dict,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- },
- 'nsd_template': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- },
-
}
@@ -906,9 +262,5 @@ class NFVOPluginBase(service_base.NFVPluginBase, metaclass=abc.ABCMeta):
def get_vims(self, context, filters=None, fields=None):
pass
- def get_vim_by_name(self, context, vim_name, fields=None,
- mask_password=True):
- raise NotImplementedError()
-
def get_default_vim(self, context):
pass
diff --git a/tacker/extensions/nfvo_plugins/network_service.py b/tacker/extensions/nfvo_plugins/network_service.py
deleted file mode 100644
index d00fb5ff8..000000000
--- a/tacker/extensions/nfvo_plugins/network_service.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from tacker._i18n import _
-from tacker.common import exceptions
-from tacker.services import service_base
-
-
-class NSPluginBase(service_base.NFVPluginBase, metaclass=abc.ABCMeta):
-
- @abc.abstractmethod
- def create_nsd(self, context, nsd):
- pass
-
- @abc.abstractmethod
- def delete_nsd(self, context, nsd_id):
- pass
-
- @abc.abstractmethod
- def get_nsd(self, context, nsd_id, fields=None):
- pass
-
- @abc.abstractmethod
- def get_nsds(self, context, filters=None, fields=None):
- pass
-
- @abc.abstractmethod
- def create_ns(self, context, ns):
- pass
-
- @abc.abstractmethod
- def get_nss(self, context, filters=None, fields=None):
- pass
-
- @abc.abstractmethod
- def get_ns(self, context, ns_id, fields=None):
- pass
-
- @abc.abstractmethod
- def delete_ns(self, context, ns_id):
- pass
-
-
-class NSDNotFound(exceptions.NotFound):
- message = _('NSD %(nsd_id)s could not be found')
-
-
-class NSNotFound(exceptions.NotFound):
- message = _('NS %(ns_id)s could not be found')
-
-
-class NSInUse(exceptions.InUse):
- message = _('NS %(ns_id)s in use')
diff --git a/tacker/extensions/nfvo_plugins/vnffg.py b/tacker/extensions/nfvo_plugins/vnffg.py
deleted file mode 100644
index 721c95c1b..000000000
--- a/tacker/extensions/nfvo_plugins/vnffg.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2016 Red Hat Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from tacker.services import service_base
-
-
-class VNFFGPluginBase(service_base.NFVPluginBase, metaclass=abc.ABCMeta):
-
- @abc.abstractmethod
- def create_vnffgd(self, context, vnffgd):
- pass
-
- @abc.abstractmethod
- def delete_vnffgd(self, context, vnffgd_id):
- pass
-
- @abc.abstractmethod
- def get_vnffgd(self, context, vnffgd_id, fields=None):
- pass
-
- @abc.abstractmethod
- def get_vnffgds(self, context, filters=None, fields=None):
- pass
-
- @abc.abstractmethod
- def create_vnffg(self, context, vnffg):
- pass
-
- @abc.abstractmethod
- def get_vnffgs(self, context, filters=None, fields=None):
- pass
-
- @abc.abstractmethod
- def get_vnffg(self, context, vnffg_id, fields=None):
- pass
-
- @abc.abstractmethod
- def update_vnffg(self, context, vnffg_id, vnffg):
- pass
-
- @abc.abstractmethod
- def delete_vnffg(self, context, vnffg_id):
- pass
-
- @abc.abstractmethod
- def get_nfp(self, context, nfp_id, fields=None):
- pass
-
- @abc.abstractmethod
- def get_nfps(self, context, filters=None, fields=None):
- pass
-
- @abc.abstractmethod
- def get_sfcs(self, context, filters=None, fields=None):
- pass
-
- @abc.abstractmethod
- def get_sfc(self, context, sfc_id, fields=None):
- pass
-
- @abc.abstractmethod
- def get_classifiers(self, context, filters=None, fields=None):
- pass
-
- @abc.abstractmethod
- def get_classifier(self, context, classifier_id, fields=None):
- pass
diff --git a/tacker/extensions/vnfm.py b/tacker/extensions/vnfm.py
index 7606df750..df8cbe36d 100644
--- a/tacker/extensions/vnfm.py
+++ b/tacker/extensions/vnfm.py
@@ -20,10 +20,8 @@ from oslo_log import log as logging
from tacker._i18n import _
from tacker.api import extensions
from tacker.api.v1 import attributes as attr
-from tacker.api.v1 import base
from tacker.api.v1 import resource_helper
from tacker.common import exceptions
-from tacker import manager
from tacker.plugins.common import constants
from tacker.services import service_base
@@ -31,38 +29,10 @@ from tacker.services import service_base
LOG = logging.getLogger(__name__)
-class MultipleMGMTDriversSpecified(exceptions.InvalidInput):
- message = _('More than one MGMT Driver per vnfd is not supported')
-
-
-class ServiceTypesNotSpecified(exceptions.InvalidInput):
- message = _('service types are not specified')
-
-
-class VNFDInUse(exceptions.InUse):
- message = _('VNFD %(vnfd_id)s is still in use')
-
-
class VNFInUse(exceptions.InUse):
message = _('VNF %(vnf_id)s is still in use')
-class InvalidMgmtDriver(exceptions.InvalidInput):
- message = _('Invalid Mgmt driver %(mgmt_driver_name)s.')
-
-
-class InvalidInfraDriver(exceptions.InvalidInput):
- message = _('VIM type %(vim_name)s is not supported as an infra driver')
-
-
-class InvalidAPIAttributeType(exceptions.InvalidInput):
- message = _('Expecting dict type for API attribute instead of %(atype)s ')
-
-
-class VNFCreateFailed(exceptions.TackerException):
- message = _('creating VNF based on %(vnfd_id)s failed')
-
-
class VNFUpdateInvalidInput(exceptions.TackerException):
message = _('VNF Update Invalid Input %(reason)s')
@@ -95,10 +65,6 @@ class VNFDeleteFailed(exceptions.TackerException):
message = _('%(reason)s')
-class VNFHealFailed(exceptions.TackerException):
- message = _('VNF %(vnf_id)s failed to heal')
-
-
class VNFDNotFound(exceptions.NotFound):
message = _('VNFD %(vnfd_id)s could not be found')
@@ -173,54 +139,11 @@ class HeatClientException(exceptions.TackerException):
message = _("%(msg)s")
-class IPAddrInvalidInput(exceptions.InvalidInput):
- message = _("IP Address input values should be in a list format")
-
-
-class HugePageSizeInvalidInput(exceptions.InvalidInput):
- message = _("Value specified for mem_page_size is invalid: "
- "%(error_msg_details)s. The valid values are 'small', 'large',"
- " 'any' or an integer value in MB")
-
-
-class CpuAllocationInvalidKeys(exceptions.InvalidInput):
- message = _("Invalid keys specified in VNFD - %(error_msg_details)s."
- "Supported keys are: %(valid_keys)s")
-
-
-class CpuAllocationInvalidValues(exceptions.InvalidInput):
- message = _("Invalid values specified in VNFD - %(error_msg_details)s."
- "Supported Values are: %(valid_values)s")
-
-
-class NumaNodesInvalidKeys(exceptions.InvalidInput):
- message = _("Invalid keys specified in VNFD - %(error_msg_details)s."
- "Supported keys are: %(valid_keys)s")
-
-
-class FilePathMissing(exceptions.InvalidInput):
- message = _("'file' attribute is missing for "
- "tosca.artifacts.Deployment.Image.VM artifact type")
-
-
class InfraDriverUnreachable(exceptions.ServiceUnavailable):
message = _("Could not retrieve VNF resource IDs and"
" types. Please check %(service)s status.")
-class VNFInactive(exceptions.InvalidInput):
- message = _("VNF %(vnf_id)s is not in Active state: %(message)s")
-
-
-class MetadataNotMatched(exceptions.InvalidInput):
- message = _("Metadata for alarm policy is not matched")
-
-
-class InvalidResourceType(exceptions.InvalidInput):
- message = _("Resource type %(resource_type)s for alarm policy "
- "is not supported")
-
-
class InvalidSubstitutionMapping(exceptions.InvalidInput):
message = _("Input for substitution mapping requirements are not"
" valid for %(requirement)s. They must be in the form"
@@ -236,348 +159,17 @@ class InvalidParamsForSM(exceptions.InvalidInput):
message = _("Please provide parameters for substitution mappings")
-class InvalidKubernetesScalingPolicyNumber(exceptions.InvalidInput):
- message = _("Please provide only one Scaling policy")
-
-
-class InvalidKubernetesNetworkNumber(exceptions.InvalidInput):
- message = _("Please provide one network for all vdus")
-
-
-class InvalidKubernetesInputParameter(exceptions.InvalidInput):
- message = _("Found unsupported keys for %(found_keys)s ")
-
-
class InvalidInstReqInfoForScaling(exceptions.InvalidInput):
message = _("Scaling resource cannot be set to "
"fixed ip_address or mac_address.")
-class InvalidMaintenanceParameter(exceptions.InvalidInput):
- message = _("Could not find the required params for maintenance")
-
-
class OIDCAuthFailed(exceptions.InvalidInput):
message = _("OIDC authentication and authorization failed."
" Detail: %(detail)s")
-def _validate_service_type_list(data, valid_values=None):
- if not isinstance(data, list):
- msg = _("Invalid data format for service list: '%s'") % data
- LOG.debug(msg)
- return msg
- if not data:
- msg = _("Empty list is not allowed for service list. '%s'") % data
- LOG.debug(msg)
- return msg
- key_specs = {
- 'service_type': {
- 'type:string': None,
- }
- }
- for service in data:
- msg = attr._validate_dict(service, key_specs)
- if msg:
- LOG.debug(msg)
- return msg
-
-
-attr.validators['type:service_type_list'] = _validate_service_type_list
-
-NAME_MAX_LEN = 255
-
-RESOURCE_ATTRIBUTE_MAP = {
-
- 'vnfds': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True,
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True,
- },
- 'name': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- },
- 'description': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'service_types': {
- 'allow_post': True,
- 'allow_put': False,
- 'convert_to': attr.convert_to_list,
- 'validate': {'type:service_type_list': None},
- 'is_visible': True,
- 'default': attr.ATTR_NOT_SPECIFIED,
- },
- 'attributes': {
- 'allow_post': True,
- 'allow_put': False,
- 'convert_to': attr.convert_none_to_empty_dict,
- 'validate': {'type:dict_or_nodata': None},
- 'is_visible': True,
- 'default': None,
- },
- 'created_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'updated_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'template_source': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- 'default': 'onboarded'
- },
- },
-
- 'vnfs': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'primary_key': True
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': True,
- 'is_visible': True
- },
- 'vnfd_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:uuid': None},
- 'is_visible': True,
- 'default': None
- },
- 'vim_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'name': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': NAME_MAX_LEN},
- 'is_visible': True,
- },
- 'description': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:string': None},
- 'is_visible': True,
- 'default': '',
- },
- 'instance_id': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- },
- 'mgmt_ip_address': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'is_visible': True,
- },
- 'attributes': {
- 'allow_post': True,
- 'allow_put': True,
- 'validate': {'type:dict_or_none': None},
- 'is_visible': True,
- 'default': {},
- },
- 'placement_attr': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:dict_or_none': None},
- 'is_visible': True,
- 'default': {},
- },
- 'status': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'error_reason': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'created_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'updated_at': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'vnfd_template': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:dict_or_none': None},
- 'is_visible': True,
- 'default': None,
- },
- },
-}
-
-
-SUB_RESOURCE_ATTRIBUTE_MAP = {
- 'actions': {
- 'parent': {
- 'collection_name': 'vnfs',
- 'member_name': 'vnf'
- },
- 'members': {
- 'scale': {
- 'parameters': {
- 'policy': {
- 'allow_post': True,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': None}
- },
- 'type': {
- 'allow_post': True,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': None}
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': False,
- 'is_visible': False
- },
- }
- },
- }
- },
- 'triggers': {
- 'parent': {
- 'collection_name': 'vnfs',
- 'member_name': 'vnf'
- },
- 'members': {
- 'trigger': {
- 'parameters': {
- 'policy_name': {
- 'allow_post': True,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': None}
- },
- 'action_name': {
- 'allow_post': True,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': None}
- },
- 'params': {
- 'allow_post': True,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:dict_or_none': None}
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': False,
- 'is_visible': False
- }
- }
- },
- }
- },
- 'resources': {
- 'parent': {
- 'collection_name': 'vnfs',
- 'member_name': 'vnf'
- },
- 'members': {
- 'resource': {
- 'parameters': {
- 'name': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'type': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- 'is_visible': True,
- },
- }
- }
- }
- },
- 'maintenances': {
- 'parent': {
- 'collection_name': 'vnfs',
- 'member_name': 'vnf'
- },
- 'members': {
- 'maintenance': {
- 'parameters': {
- 'params': {
- 'allow_post': True,
- 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:dict_or_none': None}
- },
- 'tenant_id': {
- 'allow_post': True,
- 'allow_put': False,
- 'validate': {'type:string': None},
- 'required_by_policy': False,
- 'is_visible': False
- },
- 'response': {
- 'allow_post': False,
- 'allow_put': False,
- 'validate': {'type:dict_or_none': None},
- 'is_visible': True
- }
- }
- }
- }
- }
-}
+RESOURCE_ATTRIBUTE_MAP = {}
class Vnfm(extensions.ExtensionDescriptor):
@@ -606,33 +198,10 @@ class Vnfm(extensions.ExtensionDescriptor):
special_mappings = {}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, RESOURCE_ATTRIBUTE_MAP)
- plural_mappings['service_types'] = 'service_type'
attr.PLURALS.update(plural_mappings)
- resources = resource_helper.build_resource_info(
+ return resource_helper.build_resource_info(
plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.VNFM,
translate_name=True)
- plugin = manager.TackerManager.get_service_plugins()[
- constants.VNFM]
- for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
- parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name]['parent']
-
- for resource_name in SUB_RESOURCE_ATTRIBUTE_MAP[
- collection_name]['members']:
- params = SUB_RESOURCE_ATTRIBUTE_MAP[
- collection_name]['members'][resource_name]['parameters']
-
- controller = base.create_resource(collection_name,
- resource_name,
- plugin, params,
- allow_bulk=True,
- parent=parent)
-
- resource = extensions.ResourceExtension(
- collection_name,
- controller, parent,
- attr_map=params)
- resources.append(resource)
- return resources
@classmethod
def get_plugin_interface(cls):
@@ -657,22 +226,10 @@ class VNFMPluginBase(service_base.NFVPluginBase, metaclass=abc.ABCMeta):
def get_plugin_description(self):
return 'Tacker VNF Manager plugin'
- @abc.abstractmethod
- def create_vnfd(self, context, vnfd):
- pass
-
- @abc.abstractmethod
- def delete_vnfd(self, context, vnfd_id):
- pass
-
@abc.abstractmethod
def get_vnfd(self, context, vnfd_id, fields=None):
pass
- @abc.abstractmethod
- def get_vnfds(self, context, filters=None, fields=None):
- pass
-
@abc.abstractmethod
def get_vnfs(self, context, filters=None, fields=None):
pass
@@ -680,34 +237,3 @@ class VNFMPluginBase(service_base.NFVPluginBase, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_vnf(self, context, vnf_id, fields=None):
pass
-
- @abc.abstractmethod
- def get_vnf_resources(self, context, vnf_id, fields=None, filters=None):
- pass
-
- @abc.abstractmethod
- def create_vnf(self, context, vnf):
- pass
-
- @abc.abstractmethod
- def update_vnf(
- self, context, vnf_id, vnf):
- pass
-
- @abc.abstractmethod
- def delete_vnf(self, context, vnf_id):
- pass
-
- @abc.abstractmethod
- def create_vnf_scale(
- self, context, vnf_id, scale):
- pass
-
- @abc.abstractmethod
- def create_vnf_trigger(
- self, context, vnf_id, trigger):
- pass
-
- @abc.abstractmethod
- def create_vnf_maintenance(self, context, vnf_id, maintenance):
- pass
diff --git a/tacker/nfvo/drivers/vim/openstack_driver.py b/tacker/nfvo/drivers/vim/openstack_driver.py
index d29535e6d..435a04585 100644
--- a/tacker/nfvo/drivers/vim/openstack_driver.py
+++ b/tacker/nfvo/drivers/vim/openstack_driver.py
@@ -17,10 +17,8 @@
import os
from keystoneauth1 import exceptions
-from keystoneauth1 import identity
from keystoneauth1.identity import v3
from keystoneauth1 import session
-from neutronclient.common import exceptions as nc_exceptions
from neutronclient.v2_0 import client as neutron_client
from oslo_config import cfg
from oslo_log import log as logging
@@ -32,9 +30,7 @@ from tacker import context as t_context
from tacker.extensions import nfvo
from tacker.keymgr import API as KEYMGR_API
from tacker.nfvo.drivers.vim import abstract_vim_driver
-from tacker.nfvo.drivers.vnffg import abstract_vnffg_driver
from tacker.nfvo.nfvo_plugin import NfvoPlugin
-from tacker.plugins.common import constants
from tacker.vnfm import keystone
LOG = logging.getLogger(__name__)
@@ -86,8 +82,7 @@ def config_opts():
return [('vim_keys', OPTS)]
-class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
- abstract_vnffg_driver.VnffgAbstractDriver):
+class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver):
"""Driver for OpenStack VIM
OpenStack driver handles interactions with local as well as
@@ -313,624 +308,3 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
auth_plugin = self._get_auth_plugin(**auth_cred)
sess = session.Session(auth=auth_plugin)
return client_type(session=sess)
-
- def _translate_ip_protocol(self, ip_proto):
- if ip_proto == '1':
- return 'icmp'
- elif ip_proto == '6':
- return 'tcp'
- elif ip_proto == '17':
- return 'udp'
- else:
- return None
-
- def _create_classifier_params(self, fc):
- classifier_params = {}
- for field in fc:
- if field in FC_MAP:
- classifier_params[FC_MAP[field]] = fc[field]
- elif field == 'ip_proto':
- protocol = self._translate_ip_protocol(str(fc[field]))
- if not protocol:
- raise ValueError('protocol %s not supported' % fc[field])
- classifier_params['protocol'] = protocol
- else:
- LOG.warning("flow classifier %s not supported by "
- "networking-sfc driver", field)
- return classifier_params
-
- def create_flow_classifier(self, name, fc, auth_attr=None):
- if not auth_attr:
- LOG.warning("auth information required for n-sfc driver")
- return None
- fc['name'] = name
- LOG.debug('fc passed is %s', fc)
-
- sfc_classifier_params = self._create_classifier_params(fc)
- LOG.debug('sfc_classifier_params is %s', sfc_classifier_params)
-
- if len(sfc_classifier_params) > 0:
- neutronclient_ = NeutronClient(auth_attr)
-
- fc_id = neutronclient_.flow_classifier_create(
- sfc_classifier_params)
- return fc_id
-
- raise ValueError('empty match field for input flow classifier')
-
- def create_chain(self, name, path_id, fc_ids, vnfs, symmetrical=False,
- correlation='mpls', auth_attr=None):
- if not auth_attr:
- LOG.warning("auth information required for n-sfc driver")
- return None
-
- neutronclient_ = NeutronClient(auth_attr)
- port_pairs_list = neutronclient_.port_pair_list()
- port_pair_groups_list = neutronclient_.port_pair_group_list()
- port_chains_list = neutronclient_.port_chain_list()
- port_pair_group_list = []
- new_ppgs = []
- new_pps = []
-
- try:
- for vnf in vnfs:
- # TODO(s3wong): once scaling is in place and VNFFG supports it
- # that model needs to be implemented to concatenate all
- # port-pairs into the port-pair-group
- # port pair group could include port-pairs from different VNFs
- if CONNECTION_POINT not in vnf:
- LOG.warning("Chain creation failed due to missing "
- "connection point info in VNF "
- "%(vnfname)s", {'vnfname': vnf['name']})
- return None
- cp_list = vnf[CONNECTION_POINT]
- num_cps = len(cp_list)
- if num_cps not in [1, 2]:
- LOG.warning("Chain creation failed due to wrong number of "
- "connection points: expected [1 | 2], got "
- "%(cps)d", {'cps': num_cps})
- return None
- if num_cps == 1:
- ingress = cp_list[0]
- egress = cp_list[0]
- else:
- ingress = cp_list[0]
- egress = cp_list[1]
-
- # If sfc_encap is True, pp_corr is set to correlation to
- # make use of correlation, otherwise pp_corr is set to None
- # to install SFC proxy
- sfc_encap = vnf.get(SFC_ENCAP, True)
- pp_corr = correlation if sfc_encap else None
-
- # valid_port_in_use function is used to find out the
- # port_pair_group_id of the existing port pair group
- # which was created by ingress and egress of current VNF
- port_pair_group_id = self.valid_port_in_use(
- ingress, egress, port_pairs_list, port_pair_groups_list)
- if not port_pair_group_id:
- # create the new port pair group if it is not existed
- port_pair = dict()
- port_pair['name'] = vnf['name'] + '-connection-points'
- port_pair['description'] = 'port pair for ' + vnf['name']
- port_pair['ingress'] = ingress
- port_pair['egress'] = egress
- port_pair['service_function_parameters'] = {
- 'correlation': pp_corr}
- port_pair_id = neutronclient_.port_pair_create(port_pair)
- if not port_pair_id:
- LOG.warning("Chain creation failed due to port pair "
- "creation failed for vnf %(vnf)s",
- {'vnf': vnf['name']})
- return None
- new_pps.append(port_pair_id)
- port_pair_group = {}
- port_pair_group['name'] = vnf['name'] + '-port-pair-group'
- port_pair_group['description'] = \
- 'port pair group for ' + vnf['name']
- port_pair_group['port_pairs'] = []
- port_pair_group['port_pairs'].append(port_pair_id)
- port_pair_group_id = \
- neutronclient_.port_pair_group_create(port_pair_group)
- new_ppgs.append(port_pair_group_id)
- if not port_pair_group_id:
- LOG.warning("Chain creation failed due to port pair group "
- "creation failed for vnf "
- "%(vnf)s", {'vnf': vnf['name']})
- raise nfvo.CreateChainException(
- message="Failed to create port-pair-group")
- port_pair_group_list.append(port_pair_group_id)
-
- # Check list port pair group between new port chain and the
- # existing port chains. Networking-sfc does not allow to create
- # two port chains with the same port pair groups and the same order
- for pc in port_chains_list['port_chains']:
- ppg_list = pc['port_pair_groups']
- if ppg_list == port_pair_group_list:
- # raise exception when the Vnffg path is already existing
- raise nfvo.CreateChainException(
- message="Vnffg path already exists")
- except nfvo.CreateChainException as e:
- # clean neutron resources such as port pair, port pair group and
- # flow classifier if we create it
- for ppg in new_ppgs:
- neutronclient_.port_pair_group_delete(ppg)
- for pp in new_pps:
- neutronclient_.port_pair_delete(pp)
- for fc_id in fc_ids:
- neutronclient_.flow_classifier_delete(fc_id)
- raise e
-
- # TODO(s3wong): should the chain name be given as a parameter?
- port_chain = {}
- port_chain['name'] = name + '-port-chain'
- if path_id:
- port_chain['chain_id'] = path_id
- port_chain['description'] = 'port-chain for Tacker VNFFG'
- port_chain['port_pair_groups'] = port_pair_group_list
- port_chain['flow_classifiers'] = fc_ids
- port_chain['chain_parameters'] = {}
- port_chain['chain_parameters']['symmetric'] = symmetrical
- port_chain['chain_parameters']['correlation'] = correlation
- return neutronclient_.port_chain_create(port_chain)
-
- def update_chain(self, chain_id, fc_ids, vnfs,
- symmetrical=None, auth_attr=None):
- # (s3wong): chain can be updated either for
- # the list of fc and/or list of port-pair-group
- # since n-sfc driver does NOT track the ppg id
- # it will look it up (or reconstruct) from
- # networking-sfc DB --- but the caveat is that
- # the VNF name MUST be unique
-
- # TODO(mardim) Currently we figure out which VNF belongs to what
- # port-pair-group or port-pair through the name of VNF.
- # This is not the best approach. The best approach for the future
- # propably is to maintain in the database the ID of the
- # port-pair-group and port-pair that VNF belongs to so we can
- # implemement the update in a more robust way.
-
- if not auth_attr:
- LOG.warning("auth information required for n-sfc driver")
- return None
-
- neutronclient_ = NeutronClient(auth_attr)
- port_pairs_list = neutronclient_.port_pair_list()
- port_pair_groups_list = neutronclient_.port_pair_group_list()
- port_chains_list = neutronclient_.port_chain_list()
- new_ppgs = []
- updated_port_chain = dict()
-
- pc_info = neutronclient_.port_chain_show(chain_id)
- if set(fc_ids) != set(pc_info['port_chain']['flow_classifiers']):
- updated_port_chain['flow_classifiers'] = fc_ids
- old_ppgs = pc_info['port_chain']['port_pair_groups']
- old_ppgs_dict = {neutronclient_.
- port_pair_group_show(ppg_id)['port_pair_group']['name'].
- split('-')[0]: ppg_id for ppg_id in old_ppgs}
- past_ppgs_dict = old_ppgs_dict.copy()
- try:
- for vnf in vnfs:
- port_pair_group = {}
- port_pair = {}
- if vnf['name'] in old_ppgs_dict:
- old_ppg_id = old_ppgs_dict.pop(vnf['name'])
- new_ppgs.append(old_ppg_id)
- else:
- if CONNECTION_POINT not in vnf:
- LOG.warning("Chain update failed due to missing "
- "connection point info in VNF "
- "%(vnfname)s", {'vnfname': vnf['name']})
- raise nfvo.UpdateChainException(
- message="Connection point not found")
- cp_list = vnf[CONNECTION_POINT]
- num_cps = len(cp_list)
- if num_cps not in [1, 2]:
- LOG.warning("Chain update failed due to wrong number "
- "of connection points: expected [1 | 2],"
- "got %(cps)d", {'cps': num_cps})
- raise nfvo.UpdateChainException(
- message="Invalid number of connection points")
- if num_cps == 1:
- ingress = cp_list[0]
- egress = cp_list[0]
- else:
- ingress = cp_list[0]
- egress = cp_list[1]
-
- # valid_port_in_use function is used to find out the
- # port_pair_group_id of the existing port pair group
- # which was created by ingress and egress of current VNF
- port_pair_group_id = self.valid_port_in_use(
- ingress, egress, port_pairs_list,
- port_pair_groups_list)
- if not port_pair_group_id:
- port_pair['name'] = vnf['name'] + '-connection-points'
- port_pair['description'] = \
- 'port pair for ' + vnf['name']
- port_pair['ingress'] = ingress
- port_pair['egress'] = egress
- port_pair_id = neutronclient_.port_pair_create(
- port_pair)
- if not port_pair_id:
- LOG.warning("Chain update failed due to port pair "
- "creation failed for "
- "vnf %(vnf)s", {'vnf': vnf['name']})
- raise nfvo.UpdateChainException(
- message="Failed to create port-pair")
- port_pair_group['name'] = \
- vnf['name'] + '-port-pair-group'
- port_pair_group['description'] = \
- 'port pair group for ' + vnf['name']
- port_pair_group['port_pairs'] = []
- port_pair_group['port_pairs'].append(port_pair_id)
- port_pair_group_id = neutronclient_.\
- port_pair_group_create(port_pair_group)
- if not port_pair_group_id:
- LOG.warning("Chain update failed due to port pair "
- "group creation failed for vnf "
- "%(vnf)s", {'vnf': vnf['name']})
- for pp_id in port_pair_group['port_pairs']:
- neutronclient_.port_pair_delete(pp_id)
- raise nfvo.UpdateChainException(
- message="Failed to create port-pair-group")
- new_ppgs.append(port_pair_group_id)
- for pc in port_chains_list['port_chains']:
- ppg_list = pc['port_pair_groups']
- if ppg_list == new_ppgs:
- # raise exception when the Vnffg path already exists
- nfvo.UpdateChainException(
- message="Vnffg path already exists")
- except nfvo.UpdateChainException as e:
- self._delete_ppgs_and_pps(neutronclient_, new_ppgs,
- past_ppgs_dict, port_chains_list, fc_ids)
- raise e
-
- updated_port_chain['port_pair_groups'] = new_ppgs
- updated_port_chain['flow_classifiers'] = fc_ids
- try:
- pc_id = neutronclient_.port_chain_update(chain_id,
- updated_port_chain)
- except (nc_exceptions.BadRequest, nfvo.UpdateChainException) as e:
- self._delete_ppgs_and_pps(neutronclient_, new_ppgs,
- past_ppgs_dict, port_chains_list)
- raise e
- for ppg_name in old_ppgs_dict:
- ppg_info = neutronclient_. \
- port_pair_group_show(old_ppgs_dict[ppg_name])
- ppg_inuse = self.valid_ppg_for_multiple_chain(
- ppg_info['port_pair_group']['id'], port_chains_list)
- if not ppg_inuse:
- neutronclient_.port_pair_group_delete(old_ppgs_dict[ppg_name])
- port_pairs = ppg_info['port_pair_group']['port_pairs']
- if port_pairs and len(port_pairs):
- for j in range(0, len(port_pairs)):
- pp_id = port_pairs[j]
- neutronclient_.port_pair_delete(pp_id)
- return pc_id
-
- def delete_chain(self, chain_id, auth_attr=None):
- if not auth_attr:
- LOG.warning("auth information required for n-sfc driver")
- return None
-
- neutronclient_ = NeutronClient(auth_attr)
- neutronclient_.port_chain_delete(chain_id)
-
- def update_flow_classifier(self, chain_id, fc, auth_attr=None):
- if not auth_attr:
- LOG.warning("auth information required for n-sfc driver")
- return None
-
- fc_id = fc.pop('instance_id')
- fc_status = fc.pop('status')
- match_dict = fc.pop('match')
- fc.update(match_dict)
-
- sfc_classifier_params = self._create_classifier_params(fc)
- neutronclient_ = NeutronClient(auth_attr)
- if fc_status == constants.PENDING_UPDATE:
- fc_info = neutronclient_.flow_classifier_show(fc_id)
- for field in sfc_classifier_params:
- # If the new classifier is the same with the old one then
- # no change needed.
- if (fc_info['flow_classifier'].get(field) is not None) and \
- (sfc_classifier_params[field] == fc_info[
- 'flow_classifier'][field]):
- continue
-
- # If the new classifier has different match criteria
- # with the old one then we strip the classifier from
- # the chain we delete the old classifier and we create
- # a new one with the same name as before but with different
- # match criteria. We are not using the flow_classifier_update
- # from the n-sfc because it does not support match criteria
- # update for an existing classifier yet.
- else:
- try:
- self._dissociate_classifier_from_chain(chain_id,
- [fc_id],
- neutronclient_)
- except Exception as e:
- raise e
- fc_id = neutronclient_.flow_classifier_create(
- sfc_classifier_params)
- if fc_id is None:
- raise nfvo.UpdateClassifierException(
- message="Failed to update classifiers")
- break
-
- # If the new classifier is completely different from the existing
- # ones (name and match criteria) then we just create it.
- else:
- fc_id = neutronclient_.flow_classifier_create(
- sfc_classifier_params)
- if fc_id is None:
- raise nfvo.UpdateClassifierException(
- message="Failed to update classifiers")
-
- return fc_id
-
- def _dissociate_classifier_from_chain(self, chain_id, fc_ids,
- neutronclient):
- pc_info = neutronclient.port_chain_show(chain_id)
- current_fc_list = pc_info['port_chain']['flow_classifiers']
- for fc_id in fc_ids:
- current_fc_list.remove(fc_id)
- pc_id = neutronclient.port_chain_update(chain_id,
- {'flow_classifiers': current_fc_list})
- if pc_id is None:
- raise nfvo.UpdateClassifierException(
- message="Failed to update classifiers")
- for fc_id in fc_ids:
- try:
- neutronclient.flow_classifier_delete(fc_id)
- except ValueError as e:
- raise e
-
- def remove_and_delete_flow_classifiers(self, chain_id, fc_ids,
- auth_attr=None):
- if not auth_attr:
- LOG.warning("auth information required for n-sfc driver")
- raise EnvironmentError('auth attribute required for'
- ' networking-sfc driver')
- neutronclient_ = NeutronClient(auth_attr)
- try:
- self._dissociate_classifier_from_chain(chain_id, fc_ids,
- neutronclient_)
- except Exception as e:
- raise e
-
- def delete_flow_classifier(self, fc_id, auth_attr=None):
- if not auth_attr:
- LOG.warning("auth information required for n-sfc driver")
- raise EnvironmentError('auth attribute required for'
- ' networking-sfc driver')
-
- neutronclient_ = NeutronClient(auth_attr)
- neutronclient_.flow_classifier_delete(fc_id)
-
- def _delete_ppgs_and_pps(self, neutronclient, new_ppgs,
- past_ppgs_dict, pcs_list, fc_ids):
- if new_ppgs:
- for item in new_ppgs:
- if item not in past_ppgs_dict.values():
- ppg_inuse = self.valid_ppg_for_multiple_chain(
- item, pcs_list)
- if not ppg_inuse:
- # clean port pair and port pair group if
- # it is not in used
- new_ppg_info = neutronclient.port_pair_group_show(item)
- neutronclient.port_pair_group_delete(item)
- new_port_pairs = new_ppg_info['port_pair_group'][
- 'port_pairs']
- if new_port_pairs and len(new_port_pairs):
- for j in range(0, len(new_port_pairs)):
- new_pp_id = new_port_pairs[j]
- neutronclient.port_pair_delete(new_pp_id)
- # clean flow classifiers
- for fc_id in fc_ids:
- neutronclient.flow_classifier_delete(fc_id)
-
- def valid_port_in_use(self, ingress, egress, pps_list, ppgs_list):
- # This function checks the the ports are used or not and return the
- # port pair group id of these ports
- port_pair_list = pps_list['port_pairs']
- port_pair_group_list = ppgs_list['port_pair_groups']
- port_pair_id = None
- port_pair_group_id = None
- for pp in port_pair_list:
- if (ingress == pp['ingress']) and (egress == pp['egress']):
- port_pair_id = pp['id']
- break
- if port_pair_id:
- for ppg in port_pair_group_list:
- if port_pair_id in ppg['port_pairs']:
- port_pair_group_id = ppg['id']
- break
- return port_pair_group_id
-
- def valid_ppg_for_multiple_chain(self, ppg_id, pcs_list):
- # This function returns True if a ppg belongs to more than one
- # port chain. If not return False.
- count = 0
- for pc in pcs_list['port_chains']:
- if ppg_id in pc['port_pair_groups']:
- count = count + 1
- return True if count > 1 else False
-
-
-class NeutronClient(object):
- """Neutron Client class for networking-sfc driver"""
-
- def __init__(self, auth_attr):
- auth_cred = auth_attr.copy()
- verify = utils.str_to_bool(auth_cred.pop('cert_verify', 'True'))
- auth = identity.Password(**auth_cred)
- sess = session.Session(auth=auth, verify=verify)
- self.client = neutron_client.Client(session=sess)
-
- def flow_classifier_show(self, fc_id):
- try:
- fc = self.client.show_sfc_flow_classifier(fc_id)
- if fc is None:
- raise ValueError('classifier %s not found' % fc_id)
- return fc
- except nc_exceptions.NotFound:
- LOG.error('classifier %s not found', fc_id)
- raise ValueError('classifier %s not found' % fc_id)
-
- def flow_classifier_create(self, fc_dict):
- LOG.debug("fc_dict passed is {fc_dict}".format(fc_dict=fc_dict))
- try:
- fc = self.client.create_sfc_flow_classifier(
- {'flow_classifier': fc_dict})
- return fc['flow_classifier']['id']
- except Exception as ex:
- LOG.error("Error while creating Flow Classifier: %s", str(ex))
- raise nfvo.FlowClassiferCreationFailed(message=str(ex))
-
- def flow_classifier_update(self, fc_id, update_fc):
- update_fc_dict = {'flow_classifier': update_fc}
- return self.client.update_sfc_flow_classifier(fc_id, update_fc_dict)
-
- def flow_classifier_delete(self, fc_id):
- try:
- self.client.delete_sfc_flow_classifier(fc_id)
- except nc_exceptions.NotFound:
- LOG.error("fc %s not found", fc_id)
- raise ValueError('fc %s not found' % fc_id)
-
- def port_pair_create(self, port_pair_dict):
- try:
- pp = self.client.create_sfc_port_pair(
- {'port_pair': port_pair_dict})
- except nc_exceptions.BadRequest as e:
- LOG.error("create port pair returns %s", e)
- raise ValueError(str(e))
-
- if pp and len(pp):
- return pp['port_pair']['id']
- else:
- return None
-
- def port_pair_list(self):
- pp_list = self.client.list_sfc_port_pairs()
- return pp_list
-
- def port_pair_delete(self, port_pair_id):
- try:
- self.client.delete_sfc_port_pair(port_pair_id)
- except nc_exceptions.NotFound:
- LOG.error('port pair %s not found', port_pair_id)
- raise ValueError('port pair %s not found' % port_pair_id)
-
- def port_pair_group_create(self, ppg_dict):
- try:
- ppg = self.client.create_sfc_port_pair_group(
- {'port_pair_group': ppg_dict})
- except nc_exceptions.BadRequest as e:
- LOG.error('create port pair group returns %s', e)
- raise ValueError(str(e))
-
- if ppg and len(ppg):
- return ppg['port_pair_group']['id']
- else:
- return None
-
- def port_pair_group_list(self):
- ppg_list = self.client.list_sfc_port_pair_groups()
- return ppg_list
-
- def port_pair_group_delete(self, ppg_id):
- try:
- self.client.delete_sfc_port_pair_group(ppg_id)
- except nc_exceptions.NotFound:
- LOG.error('port pair group %s not found', ppg_id)
- raise ValueError('port pair group %s not found' % ppg_id)
-
- def port_chain_create(self, port_chain_dict):
- try:
- pc = self.client.create_sfc_port_chain(
- {'port_chain': port_chain_dict})
- except nc_exceptions.BadRequest as e:
- LOG.error('create port chain returns %s', e)
- raise ValueError(str(e))
-
- if pc and len(pc):
- return pc['port_chain']['id'], pc['port_chain']['chain_id']
- else:
- return None
-
- def port_chain_delete(self, port_chain_id):
- try:
- port_chain = self.client.show_sfc_port_chain(port_chain_id)
- if port_chain:
- self.client.delete_sfc_port_chain(port_chain_id)
- port_chain_list = \
- self.client.list_sfc_port_chains()['port_chains']
- ppg_list = port_chain['port_chain'].get('port_pair_groups')
- if ppg_list and len(ppg_list):
- for i in range(0, len(ppg_list)):
- ppg_in_use = False
- # Firstly, Tacker delete port chain, if a port pair
- # group still belong to other port chains, Tacker
- # will mark it as in_use and does not delete it.
- for pc in port_chain_list:
- if ppg_list[i] in pc['port_pair_groups']:
- ppg_in_use = True
- break
- if not ppg_in_use:
- ppg = self.client.show_sfc_port_pair_group(
- ppg_list[i])
- if ppg:
- self.client.delete_sfc_port_pair_group(
- ppg_list[i])
- port_pairs = \
- ppg['port_pair_group']['port_pairs']
- if port_pairs and len(port_pairs):
- for j in range(0, len(port_pairs)):
- pp_id = port_pairs[j]
- self.client.delete_sfc_port_pair(pp_id)
- except nc_exceptions.NotFound:
- LOG.error('port chain %s not found', port_chain_id)
- raise ValueError('port chain %s not found' % port_chain_id)
-
- def port_chain_update(self, port_chain_id, port_chain):
- try:
- pc = self.client.update_sfc_port_chain(port_chain_id,
- {'port_chain': port_chain})
- except nc_exceptions.BadRequest as e:
- LOG.error('update port chain returns %s', e)
- raise ValueError(str(e))
- if pc and len(pc):
- return pc['port_chain']['id']
- else:
- raise nfvo.UpdateChainException(message="Failed to update "
- "port-chain")
-
- def port_chain_list(self):
- pc_list = self.client.list_sfc_port_chains()
- return pc_list
-
- def port_chain_show(self, port_chain_id):
- try:
- port_chain = self.client.show_sfc_port_chain(port_chain_id)
- if port_chain is None:
- raise ValueError('port chain %s not found' % port_chain_id)
-
- return port_chain
- except nc_exceptions.NotFound:
- LOG.error('port chain %s not found', port_chain_id)
- raise ValueError('port chain %s not found' % port_chain_id)
-
- def port_pair_group_show(self, ppg_id):
- try:
- port_pair_group = self.client.show_sfc_port_pair_group(ppg_id)
- if port_pair_group is None:
- raise ValueError('port pair group %s not found' % ppg_id)
-
- return port_pair_group
- except nc_exceptions.NotFound:
- LOG.error('port pair group %s not found', ppg_id)
- raise ValueError('port pair group %s not found' % ppg_id)
diff --git a/tacker/nfvo/drivers/vnffg/__init__.py b/tacker/nfvo/drivers/vnffg/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/nfvo/drivers/vnffg/abstract_vnffg_driver.py b/tacker/nfvo/drivers/vnffg/abstract_vnffg_driver.py
deleted file mode 100644
index 96c8f0e08..000000000
--- a/tacker/nfvo/drivers/vnffg/abstract_vnffg_driver.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from tacker.api import extensions
-
-
-class VnffgAbstractDriver(extensions.PluginInterface, metaclass=abc.ABCMeta):
-
- @abc.abstractmethod
- def get_type(self):
- """Return one of predefined type of Tacker drivers."""
- pass
-
- @abc.abstractmethod
- def get_name(self):
- """Return a symbolic name for the Tacker VNFFG SFC driver."""
- pass
-
- @abc.abstractmethod
- def get_description(self):
- pass
-
- @abc.abstractmethod
- def create_chain(self, name, fc_id, vnfs, symmetrical=False,
- auth_attr=None):
- """Create service function chain and returns an ID"""
- pass
-
- @abc.abstractmethod
- def update_chain(self, chain_id, fc_ids, vnfs,
- symmetrical=False,
- auth_attr=None):
- """Update service function chain"""
- pass
-
- @abc.abstractmethod
- def delete_chain(self, chain_id, auth_attr=None):
- """Delete service function chain"""
- pass
-
- @abc.abstractmethod
- def create_flow_classifier(self, name, fc, auth_attr=None):
- """Create flow classifier and returns an ID"""
- pass
-
- @abc.abstractmethod
- def update_flow_classifier(self, fc_id, fc, auth_attr=None):
- """Update flow classifier"""
- pass
-
- @abc.abstractmethod
- def delete_flow_classifier(self, fc_id, auth_attr=None):
- """Delete flow classifier"""
- pass
diff --git a/tacker/nfvo/drivers/vnffg/sfc_drivers/__init__.py b/tacker/nfvo/drivers/vnffg/sfc_drivers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/nfvo/drivers/vnffg/sfc_drivers/noop.py b/tacker/nfvo/drivers/vnffg/sfc_drivers/noop.py
deleted file mode 100644
index 88aae68c9..000000000
--- a/tacker/nfvo/drivers/vnffg/sfc_drivers/noop.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2016 Red Hat Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-from tacker.common import log
-from tacker.nfvo.drivers.vnffg import abstract_vnffg_driver
-
-LOG = logging.getLogger(__name__)
-
-
-class VNFFGNoop(abstract_vnffg_driver.VnffgAbstractDriver):
-
- """Noop driver for VNFFG tests"""
-
- def __init__(self):
- super(VNFFGNoop, self).__init__()
- self._instances = set()
-
- def get_type(self):
- return 'noop'
-
- def get_name(self):
- return 'noop'
-
- def get_description(self):
- return 'VNFFG Noop driver'
-
- @log.log
- def create_chain(self, name, fc_id, vnfs, auth_attr=None):
- instance_id = uuidutils.generate_uuid()
- self._instances.add(instance_id)
- return instance_id
-
- @log.log
- def update_chain(self, chain_id, fc_ids, vnfs, auth_attr=None):
- if chain_id not in self._instances:
- LOG.error('Chain not found')
- raise ValueError('No chain instance %s' % chain_id)
-
- @log.log
- def delete_chain(self, chain_id, auth_attr=None):
- self._instances.remove(chain_id)
-
- @log.log
- def create_flow_classifier(self, name, fc, auth_attr=None):
- instance_id = uuidutils.generate_uuid()
- self._instances.add(instance_id)
- return instance_id
-
- @log.log
- def update_flow_classifier(self, fc_id, fc, auth_attr=None):
- if fc_id not in self._instances:
- LOG.error('FC not found')
- raise ValueError('No FC instance %s' % fc_id)
-
- @log.log
- def delete_flow_classifier(self, fc_id, auth_attr=None):
- self._instances.remove(fc_id)
diff --git a/tacker/nfvo/nfvo_plugin.py b/tacker/nfvo/nfvo_plugin.py
index 28df1825c..edb31b448 100644
--- a/tacker/nfvo/nfvo_plugin.py
+++ b/tacker/nfvo/nfvo_plugin.py
@@ -15,35 +15,20 @@
# under the License.
import copy
-import os
-import yaml
-from cryptography import fernet
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import uuidutils
-from tempfile import mkstemp
-from toscaparser import tosca_template
-from toscaparser.tosca_template import ToscaTemplate
from tacker._i18n import _
from tacker.common import driver_manager
-from tacker.common import exceptions
from tacker.common import log
from tacker.common import utils
-from tacker import context as t_context
from tacker.db.nfvo import nfvo_db_plugin
-from tacker.db.nfvo import ns_db
-from tacker.db.nfvo import vnffg_db
-from tacker.extensions import common_services as cs
from tacker.extensions import nfvo
-from tacker.keymgr import API as KEYMGR_API
-from tacker import manager
-from tacker.plugins.common import constants
-from tacker.tosca import utils as toscautils
from tacker.vnfm import keystone
from tacker.vnfm import vim_client
@@ -56,8 +41,7 @@ def config_opts():
return [('nfvo_vim', NfvoPlugin.OPTS)]
-class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
- ns_db.NSPluginDb):
+class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb):
"""NFVO reference plugin for NFVO extension
Implements the NFVO extension and defines public facing APIs for VIM
@@ -71,9 +55,6 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
cfg.ListOpt(
'vim_drivers', default=['openstack', 'kubernetes'],
help=_('VIM driver for launching VNFs')),
- cfg.IntOpt(
- 'monitor_interval', default=30,
- help=_('Interval to check for VIM health')),
]
cfg.CONF.register_opts(OPTS, 'nfvo_vim')
@@ -97,20 +78,6 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
LOG.error(f'Validation Failed for Keystone auth_url: {auth_url}')
raise nfvo.VimConnectionException(message=str(e))
- def get_auth_dict(self, context):
- auth = CONF.keystone_authtoken
- auth_url = utils.get_auth_url_v3(auth.auth_url)
- self.validate_keystone_auth_url(auth_url=auth_url, verify=True)
- return {
- 'auth_url': auth_url,
- 'token': context.auth_token,
- 'project_domain_name': auth.project_domain_name or context.domain,
- 'project_name': context.tenant_name
- }
-
- def spawn_n(self, function, *args, **kwargs):
- self._pool.spawn_n(function, *args, **kwargs)
-
@log.log
def create_vim(self, context, vim):
LOG.debug('Create vim called with parameters %s',
@@ -241,615 +208,3 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
'deregister_vim',
vim_obj=vim_obj)
super(NfvoPlugin, self).delete_vim(context, vim_id)
-
- @log.log
- def validate_tosca(self, template):
- if "tosca_definitions_version" not in template:
- raise nfvo.ToscaParserFailed(
- error_msg_details='tosca_definitions_version missing in '
- 'template'
- )
-
- LOG.debug('template yaml: %s', template)
-
- toscautils.updateimports(template)
-
- try:
- tosca_template.ToscaTemplate(
- a_file=False, yaml_dict_tpl=template,
- local_defs=toscautils.tosca_tmpl_local_defs())
- except Exception as e:
- LOG.exception("tosca-parser error: %s", str(e))
- raise nfvo.ToscaParserFailed(error_msg_details=str(e))
-
- @log.log
- def validate_vnffgd_path(self, template):
- temp = template['vnffgd']['topology_template']
- vnffg_name = list(temp['groups'].keys())[0]
- nfp_name = temp['groups'][vnffg_name]['members'][0]
- path = self._get_nfp_attribute(template, nfp_name,
- 'path')
-
- prev_element = None
- known_forwarders = set()
- for element in path:
- if element.get('forwarder') in known_forwarders:
- if prev_element is not None and element.get('forwarder') \
- != prev_element['forwarder']:
- raise nfvo.VnffgdDuplicateForwarderException(
- forwarder=element.get('forwarder')
- )
- elif prev_element is not None and element.get(
- 'capability') == prev_element['capability']:
- raise nfvo.VnffgdDuplicateCPException(
- cp=element.get('capability')
- )
- else:
- known_forwarders.add(element.get('forwarder'))
- prev_element = element
-
- @log.log
- def validate_vnffg_properties(self, template):
-
- # check whether number_of_endpoints is same with connection_point
- connection_point = self._get_vnffg_property(
- template, 'connection_point')
- number_endpoint = self._get_vnffg_property(
- template, 'number_of_endpoints')
-
- if len(connection_point) != number_endpoint:
- raise nfvo.VnffgdWrongEndpointNumber(
- number=number_endpoint,
- cps=connection_point)
-
- @log.log
- def create_vnffgd(self, context, vnffgd):
- template = vnffgd['vnffgd']
-
- if 'template_source' in template:
- template_source = template.get('template_source')
- else:
- template_source = 'onboarded'
- vnffgd['vnffgd']['template_source'] = template_source
-
- if 'vnffgd' not in template.get('template'):
- raise nfvo.VnffgdInvalidTemplate(template=template.get('template'))
- else:
- self.validate_tosca(template['template']['vnffgd'])
-
- self.validate_vnffgd_path(template['template'])
-
- self.validate_vnffg_properties(template['template'])
-
- template_yaml = template['template']['vnffgd']
- if not template.get('description'):
- template['description'] = template_yaml.get('description', '')
- if not template.get('name') and 'metadata' in template_yaml:
- template['name'] = template_yaml['metadata'].get(
- 'template_name', '')
-
- return super(NfvoPlugin, self).create_vnffgd(context, vnffgd)
-
- @log.log
- def create_vnffg(self, context, vnffg):
- vnffg_info = vnffg['vnffg']
- name = vnffg_info['name']
-
- if vnffg_info.get('vnffgd_template'):
- vnffgd_name = utils.generate_resource_name(name, 'inline')
- vnffgd = {'vnffgd': {'tenant_id': vnffg_info['tenant_id'],
- 'name': vnffgd_name,
- 'template': {
- 'vnffgd': vnffg_info['vnffgd_template']},
- 'template_source': 'inline',
- 'description': vnffg_info['description']}}
- vnffg_info['vnffgd_id'] = \
- self.create_vnffgd(context, vnffgd).get('id')
-
- vnffg_dict = super(NfvoPlugin, self)._create_vnffg_pre(context, vnffg)
- nfp = super(NfvoPlugin, self).get_nfp(context,
- vnffg_dict['forwarding_paths'])
- sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])
- name_match_list = []
- for classifier_id in nfp['classifier_ids']:
- classifier_dict = super(NfvoPlugin, self).get_classifier(
- context, classifier_id, fields=['name', 'match'])
- name_match_list.append(classifier_dict)
- # grab the first VNF to check it's VIM type
- # we have already checked that all VNFs are in the same VIM
- vim_obj = self._get_vim_from_vnf(
- context, list(vnffg_dict['vnf_mapping'].values())[0])
- # TODO(trozet): figure out what auth info we actually need to pass
- # to the driver. Is it a session, or is full vim obj good enough?
-
- correlation = super(NfvoPlugin, self)._get_correlation_template(
- context, vnffg_info)
- driver_type = vim_obj['type']
- try:
- fc_ids = []
- for item in name_match_list:
- fc_ids.append(self._vim_drivers.invoke(driver_type,
- 'create_flow_classifier',
- name=item['name'],
- fc=item['match'],
- auth_attr=vim_obj['auth_cred']))
- sfc_id, path_id = self._vim_drivers.invoke(driver_type,
- 'create_chain',
- name=vnffg_dict['name'],
- path_id=sfc['path_id'],
- vnfs=sfc['chain'],
- fc_ids=fc_ids,
- symmetrical=sfc['symmetrical'],
- correlation=correlation,
- auth_attr=vim_obj['auth_cred'])
- except Exception:
- with excutils.save_and_reraise_exception():
- self.delete_vnffg(context, vnffg_id=vnffg_dict['id'])
- classifiers_map = super(NfvoPlugin, self). \
- create_classifiers_map(nfp['classifier_ids'], fc_ids)
- super(NfvoPlugin, self)._create_vnffg_post(context, sfc_id,
- path_id, classifiers_map,
- vnffg_dict)
- super(NfvoPlugin, self)._create_vnffg_status(context, vnffg_dict)
- return vnffg_dict
-
- @log.log
- def update_vnffg(self, context, vnffg_id, vnffg):
- vnffg_info = vnffg['vnffg']
- # put vnffg related objects in PENDING_UPDATE status
- vnffg_old = super(NfvoPlugin, self)._update_vnffg_status_pre(
- context, vnffg_id)
- name = vnffg_old['name']
-
- # create inline vnffgd if given by user
- if vnffg_info.get('vnffgd_template'):
- vnffgd_name = utils.generate_resource_name(name, 'inline')
- vnffgd = {'vnffgd': {'tenant_id': vnffg_old['tenant_id'],
- 'name': vnffgd_name,
- 'template': {
- 'vnffgd': vnffg_info['vnffgd_template']},
- 'template_source': 'inline',
- 'description': vnffg_old['description']}}
- try:
- vnffg_info['vnffgd_id'] = \
- self.create_vnffgd(context, vnffgd).get('id')
- except Exception:
- with excutils.save_and_reraise_exception():
- super(NfvoPlugin, self)._update_vnffg_status_post(context,
- vnffg_old, error=True, db_state=constants.ACTIVE)
- try:
-
- vnffg_dict = super(NfvoPlugin, self). \
- _update_vnffg_pre(context, vnffg, vnffg_id, vnffg_old)
-
- except (nfvo.VnfMappingNotFoundException,
- nfvo.VnfMappingNotValidException):
- with excutils.save_and_reraise_exception():
-
- if vnffg_info.get('vnffgd_template'):
- super(NfvoPlugin, self).delete_vnffgd(
- context, vnffg_info['vnffgd_id'])
-
- super(NfvoPlugin, self)._update_vnffg_status_post(
- context, vnffg_old, error=True, db_state=constants.ACTIVE)
- except nfvo.UpdateVnffgException:
- with excutils.save_and_reraise_exception():
- super(NfvoPlugin, self).delete_vnffgd(context,
- vnffg_info['vnffgd_id'])
-
- super(NfvoPlugin, self)._update_vnffg_status_post(context,
- vnffg_old,
- error=True)
-
- nfp = super(NfvoPlugin, self).get_nfp(context,
- vnffg_dict['forwarding_paths'])
- sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])
-
- classifier_dict = dict()
- classifier_update = []
- classifier_delete_ids = []
- classifier_ids = []
- for classifier_id in nfp['classifier_ids']:
- classifier_dict = super(NfvoPlugin, self).get_classifier(
- context, classifier_id, fields=['id', 'name', 'match',
- 'instance_id', 'status'])
- if classifier_dict['status'] == constants.PENDING_DELETE:
- classifier_delete_ids.append(
- classifier_dict.pop('instance_id'))
- else:
- classifier_ids.append(classifier_dict.pop('id'))
- classifier_update.append(classifier_dict)
-
- # TODO(gongysh) support different vim for each vnf
- vim_obj = self._get_vim_from_vnf(context,
- list(vnffg_dict[
- 'vnf_mapping'].values())[0])
- driver_type = vim_obj['type']
- try:
- fc_ids = []
- self._vim_drivers.invoke(driver_type,
- 'remove_and_delete_flow_classifiers',
- chain_id=sfc['instance_id'],
- fc_ids=classifier_delete_ids,
- auth_attr=vim_obj['auth_cred'])
- for item in classifier_update:
- fc_ids.append(self._vim_drivers.invoke(driver_type,
- 'update_flow_classifier',
- chain_id=sfc['instance_id'],
- fc=item,
- auth_attr=vim_obj['auth_cred']))
- n_sfc_chain_id = self._vim_drivers.invoke(
- driver_type, 'update_chain',
- vnfs=sfc['chain'], fc_ids=fc_ids,
- chain_id=sfc['instance_id'], auth_attr=vim_obj['auth_cred'])
- except Exception:
- with excutils.save_and_reraise_exception():
- super(NfvoPlugin, self)._update_vnffg_status_post(context,
- vnffg_dict,
- error=True)
-
- classifiers_map = super(NfvoPlugin, self).create_classifiers_map(
- classifier_ids, fc_ids)
- super(NfvoPlugin, self)._update_vnffg_post(context, n_sfc_chain_id,
- classifiers_map,
- vnffg_dict)
- super(NfvoPlugin, self)._update_vnffg_status_post(context, vnffg_dict)
- return vnffg_dict
-
- @log.log
- def delete_vnffg(self, context, vnffg_id):
- vnffg_dict = super(NfvoPlugin, self)._delete_vnffg_pre(context,
- vnffg_id)
- nfp = super(NfvoPlugin, self).get_nfp(context,
- vnffg_dict['forwarding_paths'])
- sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])
-
- classifiers = [super(NfvoPlugin, self).
- get_classifier(context, classifier_id)
- for classifier_id in nfp['classifier_ids']]
- vim_obj = self._get_vim_from_vnf(context,
- list(vnffg_dict[
- 'vnf_mapping'].values())[0])
- driver_type = vim_obj['type']
- try:
- if sfc['instance_id'] is not None:
- self._vim_drivers.invoke(driver_type, 'delete_chain',
- chain_id=sfc['instance_id'],
- auth_attr=vim_obj['auth_cred'])
- for classifier in classifiers:
- if classifier['instance_id'] is not None:
- self._vim_drivers.invoke(driver_type,
- 'delete_flow_classifier',
- fc_id=classifier['instance_id'],
- auth_attr=vim_obj['auth_cred'])
- except Exception:
- with excutils.save_and_reraise_exception():
- vnffg_dict['status'] = constants.ERROR
- super(NfvoPlugin, self)._delete_vnffg_post(context, vnffg_id,
- True)
- super(NfvoPlugin, self)._delete_vnffg_post(context, vnffg_id, False)
- return vnffg_dict
-
- def _get_vim_from_vnf(self, context, vnf_id):
- """Figures out VIM based on a VNF
-
- :param context: SQL Session Context
- :param vnf_id: VNF ID
- :return: VIM or VIM properties if fields are provided
- """
- vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
- vim_id = vnfm_plugin.get_vnf(context, vnf_id, fields=['vim_id'])
- vim_obj = self.get_vim(context, vim_id['vim_id'], mask_password=False)
- if vim_obj is None:
- raise nfvo.VimFromVnfNotFoundException(vnf_id=vnf_id)
- self._build_vim_auth(vim_obj)
- return vim_obj
-
- def _build_vim_auth(self, vim_info):
- LOG.debug('VIM id is %s', vim_info['id'])
- vim_auth = vim_info['auth_cred']
- vim_auth['password'] = self._decode_vim_auth(vim_info['id'],
- vim_auth)
- vim_auth['auth_url'] = vim_info['auth_url']
-
- # These attributes are needless for authentication
- # from keystone, so we remove them.
- needless_attrs = ['key_type', 'secret_uuid']
- for attr in needless_attrs:
- if attr in vim_auth:
- vim_auth.pop(attr, None)
- return vim_auth
-
- def _decode_vim_auth(self, vim_id, auth):
- """Decode Vim credentials
-
- Decrypt VIM cred, get fernet Key from local_file_system or
- barbican.
- """
- cred = auth['password'].encode('utf-8')
- if auth.get('key_type') == 'barbican_key':
- k_context = t_context.generate_tacker_service_context()
- secret_uuid = auth['secret_uuid']
- if CONF.ext_oauth2_auth.use_ext_oauth2_auth:
- keymgr_api = KEYMGR_API(CONF.ext_oauth2_auth.token_endpoint)
- else:
- keymgr_api = KEYMGR_API(CONF.keystone_authtoken.auth_url)
- secret_obj = keymgr_api.get(k_context, secret_uuid)
- vim_key = secret_obj.payload
- else:
- vim_key = self._find_vim_key(vim_id)
-
- f = fernet.Fernet(vim_key)
- if not f:
- LOG.error('Unable to decode VIM auth')
- raise nfvo.VimNotFoundException(vim_id=vim_id)
- return f.decrypt(cred).decode('utf-8')
-
- @staticmethod
- def _find_vim_key(vim_id):
- key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
- LOG.debug('Attempting to open key file for vim id %s', vim_id)
- try:
- with open(key_file, 'r') as f:
- return f.read()
- except Exception:
- LOG.error('VIM id invalid or key not found for %s', vim_id)
- raise nfvo.VimKeyNotFoundException(vim_id=vim_id)
-
- def _vim_resource_name_to_id(self, context, resource, name, vnf_id):
- """Converts a VIM resource name to its ID
-
- :param resource: resource type to find (network, subnet, etc)
- :param name: name of the resource to find its ID
- :param vnf_id: A VNF instance ID that is part of the chain to which
- the classifier will apply to
- :return: ID of the resource name
- """
- vim_obj = self._get_vim_from_vnf(context, vnf_id)
- driver_type = vim_obj['type']
- return self._vim_drivers.invoke(driver_type,
- 'get_vim_resource_id',
- vim_obj=vim_obj,
- resource_type=resource,
- resource_name=name)
-
- @log.log
- def create_nsd(self, context, nsd):
- nsd_data = nsd['nsd']
- template = nsd_data['attributes'].get('nsd')
- if isinstance(template, dict):
- nsd_data['attributes']['nsd'] = yaml.safe_dump(
- template)
- LOG.debug('nsd %s', nsd_data)
-
- if 'template_source' in nsd_data:
- template_source = nsd_data.get('template_source')
- else:
- template_source = "onboarded"
- nsd['nsd']['template_source'] = template_source
-
- self._parse_template_input(context, nsd)
- return super(NfvoPlugin, self).create_nsd(
- context, nsd)
-
- def _parse_template_input(self, context, nsd):
- nsd_dict = nsd['nsd']
- nsd_yaml = nsd_dict['attributes'].get('nsd')
- inner_nsd_dict = yaml.safe_load(nsd_yaml)
- nsd['vnfds'] = dict()
- LOG.debug('nsd_dict: %s', inner_nsd_dict)
-
- vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
- vnfd_imports = inner_nsd_dict.get('imports')
- if not vnfd_imports:
- LOG.error('VNFD import section is missing')
- raise nfvo.ToscaParserFailed(
- error_msg_details='VNFD import section is missing')
- inner_nsd_dict['imports'] = []
- new_files = []
- for vnfd_name in vnfd_imports:
- vnfd = vnfm_plugin.get_vnfd(context, vnfd_name)
- # Copy VNF types and VNF names
- sm_dict = yaml.safe_load(vnfd['attributes']['vnfd'])[
- 'topology_template'][
- 'substitution_mappings']
- nsd['vnfds'][sm_dict['node_type']] = vnfd['name']
- # Ugly Hack to validate the child templates
- # TODO(tbh): add support in tosca-parser to pass child
- # templates as dict
- fd, temp_path = mkstemp()
- with open(temp_path, 'w') as fp:
- fp.write(vnfd['attributes']['vnfd'])
- os.close(fd)
- new_files.append(temp_path)
- inner_nsd_dict['imports'].append(temp_path)
- # Prepend the tacker_defs.yaml import file with the full
- # path to the file
- toscautils.updateimports(inner_nsd_dict)
-
- try:
- ToscaTemplate(a_file=False,
- yaml_dict_tpl=inner_nsd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- except Exception as e:
- LOG.exception("tosca-parser error: %s", str(e))
- raise nfvo.ToscaParserFailed(error_msg_details=str(e))
- finally:
- for file_path in new_files:
- os.remove(file_path)
- inner_nsd_dict['imports'] = vnfd_imports
-
- if ('description' not in nsd_dict or
- nsd_dict['description'] == ''):
- nsd_dict['description'] = inner_nsd_dict.get(
- 'description', '')
- if (('name' not in nsd_dict or
- not len(nsd_dict['name'])) and
- 'metadata' in inner_nsd_dict):
- nsd_dict['name'] = inner_nsd_dict['metadata'].get(
- 'template_name', '')
-
- LOG.debug('nsd %s', nsd)
-
- def _get_vnfd_id(self, vnfd_name, onboarded_vnfds):
- for vnfd in onboarded_vnfds:
- if vnfd_name == vnfd['name']:
- return vnfd['id']
-
- @log.log
- def _get_vnffgds_from_nsd(self, nsd_dict):
- ns_topo = nsd_dict.get('topology_template')
- vnffgd_templates = dict()
- if ns_topo and ns_topo.get('groups'):
- for vnffg_name in ns_topo.get('groups'):
- vnffgd_template = dict()
- # TODO(phuoc): add checking in case vnffg_name exists
- # more than one time.
- # Constructing vnffgd from nsd, remove imports section
- vnffgd_template['tosca_definitions_version'] = \
- nsd_dict.get('tosca_definitions_version')
- vnffgd_template['description'] = nsd_dict.get('description')
- vnffgd_template['topology_template'] = dict()
- vnffgd_template['topology_template']['groups'] = dict()
- vnffgd_template['topology_template']['groups'][vnffg_name] = \
- ns_topo['groups'].get(vnffg_name)
- vnffgd_template['topology_template']['node_templates'] = dict()
- for fp_name in ns_topo['groups'][vnffg_name]['members']:
- vnffgd_template['topology_template']['node_templates'][
- fp_name] = ns_topo['node_templates'].get(fp_name)
- vnffgd_templates[vnffg_name] = vnffgd_template
- return vnffgd_templates
-
- # TODO(hiromu): Remove create NS API after the deprecation is accepted.
- @log.log
- def create_ns(self, context, ns):
- """Create NS, corresponding VNFs, VNFFGs.
-
- :param ns: ns dict which contains nsd_id and attributes
- """
- ns_info = ns['ns']
- name = ns_info['name']
-
- if ns_info.get('nsd_template'):
- nsd_name = utils.generate_resource_name(name, 'inline')
- nsd = {'nsd': {
- 'attributes': {'nsd': ns_info['nsd_template']},
- 'description': ns_info['description'],
- 'name': nsd_name,
- 'template_source': 'inline',
- 'tenant_id': ns_info['tenant_id']}}
- ns_info['nsd_id'] = self.create_nsd(context, nsd).get('id')
-
- nsd = self.get_nsd(context, ns['ns']['nsd_id'])
- nsd_dict = yaml.safe_load(nsd['attributes']['nsd'])
- vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
- onboarded_vnfds = vnfm_plugin.get_vnfds(context, [])
- region_name = ns_info.get('placement_attr', {}).\
- get('region_name', None)
-
- vim_res = self.vim_client.get_vim(context, ns['ns']['vim_id'],
- region_name)
- if not ns['ns']['vim_id']:
- ns['ns']['vim_id'] = vim_res['vim_id']
-
- # TODO(phuoc): currently, create_ns function does not have
- # create_ns_pre function, that pre-defines information of a network
- # service. Creating ns_uuid keeps ns_id for consistency, it should be
- # provided as return value of create_ns_pre function in ns db.
- # Generate ns_uuid
- ns['ns']['ns_id'] = uuidutils.generate_uuid()
-
- # Step-1
- param_values = ns['ns']['attributes'].get('param_values', {})
- if 'get_input' in str(nsd_dict):
- self._process_parameterized_input(ns['ns']['attributes'],
- nsd_dict)
- # Step-2
- vnfds = nsd['vnfds']
- # vnfd_dict is used while generating workflow
- vnfd_dict = dict()
- for node_name, node_val in \
- (nsd_dict['topology_template']['node_templates']).items():
- if node_val.get('type') not in vnfds:
- continue
- vnfd_name = vnfds[node_val.get('type')]
- if not vnfd_dict.get(vnfd_name):
- vnfd_dict[vnfd_name] = {
- 'id': self._get_vnfd_id(vnfd_name, onboarded_vnfds),
- 'instances': [node_name]
- }
- else:
- vnfd_dict[vnfd_name]['instances'].append(node_name)
- if not node_val.get('requirements'):
- continue
- if not param_values.get(vnfd_name):
- param_values[vnfd_name] = {}
- param_values[vnfd_name]['substitution_mappings'] = dict()
- req_dict = dict()
- requirements = node_val.get('requirements')
- for requirement in requirements:
- req_name = list(requirement.keys())[0]
- req_val = list(requirement.values())[0]
- res_name = req_val + ns['ns']['nsd_id'][:11]
- req_dict[req_name] = res_name
- if req_val in nsd_dict['topology_template']['node_templates']:
- param_values[vnfd_name]['substitution_mappings'][
- res_name] = nsd_dict['topology_template'][
- 'node_templates'][req_val]
-
- param_values[vnfd_name]['substitution_mappings'][
- 'requirements'] = req_dict
- ns['vnfd_details'] = vnfd_dict
-
- vnffgd_templates = self._get_vnffgds_from_nsd(nsd_dict)
- LOG.debug('vnffgd_templates: %s', vnffgd_templates)
- ns['vnffgd_templates'] = vnffgd_templates
-
- ns_dict = super(NfvoPlugin, self).create_ns(context, ns)
-
- super(NfvoPlugin, self).create_ns_post(
- context, ns_dict['id'], vnfd_dict, vnffgd_templates)
- return ns_dict
-
- @log.log
- def _update_params(self, original, paramvalues):
- for key, value in (original).items():
- if not isinstance(value, dict) or 'get_input' not in str(value):
- pass
- elif isinstance(value, dict):
- if 'get_input' in value:
- if value['get_input'] in paramvalues:
- original[key] = paramvalues[value['get_input']]
- else:
- LOG.error('Key missing Value: %s', key)
- raise cs.InputValuesMissing(key=key)
- else:
- self._update_params(value, paramvalues)
-
- @log.log
- def _process_parameterized_input(self, attrs, nsd_dict):
- param_vattrs_dict = attrs.pop('param_values', None)
- if param_vattrs_dict:
- for node in \
- nsd_dict['topology_template']['node_templates'].values():
- if 'get_input' in str(node):
- self._update_params(node, param_vattrs_dict['nsd'])
- else:
- raise cs.ParamYAMLInputMissing()
-
- # TODO(hiromu): Remove delete NS API after the deprecation is accepted.
- @log.log
- def delete_ns(self, context, ns_id, ns=None):
- # Extract "force_delete" from request's body
- force_delete = False
- if ns and ns.get('ns', {}).get('attributes', {}).get('force'):
- force_delete = ns['ns'].get('attributes').get('force')
- if force_delete and not context.is_admin:
- LOG.warning("force delete is admin only operation")
- raise exceptions.AdminRequired(reason="Admin only operation")
- ns = super(NfvoPlugin, self).get_ns(context, ns_id)
- LOG.debug("Deleting ns: %s", ns)
- super(NfvoPlugin, self).delete_ns_pre(context, ns_id, force_delete)
- super(NfvoPlugin, self).delete_ns_post(
- context, ns_id, force_delete=force_delete)
- return ns['id']
diff --git a/tacker/nfvo/workflows/__init__.py b/tacker/nfvo/workflows/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/plugins/common/constants.py b/tacker/plugins/common/constants.py
index 9907697cd..e5888551c 100644
--- a/tacker/plugins/common/constants.py
+++ b/tacker/plugins/common/constants.py
@@ -63,49 +63,10 @@ ALL_STATUSES = (
*PENDING_STATUSES,
)
-POLICY_SCALING = 'tosca.policies.tacker.Scaling'
-POLICY_SCALING_ACTIONS = (ACTION_SCALE_OUT,
- ACTION_SCALE_IN) = ('out', 'in')
-POLICY_ACTIONS = {POLICY_SCALING: POLICY_SCALING_ACTIONS}
-POLICY_ALARMING = 'tosca.policies.tacker.Alarming'
-POLICY_EVENT_ALARMING = 'tosca.policies.tacker.EventAlarming'
-VALID_POLICY_TYPES = [POLICY_SCALING, POLICY_ALARMING]
-POLICY_RESERVATION = 'tosca.policies.tacker.Reservation'
-RESERVATION_POLICY_ACTIONS = ['start_actions',
- 'before_end_actions', 'end_actions']
-DEFAULT_ALARM_ACTIONS = ['respawn', 'log', 'log_and_kill', 'notify']
-
-RES_TYPE_VNFD = "vnfd"
-RES_TYPE_NSD = "nsd"
-RES_TYPE_NS = "ns"
-RES_TYPE_VNF = "vnf"
-RES_TYPE_VIM = "vim"
-
-RES_EVT_CREATE = "CREATE"
RES_EVT_INSTANTIATE = "INSTANTIATE"
RES_EVT_TERMINATE = "TERMINATE"
-RES_EVT_DELETE = "DELETE"
-RES_EVT_UPDATE = "UPDATE"
-RES_EVT_MONITOR = "MONITOR"
RES_EVT_SCALE = "SCALE"
-RES_EVT_NA_STATE = "Not Applicable"
-RES_EVT_ONBOARDED = "OnBoarded"
RES_EVT_HEAL = "HEAL"
-RES_EVT_MAINTENANCE = [
- "MAINTENANCE", "SCALE_IN", "MAINTENANCE_COMPLETE",
- "PREPARE_MAINTENANCE", "PLANNED_MAINTENANCE", "INSTANCE_ACTION_DONE"
-]
-
-
-VNF_STATUS_TO_EVT_TYPES = {PENDING_CREATE: RES_EVT_CREATE,
- PENDING_UPDATE: RES_EVT_UPDATE,
- PENDING_DELETE: RES_EVT_DELETE,
- PENDING_HEAL: RES_EVT_HEAL}
-
-
-RES_EVT_CREATED_FLD = "created_at"
-RES_EVT_DELETED_FLD = "deleted_at"
-RES_EVT_UPDATED_FLD = "updated_at"
TYPE_COMPUTE = "COMPUTE"
TYPE_LINKPORT = "LINKPORT"
diff --git a/tacker/plugins/common/utils.py b/tacker/plugins/common/utils.py
deleted file mode 100644
index dfdff5d7c..000000000
--- a/tacker/plugins/common/utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2013 Cisco Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Common utilities and helper functions for Openstack Networking Plugins.
-"""
-
-from tacker._i18n import _
-from tacker.common import exceptions as n_exc
-from tacker.common import utils
-from tacker.plugins.common import constants
-
-
-def verify_vlan_range(vlan_range):
- """Raise an exception for invalid tags or malformed range."""
- for vlan_tag in vlan_range:
- if not utils.is_valid_vlan_tag(vlan_tag):
- raise n_exc.NetworkVlanRangeError(
- vlan_range=vlan_range,
- error=_("%s is not a valid VLAN tag") % vlan_tag)
- if vlan_range[1] < vlan_range[0]:
- raise n_exc.NetworkVlanRangeError(
- vlan_range=vlan_range,
- error=_("End of VLAN range is less than start of VLAN range"))
-
-
-def parse_network_vlan_range(network_vlan_range):
- """Interpret a string as network[:vlan_begin:vlan_end]."""
- entry = network_vlan_range.strip()
- if ':' in entry:
- try:
- network, vlan_min, vlan_max = entry.split(':')
- vlan_range = (int(vlan_min), int(vlan_max))
- except ValueError as ex:
- raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=ex)
- verify_vlan_range(vlan_range)
- return network, vlan_range
- else:
- return entry, None
-
-
-def parse_network_vlan_ranges(network_vlan_ranges_cfg_entries):
- """Interpret a list of strings as network[:vlan_begin:vlan_end] entries."""
- networks = {}
- for entry in network_vlan_ranges_cfg_entries:
- network, vlan_range = parse_network_vlan_range(entry)
- if vlan_range:
- networks.setdefault(network, []).append(vlan_range)
- else:
- networks.setdefault(network, [])
- return networks
-
-
-def in_pending_status(status):
- return status in (constants.PENDING_CREATE,
- constants.PENDING_UPDATE,
- constants.PENDING_DELETE)
diff --git a/tacker/plugins/fenix.py b/tacker/plugins/fenix.py
deleted file mode 100644
index a4b4b8d00..000000000
--- a/tacker/plugins/fenix.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import requests
-import time
-import yaml
-
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-
-from tacker._i18n import _
-from tacker.common import clients
-from tacker.common import log
-from tacker.extensions import vnfm
-from tacker.plugins.common import constants
-from tacker.vnfm import vim_client
-
-
-CONF = cfg.CONF
-OPTS = [
- cfg.IntOpt('lead_time', default=120,
- help=_('Time for migration_type operation')),
- cfg.IntOpt('max_interruption_time', default=120,
- help=_('Time for how long live migration can take')),
- cfg.IntOpt('recovery_time', default=2,
- help=_('Time for migrated node could be fully running state')),
- cfg.IntOpt('request_retries',
- default=5,
- help=_("Number of attempts to retry for request")),
- cfg.IntOpt('request_retry_wait',
- default=5,
- help=_("Wait time (in seconds) between consecutive request"))
-]
-CONF.register_opts(OPTS, 'fenix')
-MAINTENANCE_KEYS = (
- 'instance_ids', 'session_id', 'state', 'reply_url'
-)
-MAINTENANCE_SUB_KEYS = {
- 'PREPARE_MAINTENANCE': [('allowed_actions', 'list'),
- ('instance_ids', 'list')],
- 'PLANNED_MAINTENANCE': [('allowed_actions', 'list'),
- ('instance_ids', 'list')]
-}
-
-
-def config_opts():
- return [('fenix', OPTS)]
-
-
-class FenixPlugin(object):
- def __init__(self):
- self.REQUEST_RETRIES = cfg.CONF.fenix.request_retries
- self.REQUEST_RETRY_WAIT = cfg.CONF.fenix.request_retry_wait
- self.endpoint = None
- self._instances = {}
- self.vim_client = vim_client.VimClient()
-
- @log.log
- def request(self, plugin, context, vnf_dict, maintenance={},
- data_func=None):
- params_list = [maintenance]
- method = 'put'
- is_reply = True
- if data_func:
- action, create_func = data_func.split('_', 1)
- create_func = '_create_%s_list' % create_func
- if action in ['update', 'delete'] and hasattr(self, create_func):
- params_list = getattr(self, create_func)(
- context, vnf_dict, action)
- method = action if action == 'delete' else 'put'
- is_reply = False
- for params in params_list:
- self._request(plugin, context, vnf_dict, params, method, is_reply)
- return len(params_list)
-
- @log.log
- def create_vnf_constraints(self, plugin, context, vnf_dict):
- self.update_vnf_constraints(plugin, context, vnf_dict,
- objects=['instance_group',
- 'project_instance'])
-
- @log.log
- def delete_vnf_constraints(self, plugin, context, vnf_dict):
- self.update_vnf_constraints(plugin, context, vnf_dict,
- action='delete',
- objects=['instance_group',
- 'project_instance'])
-
- @log.log
- def update_vnf_instances(self, plugin, context, vnf_dict,
- action='update'):
- requests = self.update_vnf_constraints(plugin, context,
- vnf_dict, action,
- objects=['project_instance'])
- if requests[0]:
- self.post(context, vnf_dict)
-
- @log.log
- def update_vnf_constraints(self, plugin, context, vnf_dict,
- action='update', objects=[]):
- result = []
- for obj in objects:
- requests = self.request(plugin, context, vnf_dict,
- data_func='%s_%s' % (action, obj))
- result.append(requests)
- return result
-
- @log.log
- def post(self, context, vnf_dict, **kwargs):
- post_function = getattr(context, 'maintenance_post_function', None)
- if not post_function:
- return
- post_function(context, vnf_dict)
- del context.maintenance_post_function
-
- @log.log
- def project_instance_pre(self, context, vnf_dict):
- key = vnf_dict['id']
- if key not in self._instances:
- self._instances.update({
- key: self._get_instances(context, vnf_dict)})
-
- @log.log
- def validate_maintenance(self, maintenance):
- body = maintenance['maintenance']['params']['data']['body']
- if not set(MAINTENANCE_KEYS).issubset(body) or \
- body['state'] not in constants.RES_EVT_MAINTENANCE:
- raise vnfm.InvalidMaintenanceParameter()
- sub_keys = MAINTENANCE_SUB_KEYS.get(body['state'], ())
- for key, val_type in sub_keys:
- if key not in body or type(body[key]) is not eval(val_type):
- raise vnfm.InvalidMaintenanceParameter()
- return body
-
- @log.log
- def _request(self, plugin, context, vnf_dict, maintenance,
- method='put', is_reply=True):
- client = self._get_openstack_clients(context, vnf_dict)
- if not self.endpoint:
- self.endpoint = client.keystone_session.get_endpoint(
- service_type='maintenance', region_name=client.region_name)
- if not self.endpoint:
- raise vnfm.ServiceTypeNotFound(service_type_id='maintenance')
-
- if 'reply_url' in maintenance:
- url = maintenance['reply_url']
- elif 'url' in maintenance:
- url = "%s/%s" % (self.endpoint.rstrip('/'),
- maintenance['url'].strip('/'))
- else:
- return
-
- def create_headers():
- return {
- 'X-Auth-Token': client.keystone_session.get_token(),
- 'Content-Type': 'application/json',
- 'Accept': 'application/json'
- }
-
- request_body = {}
- request_body['headers'] = create_headers()
- state = constants.ACK if vnf_dict['status'] == constants.ACTIVE \
- else constants.NACK
- if method == 'put':
- data = maintenance.get('data', {})
- if is_reply:
- data['session_id'] = maintenance.get('session_id', '')
- data['state'] = "%s_%s" % (state, maintenance['state'])
- request_body['data'] = jsonutils.dump_as_bytes(data)
-
- def request_wait():
- retries = self.REQUEST_RETRIES
- while retries > 0:
- response = getattr(requests, method)(url, **request_body)
- if response.status_code == 200:
- break
- else:
- retries -= 1
- time.sleep(self.REQUEST_RETRY_WAIT)
-
- plugin.spawn_n(request_wait)
-
- @log.log
- def handle_maintenance(self, plugin, context, maintenance):
- action = '_create_%s' % maintenance['state'].lower()
- maintenance['data'] = {}
- if hasattr(self, action):
- getattr(self, action)(plugin, context, maintenance)
-
- @log.log
- def _create_maintenance(self, plugin, context, maintenance):
- vnf_dict = maintenance.get('vnf', {})
- vnf_dict['attributes'].update({'maintenance_scaled': 0})
- plugin._update_vnf_post(context, vnf_dict['id'], constants.ACTIVE,
- vnf_dict, constants.ACTIVE,
- constants.RES_EVT_UPDATE)
- instances = self._get_instances(context, vnf_dict)
- instance_ids = [x['id'] for x in instances]
- maintenance['data'].update({'instance_ids': instance_ids})
-
- @log.log
- def _create_scale_in(self, plugin, context, maintenance):
- def post_function(context, vnf_dict):
- scaled = int(vnf_dict['attributes'].get('maintenance_scaled', 0))
- vnf_dict['attributes']['maintenance_scaled'] = str(scaled + 1)
- plugin._update_vnf_post(context, vnf_dict['id'], constants.ACTIVE,
- vnf_dict, constants.ACTIVE,
- constants.RES_EVT_UPDATE)
- instances = self._get_instances(context, vnf_dict)
- instance_ids = [x['id'] for x in instances]
- maintenance['data'].update({'instance_ids': instance_ids})
- self.request(plugin, context, vnf_dict, maintenance)
-
- vnf_dict = maintenance.get('vnf', {})
- policy_action = self._create_scale_dict(plugin, context, vnf_dict)
- if policy_action:
- maintenance.update({'policy_action': policy_action})
- context.maintenance_post_function = post_function
-
- @log.log
- def _create_prepare_maintenance(self, plugin, context, maintenance):
- self._create_planned_maintenance(plugin, context, maintenance)
-
- @log.log
- def _create_planned_maintenance(self, plugin, context, maintenance):
- def post_function(context, vnf_dict):
- migration_type = self._get_constraints(vnf_dict,
- key='migration_type',
- default='MIGRATE')
- maintenance['data'].update({'instance_action': migration_type})
- self.request(plugin, context, vnf_dict, maintenance)
-
- vnf_dict = maintenance.get('vnf', {})
- instances = self._get_instances(context, vnf_dict)
- request_instance_id = maintenance['instance_ids'][0]
- selected = None
- for instance in instances:
- if instance['id'] == request_instance_id:
- selected = instance
- break
- if not selected:
- vnfm.InvalidMaintenanceParameter()
-
- migration_type = self._get_constraints(vnf_dict, key='migration_type',
- default='MIGRATE')
- if migration_type == 'OWN_ACTION':
- policy_action = self._create_migrate_dict(context, vnf_dict,
- selected)
- maintenance.update({'policy_action': policy_action})
- context.maintenance_post_function = post_function
- else:
- post_function(context, vnf_dict)
-
- @log.log
- def _create_maintenance_complete(self, plugin, context, maintenance):
- def post_function(context, vnf_dict):
- vim_res = self.vim_client.get_vim(context, vnf_dict['vim_id'])
- scaled = int(vnf_dict['attributes'].get('maintenance_scaled', 0))
- if vim_res['vim_type'] == 'openstack':
- scaled -= 1
- vnf_dict['attributes']['maintenance_scaled'] = str(scaled)
- plugin._update_vnf_post(context, vnf_dict['id'],
- constants.ACTIVE, vnf_dict,
- constants.ACTIVE,
- constants.RES_EVT_UPDATE)
- if scaled > 0:
- scale_out(plugin, context, vnf_dict)
- else:
- instances = self._get_instances(context, vnf_dict)
- instance_ids = [x['id'] for x in instances]
- maintenance['data'].update({'instance_ids': instance_ids})
- self.request(plugin, context, vnf_dict, maintenance)
-
- def scale_out(plugin, context, vnf_dict):
- policy_action = self._create_scale_dict(plugin, context, vnf_dict,
- scale_type='out')
- context.maintenance_post_function = post_function
- plugin._vnf_action.invoke(policy_action['action'],
- 'execute_action', plugin=plugin,
- context=context, vnf_dict=vnf_dict,
- args=policy_action['args'])
-
- vnf_dict = maintenance.get('vnf', {})
- scaled = vnf_dict.get('attributes', {}).get('maintenance_scaled', 0)
- if int(scaled):
- policy_action = self._create_scale_dict(plugin, context, vnf_dict,
- scale_type='out')
- maintenance.update({'policy_action': policy_action})
- context.maintenance_post_function = post_function
-
- @log.log
- def _create_scale_dict(self, plugin, context, vnf_dict, scale_type='in'):
- policy_action, scale_dict = {}, {}
- policies = self._get_scaling_policies(plugin, context, vnf_dict)
- if not policies:
- return
- scale_dict['type'] = scale_type
- scale_dict['policy'] = policies[0]['name']
- policy_action['action'] = 'autoscaling'
- policy_action['args'] = {'scale': scale_dict}
- return policy_action
-
- @log.log
- def _create_migrate_dict(self, context, vnf_dict, instance):
- policy_action, heal_dict = {}, {}
- heal_dict['vdu_name'] = instance['name']
- heal_dict['cause'] = ["Migrate resource '%s' to other host."]
- heal_dict['stack_id'] = instance['stack_name']
- if 'scaling_group_names' in vnf_dict['attributes']:
- sg_names = vnf_dict['attributes']['scaling_group_names']
- sg_names = list(jsonutils.loads(sg_names).keys())
- heal_dict['heat_tpl'] = '%s_res.yaml' % sg_names[0]
- policy_action['action'] = 'vdu_autoheal'
- policy_action['args'] = heal_dict
- return policy_action
-
- @log.log
- def _create_instance_group_list(self, context, vnf_dict, action):
- group_id = vnf_dict['attributes'].get('maintenance_group', '')
- if not group_id:
- return
-
- def get_constraints(data):
- maintenance_config = self._get_constraints(vnf_dict)
- data['max_impacted_members'] = maintenance_config.get(
- 'max_impacted_members', 1)
- data['recovery_time'] = maintenance_config.get('recovery_time', 60)
-
- params, data = {}, {}
- params['url'] = '/instance_group/%s' % group_id
- if action == 'update':
- data['group_id'] = group_id
- data['project_id'] = vnf_dict['tenant_id']
- data['group_name'] = 'tacker_nonha_app_group_%s' % vnf_dict['id']
- data['anti_affinity_group'] = False
- data['max_instances_per_host'] = 0
- data['resource_mitigation'] = True
- get_constraints(data)
- params.update({'data': data})
- return [params]
-
- @log.log
- def _create_project_instance_list(self, context, vnf_dict, action):
- group_id = vnf_dict.get('attributes', {}).get('maintenance_group', '')
- if not group_id:
- return
-
- params_list = []
- url = '/instance'
- instances = self._get_instances(context, vnf_dict)
- _instances = self._instances.get(vnf_dict['id'], {})
- if _instances:
- if action == 'update':
- instances = [v for v in instances if v not in _instances]
- del self._instances[vnf_dict['id']]
- else:
- instances = [v for v in _instances if v not in instances]
- if len(instances) != len(_instances):
- del self._instances[vnf_dict['id']]
-
- if action == 'update':
- maintenance_configs = self._get_constraints(vnf_dict)
- for instance in instances:
- params, data = {}, {}
- params['url'] = '%s/%s' % (url, instance['id'])
- data['project_id'] = instance['project_id']
- data['instance_id'] = instance['id']
- data['instance_name'] = instance['name']
- data['migration_type'] = maintenance_configs.get(
- 'migration_type', 'MIGRATE')
- data['resource_mitigation'] = maintenance_configs.get(
- 'mitigation_type', True)
- data['max_interruption_time'] = maintenance_configs.get(
- 'max_interruption_time',
- cfg.CONF.fenix.max_interruption_time)
- data['lead_time'] = maintenance_configs.get(
- 'lead_time', cfg.CONF.fenix.lead_time)
- data['group_id'] = group_id
- params.update({'data': data})
- params_list.append(params)
- elif action == 'delete':
- for instance in instances:
- params = {}
- params['url'] = '%s/%s' % (url, instance['id'])
- params_list.append(params)
- return params_list
-
- @log.log
- def _get_instances(self, context, vnf_dict):
- vim_res = self.vim_client.get_vim(context, vnf_dict['vim_id'])
- action = '_get_instances_with_%s' % vim_res['vim_type']
- if hasattr(self, action):
- return getattr(self, action)(context, vnf_dict)
- return {}
-
- @log.log
- def _get_instances_with_openstack(self, context, vnf_dict):
- def get_attrs_with_link(links):
- attrs = {}
- for link in links:
- href, rel = link['href'], link['rel']
- if rel == 'self':
- words = href.split('/')
- attrs['project_id'] = words[5]
- attrs['stack_name'] = words[7]
- break
- return attrs
-
- instances = []
- if not vnf_dict['instance_id']:
- return instances
-
- client = self._get_openstack_clients(context, vnf_dict)
- resources = client.heat.resources.list(vnf_dict['instance_id'],
- nested_depth=2)
- for resource in resources:
- if resource.resource_type == 'OS::Nova::Server' and \
- resource.resource_status != 'DELETE_IN_PROGRESS':
- instance = {
- 'id': resource.physical_resource_id,
- 'name': resource.resource_name
- }
- instance.update(get_attrs_with_link(resource.links))
- instances.append(instance)
- return instances
-
- @log.log
- def _get_scaling_policies(self, plugin, context, vnf_dict):
- vnf_id = vnf_dict['id']
- policies = []
- if 'scaling_group_names' in vnf_dict['attributes']:
- policies = plugin.get_vnf_policies(
- context, vnf_id, filters={'type': constants.POLICY_SCALING})
- return policies
-
- @log.log
- def _get_constraints(self, vnf, key=None, default=None):
- config = vnf.get('attributes', {}).get('config', '{}')
- maintenance_config = yaml.safe_load(config).get('maintenance', {})
- if key:
- return maintenance_config.get(key, default)
- return maintenance_config
-
- @log.log
- def _get_openstack_clients(self, context, vnf_dict):
- vim_res = self.vim_client.get_vim(context, vnf_dict['vim_id'])
- region_name = vnf_dict.setdefault('placement_attr', {}).get(
- 'region_name', None)
- client = clients.OpenstackClients(auth_attr=vim_res['vim_auth'],
- region_name=region_name)
- return client
diff --git a/tacker/tests/constants.py b/tacker/tests/constants.py
index 1dc08017e..51ed13534 100644
--- a/tacker/tests/constants.py
+++ b/tacker/tests/constants.py
@@ -10,34 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-POLICY_ALARMING = 'tosca.policies.tacker.Alarming'
-DEFAULT_ALARM_ACTIONS = ['respawn', 'log', 'log_and_kill', 'notify']
-POLICY_RESERVATION = 'tosca.policies.tacker.Reservation'
-VNF_CIRROS_CREATE_TIMEOUT = 300
-VNFC_CREATE_TIMEOUT = 600
-VNF_CIRROS_UPDATE_TIMEOUT = 300
-VNF_CIRROS_DELETE_TIMEOUT = 300
-VNF_CIRROS_DEAD_TIMEOUT = 500
-ACTIVE_SLEEP_TIME = 3
-DEAD_SLEEP_TIME = 1
-SCALE_WINDOW_SLEEP_TIME = 120
-SCALE_SLEEP_TIME = 30
-NS_CREATE_TIMEOUT = 400
-NS_DELETE_TIMEOUT = 300
+
NOVA_CLIENT_VERSION = 2
CINDER_CLIENT_VERSION = 3
-VDU_MARK_UNHEALTHY_TIMEOUT = 500
-VDU_MARK_UNHEALTHY_SLEEP_TIME = 3
-VDU_AUTOHEALING_TIMEOUT = 500
-VDU_AUTOHEALING_SLEEP_TIME = 3
-VNF_CIRROS_PENDING_HEAL_TIMEOUT = 300
-PENDING_SLEEP_TIME = 3
-# Blazar related
-LEASE_EVENT_STATUS = 'DONE'
-START_LEASE_EVET_TYPE = 'start_lease'
-LEASE_CHECK_EVENT_TIMEOUT = 300
-LEASE_CHECK_SLEEP_TIME = 3
UUID = 'f26f181d-7891-4720-b022-b074ec1733ef'
INVALID_UUID = 'f181d-7891-4720-b022-b074ec3ef'
# artifact related
diff --git a/tacker/tests/functional/base.py b/tacker/tests/functional/base.py
index 3b4bd560f..f2f3d16a2 100644
--- a/tacker/tests/functional/base.py
+++ b/tacker/tests/functional/base.py
@@ -13,10 +13,8 @@
# under the License.
import os
-import time
import yaml
-from blazarclient import client as blazar_client
from cinderclient import client as cinder_client
from glanceclient.v2 import client as glance_client
from keystoneauth1.identity import v3
@@ -31,12 +29,10 @@ from tempest.lib import base
from tacker.common import clients
from tacker.common import utils
-from tacker.plugins.common import constants as evt_constants
from tacker.tests import constants
from tacker.tests.utils import read_file
from tacker import version
-from tackerclient.common import exceptions
from tackerclient.v1_0 import client as tacker_client
@@ -177,19 +173,6 @@ class BaseTackerTest(base.BaseTestCase):
data['project_domain_name'] = domain_name
return clients.OpenstackClients(auth_attr=data).heat
- @classmethod
- def blazarclient(cls, vim_conf_file=None):
- data = cls.get_credentials(vim_conf_file)
- domain_name = data.pop('domain_name')
- data['user_domain_name'] = domain_name
- data['project_domain_name'] = domain_name
- auth_ses = (clients.OpenstackClients(auth_attr=data)
- .keystone_session.session)
- return blazar_client.Client(session=auth_ses,
- service_type='reservation',
- interface='public',
- region_name='RegionOne')
-
@classmethod
def glanceclient(cls, vim_conf_file=None):
vim_params = cls.get_credentials(vim_conf_file)
@@ -203,13 +186,6 @@ class BaseTackerTest(base.BaseTestCase):
auth_ses = session.Session(auth=auth, verify=verify)
return glance_client.Client(session=auth_ses)
- @classmethod
- def aodh_http_client(cls, vim_conf_file=None):
- auth_session = cls.get_auth_session(vim_conf_file)
- return SessionClient(session=auth_session,
- service_type='alarming',
- region_name='RegionOne')
-
@classmethod
def cinderclient(cls, vim_conf_file=None):
vim_params = cls.get_credentials(vim_conf_file)
@@ -224,136 +200,6 @@ class BaseTackerTest(base.BaseTestCase):
return cinder_client.Client(constants.CINDER_CLIENT_VERSION,
session=auth_ses)
- def get_vdu_resource(self, stack_id, res_name):
- return self.h_client.resources.get(stack_id, res_name)
-
- def wait_until_vnf_status(self, vnf_id, target_status, timeout,
- sleep_interval):
- start_time = int(time.time())
- status = None
- while True:
- try:
- vnf_result = self.client.show_vnf(vnf_id)
- status = vnf_result['vnf']['status']
- if status == target_status:
- break
- except exceptions.InternalServerError:
- pass
-
- if int(time.time()) - start_time > timeout:
- break
- time.sleep(sleep_interval)
-
- self.assertEqual(status, target_status,
- "vnf %(vnf_id)s with status %(status)s is"
- " expected to be %(target)s" %
- {"vnf_id": vnf_id, "status": status,
- "target": target_status})
-
- def wait_until_vnf_active(self, vnf_id, timeout, sleep_interval):
- self.wait_until_vnf_status(vnf_id, 'ACTIVE', timeout,
- sleep_interval)
-
- def verify_vnf_update(self, vnf_id):
- self.wait_until_vnf_status(vnf_id, 'ACTIVE',
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.wait_until_vnf_status(vnf_id, 'PENDING_HEAL',
- constants.VNF_CIRROS_PENDING_HEAL_TIMEOUT,
- constants.PENDING_SLEEP_TIME)
- self.wait_until_vnf_status(vnf_id, 'ACTIVE',
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
-
- def wait_until_vnf_delete(self, vnf_id, timeout, sleep_interval=1):
- start_time = int(time.time())
- while True:
- status = None
- try:
- vnf_result = self.client.show_vnf(vnf_id)
- status = vnf_result['vnf']['status']
- except exceptions.NotFound:
- return
- except Exception as e:
- LOG.error("Failed to get vnf status: %s", str(e))
-
- if status is not None and status != 'PENDING_DELETE':
- raise Exception("Failed with status: %s" % status)
- if int(time.time()) - start_time > timeout:
- raise Exception("Timeout for deleting vnf %s.",
- vnf_id)
- time.sleep(sleep_interval)
-
- def wait_until_vnf_dead(self, vnf_id, timeout, sleep_interval):
- self.wait_until_vnf_status(vnf_id, 'DEAD', timeout,
- sleep_interval)
-
- def validate_vnf_instance(self, vnfd_instance, vnf_instance):
- self.assertIsNotNone(vnf_instance)
- self.assertIsNotNone(vnf_instance['vnf']['id'])
- self.assertIsNotNone(vnf_instance['vnf']['instance_id'])
- if vnfd_instance:
- self.assertEqual(vnf_instance['vnf']['vnfd_id'], vnfd_instance[
- 'vnfd']['id'])
-
- def verify_vnf_restart(self, vnfd_instance, vnf_instance):
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
- self.assertIsNotNone(self.client.show_vnf(vnf_id)['vnf'][
- 'mgmt_ip_address'])
-
- self.wait_until_vnf_dead(
- vnf_id,
- constants.VNF_CIRROS_DEAD_TIMEOUT,
- constants.DEAD_SLEEP_TIME)
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- def verify_vnf_monitor_events(self, vnf_id, vnf_state_list):
- for state in vnf_state_list:
- params = {'resource_id': vnf_id, 'resource_state': state,
- 'event_type': evt_constants.RES_EVT_MONITOR}
- vnf_evt_list = self.client.list_vnf_events(**params)
- mesg = ("%s - state transition expected." % state)
- self.assertIsNotNone(vnf_evt_list['vnf_events'], mesg)
-
- def verify_vnf_crud_events(self, vnf_id, evt_type, res_state,
- tstamp=None, cnt=1):
- params = {'resource_id': vnf_id,
- 'resource_state': res_state,
- 'resource_type': evt_constants.RES_TYPE_VNF,
- 'event_type': evt_type}
- if tstamp:
- params['timestamp'] = tstamp
-
- vnf_evt_list = self.client.list_vnf_events(**params)
-
- self.assertIsNotNone(vnf_evt_list['vnf_events'],
- "List of VNF events are Empty")
- self.assertEqual(cnt, len(vnf_evt_list['vnf_events']))
-
- def verify_vnfd_events(self, vnfd_id, evt_type, res_state,
- tstamp=None, cnt=1):
- params = {'resource_id': vnfd_id,
- 'resource_state': res_state,
- 'resource_type': evt_constants.RES_TYPE_VNFD,
- 'event_type': evt_type}
- if tstamp:
- params['timestamp'] = tstamp
-
- vnfd_evt_list = self.client.list_vnfd_events(**params)
-
- self.assertIsNotNone(vnfd_evt_list['vnfd_events'],
- "List of VNFD events are Empty")
- self.assertEqual(cnt, len(vnfd_evt_list['vnfd_events']))
-
def get_vim(self, vim_list, vim_name):
if len(vim_list.values()) == 0:
assert False, "vim_list is Empty: Default VIM is missing"
@@ -364,24 +210,6 @@ class BaseTackerTest(base.BaseTestCase):
return vim
return None
- def verify_antispoofing_in_stack(self, stack_id, resource_name):
- resource_types = self.h_client.resources
- resource_details = resource_types.get(stack_id=stack_id,
- resource_name=resource_name)
- resource_dict = resource_details.to_dict()
- self.assertTrue(resource_dict['attributes']['port_security_enabled'])
-
- def trigger_vnf(self, vnf, policy_name, policy_action):
- credential = 'g0jtsxu9'
- body = {"trigger": {'policy_name': policy_name,
- 'action_name': policy_action,
- 'params': {
- 'data': {'alarm_id': '35a80852-e24f-46ed-bd34-e2f831d00172', 'current': 'alarm'}, # noqa
- 'credential': credential}
- }
- }
- self.client.post('/vnfs/%s/triggers' % vnf, body)
-
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
@@ -402,58 +230,6 @@ class BaseTackerTest(base.BaseTestCase):
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
- def create_cinder_volume(cls, vol_size, vol_name):
- try:
- cinder_volume = cls.cinder_client.volumes.create(vol_size,
- name=vol_name)
- except Exception as e:
- LOG.error("Failed to create cinder volume: %s", str(e))
- return None
-
- return cinder_volume.id
-
- def delete_cinder_volume(cls, vol_id):
- try:
- cls.cinder_client.volumes.delete(vol_id)
- except Exception as e:
- LOG.error("Failed to delete cinder volume: %s", str(e))
-
- def vnfd_and_vnf_create(self, vnfd_file, vnf_name, volume_id=None,
- volume_name=None):
- input_yaml = read_file(vnfd_file)
- tosca_dict = yaml.safe_load(input_yaml)
- if volume_id is not None:
- volume_detail = tosca_dict['topology_template']['inputs']
- volume_detail[volume_name]['default'] = volume_id
- tosca_arg = {'vnfd': {'name': vnf_name,
- 'attributes': {'vnfd': tosca_dict}}}
-
- # Create vnfd with tosca template
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertIsNotNone(vnfd_instance)
-
- # Create vnf with vnfd_id
- vnfd_id = vnfd_instance['vnfd']['id']
- self.addCleanup(self.client.delete_vnfd, vnfd_id)
-
- vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
- vnf_instance = self.client.create_vnf(body=vnf_arg)
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- return vnfd_instance, vnf_instance, tosca_dict
-
- def vnfd_and_vnf_create_inline(self, vnfd_file, vnf_name):
- vnfd_instance = {}
- input_yaml = read_file(vnfd_file)
- tosca_dict = yaml.safe_load(input_yaml)
-
- # create vnf directly from template
- vnf_arg = {'vnf': {'vnfd_template': tosca_dict, 'name': vnf_name}}
- vnf_instance = self.client.create_vnf(body=vnf_arg)
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- return vnf_instance, tosca_dict
-
def _list_op_occs(self, filter_string='', http_client=None):
if http_client is None:
http_client = self.http_client
diff --git a/tacker/tests/functional/common/test_cmd_executer.py b/tacker/tests/functional/common/test_cmd_executer.py
deleted file mode 100644
index e3b3c1203..000000000
--- a/tacker/tests/functional/common/test_cmd_executer.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-import yaml
-
-from oslo_config import cfg
-
-from tacker.common import cmd_executer
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-
-CONF = cfg.CONF
-VNF_CIRROS_CREATE_TIMEOUT = 120
-
-
-class TestRemoteCommandExecutor(base.BaseTackerTest):
-
- def _test_create_vnf(self, vnfd_file, vnf_name):
- input_yaml = read_file(vnfd_file)
- tosca_dict = yaml.safe_load(input_yaml)
-
- # create vnf directly from template
- vnf_arg = {'vnf': {'vnfd_template': tosca_dict, 'name': vnf_name}}
- vnf_instance = self.client.create_vnf(body=vnf_arg)
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- vnf_show_out = self.client.show_vnf(vnf_id)['vnf']
- self.assertIsNotNone(vnf_show_out['mgmt_ip_address'])
-
- # Fetch mgmt ip of VNF
- mgmt_ip = eval(vnf_show_out['mgmt_ip_address'])['VDU1']
-
- return vnf_id, mgmt_ip
-
- def _test_cmd_executor(self, vnfd_file, vnf_name):
- vnf_id, mgmt_ip = self._test_create_vnf(vnfd_file, vnf_name)
-
- # Wait for VM initialization. It takes time after VM started until
- # sshd starts. It is a bit ad-hoc but adopted it for simplicity.
- time.sleep(100)
-
- # Login on VNF instance, and execute 'hostname' command to verify
- # connection and command output.
- usr = 'cirros'
- psswrd = 'gocubsgo'
- cmd = 'hostname'
- rcmd_executor = cmd_executer.RemoteCommandExecutor(user=usr,
- password=psswrd,
- host=mgmt_ip)
- result = rcmd_executor.execute_command(cmd)
- self.assertEqual(cmd, result.get_command())
- self.assertEqual(0, result.get_return_code())
- self.assertIn('test-vdu', result.get_stdout()[0])
-
- self._test_delete_vnf(vnf_id)
-
- def _test_delete_vnf(self, vnf_id):
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- def test_cmd_executor(self):
- self._test_cmd_executor('sample-tosca-vnfd.yaml',
- 'test_tosca_vnf_with_cirros_inline')
diff --git a/tacker/tests/functional/legacy/__init__.py b/tacker/tests/functional/legacy/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/functional/legacy/nfvo/__init__.py b/tacker/tests/functional/legacy/nfvo/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/functional/legacy/nfvo/test_nfvo.py b/tacker/tests/functional/legacy/nfvo/test_nfvo.py
deleted file mode 100644
index ccc7e2c77..000000000
--- a/tacker/tests/functional/legacy/nfvo/test_nfvo.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2016 Brocade Communications System, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from oslo_config import cfg
-from tackerclient.common import exceptions
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-import time
-CONF = cfg.CONF
-
-
-class NsdTestCreate(base.BaseTackerTest):
-
- def _test_create_tosca_vnfd(self, tosca_vnfd_file, vnfd_name):
- input_yaml = read_file(tosca_vnfd_file)
- tosca_dict = yaml.safe_load(input_yaml)
- tosca_arg = {'vnfd': {'name': vnfd_name,
- 'attributes': {'vnfd': tosca_dict}}}
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertEqual(vnfd_instance['vnfd']['name'], vnfd_name)
- self.assertIsNotNone(vnfd_instance)
-
- vnfds = self.client.list_vnfds().get('vnfds')
- self.assertIsNotNone(vnfds, "List of vnfds are Empty after Creation")
- return vnfd_instance['vnfd']['id']
-
- def _test_create_nsd(self, tosca_nsd_file, nsd_name):
- input_yaml = read_file(tosca_nsd_file)
- tosca_dict = yaml.safe_load(input_yaml)
- tosca_arg = {'nsd': {'name': nsd_name,
- 'attributes': {'nsd': tosca_dict}}}
- nsd_instance = self.client.create_nsd(body=tosca_arg)
- self.assertIsNotNone(nsd_instance)
- return nsd_instance['nsd']['id']
-
- def _test_delete_nsd(self, nsd_id):
- try:
- self.client.delete_nsd(nsd_id)
- except Exception:
- assert False, "nsd Delete failed"
-
- def _test_delete_vnfd(self, vnfd_id, timeout=constants.NS_DELETE_TIMEOUT):
- start_time = int(time.time())
- while True:
- try:
- self.client.delete_vnfd(vnfd_id)
- except exceptions.Conflict:
- time.sleep(2)
- except Exception:
- assert False, "vnfd Delete failed"
- else:
- break
- if (int(time.time()) - start_time) > timeout:
- assert False, "vnfd still in use"
- self.verify_vnfd_events(vnfd_id, evt_constants.RES_EVT_DELETE,
- evt_constants.RES_EVT_NA_STATE)
-
- def test_create_delete_nsd(self):
- vnfd1_id = self._test_create_tosca_vnfd(
- 'test-nsd-vnfd1.yaml',
- 'test-nsd-vnfd1')
- vnfd2_id = self._test_create_tosca_vnfd(
- 'test-nsd-vnfd2.yaml',
- 'test-nsd-vnfd2')
- nsd_id = self._test_create_nsd(
- 'test-nsd.yaml',
- 'test-nsd')
- self._test_delete_nsd(nsd_id)
- self._test_delete_vnfd(vnfd1_id)
- self._test_delete_vnfd(vnfd2_id)
diff --git a/tacker/tests/functional/legacy/vnfm/__init__.py b/tacker/tests/functional/legacy/vnfm/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf.py
deleted file mode 100644
index 4e2721471..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright 2016 Brocade Communications System, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-import yaml
-
-from glanceclient import exc
-from novaclient import exceptions
-from oslo_config import cfg
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-
-
-CONF = cfg.CONF
-VNF_CIRROS_CREATE_TIMEOUT = 120
-
-
-class VnfTestToscaCreate(base.BaseTackerTest):
- def _test_create_vnf(self, vnfd_file, vnf_name,
- template_source="onboarded"):
-
- if template_source == "onboarded":
- (vnfd_instance,
- vnf_instance,
- tosca_dict) = self.vnfd_and_vnf_create(vnfd_file, vnf_name)
-
- if template_source == 'inline':
- vnf_instance, tosca_dict = self.vnfd_and_vnf_create_inline(
- vnfd_file, vnf_name)
-
- vnfd_id = vnf_instance['vnf']['vnfd_id']
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- vnf_show_out = self.client.show_vnf(vnf_id)['vnf']
- self.assertIsNotNone(vnf_show_out['mgmt_ip_address'])
-
- prop_dict = tosca_dict['topology_template']['node_templates'][
- 'CP1']['properties']
-
- # Verify if ip_address is static, it is same as in show_vnf
- if prop_dict.get('ip_address'):
- mgmt_ip_address_input = prop_dict.get('ip_address')
- mgmt_info = yaml.safe_load(
- vnf_show_out['mgmt_ip_address'])
- self.assertEqual(mgmt_ip_address_input, mgmt_info['VDU1'])
-
- # Verify anti spoofing settings
- stack_id = vnf_show_out['instance_id']
- template_dict = tosca_dict['topology_template']['node_templates']
- for field in template_dict:
- prop_dict = template_dict[field]['properties']
- if prop_dict.get('anti_spoofing_protection'):
- self.verify_antispoofing_in_stack(stack_id=stack_id,
- resource_name=field)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
- return vnfd_id, vnf_id
-
- def _test_delete_vnf(self, vnf_id):
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- def _test_create_delete_vnf_tosca(self, vnfd_file, vnf_name,
- template_source):
- vnfd_id, vnf_id = self._test_create_vnf(vnfd_file, vnf_name,
- template_source)
- servers = self.novaclient().servers.list()
- vdus = []
- for server in servers:
- vdus.append(server.name)
- self.assertIn('test-vdu', vdus)
-
- port_list = self.neutronclient().list_ports()['ports']
- vdu_ports = []
- for port in port_list:
- vdu_ports.append(port['name'])
- self.assertIn('test-cp', vdu_ports)
- self._test_delete_vnf(vnf_id)
-
- def test_create_delete_vnf_tosca_from_vnfd(self):
- self._test_create_delete_vnf_tosca('sample-tosca-vnfd.yaml',
- 'test_tosca_vnf_with_cirros',
- 'onboarded')
-
- def test_create_delete_vnf_from_template(self):
- self._test_create_delete_vnf_tosca('sample-tosca-vnfd.yaml',
- 'test_tosca_vnf_with_cirros_inline',
- 'inline')
-
- def test_re_create_delete_vnf(self):
- self._test_create_delete_vnf_tosca('sample-tosca-vnfd.yaml',
- 'test_vnf',
- 'inline')
- time.sleep(1)
- self._test_create_delete_vnf_tosca('sample-tosca-vnfd.yaml',
- 'test_vnf',
- 'inline')
-
- def test_create_delete_vnf_static_ip(self):
- vnfd_id, vnf_id = self._test_create_vnf(
- 'sample-tosca-vnfd-static-ip.yaml',
- 'test_tosca_vnf_with_cirros_no_monitoring')
- self._test_delete_vnf(vnf_id)
-
-
-class VnfTestToscaCreateFlavorCreation(base.BaseTackerTest):
- def test_create_delete_vnf_tosca_no_monitoring(self):
- vnf_name = 'tosca_vnfd_with_auto_flavor'
- vnfd_file = 'sample-tosca-vnfd-flavor.yaml'
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.assertIsNotNone(self.client.show_vnf(vnf_id)['vnf'][
- 'mgmt_ip_address'])
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
-
- servers = self.novaclient().servers.list()
- vdu_server = None
- for server in servers:
- if 'VDU1_flavor_func' in server.name:
- vdu_server = server
- break
- self.assertIsNotNone(vdu_server)
- flavor_id = server.flavor["id"]
- nova_flavors = self.novaclient().flavors
- flavor = nova_flavors.get(flavor_id)
- self.assertIsNotNone(flavor)
- self.assertIn('VDU1_flavor_func_flavor', flavor.name)
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- self.assertRaises(exceptions.NotFound, nova_flavors.delete,
- [flavor_id])
-
-
-class VnfTestToscaCreateImageCreation(base.BaseTackerTest):
-
- def test_create_delete_vnf_tosca_no_monitoring(self):
- vnf_name = 'tosca_vnfd_with_auto_image'
- vnfd_file = 'sample-tosca-vnfd-image.yaml'
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.assertIsNotNone(self.client.show_vnf(vnf_id)['vnf'][
- 'mgmt_ip_address'])
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
-
- servers = self.novaclient().servers.list()
- vdu_server = None
- for server in servers:
- if 'VDU1_image_func' in server.name:
- vdu_server = server
- break
- self.assertIsNotNone(vdu_server)
- image_id = vdu_server.image["id"]
- glanceclient = self.glanceclient()
- image = glanceclient.images.get(image_id)
- self.assertIsNotNone(image)
- self.assertIn('VNFImage_image_func', image.name)
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- self.assertRaises(exc.HTTPNotFound,
- glanceclient.images.delete,
- [image_id])
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_alarm.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_alarm.py
deleted file mode 100644
index b863b59d6..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_alarm.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import time
-import unittest
-
-from oslo_serialization import jsonutils
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-
-
-class VnfTestAlarmMonitor(base.BaseTackerTest):
-
- def _test_vnf_tosca_alarm(self, vnfd_file, vnf_name):
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
-
- vnf_id = vnf_instance['vnf']['id']
-
- def _waiting_time(count):
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- vnf = self.client.show_vnf(vnf_id)['vnf']
- # {"VDU1": ["10.0.0.14", "10.0.0.5"]}
- self.assertEqual(count, len(jsonutils.loads(vnf[
- 'mgmt_ip_address'])['VDU1']))
-
- def _inject_monitoring_policy(vnfd_dict):
- polices = vnfd_dict['topology_template'].get('policies', [])
- mon_policy = dict()
- for policy_dict in polices:
- for name, policy in policy_dict.items():
- if policy['type'] == constants.POLICY_ALARMING:
- triggers = policy['triggers']
- for trigger_name, trigger_dict in triggers.items():
- policy_action_list = trigger_dict['action']
- for policy_action_name in policy_action_list:
- mon_policy[trigger_name] = policy_action_name
- return mon_policy
-
- def verify_policy(policy_dict, kw_policy):
- for name, action in policy_dict.items():
- if kw_policy in name:
- return name
-
- # trigger alarm
- monitoring_policy = _inject_monitoring_policy(tosca_dict)
- for mon_policy_name, mon_policy_action in monitoring_policy.items():
- if mon_policy_action in constants.DEFAULT_ALARM_ACTIONS:
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.trigger_vnf(vnf_id, mon_policy_name, mon_policy_action)
- else:
- if 'scaling_out' in mon_policy_name:
- _waiting_time(2)
- time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
- # scaling-out backend action
- scaling_out_action = mon_policy_action + '-out'
- self.trigger_vnf(
- vnf_id, mon_policy_name, scaling_out_action)
-
- _waiting_time(3)
-
- scaling_in_name = verify_policy(monitoring_policy,
- kw_policy='scaling_in')
- if scaling_in_name:
- time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
- # scaling-in backend action
- scaling_in_action = mon_policy_action + '-in'
- self.trigger_vnf(
- vnf_id, scaling_in_name, scaling_in_action)
-
- _waiting_time(2)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.ACTIVE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_OUT, cnt=1)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_IN, cnt=1)
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, ("Failed to delete vnf %s after the monitor test" %
- vnf_id)
-
- # Verify VNF monitor events captured for states, ACTIVE and DEAD
- vnf_state_list = [evt_constants.ACTIVE, evt_constants.DEAD]
- self.verify_vnf_monitor_events(vnf_id, vnf_state_list)
-
- # Wait for delete vnf_instance
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
-
- @unittest.skip("heat-translator use old metric cpu_util & aggregation API")
- def test_vnf_alarm_respawn(self):
- self._test_vnf_tosca_alarm(
- 'sample-tosca-alarm-respawn.yaml',
- 'alarm and respawn-vnf')
-
- @unittest.skip("heat-translator use old metric cpu_util & aggregation API")
- def test_vnf_alarm_scale(self):
- self._test_vnf_tosca_alarm(
- 'sample-tosca-alarm-scale.yaml',
- 'alarm and scale vnf')
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_block_storage.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_block_storage.py
deleted file mode 100644
index ab1e10c1a..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_block_storage.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2016 Brocade Communications System, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from oslo_config import cfg
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-
-
-CONF = cfg.CONF
-VNF_CIRROS_CREATE_TIMEOUT = 120
-
-
-class VnfBlockStorageTestToscaCreate(base.BaseTackerTest):
- def _test_create_vnf(self, vnfd_file, vnf_name,
- template_source="onboarded"):
-
- if template_source == "onboarded":
- (vnfd_instance,
- vnf_instance,
- tosca_dict) = self.vnfd_and_vnf_create(vnfd_file, vnf_name)
-
- if template_source == 'inline':
- vnf_instance, tosca_dict = self.vnfd_and_vnf_create_inline(
- vnfd_file, vnf_name)
-
- vnfd_id = vnf_instance['vnf']['vnfd_id']
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- vnf_show_out = self.client.show_vnf(vnf_id)['vnf']
- self.assertIsNotNone(vnf_show_out['mgmt_ip_address'])
-
- prop_dict = tosca_dict['topology_template']['node_templates'][
- 'CP1']['properties']
-
- # Verify if ip_address is static, it is same as in show_vnf
- if prop_dict.get('ip_address'):
- mgmt_ip_address_input = prop_dict.get('ip_address')
- mgmt_info = yaml.safe_load(
- vnf_show_out['mgmt_ip_address'])
- self.assertEqual(mgmt_ip_address_input, mgmt_info['VDU1'])
-
- # Verify anti spoofing settings
- stack_id = vnf_show_out['instance_id']
- template_dict = tosca_dict['topology_template']['node_templates']
- for field in template_dict:
- prop_dict = template_dict[field]['properties']
- if prop_dict.get('anti_spoofing_protection'):
- self.verify_antispoofing_in_stack(stack_id=stack_id,
- resource_name=field)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
- return vnfd_id, vnf_id
-
- def _test_delete_vnf(self, vnf_id):
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- def _test_create_delete_vnf_tosca(self, vnfd_file, vnf_name,
- template_source):
- vnfd_id, vnf_id = self._test_create_vnf(vnfd_file, vnf_name,
- template_source)
- servers = self.novaclient().servers.list()
- vdus = []
- for server in servers:
- vdus.append(server.name)
- self.assertIn('test-vdu-block-storage', vdus)
-
- for server in servers:
- if server.name == 'test-vdu-block-storage':
- server_id = server.id
- server_volumes = self.novaclient().volumes\
- .get_server_volumes(server_id)
- self.assertTrue(len(server_volumes) > 0)
- self._test_delete_vnf(vnf_id)
-
- def test_create_delete_vnf_tosca_from_vnfd(self):
- self._test_create_delete_vnf_tosca(
- 'sample-tosca-vnfd-block-storage.yaml',
- 'test_tosca_vnf_with_cirros',
- 'onboarded')
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_existing_block_storage.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_existing_block_storage.py
deleted file mode 100644
index 81819b407..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_existing_block_storage.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2021 NEC, Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from oslo_config import cfg
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-
-
-CONF = cfg.CONF
-VNF_CIRROS_CREATE_TIMEOUT = 120
-
-
-class VnfExistingBlockStorageTestToscaCreate(base.BaseTackerTest):
-
- def _test_create_vnf(self, vnfd_file, vnf_name, volume_id, volume_name,
- template_source="onboarded"):
-
- if template_source == "onboarded":
- (vnfd_instance,
- vnf_instance,
- tosca_dict) = self.vnfd_and_vnf_create(vnfd_file,
- vnf_name, volume_id, volume_name)
-
- vnfd_id = vnf_instance['vnf']['vnfd_id']
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- vnf_show_out = self.client.show_vnf(vnf_id)['vnf']
- self.assertIsNotNone(vnf_show_out['mgmt_ip_address'])
-
- prop_dict = tosca_dict['topology_template']['node_templates'][
- 'CP1']['properties']
-
- # Verify if ip_address is static, it is same as in show_vnf
- if prop_dict.get('ip_address'):
- mgmt_ip_address_input = prop_dict.get('ip_address')
- mgmt_info = yaml.safe_load(
- vnf_show_out['mgmt_ip_address'])
- self.assertEqual(mgmt_ip_address_input, mgmt_info['VDU1'])
-
- # Verify anti spoofing settings
- stack_id = vnf_show_out['instance_id']
- template_dict = tosca_dict['topology_template']['node_templates']
- for field in template_dict:
- prop_dict = template_dict[field]['properties']
- if prop_dict.get('anti_spoofing_protection'):
- self.verify_antispoofing_in_stack(stack_id=stack_id,
- resource_name=field)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
- return vnfd_id, vnf_id
-
- def _test_delete_vnf(self, vnf_id):
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- def _test_create_delete_vnf_tosca(self, vnfd_file, vnf_name,
- template_source, volume_id, volume_name):
- vnfd_idx, vnf_id = self._test_create_vnf(vnfd_file, vnf_name,
- volume_id, volume_name, template_source)
- servers = self.novaclient().servers.list()
- vdus = []
- for server in servers:
- vdus.append(server.name)
- self.assertIn('test-vdu-block-storage', vdus)
-
- for server in servers:
- if server.name == 'test-vdu-block-storage':
- server_id = server.id
- server_volumes = self.novaclient().volumes\
- .get_server_volumes(server_id)
- self.assertTrue(len(server_volumes) > 0)
- self._test_delete_vnf(vnf_id)
-
- def _test_create_cinder_volume(self):
- volume_name = 'my_vol'
- size = 1
- volume_id = self.create_cinder_volume(size, volume_name)
- self.assertIsNotNone(volume_id)
-
- return volume_id, volume_name
-
- def test_create_delete_vnf_tosca_from_vnfd(self):
- volume_id, volume_name = self._test_create_cinder_volume()
- self._test_create_delete_vnf_tosca(
- 'sample-tosca-vnfd-existing-block-storage.yaml',
- 'test_tosca_vnf_with_cirros',
- 'onboarded', volume_id, volume_name)
- self.delete_cinder_volume(volume_id)
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_floatingip.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_floatingip.py
deleted file mode 100644
index 5567cc648..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_floatingip.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2017 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from tacker.tests import constants
-from tacker.tests.functional import base
-
-
-class VnfTestToscaFloatingIp(base.BaseTackerTest):
-
- def get_heat_stack_resource(self, stack_id, resource_name):
- resource_types = self.h_client.resources
- resource_details = resource_types.get(stack_id=stack_id,
- resource_name=resource_name)
- resource_dict = resource_details.to_dict()
- return resource_dict
-
- def connect_public_and_private_nw_with_router(self):
- public_nw = 'public'
- private_nw = 'net_mgmt'
- private_nw_subnet = 'subnet_mgmt'
- public_nw_id = None
- private_nw_id = None
- private_nw_subnet_id = None
- neutronclient = self.neutronclient()
- networks = neutronclient.list_networks()['networks']
- for nw in networks:
- if nw['name'] == public_nw:
- public_nw_id = nw['id']
- if nw['name'] == private_nw:
- private_nw_id = nw['id']
- if public_nw_id and private_nw_id:
- break
- self.assertIsNotNone(public_nw_id)
- self.assertIsNotNone(private_nw_id)
- subnets = neutronclient.list_subnets()['subnets']
- for subnet in subnets:
- if subnet['network_id'] == private_nw_id\
- and subnet['name'] == private_nw_subnet:
- private_nw_subnet_id = subnet['id']
- break
- self.assertIsNotNone(private_nw_subnet_id)
- router_id = neutronclient.create_router(
- {'router': {'name': 'fip_test_router'}})['router']['id']
- self.assertIsNotNone(router_id)
- self.addCleanup(self.neutronclient().delete_router, router_id)
- rt_gw_id = neutronclient.add_gateway_router(
- router_id, {'network_id': public_nw_id})['router']['id']
- self.assertIsNotNone(rt_gw_id)
- self.addCleanup(self.neutronclient().remove_gateway_router,
- router_id)
- rt_int = neutronclient.add_interface_router(
- router_id, {'subnet_id': private_nw_subnet_id})['id']
- self.assertIsNotNone(rt_int)
- self.addCleanup(self.neutronclient().remove_interface_router,
- router_id, {'subnet_id': private_nw_subnet_id})
-
- def test_assign_floatingip_to_vdu(self):
- vnfd_file = 'sample_tosca_assign_floatingip_to_vdu.yaml'
- vnf_name = 'Assign Floating IP to VDU'
- self.connect_public_and_private_nw_with_router()
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
- vnf_id = vnf_instance['vnf']['id']
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.addCleanup(self.client.delete_vnf, vnf_id)
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- vnf_show_out = self.client.show_vnf(vnf_id)['vnf']
- self.assertIsNotNone(vnf_show_out['mgmt_ip_address'])
-
- stack_id = vnf_show_out['instance_id']
- fip_res = self.get_heat_stack_resource(stack_id, 'FIP1')
- floating_ip_address = fip_res['attributes']['floating_ip_address']
- self.assertIsNotNone(floating_ip_address)
- fip_port_id = fip_res['attributes']['port_id']
- port_res = self.get_heat_stack_resource(stack_id, 'CP1')
- port_id = port_res['attributes']['id']
- self.assertEqual(fip_port_id, port_id)
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_maintenance.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_maintenance.py
deleted file mode 100644
index 0bc3c834d..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_maintenance.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright 2020 Distributed Cloud and Network (DCN)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from datetime import datetime
-import time
-import yaml
-
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-
-class VnfTestMaintenanceMonitor(base.BaseTackerTest):
-
- def _test_vnf_tosca_maintenance(self, vnfd_file, vnf_name):
- input_yaml = read_file(vnfd_file)
- tosca_dict = yaml.safe_load(input_yaml)
- tosca_arg = {'vnfd': {'name': vnf_name,
- 'attributes': {'vnfd': tosca_dict}}}
-
- # Create vnfd with tosca template
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertIsNotNone(vnfd_instance)
-
- # Create vnf with vnfd_id
- vnfd_id = vnfd_instance['vnfd']['id']
- vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
- vnf_instance = self.client.create_vnf(body=vnf_arg)
- vnf_id = vnf_instance['vnf']['id']
-
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- def _wait_vnf_active_and_assert_vdu_count(vdu_count, scale_type=None):
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
-
- vnf = self.client.show_vnf(vnf_id)['vnf']
- self.assertEqual(vdu_count, len(jsonutils.loads(
- vnf['mgmt_ip_address'])['VDU1']))
-
- def _verify_maintenance_attributes(vnf_dict):
- vnf_attrs = vnf_dict.get('attributes', {})
- maintenance_vdus = vnf_attrs.get('maintenance', '{}')
- maintenance_vdus = jsonutils.loads(maintenance_vdus)
- maintenance_url = vnf_attrs.get('maintenance_url', '')
- words = maintenance_url.split('/')
-
- self.assertEqual(len(maintenance_vdus.keys()), 2)
- self.assertEqual(len(words), 8)
- self.assertEqual(words[5], vnf_dict['id'])
- self.assertEqual(words[7], vnf_dict['tenant_id'])
-
- maintenance_urls = {}
- for vdu, access_key in maintenance_vdus.items():
- maintenance_urls[vdu] = maintenance_url + '/' + access_key
- return maintenance_urls
-
- def _verify_maintenance_alarm(url, project_id):
- aodh_client = self.aodh_http_client()
- alarm_query = {
- 'and': [
- {'=': {'project_id': project_id}},
- {'=~': {'alarm_actions': url}}]}
-
- # Check alarm instance for MAINTENANCE_ALL
- alarm_url = 'v2/query/alarms'
- encoded_data = jsonutils.dumps(alarm_query)
- encoded_body = jsonutils.dumps({'filter': encoded_data})
- resp, response_body = aodh_client.do_request(alarm_url, 'POST',
- body=encoded_body)
- self.assertEqual(len(response_body), 1)
- alarm_dict = response_body[0]
- self.assertEqual(url, alarm_dict.get('alarm_actions', [])[0])
- return response_body[0]
-
- def _verify_maintenance_actions(vnf_dict, alarm_dict):
- tacker_client = self.tacker_http_client()
- alarm_url = alarm_dict.get('alarm_actions', [])[0]
- tacker_url = '/%s' % alarm_url[alarm_url.find('v1.0'):]
-
- def _request_maintenance_action(state):
- alarm_body = _create_alarm_data(vnf_dict, alarm_dict, state)
- resp, response_body = tacker_client.do_request(
- tacker_url, 'POST', body=alarm_body)
-
- time.sleep(constants.SCALE_SLEEP_TIME)
- target_scaled = -1
- if state == 'MAINTENANCE':
- target_scaled = 0
- elif state == 'SCALE_IN':
- target_scaled = 1
- _wait_vnf_active_and_assert_vdu_count(2, scale_type='in')
- elif state == 'MAINTENANCE_COMPLETE':
- target_scaled = 0
- _wait_vnf_active_and_assert_vdu_count(3, scale_type='out')
-
- updated_vnf = self.client.show_vnf(vnf_id)['vnf']
- scaled = updated_vnf['attributes'].get('maintenance_scaled',
- '-1')
- self.assertEqual(int(scaled), target_scaled)
- time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
-
- _request_maintenance_action('MAINTENANCE')
- _request_maintenance_action('SCALE_IN')
- _request_maintenance_action('MAINTENANCE_COMPLETE')
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.ACTIVE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_OUT, cnt=1)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_IN, cnt=1)
-
- def _create_alarm_data(vnf_dict, alarm_dict, state):
- '''This function creates a raw payload of alarm to trigger Tacker directly.
-
- This function creates a raw payload which Fenix will put
- when Fenix process maintenance procedures. Alarm_receiver and
- specific steps of Fenix workflow will be tested by sending the raw
- to Tacker directly.
- '''
- utc_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
- fake_url = 'http://localhost/'
- sample_data = {
- 'alarm_name': alarm_dict['name'],
- 'alarm_id': alarm_dict['alarm_id'],
- 'severity': 'low',
- 'previous': 'alarm',
- 'current': 'alarm',
- 'reason': 'Alarm test for Tacker functional test',
- 'reason_data': {
- 'type': 'event',
- 'event': {
- 'message_id': uuidutils.generate_uuid(),
- 'event_type': 'maintenance.scheduled',
- 'generated': utc_time,
- 'traits': [
- ['project_id', 1, vnf_dict['tenant_id']],
- ['allowed_actions', 1, '[]'],
- ['instance_ids', 1, fake_url],
- ['reply_url', 1, fake_url],
- ['state', 1, state],
- ['session_id', 1, uuidutils.generate_uuid()],
- ['actions_at', 4, utc_time],
- ['reply_at', 4, utc_time],
- ['metadata', 1, '{}']
- ],
- 'raw': {},
- 'message_signature': uuidutils.generate_uuid()
- }
- }
- }
- return jsonutils.dumps(sample_data)
-
- _wait_vnf_active_and_assert_vdu_count(3)
- urls = _verify_maintenance_attributes(vnf_instance['vnf'])
-
- maintenance_url = urls.get('ALL', '')
- project_id = vnf_instance['vnf']['tenant_id']
- alarm_dict = _verify_maintenance_alarm(maintenance_url, project_id)
- _verify_maintenance_actions(vnf_instance['vnf'], alarm_dict)
-
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, (
- 'Failed to delete vnf %s after the maintenance test' % vnf_id)
- self.addCleanup(self.client.delete_vnfd, vnfd_id)
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
-
- # TODO(kden): add a test for migration
- def test_vnf_alarm_maintenance(self):
- self._test_vnf_tosca_maintenance(
- 'sample-tosca-vnfd-maintenance.yaml',
- 'maintenance_vnf')
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_multiple_vdu.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_multiple_vdu.py
deleted file mode 100644
index 0075406e7..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_multiple_vdu.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2016 Brocade Communications System, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from toscaparser import tosca_template
-import yaml
-
-from tacker.common import utils
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tosca import utils as toscautils
-
-CONF = cfg.CONF
-
-
-class VnfTestToscaMultipleVDU(base.BaseTackerTest):
- def test_create_delete_tosca_vnf_with_multiple_vdus(self):
- vnf_name = 'test_tosca_vnf_with_multiple_vdus'
- vnfd_file = 'sample-tosca-vnfd-multi-vdu.yaml'
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
-
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.assertEqual('ACTIVE',
- self.client.show_vnf(vnf_id)['vnf']['status'])
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
-
- # Validate mgmt_ip_address with input yaml file
- mgmt_ip_address = self.client.show_vnf(vnf_id)['vnf'][
- 'mgmt_ip_address']
- self.assertIsNotNone(mgmt_ip_address)
- mgmt_dict = yaml.safe_load(str(mgmt_ip_address))
-
- toscautils.updateimports(tosca_dict)
-
- tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
- yaml_dict_tpl=tosca_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
-
- vdus = toscautils.findvdus(tosca)
-
- self.assertEqual(len(vdus), len(mgmt_dict.keys()))
- for vdu in vdus:
- self.assertIsNotNone(mgmt_dict[vdu.name])
- self.assertEqual(True, utils.is_valid_ipv4(mgmt_dict[vdu.name]))
-
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete of test_vnf_with_multiple_vdus failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_reservation.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_reservation.py
deleted file mode 100644
index 9e00c2b24..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_reservation.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import json
-import testtools
-import time
-import yaml
-
-from blazarclient import exception
-from oslo_serialization import jsonutils
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-
-HYPERVISORS = []
-
-
-def hypervisors():
- global HYPERVISORS
- client = base.BaseTackerTest.novaclient()
- hypervisor_list = client.hypervisors.list()
- for hypervisor in hypervisor_list:
- if hypervisor.running_vms == 0:
- HYPERVISORS.append(hypervisor)
- return HYPERVISORS
-
-
-class VnfTestReservationMonitor(base.BaseTackerTest):
-
- def _test_vnf_tosca_reservation(self, vnfd_file, vnf_name,
- lease_id, param_values=None):
- input_yaml = read_file(vnfd_file)
- tosca_dict = yaml.safe_load(input_yaml)
-
- # TODO(niraj-singh): It's not possible to pass parameters through
- # parameter file due to Bug #1799683. Once this bug is fixed, no need
- # to update vnfd yaml.
- vdu_prop = tosca_dict['topology_template']['node_templates']['VDU1']
- vdu_prop['properties']['flavor'] = param_values.get('flavor')
- vdu_prop['properties']['reservation_metadata']['id'] =\
- param_values.get('server_group_id')
- vdu_prop['properties']['reservation_metadata']['resource_type'] =\
- param_values.get('resource_type')
- policies = tosca_dict['topology_template']['policies']
- policies[0]['RSV']['reservation']['properties']['lease_id'] =\
- param_values.get('lease_id')
-
- tosca_arg = {'vnfd': {'name': vnf_name,
- 'attributes': {'vnfd': tosca_dict}}}
- blazarclient = self.blazarclient()
-
- # Create vnfd with tosca template
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertIsNotNone(vnfd_instance)
-
- # Create vnf with vnfd_id
- vnfd_id = vnfd_instance['vnfd']['id']
- self.addCleanup(self.client.delete_vnfd, vnfd_id)
-
- vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
- vnf_instance = self.client.create_vnf(body=vnf_arg)
-
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- vnf_id = vnf_instance['vnf']['id']
-
- def _get_reservation_policy(vnfd_dict):
- policies = vnfd_dict['topology_template'].get('policies', [])
- res_policy = dict()
- for policy_dict in policies:
- for name, policy in policy_dict.items():
- if policy['type'] == constants.POLICY_RESERVATION:
- reservations = policy['reservation']
- for reserv_key, reserv_value in reservations.items():
- if reserv_key == 'properties':
- continue
- for policy_action in reserv_value:
- res_policy[reserv_key] = policy_action
- return res_policy
-
- def _check_lease_event_status():
- lease_event_status = _verify_and_get_lease_event_status()
- self.assertEqual(lease_event_status, constants.LEASE_EVENT_STATUS,
- "Lease %(lease_id)s with status %(status)s is"
- " expected to be %(target)s" %
- {"lease_id": lease_id,
- "status": lease_event_status,
- "target": constants.LEASE_EVENT_STATUS})
-
- def _verify_and_get_lease_event_status():
- start_time = int(time.time())
- while ((int(time.time()) - start_time) <
- constants.LEASE_CHECK_EVENT_TIMEOUT):
- lease_detail = blazarclient.lease.get(lease_id)
- lease_events = lease_detail.get('events')
- for event in lease_events:
- lease_event_status = event.get('status')
- if ((event.get('event_type') ==
- constants.START_LEASE_EVET_TYPE) and (
- lease_event_status ==
- constants.LEASE_EVENT_STATUS)):
- return lease_event_status
- time.sleep(constants.LEASE_CHECK_SLEEP_TIME)
-
- def _wait_vnf_active_and_assert_vdu_count(vdu_count, scale_type=None):
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
-
- vnf = self.client.show_vnf(vnf_id)['vnf']
- # {"VDU1": ["10.0.0.14", "10.0.0.5"]}
- if scale_type == 'scaling-in' and vdu_count == 0:
- self.assertFalse(jsonutils.loads(vnf.get('mgmt_ip_address',
- '{}')),
- "Once scaling-in completes, mgmt_ip_address"
- " should be empty.")
- elif scale_type == 'scaling-out':
- self.assertEqual(vdu_count, len(json.loads(
- vnf['mgmt_ip_address'])['VDU1']))
- elif vdu_count == 0 and scale_type is None:
- self.assertIsNone(vnf['mgmt_ip_address'])
-
- reservation_policy = _get_reservation_policy(tosca_dict)
- _wait_vnf_active_and_assert_vdu_count(0)
-
- # trigger alarm for start action
- start_action = reservation_policy.get('start_actions')
- scaling_out_action = start_action + '-out'
- _check_lease_event_status()
- # scaling-out action
- self.trigger_vnf(vnf_id, 'start_actions', scaling_out_action)
- time.sleep(constants.SCALE_SLEEP_TIME)
- # checking VDU's count after scaling out
- _wait_vnf_active_and_assert_vdu_count(2, scale_type='scaling-out')
- time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
-
- # trigger alarm for before end action
- before_end_action = reservation_policy.get('before_end_actions')
- scaling_in_action = before_end_action + '-in'
-
- # scaling-in action
- self.trigger_vnf(vnf_id, 'before_end_actions', scaling_in_action)
- time.sleep(constants.SCALE_SLEEP_TIME)
- # checking VDU's count after scaling in
- _wait_vnf_active_and_assert_vdu_count(0, scale_type='scaling-in')
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.ACTIVE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_OUT, cnt=1)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_IN, cnt=1)
-
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, (
- "Failed to delete vnf %s after the reservation test" % vnf_id)
- # Wait for delete vnf_instance
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
-
- def _get_instance_reservation(self):
- blazarclient = self.blazarclient()
- reservations = [{'disk_gb': 0,
- 'vcpus': 1, 'memory_mb': 1,
- 'amount': 2, 'affinity': True,
- 'resource_properties': '',
- 'resource_type': 'virtual:instance'}]
- events = []
-
- start_date = (datetime.datetime.utcnow() + datetime.timedelta(
- minutes=2)).strftime("%Y-%m-%d %H:%M")
- end_date = (datetime.datetime.utcnow() + datetime.timedelta(
- minutes=30)).strftime("%Y-%m-%d %H:%M")
- host_added = False
- for hypervisor in HYPERVISORS:
- try:
- blazar_host = blazarclient.host.create(
- hypervisor.hypervisor_hostname)
- host_added = True
- break
- except exception.BlazarClientException:
- pass
-
- if not host_added:
- self.skipTest("Skip test as Blazar failed to create host from one"
- " of available hypervisors '%s' as it found some"
- " instances were running on them" %
- ",".join([hypervisor.hypervisor_hostname
- for hypervisor in HYPERVISORS]))
- instance_reservation = blazarclient.lease.create(
- 'test-reservation', start_date, end_date, reservations, events)
-
- self.addCleanup(
- blazarclient.host.delete, blazar_host.get('id'))
- self.addCleanup(
- blazarclient.lease.delete, instance_reservation['id'])
-
- return instance_reservation, blazar_host
-
- @testtools.skipIf(len(hypervisors()) == 0,
- 'Skip test as there are no'
- ' hypervisors available in nova')
- def test_vnf_alarm_scale_with_instance_reservation(self):
- instance_reservation, blazar_host = self._get_instance_reservation()
- lease_id = str(instance_reservation['reservations'][0]['lease_id'])
- flavor_id = str(instance_reservation['reservations'][0]['flavor_id'])
- server_group_id = str(
- instance_reservation['reservations'][0]['server_group_id'])
- param_to_create_lease = {'lease_id': lease_id,
- 'flavor': flavor_id,
- 'server_group_id': server_group_id,
- 'resource_type': 'virtual_instance'}
- self._test_vnf_tosca_reservation(
- 'sample-tosca-vnfd-instance-reservation.yaml',
- 'VNFD1', lease_id, param_to_create_lease)
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_scale.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_scale.py
deleted file mode 100644
index ac6fc9679..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnf_scale.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-
-
-CONF = cfg.CONF
-
-
-class VnfTestToscaScale(base.BaseTackerTest):
-
- def test_vnf_tosca_scale(self):
- vnf_name = 'test_tosca_vnf_scale_all'
- vnfd_file = 'sample-tosca-scale-all.yaml'
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
- vnf_id = vnf_instance['vnf']['id']
-
- # TODO(kanagaraj-manickam) once load-balancer support is enabled,
- # update this logic to validate the scaling
- def _wait(count):
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- vnf = self.client.show_vnf(vnf_id)['vnf']
-
- # {"VDU1": ["10.0.0.14", "10.0.0.5"]}
- self.assertEqual(count, len(jsonutils.loads(vnf[
- 'mgmt_ip_address'])['VDU1']))
-
- _wait(2)
- # Get nested resources when vnf is in active state
- vnf_details = self.client.list_vnf_resources(vnf_id)['resources']
- resources_list = list()
- for vnf_detail in vnf_details:
- resources_list.append(vnf_detail['name'])
- self.assertIn('VDU1', resources_list)
-
- self.assertIn('CP1', resources_list)
- self.assertIn('SP1_group', resources_list)
-
- def _scale(type, count):
- body = {"scale": {'type': type, 'policy': 'SP1'}}
- self.client.scale_vnf(vnf_id, body)
- _wait(count)
-
- # scale out
- time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
- _scale('out', 3)
-
- # scale in
- time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
- _scale('in', 2)
-
- # Verifying that as part of SCALE OUT, VNF states PENDING_SCALE_OUT
- # and ACTIVE occurs and as part of SCALE IN, VNF states
- # PENDING_SCALE_IN and ACTIVE occur.
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.ACTIVE, cnt=2)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_OUT, cnt=1)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_SCALE,
- evt_constants.PENDING_SCALE_IN, cnt=1)
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- # Wait for delete vnf_instance
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnfc.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnfc.py
deleted file mode 100644
index 565f378fe..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnfc.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from oslo_config import cfg
-from toscaparser import tosca_template
-import unittest
-import yaml
-
-
-from tacker.common import utils
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-from tacker.tosca import utils as toscautils
-
-CONF = cfg.CONF
-SOFTWARE_DEPLOYMENT = 'OS::Heat::SoftwareDeployment'
-
-
-class VnfTestToscaVNFC(base.BaseTackerTest):
-
- @unittest.skip("Until BUG 1673012")
- def test_create_delete_tosca_vnfc(self):
- input_yaml = read_file('sample_tosca_vnfc.yaml')
- tosca_dict = yaml.safe_load(input_yaml)
- path = os.path.abspath(os.path.join(
- os.path.dirname(__file__), "../../../etc/samples"))
- vnfd_name = 'sample-tosca-vnfc'
- tosca_dict['topology_template']['node_templates'
- ]['firewall_vnfc'
- ]['interfaces'
- ]['Standard']['create'] = path \
- + '/install_vnfc.sh'
- tosca_arg = {'vnfd': {'name': vnfd_name,
- 'attributes': {'vnfd': tosca_dict}}}
-
- # Create vnfd with tosca template
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertIsNotNone(vnfd_instance)
-
- # Create vnf with vnfd_id
- vnfd_id = vnfd_instance['vnfd']['id']
- self.addCleanup(self.client.delete_vnfd, vnfd_id)
- vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name':
- "test_tosca_vnfc"}}
- vnf_instance = self.client.create_vnf(body=vnf_arg)
-
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(vnf_id,
- constants.VNFC_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.assertEqual('ACTIVE',
- self.client.show_vnf(vnf_id)['vnf']['status'])
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.PENDING_CREATE,
- cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
-
- # Validate mgmt_ip_address with input yaml file
- mgmt_ip_address = self.client.show_vnf(vnf_id)['vnf'][
- 'mgmt_ip_address']
- self.assertIsNotNone(mgmt_ip_address)
- mgmt_dict = yaml.safe_load(str(mgmt_ip_address))
-
- input_dict = yaml.safe_load(input_yaml)
- toscautils.updateimports(input_dict)
-
- tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
- yaml_dict_tpl=input_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
-
- vdus = toscautils.findvdus(tosca)
-
- self.assertEqual(len(vdus), len(mgmt_dict.keys()))
- for vdu in vdus:
- self.assertIsNotNone(mgmt_dict[vdu.name])
- self.assertEqual(True, utils.is_valid_ipv4(mgmt_dict[vdu.name]))
-
- # Check the status of SoftwareDeployment
- heat_stack_id = self.client.show_vnf(vnf_id)['vnf']['instance_id']
- resource_types = self.h_client.resources
- resources = resource_types.list(stack_id=heat_stack_id)
- for resource in resources:
- resource = resource.to_dict()
- if resource['resource_type'] == \
- SOFTWARE_DEPLOYMENT:
- self.assertEqual('CREATE_COMPLETE',
- resource['resource_status'])
- break
-
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete of test_vnf_with_multiple_vdus failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
diff --git a/tacker/tests/functional/legacy/vnfm/test_tosca_vnfd.py b/tacker/tests/functional/legacy/vnfm/test_tosca_vnfd.py
deleted file mode 100644
index df5ead4cc..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_tosca_vnfd.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2016 Brocade Communications System, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-
-from oslo_config import cfg
-import yaml
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-CONF = cfg.CONF
-
-
-class VnfdTestCreate(base.BaseTackerTest):
- def _test_create_list_delete_tosca_vnfd(self, tosca_vnfd_file, vnfd_name):
- input_yaml = read_file(tosca_vnfd_file)
- tosca_dict = yaml.safe_load(input_yaml)
- tosca_arg = {'vnfd': {'name': vnfd_name,
- 'attributes': {'vnfd': tosca_dict}}}
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertIsNotNone(vnfd_instance)
-
- vnfds = self.client.list_vnfds().get('vnfds')
- self.assertIsNotNone(vnfds, "List of vnfds are Empty after Creation")
-
- vnfd_id = vnfd_instance['vnfd']['id']
- self.verify_vnfd_events(
- vnfd_id, evt_constants.RES_EVT_CREATE,
- evt_constants.RES_EVT_ONBOARDED)
-
- try:
- self.client.delete_vnfd(vnfd_id)
- except Exception:
- assert False, "vnfd Delete failed"
- self.verify_vnfd_events(vnfd_id, evt_constants.RES_EVT_DELETE,
- evt_constants.RES_EVT_NA_STATE)
-
- def test_tosca_vnfd(self):
- self._test_create_list_delete_tosca_vnfd('sample-tosca-vnfd.yaml',
- 'sample-tosca-vnfd-template')
-
- def test_tosca_large_vnfd(self):
- self._test_create_list_delete_tosca_vnfd(
- 'sample-tosca-vnfd-large-template.yaml',
- 'sample-tosca-vnfd-large-template')
-
- def test_tosca_re_create_delete_vnfd(self):
- self._test_create_list_delete_tosca_vnfd('sample-tosca-vnfd.yaml',
- 'test_vnfd')
- time.sleep(1)
- self._test_create_list_delete_tosca_vnfd('sample-tosca-vnfd.yaml',
- 'test_vnfd')
diff --git a/tacker/tests/functional/legacy/vnfm/test_vnf.py b/tacker/tests/functional/legacy/vnfm/test_vnf.py
deleted file mode 100644
index c7aabc39c..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_vnf.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2015 Brocade Communications System, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from oslo_config import cfg
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-CONF = cfg.CONF
-VNF_CIRROS_CREATE_TIMEOUT = 120
-
-
-class VnfTestCreate(base.BaseTackerTest):
- def _test_create_delete_vnf(self, vnf_name, vnfd_name, vim_id=None):
- input_yaml = read_file('sample-tosca-vnfd-no-monitor.yaml')
- tosca_dict = yaml.safe_load(input_yaml)
- tosca_arg = {'vnfd': {'name': vnfd_name,
- 'attributes': {'vnfd': tosca_dict}}}
-
- # Create vnfd with tosca template
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertIsNotNone(vnfd_instance)
-
- # Create vnf with vnfd_id
- vnfd_id = vnfd_instance['vnfd']['id']
- self.addCleanup(self.client.delete_vnfd, vnfd_id)
-
- vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
- if vim_id:
- vnf_arg['vnf']['vim_id'] = vim_id
- vnf_instance = self.client.create_vnf(body=vnf_arg)
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.assertIsNotNone(self.client.show_vnf(vnf_id)['vnf'][
- 'mgmt_ip_address'])
- if vim_id:
- self.assertEqual(vim_id, vnf_instance['vnf']['vim_id'])
-
- # Get vnf details when vnf is in active state
- vnf_details = self.client.list_vnf_resources(vnf_id)['resources'][0]
- self.assertIn('name', vnf_details)
- self.assertIn('id', vnf_details)
- self.assertIn('type', vnf_details)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
-
- # update VIM name when VNFs are active.
- # check for exception.
- vim0_id = vnf_instance['vnf']['vim_id']
- msg = "VIM %s is still in use by VNF" % vim0_id
- try:
- update_arg = {'vim': {'name': "vnf_vim"}}
- self.client.update_vim(vim0_id, update_arg)
- except Exception as err:
- self.assertEqual(err.message, msg)
- else:
- self.assertTrue(
- False,
- "Name of vim(s) with active vnf(s) should not be changed!")
-
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
-
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- def test_create_delete_vnf_with_default_vim(self):
- self._test_create_delete_vnf(
- vnf_name='test_vnf_with_cirros_no_monitoring_default_vim',
- vnfd_name='sample_cirros_vnf_no_monitoring_default_vim')
-
- def test_create_delete_vnf_with_vim_id(self):
- vim_list = self.client.list_vims()
- vim0_id = self.get_vim(vim_list, 'VIM0')['id']
- self._test_create_delete_vnf(
- vim_id=vim0_id,
- vnf_name='test_vnf_with_cirros_vim_id',
- vnfd_name='sample_cirros_vnf_no_monitoring_vim_id')
diff --git a/tacker/tests/functional/legacy/vnfm/test_vnf_monitoring.py b/tacker/tests/functional/legacy/vnfm/test_vnf_monitoring.py
deleted file mode 100644
index 08ffbb0f3..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_vnf_monitoring.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2015 Brocade Communications System, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-
-
-class VnfTestPingMonitor(base.BaseTackerTest):
-
- def _test_vnf_with_monitoring(self, vnfd_file, vnf_name):
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
-
- # Verify vnf goes from ACTIVE->DEAD->ACTIVE states
- self.verify_vnf_restart(vnfd_instance, vnf_instance)
-
- # Delete vnf_instance with vnf_id
- vnf_id = vnf_instance['vnf']['id']
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, ("Failed to delete vnf %s after the monitor test" %
- vnf_id)
-
- # Verify VNF monitor events captured for states, ACTIVE and DEAD
- vnf_state_list = [evt_constants.ACTIVE, evt_constants.DEAD]
- self.verify_vnf_monitor_events(vnf_id, vnf_state_list)
-
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
-
- def test_create_delete_vnf_monitoring_tosca_template(self):
- self._test_vnf_with_monitoring(
- 'sample-tosca-vnfd-monitor.yaml',
- 'ping monitor vnf with tosca template')
-
- def test_create_delete_vnf_multi_vdu_monitoring_tosca_template(self):
- self._test_vnf_with_monitoring(
- 'sample-tosca-vnfd-multi-vdu-monitoring.yaml',
- 'ping monitor multi vdu vnf with tosca template')
-
- def _test_vnf_with_monitoring_vdu_autoheal_action(
- self, vnfd_file, vnf_name):
- vnfd_instance, vnf_instance, tosca_dict = self.vnfd_and_vnf_create(
- vnfd_file, vnf_name)
- vnf_id = vnf_instance['vnf']['id']
-
- self.verify_vnf_update(vnf_id)
-
- # Delete vnf_instance with vnf_id
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, ("Failed to delete vnf %s after the monitor test" %
- vnf_id)
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
-
- params = {'resource_id': vnf_id,
- 'resource_state': 'PENDING_UPDATE',
- 'event_type': evt_constants.RES_EVT_MONITOR}
- vnf_events = self.client.list_vnf_events(**params)
- # Check if vdu_autoheal action emits 4 monitoring events.
- self.assertGreaterEqual(4, len(vnf_events['vnf_events']))
-
- def test_vnf_monitoring_with_vdu_autoheal_action_for_multi_vdu(self):
- self._test_vnf_with_monitoring_vdu_autoheal_action(
- 'sample-tosca-vnfd-multi-vdu-monitoring-vdu-autoheal.yaml',
- 'ping multi vdu monitor having vdu_autoheal failure action '
- 'with tosca template')
-
- def test_vnf_monitoring_with_vdu_autoheal_action_for_single_vdu(self):
- self._test_vnf_with_monitoring_vdu_autoheal_action(
- 'sample-tosca-vnfd-single-vdu-monitoring-vdu-autoheal.yaml',
- 'ping vdu monitor having vdu_autoheal failure action '
- 'with tosca template')
diff --git a/tacker/tests/functional/legacy/vnfm/test_vnf_placement_policy.py b/tacker/tests/functional/legacy/vnfm/test_vnf_placement_policy.py
deleted file mode 100644
index 28bb09e2d..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_vnf_placement_policy.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2018 NTT DATA
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import yaml
-
-from tackerclient.common import exceptions
-
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-
-class VnfTestCreate(base.BaseTackerTest):
- def _test_create_delete_vnf(self, vnf_name, vnfd_name,
- placement_policy, vdu_name,
- vnf_expected_status="ACTIVE"):
- input_yaml = read_file(vnfd_name + '.yaml')
- tosca_dict = yaml.safe_load(input_yaml)
- tosca_arg = {'vnfd': {'name': vnfd_name,
- 'attributes': {'vnfd': tosca_dict}}}
-
- # Create vnfd with tosca template
- vnfd_instance = self.client.create_vnfd(body=tosca_arg)
- self.assertIsNotNone(vnfd_instance)
-
- # Get vnfd_id
- vnfd_id = vnfd_instance['vnfd']['id']
-
- # Add vnfd delete to cleanup job so that if vnf_instance fails to
- # create then it will be cleaned-up automatically in tearDown()
- self.addCleanup(self.client.delete_vnfd, vnfd_id)
-
- vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
-
- # Create vnf instance
- vnf_instance = self.client.create_vnf(body=vnf_arg)
- vnf_id = vnf_instance['vnf']['id']
-
- # Delete vnf_instance after tearDown
- self.addCleanup(self.wait_until_vnf_delete, vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.addCleanup(self.client.delete_vnf, vnf_id)
-
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
-
- self.wait_until_vnf_status(
- vnf_id,
- vnf_expected_status,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
-
- # VDU names are random generated names with initials of *vnf_name*.
- # Search the instance_list with *vdu_name* regular expression
- opts = {
- "name": vdu_name
- }
- compute_hosts = []
- vm_statuses = []
- for server in self.novaclient().servers.list(search_opts=opts):
- instance_host = getattr(server,
- "OS-EXT-SRV-ATTR:hypervisor_hostname")
- vm_statuses.append(getattr(server, "status"))
- compute_hosts.append(instance_host)
-
- # check vnf placement policies
- if placement_policy == 'affinity':
- # check "compute_hosts" is not empty
- self.assertTrue(compute_hosts)
-
- # Get the first compute_host on which VDU is deployed and compare
- # it with other compute hosts of VDU's
- compute_host = compute_hosts[0]
- for vnf in compute_hosts:
- self.assertEqual(compute_host, vnf)
- elif placement_policy == 'anti-affinity':
- if vnf_expected_status == "ERROR":
- # Check one of the VM should be in "ERROR" status
- # and instance host should be None.
- self.assertIn("ERROR", vm_statuses)
- self.assertIn(None, compute_hosts)
- else:
- distinct_comp_hosts = set(compute_hosts)
- self.assertEqual(len(compute_hosts), len(distinct_comp_hosts))
-
- def test_create_delete_vnf_with_placement_policy_affinity(self):
- self._test_create_delete_vnf(
- vnf_name='test_vnf_with_placement_policy_affinity',
- vnfd_name='sample-tosca-vnfd-placement-policy-affinity',
- vdu_name='affinity-vdu',
- placement_policy='affinity')
-
- def test_create_delete_vnf_with_placement_policy_anti_affinity(self):
- self._test_create_delete_vnf(
- vnf_name='test_vnf_with_placement_policy_anti_affinity',
- vnfd_name='sample-tosca-vnfd-placement-policy-anti-affinity',
- vdu_name='anti-affinity-vdu-multi-comp-nodes',
- placement_policy='anti-affinity')
-
- def test_vnf_with_policy_anti_affinity_insufficient_comp_nodes(self):
- self._test_create_delete_vnf(
- vnf_name='test_vnf_anti_affinity_insufficient_comp_nodes',
- vnfd_name='sample-tosca-vnfd-anti-affinity-multi-vdu',
- vdu_name='anti-affinity-vdu-insufficient-comp-nodes',
- placement_policy='anti-affinity',
- vnf_expected_status="ERROR")
-
- def test_vnf_with_placement_policy_invalid(self):
- exc = self.assertRaises(
- exceptions.BadRequest,
- self._test_create_delete_vnf,
- vnf_name='test_vnf_with_placement_policy_invalid',
- vnfd_name='sample-tosca-vnfd-placement-policy-invalid',
- vdu_name='invalid-placement-policy-vdu',
- placement_policy='invalid')
- self.assertIn('"invalid"', exc.message)
- self.assertIn('The value "invalid" of property "policy" is '
- 'not valid. Expected a value from "[affinity, '
- 'anti-affinity]".', exc.message)
diff --git a/tacker/tests/functional/legacy/vnfm/test_vnfm_param.py b/tacker/tests/functional/legacy/vnfm/test_vnfm_param.py
deleted file mode 100644
index 01f847f9d..000000000
--- a/tacker/tests/functional/legacy/vnfm/test_vnfm_param.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright 2015 Brocade Communications System, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-import yaml
-
-from tacker.plugins.common import constants as evt_constants
-from tacker.tests import constants
-from tacker.tests.functional import base
-from tacker.tests.utils import read_file
-
-
-class VnfmTestParam(base.BaseTackerTest):
- def _test_vnfd_create(self, vnfd_file, vnfd_name):
- yaml_input = read_file(vnfd_file)
- tosca_dict = yaml.safe_load(yaml_input)
- req_dict = {'vnfd': {'name': vnfd_name,
- 'attributes': {'vnfd': tosca_dict}}}
-
- # Create vnfd
- vnfd_instance = self.client.create_vnfd(body=req_dict)
- self.assertIsNotNone(vnfd_instance)
- vnfd_id = vnfd_instance['vnfd']['id']
- self.assertIsNotNone(vnfd_id)
- self.verify_vnfd_events(
- vnfd_id, evt_constants.RES_EVT_CREATE,
- evt_constants.RES_EVT_ONBOARDED)
- return vnfd_instance
-
- def _test_vnfd_delete(self, vnfd_instance):
- # Delete vnfd
- vnfd_id = vnfd_instance['vnfd']['id']
- self.assertIsNotNone(vnfd_id)
- try:
- self.client.delete_vnfd(vnfd_id)
- except Exception:
- assert False, "vnfd Delete failed"
- self.verify_vnfd_events(vnfd_id, evt_constants.RES_EVT_DELETE,
- evt_constants.RES_EVT_NA_STATE)
- try:
- vnfd_d = self.client.show_vnfd(vnfd_id)
- except Exception:
- assert True, "Vnfd Delete success" + str(vnfd_d) + str(Exception)
-
- def _test_vnf_create(self, vnfd_instance, vnf_name, param_values):
- # Create the vnf with values
- vnfd_id = vnfd_instance['vnfd']['id']
- # Create vnf with values file
- vnf_dict = dict()
- vnf_dict = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name,
- 'attributes': {'param_values': param_values}}}
- vnf_instance = self.client.create_vnf(body=vnf_dict)
-
- self.validate_vnf_instance(vnfd_instance, vnf_instance)
- vnf_id = vnf_instance['vnf']['id']
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_CREATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
- self.assertIsNotNone(self.client.show_vnf(vnf_id)['vnf'][
- 'mgmt_ip_address'])
- vnf_instance = self.client.show_vnf(vnf_id)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
-
- # Verify values dictionary is same as param values from vnf_show
-
- param_values = vnf_instance['vnf']['attributes']['param_values']
- param_values_dict = yaml.safe_load(param_values)
-
- return vnf_instance, param_values_dict
-
- def _test_vnf_update(self, vnf_instance, param_values):
- # Update Vnf
- vnf_id = vnf_instance['vnf']['id']
- new_param_values = {'vnf': {'attributes': {
- 'param_values': param_values}}}
- self.client.update_vnf(vnf_id, new_param_values)
- self.wait_until_vnf_active(
- vnf_id,
- constants.VNF_CIRROS_UPDATE_TIMEOUT,
- constants.ACTIVE_SLEEP_TIME)
-
- # Wait until the update on the heat side is completed,
- # because vnf deletion will cause a conflict without waiting for this.
- stack_id = self.client.show_vnf(vnf_id)['vnf']['instance_id']
- start_time = int(time.time())
- while True:
- vdu_resource = self.get_vdu_resource(stack_id, "VDU1")
- vdu_resource_dict = vdu_resource.to_dict()
- vdu_resource_status = vdu_resource_dict['resource_status']
- if ((int(time.time()) - start_time >
- constants.VNF_CIRROS_UPDATE_TIMEOUT) or
- (vdu_resource_status == 'UPDATE_COMPLETE')):
- break
- time.sleep(constants.ACTIVE_SLEEP_TIME)
-
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_UPDATE, evt_constants.PENDING_UPDATE)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_UPDATE, evt_constants.ACTIVE)
-
- # Verify vnf_param_values_dict is same as param values from vnf_show
- vnf_instance = self.client.show_vnf(vnf_id)
- vnf_param_values = vnf_instance['vnf']['attributes']['param_values']
- vnf_param_values_dict = yaml.safe_load(vnf_param_values)
-
- # Verify stack_parameters is same as parameters from stack_show
- instance_id = vnf_instance['vnf']['instance_id']
- stack_values = self.h_client.stacks.get(instance_id)
- stack_parameters = stack_values.parameters
-
- return vnf_param_values_dict, stack_parameters
-
- def _test_vnf_delete(self, vnf_instance):
- # Delete Vnf
- vnf_id = vnf_instance['vnf']['id']
- try:
- self.client.delete_vnf(vnf_id)
- except Exception:
- assert False, "vnf Delete failed"
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
-
- try:
- vnf_d = self.client.show_vnf(vnf_id)
- except Exception:
- assert True, "Vnf Delete success" + str(vnf_d) + str(Exception)
-
- def test_vnfd_param_tosca_template(self):
- vnfd_name = 'sample_cirros_vnfd_tosca'
- vnfd_instance = self._test_vnfd_create(
- 'sample-tosca-vnfd-param.yaml', vnfd_name)
- self._test_vnfd_delete(vnfd_instance)
-
- def test_vnf_param_tosca_template_default(self):
- self._test_vnf_param_tosca_template(
- vnfd_name='cirros_vnfd_tosca_param',
- vnfd_file='sample-tosca-vnfd-param.yaml',
- param_file='sample-tosca-vnf-values.yaml',
- vnf_name='test_vnf_with_parameters_tosca_template')
-
- def test_vnf_param_with_artifacts_image_tosca_template(self):
- self._test_vnf_param_tosca_template(
- vnfd_name='cirros_vnfd_tosca_param_artifacts_image',
- vnfd_file='sample-tosca-vnfd-param-artifacts-image.yaml',
- param_file='sample-tosca-vnf-artifacts-image-values.yaml',
- vnf_name='test_vnf_with_param_artifacts_image_tosca_templ')
-
- def _test_vnf_param_tosca_template(self, vnfd_file, vnfd_name,
- param_file, vnf_name):
- vnfd_instance = self._test_vnfd_create(vnfd_file, vnfd_name)
-
- # Get vnfd_id
- vnfd_id = vnfd_instance['vnfd']['id']
-
- # Add vnfd delete to cleanup job so that if vnf_instance fails to
- # create or update then it will be cleaned-up automatically
- # in tearDown()
- self.addCleanup(self.client.delete_vnfd, vnfd_id)
-
- # Create vnf instance
- values_str = read_file(param_file)
- values_dict = yaml.safe_load(values_str)
- vnf_instance, param_values_dict = self._test_vnf_create(
- vnfd_instance, vnf_name, values_dict)
- self.assertEqual(values_dict, param_values_dict)
-
- new_values_str = read_file('sample-tosca-vnf-update-values.yaml')
- new_values_dict = yaml.safe_load(new_values_str)
- vnf_param_values_dict, stack_parameters = self._test_vnf_update(
- vnf_instance, new_values_dict)
- for key, value in new_values_dict.items():
- if vnf_param_values_dict.get(key):
- self.assertEqual(value, vnf_param_values_dict[key])
- if stack_parameters.get(key):
- self.assertEqual(value, stack_parameters[key])
-
- self._test_vnf_delete(vnf_instance)
- vnf_id = vnf_instance['vnf']['id']
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE,
- evt_constants.PENDING_CREATE, cnt=2)
- self.verify_vnf_crud_events(
- vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)
- self.wait_until_vnf_delete(vnf_id,
- constants.VNF_CIRROS_DELETE_TIMEOUT)
- self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
- evt_constants.PENDING_DELETE, cnt=2)
diff --git a/tacker/extensions/nfvo_plugins/__init__.py b/tacker/tests/functional/sol/legacy_nfvo/__init__.py
similarity index 100%
rename from tacker/extensions/nfvo_plugins/__init__.py
rename to tacker/tests/functional/sol/legacy_nfvo/__init__.py
diff --git a/tacker/tests/functional/legacy/nfvo/test_vim.py b/tacker/tests/functional/sol/legacy_nfvo/test_vim.py
similarity index 89%
rename from tacker/tests/functional/legacy/nfvo/test_vim.py
rename to tacker/tests/functional/sol/legacy_nfvo/test_vim.py
index f112f1f29..ed7b0165f 100644
--- a/tacker/tests/functional/legacy/nfvo/test_vim.py
+++ b/tacker/tests/functional/sol/legacy_nfvo/test_vim.py
@@ -15,7 +15,6 @@
import time
import yaml
-from tacker.plugins.common import constants as evt_constants
from tacker.tests.functional import base
from tacker.tests.utils import read_file
@@ -42,7 +41,6 @@ class VimTestCreate(base.BaseTackerTest):
vim_obj = vim_res['vim']
vim_id = vim_obj['id']
self.verify_vim(vim_obj, data, name, description, version)
- self.verify_vim_events(vim_id, evt_constants.RES_EVT_CREATE)
# Read vim
vim_show_res = self.client.show_vim(vim_id)
@@ -52,7 +50,6 @@ class VimTestCreate(base.BaseTackerTest):
vim_update = self.client.update_vim(vim_id, update_vim_arg)
vim_obj = vim_update['vim']
self.verify_vim(vim_obj, data, new_name, new_desc, version)
- self.verify_vim_events(vim_id, evt_constants.RES_EVT_UPDATE)
# With the updated name above, create another VIM with the
# same name and check for Duplicate name exception.
@@ -77,7 +74,6 @@ class VimTestCreate(base.BaseTackerTest):
self.client.delete_vim(vim_id)
except Exception:
self.assertFalse(True, "Failed to delete vim %s" % vim_id)
- self.verify_vim_events(vim_id, evt_constants.RES_EVT_DELETE)
def verify_vim(self, vim_instance, config_data, name, description,
version):
@@ -97,19 +93,6 @@ class VimTestCreate(base.BaseTackerTest):
method_name = 'verify_vim_' + version
getattr(self, method_name)(vim_instance, config_data)
- def verify_vim_events(self, vim_id, evt_type, tstamp=None, cnt=1):
- params = {'resource_id': vim_id,
- 'resource_type': evt_constants.RES_TYPE_VIM,
- 'event_type': evt_type}
- if tstamp:
- params['timestamp'] = tstamp
-
- vim_evt_list = self.client.list_vim_events(**params)
-
- self.assertIsNotNone(vim_evt_list['vim_events'],
- "List of VIM events are Empty")
- self.assertEqual(cnt, len(vim_evt_list['vim_events']))
-
def verify_vim_v3(self, vim_instance, config_data):
self.assertEqual(config_data['project_name'],
vim_instance['auth_cred']['project_name'])
diff --git a/tacker/tests/unit/api/v1/test_resource_helper.py b/tacker/tests/unit/api/v1/test_resource_helper.py
index 320daee0f..e9098f1e5 100644
--- a/tacker/tests/unit/api/v1/test_resource_helper.py
+++ b/tacker/tests/unit/api/v1/test_resource_helper.py
@@ -32,16 +32,10 @@ class ResourceHelperTestCase(base.BaseTestCase):
'allow_post': False,
'allow_put': False,
}
- },
- 'vnffgs': {
- 'id': {
- 'allow_post': False,
- 'allow_put': False,
- }
- },
+ }
}
- expected_res = {'vnffgs': 'vnffg', 'vims': 'vim'}
+ expected_res = {'vims': 'vim'}
result = build_plural_mappings(special_mappings, resource_map)
self.assertEqual(expected_res, result)
@@ -53,7 +47,7 @@ class ResourceHelperTestCase(base.BaseTestCase):
'allow_post': False,
}
},
- 'vnffgs': {
+ 'vims': {
'id': {
'allow_post': False,
'allow_put': False,
@@ -61,21 +55,21 @@ class ResourceHelperTestCase(base.BaseTestCase):
},
}
- expected_res = {'vnffgs': 'vnffg', 'policies': 'policy'}
+ expected_res = {'vims': 'vim', 'policies': 'policy'}
result = build_plural_mappings(special_mappings, resource_map)
self.assertEqual(expected_res, result)
@mock.patch.object(manager.TackerManager, "get_service_plugins")
def test_build_resource_info(self, mock_get_service_plugins):
mock_get_service_plugins.return_value = {"DUMMY": ""}
- plural_mappings = {'test_vnffgs': 'test_vnffg', 'policies': 'policy'}
+ plural_mappings = {'test_vims': 'test_vim', 'policies': 'policy'}
resource_map = {
'policies': {
'id': {
'allow_post': False,
}
},
- 'test_vnffgs': {
+ 'test_vims': {
'id': {
'allow_post': False,
},
@@ -100,5 +94,5 @@ class ResourceHelperTestCase(base.BaseTestCase):
action_map=action_map,
translate_name=True)
for i in range(len(result)):
- a = result[i].collection in ('policies', 'test-vnffgs')
+ a = result[i].collection in ('policies', 'test-vims')
self.assertEqual(a, True)
diff --git a/tacker/tests/unit/db/utils.py b/tacker/tests/unit/db/utils.py
index 623a1e038..9a0e6ee98 100644
--- a/tacker/tests/unit/db/utils.py
+++ b/tacker/tests/unit/db/utils.py
@@ -14,14 +14,10 @@
# under the License.
import codecs
-from datetime import datetime
import os
import yaml
-DUMMY_NS_2_NAME = 'dummy_ns_2'
-
-
def _get_template(name):
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
@@ -30,191 +26,38 @@ def _get_template(name):
return f.read()
-tosca_cvnf_vnfd = _get_template('test_tosca_cvnf.yaml')
-tosca_vnfd_openwrt = _get_template('test_tosca_openwrt.yaml')
-tosca_vnfd_openwrt_param = _get_template('test_tosca_openwrt_param.yaml')
-tosca_invalid_vnfd = _get_template('test_tosca_parser_failure.yaml')
etsi_vnfd = _get_template('etsi_nfv/tosca_vnfd.yaml')
etsi_vnfd_group = _get_template('etsi_nfv/tosca_vnfd_group_member.yaml')
-config_data = _get_template('config_data.yaml')
-update_config_data = _get_template('update_config_data.yaml')
-hot_data = _get_template('hot_data.yaml')
-param_data = _get_template('param_data.yaml')
-update_param_data = _get_template('update_param_data.yaml')
-update_invalid_param_data = _get_template('update_invalid_param_data.yaml')
-update_new_param_data = _get_template('update_new_param_data.yaml')
-vnffg_params = _get_template('vnffg_params.yaml')
-vnffg_multi_params = _get_template('vnffg_multi_params.yaml')
-vnffgd_template = yaml.safe_load(_get_template('vnffgd_template.yaml'))
-vnffgd_tosca_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_template.yaml'))
-vnffgd_tosca_no_classifier_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_no_classifier_template.yaml'))
-vnffgd_tosca_template_for_update = yaml.safe_load(_get_template(
- 'tosca_vnffgd_template_for_update.yaml'))
-vnffgd_legacy_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_legacy_template_for_update.yaml'))
-vnffgd_tosca_param_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_param_template.yaml'))
-vnffgd_tosca_str_param_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_str_param_template.yaml'))
-vnffgd_tosca_multi_param_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_multi_param_template.yaml'))
-vnffgd_invalid_tosca_template = yaml.safe_load(_get_template(
- 'tosca_invalid_vnffgd_template.yaml'))
-vnffgd_tosca_dupl_criteria_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_dupl_criteria_template.yaml'))
-vnfd_scale_tosca_template = _get_template('tosca_scale.yaml')
-vnfd_alarm_respawn_tosca_template = _get_template(
- 'test_tosca_vnfd_alarm_respawn.yaml')
-vnfd_alarm_scale_tosca_template = _get_template(
- 'test_tosca_vnfd_alarm_scale.yaml')
-vnfd_alarm_multi_actions_tosca_template = _get_template(
- 'test_tosca_vnfd_alarm_multi_actions.yaml')
-nsd_tosca_template = yaml.safe_load(_get_template('tosca_nsd_template.yaml'))
-vnffgd_wrong_cp_number_template = yaml.safe_load(_get_template(
- 'tosca_vnffgd_wrong_cp_number_template.yaml'))
-vnfd_instance_reservation_alarm_scale_tosca_template = _get_template(
- 'test_tosca-vnfd-instance-reservation.yaml')
-hot_grant = _get_template('hot_grant.yaml')
hot_scale_grant = _get_template('hot_scale_grant.yaml')
hot_scale_nest_grant = _get_template('hot_scale_nest_grant.yaml')
hot_scale_initial = _get_template('hot_scale_initial.yaml')
hot_scale_nest_initial = _get_template('hot_scale_nest_initial.yaml')
-def get_dummy_vnfd_obj():
- return {'vnfd': {'service_types': [{'service_type': 'vnfd'}],
- 'name': 'dummy_vnfd',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'attributes': {'vnfd': yaml.safe_load(
- tosca_vnfd_openwrt)},
- 'description': 'dummy_vnfd_description',
- 'template_source': 'onboarded',
- 'auth': {'tenantName': 'admin', 'passwordCredentials': {
- 'username': 'admin', 'password': 'devstack'}}}}
-
-
-def get_invalid_vnfd_obj():
- return {'vnfd': {'service_types': [{'service_type': 'vnfd'}],
- 'name': 'dummy_vnfd',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'attributes': {'vnfd': yaml.safe_load(
- tosca_invalid_vnfd)},
- 'description': 'dummy_vnfd_description',
- 'template_source': 'onboarded',
- 'auth': {'tenantName': 'admin', 'passwordCredentials': {
- 'username': 'admin', 'password': 'devstack'}}}}
-
-
-def get_dummy_vnfd_obj_inline():
- return {'vnfd': {'service_types': [{'service_type': 'vnfd'}],
- 'name': 'tmpl-koeak4tqgoqo8cr4-dummy_inline_vnf',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'attributes': {'vnfd': yaml.safe_load(
- tosca_vnfd_openwrt)},
- 'template_source': 'inline',
- 'auth': {'tenantName': 'admin', 'passwordCredentials': {
- 'username': 'admin', 'password': 'devstack'}}}}
-
-
-def get_dummy_inline_vnf_obj():
- return {'vnf': {'description': 'dummy_inline_vnf_description',
- 'vnfd_template': yaml.safe_load(tosca_vnfd_openwrt),
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_inline_vnf',
- 'attributes': {},
- 'vnfd_id': None}}
-
-
-def get_dummy_inline_cvnf_obj():
- return {'vnf': {'description': 'dummy_inline_cvnf_description',
- 'vnfd_template': yaml.safe_load(tosca_cvnf_vnfd),
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_cvnf',
- 'attributes': {},
- 'vnfd_id': None}}
-
-
-def get_dummy_vnf_obj():
- return {'vnf': {'description': 'dummy_vnf_description',
- 'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnf',
- 'deleted_at': datetime.min,
- 'attributes': {},
- 'vnfd_template': None}}
-
-
-def get_dummy_vnf_config_obj():
- return {'vnf': {'attributes': {'config': {'vdus': {'vdu1': {
- 'config': {'firewall': 'dummy_firewall_values'}}}}}}}
-
-
-def get_dummy_vnf_invalid_config_type_obj():
- return {'vnf': {'attributes': {'config': 'dummy_config'}}}
-
-
-def get_dummy_vnf_invalid_param_content():
- return {'vnf': {'attributes': {'param_values': {}}}}
-
-
-def get_dummy_vnf_param_obj():
- return {'vnf': {'attributes': {'param_values':
- {'flavor': 'm1.tiny',
- 'reservation_id': '99999999-3925-4c9e-9074-239a902b68d7'}}}}
-
-
-def get_dummy_vnf_invalid_param_type_obj():
- return {'vnf': {'attributes': {'param_values': 'dummy_param'}}}
-
-
def get_dummy_vnf(status='PENDING_CREATE', scaling_group=False,
instance_id=None):
+ def_dir = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "../vnfm/infra_drivers/openstack/data/etsi_nfv")
+ vnfd = yaml.safe_load(etsi_vnfd)
+ vnfd['imports'] = [
+ f'{def_dir}/etsi_nfv_sol001_common_types.yaml',
+ f'{def_dir}/etsi_nfv_sol001_vnfd_types.yaml']
dummy_vnf = {'status': status, 'instance_id': instance_id, 'name':
- 'test_openwrt', 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
+ 'test_vnf', 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'vnfd': {
'service_types': [{'service_type': 'vnfd',
'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
- 'description': 'OpenWRT with services',
+ 'description': 'Dummy VNF',
'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'mgmt_driver': 'openwrt',
- 'attributes': {'vnfd': tosca_vnfd_openwrt},
+ 'attributes': {'vnfd': str(vnfd)},
'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec',
- 'name': 'openwrt_services'},
+ 'name': 'dummy_vnf'},
'mgmt_ip_address': None, 'service_context': [],
'attributes': {'param_values': ''},
'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'description': 'OpenWRT with services'}
- if scaling_group:
- dummy_vnf['attributes'].update({'scaling_group_names':
- '{"SP1": "SP1_group"}',
- 'heat_template': 'test'})
- return dummy_vnf
-
-
-def get_dummy_vnf_test(status='PENDING_CREATE', scaling_group=False,
- instance_id=None):
- dummy_vnf = {'status': status, 'instance_id': instance_id, 'name':
- 'test_openwrt', 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vnfd': {
- 'service_types': [{'service_type': 'vnfd',
- 'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
- 'description': 'OpenWRT with services',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'mgmt_driver': 'openwrt',
- 'attributes': {'vnfd_simple': tosca_vnfd_openwrt},
- 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec',
- 'name': 'openwrt_services'},
- 'mgmt_ip_address': None, 'service_context': [],
- 'attributes': {'param_values': ''},
- 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'description': 'OpenWRT with services'}
+ 'description': 'Dummy VNF'}
if scaling_group:
dummy_vnf['attributes'].update({'scaling_group_names':
'{"SP1": "SP1_group"}',
@@ -226,20 +69,19 @@ def get_dummy_vnf_etsi(status='PENDING_CREATE', scaling_group=False,
instance_id=None, flavour='Simple', vnfd_name=None):
vnfd_key = 'vnfd_' + flavour
dummy_vnf = {'status': status, 'instance_id': instance_id, 'name':
- 'test_openwrt', 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
+ 'test_vnf_etsi', 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
'vnfd': {
'service_types': [{'service_type': 'vnfd',
'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
- 'description': 'OpenWRT with services',
+ 'description': 'Dummy VNF etsi',
'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'mgmt_driver': 'openwrt',
'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec',
- 'name': 'openwrt_services'},
+ 'name': 'dummy_vnf_etsi'},
'mgmt_ip_address': None, 'service_context': [],
'attributes': {'param_values': ''},
'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'description': 'OpenWRT with services'}
+ 'description': 'Dummy VNF etsi'}
if not vnfd_name:
# Set vnfd including without "tosca.groups.nfv.PlacementGroup"
dummy_vnf['vnfd']['attributes'] = {vnfd_key: etsi_vnfd}
@@ -253,63 +95,6 @@ def get_dummy_vnf_etsi(status='PENDING_CREATE', scaling_group=False,
return dummy_vnf
-def get_dummy_vnf_config_attr():
- return {'status': 'PENDING_CREATE', 'instance_id': None, 'name':
- 'test_openwrt', 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vnfd': {'service_types': [{'service_type': 'vnfd',
- 'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
- 'description': 'OpenWRT with services',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'mgmt_driver': 'openwrt',
- 'attributes': {'vnfd': tosca_vnfd_openwrt},
- 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec', 'name':
- 'openwrt_services'}, 'mgmt_ip_address': None,
- 'service_context': [],
- 'attributes': {'config': config_data},
- 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'description': 'OpenWRT with services'}
-
-
-def get_dummy_vnf_param_attr():
- return {'status': 'PENDING_CREATE', 'instance_id': None, 'name':
- 'test_openwrt', 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vnfd': {'service_types': [{'service_type': 'vnfd',
- 'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
- 'description': 'OpenWRT with services',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'mgmt_driver': 'openwrt',
- 'attributes': {'vnfd': tosca_vnfd_openwrt_param},
- 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec',
- 'name': 'openwrt_services'},
- 'mgmt_url': None, 'service_context': [],
- 'attributes': {'heat_template': hot_data,
- 'param_values': param_data},
- 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'description': 'OpenWRT with services'}
-
-
-def get_dummy_vnf_update_config():
- return {'vnf': {'attributes': {'config': update_config_data}}}
-
-
-def get_dummy_vnf_update_param():
- return {'vnf': {'attributes': {'param_values': update_param_data}}}
-
-
-def get_dummy_vnf_update_new_param():
- return {'vnf': {'attributes': {'param_values': update_new_param_data}}}
-
-
-def get_dummy_vnf_update_invalid_param():
- return {'vnf': {'attributes': {'param_values': update_invalid_param_data}}}
-
-
-def get_dummy_vnf_update_empty_param():
- return {'vnf': {'attributes': {'param_values': {}}}}
-
-
def get_vim_obj():
return {'vim': {'type': 'openstack',
'auth_url': 'http://localhost/identity',
@@ -332,226 +117,6 @@ def get_vim_auth_obj():
'project_domain_name': 'Default'}
-def get_dummy_vnffgd_obj():
- return {'vnffgd': {'name': 'dummy_vnffgd',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'template': {'vnffgd': vnffgd_tosca_template},
- 'description': 'dummy_vnffgd_description',
- 'template_source': 'onboarded'}}
-
-
-def get_dummy_vnffgd_obj_inline():
- return {'vnffgd': {'name': 'dummy_vnffgd_inline',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'template': {'vnffgd': vnffgd_tosca_template},
- 'description': 'dummy_vnffgd_description_inline',
- 'template_source': 'inline'}}
-
-
-def get_dummy_vnffgd_obj_no_description():
- return {'vnffgd': {'name': 'dummy_vnffgd',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'template': {'vnffgd': vnffgd_tosca_template},
- 'template_source': 'onboarded'}}
-
-
-def get_dummy_vnffgd_obj_no_name():
- return {'vnffgd': {'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'template': {'vnffgd': vnffgd_tosca_template},
- 'description': 'dummy_vnffgd_description',
- 'template_source': 'onboarded'}}
-
-
-def get_dummy_vnffg_obj():
- return {'vnffg': {'description': 'dummy_vnffg_description',
- 'vnffgd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'attributes': {'template': vnffgd_tosca_template},
- 'vnf_mapping': {},
- 'symmetrical': False}}
-
-
-def get_dummy_vnffg_no_classifier_obj():
- return {'vnffg': {'description': 'dummy_vnffg_no_classifier_description',
- 'vnffgd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'attributes': {
- 'template': vnffgd_tosca_no_classifier_template},
- 'vnf_mapping': {},
- 'symmetrical': False}}
-
-
-def get_dummy_vnffg_obj_inline():
- return {'vnffg': {'description': 'dummy_vnffg_description_inline',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg_inline',
- 'attributes': {'template': vnffgd_tosca_template},
- 'vnf_mapping': {},
- 'symmetrical': False,
- 'vnffgd_template': vnffgd_tosca_template}}
-
-
-def get_dummy_vnffg_obj_update_vnffgd_template():
- return {'vnffg': {'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'symmetrical': False,
- 'vnffgd_template': vnffgd_tosca_template_for_update}}
-
-
-def get_dummy_vnffg_obj_legacy_vnffgd_template():
- return {'vnffg': {'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'symmetrical': False,
- 'vnffgd_template': vnffgd_legacy_template}}
-
-
-def get_dummy_vnffg_param_obj():
- return {'vnffg': {'description': 'dummy_vnf_description',
- 'vnffgd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'attributes': {
- 'template': vnffgd_tosca_param_template,
- 'param_values':
- yaml.safe_load(vnffg_params)
- },
- 'vnf_mapping': {},
- 'symmetrical': False}}
-
-
-def get_dummy_vnffg_str_param_obj():
- return {'vnffg': {'description': 'dummy_vnf_description',
- 'vnffgd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'attributes': {
- 'template': vnffgd_tosca_param_template,
- 'param_values': 'value not dict format'},
- 'vnf_mapping': {},
- 'symmetrical': False}}
-
-
-def get_dummy_vnffg_multi_param_obj():
- return {'vnffg': {'description': 'dummy_vnf_description',
- 'vnffgd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'attributes': {
- 'template': vnffgd_tosca_multi_param_template,
- 'param_values':
- yaml.safe_load(vnffg_multi_params)
- },
- 'vnf_mapping': {},
- 'symmetrical': False}}
-
-
-def get_dummy_vnffg_obj_vnf_mapping():
- return {'vnffg': {'description': 'dummy_vnf_description',
- 'vnffgd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'attributes': {'template': vnffgd_tosca_template},
- 'vnf_mapping': {
- 'VNF1': '91e32c20-6d1f-47a4-9ba7-08f5e5effe07',
- 'VNF3': '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b'
- },
- 'symmetrical': False}}
-
-
-def get_dummy_vnffg_obj_dupl_criteria():
- return {'vnffg': {'description': 'dummy_vnffg_description',
- 'vnffgd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnffg',
- 'attributes': {'template':
- vnffgd_tosca_dupl_criteria_template},
- 'vnf_mapping': {},
- 'symmetrical': False}}
-
-
-def get_dummy_nsd_obj():
- return {'nsd': {'description': 'dummy_nsd_description',
- 'name': 'dummy_NSD',
- 'tenant_id': '8819a1542a5948b68f94d4be0fd50496',
- 'attributes': {'nsd': nsd_tosca_template},
- 'template_source': 'onboarded'}}
-
-
-def get_dummy_nsd_obj_inline():
- return {'nsd': {'description': 'dummy_nsd_description_inline',
- 'name': 'dummy_NSD_inline',
- 'tenant_id': '8819a1542a5948b68f94d4be0fd50496',
- 'attributes': {'nsd': nsd_tosca_template},
- 'template_source': 'inline'}}
-
-
-def get_dummy_ns_obj():
- return {'ns': {'description': 'dummy_ns_description',
- 'id': 'ba6bf017-f6f7-45f1-a280-57b073bf78ea',
- 'nsd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_ns',
- 'attributes': {
- 'param_values': {'nsd': {'vl1_name': 'net_mgmt',
- 'vl2_name': 'net0'}}}}}
-
-
-def get_dummy_ns_obj_inline():
- return {'ns': {'description': 'dummy_ns_description_inline',
- 'id': 'ff35e3f0-0a11-4071-bce6-279fdf1c8bf9',
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_ns_inline',
- 'attributes': {
- 'param_values': {'nsd': {'vl1_name': 'net_mgmt',
- 'vl2_name': 'net0'}}},
- 'nsd_template': nsd_tosca_template}}
-
-
-def get_dummy_ns_obj_2():
- return {'ns': {'description': 'dummy_ns_description',
- 'id': 'ba6bf017-f6f7-45f1-a280-57b073bf78ea',
- 'nsd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': DUMMY_NS_2_NAME,
- 'attributes': {
- 'param_values': {'nsd': {'vl1_name': 'net_mgmt',
- 'vl2_name': 'net0'}}}}}
-
-
-def get_dummy_vnf_instance():
- connection_info = get_dummy_vim_connection_info()
- return {'created_at': '', 'deleted': False, 'deleted_at': None,
- 'id': 'fake_id', 'instantiated_vnf_info': None,
- 'instantiation_state': 'NOT_INSTANTIATED',
- 'tenant_id': 'fake_tenant_id', 'updated_at': '',
- 'vim_connection_info': [connection_info],
- 'vnf_instance_description': 'VNF Description',
- 'vnf_instance_name': 'test', 'vnf_product_name': 'Sample VNF',
- 'vnf_provider': 'Company', 'vnf_software_version': '1.0',
- 'vnfd_id': 'fake_vnfd_id', 'vnfd_version': '1.0'}
-
-
-def get_dummy_vim_connection_info():
- return {'access_info': {
- 'auth_url': 'fake/url',
- 'cert_verify': 'False', 'password': 'admin',
- 'project_domain_name': 'Default',
- 'project_id': None, 'project_name': 'admin',
- 'user_domain_name': 'Default', 'username': 'admin'},
- 'created_at': '', 'deleted': False, 'deleted_at': '',
- 'id': 'fake_id', 'updated_at': '',
- 'vim_id': 'fake_vim_id', 'vim_type': 'openstack'}
-
-
-def get_dummy_grant_hot():
- return str(hot_grant)
-
-
def get_dummy_scale_grant_hot():
return str(hot_scale_grant)
diff --git a/tacker/tests/unit/hacking/__init__.py b/tacker/tests/unit/hacking/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/mistral/__init__.py b/tacker/tests/unit/mistral/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/nfvo/drivers/vim/test_openstack_driver.py b/tacker/tests/unit/nfvo/drivers/vim/test_openstack_driver.py
index 0327ec46f..72e6f660a 100644
--- a/tacker/tests/unit/nfvo/drivers/vim/test_openstack_driver.py
+++ b/tacker/tests/unit/nfvo/drivers/vim/test_openstack_driver.py
@@ -91,10 +91,6 @@ class FakeKeystone(mock.Mock):
pass
-class FakeNeutronClient(mock.Mock):
- pass
-
-
class FakeKeymgrAPI(mock.Mock):
pass
@@ -260,50 +256,6 @@ class TestOpenstack_Driver(base.TestCase):
self.keystone.initialize_client.assert_called_once_with(
**self.auth_obj)
- def test_get_vim_resource_id(self):
- resource_type = 'network'
- resource_name = 'net0'
- fake_networks = {'networks': [{'id': 'fake-uuid', 'name': 'net0'}]}
- fake_neutron_client = FakeNeutronClient()
- fake_neutron_client.list_networks.return_value = fake_networks
- self.openstack_driver._get_client = mock.Mock(
- return_value=fake_neutron_client)
-
- self.openstack_driver.get_vim_resource_id(
- self.vim_obj, resource_type, resource_name)
-
- self.openstack_driver._get_client.assert_called_once_with(
- self.vim_obj, mock.ANY)
- fake_neutron_client.list_networks.assert_called_once_with(
- **{'name': 'net0'})
-
- def test_get_vim_resource_id_name_not_unique(self):
- resource_type = 'network'
- resource_name = 'net0'
- fake_networks = {'networks': [{'id': 'fake-uuid-1', 'name': 'net0'},
- {'id': 'fake-uuid-2', 'name': 'net0'}]}
- fake_neutron_client = FakeNeutronClient()
- fake_neutron_client.list_networks.return_value = fake_networks
- self.openstack_driver._get_client = mock.Mock(
- return_value=fake_neutron_client)
-
- self.assertRaises(nfvo.VimGetResourceNameNotUnique,
- self.openstack_driver.get_vim_resource_id,
- self.vim_obj, resource_type, resource_name)
-
- def test_get_vim_resource_id_name_not_exist(self):
- resource_type = 'network'
- resource_name = 'net0'
- fake_networks = {'networks': []}
- fake_neutron_client = FakeNeutronClient()
- fake_neutron_client.list_networks.return_value = fake_networks
- self.openstack_driver._get_client = mock.Mock(
- return_value=fake_neutron_client)
-
- self.assertRaises(nfvo.VimGetResourceNotFoundException,
- self.openstack_driver.get_vim_resource_id,
- self.vim_obj, resource_type, resource_name)
-
@mock.patch('oslo_config.cfg.ConfigOpts.__getattr__')
def test_deregister_vim_barbican_ext_oauth2_auth(self, mock_get_conf_key):
mock_get_conf_key.side_effect = get_mock_conf_key_effect()
diff --git a/tacker/tests/unit/nfvo/drivers/vnffg/__init__.py b/tacker/tests/unit/nfvo/drivers/vnffg/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/nfvo/drivers/vnffg/sfc_drivers/__init__.py b/tacker/tests/unit/nfvo/drivers/vnffg/sfc_drivers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/nfvo/drivers/vnffg/sfc_drivers/networking-sfc/__init__.py b/tacker/tests/unit/nfvo/drivers/vnffg/sfc_drivers/networking-sfc/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/nfvo/drivers/vnffg/sfc_drivers/networking-sfc/test_n_sfc.py b/tacker/tests/unit/nfvo/drivers/vnffg/sfc_drivers/networking-sfc/test_n_sfc.py
deleted file mode 100644
index 6f652b950..000000000
--- a/tacker/tests/unit/nfvo/drivers/vnffg/sfc_drivers/networking-sfc/test_n_sfc.py
+++ /dev/null
@@ -1,285 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_utils import uuidutils
-
-from tacker import context
-from tacker.nfvo.drivers.vim import openstack_driver
-from tacker.tests.unit import base
-from tacker.tests.unit.db import utils
-
-
-class FakeNeutronClient(mock.Mock):
- def __init__(self):
- super(FakeNeutronClient, self).__init__()
- self.__fc_dict = {}
- self.__pp_dict = {}
- self.__ppg_dict = {}
- self.__chain_dict = {}
-
- def flow_classifier_create(self, fc_create_dict):
- fc_id = uuidutils.generate_uuid()
- self.__fc_dict[fc_id] = fc_create_dict
- return fc_id
-
- def show_flow_classifier(self, fc_dict):
- fc_name = fc_dict['name']
- for fc_id in self.__fc_dict:
- fc = self.__fc_dict[fc_id]
- if fc_name == fc['name']:
- return {'id': fc_id}
-
- return None
-
- def flow_classifier_update(self, fc_id, fc_update_dict):
- if fc_id not in self.__fc_dict:
- return None
- self.__fc_dict[fc_id] = fc_update_dict
- return fc_update_dict
-
- def flow_classifier_delete(self, fc_id):
- if fc_id not in self.__fc_dict:
- raise ValueError('fc not found')
- self.__fc_dict.pop(fc_id)
-
- def port_pair_create(self, port_pair):
- pp_id = uuidutils.generate_uuid()
- self.__pp_dict[pp_id] = port_pair
- return pp_id
-
- def port_pair_list(self):
- pp = {'port_pairs': [{'ingress': 'abc',
- 'egress': 'xyz'}]}
- return pp
-
- def show_port_pair(self, port_pair_dict):
- input_pp_name = port_pair_dict['name']
- for pp_id in self.__pp_dict:
- port_pair = self.__pp_dict[pp_id]
- if port_pair['name'] == input_pp_name:
- return {'id': pp_id}
-
- return None
-
- def port_pair_group_create(self, port_pair_group):
- ppg_id = uuidutils.generate_uuid()
- self.__ppg_dict[ppg_id] = port_pair_group
- return ppg_id
-
- def port_pair_group_list(self):
- value = {'port_pair_groups': []}
- return value
-
- def show_port_pair_group(self, port_pair_group_dict):
- input_ppg_name = port_pair_group_dict['name']
- for ppg_id in self.__ppg_dict:
- port_pair_group = self.__ppg_dict[ppg_id]
- if port_pair_group['name'] == input_ppg_name:
- return {'id': ppg_id}
-
- return None
-
- def port_chain_create(self, port_chain):
- chain_id = uuidutils.generate_uuid()
- self.__chain_dict[chain_id] = port_chain
- return chain_id
-
- def show_port_chain(self, port_chain_dict):
- input_chain_name = port_chain_dict['name']
- for chain_id in self.__chain_dict:
- port_chain = self.__chain_dict[chain_id]
- if port_chain['name'] == input_chain_name:
- return {'id': chain_id}
- return None
-
- def port_chain_delete(self, chain_id):
- if chain_id not in self.__chain_dict:
- raise ValueError('port chain delete failed')
- self.__chain_dict.pop(chain_id)
-
- def port_chain_list(self):
- pc = {'port_chains': [{'port_pair_groups': ['random_id1',
- 'random_id2']}]}
- return pc
-
-
-class TestChainSFC(base.TestCase):
-
- def setUp(self):
- super(TestChainSFC, self).setUp()
- self.context = context.get_admin_context()
- self.sfc_driver = openstack_driver.OpenStack_Driver()
- self._mock_neutron_client()
- self.addCleanup(mock.patch.stopall)
-
- def _mock_neutron_client(self):
- self.neutron_client = mock.Mock(wraps=FakeNeutronClient())
- fake_neutron_client = mock.Mock()
- fake_neutron_client.return_value = self.neutron_client
- self._mock(
- 'tacker.nfvo.drivers.vim.openstack_driver.'
- 'NeutronClient',
- fake_neutron_client)
-
- def _mock(self, target, new=mock.DEFAULT):
- patcher = mock.patch(target, new)
- return patcher.start()
-
- def test_create_flow_classifier(self):
- flow_classifier = {'name': 'fake_fc',
- 'source_port_range': '2005-2010',
- 'ip_proto': 6,
- 'destination_port_range': '80-180'}
- result = self.sfc_driver.\
- create_flow_classifier(name='fake_ffg', fc=flow_classifier,
- auth_attr=utils.get_vim_auth_obj())
- self.assertIsNotNone(result)
-
- def test_update_flow_classifier(self):
- auth_attr = utils.get_vim_auth_obj()
- flow_classifier = {'name': 'next_fake_fc',
- 'source_port_range': '2005-2010',
- 'ip_proto': 6,
- 'destination_port_range': '80-180'}
-
- fc_id = self.sfc_driver.\
- create_flow_classifier(name='fake_ffg', fc=flow_classifier,
- auth_attr=utils.get_vim_auth_obj())
-
- flow_classifier_update = {'name': 'next_fake_fc_two',
- 'instance_id': None,
- 'status': 'PENDING_CREATE',
- 'match': {'source_port_range': '5-10',
- 'ip_proto': 17,
- 'destination_port_range': '2-4'}}
-
- fc_id = self.sfc_driver.\
- create_flow_classifier(name='fake_ffg', fc=flow_classifier,
- auth_attr=utils.get_vim_auth_obj())
-
- self.assertIsNotNone(fc_id)
-
- vnf_1 = {'name': 'test_create_chain_vnf_1',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnf_2 = {'name': 'test_create_chain_vnf_2',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnf_3 = {'name': 'test_create_chain_vnf_3',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnfs = [vnf_1, vnf_2, vnf_3]
-
- chain_id = self.sfc_driver.create_chain(name='fake_ffg',
- path_id=None,
- fc_ids=fc_id,
- vnfs=vnfs,
- auth_attr=auth_attr)
- self.assertIsNotNone(chain_id)
-
- result = self.sfc_driver.\
- update_flow_classifier(chain_id=chain_id,
- fc=flow_classifier_update,
- auth_attr=utils.get_vim_auth_obj())
- self.assertIsNotNone(result)
-
- def test_delete_flow_classifier(self):
- flow_classifier = {'name': 'another_fake_fc',
- 'description': 'another flow-classifier',
- 'source_port_range': '1999-2005',
- 'ip_proto': 6,
- 'destination_port_range': '80-100'}
- fc_id = self.sfc_driver.\
- create_flow_classifier(name='fake_ffg', fc=flow_classifier,
- auth_attr=utils.get_vim_auth_obj())
-
- self.assertIsNotNone(fc_id)
-
- try:
- self.sfc_driver.\
- delete_flow_classifier(fc_id=fc_id,
- auth_attr=utils.get_vim_auth_obj())
- except Exception:
- self.assertTrue(True)
-
- def test_create_chain(self):
- auth_attr = utils.get_vim_auth_obj()
- flow_classifier = {'name': 'test_create_chain_fc',
- 'description': 'fc for testing create chain',
- 'source_port_range': '1997-2008',
- 'ip_proto': 6,
- 'destination_port_range': '80-100'}
- fc_id = self.sfc_driver.\
- create_flow_classifier(name='fake_ffg', fc=flow_classifier,
- auth_attr=auth_attr)
-
- self.assertIsNotNone(fc_id)
-
- vnf_1 = {'name': 'test_create_chain_vnf_1',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnf_2 = {'name': 'test_create_chain_vnf_2',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnf_3 = {'name': 'test_create_chain_vnf_3',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnfs = [vnf_1, vnf_2, vnf_3]
-
- result = self.sfc_driver.create_chain(name='fake_ffg',
- path_id=None,
- fc_ids=fc_id,
- vnfs=vnfs,
- auth_attr=auth_attr)
-
- self.assertIsNotNone(result)
-
- def test_delete_chain(self):
- auth_attr = utils.get_vim_auth_obj()
- flow_classifier = {'name': 'test_delete_chain_fc',
- 'description': 'fc for testing delete chain',
- 'source_port_range': '1000-2000',
- 'ip_proto': 6,
- 'destination_port_range': '80-180'}
- fc_id = self.sfc_driver.\
- create_flow_classifier(name='fake_ffg', fc=flow_classifier,
- auth_attr=auth_attr)
-
- self.assertIsNotNone(fc_id)
-
- vnf_1 = {'name': 'test_delete_chain_vnf_1',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnf_2 = {'name': 'test_delete_chain_vnf_2',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnf_3 = {'name': 'test_delete_chain_vnf_3',
- 'connection_points': [uuidutils.generate_uuid(),
- uuidutils.generate_uuid()]}
- vnfs = [vnf_1, vnf_2, vnf_3]
-
- chain_id = self.sfc_driver.create_chain(name='fake_ffg',
- path_id=None,
- fc_ids=fc_id,
- vnfs=vnfs,
- auth_attr=auth_attr)
-
- self.assertIsNotNone(chain_id)
-
- try:
- self.sfc_driver.delete_chain(chain_id,
- auth_attr=auth_attr)
- except Exception:
- self.assertTrue(True)
diff --git a/tacker/tests/unit/nfvo/test_nfvo_plugin.py b/tacker/tests/unit/nfvo/test_nfvo_plugin.py
index 1969dc8f0..4b5926ee1 100644
--- a/tacker/tests/unit/nfvo/test_nfvo_plugin.py
+++ b/tacker/tests/unit/nfvo/test_nfvo_plugin.py
@@ -13,34 +13,21 @@
# License for the specific language governing permissions and limitations
# under the License.
-import codecs
from datetime import datetime
from unittest import mock
-from unittest.mock import patch
from oslo_config import cfg
from oslo_utils import uuidutils
from requests_mock.contrib import fixture as rm_fixture
-from tacker.common import exceptions
from tacker import context
-from tacker.db.common_services import common_services_db_plugin
from tacker.db.nfvo import nfvo_db
-from tacker.db.nfvo import ns_db
-from tacker.db.nfvo import vnffg_db
-from tacker.extensions import nfvo
from tacker.keymgr import API as KEYMGR_API
-from tacker.manager import TackerManager
from tacker.nfvo import nfvo_plugin
-from tacker.plugins.common import constants
-from tacker.tests import constants as test_constants
from tacker.tests.unit.db import base as db_base
from tacker.tests.unit.db import utils
-from tacker.tests import utils as test_utils
-from tacker.vnfm import vim_client
SECRET_PASSWORD = '***'
-DUMMY_NS_2 = 'ba6bf017-f6f7-45f1-a280-57b073bf78ef'
def get_mock_conf_key_effect():
@@ -97,12 +84,6 @@ def dummy_get_vim(*args, **kwargs):
return vim_obj
-def _get_template(name):
- filename = test_utils.test_etc_sample(name)
- with codecs.open(filename, encoding='utf-8', errors='strict') as f:
- return f.read()
-
-
class FakeDriverManager(mock.Mock):
def invoke(self, *args, **kwargs):
if any(x in ['create', 'create_flow_classifier'] for
@@ -127,128 +108,6 @@ def dummy_get_vim_auth(*args, **kwargs):
'vim_type': 'openstack', 'vim_name': 'VIM0'}
-class FakeClient(mock.Mock):
- def __init__(self, auth):
- pass
-
-
-class FakeVNFMPlugin(mock.Mock):
-
- def __init__(self):
- super(FakeVNFMPlugin, self).__init__()
- self.vnf1_vnfd_id = 'eb094833-995e-49f0-a047-dfb56aaf7c4e'
- self.vnf1_vnf_id = '91e32c20-6d1f-47a4-9ba7-08f5e5effe07'
- self.vnf1_update_vnf_id = '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6'
- self.vnf3_vnfd_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
- self.vnf2_vnfd_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
- self.vnf3_vnf_id = '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b'
- self.vnf3_update_vnf_id = '10f66bc5-b2f1-45b7-a7cd-6dd6ad0017f5'
-
- self.cp11_id = 'd18c8bae-898a-4932-bff8-d5eac981a9c9'
- self.cp11_update_id = 'a18c8bae-898a-4932-bff8-d5eac981a9b8'
- self.cp12_id = 'c8906342-3e30-4b2a-9401-a251a7a9b5dd'
- self.cp12_update_id = 'b8906342-3e30-4b2a-9401-a251a7a9b5cc'
- self.cp32_id = '3d1bd2a2-bf0e-44d1-87af-a2c6b2cad3ed'
- self.cp32_update_id = '064c0d99-5a61-4711-9597-2a44dc5da14b'
-
- def get_vnfd(self, *args, **kwargs):
- if 'VNF1' in args:
- return {'id': self.vnf1_vnfd_id,
- 'name': 'VNF1',
- 'attributes': {'vnfd': _get_template(
- 'test-nsd-vnfd1.yaml')}}
- elif 'VNF2' in args:
- return {'id': self.vnf3_vnfd_id,
- 'name': 'VNF2',
- 'attributes': {'vnfd': _get_template(
- 'test-nsd-vnfd2.yaml')}}
-
- def get_vnfds(self, *args, **kwargs):
- if {'name': ['VNF1']} in args:
- return [{'id': self.vnf1_vnfd_id}]
- elif {'name': ['VNF3']} in args:
- return [{'id': self.vnf3_vnfd_id}]
- else:
- return []
-
- def get_vnfs(self, *args, **kwargs):
- if {'vnfd_id': [self.vnf1_vnfd_id]} in args:
- return [{'id': self.vnf1_vnf_id}]
- elif {'vnfd_id': [self.vnf3_vnfd_id]} in args:
- return [{'id': self.vnf3_vnf_id}]
- else:
- return None
-
- def get_vnf(self, *args, **kwargs):
- if self.vnf1_vnf_id in args:
- return self.get_dummy_vnf1()
- elif self.vnf1_update_vnf_id in args:
- return self.get_dummy_vnf1_update()
- elif self.vnf3_vnf_id in args:
- return self.get_dummy_vnf3()
- elif self.vnf3_update_vnf_id in args:
- return self.get_dummy_vnf3_update()
-
- def get_vnf_resources(self, *args, **kwargs):
- if self.vnf1_vnf_id in args:
- return self.get_dummy_vnf1_details()
- elif self.vnf1_update_vnf_id in args:
- return self.get_dummy_vnf1_update_details()
- elif self.vnf3_vnf_id in args:
- return self.get_dummy_vnf3_details()
- elif self.vnf3_update_vnf_id in args:
- return self.get_dummy_vnf3_update_details()
-
- def get_dummy_vnf1_details(self):
- return [{'name': 'CP11', 'id': self.cp11_id},
- {'name': 'CP12', 'id': self.cp12_id}]
-
- def get_dummy_vnf1_update_details(self):
- return [{'name': 'CP11', 'id': self.cp11_update_id},
- {'name': 'CP12', 'id': self.cp12_update_id}]
-
- def get_dummy_vnf3_details(self):
- return [{'name': 'CP32', 'id': self.cp32_id}]
-
- def get_dummy_vnf3_update_details(self):
- return [{'name': 'CP32', 'id': self.cp32_update_id}]
-
- def get_dummy_vnf1(self):
- return {'description': 'dummy_vnf_description',
- 'vnfd_id': self.vnf1_vnfd_id,
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnf1',
- 'attributes': {}}
-
- def get_dummy_vnf1_update(self):
- return {'description': 'dummy_vnf_description',
- 'vnfd_id': self.vnf1_vnfd_id,
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnf1_update',
- 'attributes': {}}
-
- def get_dummy_vnf3(self):
- return {'description': 'dummy_vnf_description',
- 'vnfd_id': self.vnf3_vnfd_id,
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnf2',
- 'attributes': {}}
-
- def get_dummy_vnf3_update(self):
- return {'description': 'dummy_vnf_description',
- 'vnfd_id': self.vnf3_vnfd_id,
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'name': 'dummy_vnf_update',
- 'attributes': {}}
-
- def _update_vnf_scaling(self, *args, **kwargs):
- pass
-
-
class TestNfvoPlugin(db_base.SqlTestCase):
def setUp(self):
super(TestNfvoPlugin, self).setUp()
@@ -403,19 +262,8 @@ class TestNfvoPlugin(db_base.SqlTestCase):
vim_dict = utils.get_vim_obj()
vim_type = 'openstack'
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.create_vim(self.context, vim_dict)
- self._cos_db_plugin.create_event.assert_any_call(
- self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
self._driver_manager.invoke.assert_any_call(
vim_type, 'register_vim', vim_obj=vim_dict['vim'])
self.assertIsNotNone(res)
@@ -436,19 +284,8 @@ class TestNfvoPlugin(db_base.SqlTestCase):
'tenant_id': 'test-project'}}
vim_type = 'kubernetes'
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.create_vim(self.context, vim_dict)
- self._cos_db_plugin.create_event.assert_any_call(
- self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
self._driver_manager.invoke.assert_any_call(
vim_type, 'register_vim', vim_obj=vim_dict['vim'])
self.assertIsNotNone(res)
@@ -480,19 +317,8 @@ class TestNfvoPlugin(db_base.SqlTestCase):
vim_auth_cert = vim_dict['vim']['auth_cred']['ssl_ca_cert']
vim_project = vim_dict['vim']['vim_project']
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.create_vim(self.context, vim_dict)
- self._cos_db_plugin.create_event.assert_any_call(
- self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
self._driver_manager.invoke.assert_any_call(
vim_type, 'register_vim', vim_obj=vim_dict['vim'])
self.assertIsNotNone(res)
@@ -518,24 +344,11 @@ class TestNfvoPlugin(db_base.SqlTestCase):
self.context.tenant_id = 'ad7ebc56538745a08ef7c5e97f8bd437'
vim_obj = self.nfvo_plugin._get_vim(self.context, vim_id)
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin.get_auth_dict'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
self.nfvo_plugin.delete_vim(self.context, vim_id)
self._driver_manager.invoke.assert_called_once_with(
vim_type, 'deregister_vim',
vim_obj=vim_obj)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_DELETE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
def test_update_vim(self):
vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
@@ -548,14 +361,7 @@ class TestNfvoPlugin(db_base.SqlTestCase):
self._insert_dummy_vim()
self.context.tenant_id = 'ad7ebc56538745a08ef7c5e97f8bd437'
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.update_vim(self.context, vim_dict['vim']['id'],
vim_dict)
vim_obj = self.nfvo_plugin._get_vim(
@@ -571,10 +377,6 @@ class TestNfvoPlugin(db_base.SqlTestCase):
self.assertEqual(vim_auth_username, res['auth_cred']['username'])
self.assertEqual(SECRET_PASSWORD, res['auth_cred']['password'])
self.assertIn('updated_at', res)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
def test_update_vim_barbican(self):
vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
@@ -589,14 +391,7 @@ class TestNfvoPlugin(db_base.SqlTestCase):
old_vim_obj = self.nfvo_plugin._get_vim(
self.context, vim_dict['vim']['id'])
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.update_vim(self.context, vim_dict['vim']['id'],
vim_dict)
vim_obj = self.nfvo_plugin._get_vim(
@@ -613,10 +408,6 @@ class TestNfvoPlugin(db_base.SqlTestCase):
self.assertEqual(vim_auth_username, res['auth_cred']['username'])
self.assertEqual(SECRET_PASSWORD, res['auth_cred']['password'])
self.assertIn('updated_at', res)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
def test_update_vim_userpass_to_oidc(self):
vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
@@ -640,14 +431,7 @@ class TestNfvoPlugin(db_base.SqlTestCase):
old_vim_obj = self.nfvo_plugin._get_vim(
self.context, vim_dict['vim']['id'])
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.update_vim(self.context, vim_dict['vim']['id'],
vim_dict)
vim_obj = self.nfvo_plugin._get_vim(
@@ -668,10 +452,6 @@ class TestNfvoPlugin(db_base.SqlTestCase):
self.assertEqual(SECRET_PASSWORD, res['auth_cred']['client_secret'])
self.assertEqual(vim_auth_cert, res['auth_cred']['ssl_ca_cert'])
self.assertIn('updated_at', res)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
def test_update_vim_token_to_oidc(self):
vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
@@ -695,14 +475,7 @@ class TestNfvoPlugin(db_base.SqlTestCase):
old_vim_obj = self.nfvo_plugin._get_vim(
self.context, vim_dict['vim']['id'])
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.update_vim(self.context, vim_dict['vim']['id'],
vim_dict)
vim_obj = self.nfvo_plugin._get_vim(
@@ -724,10 +497,6 @@ class TestNfvoPlugin(db_base.SqlTestCase):
self.assertEqual(SECRET_PASSWORD, res['auth_cred']['client_secret'])
self.assertEqual(vim_auth_cert, res['auth_cred']['ssl_ca_cert'])
self.assertIn('updated_at', res)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
def test_update_vim_oidc_to_token(self):
vim_dict = {'vim': {'id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
@@ -742,14 +511,7 @@ class TestNfvoPlugin(db_base.SqlTestCase):
old_vim_obj = self.nfvo_plugin._get_vim(
self.context, vim_dict['vim']['id'])
self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
res = self.nfvo_plugin.update_vim(self.context, vim_dict['vim']['id'],
vim_dict)
vim_obj = self.nfvo_plugin._get_vim(
@@ -770,1002 +532,3 @@ class TestNfvoPlugin(db_base.SqlTestCase):
self.assertNotIn('username', res['auth_cred'])
self.assertNotIn('password', res['auth_cred'])
self.assertIn('updated_at', res)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VIM,
- tstamp=mock.ANY)
-
- def _insert_dummy_vnffg_template(self):
- session = self.context.session
- vnffg_template = vnffg_db.VnffgTemplate(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- description='fake_template_description',
- template={'vnffgd': utils.vnffgd_tosca_template},
- template_source='onboarded')
- session.add(vnffg_template)
- session.flush()
- return vnffg_template
-
- def _insert_dummy_vnffg_template_inline(self):
- session = self.context.session
- vnffg_template = vnffg_db.VnffgTemplate(
- id='11da9f20-9347-4283-bc68-eb98061ef8f7',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='dummy_vnffgd_inline',
- description='dummy_vnffgd_description_inline',
- template={'vnffgd': utils.vnffgd_tosca_template},
- template_source='inline')
- session.add(vnffg_template)
- session.flush()
- return vnffg_template
-
- def _insert_dummy_vnffg_param_template(self):
- session = self.context.session
- vnffg_template = vnffg_db.VnffgTemplate(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- description='fake_template_description',
- template={'vnffgd': utils.vnffgd_tosca_param_template})
- session.add(vnffg_template)
- session.flush()
- return vnffg_template
-
- def _insert_dummy_vnffg_multi_param_template(self):
- session = self.context.session
- vnffg_template = vnffg_db.VnffgTemplate(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- description='fake_template_description',
- template={'vnffgd': utils.vnffgd_tosca_multi_param_template})
- session.add(vnffg_template)
- session.flush()
- return vnffg_template
-
- def _insert_dummy_vnffg_no_classifier_template(self):
- session = self.context.session
- vnffg_template = vnffg_db.VnffgTemplate(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- description='fake_template_description',
- template={'vnffgd': utils.vnffgd_tosca_no_classifier_template})
- session.add(vnffg_template)
- session.flush()
- return vnffg_template
-
- def _insert_dummy_vnffg_duplicate_criteria_template(self):
- session = self.context.session
- vnffg_template = vnffg_db.VnffgTemplate(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- description='fake_template_description',
- template={'vnffgd': utils.vnffgd_tosca_dupl_criteria_template})
- session.add(vnffg_template)
- session.flush()
- return vnffg_template
-
- def _insert_dummy_vnffg(self):
- session = self.context.session
- vnffg = vnffg_db.Vnffg(
- id='ffc1a59b-65bb-4874-94d3-84f639e63c74',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='dummy_vnffg',
- description="fake vnffg",
- vnffgd_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- status='ACTIVE',
- vnf_mapping={'VNF1': '91e32c20-6d1f-47a4-9ba7-08f5e5effe07',
- 'VNF3': '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b'})
- session.add(vnffg)
- nfp = vnffg_db.VnffgNfp(
- id='768f76a7-9025-4acd-b51c-0da609759983',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- status="ACTIVE",
- name='Forwarding_path1',
- vnffg_id='ffc1a59b-65bb-4874-94d3-84f639e63c74',
- path_id=51,
- symmetrical=False)
- session.add(nfp)
- sfc = vnffg_db.VnffgChain(
- id='f28e33bc-1061-4762-b942-76060bbd59c4',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- symmetrical=False,
- chain=[{'connection_points': [
- 'd18c8bae-898a-4932-bff8-d5eac981a9c9',
- 'c8906342-3e30-4b2a-9401-a251a7a9b5dd'],
- 'name': 'dummy_vnf1'},
- {'connection_points': ['3d1bd2a2-bf0e-44d1-87af-a2c6b2cad3ed'],
- 'name': 'dummy_vnf2'}],
- path_id=51,
- status='ACTIVE',
- nfp_id='768f76a7-9025-4acd-b51c-0da609759983',
- instance_id='bcfb295e-578e-405b-a349-39f06b25598c')
- session.add(sfc)
- fc = vnffg_db.VnffgClassifier(
- id='a85f21b5-f446-43f0-86f4-d83bdc5590ab',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='classifier_one',
- status='ACTIVE',
- instance_id='3007dc2d-30dc-4651-9184-f1e6273cc0b6',
- chain_id='f28e33bc-1061-4762-b942-76060bbd59c4',
- nfp_id='768f76a7-9025-4acd-b51c-0da609759983')
- session.add(fc)
- match = vnffg_db.ACLMatchCriteria(
- id='bdb0f2db-d4c2-42a2-a1df-426079ecc443',
- vnffgc_id='a85f21b5-f446-43f0-86f4-d83bdc5590ab',
- eth_src=None, eth_dst=None, eth_type=None, vlan_id=None,
- vlan_pcp=None, mpls_label=None, mpls_tc=None, ip_dscp=None,
- ip_ecn=None, ip_src_prefix=None, ip_dst_prefix='192.168.1.2/24',
- source_port_min=None, source_port_max=None,
- destination_port_min=80, destination_port_max=1024, ip_proto=6,
- network_id=None, network_src_port_id=None,
- network_dst_port_id=None, tenant_id=None, icmpv4_type=None,
- icmpv4_code=None, arp_op=None, arp_spa=None, arp_tpa=None,
- arp_sha=None, arp_tha=None, ipv6_src=None, ipv6_dst=None,
- ipv6_flabel=None, icmpv6_type=None, icmpv6_code=None,
- ipv6_nd_target=None, ipv6_nd_sll=None, ipv6_nd_tll=None)
- session.add(match)
- session.flush()
- return vnffg
-
- def test_validate_tosca(self):
- template = utils.vnffgd_tosca_template
- self.nfvo_plugin.validate_tosca(template)
-
- def test_validate_tosca_missing_tosca_ver(self):
- template = utils.vnffgd_template
- self.assertRaises(nfvo.ToscaParserFailed,
- self.nfvo_plugin.validate_tosca,
- template)
-
- def test_validate_tosca_invalid(self):
- template = utils.vnffgd_invalid_tosca_template
- self.assertRaises(nfvo.ToscaParserFailed,
- self.nfvo_plugin.validate_tosca,
- template)
-
- def test_validate_vnffg_properties(self):
- template = {'vnffgd': utils.vnffgd_tosca_template}
- self.nfvo_plugin.validate_vnffg_properties(template)
-
- def test_validate_vnffg_properties_wrong_number(self):
- template = {'vnffgd': utils.vnffgd_wrong_cp_number_template}
- self.assertRaises(nfvo.VnffgdWrongEndpointNumber,
- self.nfvo_plugin.validate_vnffg_properties,
- template)
-
- def test_create_vnffgd(self):
- vnffgd_obj = utils.get_dummy_vnffgd_obj()
- result = self.nfvo_plugin.create_vnffgd(self.context, vnffgd_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('template', result)
- self.assertIn('template_source', result)
- self.assertEqual('dummy_vnffgd', result['name'])
- self.assertEqual('dummy_vnffgd_description', result['description'])
- self.assertEqual('onboarded', result['template_source'])
-
- def test_create_vnffgd_inline(self):
- vnffgd_obj = utils.get_dummy_vnffgd_obj_inline()
- result = self.nfvo_plugin.create_vnffgd(self.context, vnffgd_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('template', result)
- self.assertEqual('inline', result['template_source'])
-
- def test_create_vnffgd_no_input_description(self):
- vnffgd_obj = utils.get_dummy_vnffgd_obj_no_description()
- result = self.nfvo_plugin.create_vnffgd(self.context, vnffgd_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('template', result)
- self.assertIn('template_source', result)
- self.assertEqual('Example VNFFG template', result['description'])
-
- def test_create_vnffgd_no_input_name(self):
- vnffgd_obj = utils.get_dummy_vnffgd_obj_no_name()
- result = self.nfvo_plugin.create_vnffgd(self.context, vnffgd_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('template', result)
- self.assertIn('template_source', result)
- self.assertEqual('example_vnffgd', result['name'])
-
- def test_create_vnffg_abstract_types(self):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg_obj = utils.get_dummy_vnffg_obj()
- result = self.nfvo_plugin.create_vnffg(self.context, vnffg_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('status', result)
- self.assertEqual('PENDING_CREATE', result['status'])
- self._driver_manager.invoke.assert_called_with(
- mock.ANY, mock.ANY,
- name=mock.ANY,
- path_id=mock.ANY,
- vnfs=mock.ANY,
- fc_ids=mock.ANY,
- auth_attr=mock.ANY,
- symmetrical=mock.ANY,
- correlation=mock.ANY)
-
- @mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin.create_vnffgd')
- def test_create_vnffg_abstract_types_inline(self, mock_create_vnffgd):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- mock_create_vnffgd.return_value = {'id':
- '11da9f20-9347-4283-bc68-eb98061ef8f7'}
- self._insert_dummy_vnffg_template_inline()
- vnffg_obj = utils.get_dummy_vnffg_obj_inline()
- result = self.nfvo_plugin.create_vnffg(self.context, vnffg_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('status', result)
- self.assertEqual('PENDING_CREATE', result['status'])
- self.assertEqual('dummy_vnffg_inline', result['name'])
- mock_create_vnffgd.assert_called_once_with(mock.ANY, mock.ANY)
- self._driver_manager.invoke.assert_called_with(
- mock.ANY, mock.ANY,
- name=mock.ANY,
- path_id=mock.ANY,
- vnfs=mock.ANY,
- fc_ids=mock.ANY,
- auth_attr=mock.ANY,
- symmetrical=mock.ANY,
- correlation=mock.ANY)
-
- def test_create_vnffg_param_values(self):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_param_template()
- vnffg_obj = utils.get_dummy_vnffg_param_obj()
- result = self.nfvo_plugin.create_vnffg(self.context, vnffg_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('status', result)
- self.assertEqual('PENDING_CREATE', result['status'])
- self._driver_manager.invoke.assert_called_with(
- mock.ANY, mock.ANY,
- name=mock.ANY,
- path_id=mock.ANY,
- vnfs=mock.ANY,
- fc_ids=mock.ANY,
- auth_attr=mock.ANY,
- symmetrical=mock.ANY,
- correlation=mock.ANY)
-
- def test_create_vnffg_no_classifier(self):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_no_classifier_template()
- vnffg_obj = utils.get_dummy_vnffg_no_classifier_obj()
- result = self.nfvo_plugin.create_vnffg(self.context, vnffg_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('status', result)
- self.assertEqual('PENDING_CREATE', result['status'])
- self._driver_manager.invoke.assert_called_with(
- mock.ANY, mock.ANY,
- name=mock.ANY,
- path_id=mock.ANY,
- vnfs=mock.ANY,
- fc_ids=mock.ANY,
- auth_attr=mock.ANY,
- symmetrical=mock.ANY,
- correlation=mock.ANY)
-
- def test_create_vnffg_param_value_format_error(self):
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- self._insert_dummy_vnffg_param_template()
- vnffg_obj = utils.get_dummy_vnffg_str_param_obj()
- self.assertRaises(nfvo.VnffgParamValueFormatError,
- self.nfvo_plugin.create_vnffg,
- self.context, vnffg_obj)
-
- def test_create_vnffg_template_param_not_parse(self):
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- self._insert_dummy_vnffg_multi_param_template()
- vnffg_obj = utils.get_dummy_vnffg_param_obj()
- self.assertRaises(nfvo.VnffgTemplateParamParsingException,
- self.nfvo_plugin.create_vnffg,
- self.context, vnffg_obj)
-
- def test_create_vnffg_vnf_mapping(self):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg_obj = utils.get_dummy_vnffg_obj_vnf_mapping()
- result = self.nfvo_plugin.create_vnffg(self.context, vnffg_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('status', result)
- self.assertEqual('PENDING_CREATE', result['status'])
- self._driver_manager.invoke.assert_called_with(
- mock.ANY, mock.ANY,
- name=mock.ANY,
- path_id=mock.ANY,
- vnfs=mock.ANY,
- fc_ids=mock.ANY,
- auth_attr=mock.ANY,
- symmetrical=mock.ANY,
- correlation=mock.ANY
- )
-
- def test_create_vnffg_duplicate_criteria(self):
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_duplicate_criteria_template()
- vnffg_obj = utils.get_dummy_vnffg_obj_dupl_criteria()
- self.assertRaises(nfvo.NfpDuplicatePolicyCriteria,
- self.nfvo_plugin.create_vnffg,
- self.context, vnffg_obj)
-
- def test_update_vnffg_nonexistent_vnf(self):
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- updated_vnffg = utils.get_dummy_vnffg_obj_vnf_mapping()
- updated_vnffg['vnffg']['symmetrical'] = True
- updated_vnf_mapping = \
- {'VNF1': '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6',
- 'VNF3': '5c7f5631-9e74-46e8-b3d2-397c0eda9d0b'}
- updated_vnffg['vnffg']['vnf_mapping'] = updated_vnf_mapping
- self.assertRaises(nfvo.VnffgInvalidMappingException,
- self.nfvo_plugin.update_vnffg,
- self.context, vnffg['id'], updated_vnffg)
-
- def test_update_vnffg_empty_vnf_mapping_dict(self):
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- updated_vnffg = utils.get_dummy_vnffg_obj_vnf_mapping()
- updated_vnffg['vnffg']['symmetrical'] = True
- updated_vnf_mapping = {}
- updated_vnffg['vnffg']['vnf_mapping'] = updated_vnf_mapping
- self.assertRaises(nfvo.VnfMappingNotFoundException,
- self.nfvo_plugin.update_vnffg,
- self.context, vnffg['id'], updated_vnffg)
-
- def test_update_vnffg_vnf_mapping_key_none(self):
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- updated_vnffg = utils.get_dummy_vnffg_obj_vnf_mapping()
- updated_vnffg['vnffg']['symmetrical'] = True
- updated_vnf_mapping = None
- updated_vnffg['vnffg']['vnf_mapping'] = updated_vnf_mapping
- self.assertRaises(nfvo.VnfMappingNotFoundException,
- self.nfvo_plugin.update_vnffg,
- self.context, vnffg['id'], updated_vnffg)
-
- def test_update_vnffg_vnfd_not_in_vnffg_template(self):
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- updated_vnffg = utils.get_dummy_vnffg_obj_vnf_mapping()
- updated_vnffg['vnffg']['symmetrical'] = True
- updated_vnf_mapping = \
- {'VNF2': '91e32c20-6d1f-47a4-9ba7-08f5e5effad7',
- 'VNF3': '5c7f5631-9e74-46e8-b3d2-397c0eda9d0b'}
- updated_vnffg['vnffg']['vnf_mapping'] = updated_vnf_mapping
- self.assertRaises(nfvo.VnfMappingNotValidException,
- self.nfvo_plugin.update_vnffg,
- self.context, vnffg['id'], updated_vnffg)
-
- def test_update_vnffg_vnf_mapping(self):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- updated_vnffg = utils.get_dummy_vnffg_obj_vnf_mapping()
- updated_vnffg['vnffg']['symmetrical'] = True
- expected_mapping = {'VNF1': '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6',
- 'VNF3': '10f66bc5-b2f1-45b7-a7cd-6dd6ad0017f5'}
-
- updated_vnf_mapping = \
- {'VNF1': '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6',
- 'VNF3': '10f66bc5-b2f1-45b7-a7cd-6dd6ad0017f5'}
- updated_vnffg['vnffg']['vnf_mapping'] = updated_vnf_mapping
- result = self.nfvo_plugin.update_vnffg(self.context, vnffg['id'],
- updated_vnffg)
- self.assertIn('id', result)
- self.assertIn('status', result)
- self.assertIn('vnf_mapping', result)
- self.assertEqual('ffc1a59b-65bb-4874-94d3-84f639e63c74',
- result['id'])
- self.assertEqual('PENDING_UPDATE', result['status'])
- for vnfd, vnf in result['vnf_mapping'].items():
- self.assertIn(vnfd, expected_mapping)
- self.assertEqual(vnf, expected_mapping[vnfd])
- self._driver_manager.invoke.assert_called_with(mock.ANY,
- mock.ANY,
- vnfs=mock.ANY,
- fc_ids=mock.ANY,
- chain_id=mock.ANY,
- auth_attr=mock.ANY)
-
- def test_update_vnffg_vnffgd_template(self):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- updated_vnffg = utils.get_dummy_vnffg_obj_update_vnffgd_template()
- expected_mapping = {'VNF1': '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6'}
-
- updated_vnf_mapping = \
- {'VNF1': '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6'}
- updated_vnffg['vnffg']['vnf_mapping'] = updated_vnf_mapping
- result = self.nfvo_plugin.update_vnffg(self.context, vnffg['id'],
- updated_vnffg)
- self.assertIn('id', result)
- self.assertIn('status', result)
- self.assertIn('vnf_mapping', result)
- self.assertEqual('ffc1a59b-65bb-4874-94d3-84f639e63c74',
- result['id'])
- for vnfd, vnf in result['vnf_mapping'].items():
- self.assertIn(vnfd, expected_mapping)
- self.assertEqual(vnf, expected_mapping[vnfd])
- self._driver_manager.invoke.assert_called_with(mock.ANY,
- mock.ANY,
- vnfs=mock.ANY,
- fc_ids=mock.ANY,
- chain_id=mock.ANY,
- auth_attr=mock.ANY)
-
- def test_update_vnffg_legacy_vnffgd_template(self):
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock.patch('tacker.common.driver_manager.DriverManager',
- side_effect=FakeDriverManager()).start()
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- updated_vnffg = utils.get_dummy_vnffg_obj_legacy_vnffgd_template()
- self.assertRaises(nfvo.UpdateVnffgException,
- self.nfvo_plugin.update_vnffg,
- self.context, vnffg['id'], updated_vnffg)
-
- def test_delete_vnffg(self):
- self._insert_dummy_vnffg_template()
- vnffg = self._insert_dummy_vnffg()
- self._mock_driver_manager()
- mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
- side_effect=dummy_get_vim).start()
- self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- self.nfvo_plugin.delete_vnffg(self.context, vnffg['id'])
- self._driver_manager.invoke.assert_called_with(mock.ANY, mock.ANY,
- fc_id=mock.ANY,
- auth_attr=mock.ANY)
-
- def _insert_dummy_ns_template(self):
- session = self.context.session
- attributes = {
- 'nsd': 'imports: [VNF1, VNF2]\ntopology_template:\n inputs:\n '
- ' vl1_name: {default: net_mgmt, description: name of VL1'
- ' virtuallink, type: string}\n vl2_name: {default: '
- 'net0, description: name of VL2 virtuallink, type: string'
- '}\n node_templates:\n VL1:\n properties:\n '
- ' network_name: {get_input: vl1_name}\n vendor: '
- 'tacker\n type: tosca.nodes.nfv.VL\n VL2:\n '
- 'properties:\n network_name: {get_input: vl2_name}'
- '\n vendor: tacker\n type: tosca.nodes.nfv.VL'
- '\n VNF1:\n requirements:\n - {virtualLink1: '
- 'VL1}\n - {virtualLink2: VL2}\n type: tosca.node'
- 's.nfv.VNF1\n VNF2: {type: tosca.nodes.nfv.VNF2}\ntosca'
- '_definitions_version: tosca_simple_profile_for_nfv_1_0_0'
- '\n'}
- nsd_template = ns_db.NSD(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- vnfds={'tosca.nodes.nfv.VNF1': 'vnf1',
- 'tosca.nodes.nfv.VNF2': 'vnf2'},
- description='fake_nsd_template_description',
- deleted_at=datetime.min,
- template_source='onboarded')
- session.add(nsd_template)
- for (key, value) in attributes.items():
- attribute_db = ns_db.NSDAttribute(
- id=uuidutils.generate_uuid(),
- nsd_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- key=key,
- value=value)
- session.add(attribute_db)
- session.flush()
- return nsd_template
-
- def _insert_dummy_ns_template_inline(self):
- session = self.context.session
- attributes = {
- 'nsd': 'imports: [VNF1, VNF2]\ntopology_template:\n inputs:\n '
- ' vl1_name: {default: net_mgmt, description: name of VL1'
- ' virtuallink, type: string}\n vl2_name: {default: '
- 'net0, description: name of VL2 virtuallink, type: string'
- '}\n node_templates:\n VL1:\n properties:\n '
- ' network_name: {get_input: vl1_name}\n vendor: '
- 'tacker\n type: tosca.nodes.nfv.VL\n VL2:\n '
- 'properties:\n network_name: {get_input: vl2_name}'
- '\n vendor: tacker\n type: tosca.nodes.nfv.VL'
- '\n VNF1:\n requirements:\n - {virtualLink1: '
- 'VL1}\n - {virtualLink2: VL2}\n type: tosca.node'
- 's.nfv.VNF1\n VNF2: {type: tosca.nodes.nfv.VNF2}\ntosca'
- '_definitions_version: tosca_simple_profile_for_nfv_1_0_0'
- '\n'}
- nsd_template = ns_db.NSD(
- id='be18005d-5656-4d81-b499-6af4d4d8437f',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='dummy_NSD',
- vnfds={'tosca.nodes.nfv.VNF1': 'vnf1',
- 'tosca.nodes.nfv.VNF2': 'vnf2'},
- description='dummy_nsd_description',
- deleted_at=datetime.min,
- template_source='inline')
- session.add(nsd_template)
- for (key, value) in attributes.items():
- attribute_db = ns_db.NSDAttribute(
- id=uuidutils.generate_uuid(),
- nsd_id='be18005d-5656-4d81-b499-6af4d4d8437f',
- key=key,
- value=value)
- session.add(attribute_db)
- session.flush()
- return nsd_template
-
- def _insert_dummy_ns(self, status='ACTIVE'):
- session = self.context.session
- ns = ns_db.NS(
- id='ba6bf017-f6f7-45f1-a280-57b073bf78ea',
- name='dummy_ns',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- status=status,
- nsd_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- description='dummy_ns_description',
- deleted_at=datetime.min)
- session.add(ns)
- session.flush()
- return ns
-
- def _insert_dummy_ns_2(self):
- session = self.context.session
- ns = ns_db.NS(
- id=DUMMY_NS_2,
- name='fake_ns',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- status='ACTIVE',
- nsd_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- description='fake_ns_description',
- deleted_at=datetime.min)
- session.add(ns)
- session.flush()
- return ns
-
- def test_create_nsd(self):
- nsd_obj = utils.get_dummy_nsd_obj()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- result = self.nfvo_plugin.create_nsd(self.context, nsd_obj)
- self.assertIsNotNone(result)
- self.assertEqual('dummy_NSD', result['name'])
- self.assertIn('id', result)
- self.assertEqual('dummy_NSD', result['name'])
- self.assertEqual('onboarded', result['template_source'])
- self.assertEqual('8819a1542a5948b68f94d4be0fd50496',
- result['tenant_id'])
- self.assertIn('attributes', result)
- self.assertIn('created_at', result)
- self.assertIn('updated_at', result)
-
- def test_create_nsd_without_vnfd_imports(self):
- nsd_obj = utils.get_dummy_nsd_obj()
- # Remove vnfd import section from nsd.
- nsd_obj['nsd']['attributes']['nsd'].pop('imports')
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- self.assertRaises(nfvo.ToscaParserFailed,
- self.nfvo_plugin.create_nsd, self.context, nsd_obj)
-
- def test_create_nsd_inline(self):
- nsd_obj = utils.get_dummy_nsd_obj_inline()
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- result = self.nfvo_plugin.create_nsd(self.context, nsd_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertEqual('dummy_NSD_inline', result['name'])
- self.assertEqual('inline', result['template_source'])
- self.assertEqual('8819a1542a5948b68f94d4be0fd50496',
- result['tenant_id'])
- self.assertIn('attributes', result)
- self.assertIn('created_at', result)
- self.assertIn('updated_at', result)
-
- @mock.patch.object(nfvo_plugin.NfvoPlugin, 'get_auth_dict')
- @mock.patch.object(vim_client.VimClient, 'get_vim',
- return_value={"vim_type": "openstack"})
- @mock.patch.object(nfvo_plugin.NfvoPlugin, '_get_by_name')
- @mock.patch.object(uuidutils, 'generate_uuid',
- return_value=test_constants.UUID)
- def test_create_ns(self, mock_uuid, mock_get_by_name, mock_get_vimi,
- mock_auth_dict):
- self._insert_dummy_ns_template()
- self._insert_dummy_vim()
- mock_auth_dict.return_value = {
- 'auth_url': 'http://127.0.0.1',
- 'token': 'DummyToken',
- 'project_domain_name': 'dummy_domain',
- 'project_name': 'dummy_project'
- }
-
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock_get_by_name.return_value = get_by_name()
-
- ns_obj = utils.get_dummy_ns_obj()
- result = self.nfvo_plugin.create_ns(self.context, ns_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertEqual(ns_obj['ns']['nsd_id'], result['nsd_id'])
- self.assertEqual(ns_obj['ns']['name'], result['name'])
- self.assertIn('status', result)
- self.assertIn('tenant_id', result)
-
- @mock.patch.object(nfvo_plugin.NfvoPlugin, 'get_auth_dict')
- @mock.patch.object(vim_client.VimClient, 'get_vim',
- return_value={"vim_type": "openstack"})
- @mock.patch.object(nfvo_plugin.NfvoPlugin, '_get_by_name')
- def test_create_ns_empty_description(self, mock_get_by_name, mock_get_vim,
- mock_auth_dict):
- self._insert_dummy_ns_template()
- self._insert_dummy_vim()
- mock_auth_dict.return_value = {
- 'auth_url': 'http://127.0.0.1',
- 'token': 'DummyToken',
- 'project_domain_name': 'dummy_domain',
- 'project_name': 'dummy_project'
- }
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock_get_by_name.return_value = get_by_name()
-
- ns_obj = utils.get_dummy_ns_obj()
- ns_obj['ns']['description'] = ''
- result = self.nfvo_plugin.create_ns(self.context, ns_obj)
- self.assertIn('id', result)
- self.assertEqual(ns_obj['ns']['name'], result['name'])
- self.assertEqual('', result['description'])
-
- @mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin.create_nsd')
- @mock.patch.object(nfvo_plugin.NfvoPlugin, 'get_auth_dict')
- @mock.patch.object(vim_client.VimClient, 'get_vim',
- return_value={"vim_type": "openstack"})
- @mock.patch.object(nfvo_plugin.NfvoPlugin, '_get_by_name')
- def test_create_ns_inline(self, mock_get_by_name,
- mock_get_vimi, mock_auth_dict, mock_create_nsd):
- self._insert_dummy_ns_template_inline()
- self._insert_dummy_vim()
- mock_auth_dict.return_value = {
- 'auth_url': 'http://127.0.0.1',
- 'token': 'DummyToken',
- 'project_domain_name': 'dummy_domain',
- 'project_name': 'dummy_project'
- }
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock_get_by_name.return_value = get_by_name()
- mock_create_nsd.return_value = {'id':
- 'be18005d-5656-4d81-b499-6af4d4d8437f'}
-
- ns_obj = utils.get_dummy_ns_obj_inline()
- result = self.nfvo_plugin.create_ns(self.context, ns_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertEqual(ns_obj['ns']['nsd_id'], result['nsd_id'])
- self.assertEqual(ns_obj['ns']['name'], result['name'])
- self.assertEqual('dummy_ns_inline', result['name'])
- self.assertIn('status', result)
- self.assertIn('tenant_id', result)
- mock_create_nsd.assert_called_once_with(mock.ANY, mock.ANY)
-
- @mock.patch.object(nfvo_plugin.NfvoPlugin, 'get_auth_dict')
- @mock.patch.object(vim_client.VimClient, 'get_vim')
- @mock.patch.object(nfvo_plugin.NfvoPlugin, '_get_by_name')
- def test_delete_ns(self, mock_get_by_name, mock_get_vim, mock_auth_dict):
- self._insert_dummy_vim()
- self._insert_dummy_ns_template()
- self._insert_dummy_ns()
- mock_auth_dict.return_value = {
- 'auth_url': 'http://127.0.0.1',
- 'token': 'DummyToken',
- 'project_domain_name': 'dummy_domain',
- 'project_name': 'dummy_project'
- }
-
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock_get_by_name.return_value = get_by_name()
- result = self.nfvo_plugin.delete_ns(self.context,
- 'ba6bf017-f6f7-45f1-a280-57b073bf78ea')
- self.assertIsNotNone(result)
-
- @mock.patch.object(nfvo_plugin.NfvoPlugin, 'get_auth_dict')
- @mock.patch.object(vim_client.VimClient, 'get_vim')
- @mock.patch.object(nfvo_plugin.NfvoPlugin, '_get_by_name')
- def test_delete_ns_force(self, mock_get_by_name,
- mock_get_vim, mock_auth_dict):
- self._insert_dummy_vim()
- self._insert_dummy_ns_template()
- self._insert_dummy_ns(status='PENDING_DELETE')
- mock_auth_dict.return_value = {
- 'auth_url': 'http://127.0.0.1',
- 'token': 'DummyToken',
- 'project_domain_name': 'dummy_domain',
- 'project_name': 'dummy_project'
- }
- nsattr = {'ns': {'attributes': {'force': True}}}
-
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock_get_by_name.return_value = get_by_name()
- result = self.nfvo_plugin.delete_ns(self.context,
- 'ba6bf017-f6f7-45f1-a280-57b073bf78ea', ns=nsattr)
- self.assertIsNotNone(result)
-
- @mock.patch.object(nfvo_plugin.NfvoPlugin, 'get_auth_dict')
- @mock.patch.object(vim_client.VimClient, 'get_vim')
- @mock.patch.object(nfvo_plugin.NfvoPlugin, '_get_by_name')
- def test_delete_ns_force_non_admin(self, mock_get_by_name,
- mock_get_vim, mock_auth_dict):
- self._insert_dummy_vim()
- self._insert_dummy_ns_template()
- self._insert_dummy_ns(status='PENDING_DELETE')
- mock_auth_dict.return_value = {
- 'auth_url': 'http://127.0.0.1',
- 'token': 'DummyToken',
- 'project_domain_name': 'dummy_domain',
- 'project_name': 'dummy_project'
- }
- nsattr = {'ns': {'attributes': {'force': True}}}
-
- with patch.object(TackerManager, 'get_service_plugins') as \
- mock_plugins:
- mock_plugins.return_value = {'VNFM': FakeVNFMPlugin()}
- mock_get_by_name.return_value = get_by_name()
- non_admin_context = context.Context(user_id=None,
- tenant_id=None,
- is_admin=False)
- self.assertRaises(exceptions.AdminRequired,
- self.nfvo_plugin.delete_ns,
- non_admin_context,
- 'ba6bf017-f6f7-45f1-a280-57b073bf78ea',
- ns=nsattr)
-
- @mock.patch('oslo_config.cfg.ConfigOpts.__getattr__')
- @mock.patch('barbicanclient.base.validate_ref_and_return_uuid')
- @mock.patch('cryptography.fernet.Fernet.decrypt')
- def test_build_vim_auth_barbican_external(
- self, mock_decrypt, mock_validate, mock_get_conf_key):
- mock_get_conf_key.side_effect = get_mock_conf_key_effect()
- self._mock_external_token_api()
- barbican_uuid = 'test_uuid'
- mock_validate.return_value = barbican_uuid
- vim_dict = {'id': 'aaaa', 'name': 'VIM0', 'type': 'test_vim',
- 'auth_cred': {'username': 'test',
- 'user_domain_name': 'test',
- 'cert_verify': 'True',
- 'project_id': 'test',
- 'project_name': 'test',
- 'project_domain_name': 'test',
- 'auth_url': 'http://test/identity/v3',
- 'key_type': 'barbican_key',
- 'secret_uuid': '***',
- 'password': '***'},
- 'auth_url': 'http://127.0.0.1/identity/v3',
- 'placement_attr': {'regions': ['TestRegionOne']},
- 'tenant_id': 'test'}
-
- def mock_barbican_resp(request, context):
- auth_value = 'Bearer %s' % self.access_token
- req_auth = request._request.headers.get('Authorization')
- self.assertEqual(auth_value, req_auth)
- response = {
- 'name': 'AES key',
- 'expiration': '2023-01-13T19:14:44.180394',
- 'algorithm': 'aes',
- 'bit_length': 256,
- 'mode': 'cbc',
- 'payload': 'YmVlcg==',
- 'payload_content_type': 'application/octet-stream',
- 'payload_content_encoding': 'base64'
- }
- context.status_code = 200
- return response
- self.requests_mock.get('http://demo/barbican/v1/secrets/%s' %
- barbican_uuid,
- json=mock_barbican_resp)
-
- def mock_barbican_payload_resp(request, context):
- auth_value = 'Bearer %s' % self.access_token
- req_auth = request._request.headers.get('Authorization')
- self.assertEqual(auth_value, req_auth)
- response = '5cJeztZKzISf1JAt73oBeTPPCrymn96A3wqG96F4RxU='
- context.status_code = 200
- return response
-
- def mock_get_barbican_resp(request, context):
- auth_value = 'Bearer %s' % self.access_token
- req_auth = request._request.headers.get('Authorization')
- self.assertEqual(auth_value, req_auth)
- context.status_code = 200
- response = {
- "versions": {
- "values": [
- {
- "id": "v1",
- "status": "stable",
- "links": [
- {
- "rel": "self",
- "href": "http://demo/barbican/v1/"
- },
- {
- "rel": "describedby",
- "type": "text/html",
- "href": "https://docs.openstack.org/"}
- ],
- "media-types": [
- {
- "base": "application/json",
- "type": "application/"
- "vnd.openstack.key-manager-v1+json"
- }
- ]
- }
- ]
- }
- }
- return response
- self.requests_mock.get('http://demo/barbican/v1/secrets/%s/payload' %
- barbican_uuid,
- json=mock_barbican_payload_resp)
- self.requests_mock.get('http://demo/barbican',
- json=mock_get_barbican_resp)
- mock_decrypt.return_value = 'test'.encode('utf-8')
-
- self.nfvo_plugin._build_vim_auth(vim_dict)
diff --git a/tacker/tests/unit/nfvo/workflows/__init__.py b/tacker/tests/unit/nfvo/workflows/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/nfvo/workflows/vim_monitor/__init__.py b/tacker/tests/unit/nfvo/workflows/vim_monitor/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/objects/test_vnf.py b/tacker/tests/unit/objects/test_vnf.py
deleted file mode 100644
index 916ec53ec..000000000
--- a/tacker/tests/unit/objects/test_vnf.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from unittest import mock
-
-import datetime
-import iso8601
-from tacker import context
-from tacker.db.nfvo import nfvo_db
-from tacker import objects
-from tacker.tests.unit.db.base import SqlTestCase
-from tacker.tests.unit.objects import fakes
-from tacker.tests import uuidsentinel
-
-
-class TestVnf(SqlTestCase):
-
- def setUp(self):
- super(TestVnf, self).setUp()
- self.context = context.get_admin_context()
- self.vnfd = self._create_vnfd()
- self.vims = self._create_vims()
-
- def _create_vnfd(self):
- vnfd_obj = objects.Vnfd(context=self.context, **fakes.vnfd_data)
- vnfd_obj.create()
-
- return vnfd_obj
-
- def _create_vims(self):
- vim_obj = nfvo_db.Vim(**fakes.vim_data)
-
- return vim_obj
-
- def test_save(self):
- vnf_data = fakes.get_vnf(self.vnfd.id, self.vims.id)
- vnf_obj = objects.vnf.VNF(context=self.context, **vnf_data)
-
- vnf_obj.id = uuidsentinel.instance_id
- vnf_obj.status = 'ERROR'
- vnf_obj.updated_at = datetime.datetime(
- 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC)
- vnf_obj.save()
-
- self.assertEqual('ERROR', vnf_obj.status)
-
- @mock.patch('tacker.objects.vnf._vnf_get')
- def test_vnf_index_list(self, mock_vnf_get):
- vnf_data = fakes.get_vnf(self.vnfd.id, self.vims.id)
- vnf_obj = objects.vnf.VNF(context=self.context, **vnf_data)
-
- vnf_obj.id = uuidsentinel.instance_id
- vnf_obj.updated_at = datetime.datetime(
- 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC)
- vnf_obj.save()
-
- mock_vnf_get.return_value = vnf_obj
- vnf_data_result = vnf_obj.vnf_index_list(vnf_obj.id, self.context)
- self.assertEqual('ACTIVE', vnf_data_result.status)
diff --git a/tacker/tests/unit/plugins/__init__.py b/tacker/tests/unit/plugins/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/services/__init__.py b/tacker/tests/unit/services/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/test_alarm_receiver.py b/tacker/tests/unit/test_alarm_receiver.py
deleted file mode 100644
index a041576cd..000000000
--- a/tacker/tests/unit/test_alarm_receiver.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright 2015 Brocade Communications System, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_serialization import jsonutils
-from webob import Request
-
-from tacker.alarm_receiver import AlarmReceiver
-from tacker.tests.unit import base
-
-
-class TestAlarmReceiver(base.TestCase):
- def setUp(self):
- '''url:
-
- http://tacker:9890/v1.0/vnfs/vnf-uuid/mon-policy-name/
- action-name/8ef785
- '''
- super(TestAlarmReceiver, self).setUp()
- self.alarmrc = AlarmReceiver(None)
- self.alarm_url = {
- '00_base': 'http://tacker:9890/v1.0',
- '01_url_base': '/vnfs/vnf-uuid/',
- '02_vnf_id': 'vnf-uuid',
- '03_monitoring_policy_name': 'mon-policy-name',
- '04_action_name': 'action-name',
- '05_key': '8ef785'
- }
- self.vnf_id = 'vnf-uuid'
- self.ordered_url = self._generate_alarm_url()
-
- def _generate_alarm_url(self):
- return 'http://tacker:9890/v1.0/vnfs/vnf-uuid/mon-policy-name/'\
- 'action-name/8ef785'
-
- def test_handle_url(self):
- prefix_url, p, params = self.alarmrc.handle_url(self.ordered_url)
- self.assertEqual(self.alarm_url['01_url_base'], prefix_url)
- self.assertEqual(self.alarm_url['02_vnf_id'], p[3])
- self.assertEqual(self.alarm_url['03_monitoring_policy_name'], p[4])
- self.assertEqual(self.alarm_url['04_action_name'], p[5])
- self.assertEqual(self.alarm_url['05_key'], p[6])
-
- def test_handle_url_action_name(self):
- new_url = 'http://tacker:9890/v1.0/vnfs/vnf-uuid/mon-policy-name/'\
- 'respawn%25log/8ef785'
- prefix_url, p, params = self.alarmrc.handle_url(new_url)
- self.assertEqual(self.alarm_url['01_url_base'], prefix_url)
- self.assertEqual(self.alarm_url['02_vnf_id'], p[3])
- self.assertEqual(self.alarm_url['03_monitoring_policy_name'], p[4])
- self.assertEqual('respawn%log', p[5])
- self.assertEqual(self.alarm_url['05_key'], p[6])
-
- @mock.patch('tacker.vnfm.monitor_drivers.token.Token.create_token')
- def test_process_request(self, mock_create_token):
- mock_create_token.return_value = 'fake_token'
- req = Request.blank(self.ordered_url)
- req.method = 'POST'
- self.alarmrc.process_request(req)
-
- self.assertEqual(bytes('', 'utf-8'), req.body)
- self.assertEqual('fake_token', req.headers['X_AUTH_TOKEN'])
- self.assertIn(self.alarm_url['01_url_base'], req.environ['PATH_INFO'])
- self.assertIn('triggers', req.environ['PATH_INFO'])
- self.assertEqual('', req.environ['QUERY_STRING'])
- mock_create_token.assert_called_once_with()
-
- @mock.patch('tacker.vnfm.monitor_drivers.token.Token.create_token')
- def test_process_request_with_body(self, mock_create_token):
- req = Request.blank(self.ordered_url)
- req.method = 'POST'
- old_body = {'fake_key': 'fake_value'}
- req.body = jsonutils.dump_as_bytes(old_body)
-
- self.alarmrc.process_request(req)
-
- body_dict = jsonutils.loads(req.body)
- self.assertDictEqual(old_body,
- body_dict['trigger']['params']['data'])
- self.assertEqual(self.alarm_url['05_key'],
- body_dict['trigger']['params']['credential'])
- self.assertEqual(self.alarm_url['03_monitoring_policy_name'],
- body_dict['trigger']['policy_name'])
- self.assertEqual(self.alarm_url['04_action_name'],
- body_dict['trigger']['action_name'])
diff --git a/tacker/tests/unit/tosca/__init__.py b/tacker/tests/unit/tosca/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/tosca/test_tosca_templates_under_samples.py b/tacker/tests/unit/tosca/test_tosca_templates_under_samples.py
deleted file mode 100644
index 01b503ecc..000000000
--- a/tacker/tests/unit/tosca/test_tosca_templates_under_samples.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-
-from toscaparser import tosca_template
-from toscaparser.utils import yamlparser
-from translator.hot import tosca_translator
-
-from tacker.tests import utils
-from tacker.tosca import utils as toscautils
-
-
-class TestSamples(testtools.TestCase):
- """Sample tosca validation.
-
- Helps to validate the tosca templates provided in the samples folder
- to make sure whether its valid YAML, valid TOSCA and
- possible to translate into HOT template.
- """
-
- def _get_list_of_sample(self, tosca_files):
- if tosca_files:
- base_path = utils.test_sample('../tosca-templates/vnfd/')
- if isinstance(tosca_files, list):
- list_of_samples = []
- for tosca_file in tosca_files:
- sample = base_path + tosca_file
- list_of_samples.append(sample)
- return list_of_samples
-
- def _test_samples(self, files):
- if files:
- for f in self._get_list_of_sample(files):
- with open(f, 'r') as _f:
- yaml_dict = None
- try:
- yaml_dict = yamlparser.simple_ordered_parse(_f.read())
- except: # noqa
- pass
- self.assertIsNotNone(
- yaml_dict,
- "Yaml parser failed to parse %s" % f)
-
- toscautils.updateimports(yaml_dict)
-
- tosca = None
- try:
- tosca = tosca_template.ToscaTemplate(
- a_file=False, yaml_dict_tpl=yaml_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- except: # noqa
- pass
-
- self.assertIsNotNone(
- tosca,
- "Tosca parser failed to parse %s" % f)
- toscautils.post_process_template(tosca)
- hot = None
- try:
- hot = tosca_translator.TOSCATranslator(tosca,
- {}).translate()
- except: # noqa
- pass
-
- self.assertIsNotNone(
- hot,
- "Heat-translator failed to translate %s" % f)
-
- def test_scale_sample(self, tosca_file=['tosca-vnfd-scale.yaml']):
- self._test_samples(tosca_file)
-
- def test_alarm_sample(self, tosca_file=['tosca-vnfd-alarm-scale.yaml']):
- self._test_samples(tosca_file)
-
- def test_list_samples(self,
- files=['tosca-vnfd-scale.yaml',
- 'tosca-vnfd-alarm-scale.yaml']):
- self._test_samples(files)
diff --git a/tacker/tests/unit/vnflcm/fakes.py b/tacker/tests/unit/vnflcm/fakes.py
index 9b2e90d40..426e4935e 100644
--- a/tacker/tests/unit/vnflcm/fakes.py
+++ b/tacker/tests/unit/vnflcm/fakes.py
@@ -717,19 +717,6 @@ def get_vnfd_dict(image_path=None):
return vnfd_dict
-def get_dummy_vnf_instance():
- connection_info = get_dummy_vim_connection_info()
- return {'created_at': '', 'deleted': False, 'deleted_at': None,
- 'id': 'fake_id', 'instantiated_vnf_info': None,
- 'instantiation_state': 'NOT_INSTANTIATED',
- 'tenant_id': 'fake_tenant_id', 'updated_at': '',
- 'vim_connection_info': [connection_info],
- 'vnf_instance_description': 'VNF Description',
- 'vnf_instance_name': 'test', 'vnf_product_name': 'Sample VNF',
- 'vnf_provider': 'Company', 'vnf_software_version': '1.0',
- 'vnfd_id': 'fake_vnfd_id', 'vnfd_version': '1.0'}
-
-
def get_dummy_vim_connection_info():
return {'access_info': {
'auth_url': 'fake/url',
diff --git a/tacker/tests/unit/vnflcm/test_controller.py b/tacker/tests/unit/vnflcm/test_controller.py
index 1d4e07e87..1944fa471 100644
--- a/tacker/tests/unit/vnflcm/test_controller.py
+++ b/tacker/tests/unit/vnflcm/test_controller.py
@@ -48,7 +48,6 @@ from tacker.tests import constants
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.tests.unit import fake_request
-import tacker.tests.unit.nfvo.test_nfvo_plugin as test_nfvo_plugin
from tacker.tests.unit.vnflcm import fakes
from tacker.tests import uuidsentinel
import tacker.vnfm.nfvo_client as nfvo_client
@@ -72,83 +71,26 @@ class FakeVNFMPlugin(mock.Mock):
super(FakeVNFMPlugin, self).__init__()
self.vnf1_vnfd_id = 'eb094833-995e-49f0-a047-dfb56aaf7c4e'
self.vnf1_vnf_id = '91e32c20-6d1f-47a4-9ba7-08f5e5effe07'
- self.vnf1_update_vnf_id = '91e32c20-6d1f-47a4-9ba7-08f5e5effaf6'
- self.vnf2_vnfd_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
self.vnf3_vnfd_id = 'e4015e9f-1ef2-49fb-adb6-070791ad3c45'
self.vnf3_vnf_id = '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b'
- self.vnf3_update_vnf_id = '10f66bc5-b2f1-45b7-a7cd-6dd6ad0017f5'
self.vnf_for_cnf_vnfd_id = 'e889e4fe-52fe-437d-b1e1-a690dc95e3f8'
self.vnf_for_cnf_vnf_id = '436aaa6e-2db6-4d6e-a3fc-e728b2f0ac56'
- self.cp11_id = 'd18c8bae-898a-4932-bff8-d5eac981a9c9'
- self.cp11_update_id = 'a18c8bae-898a-4932-bff8-d5eac981a9b8'
- self.cp12_id = 'c8906342-3e30-4b2a-9401-a251a7a9b5dd'
- self.cp12_update_id = 'b8906342-3e30-4b2a-9401-a251a7a9b5cc'
- self.cp32_id = '3d1bd2a2-bf0e-44d1-87af-a2c6b2cad3ed'
- self.cp32_update_id = '064c0d99-5a61-4711-9597-2a44dc5da14b'
-
def get_vnfd(self, *args, **kwargs):
- if 'VNF1' in args:
- return {'id': self.vnf1_vnfd_id,
- 'name': 'VNF1',
- 'attributes': {'vnfd': _get_template(
- 'test-nsd-vnfd1.yaml')}}
- elif 'VNF2' in args:
- return {'id': self.vnf3_vnfd_id,
- 'name': 'VNF2',
- 'attributes': {'vnfd': _get_template(
- 'test-nsd-vnfd2.yaml')}}
-
- def get_vnfds(self, *args, **kwargs):
- if {'name': ['VNF1']} in args:
- return [{'id': self.vnf1_vnfd_id}]
- elif {'name': ['VNF3']} in args:
- return [{'id': self.vnf3_vnfd_id}]
- else:
- return []
-
- def get_vnfs(self, *args, **kwargs):
- if {'vnfd_id': [self.vnf1_vnfd_id]} in args:
- return [{'id': self.vnf1_vnf_id}]
- elif {'vnfd_id': [self.vnf3_vnfd_id]} in args:
- return [{'id': self.vnf3_vnf_id}]
- else:
- return None
+ return {'id': self.vnf1_vnfd_id,
+ 'name': 'dummy VNF',
+ 'attributes': {'vnfd': "dummy"}}
def get_vnf(self, *args, **kwargs):
if self.vnf1_vnf_id in args:
return self.get_dummy_vnf_error()
elif self.vnf3_vnf_id in args:
- return self.get_dummy_vnf_not_error()
+ return self.get_dummy_vnf_not_found_error()
elif self.vnf_for_cnf_vnf_id in args:
return fakes.vnf_dict_cnf()
else:
return self.get_dummy_vnf_active()
- def get_vnf_resources(self, *args, **kwargs):
- if self.vnf1_vnf_id in args:
- return self.get_dummy_vnf1_details()
- elif self.vnf1_update_vnf_id in args:
- return self.get_dummy_vnf1_update_details()
- elif self.vnf3_vnf_id in args:
- return self.get_dummy_vnf3_details()
- elif self.vnf3_update_vnf_id in args:
- return self.get_dummy_vnf3_update_details()
-
- def get_dummy_vnf1_details(self):
- return [{'name': 'CP11', 'id': self.cp11_id},
- {'name': 'CP12', 'id': self.cp12_id}]
-
- def get_dummy_vnf1_update_details(self):
- return [{'name': 'CP11', 'id': self.cp11_update_id},
- {'name': 'CP12', 'id': self.cp12_update_id}]
-
- def get_dummy_vnf3_details(self):
- return [{'name': 'CP32', 'id': self.cp32_id}]
-
- def get_dummy_vnf3_update_details(self):
- return [{'name': 'CP32', 'id': self.cp32_update_id}]
-
def get_dummy_vnf_active(self):
return {'tenant_id': uuidsentinel.tenant_id,
'name': "fake_name",
@@ -179,10 +121,13 @@ class FakeVNFMPlugin(mock.Mock):
"scale_group": '{"scaleGroupDict":' +
'{"SP1": {"maxLevel" : 3}}}'}}
- def get_dummy_vnf_not_error(self):
+ def get_dummy_vnf_not_found_error(self):
msg = _('VNF %(vnf_id)s could not be found')
raise vnfm.VNFNotFound(explanation=msg)
+ def _update_vnf_scaling(self, *args, **kwargs):
+ pass
+
@ddt.ddt
class TestController(base.TestCase):
@@ -193,7 +138,7 @@ class TestController(base.TestCase):
super(TestController, self).setUp()
self.patcher = mock.patch(
'tacker.manager.TackerManager.get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
self.mock_manager = self.patcher.start()
self.controller = controller.VnfLcmController()
self.vim_info = {
@@ -206,12 +151,9 @@ class TestController(base.TestCase):
}
self.context = context.get_admin_context()
- with mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, 'get_vnfs',
- return_value=[]):
- with mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()}):
- self.controller = controller.VnfLcmController()
+ with mock.patch.object(TackerManager, 'get_service_plugins',
+ return_value={'VNFM': FakeVNFMPlugin()}):
+ self.controller = controller.VnfLcmController()
def tearDown(self):
self.mock_manager.stop()
@@ -222,7 +164,7 @@ class TestController(base.TestCase):
return fakes.wsgi_app_v1()
def _get_dummy_vnf(self, vnf_id=None, status=None):
- vnf_dict = utils.get_dummy_vnf()
+ vnf_dict = utils.get_dummy_vnf_etsi()
if status:
vnf_dict['status'] = status
@@ -261,8 +203,7 @@ class TestController(base.TestCase):
'VnfLcmController._create_vnf')
@mock.patch.object(objects.vnf_package.VnfPackage, 'save')
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_instance, '_vnf_instance_update')
@mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
@@ -316,8 +257,7 @@ class TestController(base.TestCase):
self.assertEqual(location_header, resp.headers['location'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data(
{'attribute': 'vnfdId', 'value': True,
'expected_type': 'uuid'},
@@ -370,7 +310,7 @@ class TestController(base.TestCase):
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
def test_create_non_existing_vnf_package_vnfd(self, mock_vnf_by_id,
mock_get_service_plugins,
@@ -399,7 +339,7 @@ class TestController(base.TestCase):
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._create_vnf')
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(objects.vnf_instance, '_vnf_instance_update')
@@ -445,7 +385,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.CREATED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
@@ -481,7 +421,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.INTERNAL_SERVER_ERROR, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(nfvo_client.VnfPackageRequest, "index")
@mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
@@ -515,7 +455,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.NOT_FOUND, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_create_without_vnfd_id(self, mock_get_service_plugins):
body = {"vnfInstanceName": "SampleVnfInstance",
"metadata": {"key": "value"}}
@@ -529,8 +469,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data('PATCH', 'PUT', 'HEAD', 'DELETE')
def test_create_not_allowed_http_method(self, method,
mock_get_service_plugins):
@@ -545,7 +484,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data({'name': "A" * 256,
'description': "VNF Description",
'meta': {"key": "value"}},
@@ -576,8 +515,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -633,8 +571,7 @@ class TestController(base.TestCase):
mock_instantiate.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@@ -676,8 +613,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -730,8 +666,7 @@ class TestController(base.TestCase):
mock_insta_notif_process.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@@ -781,8 +716,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@@ -826,8 +760,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.VnfLcmController.'
'_notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -885,8 +818,7 @@ class TestController(base.TestCase):
mock_insta_notif_process.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@@ -935,8 +867,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@@ -986,8 +917,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(vim_client.VimClient, "get_vim")
@@ -1030,8 +960,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@@ -1055,8 +984,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.CONFLICT, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@@ -1087,8 +1015,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data({'attribute': 'flavourId', 'value': 123,
'expected_type': 'string'},
{'attribute': 'flavourId', 'value': True,
@@ -1128,8 +1055,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_message, exception.msg)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_instantiate_without_flavour_id(self,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
@@ -1147,8 +1073,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_instantiate_invalid_request_parameter(self,
mock_get_service_plugins):
body = {"flavourId": "simple"}
@@ -1169,8 +1094,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.INTERNAL_SERVER_ERROR, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_instantiate_with_invalid_uuid(self,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
@@ -1190,8 +1114,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.VnfInstance, "get_by_id")
@@ -1217,8 +1140,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'DELETE', 'PATCH', 'GET')
def test_instantiate_invalid_http_method(self, method,
mock_get_service_plugins):
@@ -1234,7 +1156,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM': test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
def test_show_vnf_not_instantiated(self, mock_vnf_by_id,
mock_get_service_plugins):
@@ -1247,8 +1169,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_result, res_dict)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_show_vnf_instantiated(self, mock_vnf_by_id,
mock_get_service_plugins):
@@ -1263,8 +1184,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_result, res_dict)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_show_vnf_instantiated_with_vim_info(
self, mock_vnf_by_id, mock_get_service_plugins):
@@ -1306,8 +1226,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_result, res_dict)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
def test_show_with_non_existing_vnf_instance(self, mock_vnf_by_id,
mock_get_service_plugins):
@@ -1324,8 +1243,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_show_with_invalid_uuid(self,
mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
@@ -1339,8 +1257,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'POST')
def test_show_invalid_http_method(self, http_method,
mock_get_service_plugins):
@@ -1353,8 +1270,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -1393,8 +1309,7 @@ class TestController(base.TestCase):
mock_notification_process.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data(
{'attribute': 'terminationType', 'value': "TEST",
'expected_type': 'enum'},
@@ -1430,8 +1345,7 @@ class TestController(base.TestCase):
self.assertIn(expected_message, exception.msg)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_terminate_missing_termination_type(self,
mock_get_service_plugins):
body = {'gracefulTerminationTimeout': 10}
@@ -1450,8 +1364,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data('GET', 'HEAD', 'PUT', 'DELETE', 'PATCH')
def test_terminate_invalid_http_method(self, method,
mock_get_service_plugins):
@@ -1468,8 +1381,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@@ -1494,8 +1406,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.VnfInstance, "get_by_id")
@@ -1528,8 +1439,7 @@ class TestController(base.TestCase):
mock_get_vnf.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -1564,8 +1474,7 @@ class TestController(base.TestCase):
mock_rpc_heal.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_heal_cause_max_length_exceeded(self,
mock_get_service_plugins):
body = {'cause': 'A' * 256}
@@ -1580,8 +1489,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.BAD_REQUEST, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -1616,8 +1524,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -1649,8 +1556,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -1686,8 +1592,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'DELETE', 'PATCH', 'GET')
def test_heal_invalid_http_method(self, method,
mock_get_service_plugins):
@@ -1703,8 +1608,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data({'attribute': 'cause', 'value': 123,
'expected_type': 'string'},
{'attribute': 'cause', 'value': True,
@@ -1758,8 +1662,7 @@ class TestController(base.TestCase):
self.assertEqual([], resp.json)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'DELETE', 'PATCH')
def test_index_invalid_http_method(self, method,
mock_get_service_plugins):
@@ -1774,8 +1677,7 @@ class TestController(base.TestCase):
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._delete')
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_instance, "_vnf_instance_get_by_id")
@mock.patch.object(objects.vnf_instance, '_destroy_vnf_instance')
def test_delete(self, mock_destroy_vnf_instance, mock_vnf_by_id,
@@ -1793,8 +1695,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.NO_CONTENT, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_delete_with_non_existing_vnf_instance(self, mock_vnf_by_id,
mock_get_service_plugins):
@@ -1814,8 +1715,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_delete_with_invalid_uuid(self, mock_get_service_plugins):
req = fake_request.HTTPRequest.blank(
'/vnf_instances/%s' % constants.INVALID_UUID)
@@ -1831,8 +1731,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_delete_with_incorrect_instantiation_state(
self, mock_vnf_by_id, mock_get_service_plugins):
@@ -1857,8 +1756,7 @@ class TestController(base.TestCase):
resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VnfInstance, "get_by_id")
def test_delete_with_incorrect_task_state(self, mock_vnf_by_id,
mock_get_service_plugins):
@@ -2223,8 +2121,7 @@ class TestController(base.TestCase):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
@mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
@mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
@@ -2271,8 +2168,7 @@ class TestController(base.TestCase):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
def test_update_vnf_none_vnf_data(
self,
@@ -2309,8 +2205,7 @@ class TestController(base.TestCase):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
def test_update_vnf_status_err(
self,
@@ -2350,8 +2245,7 @@ class TestController(base.TestCase):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
@mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
def test_update_vnf_none_instance_data(
@@ -2393,8 +2287,7 @@ class TestController(base.TestCase):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
"get_vnf_package_vnfd")
@@ -2450,8 +2343,7 @@ class TestController(base.TestCase):
mock_update.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
@mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
@mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
@@ -2493,8 +2385,7 @@ class TestController(base.TestCase):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@ddt.data("vnfdId", "vnfPkgId")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
"get_vnf_package_vnfd")
@@ -2550,8 +2441,7 @@ class TestController(base.TestCase):
@ddt.data("vnfdId", "vnfPkgId")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
"get_vnf_package_vnfd")
@@ -4288,8 +4178,7 @@ class TestController(base.TestCase):
mock_vnf_get_by_id.assert_called_once()
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -4323,8 +4212,7 @@ class TestController(base.TestCase):
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -4356,8 +4244,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.ACCEPTED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._get_vnf')
@mock.patch.object(objects.VnfInstance, "get_by_id")
@@ -4383,8 +4270,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_msg, resp.json['detail'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@ddt.data('HEAD', 'PUT', 'DELETE', 'PATCH', 'GET')
def test_change_ext_conn_invalid_http_method(self, method,
mock_get_service_plugins):
@@ -4400,8 +4286,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.METHOD_NOT_ALLOWED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -4457,8 +4342,7 @@ class TestController(base.TestCase):
resp.status_code, http_client.BAD_REQUEST)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_change_ext_conn_with_invalid_uuid(self, mock_get_service_plugins):
body = fakes.get_change_ext_conn_request_body()
req = fake_request.HTTPRequest.blank(
@@ -4663,8 +4547,7 @@ class TestController(base.TestCase):
self.assertEqual(http_client.CREATED, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(vnf_subscription_view.ViewBuilder,
"subscription_list")
@mock.patch.object(vnf_subscription_view.ViewBuilder,
@@ -4690,8 +4573,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_result, resp.json)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(vnf_subscription_view.ViewBuilder,
"subscription_list")
@mock.patch.object(vnf_subscription_view.ViewBuilder,
@@ -4715,8 +4597,7 @@ class TestController(base.TestCase):
self.assertEqual([], resp.json)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(vnf_subscription_view.ViewBuilder,
"validate_filter")
def test_subscription_list_error(self,
@@ -4815,8 +4696,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_result_link, resp.headers['Link'])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.LccnSubscriptionRequest,
"vnf_lcm_subscriptions_show")
def test_subscription_show(self, mock_get_subscription,
@@ -4836,8 +4716,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_vnf, resp.json)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(vnf_subscription_view.ViewBuilder,
"subscription_list")
@mock.patch.object(vnf_subscription_view.ViewBuilder,
@@ -4892,8 +4771,7 @@ class TestController(base.TestCase):
self.assertEqual(expected_result, resp.json)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.LccnSubscriptionRequest,
"vnf_lcm_subscriptions_show")
def test_subscription_show_not_found(self, mock_get_subscription,
@@ -4912,8 +4790,7 @@ class TestController(base.TestCase):
self.assertEqual(res.text, resp.text)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
def test_subscription_list_filter_error(self,
mock_get_service_plugins):
@@ -4925,8 +4802,7 @@ class TestController(base.TestCase):
self.assertEqual(400, resp.status_code)
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.LccnSubscriptionRequest,
"vnf_lcm_subscriptions_show")
def test_subscription_show_error(self, mock_get_subscription,
@@ -4946,8 +4822,7 @@ class TestController(base.TestCase):
@mock.patch.object(objects.VnfPackage, 'get_by_id')
@mock.patch.object(objects.vnf_package.VnfPackage, 'save')
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.vnf_instance, '_vnf_instance_create')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd, 'get_by_id')
def test_create_using_internal_methods(
@@ -5007,8 +4882,7 @@ class TestControllerEnhancedPolicy(TestController):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
@mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
@mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
@@ -5043,8 +4917,7 @@ class TestControllerEnhancedPolicy(TestController):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
def test_update_vnf_status_err(
self,
@@ -5081,8 +4954,7 @@ class TestControllerEnhancedPolicy(TestController):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
"get_vnf_package_vnfd")
@@ -5138,8 +5010,7 @@ class TestControllerEnhancedPolicy(TestController):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
def test_update_vnf_none_vnf_data(
self,
@@ -5173,8 +5044,7 @@ class TestControllerEnhancedPolicy(TestController):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
@mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
def test_update_vnf_none_instance_data(
@@ -5213,8 +5083,7 @@ class TestControllerEnhancedPolicy(TestController):
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
@mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
@mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
@@ -5258,8 +5127,7 @@ class TestControllerEnhancedPolicy(TestController):
@ddt.data("vnfdId", "vnfPkgId")
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(sync_resource.SyncVnfPackage, 'create_package')
@mock.patch.object(objects.vnf_package_vnfd.VnfPackageVnfd,
"get_vnf_package_vnfd")
@@ -5547,8 +5415,7 @@ class TestControllerEnhancedPolicy(TestController):
'update_vnf', http_client.ACCEPTED))
@ddt.unpack
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch.object(objects.VNF, "vnf_index_list")
@mock.patch.object(objects.VnfInstanceList, "vnf_instance_list")
@mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd')
@@ -5602,8 +5469,7 @@ class TestControllerEnhancedPolicy(TestController):
'change_ext_conn', http_client.ACCEPTED))
@ddt.unpack
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.'
'VnfLcmController._notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -5646,8 +5512,7 @@ class TestControllerEnhancedPolicy(TestController):
@ddt.data(*fakes.get_test_data_policy_instantiate())
@ddt.unpack
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.VnfLcmController.'
'_notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
@@ -5727,8 +5592,7 @@ class TestControllerEnhancedPolicy(TestController):
expected_vnf_inst_ids, [inst.get('id') for inst in resp.json])
@mock.patch.object(TackerManager, 'get_service_plugins',
- return_value={'VNFM':
- test_nfvo_plugin.FakeVNFMPlugin()})
+ return_value={'VNFM': FakeVNFMPlugin()})
@mock.patch('tacker.api.vnflcm.v1.controller.VnfLcmController.'
'_notification_process')
@mock.patch('tacker.api.vnflcm.v1.controller.'
diff --git a/tacker/tests/unit/vnflcm/test_load_vnf_interfaces.py b/tacker/tests/unit/vnflcm/test_load_vnf_interfaces.py
index 3abd39f29..079677129 100644
--- a/tacker/tests/unit/vnflcm/test_load_vnf_interfaces.py
+++ b/tacker/tests/unit/vnflcm/test_load_vnf_interfaces.py
@@ -27,8 +27,8 @@ from tacker.manager import TackerManager
from tacker import objects
from tacker.objects import fields
from tacker.tests.unit.db import base as db_base
-from tacker.tests.unit.nfvo.test_nfvo_plugin import FakeVNFMPlugin
from tacker.tests.unit.vnflcm import fakes
+from tacker.tests.unit.vnflcm.test_controller import FakeVNFMPlugin
from tacker.tests import utils as test_utils
from tacker.tests import uuidsentinel
from tacker.vnflcm import vnflcm_driver
diff --git a/tacker/tests/unit/vnflcm/test_vnflcm_driver.py b/tacker/tests/unit/vnflcm/test_vnflcm_driver.py
index 9ebd5cb32..098de1e7d 100644
--- a/tacker/tests/unit/vnflcm/test_vnflcm_driver.py
+++ b/tacker/tests/unit/vnflcm/test_vnflcm_driver.py
@@ -31,15 +31,14 @@ from tacker.common import exceptions
from tacker.common import utils
from tacker.conductor.conductorrpc.vnf_lcm_rpc import VNFLcmRPCAPI
from tacker import context
-from tacker.db.common_services import common_services_db_plugin
from tacker.manager import TackerManager
from tacker import objects
from tacker.objects import fields
from tacker.objects.fields import ErrorPoint as EP
from tacker.objects import vim_connection
from tacker.tests.unit.db import base as db_base
-from tacker.tests.unit.nfvo.test_nfvo_plugin import FakeVNFMPlugin
from tacker.tests.unit.vnflcm import fakes
+from tacker.tests.unit.vnflcm.test_controller import FakeVNFMPlugin
from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes as k8s_fakes
from tacker.tests import utils as test_utils
from tacker.tests import uuidsentinel
@@ -47,7 +46,6 @@ from tacker.vnflcm import vnflcm_driver
from tacker.vnflcm.vnflcm_driver import VnfLcmDriver
from tacker.vnfm.infra_drivers.openstack import heat_client
from tacker.vnfm.infra_drivers.openstack import openstack as opn
-from tacker.vnfm import plugin
from tacker.vnfm import vim_client
@@ -2218,7 +2216,6 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
@mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
@@ -2226,7 +2223,6 @@ class TestVnflcmDriver(db_base.SqlTestCase):
def test_rollback_vnf_7(
self, mock_vnf_interfaces, mock_vnfd_dict,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2266,12 +2262,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_6(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2312,12 +2306,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_5(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2358,12 +2350,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_4(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2404,12 +2394,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_3(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2447,7 +2435,6 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
@mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
@@ -2455,7 +2442,6 @@ class TestVnflcmDriver(db_base.SqlTestCase):
def test_rollback_vnf_scale(
self, mock_vnf_interfaces, mock_vnfd_dict,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2496,12 +2482,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_scale_6(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2541,12 +2525,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_scale_5(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2586,12 +2568,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_scale_4(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2631,12 +2611,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_scale_3(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
@@ -2706,21 +2684,16 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(VnfLcmDriver,
'_init_mgmt_driver_hash')
@mock.patch.object(objects.VnfLcmOpOcc, "save")
- @mock.patch.object(common_services_db_plugin.CommonServicesPluginDb,
- "create_event")
@mock.patch.object(heat_client.HeatClient, "__init__")
@mock.patch.object(heat_client.HeatClient, "resource_get")
@mock.patch.object(heat_client.HeatClient, "resource_get_list")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_scale_resource_error(
self,
mock_update,
- mock_up,
mock_resource_get_list,
mock_resource_get,
mock_init,
- mock_event,
mock_lcm_save,
mock_init_hash,
mock_get_service_plugins):
@@ -2759,21 +2732,16 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(VnfLcmDriver,
'_init_mgmt_driver_hash')
@mock.patch.object(objects.VnfLcmOpOcc, "save")
- @mock.patch.object(common_services_db_plugin.CommonServicesPluginDb,
- "create_event")
@mock.patch.object(heat_client.HeatClient, "__init__")
@mock.patch.object(heat_client.HeatClient, "resource_get")
@mock.patch.object(heat_client.HeatClient, "resource_get_list")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_scale_resource_list_error(
self,
mock_update,
- mock_up,
mock_resource_list,
mock_resource_get,
mock_init,
- mock_event,
mock_lcm_save,
mock_init_hash,
mock_get_service_plugins):
@@ -2816,12 +2784,8 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(VnfLcmDriver,
'_init_mgmt_driver_hash')
@mock.patch.object(objects.VnfLcmOpOcc, "save")
- @mock.patch.object(common_services_db_plugin.CommonServicesPluginDb,
- "create_event")
@mock.patch.object(heat_client.HeatClient, "__init__")
- @mock.patch.object(plugin.VNFMMgmtMixin, "mgmt_call")
@mock.patch.object(opn.OpenStack, "delete")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
@mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
@@ -2829,11 +2793,8 @@ class TestVnflcmDriver(db_base.SqlTestCase):
def test_rollback_vnf_delete_error(
self, mock_vnf_interfaces, mock_vnfd_dict,
mock_update,
- mock_up,
mock_delete,
- mock_mgmt,
mock_init,
- mock_event,
mock_lcm_save,
mock_init_hash,
mock_get_service_plugins):
@@ -2870,13 +2831,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(VnfLcmDriver,
'_init_mgmt_driver_hash')
@mock.patch.object(objects.VnfLcmOpOcc, "save")
- @mock.patch.object(common_services_db_plugin.CommonServicesPluginDb,
- "create_event")
@mock.patch.object(heat_client.HeatClient, "__init__")
- @mock.patch.object(plugin.VNFMMgmtMixin, "mgmt_call")
@mock.patch.object(opn.OpenStack, "delete")
@mock.patch.object(opn.OpenStack, "delete_wait")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
@mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
@@ -2884,12 +2841,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
def test_rollback_vnf_delete_wait_error(
self, mock_vnf_interfaces, mock_vnfd_dict,
mock_update,
- mock_up,
mock_delete_wait,
mock_delete,
- mock_mgmt,
mock_init,
- mock_event,
mock_lcm_save,
mock_init_hash,
mock_get_service_plugins):
@@ -2927,14 +2881,11 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(VnfLcmDriver,
'_init_mgmt_driver_hash')
@mock.patch.object(objects.VnfLcmOpOcc, "save")
- @mock.patch.object(common_services_db_plugin.CommonServicesPluginDb,
- "create_event")
@mock.patch.object(heat_client.HeatClient, "__init__")
@mock.patch.object(heat_client.HeatClient, "resource_get")
@mock.patch.object(heat_client.HeatClient, "resource_get_list")
@mock.patch.object(opn.OpenStack, "get_rollback_ids")
@mock.patch.object(opn.OpenStack, "scale_in_reverse")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
@mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
@@ -2942,13 +2893,11 @@ class TestVnflcmDriver(db_base.SqlTestCase):
def test_rollback_vnf_scale_update_error(
self, mock_vnf_interfaces, mock_vnfd_dict,
mock_update,
- mock_up,
mock_scale,
mock_resource_get,
mock_resource_get_list,
mock_resource,
mock_init,
- mock_event,
mock_lcm_save,
mock_init_hash,
mock_get_service_plugins):
@@ -2995,15 +2944,12 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(VnfLcmDriver,
'_init_mgmt_driver_hash')
@mock.patch.object(objects.VnfLcmOpOcc, "save")
- @mock.patch.object(common_services_db_plugin.CommonServicesPluginDb,
- "create_event")
@mock.patch.object(heat_client.HeatClient, "__init__")
@mock.patch.object(heat_client.HeatClient, "resource_get")
@mock.patch.object(heat_client.HeatClient, "resource_get_list")
@mock.patch.object(opn.OpenStack, "get_rollback_ids")
@mock.patch.object(opn.OpenStack, "scale_in_reverse")
@mock.patch.object(opn.OpenStack, "scale_update_wait")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
@mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
@@ -3011,14 +2957,12 @@ class TestVnflcmDriver(db_base.SqlTestCase):
def test_rollback_vnf_scale_update_wait_error(
self, mock_vnf_interfaces, mock_vnfd_dict,
mock_update,
- mock_up,
mock_wait,
mock_scale,
mock_resource_get,
mock_resource_get_list,
mock_resource,
mock_init,
- mock_event,
mock_lcm_save,
mock_init_hash,
mock_get_service_plugins):
@@ -3066,14 +3010,11 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(VnfLcmDriver,
'_init_mgmt_driver_hash')
@mock.patch.object(objects.VnfLcmOpOcc, "save")
- @mock.patch.object(common_services_db_plugin.CommonServicesPluginDb,
- "create_event")
@mock.patch.object(heat_client.HeatClient, "__init__")
@mock.patch.object(opn.OpenStack, "get_rollback_ids")
@mock.patch.object(opn.OpenStack, "scale_in_reverse")
@mock.patch.object(opn.OpenStack, "scale_update_wait")
@mock.patch.object(opn.OpenStack, "scale_resource_update")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
@mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
@@ -3081,13 +3022,11 @@ class TestVnflcmDriver(db_base.SqlTestCase):
def test_rollback_vnf_scale_resource_get_error(
self, mock_vnf_interfaces, mock_vnfd_dict,
mock_update,
- mock_up,
mock_scale_resource,
mock_wait,
mock_scale,
mock_resource_get,
mock_init,
- mock_event,
mock_lcm_save,
mock_init_hash,
mock_get_service_plugins):
@@ -3148,12 +3087,10 @@ class TestVnflcmDriver(db_base.SqlTestCase):
@mock.patch.object(objects.VnfLcmOpOcc, "save")
@mock.patch.object(VNFLcmRPCAPI, "send_notification")
@mock.patch.object(objects.VnfInstance, "save")
- @mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback_pre")
@mock.patch.object(vnflcm_driver.VnfLcmDriver, "_update_vnf_rollback")
def test_rollback_vnf_scale_cnf(
self,
mock_update,
- mock_up,
mock_insta_save,
mock_notification,
mock_lcm_save,
diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
index a2ea237d4..ea92ba787 100644
--- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
+++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
@@ -39,10 +39,8 @@ from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import \
fixture_data_utils as fd_utils
from tacker.tests import utils as test_utils
from tacker.vnflcm import utils as vnflcm_utils
-from tacker.vnfm.infra_drivers.kubernetes.k8s import tosca_kube_object
from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
from tacker.vnfm.infra_drivers.kubernetes import kubernetes_driver
-from tacker.vnfm.infra_drivers.kubernetes import translate_template
from unittest import mock
@@ -511,23 +509,6 @@ class TestKubernetes(base.TestCase):
)
self.assertEqual(msg, exc.format_message())
- @mock.patch.object(translate_template.TOSCAToKubernetes,
- 'deploy_kubernetes_objects')
- def test_create(self, mock_deploy_kubernetes_objects):
- auth_attr = fakes.fake_auth_attr()
- vnf = {
- 'vnfd': {
- 'attributes': {
- 'vnfd': {
- 'tosca_definitions_version': 'tosca_simple_yaml_1_0'}
- }}}
- plugin = ""
- mock_deploy_kubernetes_objects.return_value = \
- tosca_kube_object.ToscaKubeObject(
- namespace='namespace').namespace
- result = self.kubernetes.create(plugin, self.context, vnf, auth_attr)
- self.assertEqual("namespace", result)
-
@mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
@mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
def test_create_wait(
@@ -1775,144 +1756,6 @@ class TestKubernetes(base.TestCase):
resource_type=resource_type)
mock_read_namespaced_pod.assert_called()
- @mock.patch.object(client.AppsV1Api, 'delete_namespaced_deployment')
- @mock.patch.object(client.AutoscalingV1Api,
- 'delete_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.CoreV1Api, 'delete_namespaced_service')
- @mock.patch.object(client.CoreV1Api, 'delete_namespaced_config_map')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_delete_legacy(self, mock_vnf_resource_list,
- mock_delete_namespaced_config_map,
- mock_delete_namespaced_service,
- mock_delete_namespaced_horizontal_pod_autoscaler,
- mock_delete_namespaced_deployment):
- vnf_id = "fake_namespace,fake_name"
- mock_vnf_resource_list.return_value = list()
- mock_delete_namespaced_config_map.return_value = client.V1Status()
- mock_delete_namespaced_service.return_value = client.V1Status()
- mock_delete_namespaced_horizontal_pod_autoscaler.return_value = \
- client.V1Status()
- mock_delete_namespaced_deployment.return_value = client.V1Status()
- self.kubernetes.delete(plugin=None, context=self.context,
- vnf_id=vnf_id,
- auth_attr=utils.get_vim_auth_obj(),
- vnf_instance=None,
- terminate_vnf_req=None)
- mock_delete_namespaced_config_map.assert_called_once()
- mock_delete_namespaced_horizontal_pod_autoscaler.assert_called_once()
- mock_delete_namespaced_service.assert_called_once()
- mock_delete_namespaced_config_map.assert_called_once()
-
- @mock.patch.object(client.AppsV1Api, 'delete_namespaced_deployment')
- @mock.patch.object(client.AutoscalingV1Api,
- 'delete_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.CoreV1Api, 'delete_namespaced_service')
- @mock.patch.object(client.CoreV1Api, 'delete_namespaced_config_map')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_delete_legacy_delete_api_fail(self, mock_vnf_resource_list,
- mock_delete_namespaced_config_map,
- mock_delete_namespaced_service,
- mock_delete_namespaced_horizontal_pod_autoscaler,
- mock_delete_namespaced_deployment):
- vnf_id = "fake_namespace,fake_name"
- mock_vnf_resource_list.return_value = list()
- mock_delete_namespaced_config_map.side_effect = Exception()
- mock_delete_namespaced_service.side_effect = Exception()
- mock_delete_namespaced_horizontal_pod_autoscaler.side_effect = \
- Exception()
- mock_delete_namespaced_deployment.side_effect = Exception()
- self.kubernetes.delete(plugin=None, context=self.context,
- vnf_id=vnf_id,
- auth_attr=utils.get_vim_auth_obj(),
- vnf_instance=None,
- terminate_vnf_req=None)
- mock_delete_namespaced_config_map.assert_called_once()
- mock_delete_namespaced_horizontal_pod_autoscaler.assert_called_once()
- mock_delete_namespaced_service.assert_called_once()
- mock_delete_namespaced_config_map.assert_called_once()
-
- @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
- @mock.patch.object(client.AutoscalingV1Api,
- 'read_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
- @mock.patch.object(client.CoreV1Api, 'read_namespaced_config_map')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_delete_wait_legacy(self, mock_vnf_resource_list,
- mock_read_namespaced_config_map,
- mock_read_namespaced_service,
- mock_read_namespaced_horizontal_pod_autoscaler,
- mock_read_namespaced_deployment):
- vnf_id = "fake_namespace,fake_name"
- mock_vnf_resource_list.return_value = list()
- mock_read_namespaced_config_map.side_effect = Exception()
- mock_read_namespaced_service.side_effect = Exception()
- mock_read_namespaced_horizontal_pod_autoscaler.side_effect = \
- Exception()
- mock_read_namespaced_deployment.side_effect = Exception()
- self.kubernetes.delete_wait(plugin=None, context=self.context,
- vnf_id=vnf_id,
- auth_attr=utils.get_vim_auth_obj(),
- region_name=None,
- vnf_instance=None)
- mock_read_namespaced_config_map.assert_called_once()
- mock_read_namespaced_service.assert_called_once()
- mock_read_namespaced_horizontal_pod_autoscaler.assert_called_once()
- mock_read_namespaced_deployment.assert_called_once()
-
- @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
- @mock.patch.object(client.AutoscalingV1Api,
- 'read_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
- @mock.patch.object(client.CoreV1Api, 'read_namespaced_config_map')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_delete_wait_legacy_retry(self, mock_vnf_resource_list,
- mock_read_namespaced_config_map,
- mock_read_namespaced_service,
- mock_read_namespaced_horizontal_pod_autoscaler,
- mock_read_namespaced_deployment):
- vnf_id = "fake_namespace,fake_name"
- mock_vnf_resource_list.return_value = list()
- mock_read_namespaced_config_map.return_value = client.V1Status()
- mock_read_namespaced_service.return_value = client.V1Status()
- mock_read_namespaced_horizontal_pod_autoscaler.return_value = \
- client.V1Status()
- mock_read_namespaced_deployment.return_value = client.V1Status()
- self.kubernetes.delete_wait(plugin=None, context=self.context,
- vnf_id=vnf_id,
- auth_attr=utils.get_vim_auth_obj(),
- region_name=None,
- vnf_instance=None)
- mock_read_namespaced_config_map.assert_called()
- mock_read_namespaced_service.assert_called()
- mock_read_namespaced_horizontal_pod_autoscaler.assert_called()
- mock_read_namespaced_deployment.assert_called()
-
- @mock.patch.object(translate_template.TOSCAToKubernetes,
- 'deploy_kubernetes_objects')
- def test_instantiate_vnf_without_target_k8s_files(
- self, mock_deploy_kubernetes_objects):
- vnf = objects.VnfInstance(vnf_metadata={'namespace': 'default'})
- vim_connection_info = objects.VimConnectionInfo(
- access_info={'auth_url': 'http://fake-url/identity/v3'})
- vnfd_dict = fakes.fake_vnf_dict()
- test_tosca_kube_object = tosca_kube_object.ToscaKubeObject(
- namespace='test_namespace', name='test_name')
- test_deployment_name = (
- test_tosca_kube_object.namespace + "," +
- test_tosca_kube_object.name)
- mock_deploy_kubernetes_objects.return_value = \
- test_deployment_name
- instantiate_vnf_req = objects.InstantiateVnfRequest(
- additional_params={'dummy_key': ["dummy_value"]})
- grant_response = None
- base_hot_dict = None
- vnf_package_path = self.yaml_path
- result = self.kubernetes.instantiate_vnf(
- self.context, vnf, vnfd_dict, vim_connection_info,
- instantiate_vnf_req, grant_response, vnf_package_path,
- base_hot_dict)
- self.assertEqual(result, "test_namespace,test_name")
-
@mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml')
@mock.patch.object(translate_outputs.Transformer, 'deploy_k8s')
@mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
@@ -2566,108 +2409,6 @@ class TestKubernetes(base.TestCase):
self.context, None,
utils.get_vim_auth_obj(), policy, None)
- def _test_scale_legacy(self, scale_type,
- current_replicas, after_replicas,
- mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler):
- policy = fakes.get_scale_policy(
- type=scale_type, aspect_id='SP1', is_legacy=True)
- policy['instance_id'] = "fake_namespace,fake_name"
- mock_vnf_resource_list.return_value = []
- mock_read_namespaced_deployment.return_value = \
- client.V1Deployment(
- spec=client.V1ScaleSpec(replicas=current_replicas),
- status=client.V1DeploymentStatus(replicas=current_replicas),
- metadata=client.V1ObjectMeta(labels={'scaling_name': 'SP1'}))
- mock_read_namespaced_horizontal_pod_autoscaler.return_value = \
- client.V1HorizontalPodAutoscaler(
- spec=client.V1HorizontalPodAutoscalerSpec(
- min_replicas=1, max_replicas=3,
- scale_target_ref=client.V1CrossVersionObjectReference(
- kind='Deployment', name='fake_name')))
- mock_patch_namespaced_deployment_scale.return_value = \
- client.V1Scale(
- spec=client.V1ScaleSpec(replicas=after_replicas),
- status=client.V1ScaleStatus(replicas=after_replicas))
- self.kubernetes.scale(context=self.context, plugin=None,
- auth_attr=utils.get_vim_auth_obj(),
- policy=policy,
- region_name=None)
-
- @mock.patch.object(client.AutoscalingV1Api,
- 'read_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.AppsV1Api, 'patch_namespaced_deployment_scale')
- @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_scale_legacy_in(self, mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler):
- self._test_scale_legacy('in', 2, 1,
- mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler)
- mock_read_namespaced_deployment.assert_called_once()
- mock_read_namespaced_horizontal_pod_autoscaler.assert_called_once()
- mock_patch_namespaced_deployment_scale.assert_called_once()
-
- @mock.patch.object(client.AutoscalingV1Api,
- 'read_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.AppsV1Api, 'patch_namespaced_deployment_scale')
- @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_scale_legacy_out(self, mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler):
- self._test_scale_legacy('out', 2, 3,
- mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler)
- mock_read_namespaced_deployment.assert_called_once()
- mock_read_namespaced_horizontal_pod_autoscaler.assert_called_once()
- mock_patch_namespaced_deployment_scale.assert_called_once()
-
- @mock.patch.object(client.AutoscalingV1Api,
- 'read_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.AppsV1Api, 'patch_namespaced_deployment_scale')
- @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_scale_legacy_in_less_than_min(self, mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler):
- self._test_scale_legacy('in', 1, 1,
- mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler)
- mock_read_namespaced_deployment.assert_called_once()
- mock_read_namespaced_horizontal_pod_autoscaler.assert_called_once()
- mock_patch_namespaced_deployment_scale.assert_called_once()
-
- @mock.patch.object(client.AutoscalingV1Api,
- 'read_namespaced_horizontal_pod_autoscaler')
- @mock.patch.object(client.AppsV1Api, 'patch_namespaced_deployment_scale')
- @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_scale_legacy_out_over_max(self, mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler):
- self._test_scale_legacy('out', 3, 3,
- mock_vnf_resource_list,
- mock_read_namespaced_deployment,
- mock_patch_namespaced_deployment_scale,
- mock_read_namespaced_horizontal_pod_autoscaler)
- mock_read_namespaced_deployment.assert_called_once()
- mock_read_namespaced_horizontal_pod_autoscaler.assert_called_once()
- mock_patch_namespaced_deployment_scale.assert_called_once()
-
@mock.patch.object(objects.VnfInstance, "get_by_id")
@mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale')
@mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
@@ -2816,56 +2557,6 @@ class TestKubernetes(base.TestCase):
self.context, None,
utils.get_vim_auth_obj(), policy, None, None)
- @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_scale_wait_legacy(self, mock_vnf_resource_list,
- mock_list_namespaced_pod):
- policy = fakes.get_scale_policy(
- type='out', aspect_id='SP1', is_legacy=True)
- mock_vnf_resource_list.return_value = []
- mock_list_namespaced_pod.return_value = \
- client.V1PodList(items=[
- fakes.get_fake_pod_info(
- kind='Deployment', pod_status='Running')])
- self.kubernetes.scale_wait(context=self.context, plugin=None,
- auth_attr=utils.get_vim_auth_obj(),
- policy=policy,
- region_name=None,
- last_event_id=None)
- mock_list_namespaced_pod.assert_called_once()
-
- @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_scale_wait_legacy_retry_over(self, mock_vnf_resource_list,
- mock_list_namespaced_pod):
- policy = fakes.get_scale_policy(
- type='out', aspect_id='SP1', is_legacy=True)
- mock_vnf_resource_list.return_value = []
- mock_list_namespaced_pod.return_value = \
- client.V1PodList(items=[
- fakes.get_fake_pod_info(
- kind='Deployment', pod_status='Pending')])
- self.assertRaises(vnfm.VNFCreateWaitFailed,
- self.kubernetes.scale_wait,
- self.context, None,
- utils.get_vim_auth_obj(), policy, None, None)
-
- @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
- @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
- def test_scale_wait_legacy_status_unknown(self, mock_vnf_resource_list,
- mock_list_namespaced_pod):
- policy = fakes.get_scale_policy(
- type='out', aspect_id='SP1', is_legacy=True)
- mock_vnf_resource_list.return_value = []
- mock_list_namespaced_pod.return_value = \
- client.V1PodList(items=[
- fakes.get_fake_pod_info(
- kind='Deployment', pod_status='Unknown')])
- self.assertRaises(vnfm.VNFCreateWaitFailed,
- self.kubernetes.scale_wait,
- self.context, None,
- utils.get_vim_auth_obj(), policy, None, None)
-
@mock.patch.object(client.AppsV1Api, 'patch_namespaced_deployment_scale')
@mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale')
@mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_tosca_kube_object.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_tosca_kube_object.py
deleted file mode 100644
index e5ef2c31b..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_tosca_kube_object.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (C) 2021 FUJITSU
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tacker.tests.unit import base
-from tacker.vnfm.infra_drivers.kubernetes.k8s import tosca_kube_object
-
-
-class TestToscaKubeObject(base.TestCase):
- def setUp(self):
- super(TestToscaKubeObject, self).setUp()
- self.tosca_kube_object = tosca_kube_object.ToscaKubeObject(
- name='name',
- namespace='namespace',
- mapping_ports='mappingports',
- containers=[
- tosca_kube_object.Container(
- name="name")],
- network_name="network",
- mgmt_connection_point=True,
- scaling_object=[
- tosca_kube_object.ScalingObject(
- scale_target_name='scalingname')],
- service_type='servicetype',
- labels={
- 'lable': 'lable'},
- annotations="annotations")
-
- def test_tosca_kube_object(self):
- self.assertEqual('name', self.tosca_kube_object.name)
- self.assertEqual('namespace', self.tosca_kube_object.namespace)
-
-
-class TestContainerObject(base.TestCase):
- def setUp(self):
- super(TestContainerObject, self).setUp()
- self.container_object = tosca_kube_object.Container(
- name='container',
- num_cpus=1,
- mem_size="100MB",
- image="ubuntu",
- command='command',
- args=['args'],
- ports=['22'],
- config='config'
- )
-
- def test_container_object(self):
- self.assertEqual('container', self.container_object.name)
- self.assertEqual(1, self.container_object.num_cpus)
- self.assertEqual('100MB', self.container_object.mem_size)
- self.assertEqual('ubuntu', self.container_object.image)
-
-
-class TestScalingObject(base.TestCase):
- def setUp(self):
- super(TestScalingObject, self).setUp()
- self.scaling_object = tosca_kube_object.ScalingObject(
- scaling_name='scalingname',
- min_replicas=1,
- max_replicas=3,
- scale_target_name="cp1",
- target_cpu_utilization_percentage="40"
- )
-
- def test_scaling_object(self):
- self.assertEqual('scalingname', self.scaling_object.scaling_name)
- self.assertEqual(1, self.scaling_object.min_replicas)
- self.assertEqual(3, self.scaling_object.max_replicas)
- self.assertEqual("cp1", self.scaling_object.scale_target_name)
- self.assertEqual(
- "40", self.scaling_object.target_cpu_utilization_percentage)
diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_inputs.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_inputs.py
deleted file mode 100644
index 40b5710b0..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_inputs.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (C) 2020 FUJITSU
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from tacker.tests.unit import base
-from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes
-from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_inputs
-
-
-class TestParser(base.TestCase):
- def setUp(self):
- super(TestParser, self).setUp()
- self.k8s_client_dict = fakes.fake_k8s_client_dict()
- self.vnfd_path = '../../../../etc/samples/sample_tosca_vnfc.yaml'
- self.yaml_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
- self.vnfd_path)
- self.vnfd_dict = {
- "tosca_definitions_version": "tosca_simple_profile_for_nfv_1_0_0",
- "description": "Demo example",
- "metadata": {
- "template_name": "sample-tosca-vnfd"},
- "topology_template": {
- "node_templates": {
- "VDU1": {
- "type": "tosca.nodes.nfv.VDU.Tacker",
- "capabilities": {
- "nfv_compute": {
- "properties": {
- "num_cpus": 1,
- "mem_size": "512 MB",
- "disk_size": "1 GB"}}},
- "properties": {
- "vnfcs": {
- "web_server": {
- "mem_size": "100 MB",
- "config": "config"
- }
- },
- "labels": [
- "label1:1", "label2:2"
- ]
- }
- },
- "CP1": {
- "type": "tosca.nodes.nfv.CP.Tacker",
- "properties": {
- "order": 0,
- "management": True,
- "anti_spoofing_protection": False},
- "requirements": [
- {"virtualLink": {
- "node": "VL1"}},
- {"virtualBinding": {
- "node": "VDU1"}}]},
- "VL1": {
- "type": "tosca.nodes.nfv.VL",
- "properties": {
- "vendor": "Tacker",
- "network_name": "net_mgmt"}}
- }
- }
- }
- self.parser = translate_inputs.Parser(self.vnfd_dict)
-
- def test_loader(self):
- tosca_kube_object = self.parser.loader()
- self.assertEqual(tosca_kube_object[0].name[:8], "svc-VDU1")
- self.assertEqual(tosca_kube_object[0].containers[0].name, "web_server")
- self.assertEqual(
- tosca_kube_object[0].containers[0].mem_size,
- 100000000)
- self.assertEqual(
- tosca_kube_object[0].labels, {
- 'label1': '1', 'label2': '2'})
diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py
index 6f1c75de0..fb73da658 100644
--- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py
+++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py
@@ -21,7 +21,6 @@ from tacker.common import exceptions
from tacker.tests.unit import base
from tacker.tests.unit import fake_request
from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes
-from tacker.vnfm.infra_drivers.kubernetes.k8s import tosca_kube_object
from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
@@ -850,51 +849,3 @@ class TestTransformer(base.TestCase):
self.assertEqual(k8s_obj.api_version, 'apps/v1')
# V1ControllerRevision
self.assertEqual(k8s_obj.revision, 1)
-
- def test_transform(self):
- container_obj = tosca_kube_object.Container(
- config='config:abc\nconfig2:bcd',
- num_cpus=2,
- mem_size=10,
- name='container'
- )
- tosca_kube_objects = [tosca_kube_object.ToscaKubeObject(
- namespace='namespace',
- name='name',
- containers=[container_obj],
- mapping_ports=["123"],
- labels={}
- )]
- kubernetes_objects = self.transfromer.transform(tosca_kube_objects)
- self.assertEqual(kubernetes_objects['namespace'], 'namespace')
- self.assertEqual(
- kubernetes_objects['objects'][0].data, {
- 'config': 'abc', 'config2': 'bcd'})
-
- @mock.patch.object(client.CoreV1Api, 'create_namespaced_config_map')
- @mock.patch.object(client.AppsV1Api, 'create_namespaced_deployment')
- @mock.patch.object(client.CoreV1Api, 'create_namespaced_service')
- def test_deploy(
- self,
- mock_create_namespaced_config_map,
- mock_create_namespaced_deployment,
- mock_create_namespaced_service):
- mock_create_namespaced_config_map.return_value = ""
- mock_create_namespaced_deployment.return_value = ""
- mock_create_namespaced_service.return_value = ""
- container_obj = tosca_kube_object.Container(
- config='config:abc\nconfig2:bcd',
- num_cpus=2,
- mem_size=10,
- name='container'
- )
- tosca_kube_objects = [tosca_kube_object.ToscaKubeObject(
- namespace='namespace',
- name='name',
- containers=[container_obj],
- mapping_ports=["123"],
- labels={}
- )]
- kubernetes_objects = self.transfromer.transform(tosca_kube_objects)
- result = self.transfromer.deploy(kubernetes_objects)
- self.assertEqual(result, 'namespace,name')
diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_template.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_template.py
deleted file mode 100644
index a3eff9ed3..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_template.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (C) 2021 FUJITSU
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from kubernetes import client
-from unittest import mock
-
-
-from tacker.tests.unit import base
-from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
-from tacker.vnfm.infra_drivers.kubernetes import translate_template
-
-
-class TestTOSCAToKubernetes(base.TestCase):
- def setUp(self):
- super(TestTOSCAToKubernetes, self).setUp()
- self.vnf = {
- "vnfd": {
- "service_types": [
- {
- "service_type": "vnfd",
- "id": "ca0d8667-ce35-4f7a-9744-ac4bc7d5579d"
- }
- ],
- "description": "Sample",
- "tenant_id": "689708956a2d4ae0a27120d3aca6a560",
- "created_at": "2016-10-20 07:38:54",
- "updated_at": None,
- "attributes": {
- "vnfd":
- "description: "
- "Demo example\nmetadata: "
- "{template_name: sample-tosca-vnfd}\n"
- "topology_template:\n "
- "node_templates:\n CP1:\n "
- "properties: {anti_spoofing_protection: "
- "false, management: true, order: 0}\n "
- "requirements:\n "
- "- virtualLink: {node: VL1}\n "
- "- virtualBinding: {node: VDU1}\n "
- "type: tosca.nodes.nfv.CP.Tacker\n "
- "VDU1:\n "
- "capabilities:\n "
- "nfv_compute:\n "
- "properties: {disk_size: 1 GB, "
- "mem_size: 512 MB, num_cpus: 1}\n "
- "properties: {mapping_ports: [80:80] , "
- "vnfcs: {web:{mem_size: 100 MB, "
- "config: param0:key1}}}\n "
- "type: tosca.nodes.nfv.VDU.Tacker\n "
- "VL1:\n properties: {network_name: "
- "net_mgmt, vendor: Tacker}\n "
- "type: tosca.nodes.nfv.VL\ntosca_definitions_version: "
- "tosca_simple_profile_for_nfv_1_0_0\n"
- },
- "id": "0fb827e7-32b0-4e5b-b300-e1b1dce8a831",
- "name": "vnfd-sample",
- "template_source": "onboarded or inline"
- }
- }
-
- self.core_v1_api_client = client.CoreV1Api
- self.app_v1_api_client = client.AppsV1Api
- self.scaling_api_client = client.AutoscalingApi
- self.tosca_to_kubernetes_object = translate_template.TOSCAToKubernetes(
- self.vnf, self.core_v1_api_client, self.app_v1_api_client,
- self.scaling_api_client)
-
- def test_generate_tosca_kube_objects(self):
- result = self.tosca_to_kubernetes_object.generate_tosca_kube_objects()
- self.assertEqual(result[0].name[:8], "svc-VDU1")
- self.assertEqual(result[0].containers[0].name, "web")
- self.assertEqual(result[0].containers[0].mem_size, 100000000)
-
- @mock.patch.object(translate_outputs.Transformer, 'deploy')
- def test_deploy_kuberentes_objects(self, mock_deploy):
- mock_deploy.return_value = "name, namespace"
- self.tosca_to_kubernetes_object.deploy_kubernetes_objects()
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/config_data.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/config_data.yaml
deleted file mode 100644
index 5468f9c49..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/config_data.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-vdus:
- vdu1:
- config:
- firewall: |
- package firewall
-
- config defaults
- option syn_flood '1'
- option input 'ACCEPT'
- option output 'ACCEPT'
- option forward 'REJECT'
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/etsi_nfv/tosca_vnfd.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/etsi_nfv/tosca_vnfd.yaml
index d90232715..61146125d 100644
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/etsi_nfv/tosca_vnfd.yaml
+++ b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/etsi_nfv/tosca_vnfd.yaml
@@ -7,7 +7,6 @@ imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
-node_types:
topology_template:
node_templates:
VDU1:
@@ -54,7 +53,7 @@ topology_template:
name: cirros
version: '0.0.0'
checksum:
- algorithm: sha512
+ algorithm: sha-512
hash: f0fd1b50420dce4ca382ccfbb528eef3a38bbeff00b54e95e3876b9bafe7ed2d6f919ca35d9046d437c6d2d8698b1174a335fbd66035bb3edc525d2cdb187232
container_format: bare
disk_format: qcow2
@@ -89,10 +88,3 @@ topology_template:
l3_protocol_data:
ip_version: ipv4
cidr: 33.33.0.0/24
-
- policies:
- - policy_affinity_local_VDU1:
- type: tosca.policies.nfv.AntiAffinityRule
- targets: [ VDU1 ]
- properties:
- scope: zone
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_alarm_scale_custom.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_alarm_scale_custom.yaml
deleted file mode 100644
index 09126ce1a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_alarm_scale_custom.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- availability_zone: nova
- user_data_format: SOFTWARE_CONFIG
- config_drive: false
- networks:
- - port: { get_resource: CP1 }
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- metadata: {metering.server_group: SG1-2e3261d9-14}
- VL1:
- type: OS::Neutron::Net
- CP1:
- type: OS::Neutron::Port
- properties:
- network: net_mgmt
- port_security_enabled: false
-heat_template_version: 2013-05-23
-description: Scaling template
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_data.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_data.yaml
deleted file mode 100644
index 99b8aefb7..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_data.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'OpenWRT with services
-
-'
-parameters:
- flavor: {type: string, default: m1.tiny}
- reservation_id: {type: string, default: 891cd152-3925-4c9e-9074-239a902b68d7}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- networks:
- - port: {get_resource: CP1}
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- config_drive: false
- flavor: {get_param: flavor}
- scheduler_hints:
- reservation: {get_param: reservation_id}
- CP1:
- type: OS::Neutron::Port
- properties: {network: existing_network_1, port_security_enabled: false}
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor.yaml
deleted file mode 100644
index f5112d61a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 10
- ram: 512
- vcpus: 2
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_and_capabilities.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_and_capabilities.yaml
deleted file mode 100644
index 1d03ab70d..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_and_capabilities.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.nano
- image: OpenWRT
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_defaults.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_defaults.yaml
deleted file mode 100644
index b33bf1685..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_defaults.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 10
- ram: 512
- vcpus: 1
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_no_units.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_no_units.yaml
deleted file mode 100644
index 817854c8d..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_flavor_no_units.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
-
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 2
- ram: 512
- vcpus: 2
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_grant.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_grant.yaml
deleted file mode 100644
index 1d58dc38a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_grant.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-heat_template_version: 2016-10-14
-description: test
-parameters:
- DB11_image:
- type: string
- default: cirros
-resources:
- DB11:
- type: OS::Nova::Server
- properties:
- user_data_format: SOFTWARE_CONFIG
- availability_zone: nova
- block_device_mapping_v2:
- - device_name: vda
- volume_id: {get_resource: ST1}
- flavor: m1.tiny
- networks:
- - port: {get_resource: DB11-CP}
- config_drive: false
- DB11-CP:
- type: OS::Neutron::Port
- properties:
- network: net_mgmt
-outputs:
- id-DB11:
- value:
- get_attr: [DB11-CP, fixed_ips, 0, ip_address]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_image_after_processed_image.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_image_after_processed_image.yaml
deleted file mode 100644
index f61d0c320..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_image_after_processed_image.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-heat_template_version: 2013-05-23
-description: OpenWRT with services
-outputs: {}
-resources:
- VDU1:
- properties:
- config_drive: true
- flavor: m1.tiny
- image: {get_resource: VDU1_image}
- VDU1_image:
- type: OS::Glance::WebImage
- properties:
- location: http://URL/v1/openwrt.qcow2
- container_format: bare
- disk_format: raw
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_image_before_processed_image.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_image_before_processed_image.yaml
deleted file mode 100644
index 9839fa092..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_image_before_processed_image.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-heat_template_version: 2013-05-23
-description: OpenWRT with services
-outputs: {}
-resources:
- VDU1:
- properties:
- config_drive: true
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt.yaml
deleted file mode 100644
index c71d61908..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-description: OpenWRT with services
-heat_template_version: 2013-05-23
-outputs:
- mgmt_ip-vdu1:
- description: management ip address
- value:
- get_attr: [vdu1-net_mgmt-port, fixed_ips, 0, ip_address]
-resources:
- vdu1:
- properties:
- availability_zone: nova
- config_drive: true
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- metadata: {param0: key0, param1: key1}
- networks:
- - port: {get_resource: vdu1-net_mgmt-port}
- - {network: net0}
- - {network: net1}
- type: OS::Nova::Server
- vdu1-net_mgmt-port:
- properties:
- fixed_ips: []
- network: net_mgmt
- port_security_enabled: false
- type: OS::Neutron::Port
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt_ipparams.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt_ipparams.yaml
deleted file mode 100644
index f17788c44..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt_ipparams.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-description: Parameterized VNF descriptor for IP addresses
-heat_template_version: 2013-05-23
-outputs:
- mgmt_ip-vdu1:
- description: management ip address
- value:
- get_attr: [vdu1-net_mgmt-port, fixed_ips, 0, ip_address]
-resources:
- vdu1:
- properties:
- availability_zone: nova
- config_drive: true
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- metadata: {param0: key0, param1: key1}
- networks:
- - port: {get_resource: vdu1-net_mgmt-port}
- - port: {get_resource: vdu1-net0-port}
- - port: {get_resource: vdu1-net1-port}
- type: OS::Nova::Server
- vdu1-net0-port:
- properties:
- fixed_ips:
- - {ip_address: 10.10.0.98}
- network: net0
- port_security_enabled: false
- type: OS::Neutron::Port
- vdu1-net1-port:
- properties:
- fixed_ips:
- - {ip_address: 10.10.1.98}
- network: net1
- port_security_enabled: false
- type: OS::Neutron::Port
- vdu1-net_mgmt-port:
- properties:
- fixed_ips:
- - {ip_address: 192.168.120.98}
- network: net_mgmt
- port_security_enabled: false
- type: OS::Neutron::Port
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt_params.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt_params.yaml
deleted file mode 100644
index 332822686..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_openwrt_params.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-description: Parameterized VNF descriptor
-heat_template_version: 2013-05-23
-outputs:
- mgmt_ip-vdu1:
- description: management ip address
- value:
- get_attr: [vdu1-net_mgmt-port, fixed_ips, 0, ip_address]
-resources:
- vdu1:
- properties:
- availability_zone: nova
- config_drive: true
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- key_name: keyName
- metadata: {param0: key0, param1: key1}
- networks:
- - port: {get_resource: vdu1-net_mgmt-port}
- - {network: net0}
- - {network: net1}
- user_data: '#!/bin/sh
-
- echo "my hostname is `hostname`" > /tmp/hostname
-
- df -h > /home/cirros/diskinfo
-
- '
- user_data_format: RAW
- type: OS::Nova::Server
- vdu1-net_mgmt-port:
- properties:
- fixed_ips: []
- network: net_mgmt
- port_security_enabled: false
- type: OS::Neutron::Port
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_affinity.yaml
deleted file mode 100644
index a8d71febe..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_affinity.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Demo example
-
-'
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
- mgmt_ip-VDU2:
- value:
- get_attr: [CP2, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP1}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- VDU2:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP2}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- CP1:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
- CP2:
- properties: {network: net0, port_security_enabled: false}
- type: OS::Neutron::Port
- my_compute_placement_policy:
- type: OS::Nova::ServerGroup
- properties:
- name: my_compute_placement_policy
- policies: [affinity]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_anti_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_anti_affinity.yaml
deleted file mode 100644
index 36431355c..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_anti_affinity.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Demo example
-
-'
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
- mgmt_ip-VDU2:
- value:
- get_attr: [CP2, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP1}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- VDU2:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP2}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- CP1:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
- CP2:
- properties: {network: net0, port_security_enabled: false}
- type: OS::Neutron::Port
- my_compute_placement_policy:
- type: OS::Nova::ServerGroup
- properties:
- name: my_compute_placement_policy
- policies: [anti-affinity]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_default_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_default_affinity.yaml
deleted file mode 100644
index a8d71febe..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_default_affinity.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Demo example
-
-'
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
- mgmt_ip-VDU2:
- value:
- get_attr: [CP2, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP1}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- VDU2:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP2}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- CP1:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
- CP2:
- properties: {network: net0, port_security_enabled: false}
- type: OS::Neutron::Port
- my_compute_placement_policy:
- type: OS::Nova::ServerGroup
- properties:
- name: my_compute_placement_policy
- policies: [affinity]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_soft_anti_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_soft_anti_affinity.yaml
deleted file mode 100644
index 310b4c782..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_placement_policy_soft_anti_affinity.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Demo example
-
-'
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
- mgmt_ip-VDU2:
- value:
- get_attr: [CP2, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP1}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- VDU2:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP2}
- scheduler_hints:
- group: {get_resource: my_compute_placement_policy}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- CP1:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
- CP2:
- properties: {network: net0, port_security_enabled: false}
- type: OS::Neutron::Port
- my_compute_placement_policy:
- type: OS::Nova::ServerGroup
- properties:
- name: my_compute_placement_policy
- policies: [soft-anti-affinity]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_scale_custom.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_scale_custom.yaml
deleted file mode 100644
index 31636cb95..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_scale_custom.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- availability_zone: nova
- user_data_format: SOFTWARE_CONFIG
- config_drive: false
- networks:
- - port: { get_resource: CP1 }
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- VL1:
- type: OS::Neutron::Net
- CP1:
- type: OS::Neutron::Port
- properties:
- network: net_mgmt
- port_security_enabled: false
-heat_template_version: 2013-05-23
-description: Scaling template
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_scale_main.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_scale_main.yaml
deleted file mode 100644
index 7fb38582f..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_scale_main.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'sample-tosca-vnfd-scaling
-
-'
-
-parameters: {}
-outputs: {}
-resources:
- SP1_group:
- properties:
- desired_capacity: 2
- max_size: 3
- min_size: 1
- cooldown: 60
- resource: {type: SP1_res.yaml}
- type: OS::Heat::AutoScalingGroup
- SP1_scale_in:
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: {get_resource: SP1_group}
- cooldown: 60
- scaling_adjustment: -1
- type: OS::Heat::ScalingPolicy
- SP1_scale_out:
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: {get_resource: SP1_group}
- cooldown: 60
- scaling_adjustment: 1
- type: OS::Heat::ScalingPolicy
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_metadata.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_metadata.yaml
deleted file mode 100644
index d069f4db9..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_metadata.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'An exception will be raised when having the mismatched metadata
-(metadata is described in monitoring policy but unavailable in VDU properties).
-'
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP1}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- CP1:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 1
- ram: 512
- vcpus: 2
- vdu_hcpu_usage_respawning:
- type: OS::Aodh::GnocchiAggregationByResourcesAlarm
- properties:
- description: utilization greater_than 50%
- metric: cpu_util
- threshold: 50
- granularity: 60
- aggregation_method: mean
- resource_type: instance
- evaluation_periods: 1
- comparison_operator: gt
- query:
- str_replace:
- template: '{"=": {"server_group": "scaling_group_id"}}'
- params:
- scaling_group_id: VDU1-2e3261d9-1
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_respawn.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_respawn.yaml
deleted file mode 100644
index 241341c8a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_respawn.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Demo example
-
-'
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP1}
- user_data_format: SOFTWARE_CONFIG
- metadata: {'metering.server_group': VDU1-2e3261d9-1}
- type: OS::Nova::Server
- CP1:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 1
- ram: 512
- vcpus: 2
- vdu_hcpu_usage_respawning:
- type: OS::Aodh::GnocchiAggregationByResourcesAlarm
- properties:
- description: utilization greater_than 50%
- metric: cpu_util
- threshold: 50
- granularity: 60
- aggregation_method: mean
- resource_type: instance
- evaluation_periods: 1
- comparison_operator: gt
- query:
- str_replace:
- template: '{"=": {"server_group": "scaling_group_id"}}'
- params:
- scaling_group_id: VDU1-2e3261d9-1
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_scale.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_scale.yaml
deleted file mode 100644
index a473a5d9a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_alarm_scale.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'sample-tosca-vnfd-scaling
-
-'
-
-parameters: {}
-outputs: {}
-resources:
- SP1_group:
- properties:
- cooldown: 60
- desired_capacity: 2
- max_size: 3
- min_size: 1
- resource: {type: SP1_res.yaml}
- type: OS::Heat::AutoScalingGroup
- SP1_scale_in:
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: {get_resource: SP1_group}
- cooldown: 60
- scaling_adjustment: -1
- type: OS::Heat::ScalingPolicy
- SP1_scale_out:
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: {get_resource: SP1_group}
- cooldown: 60
- scaling_adjustment: 1
- type: OS::Heat::ScalingPolicy
-
- vdu_hcpu_usage_scaling_out:
- type: OS::Aodh::GnocchiAggregationByResourcesAlarm
- properties:
- description: utilization greater_than 50%
- metric: cpu_util
- aggregation_method: mean
- granularity: 600
- evaluation_periods: 1
- threshold: 50
- resource_type: instance
- query:
- str_replace:
- template: '{"=": {"server_group": "scaling_group_id"}}'
- params:
- scaling_group_id: SG1-2e3261d9-14
- comparison_operator: gt
- vdu_lcpu_usage_scaling_in:
- type: OS::Aodh::GnocchiAggregationByResourcesAlarm
- properties:
- description: utilization less_than 10%
- metric: cpu_util
- aggregation_method: mean
- granularity: 600
- evaluation_periods: 1
- threshold: 10
- resource_type: instance
- query:
- str_replace:
- template: '{"=": {"server_group": "scaling_group_id"}}'
- params:
- scaling_group_id: SG1-2e3261d9-14
- comparison_operator: lt
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_allowed_address_pairs.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_allowed_address_pairs.yaml
deleted file mode 100644
index 90869332e..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_allowed_address_pairs.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- VIP Template
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP2, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.nano
- image: {get_resource: VDU1_image}
- networks:
- - port:
- get_resource: CP1
- - port:
- get_resource: CP2
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- fixed_ips:
- - ip_address: 10.10.1.11
- allowed_address_pairs:
- - ip_address: 10.10.1.12
- network: existing_network_1
- port_security_enabled: true
- security_groups:
- - default
-
- VCP1:
- type: OS::Neutron::Port
- properties:
- fixed_ips:
- - ip_address: 10.10.1.12
- network: existing_network_1
- port_security_enabled: true
- security_groups:
- - default
-
- CP2:
- type: OS::Neutron::Port
- properties:
- fixed_ips:
- - ip_address: 10.10.2.21
- allowed_address_pairs:
- - ip_address: 10.10.2.22
- - ip_address: 10.10.2.23
- mac_address: fe:1a:29:d9:36:45
- mac_address: fe:1a:29:d9:36:45
- network: existing_network_2
- port_security_enabled: true
- security_groups:
- - default
-
- VCP2:
- type: OS::Neutron::Port
- properties:
- fixed_ips:
- - ip_address: 10.10.2.22
- network: existing_network_2
- port_security_enabled: true
- security_groups:
- - default
-
- VCP3:
- type: OS::Neutron::Port
- properties:
- fixed_ips:
- - ip_address: 10.10.2.23
- network: existing_network_2
- port_security_enabled: true
- security_groups:
- - default
-
- VDU1_image:
- type: OS::Glance::WebImage
- properties:
- container_format: bare
- disk_format: raw
- location: http://URL/vRouterVNF.qcow2
- name: vm_image
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_artifacts_image_vnfd_params.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_artifacts_image_vnfd_params.yaml
deleted file mode 100644
index 2fd4faab4..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_artifacts_image_vnfd_params.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-description: >
- Generic VDU with parameterized image and flavor
-
-heat_template_version: 2013-05-23
-outputs:
- mgmt_ip-VDU1:
- value: {get_attr: [CP1, fixed_ips, 0, ip_address]}
-parameters:
- image_source:
- default: http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img
- description: Image source for the server
- type: string
-resources:
- CP1:
- properties:
- network: net_mgmt
- port_security_enabled: False
- type: OS::Neutron::Port
- CP2:
- properties:
- network: pkt_in
- port_security_enabled: False
- type: OS::Neutron::Port
- CP3:
- properties:
- network: pkt_out
- port_security_enabled: False
- type: OS::Neutron::Port
- VDU1:
- properties:
- config_drive: False
- flavor: m1.tiny
- image: {get_resource: VDU1_image}
- networks:
- - port: { get_resource: CP1 }
- - port: { get_resource: CP2 }
- - port: { get_resource: CP3 }
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
- VDU1_image:
- properties:
- container_format: bare
- disk_format: raw
- location: {get_param: image_source}
- name: VNFImage
- type: OS::Glance::WebImage
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_count.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_count.yaml
deleted file mode 100644
index 100f340e6..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_count.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 10
- ram: 4096
- vcpus: 8
- extra_specs: {'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any', 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2, 'hw:numa_nodes': 2, 'hw:cpu_cores': 2, 'hw:cpu_threads_policy': 'avoid'}
-outputs: {}
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_nodes.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_nodes.yaml
deleted file mode 100644
index b9216222e..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_all_numa_nodes.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 40
- ram: 4096
- vcpus: 6
- extra_specs: {'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any', 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2, 'hw:numa_mem.1': 3072, 'hw:numa_mem.0': 1024, 'hw:numa_cpus.0': '0,1', 'hw:numa_cpus.1': '2,3,4,5', 'hw:cpu_cores': 2, 'hw:cpu_threads_policy': 'avoid', 'hw:numa_nodes': 2}
-outputs: {}
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_cpu_allocations.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_cpu_allocations.yaml
deleted file mode 100644
index 4d593b5aa..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_cpu_allocations.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 40
- ram: 4096
- vcpus: 6
- extra_specs: {'hw:cpu_policy': 'dedicated', 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2, 'hw:cpu_cores': 2, 'hw:cpu_threads_policy': 'avoid'}
-outputs: {}
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_huge_pages.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_huge_pages.yaml
deleted file mode 100644
index e22afe42a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_huge_pages.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 40
- ram: 4096
- vcpus: 6
- extra_specs: {'hw:mem_page_size': 'any'}
-outputs: {}
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes.yaml
deleted file mode 100644
index a89ed8c86..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 40
- ram: 4096
- vcpus: 6
- extra_specs: { 'hw:numa_mem.1': 3072, 'hw:numa_mem.0': 1024, 'hw:numa_cpus.0': '0,1', 'hw:numa_cpus.1': '2,3,4,5', 'hw:numa_nodes': 2}
-outputs: {}
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes_count.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes_count.yaml
deleted file mode 100644
index e2e657ef6..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_flavor_numa_nodes_count.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: {get_resource: VDU1_flavor}
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- VDU1_flavor:
- type: OS::Nova::Flavor
- properties:
- disk: 40
- ram: 4096
- vcpus: 6
- extra_specs: {'hw:numa_nodes': 2 }
-outputs: {}
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_generic_vnfd_params.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_generic_vnfd_params.yaml
deleted file mode 100644
index 317ddb701..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_generic_vnfd_params.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-description: >
- Generic VDU with parameterized image and flavor
-
-heat_template_version: 2013-05-23
-outputs:
- mgmt_ip-VDU1:
- value: {get_attr: [CP1, fixed_ips, 0, ip_address]}
-parameters:
- flavor:
- constraints:
- - allowed_values: [m1.tiny, m1.small, m1.medium, m1.large, m1.large]
- default: m1.large
- description: Flavor name for the server
- type: string
- image:
- default: cirros
- description: Image name for the server
- type: string
-resources:
- CP1:
- properties:
- network: net_mgmt
- port_security_enabled: False
- type: OS::Neutron::Port
- CP2:
- properties:
- network: pkt_in
- port_security_enabled: False
- type: OS::Neutron::Port
- CP3:
- properties:
- network: pkt_out
- port_security_enabled: False
- type: OS::Neutron::Port
- VDU1:
- properties:
- config_drive: False
- flavor: { get_param: flavor }
- image: { get_param: image }
- networks:
- - port: { get_resource: CP1 }
- - port: { get_resource: CP2 }
- - port: { get_resource: CP3 }
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_image.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_image.yaml
deleted file mode 100644
index 7af2aebbe..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_image.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- OpenWRT with services
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.nano
- image: {get_resource: VDU1_image}
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
-
- VDU1_image:
- type: OS::Glance::WebImage
- properties:
- container_format: bare
- disk_format: raw
- location: http://URL/vRouterVNF.qcow2
- name: vm_image
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_mac_ip.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_mac_ip.yaml
deleted file mode 100644
index 944ccc3b7..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_mac_ip.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- SecurityGroup Template
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.nano
- image: {get_resource: VDU1_image}
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- port_security_enabled: true
- mac_address: fe:1a:29:d9:36:43
- fixed_ips:
- - ip_address: 10.10.1.12
- VDU1_image:
- type: OS::Glance::WebImage
- properties:
- container_format: bare
- disk_format: raw
- location: http://URL/vRouterVNF.qcow2
- name: vm_image
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_mgmt_sriov.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_mgmt_sriov.yaml
deleted file mode 100644
index 3451f31ef..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_mgmt_sriov.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- SRIOV and management port example
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- CP1:
- properties: {'binding:vnic_type': direct, network: net-mgmt}
- type: OS::Neutron::Port
- CP2:
- properties: {network: net0}
- type: OS::Neutron::Port
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: numa-sriov
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- networks:
- - port: {get_resource: CP1}
- - port: {get_resource: CP2}
- type: OS::Nova::Server
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_monitoring_multi_vdu.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_monitoring_multi_vdu.yaml
deleted file mode 100644
index 351b7431a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_monitoring_multi_vdu.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Monitoring for multiple VDUs
-
-'
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
- mgmt_ip-VDU2:
- value:
- get_attr: [CP2, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP1}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
-
- CP1:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
-
- VDU2:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- networks:
- - port: {get_resource: CP2}
- user_data_format: SOFTWARE_CONFIG
- type: OS::Nova::Server
-
- CP2:
- properties: {network: net_mgmt, port_security_enabled: false}
- type: OS::Neutron::Port
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt.yaml
deleted file mode 100644
index 75dbdb037..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'OpenWRT with services
-
- '
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.tiny
- image: OpenWRT
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- port_security_enabled: false
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt_kilo.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt_kilo.yaml
deleted file mode 100644
index 7d42cfa12..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt_kilo.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'OpenWRT with services
-
- '
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.tiny
- image: OpenWRT
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- value_specs: {port_security_enabled: false}
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt_userdata.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt_userdata.yaml
deleted file mode 100644
index b10c0d97d..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_openwrt_userdata.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'OpenWRT with services
-
- '
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.tiny
- image: OpenWRT
- networks:
- - port:
- get_resource: CP1
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /home/openwrt/diskinfo
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- port_security_enabled: false
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_security_groups.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_security_groups.yaml
deleted file mode 100644
index f6baa1318..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_security_groups.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- SecurityGroup Template
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.nano
- image: {get_resource: VDU1_image}
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- port_security_enabled: true
- security_groups:
- - default
- - test_secgrp
-
- VDU1_image:
- type: OS::Glance::WebImage
- properties:
- container_format: bare
- disk_format: raw
- location: http://URL/vRouterVNF.qcow2
- name: vm_image
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_sriov.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_sriov.yaml
deleted file mode 100644
index d10c8fcad..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_sriov.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- SRIOV example
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- CP1:
- properties: {name: sriov, network: net-mgmt}
- type: OS::Neutron::Port
- CP2:
- properties: {'binding:vnic_type': direct, network: sr3010}
- type: OS::Neutron::Port
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: numa-sriov
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- networks:
- - port: {get_resource: CP1}
- - port: {get_resource: CP2}
- type: OS::Nova::Server
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_vnfc.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_vnfc.yaml
deleted file mode 100644
index 027396a88..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_vnfc.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-heat_template_version: 2013-05-23
-parameters: {}
-resources:
- VDU1:
- type: OS::Nova::Server
- properties:
- config_drive: false
- flavor: m1.small
- image: Fedora
- networks:
- - port:
- get_resource: CP1
- user_data_format: SOFTWARE_CONFIG
- CP1:
- type: OS::Neutron::Port
- properties:
- network: existing_network_1
- port_security_enabled: false
- firewall_vnfc_create_config:
- type: OS::Heat::SoftwareConfig
- properties:
- config: 'echo "Test case for Tacker";'
- group: script
- firewall_vnfc_create_deploy:
- type: OS::Heat::SoftwareDeployment
- properties:
- config: {get_resource: firewall_vnfc_create_config}
- server: {get_resource: VDU1}
- depends_on:
- - VDU1
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_vnic_normal.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_vnic_normal.yaml
deleted file mode 100644
index cf9eb92f7..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/hot_tosca_vnic_normal.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-heat_template_version: 2013-05-23
-
-description: >
- VNIC Normal Port example
-
-outputs:
- mgmt_ip-VDU1:
- value:
- get_attr: [CP1, fixed_ips, 0, ip_address]
-parameters: {}
-resources:
- CP1:
- properties: {network: net-mgmt}
- type: OS::Neutron::Port
- CP2:
- properties: {'binding:vnic_type': normal, network: net0}
- type: OS::Neutron::Port
- VDU1:
- properties:
- availability_zone: nova
- config_drive: false
- flavor: m1.small
- image: OpenWRT
- user_data_format: SOFTWARE_CONFIG
- networks:
- - port: {get_resource: CP1}
- - port: {get_resource: CP2}
- type: OS::Nova::Server
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/param_data.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/param_data.yaml
deleted file mode 100644
index d6980cba9..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/param_data.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-flavor: m1.tiny
-reservation_id: 891cd152-3925-4c9e-9074-239a902b68d7
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test-tosca-vnfd-existing-block-storage.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test-tosca-vnfd-existing-block-storage.yaml
deleted file mode 100644
index e371fc83b..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test-tosca-vnfd-existing-block-storage.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- inputs:
- my_vol:
- default: 0dbf28ba-d0b7-4369-99ce-7a3c31dc996f
- description: volume id
- type: string
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- name: test-vdu-block-storage
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- name: test-cp
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VB1:
- type: tosca.nodes.BlockStorage.Tacker
- properties:
- volume_id: my_vol
-
- CB1:
- type: tosca.nodes.BlockStorageAttachment
- properties:
- location: /dev/vdb
- requirements:
- - virtualBinding:
- node: VDU1
- - virtualAttachment:
- node: VB1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca-vnfd-instance-reservation.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca-vnfd-instance-reservation.yaml
deleted file mode 100644
index 137ad1d70..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca-vnfd-instance-reservation.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNF TOSCA template with instance reservation input parameters
-
-metadata:
- template_name: sample-tosca-vnfd-instance-reservation
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: 'cde27e47-1c88-4bb7-a64e-8d7c69014e4f'
- reservation_metadata:
- resource_type: 'virtual_instance'
- id: '8b01bdf8-a47c-49ea-96f1-3504fccfc9d4'
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 1
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- order: 2
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
-
- properties:
- network_name: net0
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
-
-
- policies:
- - RSV:
- type: tosca.policies.tacker.Reservation
- reservation:
- start_actions: [SP_RSV]
- before_end_actions: [SP_RSV]
- end_actions: [noop]
- properties:
- lease_id: '6ff61be8-91c3-4874-8f1b-128a03a455cb'
- - SP_RSV:
- type: tosca.policies.tacker.Scaling
- properties:
- increment: 2
- cooldown: 120
- min_instances: 0
- max_instances: 2
- default_instances: 0
- targets: [VDU1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_allowed_address_pairs.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_allowed_address_pairs.yaml
deleted file mode 100644
index 2bb2b53c0..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_allowed_address_pairs.yaml
+++ /dev/null
@@ -1,105 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VIP Template
-
-metadata:
- template_name: vipTemplate
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- artifacts:
- vm_image:
- type: tosca.artifacts.Deployment.Image.VM
- file: http://URL/vRouterVNF.qcow2
- properties:
- flavor: m1.nano
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- ip_address: 10.10.1.11
- anti_spoofing_protection: true
- allowed_address_pairs:
- - ip_address: 10.10.1.12
- security_groups:
- - default
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
- VCP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- ip_address: 10.10.1.12
- anti_spoofing_protection: true
- security_groups:
- - default
- requirements:
- - virtualLink:
- node: VL1
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- mac_address: fe:1a:29:d9:36:45
- ip_address: 10.10.2.21
- anti_spoofing_protection: true
- allowed_address_pairs:
- - ip_address: 10.10.2.22
- - ip_address: 10.10.2.23
- mac_address: fe:1a:29:d9:36:45
- security_groups:
- - default
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
- VCP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- ip_address: 10.10.2.22
- anti_spoofing_protection: true
- security_groups:
- - default
- requirements:
- - virtualLink:
- node: VL2
- VCP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- ip_address: 10.10.2.23
- anti_spoofing_protection: true
- security_groups:
- - default
- requirements:
- - virtualLink:
- node: VL2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_2
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_cvnf.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_cvnf.yaml
deleted file mode 100644
index fa3e60421..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_cvnf.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: A sample containerized VNF with one container per VDU
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- mapping_ports:
- - 80:80
- namespace: default
- vnfcs:
- web_server:
- num_cpus: 0.2
- mem_size: 100 MB
- image: ubuntu:16.04
- config: |
- param0: key1
- param1: key2
- CP11:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL11
- - virtualBinding:
- node: VDU1
- VL11:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: k8s-pod-subnet
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor.yaml
deleted file mode 100644
index a543b2903..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 2
- disk_size: 10 GB
- mem_size: 512 MB
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_and_capabilities.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_and_capabilities.yaml
deleted file mode 100644
index 9f076082b..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_and_capabilities.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 2
- disk_size: 10 GB
- mem_size: 512 MB
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- flavor: m1.nano
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_defaults.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_defaults.yaml
deleted file mode 100644
index 3a19ff5c0..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_defaults.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- mem_size: 512
- num_cpus: 1
- disk_size: 10 GB
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_no_units.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_no_units.yaml
deleted file mode 100644
index 576f007c2..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_flavor_no_units.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 2
- disk_size: 2048
- mem_size: 512
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_image.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_image.yaml
deleted file mode 100644
index 1c5af18cf..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_image.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- artifacts:
- vm_image:
- type: tosca.artifacts.Deployment.Image.VM
- file: http://URL/vRouterVNF.qcow2
- properties:
- flavor: m1.nano
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_mac_ip.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_mac_ip.yaml
deleted file mode 100644
index 971de767b..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_mac_ip.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: SecurityGroup Template
-
-metadata:
- template_name: SecurityGroup
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- artifacts:
- vm_image:
- type: tosca.artifacts.Deployment.Image.VM
- file: http://URL/vRouterVNF.qcow2
- properties:
- flavor: m1.nano
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: true
- mac_address: fe:1a:29:d9:36:43
- ip_address: 10.10.1.12
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt.yaml
deleted file mode 100644
index c03a8d4c6..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: OpenWRT
- flavor: m1.tiny
- config: |
- param0: key1
- param1: key2
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt_param.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt_param.yaml
deleted file mode 100644
index 7060356e6..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt_param.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- inputs:
- flavor:
- type: string
- reservation_id:
- type: string
-
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: OpenWRT
- flavor: {get_input: flavor}
- reservation: {get_input: reservation_id}
- config: |
- param0: key1
- param1: key2
- mgmt_driver: openwrt
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt_userdata.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt_userdata.yaml
deleted file mode 100644
index 9150437c2..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_openwrt_userdata.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: OpenWRT
- flavor: m1.tiny
- config: |
- param0: key1
- param1: key2
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- user_data_format: RAW
- user_data: |
- #!/bin/sh
- echo "my hostname is `hostname`" > /tmp/hostname
- df -h > /home/openwrt/diskinfo
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_parser_failure.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_parser_failure.yaml
deleted file mode 100644
index e7a7b105e..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_parser_failure.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_post_process_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_post_process_template.yaml
deleted file mode 100644
index c9941d1d7..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_post_process_template.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: OpenWRT
- flavor: m1.tiny
- reservation_metadata:
- resource_type: physical_host
- id: 459e94c9-efcd-4320-abf5-8c18cd82c331
- config: |
- param0: key1
- param1: key2
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- metadata: {metering.server_group: VDU1-2e3261d9-1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- type: sriov
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
-
- policies:
- - vdu1_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: affinity
- strict: true
- description: Apply affinity placement policy to the application servers
- targets: [ VDU1 ]
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_respawning:
- event_type:
- type: tosca.events.resource.utilization
- implementation: Ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 60
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: VDU1-2e3261d9-1
- action: ''
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_security_groups.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_security_groups.yaml
deleted file mode 100644
index 54d3f3685..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_security_groups.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: SecurityGroup Template
-
-metadata:
- template_name: SecurityGroup
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- artifacts:
- vm_image:
- type: tosca.artifacts.Deployment.Image.VM
- file: http://URL/vRouterVNF.qcow2
- properties:
- flavor: m1.nano
- mgmt_driver: noop
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: true
- security_groups:
- - default
- - test_secgrp
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: existing_network_1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfc.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfc.yaml
deleted file mode 100644
index c1f628053..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfc.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-metadata:
- template_name: sample-tosca-vnfd-for-vnfc
-
-topology_template:
- node_templates:
- firewall_vnfc:
- type: tosca.nodes.nfv.VNFC.Tacker
- requirements:
- - host: VDU1
- interfaces:
- Standard:
- create: install_vnfc.sh
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: Fedora
- flavor: m1.small
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net1
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_multi_actions.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_multi_actions.yaml
deleted file mode 100644
index bb243bb8b..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_multi_actions.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: VDU1-2e3261d9-1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- mon_policy_multi_actions:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: VDU1-2e3261d9-1
- actions: [respawn, log]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_respawn.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_respawn.yaml
deleted file mode 100644
index 7c3cef168..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_respawn.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: VDU1-2e3261d9-1}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_respawning:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: VDU1-2e3261d9-1
- action: [respawn]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_scale.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_scale.yaml
deleted file mode 100644
index 24aae263f..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/test_tosca_vnfd_alarm_scale.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: SG1-2e3261d9-14}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- properties:
- increment: 1
- cooldown: 120
- min_instances: 1
- max_instances: 3
- default_instances: 2
- targets: [ VDU1 ]
-
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_scaling_out:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: SG1-2e3261d9-14
- action: [SP1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_metadata.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_metadata.yaml
deleted file mode 100644
index 7aaf1621b..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_metadata.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: >
- An exception will be raised when having the mismatched metadata
- (metadata is described in monitoring policy but unavailable in
- VDU properties).
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_respawning:
- event_type:
- type: tosca.events.resource.utilization
- implementation: Ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 60
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: VDU1-2e3261d9-1
- action: ''
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_respawn.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_respawn.yaml
deleted file mode 100644
index 88ee59fe2..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_respawn.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- disk_size: 1 GB
- mem_size: 512 MB
- num_cpus: 2
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- metadata: {metering.server_group: VDU1-2e3261d9-1}
-
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - vdu1_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_respawning:
- event_type:
- type: tosca.events.resource.utilization
- implementation: Ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 60
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: VDU1-2e3261d9-1
- action: ''
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_scale.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_scale.yaml
deleted file mode 100644
index da9628ec8..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_alarm_scale.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: sample-tosca-vnfd-scaling
-
-metadata:
- template_name: sample-tosca-vnfd-scaling
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- flavor: m1.tiny
- metadata: {metering.server_group: SG1-2e3261d9-14}
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- properties:
- targets: [VDU1]
- increment: 1
- cooldown: 60
- min_instances: 1
- max_instances: 3
- default_instances: 2
-
- - vdu_cpu_usage_monitoring_policy:
- type: tosca.policies.tacker.Alarming
- triggers:
- vdu_hcpu_usage_scaling_out:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 50
- constraint: utilization greater_than 50%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: gt
- metadata: SG1-2e3261d9-14
- action: [SP1]
-
- vdu_lcpu_usage_scaling_in:
- event_type:
- type: tosca.events.resource.utilization
- implementation: ceilometer
- metric: cpu_util
- condition:
- threshold: 10
- constraint: utilization less_than 10%
- granularity: 600
- evaluations: 1
- aggregation_method: mean
- resource_type: instance
- comparison_operator: lt
- metadata: SG1-2e3261d9-14
- action: [SP1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_artifacts_image_vnfd_params.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_artifacts_image_vnfd_params.yaml
deleted file mode 100644
index a2331147a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_artifacts_image_vnfd_params.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Generic VDU with parameterized image and flavor
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
-
- inputs:
-
- image_source:
- type: string
- description: Image source for the server
- default: OpenWRT
-
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: m1.tiny
- flavor: m1.tiny
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- artifacts:
- VNFImage:
- type: tosca.artifacts.Deployment.Image.VM
- file: { get_input: image_source }
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: pkt_in
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: pkt_out
- vendor: Tacker
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_block_storage.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_block_storage.yaml
deleted file mode 100644
index 00c3c1db7..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_block_storage.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 1
- mem_size: 512 MB
- disk_size: 1 GB
- properties:
- image: cirros-0.5.2-x86_64-disk
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VB1:
- type: tosca.nodes.BlockStorage.Tacker
- properties:
- size: 1 GB
- image: cirros-0.5.2-x86_64-disk
-
- CB1:
- type: tosca.nodes.BlockStorageAttachment
- properties:
- location: /dev/vdb
- requirements:
- - virtualBinding:
- node: VDU1
- - virtualAttachment:
- node: VB1
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_all_numa_count.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_all_numa_count.yaml
deleted file mode 100644
index ecac354c6..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_all_numa_count.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 8
- disk_size: 10 GB
- mem_size: 4096 MB
- mem_page_size: any
- numa_node_count: 2
- cpu_allocation:
- cpu_affinity: dedicated
- thread_allocation: avoid
- socket_count: 2
- thread_count: 2
- core_count: 2
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_all_numa_nodes.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_all_numa_nodes.yaml
deleted file mode 100644
index 3d2f4b0e5..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_all_numa_nodes.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 6
- disk_size: 40 GB
- mem_size: 4096 MB
- mem_page_size: any
- numa_nodes:
- node0:
- id: 0
- vcpus: [0, 1]
- mem_size: 1024
- node1:
- id: 1
- vcpus: [2,3,4,5]
- mem_size: 3072
- cpu_allocation:
- cpu_affinity: dedicated
- thread_allocation: avoid
- socket_count: 2
- thread_count: 2
- core_count: 2
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_cpu_allocations.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_cpu_allocations.yaml
deleted file mode 100644
index 578fed47e..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_cpu_allocations.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 6
- disk_size: 40 GB
- mem_size: 4096 MB
- cpu_allocation:
- cpu_affinity: dedicated
- thread_allocation: avoid
- socket_count: 2
- thread_count: 2
- core_count: 2
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_huge_pages.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_huge_pages.yaml
deleted file mode 100644
index b5f8b49e6..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_huge_pages.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 6
- disk_size: 40 GB
- mem_size: 4096 MB
- mem_page_size: any
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_numa_nodes.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_numa_nodes.yaml
deleted file mode 100644
index 8beba4ec7..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_numa_nodes.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 6
- disk_size: 40 GB
- mem_size: 4096 MB
- numa_nodes:
- node0:
- id: 0
- vcpus: [0, 1]
- mem_size: 1024
- node1:
- id: 1
- vcpus: [2,3,4,5]
- mem_size: 3072
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_numa_nodes_count.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_numa_nodes_count.yaml
deleted file mode 100644
index 44cbf4ce6..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_numa_nodes_count.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 6
- disk_size: 40 GB
- mem_size: 4096 MB
- numa_nodes:
- node0:
- id: 0
- vcpus: [0, 1]
- mem_size: 1024
- node1:
- id: 1
- vcpus: [2,3,4,5]
- mem_size: 3072
- numa_node_count: 2
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_with_wrong_cpu.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_with_wrong_cpu.yaml
deleted file mode 100644
index c0409b1a9..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_flavor_with_wrong_cpu.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: OpenWRT with services
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- capabilities:
- nfv_compute:
- properties:
- num_cpus: 8
- disk_size: 10 GB
- mem_size: 4096 MB
- mem_page_size: any
- numa_node_count: 2
- cpu_allocation:
- cpu_affinity: dedicatedss
- thread_allocation: avoid
- socket_count: 2
- thread_count: 2
- core_count: 2
- properties:
- image: OpenWRT
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_generic_vnfd_params.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_generic_vnfd_params.yaml
deleted file mode 100644
index 6137f7354..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_generic_vnfd_params.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Generic VDU with parameterized image and flavor
-
-metadata:
- template_name: OpenWRT
-
-topology_template:
-
- inputs:
- flavor:
- type: string
- description: Flavor name for the server
- constraints:
- - valid_values: [ m1.tiny, m1.small, m1.medium, m1.large, m1.large ]
- default: m1.tiny
-
- image:
- type: string
- description: Image name for the server
- default: OpenWRT
-
- node_templates:
-
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: { get_input: image }
- flavor: { get_input: flavor }
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- CP3:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL3
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: pkt_in
- vendor: Tacker
-
- VL3:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: pkt_out
- vendor: Tacker
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_invalid_vnffgd_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_invalid_vnffgd_template.yaml
deleted file mode 100644
index 82d5e9341..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_invalid_vnffgd_template.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_one
- classifier:
- blah: tenant1_net
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- path:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 3
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_mgmt_sriov.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_mgmt_sriov.yaml
deleted file mode 100644
index 64dd65757..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_mgmt_sriov.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: SRIOV and management port example
-
-metadata:
- template_name: sample-SRIOV-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: OpenWRT
- flavor: numa-sriov
- availability_zone: nova
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- type: sriov
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net-mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_monitoring_multi_vdu.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_monitoring_multi_vdu.yaml
deleted file mode 100644
index 32c005ac5..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_monitoring_multi_vdu.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Monitoring for multiple VDUs
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- flavor: m1.tiny
- availability_zone: nova
- mgmt_driver: noop
- config: |
- param0: key1
- param1: key2
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- order: 0
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_nsd_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_nsd_template.yaml
deleted file mode 100644
index e8f81e3f5..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_nsd_template.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-imports:
- - VNF1
- - VNF2
-
-topology_template:
- inputs:
- vl1_name:
- type: string
- description: name of VL1 virtuallink
- default: net_mgmt
- vl2_name:
- type: string
- description: name of VL2 virtuallink
- default: net0
-
- node_templates:
- VNF1:
- type: tosca.nodes.nfv.VNF1
- requirements:
- - virtualLink1: VL1
- - virtualLink2: VL2
-
- VNF2:
- type: tosca.nodes.nfv.VNF2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl1_name}
- vendor: tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: {get_input: vl2_name}
- vendor: tacker
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_affinity.yaml
deleted file mode 100644
index e94ab1b91..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_affinity.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: affinity
- strict: true
- description: Apply affinity placement policy to the application servers
- targets: [ VDU1, VDU2 ]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_anti_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_anti_affinity.yaml
deleted file mode 100644
index 38604185d..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_anti_affinity.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: anti-affinity
- strict: true
- description: Apply anti-affinity placement policy to the application servers
- targets: [ VDU1, VDU2 ]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_default_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_default_affinity.yaml
deleted file mode 100644
index 5ce100795..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_default_affinity.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- description: Apply default placement policy to the application servers
- targets: [ VDU1, VDU2 ]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_soft_anti_affinity.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_soft_anti_affinity.yaml
deleted file mode 100644
index ac9ffbe64..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_placement_policy_soft_anti_affinity.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Demo example
-
-metadata:
- template_name: sample-tosca-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VDU2:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- flavor: m1.tiny
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU2
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
-
- policies:
- - my_compute_placement_policy:
- type: tosca.policies.tacker.Placement
- properties:
- policy: anti-affinity
- strict: false
- description: Apply anti-affinity placement policy to the application servers
- targets: [ VDU1, VDU2 ]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_scale.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_scale.yaml
deleted file mode 100644
index ba150ca25..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_scale.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: sample-tosca-vnfd-scaling
-
-metadata:
- template_name: sample-tosca-vnfd-scaling
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: cirros-0.5.2-x86_64-disk
- mgmt_driver: noop
- availability_zone: nova
- flavor: m1.tiny
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- anti_spoofing_protection: false
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net_mgmt
- vendor: Tacker
-
- policies:
- - SP1:
- type: tosca.policies.tacker.Scaling
- properties:
- targets: [VDU1]
- increment: 1
- cooldown: 60
- min_instances: 1
- max_instances: 3
- default_instances: 2
-
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_sriov.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_sriov.yaml
deleted file mode 100644
index cb15b747a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_sriov.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: SRIOV example
-
-metadata:
- template_name: sample-SRIOV-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: OpenWRT
- flavor: numa-sriov
- availability_zone: nova
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- name: sriov
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- type: sriov
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net-mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: sr3010
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_dupl_criteria_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_dupl_criteria_template.yaml
deleted file mode 100644
index 7226fc923..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_dupl_criteria_template.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_one
- classifier:
- network_name: tenant1_net
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- - name: classifier_two
- classifier:
- network_name: tenant1_net
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
-
- path:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 3
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_legacy_template_for_update.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_legacy_template_for_update.yaml
deleted file mode 100644
index e763213d4..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_legacy_template_for_update.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template for update
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - network_name: tenant2_net
- source_port_range: 80-1024
- ip_proto: 17
- ip_dst_prefix: 192.168.1.3/24
- path:
- - forwarder: VNF1
- capability: CP11
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP11]
- constituent_vnfs: [VNF1]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_multi_param_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_multi_param_template.yaml
deleted file mode 100644
index 1a54ae157..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_multi_param_template.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Sample VNFFG parameterized template
-
-topology_template:
-
- inputs:
- net_src_port_id:
- type: string
- description: Port UUID of source VM.
-
- dst_port_range:
- type: string
- description: Destination port range
-
- ip_dst_pre:
- type: string
- description: Cidr format of destination ip.
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP12->CP22)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_one
- classifier:
- network_src_port_id: { get_input: net_src_port_id }
- destination_port_range: { get_input: dst_port_range }
- ip_proto: 6
- ip_dst_prefix: { get_input: ip_dst_pre }
- path:
- - forwarder: VNFD1
- capability: CP12
- - forwarder: VNFD2
- capability: CP22
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 5
- dependent_virtual_link: [VL12,VL22]
- connection_point: [CP12,CP22]
- constituent_vnfs: [VNFD1,VNFD2]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_no_classifier_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_no_classifier_template.yaml
deleted file mode 100644
index 9c56bd63d..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_no_classifier_template.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: Example VNFFG template without any flow classifiers
-
-topology_template:
- node_templates:
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- path:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 3
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_param_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_param_template.yaml
deleted file mode 100644
index ad04ae87a..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_param_template.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template
-
-topology_template:
-
- inputs:
- net_src_port_id:
- type: string
- description: Port UUID of source VM.
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_one
- classifier:
- network_src_port_id: { get_input: net_src_port_id }
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- path:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 3
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_str_param_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_str_param_template.yaml
deleted file mode 100644
index 779af5dec..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_str_param_template.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template
-
-topology_template:
-
- inputs:
- net_name:
- type: string
- description: Name of the network
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_one
- classifier:
- network_name: { get_input: net_name }
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- path:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 5
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_template.yaml
deleted file mode 100644
index 5994fab2e..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_template.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template
-
-metadata:
- template_name: example_vnffgd
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_one
- classifier:
- network_name: tenant1_net
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- path:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 3
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_template_for_update.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_template_for_update.yaml
deleted file mode 100644
index 293e52701..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_template_for_update.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template for update
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_two
- classifier:
- network_name: tenant2_net
- source_port_range: 80-1024
- ip_proto: 17
- ip_dst_prefix: 192.168.1.3/24
- path:
- - forwarder: VNF1
- capability: CP11
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP11]
- constituent_vnfs: [VNF1]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_wrong_cp_number_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_wrong_cp_number_template.yaml
deleted file mode 100644
index 3fa058719..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnffgd_wrong_cp_number_template.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: Example VNFFG template
-
-topology_template:
-
- node_templates:
-
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
- description: creates path (CP11->CP12->CP32)
- properties:
- id: 51
- policy:
- type: ACL
- criteria:
- - name: classifier_one
- classifier:
- blah: tenant1_net
- destination_port_range: 80-1024
- ip_proto: 6
- ip_dst_prefix: 192.168.1.2/24
- path:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 4
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnic_port.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnic_port.yaml
deleted file mode 100644
index 1b43cd6d0..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/tosca_vnic_port.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-
-description: VNIC Normal Port example
-
-metadata:
- template_name: sample-vnic-normal-vnfd
-
-topology_template:
- node_templates:
- VDU1:
- type: tosca.nodes.nfv.VDU.Tacker
- properties:
- image: OpenWRT
- flavor: m1.small
- availability_zone: nova
- mgmt_driver: openwrt
- monitoring_policy:
- name: ping
- actions:
- failure: respawn
- parameters:
- count: 3
- interval: 10
- config: |
- param0: key1
- param1: key2
-
- CP1:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- management: true
- requirements:
- - virtualLink:
- node: VL1
- - virtualBinding:
- node: VDU1
-
- CP2:
- type: tosca.nodes.nfv.CP.Tacker
- properties:
- type: vnic
- requirements:
- - virtualLink:
- node: VL2
- - virtualBinding:
- node: VDU1
-
- VL1:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net-mgmt
- vendor: Tacker
-
- VL2:
- type: tosca.nodes.nfv.VL
- properties:
- network_name: net0
- vendor: Tacker
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_config_data.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_config_data.yaml
deleted file mode 100644
index 5d95165c0..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_config_data.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-vdus:
- vdu1:
- config:
- firewall: |
- package firewall
-
- config defaults
- option syn_flood '3'
- option input 'ACCEPT'
- option output 'ACCEPT'
- option forward 'ACCEPT'
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_invalid_param_data.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_invalid_param_data.yaml
deleted file mode 100644
index d6980cba9..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_invalid_param_data.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-flavor: m1.tiny
-reservation_id: 891cd152-3925-4c9e-9074-239a902b68d7
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_new_param_data.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_new_param_data.yaml
deleted file mode 100644
index 5e1b392e4..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_new_param_data.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-flavor: m1.tiny
-reservation_id: 99999999-3925-4c9e-9074-239a902b68d7
-new_param_key: new_param_value
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_param_data.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_param_data.yaml
deleted file mode 100644
index e40bbdc34..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/update_param_data.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-flavor: m1.tiny
-reservation_id: 99999999-3925-4c9e-9074-239a902b68d7
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffg_multi_params.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffg_multi_params.yaml
deleted file mode 100644
index 0d2ffd6c1..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffg_multi_params.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- 'net_src_port_id': '640dfd77-c92b-45a3-b8fc-22712de480e1',
- dst_port_range: 80-1024,
- ip_dst_pre: 192.168.1.2/24
-}
\ No newline at end of file
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffg_params.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffg_params.yaml
deleted file mode 100644
index dcc1c2a0e..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffg_params.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- 'net_src_port_id': '640dfd77-c92b-45a3-b8fc-22712de480e1'
-}
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffgd_template.yaml b/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffgd_template.yaml
deleted file mode 100644
index c049ff0fc..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/data/vnffgd_template.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
- Forwarding_path1:
- type: tosca.nodes.nfv.FP
- id: 51
- description: creates path (CP11->CP12->CP32)
- properties:
- policy:
- type: ACL
- criteria:
- - network_name: tenant1_net
- - destination_port_range: 80-1024
- - ip_proto: 6
- - ip_dst_prefix: 192.168.1.2/24
- requirements:
- - forwarder: VNF1
- capability: CP11
- - forwarder: VNF1
- capability: CP12
- - forwarder: VNF3
- capability: CP32
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: HTTP to Corporate Net
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 3
- dependent_virtual_link: [VL1,VL2,VL3]
- connection_point: [CP11,CP12,CP32]
- constituent_vnfs: [VNF1,VNF3]
- members: [Forwarding_path1]
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/fixture_data/fixture_data_utils.py b/tacker/tests/unit/vnfm/infra_drivers/openstack/fixture_data/fixture_data_utils.py
index d436b0a72..9dbcc482b 100644
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/fixture_data/fixture_data_utils.py
+++ b/tacker/tests/unit/vnfm/infra_drivers/openstack/fixture_data/fixture_data_utils.py
@@ -375,11 +375,15 @@ def get_vim_connection_info_object():
def get_vnfd_dict():
- filename = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "../data/",
- 'test_tosca_image.yaml')
+ def_dir = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "../data/etsi_nfv")
+ filename = os.path.join(def_dir, 'tosca_vnfd.yaml')
with open(filename) as f:
- vnfd_dict = {'vnfd': {'attributes': {'vnfd': str(yaml.safe_load(f))}}}
+ vnfd = yaml.safe_load(f)
+ vnfd['imports'] = [
+ f'{def_dir}/etsi_nfv_sol001_common_types.yaml',
+ f'{def_dir}/etsi_nfv_sol001_vnfd_types.yaml']
+ vnfd_dict = {'vnfd': {'attributes': {'vnfd': str(vnfd)}}}
vnfd_dict.update({'id': '7ed39362-c551-4ce7-9ad2-17a98a6cee3d',
'name': None, 'attributes': {'param_values': "",
'stack_name': 'vnflcm_7ed39362-c551-4ce7-9ad2-17a98a6cee3d'},
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack.py b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack.py
index d6f0450c1..f7477ad49 100644
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack.py
+++ b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack.py
@@ -18,67 +18,12 @@ import os
from unittest import mock
import yaml
-from oslo_serialization import jsonutils
-
from tacker import context
-from tacker.db.common_services import common_services_db_plugin
-from tacker.extensions import vnfm
-from tacker.objects import fields
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.vnfm.infra_drivers.openstack import openstack
-vnf_dict = {
- 'attributes': {
- 'heat_template': {
- 'outputs': {
- 'mgmt_ip-VDU1': {
- 'value': {
- 'get_attr': [
- 'CP1', 'fixed_ips', 0, 'ip_address']
- }
- }
- },
- 'description': 'Demo example\n',
- 'parameters': {},
- 'resources': {
- 'VDU1': {
- 'type': 'OS::Nova::Server',
- 'properties': {
- 'user_data_format': 'SOFTWARE_CONFIG',
- 'availability_zone': 'nova',
- 'image': 'cirros-0.5.2-x86_64-disk',
- 'config_drive': False,
- 'flavor': {'get_resource': 'VDU1_flavor'},
- 'networks': [{'port': {'get_resource': 'CP1'}}]
- }
- },
- 'CP1': {
- 'type': 'OS::Neutron::Port',
- 'properties': {
- 'port_security_enabled': False,
- 'network': 'net_mgmt'
- }
- },
- 'VDU1_flavor': {
- 'type': 'OS::Nova::Flavor',
- 'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
- }
- }
- }
- },
- 'status': 'ACTIVE',
- 'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
- 'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
- 'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
- 'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
- 'placement_attr': {'vim_name': 'openstack-vim'},
- 'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
- 'name': 'vnf_create_1'
-}
-
-
class FakeHeatClient(mock.Mock):
class Stack(mock.Mock):
@@ -101,23 +46,12 @@ def _get_template(name):
class TestOpenStack(base.TestCase):
- hot_template = _get_template('hot_openwrt.yaml')
- hot_param_template = _get_template('hot_openwrt_params.yaml')
- hot_ipparam_template = _get_template('hot_openwrt_ipparams.yaml')
- tosca_vnfd_openwrt = _get_template('test_tosca_openwrt.yaml')
- tosca_vnfd_openwrt_param = _get_template('test_tosca_openwrt_param.yaml')
- config_data = _get_template('config_data.yaml')
def setUp(self):
super(TestOpenStack, self).setUp()
self.context = context.get_admin_context()
self.infra_driver = openstack.OpenStack()
self._mock_heat_client()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin = \
- common_services_db_plugin.CommonServicesPluginDb()
self.addCleanup(mock.patch.stopall)
yaml.SafeLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
@@ -135,466 +69,9 @@ class TestOpenStack(base.TestCase):
patcher = mock.patch(target, new)
return patcher.start()
- def _get_vnfd(self, template):
- return {'vnfd': {'attributes': {'vnfd': template}}}
-
- def _get_expected_vnfd(self, template):
- return {'attributes': {'vnfd': template},
- 'description': 'OpenWRT with services',
- 'mgmt_driver': 'openwrt', 'name': 'OpenWRT',
- 'service_types': [{'service_type': 'vnfd',
- 'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec'}
-
- def _get_expected_vnf_update_obj(self):
- return {'status': 'PENDING_CREATE', 'instance_id': None, 'name':
- 'test_openwrt', 'tenant_id':
- 'ad7ebc56538745a08ef7c5e97f8bd437', 'vnfd_id':
- 'eb094833-995e-49f0-a047-dfb56aaf7c4e', 'vnfd': {
- 'service_types': [{'service_type': 'vnfd', 'id':
- '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], 'description':
- 'OpenWRT with services', 'tenant_id':
- 'ad7ebc56538745a08ef7c5e97f8bd437', 'mgmt_driver': 'openwrt',
- 'attributes': {'vnfd': self.tosca_vnfd_openwrt},
- 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec', 'name':
- 'openwrt_services'}, 'mgmt_ip_address': None,
- 'service_context': [],
- 'attributes': {'config': utils.update_config_data},
- 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', 'description':
- 'OpenWRT with services'}
-
- def _get_expected_vnf_update_new_param_obj(self):
- return {'status': 'PENDING_CREATE', 'instance_id': None, 'name':
- 'test_openwrt', 'tenant_id':
- 'ad7ebc56538745a08ef7c5e97f8bd437', 'vnfd_id':
- 'eb094833-995e-49f0-a047-dfb56aaf7c4e', 'vnfd': {
- 'service_types': [{'service_type': 'vnfd', 'id':
- '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}], 'description':
- 'OpenWRT with services', 'tenant_id':
- 'ad7ebc56538745a08ef7c5e97f8bd437', 'mgmt_driver': 'openwrt',
- 'attributes': {'vnfd': self.tosca_vnfd_openwrt_param},
- 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec', 'name':
- 'openwrt_services'}, 'mgmt_url': None, 'service_context': [],
- 'attributes': {'heat_template': utils.hot_data,
- 'param_values': utils.update_new_param_data},
- 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123', 'description':
- 'OpenWRT with services'}
-
- def _get_expected_active_vnf(self):
- return {'status': 'ACTIVE',
- 'instance_id': None,
- 'name': 'test_openwrt',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vnfd': {
- 'service_types': [{
- 'service_type': 'vnfd',
- 'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
- 'description': 'OpenWRT with services',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'mgmt_driver': 'openwrt',
- 'infra_driver': 'heat',
- 'attributes': {'vnfd': self.tosca_vnfd_openwrt},
- 'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec',
- 'name': 'openwrt_services'},
- 'mgmt_ip_address': '{"vdu1": "192.168.120.31"}',
- 'service_context': [],
- 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'description': 'OpenWRT with services'}
-
def test_delete(self):
vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
self.infra_driver.delete(plugin=None, context=self.context,
vnf_id=vnf_id,
auth_attr=utils.get_vim_auth_obj())
self.heat_client.delete.assert_called_once_with(vnf_id)
-
- def test_update(self):
- vnf_obj = utils.get_dummy_vnf_config_attr()
- vnf_config_obj = utils.get_dummy_vnf_update_config()
- expected_vnf_update = self._get_expected_vnf_update_obj()
- vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
- self.infra_driver.update(plugin=None, context=self.context,
- vnf_id=vnf_id, vnf_dict=vnf_obj,
- vnf=vnf_config_obj,
- auth_attr=utils.get_vim_auth_obj())
- expected_vnf_update['attributes']['config'] = yaml.safe_load(
- expected_vnf_update['attributes']['config'])
- vnf_obj['attributes']['config'] = yaml.safe_load(vnf_obj['attributes'][
- 'config'])
- self.assertEqual(expected_vnf_update, vnf_obj)
-
- @mock.patch(
- 'tacker.vnfm.infra_drivers.openstack.vdu.Vdu')
- def test_heal_vdu(self, mock_vdu):
- self.infra_driver.heal_vdu(None, self.context, vnf_dict,
- mock.ANY)
- mock_vdu.assert_called_once_with(self.context, vnf_dict,
- mock.ANY)
-
- @mock.patch(
- 'tacker.vnfm.infra_drivers.openstack.vdu.Vdu')
- @mock.patch('tacker.vnfm.infra_drivers.openstack.openstack.LOG')
- def test_heal_vdu_failed(self, mock_log, mock_vdu):
- mock_vdu.side_effect = Exception
- self.assertRaises(vnfm.VNFHealFailed, self.infra_driver.heal_vdu,
- None, self.context, vnf_dict,
- mock.ANY)
- mock_log.error.assert_called_with(
- "VNF '%s' failed to heal", vnf_dict['id'])
-
- def test_update_new_param(self):
- vnf_obj = utils.get_dummy_vnf_param_attr()
- vnf_param_obj = utils.get_dummy_vnf_update_new_param()
- expected_vnf_update = self._get_expected_vnf_update_new_param_obj()
- vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
- self.infra_driver.update(plugin=None, context=self.context,
- vnf_id=vnf_id, vnf_dict=vnf_obj,
- vnf=vnf_param_obj,
- auth_attr=utils.get_vim_auth_obj())
- expected_vnf_update['attributes']['param_values'] = yaml.safe_load(
- expected_vnf_update['attributes']['param_values'])
- vnf_obj['attributes']['param_values'] = yaml.safe_load(
- vnf_obj['attributes']['param_values'])
- self.assertEqual(expected_vnf_update, vnf_obj)
-
- @mock.patch('tacker.vnfm.infra_drivers.openstack.openstack.LOG')
- def test_update_invalid_param(self, mock_log):
- vnf_obj = utils.get_dummy_vnf_param_attr()
- vnf_param_obj = utils.get_dummy_vnf_update_invalid_param()
- vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
- self.assertRaises(vnfm.VNFUpdateInvalidInput,
- self.infra_driver.update,
- plugin=None, context=self.context,
- vnf_id=vnf_id, vnf_dict=vnf_obj,
- vnf=vnf_param_obj,
- auth_attr=utils.get_vim_auth_obj())
- log_msg = "at vnf_id {} because all parameters "\
- "match the existing one.".format(vnf_id)
- mock_log.error.assert_called_with(log_msg)
-
- @mock.patch('tacker.vnfm.infra_drivers.openstack.openstack.LOG')
- def test_update_empty_param(self, mock_log):
- vnf_obj = utils.get_dummy_vnf_param_attr()
- vnf_param_obj = utils.get_dummy_vnf_update_empty_param()
- vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
- self.assertRaises(vnfm.VNFUpdateInvalidInput,
- self.infra_driver.update,
- plugin=None, context=self.context,
- vnf_id=vnf_id, vnf_dict=vnf_obj,
- vnf=vnf_param_obj,
- auth_attr=utils.get_vim_auth_obj())
- log_msg = "at vnf_id {} because the target "\
- "yaml is empty.".format(vnf_id)
- mock_log.error.assert_called_with(log_msg)
-
- def _get_expected_fields_tosca(self, template):
- return {'stack_name':
- 'test_openwrt_eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'template': _get_template(template),
- 'timeout_mins': 10}
-
- def _get_expected_tosca_vnf(self,
- tosca_tpl_name,
- hot_tpl_name,
- param_values='',
- is_monitor=True,
- multi_vdus=False):
- tosca_tpl = _get_template(tosca_tpl_name)
- exp_tmpl = self._get_expected_vnfd(tosca_tpl)
- tosca_hw_dict = yaml.safe_load(_get_template(hot_tpl_name))
- dvc = {
- 'vnfd': exp_tmpl,
- 'description': 'OpenWRT with services',
- 'attributes': {
- 'heat_template': tosca_hw_dict,
- 'param_values': param_values
- },
- 'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
- 'instance_id': None,
- 'mgmt_ip_address': None,
- 'name': 'test_openwrt',
- 'service_context': [],
- 'status': 'PENDING_CREATE',
- 'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437'
- }
- # Add monitoring attributes for those yaml, which are having it
- if is_monitor:
- if multi_vdus:
- dvc['attributes'].update(
- {'monitoring_policy': bytes(
- '{"vdus": {"VDU1": {"ping": '
- '{"name": "ping", "actions": '
- '{"failure": "respawn"}, '
- '"parameters": {"count": 3, '
- '"interval": 10}, '
- '"monitoring_params": '
- '{"count": 3, "interval": 10}}}, '
- '"VDU2": {"ping": {"name": "ping", '
- '"actions": {"failure": "respawn"}, '
- '"parameters": {"count": 3, '
- '"interval": 10}, '
- '"monitoring_params": {"count": 3, '
- '"interval": 10}}}}}', 'utf-8')})
- else:
- dvc['attributes'].update(
- {'monitoring_policy': bytes(
- '{"vdus": {"VDU1": {"ping": '
- '{"name": "ping", "actions": '
- '{"failure": "respawn"}, '
- '"parameters": {"count": 3, '
- '"interval": 10}, '
- '"monitoring_params": '
- '{"count": 3, '
- '"interval": 10}}}}}', 'utf-8')})
-
- return dvc
-
- def _get_dummy_tosca_vnf(self, template, input_params=''):
-
- tosca_template = _get_template(template)
- vnf = utils.get_dummy_vnf()
- dtemplate = self._get_expected_vnfd(tosca_template)
-
- vnf['vnfd'] = dtemplate
- vnf['attributes'] = {}
- vnf['attributes']['param_values'] = input_params
- return vnf
-
- def _test_assert_equal_for_tosca_templates(self,
- tosca_tpl_name,
- hot_tpl_name,
- input_params='',
- files=None,
- is_monitor=True,
- multi_vdus=False):
- vnf = self._get_dummy_tosca_vnf(tosca_tpl_name, input_params)
- expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
- expected_fields = self._get_expected_fields_tosca(hot_tpl_name)
- expected_vnf = self._get_expected_tosca_vnf(tosca_tpl_name,
- hot_tpl_name,
- input_params,
- is_monitor,
- multi_vdus)
- vnf['before_error_point'] = fields.ErrorPoint.PRE_VIM_CONTROL
- result = self.infra_driver.create(plugin=None, context=self.context,
- vnf=vnf,
- auth_attr=utils.get_vim_auth_obj())
- del vnf['before_error_point']
- actual_fields = self.heat_client.create.call_args[0][0]
- actual_fields["template"] = yaml.safe_load(actual_fields["template"])
- expected_fields["template"] = \
- yaml.safe_load(expected_fields["template"])
-
- if files:
- for k, v in actual_fields["files"].items():
- actual_fields["files"][k] = yaml.safe_load(v)
-
- expected_fields["files"] = {}
- for k, v in files.items():
- expected_fields["files"][k] = yaml.safe_load(_get_template(v))
-
- self.assertEqual(expected_fields, actual_fields)
- vnf["attributes"]["heat_template"] = yaml.safe_load(
- vnf["attributes"]["heat_template"])
- self.heat_client.create.assert_called_once_with(expected_fields)
- self.assertEqual(expected_result, result)
-
- if files:
- expected_fields["files"] = {}
- for k, v in files.items():
- expected_vnf["attributes"][k] = yaml.safe_load(
- _get_template(v))
- vnf["attributes"][k] = yaml.safe_load(
- vnf["attributes"][k])
- expected_vnf["attributes"]['scaling_group_names'] = {
- 'SP1': 'SP1_group'}
- vnf["attributes"]['scaling_group_names'] = jsonutils.loads(
- vnf["attributes"]['scaling_group_names']
- )
- self.assertEqual(expected_vnf, vnf)
-
- def test_create_tosca(self):
- # self.skipTest("Not ready yet")
- self._test_assert_equal_for_tosca_templates('test_tosca_openwrt.yaml',
- 'hot_tosca_openwrt.yaml')
-
- def test_create_tosca_with_userdata(self):
- self._test_assert_equal_for_tosca_templates(
- 'test_tosca_openwrt_userdata.yaml',
- 'hot_tosca_openwrt_userdata.yaml')
-
- def test_create_tosca_with_new_flavor(self):
- self._test_assert_equal_for_tosca_templates('test_tosca_flavor.yaml',
- 'hot_flavor.yaml')
-
- def test_create_tosca_with_new_flavor_with_defaults(self):
- self._test_assert_equal_for_tosca_templates(
- 'test_tosca_flavor_defaults.yaml',
- 'hot_flavor_defaults.yaml')
-
- def test_create_tosca_with_placement_policy_anti_affinity(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_placement_policy_anti_affinity.yaml',
- 'hot_placement_policy_anti_affinity.yaml', is_monitor=False)
-
- def test_create_tosca_with_placement_policy_affinity(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_placement_policy_affinity.yaml',
- 'hot_placement_policy_affinity.yaml', is_monitor=False)
-
- def test_create_tosca_with_placement_policy_soft_anti_affinity(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_placement_policy_soft_anti_affinity.yaml',
- 'hot_placement_policy_soft_anti_affinity.yaml', is_monitor=False)
-
- def test_create_tosca_with_placement_policy_default_affinity(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_placement_policy_default_affinity.yaml',
- 'hot_placement_policy_default_affinity.yaml', is_monitor=False)
-
- def test_create_tosca_with_flavor_and_capabilities(self):
- self._test_assert_equal_for_tosca_templates(
- 'test_tosca_flavor_and_capabilities.yaml',
- 'hot_flavor_and_capabilities.yaml')
-
- def test_create_tosca_with_flavor_no_units(self):
- self._test_assert_equal_for_tosca_templates(
- 'test_tosca_flavor_no_units.yaml',
- 'hot_flavor_no_units.yaml')
-
- def test_create_tosca_with_flavor_extra_specs_all_numa_count(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_flavor_all_numa_count.yaml',
- 'hot_tosca_flavor_all_numa_count.yaml')
-
- def test_create_tosca_with_flavor_extra_specs_all_numa_nodes(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_flavor_all_numa_nodes.yaml',
- 'hot_tosca_flavor_all_numa_nodes.yaml')
-
- def test_create_tosca_with_flavor_extra_specs_numa_node_count_trumps(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_flavor_numa_nodes_count.yaml',
- 'hot_tosca_flavor_numa_nodes_count.yaml')
-
- def test_create_tosca_with_flavor_extra_specs_huge_pages(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_flavor_huge_pages.yaml',
- 'hot_tosca_flavor_huge_pages.yaml')
-
- def test_create_tosca_with_flavor_extra_specs_cpu_allocations(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_flavor_cpu_allocations.yaml',
- 'hot_tosca_flavor_cpu_allocations.yaml')
-
- def test_create_tosca_with_flavor_extra_specs_numa_nodes(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_flavor_numa_nodes.yaml',
- 'hot_tosca_flavor_numa_nodes.yaml')
-
- def test_create_tosca_with_new_image(self):
- self._test_assert_equal_for_tosca_templates('test_tosca_image.yaml',
- 'hot_tosca_image.yaml')
-
- def test_create_tosca_sriov(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_sriov.yaml',
- 'hot_tosca_sriov.yaml'
- )
-
- def test_create_tosca_vnic_normal(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_vnic_port.yaml',
- 'hot_tosca_vnic_normal.yaml'
- )
-
- def test_create_tosca_mgmt_sriov_port(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_mgmt_sriov.yaml',
- 'hot_tosca_mgmt_sriov.yaml'
- )
-
- def test_tosca_params(self):
- input_params = 'image: cirros\nflavor: m1.large'
- self._test_assert_equal_for_tosca_templates(
- 'tosca_generic_vnfd_params.yaml',
- 'hot_tosca_generic_vnfd_params.yaml',
- input_params
- )
-
- def test_tosca_artifacts_image_params(self):
- input_params = 'image_source: ' \
- 'http://download.cirros-cloud.net/' \
- '0.5.2/cirros-0.5.2-x86_64-disk.img'
- self._test_assert_equal_for_tosca_templates(
- 'tosca_artifacts_image_vnfd_params.yaml',
- 'hot_tosca_artifacts_image_vnfd_params.yaml',
- input_params
- )
-
- def test_create_tosca_scale(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_scale.yaml',
- 'hot_scale_main.yaml',
- files={'SP1_res.yaml': 'hot_scale_custom.yaml'},
- is_monitor=False
- )
-
- def test_get_resource_info(self):
- vnf_obj = self._get_expected_active_vnf()
- self.assertRaises(vnfm.InfraDriverUnreachable,
- self.infra_driver.get_resource_info,
- plugin=None, context=self.context, vnf_info=vnf_obj,
- auth_attr=utils.get_vim_auth_obj(),
- region_name=None)
-
- def test_create_port_with_security_groups(self):
- self._test_assert_equal_for_tosca_templates(
- 'test_tosca_security_groups.yaml',
- 'hot_tosca_security_groups.yaml'
- )
-
- def test_create_port_with_allowed_address_pairs(self):
- self._test_assert_equal_for_tosca_templates(
- 'test_tosca_allowed_address_pairs.yaml',
- 'hot_tosca_allowed_address_pairs.yaml'
- )
-
- def test_create_port_with_mac_and_ip(self):
- self._test_assert_equal_for_tosca_templates(
- 'test_tosca_mac_ip.yaml',
- 'hot_tosca_mac_ip.yaml'
- )
-
- def test_create_tosca_alarm_respawn(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_alarm_respawn.yaml',
- 'hot_tosca_alarm_respawn.yaml',
- is_monitor=False
- )
-
- def test_create_tosca_alarm_scale(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_alarm_scale.yaml',
- 'hot_tosca_alarm_scale.yaml',
- files={'SP1_res.yaml': 'hot_alarm_scale_custom.yaml'},
- is_monitor=False
- )
-
- def test_create_tosca_with_alarm_monitoring_not_matched(self):
- self.assertRaises(vnfm.MetadataNotMatched,
- self._test_assert_equal_for_tosca_templates,
- 'tosca_alarm_metadata.yaml',
- 'hot_tosca_alarm_metadata.yaml',
- is_monitor=False
- )
-
- def test_create_tosca_monitoring_multi_vdus(self):
- self._test_assert_equal_for_tosca_templates(
- 'tosca_monitoring_multi_vdu.yaml',
- 'hot_tosca_monitoring_multi_vdu.yaml',
- multi_vdus=True
- )
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py
index 1f4b468f0..c751b33da 100644
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py
+++ b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py
@@ -56,7 +56,7 @@ class FakeVNFMPlugin(mock.Mock):
def get_vnf(self, context, vnf_id):
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "./data/",
- 'test_tosca_image.yaml')
+ 'etsi_nfv/tosca_vnfd.yaml')
with open(filename) as f:
vnfd_dict = {'vnfd_simple': str(yaml.safe_load(f))}
return {
diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py
deleted file mode 100644
index 710ed7f05..000000000
--- a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Copyright 2018 NTT DATA
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from tacker import context
-from tacker.db.common_services import common_services_db_plugin
-from tacker.objects import heal_vnf_request
-from tacker.plugins.common import constants
-from tacker.tests.unit import base
-from tacker.vnfm.infra_drivers.openstack import vdu
-
-
-vnf_dict = {
- 'attributes': {
- 'heat_template': {
- 'outputs': {
- 'mgmt_ip-VDU1': {
- 'value': {
- 'get_attr': [
- 'CP1', 'fixed_ips', 0, 'ip_address']
- }
- }
- },
- 'description': 'Demo example\n',
- 'parameters': {},
- 'resources': {
- 'VDU1': {
- 'type': 'OS::Nova::Server',
- 'properties': {
- 'user_data_format': 'SOFTWARE_CONFIG',
- 'availability_zone': 'nova',
- 'image': 'cirros-0.5.2-x86_64-disk',
- 'config_drive': False,
- 'flavor': {'get_resource': 'VDU1_flavor'},
- 'networks': [{'port': {'get_resource': 'CP1'}}]
- }
- },
- 'CP1': {
- 'type': 'OS::Neutron::Port',
- 'properties': {
- 'port_security_enabled': False,
- 'network': 'net_mgmt'
- }
- },
- 'VDU1_flavor': {
- 'type': 'OS::Nova::Flavor',
- 'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
- }
- }
- }
- },
- 'status': 'ACTIVE',
- 'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
- 'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
- 'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
- 'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
- 'placement_attr': {'vim_name': 'openstack-vim'},
- 'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
- 'name': 'vnf_create_1'
-}
-
-
-class FakeHeatClient(mock.Mock):
-
- class Stack(mock.Mock):
- stack_status = 'CREATE_COMPLETE'
- outputs = [{'output_value': '192.168.120.31', 'description':
- 'management ip address', 'output_key': 'mgmt_ip-vdu1'}]
-
- def create(self, *args, **kwargs):
- return {'stack': {'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}}
-
- def get(self, id):
- return self.Stack()
-
- def update(self, stack_id, **kwargs):
- return self.Stack()
-
- def resource_mark_unhealthy(self, stack_id, resource_name,
- mark_unhealthy, resource_status_reason):
- return self.Stack()
-
-
-class TestVDU(base.TestCase):
-
- def setUp(self):
- super(TestVDU, self).setUp()
- self.context = context.get_admin_context()
- self._mock_heat_client()
-
- mock.patch('tacker.vnfm.vim_client.VimClient.get_vim').start()
- self.additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
- parameter='VDU1',
- cause=["Unable to reach while monitoring resource: 'VDU1'"])
- self.heal_request_data_obj = heal_vnf_request.HealVnfRequest(
- cause='VNF monitoring fails.',
- stack_id=vnf_dict['instance_id'],
- legacy_additional_params=[self.additional_paramas_obj])
- self.heal_vdu = vdu.Vdu(self.context, vnf_dict,
- self.heal_request_data_obj)
-
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin = \
- common_services_db_plugin.CommonServicesPluginDb()
- self.addCleanup(mock.patch.stopall)
-
- def _mock_heat_client(self):
- self.heat_client = mock.Mock(wraps=FakeHeatClient())
- fake_heat_client = mock.Mock()
- fake_heat_client.return_value = self.heat_client
- self._mock(
- 'tacker.vnfm.infra_drivers.openstack.heat_client.HeatClient',
- fake_heat_client)
-
- @mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
- def test_heal_vdu(self, mock_get_vim):
- mock_get_vim.return_value = mock.MagicMock()
-
- self.heal_vdu.heal_vdu()
-
- self.heat_client.update.assert_called_once_with(
- stack_id=vnf_dict['instance_id'], existing=True)
-
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, res_id=vnf_dict['id'],
- res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
- evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
- details=("HealVnfRequest invoked to update the stack '%s'" %
- vnf_dict['instance_id']))
-
- @mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
- def test_resource_mark_unhealthy(self, mock_get_vim):
- mock_get_vim.return_value = mock.MagicMock()
-
- self.heal_vdu._resource_mark_unhealthy()
-
- self.heat_client.resource_mark_unhealthy.assert_called_once_with(
- stack_id=vnf_dict['instance_id'],
- resource_name=self.additional_paramas_obj.parameter,
- mark_unhealthy=True,
- resource_status_reason=self.additional_paramas_obj.cause)
-
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, res_id=vnf_dict['id'],
- res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
- evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
- details="HealVnfRequest invoked to mark resource 'VDU1' "
- "to unhealthy.")
diff --git a/tacker/tests/unit/vnfm/monitor_drivers/__init__.py b/tacker/tests/unit/vnfm/monitor_drivers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/monitor_drivers/http_ping/__init__.py b/tacker/tests/unit/vnfm/monitor_drivers/http_ping/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/monitor_drivers/http_ping/test_http_ping.py b/tacker/tests/unit/vnfm/monitor_drivers/http_ping/test_http_ping.py
deleted file mode 100644
index 38bebfa1a..000000000
--- a/tacker/tests/unit/vnfm/monitor_drivers/http_ping/test_http_ping.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import testtools
-from unittest import mock
-from urllib import error as urlerr
-
-from tacker.vnfm.monitor_drivers.http_ping import http_ping
-
-
-class TestVNFMonitorHTTPPing(testtools.TestCase):
-
- def setUp(self):
- super(TestVNFMonitorHTTPPing, self).setUp()
- self.monitor_http_ping = http_ping.VNFMonitorHTTPPing()
-
- @mock.patch('urllib.request.urlopen')
- def test_monitor_call_for_success(self, mock_urlopen):
- test_vnf = {}
- test_kwargs = {
- 'mgmt_ip': 'a.b.c.d'
- }
- self.monitor_http_ping.monitor_call(test_vnf,
- test_kwargs)
- mock_urlopen.assert_called_once_with('http://a.b.c.d:80', timeout=5)
-
- @mock.patch('urllib.request.urlopen')
- def test_monitor_call_for_failure(self, mock_urlopen):
- mock_urlopen.side_effect = urlerr.URLError("MOCK Error")
- test_vnf = {}
- test_kwargs = {
- 'mgmt_ip': 'a.b.c.d'
- }
- monitor_return = self.monitor_http_ping.monitor_call(test_vnf,
- test_kwargs)
- self.assertEqual('failure', monitor_return)
-
- def test_monitor_url(self):
- test_vnf = {
- 'monitor_url': 'a.b.c.d'
- }
- test_monitor_url = self.monitor_http_ping.monitor_url(mock.ANY,
- mock.ANY,
- test_vnf)
- self.assertEqual('a.b.c.d', test_monitor_url)
diff --git a/tacker/tests/unit/vnfm/monitor_drivers/ping/__init__.py b/tacker/tests/unit/vnfm/monitor_drivers/ping/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/monitor_drivers/ping/test_ping.py b/tacker/tests/unit/vnfm/monitor_drivers/ping/test_ping.py
deleted file mode 100644
index b6a574746..000000000
--- a/tacker/tests/unit/vnfm/monitor_drivers/ping/test_ping.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import testtools
-from unittest import mock
-
-from tacker.vnfm.monitor_drivers.ping import ping
-
-
-class TestVNFMonitorPing(testtools.TestCase):
-
- def setUp(self):
- super(TestVNFMonitorPing, self).setUp()
- self.monitor_ping = ping.VNFMonitorPing()
-
- @mock.patch('tacker.agent.linux.utils.execute')
- def test_monitor_call_for_success(self, mock_utils_execute):
- test_vnf = {}
- test_kwargs = {
- 'mgmt_ip': 'a.b.c.d'
- }
- mock_ping_cmd = ['ping',
- '-c', 5,
- '-W', 5.0,
- '-i', 1.0,
- 'a.b.c.d']
- self.monitor_ping.monitor_call(test_vnf,
- test_kwargs)
- mock_utils_execute.assert_called_once_with(mock_ping_cmd,
- check_exit_code=True)
-
- @mock.patch('tacker.agent.linux.utils.execute')
- def test_monitor_call_for_failure(self, mock_utils_execute):
- mock_utils_execute.side_effect = RuntimeError()
- test_vnf = {}
- test_kwargs = {
- 'mgmt_ip': 'a.b.c.d'
- }
- monitor_return = self.monitor_ping.monitor_call(test_vnf,
- test_kwargs)
- self.assertEqual('failure', monitor_return)
-
- def test_monitor_url(self):
- test_vnf = {
- 'monitor_url': 'a.b.c.d'
- }
- test_monitor_url = self.monitor_ping.monitor_url(mock.ANY,
- mock.ANY,
- test_vnf)
- self.assertEqual('a.b.c.d', test_monitor_url)
-
- @mock.patch('tacker.agent.linux.utils.execute')
- def test_monitor_call_with_params(self, mock_utils_execute):
- check_ping_counts = 2
- check_ping_timeout = 5.0
- check_ping_interval = 0.5
- test_vnf = {}
- test_kwargs = {
- 'mgmt_ip': 'a:b:c:d:e:f:1:2',
- 'count': check_ping_counts,
- 'timeout': check_ping_timeout,
- 'interval': check_ping_interval
- }
- mock_ping_cmd = ['ping6',
- '-c', check_ping_counts,
- '-W', check_ping_timeout,
- '-i', check_ping_interval,
- 'a:b:c:d:e:f:1:2']
- self.monitor_ping.monitor_call(test_vnf,
- test_kwargs)
- mock_utils_execute.assert_called_once_with(mock_ping_cmd,
- check_exit_code=True)
-
- @mock.patch('tacker.agent.linux.utils.execute')
- def test_monitor_call_for_counts(self, mock_utils_execute):
- check_retury_counts = 5
- mock_utils_execute.side_effect = RuntimeError()
- test_vnf = {}
- test_kwargs = {
- 'mgmt_ip': 'a:b:c:d:e:f:1:2',
- 'retry': check_retury_counts
- }
- self.monitor_ping.monitor_call(test_vnf,
- test_kwargs)
- self.assertEqual(check_retury_counts,
- mock_utils_execute.call_count)
diff --git a/tacker/tests/unit/vnfm/monitor_drivers/zabbix/__init__.py b/tacker/tests/unit/vnfm/monitor_drivers/zabbix/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/monitor_drivers/zabbix/test_zabbix.py b/tacker/tests/unit/vnfm/monitor_drivers/zabbix/test_zabbix.py
deleted file mode 100644
index d9d46dd41..000000000
--- a/tacker/tests/unit/vnfm/monitor_drivers/zabbix/test_zabbix.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from tacker.vnfm.monitor_drivers.zabbix import zabbix
-import testtools
-from unittest import mock
-
-
-class TestVNFMonitorZabbix(testtools.TestCase):
-
- def setUp(self):
- super(TestVNFMonitorZabbix, self).setUp()
- zabbix.VNFMonitorZabbix.tacker_token = 'a1b2c3d4e5'
- self.monitor_zabbix = zabbix.VNFMonitorZabbix()
-
- @mock.patch('tacker.vnfm.monitor_drivers.zabbix.zabbix.'
- 'VNFMonitorZabbix.add_to_appmonitor')
- def test_add_to_appmonitor(self, mock_ac):
- mock_ac.return_value = None
-
- test_vnf = {'vnfd': {'tenant_id': 'd1e6919c73074d18ab6cd49a02e08391'},
- 'id': 'b9af3cb5-6e43-4b2c-a056-67bda3f71e1a'}
- test_kwargs = {'vdus': {'VDU1':
- {'parameters':
- {'application':
- {'app_name': 'apache2',
- 'app_status': {'actionname': 'cmd',
- 'cmd-action': 'sudo service \
- apache2 restart',
- 'condition': ['down']},
- 'ssh_username': 'ubuntu',
- 'app_port': 80,
- 'ssh_password': 'ubuntu'}},
- 'name': 'zabbix',
- 'zabbix_username': 'Admin',
- 'zabbix_password': 'zabbix',
- 'zabbix_server_ip': '192.168.11.53',
- 'zabbix_server_port': 80,
- 'mgmt_ip': '192.168.11.206'}}}
-
- monitor_return = self.monitor_zabbix.\
- add_to_appmonitor(test_kwargs, test_vnf)
- self.assertEqual(None, monitor_return)
diff --git a/tacker/tests/unit/vnfm/policy_actions/__init__.py b/tacker/tests/unit/vnfm/policy_actions/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/policy_actions/autoscaling/test_autoscaling.py b/tacker/tests/unit/vnfm/policy_actions/autoscaling/test_autoscaling.py
deleted file mode 100644
index e49447969..000000000
--- a/tacker/tests/unit/vnfm/policy_actions/autoscaling/test_autoscaling.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) 2014-2018 China Mobile (SuZhou) Software Technology Co.,Ltd.
-# All Rights Reserved
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-from unittest import mock
-
-from tacker import context
-from tacker.db.common_services import common_services_db_plugin
-from tacker.plugins.common import constants
-from tacker.vnfm.policy_actions.autoscaling import autoscaling \
- as policy_actions_autoscaling
-
-
-class TestVNFActionAutoscaling(testtools.TestCase):
-
- def setUp(self):
- super(TestVNFActionAutoscaling, self).setUp()
- self.context = context.get_admin_context()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
-
- def test_execute_action(self):
- action_autoscaling = policy_actions_autoscaling.VNFActionAutoscaling()
- vnf_dict = {
- 'id': '00000000-0000-0000-0000-000000000001',
- 'status': 'fake-status'}
- plugin = mock.Mock()
- plugin.create_vnf_scale.return_value = None
- action_autoscaling.execute_action(plugin, self.context, vnf_dict, None)
- self._cos_db_plugin.create_event.assert_called_once_with(
- self.context, res_id=vnf_dict['id'],
- res_state=vnf_dict['status'],
- res_type=constants.RES_TYPE_VNF,
- evt_type=constants.RES_EVT_MONITOR,
- tstamp=mock.ANY, details="ActionAutoscalingHeat invoked")
- plugin.create_vnf_scale.assert_called_once_with(
- self.context, vnf_dict['id'], None)
diff --git a/tacker/tests/unit/vnfm/policy_actions/log/__init__.py b/tacker/tests/unit/vnfm/policy_actions/log/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/policy_actions/log/test_log.py b/tacker/tests/unit/vnfm/policy_actions/log/test_log.py
deleted file mode 100644
index bfc5831d8..000000000
--- a/tacker/tests/unit/vnfm/policy_actions/log/test_log.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) 2014-2018 China Mobile (SuZhou) Software Technology Co.,Ltd.
-# All Rights Reserved
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-from unittest import mock
-
-from tacker import context
-from tacker.db.common_services import common_services_db_plugin
-from tacker.plugins.common import constants
-from tacker.vnfm.policy_actions.log import log as policy_actions_log
-
-
-class TestVNFActionLog(testtools.TestCase):
-
- def setUp(self):
- super(TestVNFActionLog, self).setUp()
- self.context = context.get_admin_context()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
-
- def test_execute_action(self):
- action_log = policy_actions_log.VNFActionLog()
- vnf_dict = {'id': 'fake-id', 'status': 'fake_status'}
- action_log.execute_action(None, self.context, vnf_dict, None)
- self._cos_db_plugin.create_event.assert_called_once_with(
- self.context, res_id=vnf_dict['id'],
- res_state=vnf_dict['status'],
- res_type=constants.RES_TYPE_VNF,
- evt_type=constants.RES_EVT_MONITOR,
- tstamp=mock.ANY, details="ActionLogOnly invoked")
diff --git a/tacker/tests/unit/vnfm/policy_actions/respawn/test_respawn.py b/tacker/tests/unit/vnfm/policy_actions/respawn/test_respawn.py
deleted file mode 100644
index bfe03ec0d..000000000
--- a/tacker/tests/unit/vnfm/policy_actions/respawn/test_respawn.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (c) 2014-2018 China Mobile (SuZhou) Software Technology Co.,Ltd.
-# All Rights Reserved
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-from unittest import mock
-
-from tacker.common import clients
-from tacker import context
-from tacker.db.common_services import common_services_db_plugin
-from tacker.plugins.common import constants
-from tacker.vnfm.infra_drivers.openstack import heat_client as hc
-from tacker.vnfm.policy_actions.respawn import respawn as \
- policy_actions_respawn
-from tacker.vnfm import vim_client
-
-
-class VNFActionRespawn(testtools.TestCase):
-
- def setUp(self):
- super(VNFActionRespawn, self).setUp()
- self.context = context.get_admin_context()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
-
- @mock.patch.object(clients.OpenstackClients, 'heat')
- @mock.patch.object(hc.HeatClient, 'delete')
- @mock.patch.object(vim_client.VimClient, 'get_vim')
- def test_execute_action(self, mock_get_vim, mock_hc_delete, mock_heat):
- action_respawn = policy_actions_respawn.VNFActionRespawn()
- vnf_dict = {
- 'id': 'fake-id',
- 'status': 'fake-status',
- 'attributes': {
- 'monitoring_policy': 'fake-monitoring-policy',
- 'failure_count': '1',
- 'dead_instance_id_1': '00000000-0000-0000-0000-00000000001'},
- 'vim_id': 'fake-vim-id',
- 'vim_auth': 'fake-vim-auth',
- 'instance_id': '00000000-0000-0000-0000-000000000002',
- 'placement_attr': {
- 'region_name': 'fake-region-name'}}
- mock_get_vim.return_value = {'vim_auth': {
- 'auth_url': 'http://fake-url/identity/v3'
- }}
- mock_hc_delete.return_value = True
- plugin = mock.Mock()
- plugin._mark_vnf_dead.return_value = True
- plugin.create_vnf_sync.return_value = {'id': 'fake-id'}
- plugin._vnf_monitor = mock.Mock()
- action_respawn.execute_action(plugin, self.context, vnf_dict, None)
- self._cos_db_plugin.create_event.assert_called_once_with(
- self.context, res_id=vnf_dict['id'],
- res_state=vnf_dict['status'],
- res_type=constants.RES_TYPE_VNF,
- evt_type=constants.RES_EVT_MONITOR,
- tstamp=mock.ANY, details="ActionRespawnHeat invoked")
- mock_get_vim.assert_called_once_with(self.context, vnf_dict['vim_id'])
- plugin.create_vnf_sync.assert_called_with(self.context, vnf_dict)
- plugin._vnf_monitor.mark_dead.assert_called_once_with(vnf_dict['id'])
diff --git a/tacker/tests/unit/vnfm/policy_actions/vdu_autoheal/__init__.py b/tacker/tests/unit/vnfm/policy_actions/vdu_autoheal/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/policy_actions/vdu_autoheal/test_vdu_autoheal.py b/tacker/tests/unit/vnfm/policy_actions/vdu_autoheal/test_vdu_autoheal.py
deleted file mode 100644
index f1077d036..000000000
--- a/tacker/tests/unit/vnfm/policy_actions/vdu_autoheal/test_vdu_autoheal.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from datetime import datetime
-from unittest import mock
-
-from oslo_utils import uuidutils
-
-from tacker import context
-from tacker.db.nfvo import nfvo_db
-from tacker.objects import heal_vnf_request
-from tacker.tests.unit.db import base as db_base
-from tacker.vnfm import plugin
-from tacker.vnfm.policy_actions.vdu_autoheal import vdu_autoheal
-
-
-vnf_dict = {
- 'id': uuidutils.generate_uuid(),
- 'mgmt_ip_address': '{"VDU1": "a.b.c.d"}',
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'instance_id': 'a737497c-761c-11e5-89c3-9cb6541d805d',
- 'attributes': {
- 'heat_template': {
- 'resources': {
- 'VDU1': {
- 'properties': {
- 'networks': [{'port': {'get_resource': 'CP1'}}]}
- }
- }
- }
- }
-}
-
-
-class FakeDriverManager(mock.Mock):
- def invoke(self, *args, **kwargs):
- if 'create' in args:
- return uuidutils.generate_uuid()
-
- if 'get_resource_info' in args:
- return {'resources': {'name': 'dummy_vnf',
- 'type': 'dummy',
- 'id': uuidutils.generate_uuid()}}
-
-
-class FakeVNFMonitor(mock.Mock):
- pass
-
-
-class TestVNFActionVduAutoheal(db_base.SqlTestCase):
-
- def setUp(self):
- super(TestVNFActionVduAutoheal, self).setUp()
- self.context = context.get_admin_context()
- self._mock_device_manager()
- self._mock_vnf_monitor()
- self._insert_dummy_vim()
- self.vnfm_plugin = plugin.VNFMPlugin()
- self.vdu_autoheal = vdu_autoheal.VNFActionVduAutoheal()
- self.addCleanup(mock.patch.stopall)
-
- def _mock_device_manager(self):
- self._device_manager = mock.Mock(wraps=FakeDriverManager())
- self._device_manager.__contains__ = mock.Mock(
- return_value=True)
- fake_device_manager = mock.Mock()
- fake_device_manager.return_value = self._device_manager
- self._mock(
- 'tacker.common.driver_manager.DriverManager', fake_device_manager)
-
- def _mock_vnf_monitor(self):
- self._vnf_monitor = mock.Mock(wraps=FakeVNFMonitor())
- fake_vnf_monitor = mock.Mock()
- fake_vnf_monitor.return_value = self._vnf_monitor
- self._mock(
- 'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)
-
- def _insert_dummy_vim(self):
- session = self.context.session
- vim_db = nfvo_db.Vim(
- id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_vim',
- description='fake_vim_description',
- type='test_vim',
- deleted_at=datetime.min,
- placement_attr={'regions': ['RegionOne']})
- vim_auth_db = nfvo_db.VimAuth(
- vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- password='encrypted_pw',
- auth_url='http://localhost:5000',
- vim_project={'name': 'test_project'},
- auth_cred={'username': 'test_user', 'user_domain_id': 'default',
- 'project_domain_id': 'default'})
- session.add(vim_db)
- session.add(vim_auth_db)
- session.flush()
-
- @mock.patch('tacker.vnfm.plugin.VNFMPlugin.heal_vnf')
- @mock.patch('yaml.safe_load')
- @mock.patch('tacker.objects.HealVnfRequest')
- def test_vdu_autoheal_execute_action(self, mock_heal_vnf_request,
- mock_safe_load,
- mock_heal_vnf):
- # Here yaml.safe_load is mock as in the test case i am passing
- # vnf_dict containing having vnf_dict['attributes']['heat_template']
- # value in json format so while excution it giving the error as
- # dict object has no read attribute where as in actual execution the
- # value of vnf_dict['attributes']['heat_template'] is in ymal format.
- mock_safe_load.return_value = vnf_dict['attributes']['heat_template']
- resource_list = ['VDU1', 'CP1']
- additional_params = []
- for resource in resource_list:
- additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
- parameter=resource,
- cause=["Unable to reach while monitoring resource: '%s'" %
- resource])
- additional_params.append(additional_paramas_obj)
- heal_request_data_obj = heal_vnf_request.HealVnfRequest(
- cause='VNF monitoring fails.',
- legacy_additional_params=additional_params)
- mock_heal_vnf_request.return_value = heal_request_data_obj
- self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
- vnf_dict, args={'vdu_name': 'VDU1'})
- mock_heal_vnf.assert_called_once_with(self.context, vnf_dict['id'],
- heal_request_data_obj)
-
- @mock.patch('tacker.vnfm.policy_actions.vdu_autoheal.'
- 'vdu_autoheal.LOG')
- def test_vdu_autoheal_action_with_no_vdu_name(self, mock_log):
- expected_error_msg = ("VDU resource of vnf '%s' is not present for "
- "autoheal." % vnf_dict['id'])
- self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
- vnf_dict, args={})
- mock_log.error.assert_called_with(expected_error_msg)
diff --git a/tacker/tests/unit/vnfm/test_k8s_plugin.py b/tacker/tests/unit/vnfm/test_k8s_plugin.py
deleted file mode 100644
index 7df19163e..000000000
--- a/tacker/tests/unit/vnfm/test_k8s_plugin.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright 2015 Brocade Communications System, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from datetime import datetime
-from unittest import mock
-
-from oslo_utils import uuidutils
-
-from tacker import context
-from tacker.db.common_services import common_services_db_plugin
-from tacker.db.nfvo import nfvo_db
-from tacker.db.vnfm import vnfm_db
-from tacker.plugins.common import constants
-from tacker.tests.unit.db import base as db_base
-from tacker.tests.unit.db import utils
-from tacker.vnfm import plugin
-
-
-class FakeCVNFMonitor(mock.Mock):
- pass
-
-
-class FakePlugin(mock.Mock):
- pass
-
-
-class FakeK8SVimClient(mock.Mock):
- pass
-
-
-class TestCVNFMPlugin(db_base.SqlTestCase):
- def setUp(self):
- super(TestCVNFMPlugin, self).setUp()
- self.addCleanup(mock.patch.stopall)
- self.context = context.get_admin_context()
- self._mock_vim_client()
- self._stub_get_vim()
- self._mock_vnf_monitor()
- self._mock_vnf_maintenance_monitor()
- self._mock_vnf_maintenance_plugin()
- self._insert_dummy_vim()
- self.vnfm_plugin = plugin.VNFMPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- mock.patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._mgmt_driver_name',
- return_value='noop').start()
- self.create = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.create',
- return_value=uuidutils.
- generate_uuid()).start()
- self.create_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.'
- 'create_wait').start()
- self.update = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.update').start()
- self.update_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.'
- 'update_wait').start()
- self.delete = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.delete').start()
- self.delete_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.'
- 'delete_wait').start()
- self.scale = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.scale',
- return_value=uuidutils.generate_uuid()).start()
- self.scale_wait = mock.patch('tacker.vnfm.infra_drivers.kubernetes.'
- 'kubernetes_driver.Kubernetes.scale_wait',
- return_value=uuidutils.
- generate_uuid()).start()
-
- def _fake_spawn(func, *args, **kwargs):
- func(*args, **kwargs)
-
- mock.patch.object(self.vnfm_plugin, 'spawn_n',
- _fake_spawn).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
-
- def _mock_vim_client(self):
- self.vim_client = mock.Mock(wraps=FakeK8SVimClient())
- fake_vim_client = mock.Mock()
- fake_vim_client.return_value = self.vim_client
- self._mock(
- 'tacker.vnfm.vim_client.VimClient', fake_vim_client)
-
- def _stub_get_vim(self):
- vim_obj = {'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'vim_name': 'fake_vim',
- 'vim_auth': {'auth_url': 'http://localhost:6443',
- 'password': 'test_pw', 'username': 'test_user',
- 'project_name': 'test_project',
- 'ssl_ca_cert': None},
- 'vim_type': 'kubernetes'}
- self.vim_client.get_vim.return_value = vim_obj
-
- def _mock_vnf_monitor(self):
- self._vnf_monitor = mock.Mock(wraps=FakeCVNFMonitor())
- fake_vnf_monitor = mock.Mock()
- fake_vnf_monitor.return_value = self._vnf_monitor
- self._mock(
- 'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)
-
- def _mock_vnf_maintenance_monitor(self):
- self._vnf_maintenance_mon = mock.Mock(wraps=FakeCVNFMonitor())
- fake_vnf_maintenance_monitor = mock.Mock()
- fake_vnf_maintenance_monitor.return_value = self._vnf_maintenance_mon
- self._mock(
- 'tacker.vnfm.monitor.VNFMaintenanceAlarmMonitor',
- fake_vnf_maintenance_monitor)
-
- def _mock_vnf_maintenance_plugin(self):
- self._vnf_maintenance_plugin = mock.Mock(wraps=FakePlugin())
- fake_vnf_maintenance_plugin = mock.Mock()
- fake_vnf_maintenance_plugin.return_value = self._vnf_maintenance_plugin
- self._mock(
- 'tacker.plugins.fenix.FenixPlugin',
- fake_vnf_maintenance_plugin)
-
- def _insert_dummy_vnf_template(self):
- session = self.context.session
- vnf_template = vnfm_db.VNFD(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- description='fake_template_description',
- template_source='onboarded',
- deleted_at=datetime.min)
- session.add(vnf_template)
- session.flush()
- return vnf_template
-
- def _insert_dummy_vnf_template_inline(self):
- session = self.context.session
- vnf_template = vnfm_db.VNFD(
- id='d58bcc4e-d0cf-11e6-bf26-cec0c932ce01',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='tmpl-koeak4tqgoqo8cr4-dummy_inline_vnf',
- description='inline_fake_template_description',
- deleted_at=datetime.min,
- template_source='inline')
- session.add(vnf_template)
- session.flush()
- return vnf_template
-
- def _insert_dummy_vim(self):
- pass
- session = self.context.session
- vim_db = nfvo_db.Vim(
- id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_vim',
- description='fake_vim_description',
- type='kubernetes',
- deleted_at=datetime.min,
- placement_attr={'regions': ['default', 'kube-public',
- 'kube-system']})
- vim_auth_db = nfvo_db.VimAuth(
- vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- password='encrypted_pw',
- auth_url='http://localhost:6443',
- vim_project={'name': 'test_project'},
- auth_cred={'auth_url': 'https://localhost:6443',
- 'username': 'admin',
- 'bearer_token': None,
- 'ssl_ca_cert': 'test',
- 'project_name': 'default',
- 'type': 'kubernetes'})
- session.add(vim_db)
- session.add(vim_auth_db)
- session.flush()
-
- def test_create_cvnf_with_vnfd(self):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('instance_id', result)
- self.assertIn('status', result)
- self.assertIn('attributes', result)
- self.assertIn('mgmt_ip_address', result)
- self.assertIn('created_at', result)
- self.assertIn('updated_at', result)
- self.assertEqual('ACTIVE', result['status'])
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details=mock.ANY)
-
- @mock.patch('tacker.vnfm.plugin.VNFMPlugin.create_vnfd')
- def test_create_cvnf_from_template(self, mock_create_vnfd):
- self._insert_dummy_vnf_template_inline()
- mock_create_vnfd.return_value = {'id':
- 'd58bcc4e-d0cf-11e6-bf26'
- '-cec0c932ce01'}
- vnf_obj = utils.get_dummy_inline_cvnf_obj()
- result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('instance_id', result)
- self.assertIn('status', result)
- self.assertIn('attributes', result)
- self.assertIn('mgmt_ip_address', result)
- self.assertIn('created_at', result)
- self.assertIn('updated_at', result)
- self.assertEqual('ACTIVE', result['status'])
- mock_create_vnfd.assert_called_once_with(mock.ANY, mock.ANY)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_CREATE,
- res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details=mock.ANY)
-
- def test_delete_vnf(self):
- pass
-
- def test_update_vnf(self):
- pass
-
- def _test_scale_vnf(self, type):
- pass
-
- def test_scale_vnf_out(self):
- pass
-
- def test_scale_vnf_in(self):
- pass
diff --git a/tacker/tests/unit/vnfm/test_monitor.py b/tacker/tests/unit/vnfm/test_monitor.py
deleted file mode 100644
index c555390e8..000000000
--- a/tacker/tests/unit/vnfm/test_monitor.py
+++ /dev/null
@@ -1,295 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from unittest import mock
-
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-import testtools
-
-from tacker import context
-from tacker.db.common_services import common_services_db_plugin
-from tacker.plugins.common import constants
-from tacker.vnfm import monitor
-from tacker.vnfm import plugin
-
-MOCK_VNF_ID = 'a737497c-761c-11e5-89c3-9cb6541d805d'
-MOCK_VNF = {
- 'id': MOCK_VNF_ID,
- 'mgmt_ip_addresses': {
- 'vdu1': 'a.b.c.d'
- },
- 'monitoring_policy': {
- 'vdus': {
- 'vdu1': {
- 'ping': {
- 'actions': {
- 'failure': 'respawn'
- },
- 'monitoring_params': {
- 'count': 1,
- 'monitoring_delay': 0,
- 'interval': 0,
- 'timeout': 2
- }
- }
- }
- }
- },
- 'boot_at': timeutils.utcnow(),
- 'action_cb': mock.MagicMock()
-}
-
-
-MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL = {
- 'id': MOCK_VNF_ID,
- 'mgmt_ip_addresses': {
- 'vdu1': 'a.b.c.d'
- },
- 'monitoring_policy': {
- 'vdus': {
- 'vdu1': {
- 'ping': {
- 'actions': {
- 'failure': 'vdu_autoheal'
- },
- 'monitoring_params': {
- 'count': 1,
- 'monitoring_delay': 0,
- 'interval': 0,
- 'timeout': 2
- }
- }
- }
- }
- },
- 'boot_at': timeutils.utcnow(),
- 'action_cb': mock.MagicMock()
-}
-
-
-class TestVNFMonitor(testtools.TestCase):
-
- def setUp(self):
- super(TestVNFMonitor, self).setUp()
- p = mock.patch('tacker.common.driver_manager.DriverManager')
- self.mock_monitor_manager = p.start()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
- self.addCleanup(p.stop)
-
- def test_to_hosting_vnf(self):
- test_vnf_dict = {
- 'id': MOCK_VNF_ID,
- 'mgmt_ip_address': '{"vdu1": "a.b.c.d"}',
- 'attributes': {
- 'monitoring_policy': jsonutils.dump_as_bytes(
- MOCK_VNF['monitoring_policy'])
- }
- }
- action_cb = mock.MagicMock()
- expected_output = {
- 'id': MOCK_VNF_ID,
- 'action_cb': action_cb,
- 'mgmt_ip_addresses': {
- 'vdu1': 'a.b.c.d'
- },
- 'vnf': test_vnf_dict,
- 'monitoring_policy': MOCK_VNF['monitoring_policy']
- }
- output_dict = monitor.VNFMonitor.to_hosting_vnf(test_vnf_dict,
- action_cb)
- self.assertEqual(expected_output, output_dict)
-
- @mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
- def test_add_hosting_vnf(self, mock_monitor_run):
- test_vnf_dict = {
- 'id': MOCK_VNF_ID,
- 'mgmt_ip_address': '{"vdu1": "a.b.c.d"}',
- 'attributes': {
- 'monitoring_policy': jsonutils.dump_as_bytes(
- MOCK_VNF['monitoring_policy'])
- },
- 'status': 'ACTIVE'
- }
- action_cb = mock.MagicMock()
- test_boot_wait = 30
- test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
- new_dict = test_vnfmonitor.to_hosting_vnf(test_vnf_dict, action_cb)
- test_vnfmonitor.add_hosting_vnf(new_dict)
- test_vnf_id = list(test_vnfmonitor._hosting_vnfs.keys())[0]
- self.assertEqual(MOCK_VNF_ID, test_vnf_id)
- self._cos_db_plugin.create_event.assert_called_with(
- mock.ANY, res_id=mock.ANY, res_type=constants.RES_TYPE_VNF,
- res_state=mock.ANY, evt_type=constants.RES_EVT_MONITOR,
- tstamp=mock.ANY, details=mock.ANY)
-
- @mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
- def test_run_monitor(self, mock_monitor_run):
- test_hosting_vnf = MOCK_VNF
- test_hosting_vnf['vnf'] = {'status': 'ACTIVE'}
- test_boot_wait = 30
- mock_kwargs = {
- 'count': 1,
- 'monitoring_delay': 0,
- 'interval': 0,
- 'mgmt_ip': 'a.b.c.d',
- 'timeout': 2
- }
- test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
- self.mock_monitor_manager.invoke = mock.MagicMock()
- test_vnfmonitor._monitor_manager = self.mock_monitor_manager
- test_vnfmonitor.run_monitor(test_hosting_vnf)
- self.mock_monitor_manager \
- .invoke.assert_called_once_with('ping', 'monitor_call',
- vnf={'status': 'ACTIVE'},
- kwargs=mock_kwargs)
-
- @mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
- @mock.patch('tacker.vnfm.monitor.VNFMonitor.monitor_call')
- def test_vdu_autoheal_action(self, mock_monitor_call, mock_monitor_run):
- test_hosting_vnf = MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL
- test_boot_wait = 30
- test_device_dict = {
- 'status': 'ACTIVE',
- 'id': MOCK_VNF_ID,
- 'mgmt_ip_address': '{"vdu1": "a.b.c.d"}',
- 'attributes': {
- 'monitoring_policy': jsonutils.dump_as_bytes(
- MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL['monitoring_policy'])
- }
- }
- test_hosting_vnf['vnf'] = test_device_dict
- mock_monitor_call.return_value = 'failure'
- test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
- test_vnfmonitor._monitor_manager = self.mock_monitor_manager
- test_vnfmonitor.run_monitor(test_hosting_vnf)
- test_hosting_vnf['action_cb'].assert_called_once_with(
- 'vdu_autoheal', vdu_name='vdu1')
-
- @mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
- def test_update_hosting_vnf(self, mock_monitor_run):
- test_boot_wait = 30
- test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
- vnf_dict = {
- 'id': MOCK_VNF_ID,
- 'mgmt_ip_address': '{"vdu1": "a.b.c.d"}',
- 'mgmt_ip_addresses': 'a.b.c.d',
- 'vnf': {
- 'id': MOCK_VNF_ID,
- 'mgmt_ip_address': '{"vdu1": "a.b.c.d"}',
- 'attributes': {
- 'monitoring_policy': jsonutils.dump_as_bytes(
- MOCK_VNF['monitoring_policy'])
- },
- 'status': 'ACTIVE',
- }
- }
-
- test_vnfmonitor.add_hosting_vnf(vnf_dict)
- vnf_dict['status'] = 'PENDING_HEAL'
- test_vnfmonitor.update_hosting_vnf(vnf_dict)
- test_device_status = test_vnfmonitor._hosting_vnfs[MOCK_VNF_ID][
- 'vnf']['status']
- self.assertEqual('PENDING_HEAL', test_device_status)
-
-
-class TestVNFReservationAlarmMonitor(testtools.TestCase):
-
- def setUp(self):
- super(TestVNFReservationAlarmMonitor, self).setUp()
- self.context = context.get_admin_context()
- self.plugin = plugin.VNFMPlugin
-
- def test_process_alarm_for_vnf(self):
- vnf = {'id': 'a737497c-761c-11e5-89c3-9cb6541d805d'}
- trigger = {'params': {'data': {
- 'alarm_id': 'a737497c-761c-11e5-89c3-9cb6541d805d',
- 'current': 'alarm'}}}
- test_vnf_reservation_monitor = monitor.VNFReservationAlarmMonitor()
- response = test_vnf_reservation_monitor.process_alarm_for_vnf(
- vnf, trigger)
- self.assertEqual(response, True)
-
- @mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event')
- @mock.patch('tacker.vnfm.plugin.VNFMPlugin.get_vnf_policies')
- def test_update_vnf_with_alarm(self, mock_get_vnf_policies,
- mock_db_service):
- mock_get_vnf_policies.return_value = [
- {'name': 'SP_RSV', 'type': 'tosca.policies.tacker.Scaling'}]
- mock_db_service.return_value = {
- 'event_type': 'MONITOR',
- 'resource_id': '9770fa22-747d-426e-9819-057a95cb778c',
- 'timestamp': '2018-10-30 06:01:45.628162',
- 'event_details': {'Alarm URL set successfully': {
- 'start_actions': 'alarm'}},
- 'resource_state': 'CREATE',
- 'id': '4583',
- 'resource_type': 'vnf'}
- vnf = {'id': 'a737497c-761c-11e5-89c3-9cb6541d805d',
- 'status': 'insufficient_data'}
- test_vnf_reservation_monitor = monitor.VNFReservationAlarmMonitor()
- policy_dict = {
- 'type': 'tosca.policies.tacker.Reservation',
- 'reservation': {'before_end_actions': ['SP_RSV'],
- 'end_actions': ['noop'],
- 'start_actions': ['SP_RSV'],
- 'properties': {
- 'lease_id':
- 'ffa079a0-9d6f-411d-ab15-89219c0ee14d'}}}
- response = test_vnf_reservation_monitor.update_vnf_with_reservation(
- self.plugin, self.context, vnf, policy_dict)
- self.assertEqual(len(response.keys()), 3)
-
-
-class TestVNFMaintenanceAlarmMonitor(testtools.TestCase):
-
- def setup(self):
- super(TestVNFMaintenanceAlarmMonitor, self).setUp()
-
- def test_process_alarm_for_vnf(self):
- vnf = {'id': MOCK_VNF_ID}
- trigger = {'params': {'data': {
- 'alarm_id': MOCK_VNF_ID, 'current': 'alarm'}}}
- test_vnf_maintenance_monitor = monitor.VNFMaintenanceAlarmMonitor()
- response = test_vnf_maintenance_monitor.process_alarm_for_vnf(
- vnf, trigger)
- self.assertEqual(response, True)
-
- @mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event')
- def test_update_vnf_with_alarm(self, mock_db_service):
- mock_db_service.return_value = {
- 'event_type': 'MONITOR',
- 'resource_id': '9770fa22-747d-426e-9819-057a95cb778c',
- 'timestamp': '2018-10-30 06:01:45.628162',
- 'event_details': {'Alarm URL set successfully': {
- 'start_actions': 'alarm'}},
- 'resource_state': 'CREATE',
- 'id': '4583',
- 'resource_type': 'vnf'}
- vnf = {
- 'id': MOCK_VNF_ID,
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'status': 'insufficient_data'}
- vdu_names = ['VDU1']
- test_vnf_maintenance_monitor = monitor.VNFMaintenanceAlarmMonitor()
- response = test_vnf_maintenance_monitor.update_vnf_with_maintenance(
- vnf, vdu_names)
- result_keys = len(response) + len(response.get('vdus', {}))
- self.assertEqual(result_keys, 4)
diff --git a/tacker/tests/unit/vnfm/test_plugin.py b/tacker/tests/unit/vnfm/test_plugin.py
index b5590df4f..0736e23e4 100644
--- a/tacker/tests/unit/vnfm/test_plugin.py
+++ b/tacker/tests/unit/vnfm/test_plugin.py
@@ -15,32 +15,21 @@
from datetime import datetime
from unittest import mock
-from unittest.mock import patch
import ddt
import iso8601
-from oslo_config import cfg
from oslo_utils import uuidutils
import yaml
-from tacker._i18n import _
-from tacker.common import exceptions
from tacker import context
-from tacker.db.common_services import common_services_db_plugin
from tacker.db.db_sqlalchemy import models
from tacker.db.nfvo import nfvo_db
-from tacker.db.nfvo import ns_db
from tacker.db.vnfm import vnfm_db
-from tacker.extensions import vnfm
from tacker import objects
-from tacker.objects import heal_vnf_request
-from tacker.plugins.common import constants
from tacker.tests.unit.conductor import fakes
from tacker.tests.unit.db import base as db_base
from tacker.tests.unit.db import utils
from tacker.tests.unit.vnflcm import fakes as vnflcm_fakes
-from tacker.tosca import utils as toscautils
-from tacker.vnfm import monitor
from tacker.vnfm import plugin
@@ -55,123 +44,20 @@ class FakeDriverManager(mock.Mock):
'id': uuidutils.generate_uuid()}}
-class FakeVNFMonitor(mock.Mock):
- def update_vnf_with_maintenance(self, vnf_dict, maintenance_vdus):
- url = 'http://local:9890/v1.0/vnfs/%s/maintenance/%s' % (
- vnf_dict['id'], vnf_dict['tenant_id'])
- return {'url': url,
- 'vdus': {'ALL': 'ad7ebc56',
- 'VDU1': '538745a0'}}
-
-
-class FakeGreenPool(mock.Mock):
- pass
-
-
class FakeVimClient(mock.Mock):
pass
-class FakePlugin(mock.Mock):
- pass
-
-
-class FakeException(Exception):
- pass
-
-
-class TestVNFMPluginMonitor(db_base.SqlTestCase):
- def setUp(self):
- super(TestVNFMPluginMonitor, self).setUp()
- self._mock_vnf_manager()
-
- def _mock_vnf_manager(self):
- self._vnf_manager = mock.Mock(wraps=FakeDriverManager())
- self._vnf_manager.__contains__ = mock.Mock(
- return_value=True)
- fake_vnf_manager = mock.Mock()
- fake_vnf_manager.return_value = self._vnf_manager
- self._mock(
- 'tacker.common.driver_manager.DriverManager', fake_vnf_manager)
-
- @mock.patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnfs')
- @mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
- def test_init_monitoring(self, mock_run, mock_get_vnfs):
- vnf_id = uuidutils.generate_uuid()
- vnfs = [{
- 'id': vnf_id,
- 'vnf': {
- 'id': vnf_id,
- 'status': 'ACTIVE',
- 'name': 'fake_vnf',
- 'attributes': {
- 'monitoring_policy':
- '{"vdus": '
- '{"VDU1": {"ping": {"actions": {"failure": "respawn"},'
- '"name": "ping", "parameters": {"count": 3,'
- '"interval": 1, "monitoring_delay": 45, "timeout": 2},'
- '"monitoring_params": {"count": 3, "interval": 1,'
- '"monitoring_delay": 45, "timeout": 2}}}}}'}
- },
- 'name': 'fake_vnf',
- 'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
- 'description': 'fake_vnf_description',
- 'instance_id': 'da85ea1a-4ec4-4201-bbb2-8d9249eca7ec',
- 'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
- 'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- 'placement_attr': {'region': 'RegionOne'},
- 'status': 'ACTIVE',
- 'attributes': {
- 'monitoring_policy':
- '{"vdus": '
- '{"VDU1": {"ping": {"actions": {"failure": "respawn"},'
- '"name": "ping", "parameters": {"count": 3,'
- '"interval": 1, "monitoring_delay": 45, "timeout": 2},'
- '"monitoring_params": {"count": 3, "interval": 1,'
- '"monitoring_delay": 45, "timeout": 2}}}}}'},
- 'mgmt_ip_address': '{"VDU1": "a.b.c.d"}',
- 'deleted_at': datetime.min,
- 'mgmt_ip_addresses': 'a.b.c.d'
- }]
-
- mock_get_vnfs.return_value = vnfs
- # NOTE(bhagyashris): VNFMonitor class is using a singleton pattern
- # and '_hosting_vnfs' is defined as a class level attribute.
- # If one of the unit test adds a VNF to monitor it will show up here
- # provided both the unit tests runs in the same process.
- # Hence, you must reinitialize '_hosting_vnfs' to empty dict.
- monitor.VNFMonitor._hosting_vnfs = dict()
- vnfm_plugin = plugin.VNFMPlugin()
- hosting_vnfs = vnfm_plugin._vnf_monitor._hosting_vnfs.values()
- hosting_vnf = list(hosting_vnfs)[0]['vnf']
- self.assertEqual('{"VDU1": "a.b.c.d"}', hosting_vnf['mgmt_ip_address'])
- self.assertEqual(1, len(hosting_vnfs))
-
-
@ddt.ddt
class TestVNFMPlugin(db_base.SqlTestCase):
def setUp(self):
- # NOTE(h-asahina): set timer to 0 to reduce test time.
- cfg.CONF.set_override('boot_wait', 0, 'tacker')
- self.addClassCleanup(cfg.CONF.clear_override, 'boot_wait', 'tacker')
-
super(TestVNFMPlugin, self).setUp()
self.addCleanup(mock.patch.stopall)
self.context = context.get_admin_context()
self._mock_vim_client()
self._stub_get_vim()
- self._mock_vnf_monitor()
- self._mock_vnf_alarm_monitor()
- self._mock_vnf_reservation_monitor()
- self._mock_vnf_maintenance_monitor()
- self._mock_vnf_maintenance_plugin()
self._insert_dummy_vim()
self.vnfm_plugin = plugin.VNFMPlugin()
- mock.patch('tacker.db.common_services.common_services_db_plugin.'
- 'CommonServicesPluginDb.create_event'
- ).start()
- mock.patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._mgmt_driver_name',
- return_value='noop').start()
self.create = mock.patch('tacker.vnfm.infra_drivers.openstack.'
'openstack.OpenStack.create',
return_value=uuidutils.generate_uuid()).start()
@@ -201,8 +87,6 @@ class TestVNFMPlugin(db_base.SqlTestCase):
mock.patch.object(self.vnfm_plugin, 'spawn_n',
_fake_spawn).start()
- self._cos_db_plugin =\
- common_services_db_plugin.CommonServicesPluginDb()
def _mock_vim_client(self):
self.vim_client = mock.Mock(wraps=FakeVimClient())
@@ -219,44 +103,6 @@ class TestVNFMPlugin(db_base.SqlTestCase):
'test_project'}, 'vim_type': 'openstack'}
self.vim_client.get_vim.return_value = vim_obj
- def _mock_vnf_monitor(self):
- self._vnf_monitor = mock.Mock(wraps=FakeVNFMonitor())
- fake_vnf_monitor = mock.Mock()
- fake_vnf_monitor.return_value = self._vnf_monitor
- self._mock(
- 'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)
-
- def _mock_vnf_alarm_monitor(self):
- self._vnf_alarm_monitor = mock.Mock(wraps=FakeVNFMonitor())
- fake_vnf_alarm_monitor = mock.Mock()
- fake_vnf_alarm_monitor.return_value = self._vnf_alarm_monitor
- self._mock(
- 'tacker.vnfm.monitor.VNFAlarmMonitor', fake_vnf_alarm_monitor)
-
- def _mock_vnf_reservation_monitor(self):
- self._vnf_reservation_mon = mock.Mock(wraps=FakeVNFMonitor())
- fake_vnf_reservation_monitor = mock.Mock()
- fake_vnf_reservation_monitor.return_value = self._vnf_reservation_mon
- self._mock(
- 'tacker.vnfm.monitor.VNFReservationAlarmMonitor',
- fake_vnf_reservation_monitor)
-
- def _mock_vnf_maintenance_monitor(self):
- self._vnf_maintenance_mon = mock.Mock(wraps=FakeVNFMonitor())
- fake_vnf_maintenance_monitor = mock.Mock()
- fake_vnf_maintenance_monitor.return_value = self._vnf_maintenance_mon
- self._mock(
- 'tacker.vnfm.monitor.VNFMaintenanceAlarmMonitor',
- fake_vnf_maintenance_monitor)
-
- def _mock_vnf_maintenance_plugin(self):
- self._vnf_maintenance_plugin = mock.Mock(wraps=FakePlugin())
- fake_vnf_maintenance_plugin = mock.Mock()
- fake_vnf_maintenance_plugin.return_value = self._vnf_maintenance_plugin
- self._mock(
- 'tacker.plugins.fenix.FenixPlugin',
- fake_vnf_maintenance_plugin)
-
def _insert_dummy_vnf_template(self):
session = self.context.session
vnf_template = vnfm_db.VNFD(
@@ -367,844 +213,6 @@ class TestVNFMPlugin(db_base.SqlTestCase):
session.add(vim_auth_db)
session.flush()
- @mock.patch('tacker.vnfm.plugin.toscautils.updateimports')
- @mock.patch('tacker.vnfm.plugin.ToscaTemplate')
- @mock.patch('tacker.vnfm.plugin.toscautils.get_mgmt_driver')
- def test_create_vnfd(self, mock_get_mgmt_driver, mock_tosca_template,
- mock_update_imports):
- mock_get_mgmt_driver.return_value = 'noop'
- mock_tosca_template.return_value = mock.ANY
-
- vnfd_obj = utils.get_dummy_vnfd_obj()
- result = self.vnfm_plugin.create_vnfd(self.context, vnfd_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertEqual('dummy_vnfd', result['name'])
- self.assertEqual('dummy_vnfd_description', result['description'])
- self.assertEqual('noop', result['mgmt_driver'])
- self.assertIn('service_types', result)
- self.assertIn('attributes', result)
- self.assertIn('created_at', result)
- self.assertIn('updated_at', result)
- self.assertIn('template_source', result)
- yaml_dict = yaml.safe_load(utils.tosca_vnfd_openwrt)
- mock_tosca_template.assert_called_once_with(
- a_file=False, yaml_dict_tpl=yaml_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- mock_get_mgmt_driver.assert_called_once_with(mock.ANY)
- mock_update_imports.assert_called_once_with(yaml_dict)
- self._cos_db_plugin.create_event.assert_called_once_with(
- self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
- res_state=constants.RES_EVT_ONBOARDED,
- res_type=constants.RES_TYPE_VNFD, tstamp=mock.ANY)
-
- def test_create_vnfd_without_tosca_definitions_version(self):
- vnfd_obj = utils.get_dummy_vnfd_obj()
- vnfd_obj['vnfd']['attributes']['vnfd'].pop('tosca_definitions_version')
- self.assertRaises(exceptions.Invalid,
- self.vnfm_plugin.create_vnfd,
- self.context, vnfd_obj)
-
- def test_create_vnfd_with_empty_description(self):
- vnfd_obj = utils.get_dummy_vnfd_obj()
- vnfd_obj['vnfd']['description'] = ''
- result = self.vnfm_plugin.create_vnfd(self.context, vnfd_obj)
- self.assertIsNotNone(result)
- # If vnfd description is an empty string, it sets the description of
- # vnfd to the description that is present in the vnfd tosca template.
- self.assertEqual(yaml.safe_load(
- vnfd_obj['vnfd']['attributes']['vnfd'])['description'],
- result['description'])
-
- def test_create_vnfd_empty_name(self):
- vnfd_obj = utils.get_dummy_vnfd_obj()
- vnfd_obj['vnfd']['name'] = ''
- result = self.vnfm_plugin.create_vnfd(self.context, vnfd_obj)
- self.assertIsNotNone(result)
- # If vnfd name is an empty string, it sets the name of vnfd to
- # the name that is present in the vnfd tosca template.
- self.assertEqual(yaml.safe_load(vnfd_obj['vnfd']['attributes']
- ['vnfd'])['metadata']['template_name'], result['name'])
-
- def test_create_vnfd_with_tosca_parser_failure(self):
- vnfd_obj = utils.get_invalid_vnfd_obj()
- self.assertRaises(vnfm.ToscaParserFailed,
- self.vnfm_plugin.create_vnfd,
- self.context, vnfd_obj)
-
- def test_create_vnfd_no_service_types(self):
- vnfd_obj = utils.get_dummy_vnfd_obj()
- vnfd_obj['vnfd'].pop('service_types')
- self.assertRaises(vnfm.ServiceTypesNotSpecified,
- self.vnfm_plugin.create_vnfd,
- self.context, vnfd_obj)
-
- def test_create_vnfd_without_dict_type_attributes(self):
- vnfd_obj = utils.get_dummy_vnfd_obj()
- # Convert dict to string.
- vnfd_obj['vnfd']['attributes']['vnfd'] = str(
- vnfd_obj['vnfd']['attributes']['vnfd'])
- self.assertRaises(vnfm.InvalidAPIAttributeType,
- self.vnfm_plugin.create_vnfd,
- self.context, vnfd_obj)
-
- def test_create_vnf_sync(self):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- vnf_dict = self.vnfm_plugin.create_vnf_sync(self.context,
- vnf_obj['vnf'])
- self.assertIsNotNone(vnf_dict)
- self.assertEqual('ACTIVE', vnf_dict['status'])
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details=mock.ANY)
-
- def test_create_vnf_with_vnfd(self):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('instance_id', result)
- self.assertIn('status', result)
- self.assertIn('attributes', result)
- self.assertIn('mgmt_ip_address', result)
- self.assertIn('created_at', result)
- self.assertIn('updated_at', result)
- self.assertEqual('ACTIVE', result['status'])
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_CREATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details=mock.ANY)
-
- @mock.patch('tacker.vnfm.plugin.VNFMPlugin.create_vnfd')
- def test_create_vnf_from_template(self, mock_create_vnfd):
- self._insert_dummy_vnf_template_inline()
- mock_create_vnfd.return_value = {'id':
- 'd58bcc4e-d0cf-11e6-bf26-cec0c932ce01'}
- vnf_obj = utils.get_dummy_inline_vnf_obj()
- result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertIsNotNone(result)
- self.assertIn('id', result)
- self.assertIn('instance_id', result)
- self.assertIn('status', result)
- self.assertIn('attributes', result)
- self.assertIn('mgmt_ip_address', result)
- self.assertIn('created_at', result)
- self.assertIn('updated_at', result)
- self.assertEqual('ACTIVE', result['status'])
- mock_create_vnfd.assert_called_once_with(mock.ANY, mock.ANY)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_CREATE,
- res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details=mock.ANY)
-
- def test_create_vnf_with_param_values(self):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- vnf_obj['vnf']['attributes'] = {'param_values':
- {'image_name': 'cirros-0.5.2-x86_64-disk', 'flavor': 'm1.tiny'}}
- result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertIsNotNone(result)
- self.assertEqual(vnf_obj['vnf']['attributes']['param_values'],
- result['attributes']['param_values'])
- self.assertEqual('ACTIVE', result['status'])
-
- def test_create_vnf_with_config_option(self):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- config = utils.get_dummy_vnf_config_obj()
- vnf_obj['vnf']['attributes'] = config['vnf']['attributes']
- result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertEqual(vnf_obj['vnf']['attributes']['config'],
- result['attributes']['config'])
- self.assertEqual('ACTIVE', result['status'])
-
- def test_create_vnf_fail_with_invalid_infra_driver_exception(self):
- self.vim_client.get_vim.return_value['vim_type'] = 'test_invalid_vim'
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- self.assertRaises(vnfm.InvalidInfraDriver,
- self.vnfm_plugin.create_vnf,
- self.context, vnf_obj)
-
- def test_create_vnf_with_invalid_param_and_config_format(self):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- vnf_obj['vnf']['attributes']['param_values'] = 'image_name'
- vnf_obj['vnf']['attributes']['config'] = 'test'
- self.assertRaises(vnfm.InvalidAPIAttributeType,
- self.vnfm_plugin.create_vnf,
- self.context, vnf_obj)
-
- @patch('tacker.vnfm.plugin.VNFMPlugin._delete_vnf')
- def test_create_vnf_fail(self, mock_delete_vnf):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- self.create.side_effect = vnfm.HeatClientException(msg='test')
- self.assertRaises(vnfm.HeatClientException,
- self.vnfm_plugin.create_vnf,
- self.context, vnf_obj)
- vnf_id = self.vnfm_plugin._delete_vnf.call_args[0][1]
- mock_delete_vnf.assert_called_once_with(self.context, vnf_id,
- force_delete=True)
-
- def test_create_vnf_create_wait_failed_exception(self):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- self.create_wait.side_effect = vnfm.VNFCreateWaitFailed(
- reason="failed")
- vnf_dict = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertEqual(constants.ERROR,
- vnf_dict['status'])
-
- @patch('tacker.vnfm.plugin.VNFMPlugin.mgmt_call')
- def test_create_vnf_mgmt_driver_exception(self, mock_mgmt_call):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- mock_mgmt_call.side_effect = exceptions.MgmtDriverException
- vnf_dict = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertEqual(constants.ERROR,
- vnf_dict['status'])
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._create_vnf_post')
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._create_vnf_pre')
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._create_vnf_status')
- def test_create_vnf_with_alarm_url(self, mock_create_vnf_status,
- mock_create_vnf_pre,
- mock_create_vnf_post):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- alarm_url_dict = {'vdu_hcpu_usage_scaling_out':
- 'http://localhost/identity',
- 'vdu_lcpu_usage_scaling_in':
- 'http://localhost/identity'}
- self._vnf_alarm_monitor.update_vnf_with_alarm.return_value = \
- alarm_url_dict
- dummy_vnf = self._get_dummy_vnf(utils.vnfd_alarm_scale_tosca_template,
- status='PENDING_CREATE')
- mock_create_vnf_pre.return_value = dummy_vnf
- vnf_dict = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertEqual(alarm_url_dict['vdu_lcpu_usage_scaling_in'],
- vnf_dict['attributes']['vdu_lcpu_usage_scaling_in'])
- self.assertEqual(alarm_url_dict['vdu_hcpu_usage_scaling_out'],
- vnf_dict['attributes']['vdu_hcpu_usage_scaling_out'])
-
- @patch('tacker.vnfm.plugin.VNFMPlugin._create_vnf_wait')
- def test_show_vnf_details_vnf_inactive(self, mock_create_vnf_wait):
- self._insert_dummy_vnf_template()
- vnf_obj = utils.get_dummy_vnf_obj()
- result = self.vnfm_plugin.create_vnf(self.context, vnf_obj)
- self.assertRaises(vnfm.VNFInactive, self.vnfm_plugin.get_vnf_resources,
- self.context, result['id'])
-
- @patch('tacker.vnfm.infra_drivers.openstack.openstack.OpenStack.'
- 'get_resource_info')
- def test_show_vnf_details_vnf_active(self, mock_get_resource_info):
- self._insert_dummy_vnf_template()
- active_vnf = self._insert_dummy_vnf()
- mock_get_resource_info.return_value = {'resources': {'name':
- 'dummy_vnf',
- 'type': 'dummy',
- 'id':
- uuidutils.generate_uuid()}}
- resources = self.vnfm_plugin.get_vnf_resources(self.context,
- active_vnf['id'])[0]
- self.assertIn('name', resources)
- self.assertIn('type', resources)
- self.assertIn('id', resources)
-
- def test_delete_vnf(self):
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_vnf()
- self.vnfm_plugin.delete_vnf(self.context, dummy_vnf_obj[
- 'id'])
- self._vnf_monitor.delete_hosting_vnf.assert_called_with(mock.ANY)
- self.delete.assert_called_with(plugin=mock.ANY, context=mock.ANY,
- vnf_id=mock.ANY,
- auth_attr=mock.ANY,
- region_name=mock.ANY)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_DELETE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details=mock.ANY)
-
- def test_delete_vnf_in_pending_state(self):
- # delete_vnf will raise exception when VNF status in PENDING_*
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_pending_vnf(self.context)
- self.assertRaises(vnfm.VNFDeleteFailed,
- self.vnfm_plugin.delete_vnf,
- self.context,
- dummy_vnf_obj['id'])
-
- @ddt.data('PENDING_DELETE', 'PENDING_CREATE')
- def test_force_delete_vnf(self, status):
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_pending_vnf(self.context, status)
- vnfattr = {'vnf': {'attributes': {'force': True}}}
- self.vnfm_plugin.delete_vnf(self.context, dummy_vnf_obj[
- 'id'], vnf=vnfattr)
- self._vnf_monitor.delete_hosting_vnf.assert_called_with(mock.ANY)
- self.delete.assert_called_with(plugin=mock.ANY, context=mock.ANY,
- vnf_id=mock.ANY,
- auth_attr=mock.ANY,
- region_name=mock.ANY)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_DELETE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details="VNF Delete Complete")
-
- def test_force_delete_vnf_non_admin(self):
- self._insert_dummy_vnf_template()
- non_admin_context = context.Context(user_id=None,
- tenant_id=None,
- is_admin=False)
- dummy_vnf_obj = self._insert_dummy_pending_vnf(non_admin_context)
- vnfattr = {'vnf': {'attributes': {'force': True}}}
- self.assertRaises(exceptions.AdminRequired,
- self.vnfm_plugin.delete_vnf,
- non_admin_context,
- dummy_vnf_obj['id'], vnf=vnfattr)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._delete_vnf_post')
- def test_delete_vnf_fail(self, mock_delete_vnf_post):
- self.delete.side_effect = FakeException
- self._insert_dummy_vnf_template()
- dummy_device_obj = self._insert_dummy_vnf()
- self.assertRaises(FakeException,
- self.vnfm_plugin.delete_vnf, self.context,
- dummy_device_obj['id'])
- self._vnf_monitor.delete_hosting_vnf.assert_called_once_with(
- dummy_device_obj['id'])
- mock_delete_vnf_post.assert_called_once_with(self.context, mock.ANY,
- mock.ANY)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_DELETE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY, details=mock.ANY)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.set_vnf_error_status_reason')
- def test_delete_vnf_delete_wait_failed_exception(self,
- mock_set_vnf_error_status_reason):
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_vnf()
- self.delete_wait.side_effect = vnfm.VNFDeleteWaitFailed(
- reason='failed')
- self.vnfm_plugin.delete_vnf(self.context, dummy_vnf_obj['id'])
- mock_set_vnf_error_status_reason.assert_called_once_with(self.context,
- mock.ANY,
- mock.ANY)
-
- def test_delete_vnf_failed_with_status_pending_create(self):
- self._insert_dummy_vnf_template()
- dummy_device_obj_with_pending_create_status = self. \
- _insert_dummy_vnf(status="PENDING_CREATE")
- self.assertRaises(vnfm.VNFInUse, self.vnfm_plugin.delete_vnf,
- self.context,
- dummy_device_obj_with_pending_create_status['id'])
-
- def _insert_dummy_ns_template(self):
- session = self.context.session
- attributes = {
- 'nsd': 'imports: [VNF1, VNF2]\ntopology_template:\n inputs:\n '
- ' vl1_name: {default: net_mgmt, description: name of VL1'
- ' virtuallink, type: string}\n vl2_name: {default: '
- 'net0, description: name of VL2 virtuallink, type: string'
- '}\n node_templates:\n VL1:\n properties:\n '
- ' network_name: {get_input: vl1_name}\n vendor: '
- 'tacker\n type: tosca.nodes.nfv.VL\n VL2:\n '
- 'properties:\n network_name: {get_input: vl2_name}'
- '\n vendor: tacker\n type: tosca.nodes.nfv.VL'
- '\n VNF1:\n requirements:\n - {virtualLink1: '
- 'VL1}\n - {virtualLink2: VL2}\n type: tosca.node'
- 's.nfv.VNF1\n VNF2: {type: tosca.nodes.nfv.VNF2}\ntosca'
- '_definitions_version: tosca_simple_profile_for_nfv_1_0_0'
- '\n'}
- nsd_template = ns_db.NSD(
- id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- name='fake_template',
- vnfds={'tosca.nodes.nfv.VNF1': 'vnf1',
- 'tosca.nodes.nfv.VNF2': 'vnf2'},
- description='fake_nsd_template_description',
- deleted_at=datetime.min,
- template_source='onboarded')
- session.add(nsd_template)
- for (key, value) in attributes.items():
- attribute_db = ns_db.NSDAttribute(
- id=uuidutils.generate_uuid(),
- nsd_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- key=key,
- value=value)
- session.add(attribute_db)
- session.flush()
- return nsd_template
-
- def _insert_dummy_ns(self):
- session = self.context.session
- ns = ns_db.NS(
- id='ba6bf017-f6f7-45f1-a280-57b073bf78ea',
- name='dummy_ns',
- tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
- status='ACTIVE',
- nsd_id='eb094833-995e-49f0-a047-dfb56aaf7c4e',
- vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
- description='dummy_ns_description',
- vnf_ids='[5761579e-d6f3-49ad-8bc3-a9cb73477846,'
- '6261579e-d6f3-49ad-8bc3-a9cb974778fe]',
- deleted_at=datetime.min)
- session.add(ns)
- session.flush()
- return ns
-
- def test_delete_vnf_of_active_ns(self):
- self._insert_dummy_ns_template()
- self._insert_dummy_ns()
- self.assertRaises(vnfm.VNFInUse, self.vnfm_plugin.delete_vnf,
- self.context, '6261579e-d6f3-49ad-8bc3-a9cb974778fe')
-
- def test_update_vnf(self):
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_vnf()
- vnf_config_obj = utils.get_dummy_vnf_config_obj()
- result = self.vnfm_plugin.update_vnf(self.context, dummy_vnf_obj[
- 'id'], vnf_config_obj)
- self.assertIsNotNone(result)
- self.assertEqual(dummy_vnf_obj['id'], result['id'])
- self.assertIn('instance_id', result)
- self.assertIn('status', result)
- self.assertIn('attributes', result)
- self.assertIn('mgmt_ip_address', result)
- self.assertIn('updated_at', result)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._update_vnf_post')
- def test_update_vnf_with_exception(self, mock_update_vnf_post):
- self.update.side_effect = FakeException
- self._insert_dummy_vnf_template()
- dummy_device_obj = self._insert_dummy_vnf()
- vnf_config_obj = utils.get_dummy_vnf_config_obj()
- self.assertRaises(FakeException,
- self.vnfm_plugin.update_vnf, self.context,
- dummy_device_obj['id'], vnf_config_obj)
- self._vnf_monitor.delete_hosting_vnf.assert_called_once_with(
- dummy_device_obj['id'])
- mock_update_vnf_post.assert_called_once_with(self.context,
- dummy_device_obj['id'],
- constants.ERROR,
- mock.ANY,
- constants.PENDING_UPDATE,
- constants.RES_EVT_UPDATE)
-
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY)
-
- def test_update_vnf_invalid_config_format(self):
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_vnf()
- vnf_config_obj = utils.get_dummy_vnf_config_obj()
- vnf_config_obj['vnf']['attributes']['config'] = {'vdus': {
- 'vdu1': {'config': {'firewall': 'dummy_firewall_values'}}}}
- result = self.vnfm_plugin.update_vnf(self.context, dummy_vnf_obj[
- 'id'], vnf_config_obj)
- self.assertEqual(constants.ACTIVE, result['status'])
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.set_vnf_error_status_reason')
- @mock.patch('tacker.vnfm.plugin.VNFMPlugin.mgmt_call')
- def test_update_vnf_fail_mgmt_driver_error(self, mock_mgmt_call,
- mock_set_vnf_error_status_reason):
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_vnf()
- vnf_config_obj = utils.get_dummy_vnf_config_obj()
- mock_mgmt_call.side_effect = exceptions.MgmtDriverException
- vnf_dict = self.vnfm_plugin.update_vnf(self.context,
- dummy_vnf_obj['id'],
- vnf_config_obj)
- self.assertEqual(constants.ERROR,
- vnf_dict['status'])
- mock_set_vnf_error_status_reason.assert_called_once_with(self.context,
- dummy_vnf_obj['id'],
- 'VNF configuration failed')
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.set_vnf_error_status_reason')
- def test_update_vnf_fail_update_wait_error(self,
- mock_set_vnf_error_status_reason):
- self._insert_dummy_vnf_template()
- dummy_vnf_obj = self._insert_dummy_vnf()
- vnf_config_obj = utils.get_dummy_vnf_config_obj()
- self.update_wait.side_effect = vnfm.VNFUpdateWaitFailed(
- reason='failed')
- self.assertRaises(vnfm.VNFUpdateWaitFailed,
- self.vnfm_plugin.update_vnf, self.context,
- dummy_vnf_obj['id'], vnf_config_obj)
- self._vnf_monitor.\
- delete_hosting_vnf.assert_called_once_with(dummy_vnf_obj['id'])
- mock_set_vnf_error_status_reason.assert_called_once_with(self.context,
- dummy_vnf_obj['id'],
- 'VNF Update failed')
-
- def test_update_vnf_param(self):
- self._insert_dummy_vnf_template()
- dummy_device_obj = self._insert_dummy_vnf()
- vnf_param_obj = utils.get_dummy_vnf_param_obj()
- result = self.vnfm_plugin.update_vnf(self.context,
- dummy_device_obj['id'],
- vnf_param_obj)
- self.assertIsNotNone(result)
- self.assertEqual(dummy_device_obj['id'], result['id'])
- self.assertIn('instance_id', result)
- self.assertIn('status', result)
- self.assertIn('attributes', result)
- self.assertIn('mgmt_ip_address', result)
- self.assertIn('updated_at', result)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context, evt_type=constants.RES_EVT_UPDATE, res_id=mock.ANY,
- res_state=mock.ANY, res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY)
-
- def test_update_vnf_invalid_config_type(self):
- self._insert_dummy_vnf_template()
- dummy_device_obj = self._insert_dummy_vnf()
- vnf_param_obj = utils.get_dummy_vnf_invalid_config_type_obj()
- self.assertRaises(vnfm.InvalidAPIAttributeType,
- self.vnfm_plugin.update_vnf,
- self.context,
- dummy_device_obj['id'],
- vnf_param_obj)
-
- def test_update_vnf_invalid_param_type(self):
- self._insert_dummy_vnf_template()
- dummy_device_obj = self._insert_dummy_vnf()
- vnf_param_obj = utils.get_dummy_vnf_invalid_param_type_obj()
- self.assertRaises(vnfm.InvalidAPIAttributeType,
- self.vnfm_plugin.update_vnf,
- self.context,
- dummy_device_obj['id'],
- vnf_param_obj)
-
- def test_update_vnf_invalid_param_content(self):
- self.update.side_effect = vnfm.VNFUpdateInvalidInput(
- reason='failed')
- self._insert_dummy_vnf_template()
- dummy_device_obj = self._insert_dummy_vnf()
- vnf_param_obj = utils.get_dummy_vnf_invalid_param_content()
- self.assertRaises(vnfm.VNFUpdateInvalidInput,
- self.vnfm_plugin.update_vnf,
- self.context,
- dummy_device_obj['id'],
- vnf_param_obj)
-
- def _get_dummy_scaling_policy(self, type):
- vnf_scale = {}
- vnf_scale['scale'] = {}
- vnf_scale['scale']['type'] = type
- vnf_scale['scale']['policy'] = 'SP1'
- return vnf_scale
-
- def _get_scaling_vnf(self, type, invalid_policy_type=False):
- # create vnfd
- self._insert_dummy_vnf_template()
- self._insert_scaling_attributes_vnfd(invalid_policy_type)
-
- # create vnf
- dummy_vnf_obj = self._insert_dummy_vnf()
- self._insert_scaling_attributes_vnf()
-
- # scale vnf
- vnf_scale = self._get_dummy_scaling_policy(type)
- return dummy_vnf_obj, vnf_scale
-
- def _test_scale_vnf(self, type):
- dummy_vnf_obj, vnf_scale = self._get_scaling_vnf(type)
- self.vnfm_plugin.create_vnf_scale(
- self.context,
- dummy_vnf_obj['id'],
- vnf_scale)
- # validate
- self.scale.assert_called_once_with(
- plugin=mock.ANY,
- context=mock.ANY,
- auth_attr=mock.ANY,
- policy=mock.ANY,
- region_name=mock.ANY
- )
- self.scale_wait.assert_called_once_with(plugin=self.vnfm_plugin,
- context=self.context,
- auth_attr=mock.ANY,
- policy=mock.ANY,
- region_name=mock.ANY,
- last_event_id=mock.ANY)
- self._cos_db_plugin.create_event.assert_called_with(
- self.context,
- evt_type=constants.RES_EVT_SCALE,
- res_id='6261579e-d6f3-49ad-8bc3-a9cb974778fe',
- res_state='ACTIVE',
- res_type=constants.RES_TYPE_VNF,
- tstamp=mock.ANY)
-
- def test_scale_vnf_out(self):
- self._test_scale_vnf('out')
-
- def test_scale_vnf_in(self):
- self._test_scale_vnf('in')
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._update_vnf_scaling_status')
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.set_vnf_error_status_reason')
- def test_scale_vnf_with_vnf_policy_action_exception(self,
- mock_set_vnf_error_status_reason,
- mock_update_vnf_scaling_status):
- dummy_vnf_obj, vnf_scale = self._get_scaling_vnf('in')
- self.scale.side_effect = FakeException
- self.assertRaises(FakeException,
- self.vnfm_plugin.create_vnf_scale,
- self.context, dummy_vnf_obj['id'],
- vnf_scale)
- mock_update_vnf_scaling_status.assert_called_with(
- self.context, mock.ANY, [constants.PENDING_SCALE_IN],
- constants.ERROR, mock.ANY)
- mock_set_vnf_error_status_reason.assert_called_with(
- self.context, dummy_vnf_obj['id'], mock.ANY)
-
- @mock.patch('tacker.vnfm.plugin.VNFMPlugin.get_vnf_policies')
- def test_scale_vnf_with_policy_not_found_exception(self,
- mock_get_vnf_policies):
- dummy_vnf_obj, vnf_scale = self._get_scaling_vnf('in')
- mock_get_vnf_policies.return_value = None
- self.assertRaises(exceptions.VnfPolicyNotFound,
- self.vnfm_plugin.create_vnf_scale,
- self.context, dummy_vnf_obj['id'],
- vnf_scale)
-
- def test_scale_vnf_with_invalid_policy_type(self):
- dummy_vnf_obj, vnf_scale = self._get_scaling_vnf('in',
- invalid_policy_type=True)
- self.assertRaises(exceptions.VnfPolicyTypeInvalid,
- self.vnfm_plugin.create_vnf_scale,
- self.context, dummy_vnf_obj['id'], vnf_scale)
-
- def test_scale_vnf_with_invalid_policy_action(self):
- dummy_vnf_obj, vnf_scale = \
- self._get_scaling_vnf('test_invalid_policy_action')
- self.assertRaises(exceptions.VnfPolicyActionInvalid,
- self.vnfm_plugin.create_vnf_scale,
- self.context, dummy_vnf_obj['id'], vnf_scale)
-
- def test_scale_vnf_scale_wait_failed_exception(self):
- dummy_vnf_obj, vnf_scale = \
- self._get_scaling_vnf('in')
- self.scale_wait.side_effect = vnfm.VNFScaleWaitFailed(
- reason='test')
- self.assertRaises(vnfm.VNFScaleWaitFailed,
- self.vnfm_plugin.create_vnf_scale,
- self.context, dummy_vnf_obj['id'], vnf_scale)
-
- def _get_dummy_vnf(self, vnfd_template, status='ACTIVE'):
- dummy_vnf = utils.get_dummy_vnf()
- dummy_vnf['vnfd']['attributes']['vnfd'] = vnfd_template
- dummy_vnf['status'] = status
- dummy_vnf['instance_id'] = '4c00108e-c69d-4624-842d-389c77311c1d'
- dummy_vnf['vim_id'] = '437ac8ef-a8fb-4b6e-8d8a-a5e86a376e8b'
- return dummy_vnf
-
- def _create_vnf_trigger_data(self, policy_name, action_value):
- vnf_id = "6261579e-d6f3-49ad-8bc3-a9cb974778fe"
- trigger_request = {"trigger": {"action_name": action_value, "params": {
- "credential": "026kll6n", "data": {"current": "alarm",
- 'alarm_id':
- "b7fa9ffd-0a4f-4165-954b-5a8d0672a35f"}},
- "policy_name": policy_name}}
- expected_result = {"action_name": action_value, "params": {
- "credential": "026kll6n", "data": {"current": "alarm",
- "alarm_id": "b7fa9ffd-0a4f-4165-954b-5a8d0672a35f"}},
- "policy_name": policy_name}
- return vnf_id, trigger_request, expected_result
-
- @patch('tacker.vnfm.policy_actions.autoscaling.autoscaling.'
- 'VNFActionAutoscaling.execute_action')
- def _test_create_vnf_trigger(self, mock_execute_action,
- policy_name, action_value):
- vnf_id, trigger_request, expected_result = self.\
- _create_vnf_trigger_data(policy_name, action_value)
- self._vnf_alarm_monitor.process_alarm_for_vnf.return_value = True
- trigger_result = self.vnfm_plugin.create_vnf_trigger(self.context,
- vnf_id, trigger_request)
- self.assertEqual(expected_result, trigger_result)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- def test_create_vnf_trigger_respawn(self, mock_get_vnf):
- dummy_vnf = self._get_dummy_vnf(
- utils.vnfd_alarm_respawn_tosca_template)
- mock_get_vnf.return_value = dummy_vnf
- self._test_create_vnf_trigger(policy_name="vdu_hcpu_usage_respawning",
- action_value="respawn")
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- def test_create_vnf_trigger_scale(self, mock_get_vnf):
- dummy_vnf = self._get_dummy_vnf(
- utils.vnfd_alarm_scale_tosca_template)
- mock_get_vnf.return_value = dummy_vnf
- self._test_create_vnf_trigger(policy_name="vdu_hcpu_usage_scaling_out",
- action_value="SP1-out")
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- def test_create_vnf_trigger_multi_actions(self, mock_get_vnf):
- dummy_vnf = self._get_dummy_vnf(
- utils.vnfd_alarm_multi_actions_tosca_template)
- mock_get_vnf.return_value = dummy_vnf
- self._test_create_vnf_trigger(policy_name="mon_policy_multi_actions",
- action_value="respawn&log")
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- def test_create_vnf_trigger_without_policy_actions(self, mock_get_vnf):
- dummy_vnf = self._get_dummy_vnf(
- utils.vnfd_alarm_multi_actions_tosca_template)
- mock_get_vnf.return_value = dummy_vnf
- vnf_id, trigger_request, _ = self._create_vnf_trigger_data(
- "mon_policy_multi_actions", "respawn&log")
- self._vnf_alarm_monitor.process_alarm_for_vnf.return_value = False
- self.assertRaises(exceptions.AlarmUrlInvalid,
- self.vnfm_plugin.create_vnf_trigger,
- self.context,
- vnf_id, trigger_request)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- def test_create_vnf_trigger_with_invalid_policy_name(self, mock_get_vnf):
- dummy_vnf = self._get_dummy_vnf(
- utils.vnfd_alarm_multi_actions_tosca_template)
- mock_get_vnf.return_value = dummy_vnf
- vnf_id, trigger_request, _ = self._create_vnf_trigger_data(
- "invalid_policy_name", "respawn&log")
- self.assertRaises(exceptions.TriggerNotFound,
- self.vnfm_plugin.create_vnf_trigger,
- self.context,
- vnf_id, trigger_request)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- @patch('tacker.vnfm.plugin.LOG')
- def test_create_vnf_trigger_scale_with_invalid_vnf_status(self,
- mock_log, mock_get_vnf):
- dummy_vnf = self._get_dummy_vnf(utils.vnfd_alarm_scale_tosca_template)
- dummy_vnf['status'] = "PENDING_CREATE"
- mock_get_vnf.return_value = dummy_vnf
- vnf_id, trigger_request, expected_result = self. \
- _create_vnf_trigger_data("vdu_hcpu_usage_scaling_out", "SP1-out")
- expected_error_msg = (_("Scaling Policy action skipped due to status"
- ' %(status)s for vnf %(vnfid)s') %
- {'status': dummy_vnf['status'],
- 'vnfid': dummy_vnf['id']})
- self._vnf_alarm_monitor.process_alarm_for_vnf.return_value = True
- trigger_result = self.vnfm_plugin.create_vnf_trigger(self.context,
- vnf_id, trigger_request)
-
- mock_log.info.assert_called_with(expected_error_msg)
- self.assertEqual(expected_result, trigger_result)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- def test_get_vnf_policies(self, mock_get_vnf):
- vnf_id = "6261579e-d6f3-49ad-8bc3-a9cb974778fe"
- dummy_vnf = self._get_dummy_vnf(
- utils.vnfd_alarm_respawn_tosca_template)
- mock_get_vnf.return_value = dummy_vnf
- policies = self.vnfm_plugin.get_vnf_policies(self.context, vnf_id,
- filters={'name': 'vdu1_cpu_usage_monitoring_policy'})
- self.assertEqual(1, len(policies))
-
- @mock.patch('tacker.vnfm.plugin.toscautils.get_mgmt_driver')
- def test_mgmt_driver(self, mock_get_mgmt_driver):
- mock_get_mgmt_driver.return_value = 'dummy_mgmt_driver'
-
- vnfd_obj = utils.get_dummy_vnfd_obj()
- self.assertRaises(vnfm.InvalidMgmtDriver,
- self.vnfm_plugin.create_vnfd,
- self.context, vnfd_obj)
-
- @mock.patch('tacker.vnfm.plugin.VNFMPlugin.get_vnf_policies')
- def test_get_vnf_policy_by_type(self, mock_get_vnf_policies):
- mock_get_vnf_policies.return_value = None
-
- self.assertRaises(exceptions.VnfPolicyTypeInvalid,
- self.vnfm_plugin.get_vnf_policy_by_type,
- self.context,
- uuidutils.generate_uuid(),
- policy_type='invalid_policy_type')
-
- @patch('tacker.vnfm.infra_drivers.openstack.openstack.OpenStack.'
- 'heal_vdu')
- @mock.patch('tacker.vnfm.monitor.VNFMonitor.update_hosting_vnf')
- def test_heal_vnf_vdu(self, mock_update_hosting_vnf, mock_heal_vdu):
- self._insert_dummy_vnf_template()
- dummy_device_obj = self._insert_dummy_vnf()
- additional_params_obj = heal_vnf_request.HealVnfAdditionalParams(
- parameter='VDU1',
- cause=["Unable to reach while monitoring resource: 'VDU1'"])
- heal_request_data_obj = heal_vnf_request.HealVnfRequest(
- stack_id=dummy_device_obj['instance_id'],
- cause='VNF monitoring fails.',
- legacy_additional_params=[additional_params_obj])
- result = self.vnfm_plugin.heal_vnf(self.context,
- dummy_device_obj['id'],
- heal_request_data_obj)
- self.assertIsNotNone(result)
- self.assertEqual(dummy_device_obj['id'], result['id'])
- self.assertIn('instance_id', result)
- self.assertIn('status', result)
- self.assertIn('attributes', result)
- self.assertIn('mgmt_ip_address', result)
- self.assertIn('updated_at', result)
- self.assertEqual('ACTIVE', result['status'])
- mock_heal_vdu.assert_called_with(plugin=self.vnfm_plugin,
- context=self.context, vnf_dict=mock.ANY,
- heal_request_data_obj=heal_request_data_obj)
-
- @patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
- def test_create_vnf_trigger_scale_with_reservation(self, mock_get_vnf):
- dummy_vnf = self._get_dummy_vnf(
- utils.vnfd_instance_reservation_alarm_scale_tosca_template)
- mock_get_vnf.return_value = dummy_vnf
- self._test_create_vnf_trigger(policy_name="start_actions",
- action_value="SP_RSV-out")
-
- def test_create_placement_constraint(self):
- res_str = '[{"id_type": "RES_MGMT", "resource_id": ' + \
- '"2c6e5cc7-240d-4458-a683-1fe648351200", ' + \
- '"vim_connection_id": ' + \
- '"2a63bee3-0c43-4568-bcfa-b0cb733e064c"}]'
- placemnt = models.PlacementConstraint(
- id='c2947d8a-2c67-4e8f-ad6f-c0889b351c17',
- vnf_instance_id='7ddc38c3-a116-48b0-bfc1-68d7f306f467',
- affinity_or_anti_affinity='ANTI_AFFINITY',
- scope='ZONE',
- server_group_name='my_compute_placement_policy',
- resource=res_str,
- deleted_at=datetime.min)
- pls_list = [placemnt]
- vnf_inst = models.VnfInstance(
- id='7ddc38c3-a116-48b0-bfc1-68d7f306f467',
- vnf_provider=' ',
- vnf_product_name=' ',
- vnf_software_version=' ',
- vnfd_version=' ',
- vnfd_id='8d86480e-d4e6-4ee0-ba4d-08217118d6cb',
- instantiation_state=' ',
- tenant_id='9b3f0518-bf6b-4982-af32-d282ce577c8f',
- created_at=datetime(
- 2020, 1, 1, 1, 1, 1,
- tzinfo=iso8601.UTC),
- vnf_pkg_id=uuidutils.generate_uuid())
- self.context.session.add(vnf_inst)
- self.context.session.flush()
-
- self.vnfm_plugin.create_placement_constraint(
- self.context, pls_list)
-
def test_get_placement_constraint(self):
res_str = '[{"id_type": "RES_MGMT", "resource_id": ' + \
'"2c6e5cc7-240d-4458-a683-1fe648351200", ' + \
@@ -1334,22 +342,6 @@ class TestVNFMPlugin(db_base.SqlTestCase):
self.vnfm_plugin.delete_placement_constraint(
self.context, '7ddc38c3-a116-48b0-bfc1-68d7f306f467')
- def test_update_vnf_rollback_pre_scale(self):
- vnf_info = {}
- vnf_lcm_op_occ = vnflcm_fakes.vnflcm_rollback()
- vnf_info['vnf_lcm_op_occ'] = vnf_lcm_op_occ
- vnf_info['id'] = uuidutils.generate_uuid()
- self.vnfm_plugin._update_vnf_rollback_pre(
- self.context, vnf_info)
-
- def test_update_vnf_rollback_pre_insta(self):
- vnf_info = {}
- vnf_lcm_op_occ = vnflcm_fakes.vnflcm_rollback_insta()
- vnf_info['vnf_lcm_op_occ'] = vnf_lcm_op_occ
- vnf_info['id'] = uuidutils.generate_uuid()
- self.vnfm_plugin._update_vnf_rollback_pre(
- self.context, vnf_info)
-
def test_update_vnf_rollback_scale(self):
vnf_info = {}
vnf_lcm_op_occ = vnflcm_fakes.vnflcm_rollback()
@@ -1367,19 +359,3 @@ class TestVNFMPlugin(db_base.SqlTestCase):
self.vnfm_plugin._update_vnf_rollback(
self.context, vnf_info,
'ERROR', 'INACTIVE')
-
- def test_update_vnf_rollback_status_err_scale(self):
- vnf_info = {}
- vnf_lcm_op_occ = vnflcm_fakes.vnflcm_rollback()
- vnf_info['vnf_lcm_op_occ'] = vnf_lcm_op_occ
- vnf_info['id'] = uuidutils.generate_uuid()
- self.vnfm_plugin.update_vnf_rollback_status_err(
- self.context, vnf_info)
-
- def test_update_vnf_rollback_status_err_insta(self):
- vnf_info = {}
- vnf_lcm_op_occ = vnflcm_fakes.vnflcm_rollback_insta()
- vnf_info['vnf_lcm_op_occ'] = vnf_lcm_op_occ
- vnf_info['id'] = uuidutils.generate_uuid()
- self.vnfm_plugin.update_vnf_rollback_status_err(
- self.context, vnf_info)
diff --git a/tacker/tests/unit/vnfm/tosca/__init__.py b/tacker/tests/unit/vnfm/tosca/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/tests/unit/vnfm/tosca/test_utils.py b/tacker/tests/unit/vnfm/tosca/test_utils.py
deleted file mode 100644
index 96da5c9a9..000000000
--- a/tacker/tests/unit/vnfm/tosca/test_utils.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright 2016 - Nokia
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import codecs
-import os
-
-import testtools
-import yaml
-
-from tacker.extensions import vnfm
-from tacker.tests import utils
-from tacker.tosca import utils as toscautils
-from toscaparser import tosca_template
-from translator.hot import tosca_translator
-
-
-def _get_template_abs(path):
- with codecs.open(path, encoding='utf-8', errors='strict') as f:
- return f.read()
-
-
-def _get_template(name):
- filename = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
- "../infra_drivers/openstack/data/", name)
- return _get_template_abs(filename)
-
-
-class TestToscaUtils(testtools.TestCase):
- tosca_openwrt = _get_template('test_tosca_openwrt.yaml')
- vnfd_dict = yaml.safe_load(tosca_openwrt)
- toscautils.updateimports(vnfd_dict)
-
- def setUp(self):
- super(TestToscaUtils, self).setUp()
- self.tosca = tosca_template.ToscaTemplate(
- parsed_params={}, a_file=False, yaml_dict_tpl=self.vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- self.tosca_flavor = _get_template('test_tosca_flavor.yaml')
-
- def test_updateimport(self):
- importspath = os.path.abspath('./tacker/tosca/lib/')
- file1 = importspath + '/tacker_defs.yaml'
- file2 = importspath + '/tacker_nfv_defs.yaml'
- expected_imports = [file1, file2]
- self.assertEqual(expected_imports, self.vnfd_dict['imports'])
-
- def test_get_mgmt_driver(self):
- expected_mgmt_driver = 'openwrt'
- mgmt_driver = toscautils.get_mgmt_driver(self.tosca)
- self.assertEqual(expected_mgmt_driver, mgmt_driver)
-
- def test_get_vdu_monitoring(self):
- expected_monitoring = {'vdus': {'VDU1': {'ping': {
- 'actions':
- {'failure': 'respawn'},
- 'name': 'ping',
- 'parameters': {'count': 3,
- 'interval': 10},
- 'monitoring_params': {'count': 3,
- 'interval': 10}}}}}
- monitoring = toscautils.get_vdu_monitoring(self.tosca)
- self.assertEqual(expected_monitoring, monitoring)
-
- def test_get_mgmt_ports(self):
- expected_mgmt_ports = {'mgmt_ip-VDU1': 'CP1'}
- mgmt_ports = toscautils.get_mgmt_ports(self.tosca)
- self.assertEqual(expected_mgmt_ports, mgmt_ports)
-
- def test_post_process_template(self):
- tosca_post_process_tpl = _get_template(
- 'test_tosca_post_process_template.yaml')
- vnfd_dict = yaml.safe_load(tosca_post_process_tpl)
- toscautils.updateimports(vnfd_dict)
- tosca = tosca_template.ToscaTemplate(
- parsed_params={}, a_file=False, yaml_dict_tpl=vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- toscautils.post_process_template(tosca)
-
- invalidNodes = 0
- deletedProperties = 0
- convertedValues = 0
- convertedProperties = 0
-
- for nt in tosca.nodetemplates:
- if (nt.type_definition.is_derived_from(toscautils.MONITORING) or
- nt.type_definition.is_derived_from(toscautils.FAILURE) or
- nt.type_definition.is_derived_from(toscautils.PLACEMENT)):
- invalidNodes += 1
-
- if nt.type in toscautils.delpropmap:
- for prop in toscautils.delpropmap[nt.type]:
- for p in nt.get_properties_objects():
- if prop == p.name:
- deletedProperties += 1
-
- if nt.type in toscautils.convert_prop_values:
- for prop in toscautils.convert_prop_values[nt.type]:
- convertmap = toscautils.convert_prop_values[nt.type][prop]
- for p in nt.get_properties_objects():
- if (prop == p.name and
- p.value in convertmap):
- convertedValues += 1
-
- if nt.type in toscautils.convert_prop:
- for prop in toscautils.convert_prop[nt.type]:
- for p in nt.get_properties_objects():
- if prop == p.name:
- convertedProperties += 1
-
- if nt.name == 'VDU1':
- vdu1_hints = nt.get_properties().get('scheduler_hints')
- vdu1_rsv = vdu1_hints.value.get('reservation')
-
- self.assertEqual(0, invalidNodes)
- self.assertEqual(0, deletedProperties)
- self.assertEqual(0, convertedValues)
- self.assertEqual(0, convertedProperties)
- self.assertEqual(vdu1_rsv, '459e94c9-efcd-4320-abf5-8c18cd82c331')
-
- def test_post_process_heat_template(self):
- tosca1 = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
- yaml_dict_tpl=self.vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- toscautils.post_process_template(tosca1)
- translator = tosca_translator.TOSCATranslator(tosca1, {})
- heat_template_yaml = translator.translate()
- expected_heat_tpl = _get_template('hot_tosca_openwrt.yaml')
- mgmt_ports = toscautils.get_mgmt_ports(self.tosca)
- heat_tpl = toscautils.post_process_heat_template(
- heat_template_yaml, mgmt_ports, {}, {}, {})
-
- heatdict = yaml.safe_load(heat_tpl)
- expecteddict = yaml.safe_load(expected_heat_tpl)
- self.assertEqual(expecteddict, heatdict)
-
- def test_findvdus(self):
- vdus = toscautils.findvdus(self.tosca)
-
- self.assertEqual(1, len(vdus))
-
- for vdu in vdus:
- self.assertEqual(True, vdu.type_definition.is_derived_from(
- toscautils.TACKERVDU))
-
- def test_get_flavor_dict(self):
- vnfd_dict = yaml.safe_load(self.tosca_flavor)
- toscautils.updateimports(vnfd_dict)
- tosca = tosca_template.ToscaTemplate(
- a_file=False, yaml_dict_tpl=vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- expected_flavor_dict = {
- "VDU1": {
- "vcpus": 2,
- "disk": 10,
- "ram": 512
- }
- }
- actual_flavor_dict = toscautils.get_flavor_dict(tosca)
- self.assertEqual(expected_flavor_dict, actual_flavor_dict)
-
- def test_add_resources_tpl_for_flavor(self):
- dummy_heat_dict = yaml.safe_load(_get_template(
- 'hot_flavor_and_capabilities.yaml'))
- expected_dict = yaml.safe_load(_get_template('hot_flavor.yaml'))
- dummy_heat_res = {
- "flavor": {
- "VDU1": {
- "vcpus": 2,
- "ram": 512,
- "disk": 10
- }
- }
- }
- toscautils.add_resources_tpl(dummy_heat_dict, dummy_heat_res)
- self.assertEqual(expected_dict, dummy_heat_dict)
-
- def test_get_flavor_dict_extra_specs_all_numa_count(self):
- tosca_fes_all_numa_count = _get_template(
- 'tosca_flavor_all_numa_count.yaml')
- vnfd_dict = yaml.safe_load(tosca_fes_all_numa_count)
- toscautils.updateimports(vnfd_dict)
- tosca = tosca_template.ToscaTemplate(
- a_file=False, yaml_dict_tpl=vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- expected_flavor_dict = {
- "VDU1": {
- "vcpus": 8,
- "disk": 10,
- "ram": 4096,
- "extra_specs": {
- 'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any',
- 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2,
- 'hw:numa_nodes': 2, 'hw:cpu_cores': 2,
- 'hw:cpu_threads_policy': 'avoid'
- }
- }
- }
- actual_flavor_dict = toscautils.get_flavor_dict(tosca)
- self.assertEqual(expected_flavor_dict, actual_flavor_dict)
-
- def test_get_flavor_dict_with_wrong_cpu(self):
- tosca_fes = _get_template(
- 'tosca_flavor_with_wrong_cpu.yaml')
- vnfd_dict = yaml.safe_load(tosca_fes)
- toscautils.updateimports(vnfd_dict)
- tosca = tosca_template.ToscaTemplate(
- a_file=False, yaml_dict_tpl=vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
-
- self.assertRaises(vnfm.CpuAllocationInvalidValues,
- toscautils.get_flavor_dict,
- tosca)
-
- def test_tacker_conf_heat_extra_specs_all_numa_count(self):
- tosca_fes_all_numa_count = _get_template(
- 'tosca_flavor_all_numa_count.yaml')
- vnfd_dict = yaml.safe_load(tosca_fes_all_numa_count)
- toscautils.updateimports(vnfd_dict)
- tosca = tosca_template.ToscaTemplate(
- a_file=False, yaml_dict_tpl=vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- expected_flavor_dict = {
- "VDU1": {
- "vcpus": 8,
- "disk": 10,
- "ram": 4096,
- "extra_specs": {
- 'hw:cpu_policy': 'dedicated', 'hw:mem_page_size': 'any',
- 'hw:cpu_sockets': 2, 'hw:cpu_threads': 2,
- 'hw:numa_nodes': 2, 'hw:cpu_cores': 2,
- 'hw:cpu_threads_policy': 'avoid',
- 'aggregate_instance_extra_specs:nfv': 'true'
- }
- }
- }
- actual_flavor_dict = toscautils.get_flavor_dict(
- tosca, {"aggregate_instance_extra_specs:nfv": "true"})
- self.assertEqual(expected_flavor_dict, actual_flavor_dict)
-
- def test_add_resources_tpl_for_image(self):
- dummy_heat_dict = yaml.safe_load(_get_template(
- 'hot_image_before_processed_image.yaml'))
- expected_dict = yaml.safe_load(_get_template(
- 'hot_image_after_processed_image.yaml'))
- dummy_heat_res = {
- "image": {
- "VDU1": {
- "location": "http://URL/v1/openwrt.qcow2",
- "container_format": "bare",
- "disk_format": "raw"
- }
- }
- }
- toscautils.add_resources_tpl(dummy_heat_dict, dummy_heat_res)
- self.assertEqual(expected_dict, dummy_heat_dict)
-
- def test_convert_unsupported_res_prop_kilo_ver(self):
- unsupported_res_prop_dict = {'OS::Neutron::Port': {
- 'port_security_enabled': 'value_specs', }, }
- dummy_heat_dict = yaml.safe_load(_get_template(
- 'hot_tosca_openwrt.yaml'))
- expected_heat_dict = yaml.safe_load(_get_template(
- 'hot_tosca_openwrt_kilo.yaml'))
- toscautils.convert_unsupported_res_prop(dummy_heat_dict,
- unsupported_res_prop_dict)
- self.assertEqual(expected_heat_dict, dummy_heat_dict)
-
- def test_check_for_substitution_mappings(self):
- tosca_sb_map = _get_template_abs(
- utils.test_etc_sample('test-nsd-vnfd1.yaml'))
- param = {'substitution_mappings': {
- 'VL2': {'type': 'tosca.nodes.nfv.VL', 'properties': {
- 'network_name': 'net0', 'vendor': 'tacker'}},
- 'VL1': {'type': 'tosca.nodes.nfv.VL', 'properties': {
- 'network_name': 'net_mgmt', 'vendor': 'tacker'}},
- 'requirements': {'virtualLink2': 'VL2',
- 'virtualLink1': 'VL1'}}}
- template = yaml.safe_load(tosca_sb_map)
- toscautils.updateimports(template)
- toscautils.check_for_substitution_mappings(template, param)
- self.assertNotIn('substitution_mappings', param)
-
- def test_get_block_storage_details(self):
- tosca_vol = _get_template('tosca_block_storage.yaml')
- vnfd_dict = yaml.safe_load(tosca_vol)
- expected_dict = {
- 'volumes': {
- 'VB1': {
- 'image': 'cirros-0.5.2-x86_64-disk',
- 'size': '1'
- }
- },
- 'volume_attachments': {
- 'CB1': {
- 'instance_uuid': {'get_resource': 'VDU1'},
- 'mountpoint': '/dev/vdb',
- 'volume_id': {'get_resource': 'VB1'}}
- }
- }
- volume_details = toscautils.get_block_storage_details(vnfd_dict)
- self.assertEqual(expected_dict, volume_details)
-
- def test_get_block_storage_details_volume_id(self):
- tosca_vol = _get_template(
- 'test-tosca-vnfd-existing-block-storage.yaml')
- vnfd_dict = yaml.safe_load(tosca_vol)
- expected_dict = {
- 'volumes': {
- 'VB1': {
- 'volume_id': 'my_vol'
- }
- },
- 'volume_attachments': {
- 'CB1': {
- 'instance_uuid': {'get_resource': 'VDU1'},
- 'mountpoint': '/dev/vdb',
- 'volume_id': {'get_param': 'my_vol'}}
- }
- }
- volume_details = toscautils.get_block_storage_details(vnfd_dict)
- self.assertEqual(expected_dict, volume_details)
diff --git a/tacker/tosca/lib/tacker_defs.yaml b/tacker/tosca/lib/tacker_defs.yaml
deleted file mode 100644
index d1a94fac6..000000000
--- a/tacker/tosca/lib/tacker_defs.yaml
+++ /dev/null
@@ -1,382 +0,0 @@
-data_types:
- tosca.datatypes.tacker.ActionMap:
- properties:
- trigger:
- type: string
- required: true
- action:
- type: string
- required: true
- params:
- type: map
- entry_schema:
- type: string
- required: false
-
- tosca.datatypes.tacker.AppActionMap:
- properties:
- condition:
- type: map
- entry_schema:
- type: string
- required: false
- actionname:
- type: string
- required: false
- cmd-action:
- type: string
- required: false
-
- tosca.datatypes.tacker.AppInfoParams:
- properties:
- app_name:
- type: string
- required: true
- app_port:
- type: string
- required: true
- ssh_username:
- type: string
- required: false
- ssh_password:
- type: string
- required: false
- app_status:
- type: tosca.dataypes.tacker.AppActionMap
- required: false
- app_memory:
- type: tosca.dataypes.tacker.AppActionMap
- required: false
-
- tosca.datatypes.tacker.OSInfoParams:
- properties:
- os_agent_info:
- type: tosca.dataypes.tacker.AppActionMap
- required: false
- os_proc_value:
- type: tosca.datatypes.tacker.AppActionMap
- required: false
- os_cpu_load:
- type: tosca.datatypes.tacker.AppActionMap
- required: false
- os_cpu_usage:
- type: tosca.datatypes.tacker.AppActionMap
- required: false
-
-
- tosca.datatypes.tacker.AppMonitoringParams:
- properties:
- application:
- type: tosca.datatypes.tacker.AppInfoParams
- required: false
- OS:
- type: tosca.datatypes.tacker.OSInfoParams
- required: false
-
- tosca.datatypes.tacker.AppMonitoringType:
- properties:
- name:
- type: string
- required: true
- zabbix_username:
- type: string
- required: true
- zabbix_password:
- type: string
- required: true
- zabbix_server_ip:
- type: string
- required: true
- zabbix_server_port:
- type: int
- required: true
- parameters:
- type: tosca.datatypes.tacker.AppMonitoringParams
- required: false
-
-
-
- tosca.datatypes.tacker.MonitoringParams:
- properties:
- monitoring_delay:
- type: int
- required: false
- count:
- type: int
- required: false
- interval:
- type: int
- required: false
- timeout:
- type: int
- required: false
- retry:
- type: int
- required: false
- port:
- type: int
- required: false
-
- tosca.datatypes.tacker.MonitoringType:
- properties:
- name:
- type: string
- required: true
- actions:
- type: map
- required: true
- parameters:
- type: tosca.datatypes.tacker.MonitoringParams
- required: false
-
-
-
- tosca.datatypes.compute_properties:
- properties:
- num_cpus:
- type: integer
- required: true
- mem_size:
- type: string
- required: true
- disk_size:
- type: string
- required: false
- mem_page_size:
- type: string
- required: false
- numa_node_count:
- type: integer
- constraints:
- - greater_or_equal: 1
- required: false
- numa_nodes:
- type: map
- required: false
- cpu_allocation:
- type: map
- required: false
-
- tosca.datatypes.tacker.VirtualIP:
- properties:
- ip_address:
- type: string
- required: true
- description: The virtual IP address allowed to be paired with.
- mac_address:
- type: string
- required: false
- description: The mac address allowed to be paired with specific virtual IP.
-
-
- tosca.datatypes.nfv.VnfcConfigurableProperties:
- properties:
- num_cpus:
- type: float
- required: false
- mem_size:
- type: string
- required: false
- image:
- type: string
- required: false
- command:
- type: list
- entry_schema:
- type: string
- required: false
- args:
- type: list
- entry_schema:
- type: string
- required: false
- ports:
- type: list
- entry_schema:
- type: string
- required: false
- config:
- type: string
- required: false
-
- tosca.datatypes.tacker.VduReservationMetadata:
- properties:
- resource_type:
- # TODO(niraj-singh): Need to add constraints
- # ``valid_values: [ physical_host, virtual_instance ]``
- # once Bug #1815755 is fixed.
- type: string
- required: true
- default: virtual_instance
- id:
- type: string
- required: true
-
-policy_types:
- tosca.policies.tacker.Placement:
- derived_from: tosca.policies.Placement
- description: Defines policy for placement of VDU's.
- properties:
- policy:
- type: string
- required: false
- constraints:
- - valid_values: [ affinity, anti-affinity ]
- default: affinity
- description: Placement policy for target VDU's.
- strict:
- type: boolean
- required: false
- default: true
- description: If the policy is not mandatory, set this flag to 'false'. Setting this flag to 'false' allows the VDU deployment request to continue even if the nova-scheduler fails to assign compute hosts under the policy.
-
- tosca.policies.tacker.Failure:
- derived_from: tosca.policies.Root
- action:
- type: string
-
- tosca.policies.tacker.Failure.Respawn:
- derived_from: tosca.policies.tacker.Failure
- action: respawn
-
- tosca.policies.tacker.Failure.Terminate:
- derived_from: tosca.policies.tacker.Failure
- action: log_and_kill
-
- tosca.policies.tacker.Failure.Log:
- derived_from: tosca.policies.tacker.Failure
- action: log
-
- tosca.policies.tacker.Monitoring:
- derived_from: tosca.policies.Root
- properties:
- name:
- type: string
- required: true
- parameters:
- type: map
- entry_schema:
- type: string
- required: false
- actions:
- type: map
- entry_schema:
- type: string
- required: true
-
- tosca.policies.tacker.AppMonitoring:
- derived_from: tosca.policies.Root
- properties:
- name:
- type: string
- required: true
- parameters:
- type: map
- entry_schema:
- type: string
- required: false
-
-
-
- tosca.policies.tacker.Monitoring.NoOp:
- derived_from: tosca.policies.tacker.Monitoring
- properties:
- name: noop
-
- tosca.policies.tacker.Monitoring.Ping:
- derived_from: tosca.policies.tacker.Monitoring
- properties:
- name: ping
-
- tosca.policies.tacker.Monitoring.HttpPing:
- derived_from: tosca.policies.tacker.Monitoring.Ping
- properties:
- name: http-ping
-
- tosca.policies.tacker.Monitoring.Zabbix:
- derived_from: tosca.policies.tacker.Appmonitoring
- properties:
- name: zabbix
-
-
- tosca.policies.tacker.Alarming:
- derived_from: tosca.policies.Monitoring
- triggers:
- resize_compute:
- event_type:
- type: map
- entry_schema:
- type: string
- required: true
- metrics:
- type: string
- required: true
- condition:
- type: map
- entry_schema:
- type: string
- required: false
- action:
- type: map
- entry_schema:
- type: string
- required: true
-
- tosca.policies.tacker.Scaling:
- derived_from: tosca.policies.Scaling
- description: Defines policy for scaling the given targets.
- properties:
- increment:
- type: integer
- required: true
- description: Number of nodes to add or remove during the scale out/in.
- targets:
- type: list
- entry_schema:
- type: string
- required: true
- description: List of Scaling nodes.
- target_cpu_utilization_percentage:
- type: integer
- required: false
- description: The target average CPU utilization over all the pods which is used in Kubernetes environment
- min_instances:
- type: integer
- required: true
- description: Minimum number of instances to scale in.
- max_instances:
- type: integer
- required: true
- description: Maximum number of instances to scale out.
- default_instances:
- type: integer
- required: true
- description: Initial number of instances.
- cooldown:
- type: integer
- required: false
- default: 120
- description: Wait time (in seconds) between consecutive scaling operations. During the cooldown period, scaling action will be ignored
-
- tosca.policies.tacker.Reservation:
- derived_from: tosca.policies.Reservation
- reservation:
- start_actions:
- type: list
- entry_schema:
- type: string
- required: true
- before_end_actions:
- type: list
- entry_schema:
- type: string
- required: true
- end_actions:
- type: list
- entry_schema:
- type: string
- required: true
- properties:
- lease_id:
- type: string
- required: true
diff --git a/tacker/tosca/lib/tacker_nfv_defs.yaml b/tacker/tosca/lib/tacker_nfv_defs.yaml
deleted file mode 100644
index aefbdfe37..000000000
--- a/tacker/tosca/lib/tacker_nfv_defs.yaml
+++ /dev/null
@@ -1,395 +0,0 @@
-data_types:
- tosca.nfv.datatypes.pathType:
- properties:
- forwarder:
- type: string
- required: true
- capability:
- type: string
- required: true
- sfc_encap:
- type: boolean
- required: false
- default: true
-
- tosca.nfv.datatypes.aclType:
- properties:
- eth_type:
- type: string
- required: false
- eth_src:
- type: string
- required: false
- eth_dst:
- type: string
- required: false
- vlan_id:
- type: integer
- constraints:
- - in_range: [ 1, 4094 ]
- required: false
- vlan_pcp:
- type: integer
- constraints:
- - in_range: [ 0, 7 ]
- required: false
- mpls_label:
- type: integer
- constraints:
- - in_range: [ 16, 1048575]
- required: false
- mpls_tc:
- type: integer
- constraints:
- - in_range: [ 0, 7 ]
- required: false
- ip_dscp:
- type: integer
- constraints:
- - in_range: [ 0, 63 ]
- required: false
- ip_ecn:
- type: integer
- constraints:
- - in_range: [ 0, 3 ]
- required: false
- ip_src_prefix:
- type: string
- required: false
- ip_dst_prefix:
- type: string
- required: false
- ip_proto:
- type: integer
- constraints:
- - in_range: [ 1, 254 ]
- required: false
- destination_port_range:
- type: string
- required: false
- source_port_range:
- type: string
- required: false
- network_src_port_id:
- type: string
- required: false
- network_dst_port_id:
- type: string
- required: false
- network_id:
- type: string
- required: false
- network_name:
- type: string
- required: false
- tenant_id:
- type: string
- required: false
- icmpv4_type:
- type: integer
- constraints:
- - in_range: [ 0, 254 ]
- required: false
- icmpv4_code:
- type: integer
- constraints:
- - in_range: [ 0, 15 ]
- required: false
- arp_op:
- type: integer
- constraints:
- - in_range: [ 1, 25 ]
- required: false
- arp_spa:
- type: string
- required: false
- arp_tpa:
- type: string
- required: false
- arp_sha:
- type: string
- required: false
- arp_tha:
- type: string
- required: false
- ipv6_src:
- type: string
- required: false
- ipv6_dst:
- type: string
- required: false
- ipv6_flabel:
- type: integer
- constraints:
- - in_range: [ 0, 1048575]
- required: false
- icmpv6_type:
- type: integer
- constraints:
- - in_range: [ 0, 255]
- required: false
- icmpv6_code:
- type: integer
- constraints:
- - in_range: [ 0, 7]
- required: false
- ipv6_nd_target:
- type: string
- required: false
- ipv6_nd_sll:
- type: string
- required: false
- ipv6_nd_tll:
- type: string
- required: false
-
- tosca.nfv.datatypes.policyType:
- properties:
- type:
- type: string
- required: false
- constraints:
- - valid_values: [ ACL ]
- criteria:
- type: list
- required: true
- entry_schema:
- type: tosca.nfv.datatypes.aclType
-
-
- tosca.nfv.datatypes.policyTypeV2:
- properties:
- type:
- type: string
- required: false
- constraints:
- - valid_values: [ ACL ]
- criteria:
- type: list
- required: true
- entry_schema:
- type: tosca.nfv.datatypes.classifierType
-
- tosca.nfv.datatypes.classifierType:
- properties:
- name:
- type: string
- required: true
- classifier:
- type: tosca.nfv.datatypes.aclType
- required: true
-
-node_types:
- tosca.nodes.nfv.VDU.Tacker:
- derived_from: tosca.nodes.nfv.VDU
- capabilities:
- nfv_compute:
- type: tosca.datatypes.compute_properties
- properties:
- name:
- type: string
- required: false
- image:
-# type: tosca.artifacts.Deployment.Image.VM
- type: string
- required: false
- flavor:
- type: string
- required: false
- availability_zone:
- type: string
- required: false
- metadata:
- type: map
- entry_schema:
- type: string
- required: false
- config_drive:
- type: boolean
- default: false
- required: false
-
- placement_policy:
-# type: tosca.policies.tacker.Placement
- type: string
- required: false
- app_monitoring_policy:
-# type: tosca.policies.tacker.AppMonitoring
-# type: tosca.datatypes.tacker.AppMonitoringType
- type: map
- required: false
-
-
-
- monitoring_policy:
-# type: tosca.policies.tacker.Monitoring
-# type: tosca.datatypes.tacker.MonitoringType
- type: map
- required: false
-
- config:
- type: string
- required: false
-
- mgmt_driver:
- type: string
- default: noop
- required: false
-
- service_type:
- type: string
- required: false
-
- user_data:
- type: string
- required: false
-
- user_data_format:
- type: string
- required: false
-
- key_name:
- type: string
- required: false
-
- vnfcs:
- type: map
- required: false
- entry_schema:
- type: tosca.datatypes.nfv.VnfcConfigurableProperties
-
- namespace:
- type: string
- required: false
-
- mapping_ports:
- type: list
- required: false
- entry_schema:
- type: string
-
- labels:
- type: list
- required: false
- entry_schema:
- type: string
-
- reservation_metadata:
- type: tosca.datatypes.tacker.VduReservationMetadata
- required: false
-
- maintenance:
- type: boolean
- required: false
-
- tosca.nodes.nfv.CP.Tacker:
- derived_from: tosca.nodes.nfv.CP
- properties:
- mac_address:
- type: string
- required: false
- name:
- type: string
- required: false
- management:
- type: boolean
- required: false
- anti_spoofing_protection:
- type: boolean
- required: false
- allowed_address_pairs:
- type: list
- entry_schema:
- type: tosca.datatypes.tacker.VirtualIP
- required: false
- security_groups:
- type: list
- required: false
- type:
- type: string
- required: false
- constraints:
- - valid_values: [ sriov, vnic ]
-
- tosca.nodes.nfv.FP.TackerV2:
- derived_from: tosca.nodes.Root
- properties:
- id:
- type: integer
- required: false
- symmetrical:
- type: boolean
- required: false
- default: false
- correlation:
- type: string
- required: false
- constraints:
- - valid_values: [ mpls, nsh ]
- default: mpls
- policy:
- type: tosca.nfv.datatypes.policyTypeV2
- required: false
- description: policy to use to match traffic for this FP
- path:
- type: list
- required: true
- entry_schema:
- type: tosca.nfv.datatypes.pathType
-
- tosca.nodes.nfv.FP.Tacker:
- derived_from: tosca.nodes.Root
- properties:
- id:
- type: integer
- required: false
- symmetrical:
- type: boolean
- required: false
- default: false
- correlation:
- type: string
- required: false
- constraints:
- - valid_values: [ mpls, nsh ]
- default: mpls
- policy:
- type: tosca.nfv.datatypes.policyType
- required: true
- description: policy to use to match traffic for this FP
- path:
- type: list
- required: true
- entry_schema:
- type: tosca.nfv.datatypes.pathType
-
- tosca.nodes.nfv.VNFC.Tacker:
- derived_from: tosca.nodes.SoftwareComponent
- requirements:
- - host:
- node: tosca.nodes.nfv.VDU.Tacker
- relationship: tosca.relationships.HostedOn
-
- tosca.nodes.BlockStorage.Tacker:
- derived_from: tosca.nodes.BlockStorage
- properties:
- image:
- type: string
- required: false
- size:
- type: scalar-unit.size
- required: false
- constraints:
- - greater_or_equal: 1 MB
-
- tosca.nodes.BlockStorageAttachment:
- derived_from: tosca.nodes.Root
- properties:
- location:
- type: string
- required: true
- requirements:
- - virtualBinding:
- node: tosca.nodes.nfv.VDU.Tacker
- - virtualAttachment:
- node: tosca.nodes.BlockStorage.Tacker
diff --git a/tacker/tosca/utils.py b/tacker/tosca/utils.py
index 4271515db..8fc8f77b2 100644
--- a/tacker/tosca/utils.py
+++ b/tacker/tosca/utils.py
@@ -11,115 +11,28 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
import os
import re
-import sys
import yaml
from collections import OrderedDict
from oslo_log import log as logging
-from oslo_serialization import jsonutils
from oslo_utils import uuidutils
-from tacker._i18n import _
-from tacker.common import exceptions
from tacker.common import log
from tacker.common import utils
from tacker.extensions import vnfm
-from tacker.plugins.common import constants
-from toscaparser import properties
from toscaparser.utils import yamlparser
LOG = logging.getLogger(__name__)
-FAILURE = 'tosca.policies.tacker.Failure'
-MONITORING = 'tosca.policies.Monitoring'
-SCALING = 'tosca.policies.Scaling'
-RESERVATION = 'tosca.policies.Reservation'
-PLACEMENT = 'tosca.policies.tacker.Placement'
-TACKERCP = 'tosca.nodes.nfv.CP.Tacker'
-TACKERVDU = 'tosca.nodes.nfv.VDU.Tacker'
-BLOCKSTORAGE = 'tosca.nodes.BlockStorage.Tacker'
-BLOCKSTORAGE_ATTACHMENT = 'tosca.nodes.BlockStorageAttachment'
-TOSCA_BINDS_TO = 'tosca.relationships.network.BindsTo'
-VDU = 'tosca.nodes.nfv.VDU'
-IMAGE = 'tosca.artifacts.Deployment.Image.VM'
ETSI_INST_LEVEL = 'tosca.policies.nfv.InstantiationLevels'
ETSI_SCALING_ASPECT = 'tosca.policies.nfv.ScalingAspects'
ETSI_SCALING_ASPECT_DELTA = 'tosca.policies.nfv.VduScalingAspectDeltas'
ETSI_INITIAL_DELTA = 'tosca.policies.nfv.VduInitialDelta'
HEAT_SOFTWARE_CONFIG = 'OS::Heat::SoftwareConfig'
-OS_RESOURCES = {
- 'flavor': 'get_flavor_dict',
- 'image': 'get_image_dict'
-}
-
-FLAVOR_PROPS = {
- "num_cpus": ("vcpus", 1, None),
- "disk_size": ("disk", 1, "GB"),
- "mem_size": ("ram", 512, "MB")
-}
-
-CPU_PROP_MAP = (('hw:cpu_policy', 'cpu_affinity'),
- ('hw:cpu_threads_policy', 'thread_allocation'),
- ('hw:cpu_sockets', 'socket_count'),
- ('hw:cpu_threads', 'thread_count'),
- ('hw:cpu_cores', 'core_count'))
-
-CPU_PROP_VAL_MAP = {'cpu_affinity': ('shared', 'dedicated')}
-
-CPU_PROP_KEY_SET = {'cpu_affinity', 'thread_allocation', 'socket_count',
- 'thread_count', 'core_count'}
-
-FLAVOR_EXTRA_SPECS_LIST = ('cpu_allocation',
- 'mem_page_size',
- 'numa_node_count',
- 'numa_nodes')
-
-delpropmap = {TACKERVDU: ('mgmt_driver', 'config', 'service_type',
- 'placement_policy', 'monitoring_policy',
- 'metadata', 'failure_policy'),
- TACKERCP: ('management',)}
-
-convert_prop = {TACKERCP: {'anti_spoofing_protection':
- 'port_security_enabled',
- 'type':
- 'binding:vnic_type'}}
-
-convert_prop_values = {TACKERCP: {'type': {'sriov': 'direct',
- 'vnic': 'normal'}}}
-
-deletenodes = (MONITORING, FAILURE, PLACEMENT)
-
-HEAT_RESOURCE_MAP = {
- "flavor": "OS::Nova::Flavor",
- "image": "OS::Glance::WebImage",
- "maintenance": "OS::Aodh::EventAlarm"
-}
-
SCALE_GROUP_RESOURCE = "OS::Heat::AutoScalingGroup"
-SCALE_POLICY_RESOURCE = "OS::Heat::ScalingPolicy"
-PLACEMENT_POLICY_RESOURCE = "OS::Nova::ServerGroup"
-
-
-@log.log
-def updateimports(template):
- path = os.path.dirname(os.path.abspath(__file__)) + '/lib/'
- defsfile = path + 'tacker_defs.yaml'
-
- if 'imports' in template:
- template['imports'].append(defsfile)
- else:
- template['imports'] = [defsfile]
-
- if 'nfv' in template.get('tosca_definitions_version', {}):
- nfvfile = path + 'tacker_nfv_defs.yaml'
-
- template['imports'].append(nfvfile)
-
- LOG.debug(path)
@log.log
@@ -157,322 +70,6 @@ def check_for_substitution_mappings(template, params):
raise vnfm.InvalidSubstitutionMapping(requirement=req_name)
-@log.log
-def get_vdu_monitoring(template):
- monitoring_dict = dict()
- policy_dict = dict()
- policy_dict['vdus'] = collections.OrderedDict()
- for nt in template.nodetemplates:
- if nt.type_definition.is_derived_from(TACKERVDU):
- mon_policy = nt.get_property_value('monitoring_policy') or 'noop'
- if mon_policy != 'noop':
- if 'parameters' in mon_policy:
- mon_policy['monitoring_params'] = mon_policy['parameters']
- policy_dict['vdus'][nt.name] = {}
- policy_dict['vdus'][nt.name][mon_policy['name']] = mon_policy
- if policy_dict.get('vdus'):
- monitoring_dict = policy_dict
- return monitoring_dict
-
-
-def get_vdu_applicationmonitoring(template):
- tpl_temp = "topology_template"
- n_temp = "node_templates"
- poly = "app_monitoring_policy"
- monitoring_dict = dict()
- policy_dict = dict()
- policy_dict['vdus'] = collections.OrderedDict()
- node_list = template[tpl_temp][n_temp].keys()
- for node in node_list:
- nt = template[tpl_temp][n_temp][node]
- if nt['type'] == TACKERVDU:
- if poly in nt['properties']:
- mon_policy = nt['properties'][poly]
- if mon_policy != 'noop':
- policy_dict['vdus'][node] = {}
- policy_dict['vdus'][node] = mon_policy
- del template[tpl_temp][n_temp][node]['properties'][poly]
- if policy_dict.get('vdus'):
- monitoring_dict = policy_dict
- return monitoring_dict
-
-
-@log.log
-def get_vdu_metadata(template, unique_id=None):
- metadata = dict()
- metadata.setdefault('vdus', {})
- for nt in template.nodetemplates:
- if nt.type_definition.is_derived_from(TACKERVDU):
- metadata_dict = nt.get_property_value('metadata') or None
- if metadata_dict:
- metadata_dict['metering.server_group'] = \
- (metadata_dict['metering.server_group'] + '-'
- + unique_id)[:15]
- metadata['vdus'][nt.name] = {}
- metadata['vdus'][nt.name].update(metadata_dict)
- return metadata
-
-
-@log.log
-def get_metadata_for_reservation(template, metadata):
- """Method used to add lease_id in metadata
-
- So that it can be used further while creating query_metadata
-
- :param template: ToscaTemplate object
- :param metadata: metadata dict
- :return: dictionary contains lease_id
- """
-
- metadata.setdefault('reservation', {})
- input_param_list = template.parsed_params.keys()
- # if lease_id is passed in the parameter file,
- # get it from template parsed_params.
- if 'lease_id' in input_param_list:
- metadata['reservation']['lease_id'] = template.parsed_params[
- 'lease_id']
- else:
- for policy in template.policies:
- if policy.entity_tpl['type'] == constants.POLICY_RESERVATION:
- metadata['reservation']['lease_id'] = policy.entity_tpl[
- 'reservation']['properties']['lease_id']
- break
- if not uuidutils.is_uuid_like(metadata['reservation']['lease_id']):
- raise exceptions.Invalid('Invalid UUID for lease_id')
- return metadata
-
-
-@log.log
-def pre_process_alarm_resources(vnf, template, vdu_metadata, unique_id=None):
- alarm_resources = dict()
- query_metadata = dict()
- alarm_actions = dict()
- for policy in template.policies:
- if policy.type_definition.is_derived_from(MONITORING):
- query_metadata.update(_process_query_metadata(
- vdu_metadata, policy, unique_id))
- alarm_actions.update(_process_alarm_actions(vnf, policy))
- if policy.type_definition.is_derived_from(RESERVATION):
- query_metadata.update(_process_query_metadata_reservation(
- vdu_metadata, policy))
- alarm_actions.update(_process_alarm_actions_for_reservation(
- vnf, policy))
- alarm_resources['event_types'] = {
- 'start_actions': {'event_type': 'lease.event.start_lease'},
- 'before_end_actions': {
- 'event_type': 'lease.event.before_end_lease'},
- 'end_actions': {'event_type': 'lease.event.end_lease'}}
- maintenance_actions = _process_alarm_actions_for_maintenance(vnf)
- if maintenance_actions:
- alarm_actions.update(maintenance_actions)
- alarm_resources['event_types'] = {}
- alarm_resources['event_types'].update({
- 'ALL_maintenance': {'event_type': 'maintenance.scheduled'}})
- alarm_resources['query_metadata'] = query_metadata
- alarm_resources['alarm_actions'] = alarm_actions
- return alarm_resources
-
-
-def _process_query_metadata(metadata, policy, unique_id):
- query_mtdata = dict()
- triggers = policy.entity_tpl['triggers']
- for trigger_name, trigger_dict in triggers.items():
- resource_type = trigger_dict.get('condition').get('resource_type')
- # TODO(phuoc): currently, Tacker only supports resource_type with
- # instance value. Other types such as instance_network_interface,
- # instance_disk can be supported in the future.
- if resource_type == 'instance':
- if not (trigger_dict.get('metadata') and metadata):
- raise vnfm.MetadataNotMatched()
- is_matched = False
- for vdu_name, metadata_dict in metadata['vdus'].items():
- trigger_dict['metadata'] = \
- (trigger_dict['metadata'] + '-' + unique_id)[:15]
- if trigger_dict['metadata'] == \
- metadata_dict['metering.server_group']:
- is_matched = True
- if not is_matched:
- raise vnfm.MetadataNotMatched()
- query_template = dict()
- query_template['str_replace'] = dict()
- query_template['str_replace']['template'] = \
- '{"=": {"server_group": "scaling_group_id"}}'
- scaling_group_param = \
- {'scaling_group_id': trigger_dict['metadata']}
- query_template['str_replace']['params'] = scaling_group_param
- else:
- raise vnfm.InvalidResourceType(resource_type=resource_type)
- query_mtdata[trigger_name] = query_template
- return query_mtdata
-
-
-def _process_query_metadata_reservation(metadata, policy):
- query_metadata = dict()
- policy_actions = list(policy.entity_tpl['reservation'].keys())
- policy_actions.remove('properties')
- for action in policy_actions:
- query_template = [{
- "field": 'traits.lease_id', "op": "eq",
- "value": metadata['reservation']['lease_id']}]
- query_metadata[action] = query_template
-
- return query_metadata
-
-
-def _process_alarm_actions(vnf, policy):
- # process alarm url here
- triggers = policy.entity_tpl['triggers']
- alarm_actions = dict()
- for trigger_name, trigger_dict in triggers.items():
- alarm_url = vnf['attributes'].get(trigger_name)
- if alarm_url:
- alarm_url = str(alarm_url)
- LOG.debug('Alarm url in heat %s', alarm_url)
- alarm_actions[trigger_name] = dict()
- alarm_actions[trigger_name]['alarm_actions'] = [alarm_url]
- return alarm_actions
-
-
-def _process_alarm_actions_for_reservation(vnf, policy):
- # process alarm url here
- alarm_actions = dict()
- policy_actions = list(policy.entity_tpl['reservation'].keys())
- policy_actions.remove('properties')
- for action in policy_actions:
- alarm_url = vnf['attributes'].get(action)
- if alarm_url:
- LOG.debug('Alarm url in heat %s', alarm_url)
- alarm_actions[action] = dict()
- alarm_actions[action]['alarm_actions'] = [alarm_url]
- return alarm_actions
-
-
-def _process_alarm_actions_for_maintenance(vnf):
- # process alarm url here
- alarm_actions = dict()
- maintenance_props = vnf['attributes'].get('maintenance', '{}')
- maintenance_props = jsonutils.loads(maintenance_props)
- maintenance_url = vnf['attributes'].get('maintenance_url', '')
- for vdu, access_key in maintenance_props.items():
- action = '%s_maintenance' % vdu
- alarm_url = '%s/%s' % (maintenance_url.rstrip('/'), access_key)
- if alarm_url:
- LOG.debug('Alarm url in heat %s', alarm_url)
- alarm_actions[action] = dict()
- alarm_actions[action]['alarm_actions'] = [alarm_url]
- return alarm_actions
-
-
-def get_volumes(template):
- volume_dict = dict()
- node_tpl = template['topology_template']['node_templates']
- for node_name in list(node_tpl.keys()):
- node_value = node_tpl[node_name]
- if node_value['type'] != BLOCKSTORAGE:
- continue
- volume_dict[node_name] = dict()
- block_properties = node_value.get('properties', {})
- if 'volume_id' in block_properties:
- volume_dict[node_name]['volume_id'] = block_properties['volume_id']
- del node_tpl[node_name]
- continue
- for prop_name, prop_value in block_properties.items():
- if prop_name == 'size':
- prop_value = \
- re.compile(r'(\d+)\s*(\w+)').match(prop_value).groups()[0]
- volume_dict[node_name][prop_name] = prop_value
- del node_tpl[node_name]
- return volume_dict
-
-
-@log.log
-def get_vol_attachments(template, volume_dict):
- vol_attach_dict = dict()
- node_tpl = template['topology_template']['node_templates']
- valid_properties = {
- 'location': 'mountpoint'
- }
- for node_name in list(node_tpl.keys()):
- node_value = node_tpl[node_name]
- if node_value['type'] != BLOCKSTORAGE_ATTACHMENT:
- continue
- vol_attach_dict[node_name] = dict()
- vol_attach_properties = node_value.get('properties', {})
- # parse properties
- for prop_name, prop_value in vol_attach_properties.items():
- if prop_name in valid_properties:
- vol_attach_dict[node_name][valid_properties[prop_name]] = \
- prop_value
- # parse requirements to get mapping of cinder volume <-> Nova instance
- for req in node_value.get('requirements', {}):
- if 'virtualBinding' in req:
- vol_attach_dict[node_name]['instance_uuid'] = \
- {'get_resource': req['virtualBinding']['node']}
- elif 'virtualAttachment' in req:
- node = req['virtualAttachment']['node']
- if 'volume_id' in volume_dict.get(node, {}):
- value = {'get_param': volume_dict[node]['volume_id']}
- else:
- value = {'get_resource': node}
- vol_attach_dict[node_name]['volume_id'] = value
- del node_tpl[node_name]
- return vol_attach_dict
-
-
-@log.log
-def get_block_storage_details(template):
- block_storage_details = dict()
- volume_dict = get_volumes(template)
- block_storage_details['volumes'] = volume_dict
- block_storage_details['volume_attachments'] = \
- get_vol_attachments(template, volume_dict)
- return block_storage_details
-
-
-@log.log
-def get_mgmt_ports(tosca):
- mgmt_ports = {}
- for nt in tosca.nodetemplates:
- if nt.type_definition.is_derived_from(TACKERCP):
- mgmt = nt.get_property_value('management') or None
- if mgmt:
- vdu = None
- for rel, node in nt.relationships.items():
- if rel.is_derived_from(TOSCA_BINDS_TO):
- vdu = node.name
- break
-
- if vdu is not None:
- name = 'mgmt_ip-%s' % vdu
- mgmt_ports[name] = nt.name
- LOG.debug('mgmt_ports: %s', mgmt_ports)
- return mgmt_ports
-
-
-@log.log
-def add_resources_tpl(heat_dict, hot_res_tpl):
- for res, res_dict in (hot_res_tpl).items():
- for vdu, vdu_dict in (res_dict).items():
- res_name = vdu + "_" + res
- heat_dict["resources"][res_name] = {
- "type": HEAT_RESOURCE_MAP[res],
- "properties": {}
- }
-
- if res == "maintenance":
- continue
- for prop, val in (vdu_dict).items():
- # change from 'get_input' to 'get_param' to meet HOT template
- if isinstance(val, dict):
- if 'get_input' in val:
- val['get_param'] = val.pop('get_input')
- heat_dict["resources"][res_name]["properties"][prop] = val
- if heat_dict["resources"].get(vdu):
- heat_dict["resources"][vdu]["properties"][res] = {
- "get_resource": res_name
- }
-
-
@log.log
def convert_unsupported_res_prop(heat_dict, unsupported_res_prop):
res_dict = heat_dict['resources']
@@ -522,8 +119,7 @@ def represent_odict(dump, tag, mapping, flow_style=None):
@log.log
-def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
- alarm_resources, res_tpl, vol_res={},
+def post_process_heat_template(heat_tpl,
unsupported_res_prop=None, unique_id=None,
inst_req_info=None, grant_info=None,
tosca=None):
@@ -540,44 +136,6 @@ def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
# End temporary workaround for heat-translator
#
heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
- for outputname, portname in mgmt_ports.items():
- ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
- output = {outputname: {'value': ipval}}
- if 'outputs' in heat_dict:
- heat_dict['outputs'].update(output)
- else:
- heat_dict['outputs'] = output
- LOG.debug('Added output for %s', outputname)
- if metadata.get('vdus'):
- for vdu_name, metadata_dict in metadata['vdus'].items():
- metadata_dict['metering.server_group'] = \
- (metadata_dict['metering.server_group'] + '-' + unique_id)[:15]
- if heat_dict['resources'].get(vdu_name):
- heat_dict['resources'][vdu_name]['properties']['metadata'] =\
- metadata_dict
- add_resources_tpl(heat_dict, res_tpl)
-
- query_metadata = alarm_resources.get('query_metadata')
- alarm_actions = alarm_resources.get('alarm_actions')
- event_types = alarm_resources.get('event_types')
- if query_metadata:
- for trigger_name, matching_metadata_dict in query_metadata.items():
- if heat_dict['resources'].get(trigger_name):
- query_mtdata = dict()
- query_mtdata['query'] = \
- query_metadata[trigger_name]
- heat_dict['resources'][trigger_name][
- 'properties'].update(query_mtdata)
- if alarm_actions:
- for trigger_name, alarm_actions_dict in alarm_actions.items():
- if heat_dict['resources'].get(trigger_name):
- heat_dict['resources'][trigger_name]['properties'].update(
- alarm_actions_dict)
- if event_types:
- for trigger_name, event_type in event_types.items():
- if heat_dict['resources'].get(trigger_name):
- heat_dict['resources'][trigger_name]['properties'].update(
- event_type)
for res in heat_dict["resources"].values():
if not res['type'] == HEAT_SOFTWARE_CONFIG:
@@ -586,8 +144,6 @@ def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
if 'get_file' in config:
res["properties"]["config"] = open(config["get_file"]).read()
- if vol_res.get('volumes'):
- add_volume_resources(heat_dict, vol_res)
if unsupported_res_prop:
convert_unsupported_res_prop(heat_dict, unsupported_res_prop)
if grant_info:
@@ -609,9 +165,7 @@ def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
@log.log
def post_process_heat_template_for_scaling(
- heat_tpl, mgmt_ports, metadata,
- alarm_resources, res_tpl, vol_res={},
- unsupported_res_prop=None, unique_id=None,
+ heat_tpl, unsupported_res_prop=None, unique_id=None,
inst_req_info=None, grant_info=None,
tosca=None):
heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
@@ -634,7 +188,7 @@ def check_inst_req_info_for_scaling(heat_dict, inst_req_info):
if not inst_req_info.ext_virtual_links:
return
- def _get_mac_ip(exp_cp):
+ def _get_mac_ip(ext_cp):
mac = None
ip = None
for cp_conf in ext_cp.cp_config:
@@ -950,291 +504,6 @@ def _convert_grant_info_vdu(heat_dict, vdu_name, vnf_resources):
vnf_resource.resource_identifier})
-@log.log
-def add_volume_resources(heat_dict, vol_res):
- # Add cinder volumes
- for res_name, cinder_vol in vol_res['volumes'].items():
- if 'volume_id' in cinder_vol:
- continue
- heat_dict['resources'][res_name] = {
- 'type': 'OS::Cinder::Volume',
- 'properties': {}
- }
- for prop_name, prop_val in cinder_vol.items():
- heat_dict['resources'][res_name]['properties'][prop_name] = \
- prop_val
- # Add cinder volume attachments
- for res_name, cinder_vol in vol_res['volume_attachments'].items():
- heat_dict['resources'][res_name] = {
- 'type': 'OS::Cinder::VolumeAttachment',
- 'properties': {}
- }
- for prop_name, prop_val in cinder_vol.items():
- heat_dict['resources'][res_name]['properties'][prop_name] = \
- prop_val
-
-
-@log.log
-def post_process_template(template):
- def _add_scheduler_hints_property(nt):
- hints = nt.get_property_value('scheduler_hints')
- if hints is None:
- hints = OrderedDict()
- hints_schema = {'type': 'map', 'required': False,
- 'entry_schema': {'type': 'string'}}
- hints_prop = properties.Property('scheduler_hints',
- hints,
- hints_schema)
- nt.get_properties_objects().append(hints_prop)
- return hints
-
- for nt in template.nodetemplates:
- if (nt.type_definition.is_derived_from(MONITORING) or
- nt.type_definition.is_derived_from(FAILURE) or
- nt.type_definition.is_derived_from(PLACEMENT)):
- template.nodetemplates.remove(nt)
- continue
-
- if nt.type in delpropmap:
- for prop in delpropmap[nt.type]:
- for p in nt.get_properties_objects():
- if prop == p.name:
- nt.get_properties_objects().remove(p)
-
- # change the property value first before the property key
- if nt.type in convert_prop_values:
- for prop in convert_prop_values[nt.type]:
- for p in nt.get_properties_objects():
- if (prop == p.name and
- p.value in
- convert_prop_values[nt.type][prop]):
- v = convert_prop_values[nt.type][prop][p.value]
- p.value = v
-
- if nt.type in convert_prop:
- for prop in convert_prop[nt.type]:
- for p in nt.get_properties_objects():
- if prop == p.name:
- schema_dict = {'type': p.type}
- v = nt.get_property_value(p.name)
- newprop = properties.Property(
- convert_prop[nt.type][prop], v, schema_dict)
- nt.get_properties_objects().append(newprop)
- nt.get_properties_objects().remove(p)
-
- if nt.type_definition.is_derived_from(TACKERVDU):
- reservation_metadata = nt.get_property_value(
- 'reservation_metadata')
- if reservation_metadata is not None:
- hints = _add_scheduler_hints_property(nt)
-
- input_resource_type = reservation_metadata.get(
- 'resource_type')
- input_id = reservation_metadata.get('id')
-
- # Checking if 'resource_type' and 'id' is passed through a
- # input parameter file or not. If it's then get the value
- # from input parameter file.
- if (isinstance(input_resource_type, OrderedDict) and
- input_resource_type.get('get_input')):
- input_resource_type = template.parsed_params.get(
- input_resource_type.get('get_input'))
- # TODO(niraj-singh): Remove this validation once bug
- # 1815755 is fixed.
- if input_resource_type not in (
- 'physical_host', 'virtual_instance'):
- raise exceptions.Invalid(
- 'resoure_type must be physical_host'
- ' or virtual_instance')
-
- if (isinstance(input_id, OrderedDict) and
- input_id.get('get_input')):
- input_id = template.parsed_params.get(
- input_id.get('get_input'))
-
- if input_resource_type == 'physical_host':
- hints['reservation'] = input_id
- elif input_resource_type == 'virtual_instance':
- hints['group'] = input_id
- nt.get_properties_objects().remove(nt.get_properties().get(
- 'reservation_metadata'))
-
-
-@log.log
-def get_mgmt_driver(template):
- mgmt_driver = None
- for nt in template.nodetemplates:
- if nt.type_definition.is_derived_from(TACKERVDU):
- if (mgmt_driver and nt.get_property_value('mgmt_driver') !=
- mgmt_driver):
- raise vnfm.MultipleMGMTDriversSpecified()
- else:
- mgmt_driver = nt.get_property_value('mgmt_driver')
-
- return mgmt_driver
-
-
-def findvdus(template):
- vdus = []
- for nt in template.nodetemplates:
- if nt.type_definition.is_derived_from(TACKERVDU):
- vdus.append(nt)
- return vdus
-
-
-def find_maintenance_vdus(template):
- maintenance_vdu_names = list()
- vdus = findvdus(template)
- for nt in vdus:
- if nt.get_properties().get('maintenance'):
- maintenance_vdu_names.append(nt.name)
- return maintenance_vdu_names
-
-
-def get_flavor_dict(template, flavor_extra_input=None):
- flavor_dict = {}
- vdus = findvdus(template)
- for nt in vdus:
- flavor_tmp = nt.get_properties().get('flavor')
- if flavor_tmp:
- continue
- if nt.get_capabilities().get("nfv_compute"):
- flavor_dict[nt.name] = {}
- properties = nt.get_capabilities()["nfv_compute"].get_properties()
- for prop, (hot_prop, default, unit) in \
- (FLAVOR_PROPS).items():
- hot_prop_val = (properties[prop].value
- if properties.get(prop, None) else None)
- if unit and hot_prop_val:
- hot_prop_val = \
- utils.change_memory_unit(hot_prop_val, unit)
- flavor_dict[nt.name][hot_prop] = \
- hot_prop_val if hot_prop_val else default
- if any(p in properties for p in FLAVOR_EXTRA_SPECS_LIST):
- flavor_dict[nt.name]['extra_specs'] = {}
- es_dict = flavor_dict[nt.name]['extra_specs']
- populate_flavor_extra_specs(es_dict, properties,
- flavor_extra_input)
- return flavor_dict
-
-
-def populate_flavor_extra_specs(es_dict, properties, flavor_extra_input):
- if 'mem_page_size' in properties:
- mval = properties['mem_page_size'].value
- if str(mval).isdigit():
- mval = mval * 1024
- elif mval not in ('small', 'large', 'any'):
- raise vnfm.HugePageSizeInvalidInput(
- error_msg_details=(mval + ":Invalid Input"))
- es_dict['hw:mem_page_size'] = mval
- if 'numa_nodes' in properties and 'numa_node_count' in properties:
- LOG.warning('Both numa_nodes and numa_node_count have been '
- 'specified; numa_node definitions will be ignored and '
- 'numa_node_count will be applied')
- if 'numa_node_count' in properties:
- es_dict['hw:numa_nodes'] = \
- properties['numa_node_count'].value
- if 'numa_nodes' in properties and 'numa_node_count' not in properties:
- nodes_dict = dict(properties['numa_nodes'].value)
- dval = list(nodes_dict.values())
- ncount = 0
- for ndict in dval:
- invalid_input = set(ndict.keys()) - {'id', 'vcpus', 'mem_size'}
- if invalid_input:
- raise vnfm.NumaNodesInvalidKeys(
- error_msg_details=(', '.join(invalid_input)),
- valid_keys="id, vcpus and mem_size")
- if 'id' in ndict and 'vcpus' in ndict:
- vk = "hw:numa_cpus." + str(ndict['id'])
- vval = ",".join([str(x) for x in ndict['vcpus']])
- es_dict[vk] = vval
- if 'id' in ndict and 'mem_size' in ndict:
- mk = "hw:numa_mem." + str(ndict['id'])
- es_dict[mk] = ndict['mem_size']
- ncount += 1
- es_dict['hw:numa_nodes'] = ncount
- if 'cpu_allocation' in properties:
- cpu_dict = dict(properties['cpu_allocation'].value)
- invalid_input = set(cpu_dict.keys()) - CPU_PROP_KEY_SET
- if invalid_input:
- raise vnfm.CpuAllocationInvalidKeys(
- error_msg_details=(', '.join(invalid_input)),
- valid_keys=(', '.join(CPU_PROP_KEY_SET)))
- for(k, v) in CPU_PROP_MAP:
- if v not in cpu_dict:
- continue
- if CPU_PROP_VAL_MAP.get(v, None):
- if cpu_dict[v] not in CPU_PROP_VAL_MAP[v]:
- raise vnfm.CpuAllocationInvalidValues(
- error_msg_details=cpu_dict[v],
- valid_values=CPU_PROP_VAL_MAP[v])
- es_dict[k] = cpu_dict[v]
- if flavor_extra_input:
- es_dict.update(flavor_extra_input)
-
-
-def get_image_dict(template):
- image_dict = {}
- vdus = findvdus(template)
- for vdu in vdus:
- if not vdu.entity_tpl.get("artifacts"):
- continue
- artifacts = vdu.entity_tpl["artifacts"]
- for name, artifact in (artifacts).items():
- if ('type' in artifact and
- artifact["type"] == IMAGE):
- if 'file' not in artifact:
- raise vnfm.FilePathMissing()
- image_dict[vdu.name] = {
- "location": artifact["file"],
- "container_format": "bare",
- "disk_format": "raw",
- "name": name
- }
- return image_dict
-
-
-def get_resources_dict(template, flavor_extra_input=None):
- res_dict = dict()
- for res, method in (OS_RESOURCES).items():
- res_method = getattr(sys.modules[__name__], method)
- if res == 'flavor':
- res_dict[res] = res_method(template, flavor_extra_input)
- else:
- res_dict[res] = res_method(template)
- return res_dict
-
-
-def add_maintenance_resources(template, res_tpl):
- res_dict = {}
- maintenance_vdus = find_maintenance_vdus(template)
- maintenance_vdus.append('ALL')
- if maintenance_vdus:
- for vdu_name in maintenance_vdus:
- res_dict[vdu_name] = {}
- res_tpl['maintenance'] = res_dict
-
-
-@log.log
-def get_policy_dict(template, policy_type):
- policy_dict = dict()
- for policy in template.policies:
- if (policy.type_definition.is_derived_from(policy_type)):
- policy_attrs = dict()
- policy_attrs['targets'] = policy.targets
- policy_dict[policy.name] = policy_attrs
- return policy_dict
-
-
-@log.log
-def get_scaling_policy(template):
- scaling_policy_names = list()
- for policy in template.policies:
- if (policy.type_definition.is_derived_from(SCALING)):
- scaling_policy_names.append(policy.name)
- return scaling_policy_names
-
-
@log.log
def get_scaling_group_dict(ht_template, scaling_policy_names):
scaling_group_dict = dict()
@@ -1263,8 +532,8 @@ def get_sub_heat_tmpl_name(tmpl_name):
return uuidutils.generate_uuid() + tmpl_name
-def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata,
- res_tpl, unsupported_res_prop=None,
+def update_nested_scaling_resources(nested_resources,
+ unsupported_res_prop=None,
grant_info=None, inst_req_info=None):
nested_tpl = dict()
yaml.SafeDumper.add_representer(
@@ -1274,11 +543,6 @@ def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata,
nested_resources.items():
nested_resources_dict =\
yamlparser.simple_ordered_parse(nested_resources_yaml)
- if metadata.get('vdus'):
- for vdu_name, metadata_dict in metadata['vdus'].items():
- if nested_resources_dict['resources'].get(vdu_name):
- vdu_dict = nested_resources_dict['resources'][vdu_name]
- vdu_dict['properties']['metadata'] = metadata_dict
convert_grant_info(nested_resources_dict, grant_info)
# Replace external virtual links if specified in the inst_req_info
@@ -1286,7 +550,6 @@ def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata,
for ext_vl in inst_req_info.ext_virtual_links:
_convert_ext_vls(nested_resources_dict, ext_vl)
- add_resources_tpl(nested_resources_dict, res_tpl)
for res in nested_resources_dict["resources"].values():
if not res['type'] == HEAT_SOFTWARE_CONFIG:
continue
@@ -1298,16 +561,6 @@ def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata,
convert_unsupported_res_prop(nested_resources_dict,
unsupported_res_prop)
- if mgmt_ports:
- for outputname, portname in mgmt_ports.items():
- ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
- output = {outputname: {'value': ipval}}
- if 'outputs' in nested_resources_dict:
- nested_resources_dict['outputs'].update(output)
- else:
- nested_resources_dict['outputs'] = output
- LOG.debug(_('Added output for %s'), outputname)
-
nested_tpl[nested_resource_name] =\
yaml.safe_dump(nested_resources_dict)
diff --git a/tacker/vnflcm/vnflcm_driver.py b/tacker/vnflcm/vnflcm_driver.py
index 6a33d9e83..426291dcf 100644
--- a/tacker/vnflcm/vnflcm_driver.py
+++ b/tacker/vnflcm/vnflcm_driver.py
@@ -231,12 +231,6 @@ def revert_to_error_rollback(function):
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
- try:
- self._update_vnf_rollback_status_err(context, vnf_info)
- except Exception as e:
- LOG.error("Failed to revert scale info for event "
- "instance %(id)s. Error: %(error)s",
- {"id": vnf_instance.id, "error": e})
try:
self._vnf_instance_update(context, vnf_instance)
except Exception as e:
@@ -1625,7 +1619,6 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
scale_id_list = []
scale_name_list = []
grp_id = None
- self._update_vnf_rollback_pre(context, vnf_info)
if vnf_lcm_op_occs.operation == 'SCALE':
if vim_connection_info.vim_type != 'kubernetes':
# NOTE(ueha): The logic of Scale for OpenStack VIM is widely
@@ -1755,9 +1748,6 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
vnf_lcm_op_occs.error_point = EP.PRE_VIM_CONTROL
- def _update_vnf_rollback_pre(self, context, vnf_info):
- self._vnfm_plugin._update_vnf_rollback_pre(context, vnf_info)
-
def _update_vnf_rollback(self, context, vnf_info,
vnf_instance, vnf_lcm_op_occs):
if vnf_lcm_op_occs.operation == 'SCALE':
@@ -1771,9 +1761,6 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
vnf_lcm_op_occs.save()
vnf_instance.save()
- def _update_vnf_rollback_status_err(self, context, vnf_info):
- self._vnfm_plugin.update_vnf_rollback_status_err(context, vnf_info)
-
def _rollback_vnf_post(
self,
context,
diff --git a/tacker/vnfm/infra_drivers/abstract_driver.py b/tacker/vnfm/infra_drivers/abstract_driver.py
index f97379e7e..33a4c3a92 100644
--- a/tacker/vnfm/infra_drivers/abstract_driver.py
+++ b/tacker/vnfm/infra_drivers/abstract_driver.py
@@ -67,10 +67,6 @@ class VnfAbstractDriver(extensions.PluginInterface, metaclass=abc.ABCMeta):
'''Fetches optional details of a VNF'''
pass
- @abc.abstractmethod
- def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
- pass
-
@abc.abstractmethod
def pre_instantiation_vnf(self, context, vnf_instance,
vim_connection_info, vnf_software_images):
diff --git a/tacker/vnfm/infra_drivers/kubernetes/k8s/tosca_kube_object.py b/tacker/vnfm/infra_drivers/kubernetes/k8s/tosca_kube_object.py
deleted file mode 100644
index 1c809e00c..000000000
--- a/tacker/vnfm/infra_drivers/kubernetes/k8s/tosca_kube_object.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-class ToscaKubeObject(object):
-
- """ToscaKubeObject holds the basic struct of a VDU.
-
- That is used for translating TOSCA to Kubernetes templates.
- """
-
- def __init__(self, name=None, namespace=None, mapping_ports=None,
- containers=None, network_name=None,
- mgmt_connection_point=False, scaling_object=None,
- service_type=None, labels=None, annotations=None):
- self._name = name
- self._namespace = namespace
- self._mapping_ports = mapping_ports
- self._containers = containers
- self._network_name = network_name
- self._mgmt_connection_point = mgmt_connection_point
- self._scaling_object = scaling_object
- self._service_type = service_type
- self._labels = labels
- self._annotations = annotations
-
- @property
- def name(self):
- return self._name
-
- @name.setter
- def name(self, name):
- self._name = name
-
- @property
- def namespace(self):
- return self._namespace
-
- @namespace.setter
- def namespace(self, namespace):
- self._namespace = namespace
-
- @property
- def mapping_ports(self):
- return self._mapping_ports
-
- @mapping_ports.setter
- def mapping_ports(self, mapping_ports):
- self._mapping_ports = mapping_ports
-
- @property
- def containers(self):
- return self._containers
-
- @containers.setter
- def containers(self, containers):
- self._containers = containers
-
- @property
- def network_name(self):
- return self._network_name
-
- @network_name.setter
- def network_name(self, network_name):
- self._network_name = network_name
-
- @property
- def mgmt_connection_point(self):
- return self._mgmt_connection_point
-
- @mgmt_connection_point.setter
- def mgmt_connection_point(self, mgmt_connection_point):
- self._mgmt_connection_point = mgmt_connection_point
-
- @property
- def scaling_object(self):
- return self._scaling_object
-
- @scaling_object.setter
- def scaling_object(self, scaling_object):
- self._scaling_object = scaling_object
-
- @property
- def service_type(self):
- return self._service_type
-
- @service_type.setter
- def service_type(self, service_type):
- self._service_type = service_type
-
- @property
- def labels(self):
- return self._labels
-
- @labels.setter
- def labels(self, labels):
- self._labels = labels
-
- @property
- def annotations(self):
- return self._annotations
-
- @annotations.setter
- def annotations(self, annotations):
- self._annotations = annotations
-
-
-class Container(object):
- """Container holds the basic structs of a container"""
- def __init__(self, name=None, num_cpus=None, mem_size=None, image=None,
- command=None, args=None, ports=None, config=None):
- self._name = name
- self._num_cpus = num_cpus
- self._mem_size = mem_size
- self._image = image
- self._command = command
- self._args = args
- self._ports = ports
- self._config = config
-
- @property
- def name(self):
- return self._name
-
- @name.setter
- def name(self, name):
- self._name = name
-
- @property
- def num_cpus(self):
- return self._num_cpus
-
- @num_cpus.setter
- def num_cpus(self, num_cpus):
- self._num_cpus = num_cpus
-
- @property
- def mem_size(self):
- return self._mem_size
-
- @mem_size.setter
- def mem_size(self, mem_size):
- self._mem_size = mem_size
-
- @property
- def image(self):
- return self._image
-
- @image.setter
- def image(self, image):
- self._image = image
-
- @property
- def command(self):
- return self._command
-
- @command.setter
- def command(self, command):
- self._command = command
-
- @property
- def args(self):
- return self._args
-
- @args.setter
- def args(self, args):
- self._args = args
-
- @property
- def ports(self):
- return self._ports
-
- @ports.setter
- def ports(self, ports):
- self._ports = ports
-
- @property
- def config(self):
- return self._config
-
- @config.setter
- def config(self, config):
- self._config = config
-
-
-class ScalingObject(object):
- """ScalingObject holds the basic struct of a horizontal pod auto-scaling"""
- def __init__(self, scaling_name=None, min_replicas=None, max_replicas=None,
- scale_target_name=None,
- target_cpu_utilization_percentage=None):
- self._scaling_name = scaling_name
- self._min_replicas = min_replicas
- self._max_replicas = max_replicas
- self._scale_target_name = scale_target_name
- self._target_cpu_utilization_percentage = \
- target_cpu_utilization_percentage
-
- @property
- def scaling_name(self):
- return self._scaling_name
-
- @scaling_name.setter
- def scaling_name(self, scaling_name):
- self._scaling_name = scaling_name
-
- @property
- def min_replicas(self):
- return self._min_replicas
-
- @min_replicas.setter
- def min_replicas(self, min_replicas):
- self._min_replicas = min_replicas
-
- @property
- def max_replicas(self):
- return self._max_replicas
-
- @max_replicas.setter
- def max_replicas(self, max_replicas):
- self._max_replicas = max_replicas
-
- @property
- def scale_target_name(self):
- return self._scale_target_name
-
- @scale_target_name.setter
- def scale_target_name(self, scale_target_name):
- self._scale_target_name = scale_target_name
-
- @property
- def target_cpu_utilization_percentage(self):
- return self._target_cpu_utilization_percentage
-
- @target_cpu_utilization_percentage.setter
- def target_cpu_utilization_percentage(self,
- target_cpu_utilization_percentage):
- self._target_cpu_utilization_percentage = \
- target_cpu_utilization_percentage
diff --git a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_inputs.py b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_inputs.py
deleted file mode 100644
index 33771a372..000000000
--- a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_inputs.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-import json
-
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-from tacker.common import log
-from tacker.extensions import vnfm
-from tacker.tosca import utils as toscautils
-from tacker.vnfm.infra_drivers.kubernetes.k8s import tosca_kube_object
-
-from toscaparser.functions import GetInput
-from toscaparser import tosca_template
-import toscaparser.utils.yamlparser
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-YAML_LOADER = toscaparser.utils.yamlparser.load_yaml
-SCALING = 'tosca.policies.Scaling'
-TACKER_CP = 'tosca.nodes.nfv.CP.Tacker'
-TACKER_VL = 'tosca.nodes.nfv.VL'
-COLON_CHARACTER = ':'
-WHITE_SPACE_CHARACTER = ' '
-NON_WHITE_SPACE_CHARACTER = ''
-TOSCA_LINKS_TO = 'tosca.relationships.network.LinksTo'
-TOSCA_BINDS_TO = 'tosca.relationships.network.BindsTo'
-
-ALLOWED_KUBERNETES_OBJECT_PROPS = ('namespace', 'mapping_ports', 'labels',
- 'vnfcs', 'service_type',
- 'mgmt_driver', 'config_drive')
-ALLOWED_CONTAINER_OBJECT_PROPS = ('num_cpus', 'mem_size', 'image', 'config',
- 'command', 'args', 'ports')
-ALLOWED_SCALING_OBJECT_PROPS = ('min_instances', 'max_instances',
- 'target_cpu_utilization_percentage')
-
-SCALAR_UNIT_DICT = {'B': 1, 'kB': 1000, 'KiB': 1024, 'MB': 1000000,
- 'MiB': 1048576, 'GB': 1000000000,
- 'GiB': 1073741824, 'TB': 1000000000000,
- 'TiB': 1099511627776}
-
-
-class Parser(object):
- """Convert TOSCA template to Tosca Kube object"""
-
- def __init__(self, vnfd_dict):
- self.vnfd_dict = vnfd_dict
-
- def loader(self):
- """Load TOSCA template and start parsing"""
-
- try:
- parserd_params = None
- toscautils.updateimports(self.vnfd_dict)
-
- tosca = tosca_template.\
- ToscaTemplate(parsed_params=parserd_params,
- a_file=False,
- yaml_dict_tpl=self.vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- except Exception as e:
- LOG.error("tosca-parser error: %s", str(e))
- raise vnfm.ToscaParserFailed(error_msg_details=str(e))
-
- # Initiate a list tosca_kube_object which are defined from VDU
- tosca_kube_objects = []
- vdus = toscautils.findvdus(tosca)
-
- for node_template in vdus:
- vdu_name = node_template.name
- tosca_kube_obj = self.tosca_to_kube_mapping(node_template)
-
- # Find network name in which VDU is attached
- network_names = self.find_networks(tosca, vdu_name)
- if network_names:
- annotations_pad = \
- json.dumps([{"name": "%s" % net}
- for net in network_names])
- tosca_kube_obj.annotations =\
- {'k8s.v1.cni.cncf.io/networks': annotations_pad}
-
- # If connection_point is True, Tacker will manage its service ip
- tosca_kube_obj.mgmt_connection_point = \
- self.check_mgmt_cp(tosca, vdu_name)
-
- # Find scaling policy that is used for this VDU, different to
- # VM-based VNF, there are no alarm policies.
- tosca_kube_obj.scaling_object = \
- self.get_scaling_policy(tosca, vdu_name)
- tosca_kube_objects.append(tosca_kube_obj)
-
- return tosca_kube_objects
-
- @log.log
- def tosca_to_kube_mapping(self, node_template):
- """Map TOSCA template to ToscaKubeObject properties"""
- tosca_props = self.get_properties(node_template)
- self.check_unsupported_key(tosca_props,
- ALLOWED_KUBERNETES_OBJECT_PROPS)
- tosca_kube_obj = tosca_kube_object.ToscaKubeObject()
-
- # tosca_kube_obj name is used for tracking Kubernetes resources
- service_name = 'svc-' + node_template.name + '-' + \
- uuidutils.generate_uuid()
- tosca_kube_obj.name = service_name[:15]
- tosca_kube_obj.namespace = tosca_props.get('namespace')
- tosca_kube_obj.mapping_ports = tosca_props.get('mapping_ports')
- tosca_kube_obj.labels = tosca_props.get('labels')
-
- # Find config properties of VNFComponents in each VDU node
- vnfc_config_props = tosca_props.get('vnfcs')
- container_objects = self.vnfc_configurable_to_container_mapping(
- vnfc_config_props)
- tosca_kube_obj.containers = container_objects
-
- # In labels, we define which type of Service VNF will be deployed
- service_labels = dict()
- if tosca_kube_obj.labels:
- for label in tosca_kube_obj.labels:
- label = label.replace(
- WHITE_SPACE_CHARACTER,
- NON_WHITE_SPACE_CHARACTER)
- labels = label.split(COLON_CHARACTER)
- service_labels.update({labels[0]: labels[1]})
- tosca_kube_obj.labels = service_labels
- tosca_kube_obj.service_type = tosca_props.get('service_type')
- return tosca_kube_obj
-
- @log.log
- def vnfc_configurable_to_container_mapping(self, vnfc_config_properties):
- """Map VnfcConfigurableProperties to list of containers"""
- containers = list()
- for container_name, container_props in vnfc_config_properties.items():
- container = tosca_kube_object.Container()
- container.name = container_name
- self.check_unsupported_key(container_props,
- ALLOWED_CONTAINER_OBJECT_PROPS)
- container.num_cpus = container_props.get('num_cpus')
- memory_size = container_props.get('mem_size')
- container.mem_size = self.process_memory(memory_size)
- container.image = container_props.get('image')
- container.config = container_props.get('config')
- container.command = container_props.get('command')
- container.args = container_props.get('args')
- container.ports = container_props.get('ports')
- containers.append(container)
- return containers
-
- @log.log
- def process_memory(self, mem_value):
- """Translate memory size with unit to a number of byte memory"""
- # Memory size has the pattern e.g. 512 MB, 1024 MB or 1 GB
- parser_memory = mem_value.split(WHITE_SPACE_CHARACTER)
- memory_value = parser_memory[0]
- memory_unit = parser_memory[1]
- memory_real_value = 0
-
- # Translate memory's byte size based on SCALAR_UNIT_DICT
- if memory_unit in SCALAR_UNIT_DICT:
- memory_real_value = \
- int(memory_value) * SCALAR_UNIT_DICT[memory_unit]
- return memory_real_value
-
- @log.log
- def get_scaling_policy(self, tosca, vdu_name):
- """Find scaling policy which is used for VDU"""
- if len(tosca.policies) == 0:
- scaling_obj = None
- else:
- count = 0
- scaling_obj = tosca_kube_object.ScalingObject()
- for policy in tosca.policies:
- if policy.type_definition.is_derived_from(SCALING) \
- and vdu_name in policy.targets:
- count = count + 1
- policy_props = policy.properties
- # NOTE(ueha): check_unsupported_key() is commented out to
- # resolve vnf create error due to required
- # parameters of policies.
- # self.check_unsupported_key(policy_props,
- # ALLOWED_SCALING_OBJECT_PROPS)
- scaling_obj.scaling_name = policy.name
- scaling_obj.target_cpu_utilization_percentage = \
- policy_props.get(
- 'target_cpu_utilization_percentage')
- scaling_obj.min_replicas = \
- policy_props.get('min_instances')
- scaling_obj.max_replicas = \
- policy_props.get('max_instances')
-
- if count > 1:
- # Because in Kubernetes environment, we can attach only one
- # scaling policy to Deployment. If user provides more than one
- # policy this error will happen when count > 1
- LOG.error("Tacker only support one scaling policy per VDU")
- raise vnfm.InvalidKubernetesScalingPolicyNumber
-
- return scaling_obj
-
- @log.log
- def find_networks(self, tosca, vdu_name):
- """Find networks which VDU is attached based on vdu_name."""
- networks = []
- network_names = []
- for node_template in tosca.nodetemplates:
- if node_template.type_definition.is_derived_from(TACKER_CP):
- match = False
- links_to = None
- binds_to = None
- for rel, node in node_template.relationships.items():
- if not links_to and rel.is_derived_from(TOSCA_LINKS_TO):
- links_to = node
- elif not binds_to and rel.is_derived_from(TOSCA_BINDS_TO):
- binds_to = node
- if binds_to.name == vdu_name:
- match = True
- if match:
- networks.append(links_to.name)
-
- for node_template in tosca.nodetemplates:
- if node_template.type_definition.is_derived_from(TACKER_VL):
- tosca_props = self.get_properties(node_template)
- if node_template.name in networks:
- for key, value in tosca_props.items():
- if key == 'network_name':
- network_names.append(value)
- if network_names:
- return network_names
-
- @log.log
- def check_mgmt_cp(self, tosca, vdu_name):
- """Check if management for connection point is enabled"""
- mgmt_connection_point = False
- for nt in tosca.nodetemplates:
- if nt.type_definition.is_derived_from(TACKER_CP):
- mgmt = nt.get_property_value('management') or None
- if mgmt:
- vdu = None
- for rel, node in nt.relationships.items():
- if rel.is_derived_from(TOSCA_BINDS_TO):
- vdu = node.name
- break
- if vdu == vdu_name:
- mgmt_connection_point = True
- LOG.debug('mgmt_connection_point: %s', mgmt_connection_point)
- return mgmt_connection_point
-
- @log.log
- def get_properties(self, node_template):
- """Return a list of property node template objects."""
- tosca_props = {}
- for prop in node_template.get_properties_objects():
- if isinstance(prop.value, GetInput):
- tosca_props[prop.name] = {'get_param': prop.value.input_name}
- else:
- tosca_props[prop.name] = prop.value
- return tosca_props
-
- def check_unsupported_key(self, input_values, support_key):
- """collect all unsupported keys"""
- found_keys = []
- for key in input_values:
- if key not in support_key:
- found_keys.append(key)
- if len(found_keys) > 0:
- raise vnfm.InvalidKubernetesInputParameter(found_keys=found_keys)
diff --git a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py
index f27146257..415794012 100644
--- a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py
+++ b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py
@@ -128,50 +128,6 @@ class Transformer(object):
"VolumeAttachment": 'create_volume_attachment',
}
- def transform(self, tosca_kube_objects):
- """transform function translates from tosca_kube_object to
-
- kubernetes_object (ConfigMap, Deployment, Service, HPA)
- """
-
- # kubernetes_objects store all kubernetes objects that are transformed
- # from TOSCA VNF template
- kubernetes_objects = dict()
- for tosca_kube_obj in tosca_kube_objects:
- namespace = tosca_kube_obj.namespace
- kubernetes_objects['namespace'] = namespace
- kubernetes_objects['objects'] = list()
- kube_obj_name = tosca_kube_obj.name
- new_kube_obj_name = self.pre_process_name(kube_obj_name)
-
- # translate environments to ConfigMap objects
- for container in tosca_kube_obj.containers:
- config_map_object = \
- self.init_configmap(container_props=container,
- kube_obj_name=new_kube_obj_name)
- if config_map_object:
- kubernetes_objects['objects'].append(config_map_object)
-
- # translate Deployment object
- deployment_object = \
- self.init_deployment(tosca_kube_obj=tosca_kube_obj,
- kube_obj_name=new_kube_obj_name)
- kubernetes_objects['objects'].append(deployment_object)
-
- # translate to Horizontal Pod Autoscaler object
- hpa_object = self.init_hpa(tosca_kube_obj=tosca_kube_obj,
- kube_obj_name=new_kube_obj_name)
- if hpa_object:
- kubernetes_objects['objects'].append(hpa_object)
-
- # translate to Service object
- service_object = self.init_service(
- tosca_kube_obj=tosca_kube_obj,
- kube_obj_name=new_kube_obj_name)
- kubernetes_objects['objects'].append(service_object)
-
- return kubernetes_objects
-
def _gen_k8s_obj_from_name(self, obj_name):
"""Generate kubernetes object
@@ -279,51 +235,6 @@ class Transformer(object):
)
return response
- def deploy(self, kubernetes_objects):
- """Deploy Kubernetes objects on Kubernetes VIM and return
-
- a list name of services
- """
- deployment_names = list()
- namespace = kubernetes_objects.get('namespace')
- k8s_objects = kubernetes_objects.get('objects')
-
- for k8s_object in k8s_objects:
- object_type = k8s_object.kind
-
- if object_type == 'ConfigMap':
- self.core_v1_api_client.create_namespaced_config_map(
- namespace=namespace,
- body=k8s_object)
- LOG.debug('Successfully created ConfigMap %s',
- k8s_object.metadata.name)
- elif object_type == 'Deployment':
- self.app_v1_api_client.create_namespaced_deployment(
- namespace=namespace,
- body=k8s_object)
- LOG.debug('Successfully created Deployment %s',
- k8s_object.metadata.name)
- elif object_type == 'HorizontalPodAutoscaler':
- self.scaling_api_client. \
- create_namespaced_horizontal_pod_autoscaler(
- namespace=namespace,
- body=k8s_object)
- LOG.debug('Successfully created Horizontal Pod Autoscaler %s',
- k8s_object.metadata.name)
- elif object_type == 'Service':
- self.core_v1_api_client.create_namespaced_service(
- namespace=namespace,
- body=k8s_object)
- LOG.debug('Successfully created Service %s',
- k8s_object.metadata.name)
- deployment_names.append(namespace)
- deployment_names.append(k8s_object.metadata.name)
-
- # return a string that contains all deployment namespace and names
- # for tracking resources pattern:
- # namespace1,deployment1,namespace2,deployment2,namespace3,deployment3
- return ",".join(deployment_names)
-
def deploy_k8s(self, kubernetes_objects):
"""Deploy kubernetes
@@ -422,225 +333,3 @@ class Transformer(object):
v1_object_meta = client.V1ObjectMeta()
self._init_k8s_obj(v1_object_meta, content)
return v1_object_meta
-
- # config_labels configures label
- def config_labels(self, deployment_name=None, scaling_name=None):
- label = dict()
- if deployment_name:
- label.update({"selector": deployment_name})
- if scaling_name:
- label.update({"scaling_name": scaling_name})
- return label
-
- # Init resource requirement for container
- def init_resource_requirements(self, container):
- limits = dict()
- requests = dict()
- if container.num_cpus:
- limits.update({'cpu': container.num_cpus})
- requests.update({'cpu': container.num_cpus})
- if container.mem_size:
- limits.update({'memory': container.mem_size})
- requests.update({'memory': container.mem_size})
- return client.V1ResourceRequirements(limits=limits,
- requests=requests)
-
- def init_envs(self, container_props, name):
- config = container_props.config
- config_dict = self.pre_process_config(config)
- configmap_name = name
-
- list_envs = []
- for key in config_dict:
- config_map_ref = client.V1ConfigMapKeySelector(
- key=key,
- name=configmap_name)
- env_var = client.V1EnvVarSource(
- config_map_key_ref=config_map_ref)
- env_object = client.V1EnvVar(
- name=key,
- value_from=env_var)
- list_envs.append(env_object)
- return list_envs
-
- # Init container object
- def init_containers(self, container_props, limit_resource, name):
- list_env_var = self.init_envs(container_props, name)
- container_name = self.pre_process_name(container_props.name)
- list_container_port = list()
- if container_props.ports:
- for container_port in container_props.ports:
- port = int(container_port)
- cport = client.V1ContainerPort(container_port=port)
- list_container_port.append(cport)
- container = client.V1Container(
- name=container_name,
- image=container_props.image,
- ports=list_container_port,
- resources=limit_resource,
- command=container_props.command,
- args=container_props.args,
- env=list_env_var,
- image_pull_policy="IfNotPresent")
- return container
-
- # init_deployment initializes Kubernetes Pod object
- def init_deployment(self, tosca_kube_obj, kube_obj_name):
- """Instantiate the deployment object"""
-
- deployment_name = kube_obj_name
- # Create a list of container, which made a Pod
- containers = list()
- for container_prop in tosca_kube_obj.containers:
- limit_resource = self.init_resource_requirements(container_prop)
- container = self.init_containers(
- container_props=container_prop,
- limit_resource=limit_resource,
- name=deployment_name)
- containers.append(container)
-
- # Make a label with pattern {"selector": "deployment_name"}
- if tosca_kube_obj.scaling_object:
- scaling_name = tosca_kube_obj.scaling_object.scaling_name
- update_label = self.config_labels(deployment_name=deployment_name,
- scaling_name=scaling_name)
- else:
- update_label = self.config_labels(deployment_name=deployment_name)
- if tosca_kube_obj.labels:
- if 'selector' in update_label:
- del update_label['selector']
- update_label.update(tosca_kube_obj.labels)
- labels = update_label
-
- # Create and configure a spec section
- pod_template = client.V1PodTemplateSpec(
- metadata=client.V1ObjectMeta(
- labels=labels, annotations=tosca_kube_obj.annotations),
- spec=client.V1PodSpec(containers=containers))
- # Create the specification of deployment
- label_selector = client.V1LabelSelector(match_labels=labels)
- deployment_spec = client.V1DeploymentSpec(
- template=pod_template, selector=label_selector)
- metadata = client.V1ObjectMeta(name=deployment_name, labels=labels)
-
- # Instantiate the deployment object
- deployment = client.V1Deployment(
- api_version="apps/v1",
- kind="Deployment",
- metadata=metadata,
- spec=deployment_spec)
- return deployment
-
- # init_hpa initializes Kubernetes Horizon Pod Auto-scaling object
- def init_hpa(self, tosca_kube_obj, kube_obj_name):
- scaling_props = tosca_kube_obj.scaling_object
- hpa = None
- if scaling_props:
- min_replicas = scaling_props.min_replicas
- max_replicas = scaling_props.max_replicas
- cpu_util = scaling_props.target_cpu_utilization_percentage
- deployment_name = kube_obj_name
-
- # Create target Deployment object
- target = client.V1CrossVersionObjectReference(
- api_version="apps/v1",
- kind="Deployment",
- name=deployment_name)
- # Create the specification of horizon pod auto-scaling
- hpa_spec = client.V1HorizontalPodAutoscalerSpec(
- min_replicas=min_replicas,
- max_replicas=max_replicas,
- target_cpu_utilization_percentage=cpu_util,
- scale_target_ref=target)
- metadata = client.V1ObjectMeta(name=deployment_name)
- # Create Horizon Pod Auto-Scaling
- hpa = client.V1HorizontalPodAutoscaler(
- api_version="autoscaling/v1",
- kind="HorizontalPodAutoscaler",
- spec=hpa_spec,
- metadata=metadata)
- return hpa
-
- # init_service initializes Kubernetes service object
- def init_service(self, tosca_kube_obj, kube_obj_name):
- list_service_port = list()
- service_label = tosca_kube_obj.labels
- for port in tosca_kube_obj.mapping_ports:
- if COLON_CHARACTER in port:
- ports = port.split(COLON_CHARACTER)
- published_port = int(ports[0])
- target_port = int(ports[1])
- else:
- target_port = published_port = int(port)
- service_port = client.V1ServicePort(
- name=str(published_port),
- port=published_port,
- target_port=target_port)
- list_service_port.append(service_port)
-
- deployment_name = kube_obj_name
- selector_by_name = self.config_labels(deployment_name)
- if tosca_kube_obj.labels:
- selectors = tosca_kube_obj.labels.copy()
- else:
- selectors = selector_by_name
- if tosca_kube_obj.mgmt_connection_point:
- service_label['management_connection'] = 'True'
- if tosca_kube_obj.network_name:
- service_label['network_name'] = tosca_kube_obj.network_name
- service_label['vdu_name'] = tosca_kube_obj.name
-
- metadata = client.V1ObjectMeta(name=deployment_name,
- labels=service_label)
- if tosca_kube_obj.service_type:
- service_type = tosca_kube_obj.service_type
- else:
- service_type = None
- service_spec = client.V1ServiceSpec(
- selector=selectors,
- ports=list_service_port,
- type=service_type)
-
- service = client.V1Service(
- api_version="v1",
- kind="Service",
- spec=service_spec,
- metadata=metadata)
- return service
-
- # init_config_map initializes Kubernetes ConfigMap object
- def init_configmap(self, container_props, kube_obj_name):
- config_map = None
- if container_props.config:
- configmap_name = kube_obj_name
- metadata = client.V1ObjectMeta(name=configmap_name)
- config_dict = self.pre_process_config(container_props.config)
- config_map = client.V1ConfigMap(
- api_version="v1",
- kind="ConfigMap",
- data=config_dict,
- metadata=metadata)
- return config_map
-
- def pre_process_name(self, name):
- # replace '_' by '-' to meet Kubernetes' requirement
- new_name = name.replace(DASH_CHARACTER, HYPHEN_CHARACTER).lower()
- return new_name
-
- def pre_process_config(self, config):
- # Split by separating lines
- config_dict = {}
- if config:
- configs = config.split(NEWLINE_CHARACTER)
- for config_item in configs:
- # Ignore if config_item is null
- if config_item:
- # Strip all types of white-space characters
- config_item = config_item.replace(
- WHITE_SPACE_CHARACTER,
- NON_WHITE_SPACE_CHARACTER)
- config_prop = config_item.split(COLON_CHARACTER)
- config_dict[config_prop[0]] = config_prop[1]
- # config_dict has the pattern such as
- # {'param1': 'key1', 'param0': 'key0'}
- return config_dict
diff --git a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py
index 3e80acd2c..c085e3dbb 100644
--- a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py
+++ b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py
@@ -46,7 +46,6 @@ from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.infra_drivers import abstract_driver
from tacker.vnfm.infra_drivers.kubernetes.helm import helm_client
from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
-from tacker.vnfm.infra_drivers.kubernetes import translate_template
from tacker.vnfm.infra_drivers.kubernetes import utils as k8s_utils
from tacker.vnfm.infra_drivers import scale_driver
from urllib.parse import urlparse
@@ -119,105 +118,13 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
def get_description(self):
return 'Kubernetes infra driver'
- @log.log
- def create(self, plugin, context, vnf, auth_attr):
- """Create function
-
- Create ConfigMap, Deployment, Service and Horizontal Pod Autoscaler
- objects. Return a string that contains all deployment namespace and
- names for tracking resources.
- """
- LOG.debug('vnf %s', vnf)
- # initialize Kubernetes APIs
- auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
- try:
- core_v1_api_client = self.kubernetes.get_core_v1_api_client(
- auth=auth_cred)
- app_v1_api_client = self.kubernetes.get_app_v1_api_client(
- auth=auth_cred)
- scaling_api_client = self.kubernetes.get_scaling_api_client(
- auth=auth_cred)
- tosca_to_kubernetes = translate_template.TOSCAToKubernetes(
- vnf=vnf,
- core_v1_api_client=core_v1_api_client,
- app_v1_api_client=app_v1_api_client,
- scaling_api_client=scaling_api_client)
- deployment_names = tosca_to_kubernetes.deploy_kubernetes_objects()
- except Exception as e:
- LOG.error('Creating VNF got an error due to %s', e)
- raise
- finally:
- self.clean_authenticate_vim(auth_cred, file_descriptor)
- return deployment_names
+ def create(self, plugin, context, vnf):
+ # NOTE: This method was used for Legacy API, and leave this method
+ # because define as abstractmethod in super class.
+ pass
def create_wait(self, plugin, context, vnf_dict, vnf_id, auth_attr):
- """Create wait function
-
- Create wait function will marked VNF is ACTIVE when all status state
- from Pod objects is RUNNING.
- """
- # initialize Kubernetes APIs
- if '{' not in vnf_id and '}' not in vnf_id:
- auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
- try:
- core_v1_api_client = \
- self.kubernetes.get_core_v1_api_client(auth=auth_cred)
- deployment_info = vnf_id.split(COMMA_CHARACTER)
- mgmt_ips = dict()
- pods_information = self._get_pods_information(
- core_v1_api_client=core_v1_api_client,
- deployment_info=deployment_info)
- status = self.get_pod_status(pods_information)
- stack_retries = self.STACK_RETRIES
- error_reason = None
- while status == 'Pending' and stack_retries > 0:
- time.sleep(self.STACK_RETRY_WAIT)
- pods_information = \
- self._get_pods_information(
- core_v1_api_client=core_v1_api_client,
- deployment_info=deployment_info)
- status = self.get_pod_status(pods_information)
- LOG.debug('status: %s', status)
- stack_retries = stack_retries - 1
-
- LOG.debug('VNF initializing status: %(service_name)s '
- '%(status)s',
- {'service_name': str(deployment_info),
- 'status': status})
- if stack_retries == 0 and status != 'Running':
- error_reason = _(
- "Resource creation is not completed within"
- " {wait} seconds as creation of stack {stack}"
- " is not completed").format(
- wait=(
- self.STACK_RETRIES *
- self.STACK_RETRY_WAIT),
- stack=vnf_id)
- LOG.error("VNF Creation failed: %(reason)s",
- {'reason': error_reason})
- raise vnfm.VNFCreateWaitFailed(reason=error_reason)
- elif stack_retries != 0 and status != 'Running':
- raise vnfm.VNFCreateWaitFailed(reason=error_reason)
-
- for i in range(0, len(deployment_info), 2):
- namespace = deployment_info[i]
- deployment_name = deployment_info[i + 1]
- service_info = core_v1_api_client.read_namespaced_service(
- name=deployment_name,
- namespace=namespace)
- if service_info.metadata.labels.get(
- "management_connection"):
- vdu_name = service_info.metadata.labels.\
- get("vdu_name").split("-")[1]
- mgmt_ip = service_info.spec.cluster_ip
- mgmt_ips.update({vdu_name: mgmt_ip})
- vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes(
- mgmt_ips)
- except Exception as e:
- LOG.error('Creating wait VNF got an error due to %s', e)
- raise
- finally:
- self.clean_authenticate_vim(auth_cred, file_descriptor)
+ pass
def create_wait_k8s(self, k8s_objs, k8s_client_dict, vnf_instance):
try:
@@ -642,73 +549,6 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
# TODO(phuoc): do nothing, will update it if we need actions
pass
- def _delete_legacy(self, vnf_id, auth_cred):
- """Delete function"""
- # initialize Kubernetes APIs
- try:
- core_v1_api_client = self.kubernetes.get_core_v1_api_client(
- auth=auth_cred)
- app_v1_api_client = self.kubernetes.get_app_v1_api_client(
- auth=auth_cred)
- scaling_api_client = self.kubernetes.get_scaling_api_client(
- auth=auth_cred)
- deployment_names = vnf_id.split(COMMA_CHARACTER)
-
- for i in range(0, len(deployment_names), 2):
- namespace = deployment_names[i]
- deployment_name = deployment_names[i + 1]
- # delete ConfigMap if it exists
- try:
- body = {}
- core_v1_api_client.delete_namespaced_config_map(
- namespace=namespace,
- name=deployment_name,
- body=body)
- LOG.debug('Successfully deleted ConfigMap %s',
- deployment_name)
- except Exception as e:
- LOG.debug(e)
- pass
- # delete Service if it exists
- try:
- core_v1_api_client.delete_namespaced_service(
- namespace=namespace,
- name=deployment_name)
- LOG.debug('Successfully deleted Service %s',
- deployment_name)
- except Exception as e:
- LOG.debug(e)
- pass
- # delete Horizon Pod Auto-scaling if it exists
- try:
- body = client.V1DeleteOptions()
- scaling_api_client.\
- delete_namespaced_horizontal_pod_autoscaler(
- namespace=namespace,
- name=deployment_name,
- body=body)
- LOG.debug('Successfully deleted Horizon Pod Auto-Scaling '
- '%s', deployment_name)
- except Exception as e:
- LOG.debug(e)
- pass
- # delete Deployment if it exists
- try:
- body = client.V1DeleteOptions(
- propagation_policy='Foreground',
- grace_period_seconds=5)
- app_v1_api_client.delete_namespaced_deployment(
- namespace=namespace,
- name=deployment_name,
- body=body)
- LOG.debug('Successfully deleted Deployment %s',
- deployment_name)
- except Exception as e:
- LOG.debug(e)
- pass
- except Exception:
- raise
-
def _select_delete_api(self, k8s_client_dict, namespace, name,
kind, api_version, body):
"""select kubernetes delete api and call"""
@@ -852,140 +692,72 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
"""Delete function"""
auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
try:
- if not vnf_instance:
- # execute legacy delete method
- self._delete_legacy(vnf_id, auth_cred)
+ # check use_helm flag
+ inst_vnf_info = vnf_instance.instantiated_vnf_info
+ if self._is_use_helm_flag(inst_vnf_info.additional_params):
+ self._helm_uninstall(context, vnf_instance)
+ return
+ # initialize Kubernetes APIs
+ k8s_client_dict = self.kubernetes.\
+ get_k8s_client_dict(auth=auth_cred)
+ # get V1DeleteOptions for deleting an API object
+ body = {}
+ vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
+ context, vnf_instance.id)
+ if terminate_vnf_req:
+ if terminate_vnf_req.termination_type == 'GRACEFUL':
+ grace_period_seconds = terminate_vnf_req.\
+ graceful_termination_timeout
+ elif terminate_vnf_req.termination_type == 'FORCEFUL':
+ grace_period_seconds = 0
+
+ body = client.V1DeleteOptions(
+ propagation_policy='Foreground',
+ grace_period_seconds=grace_period_seconds)
else:
- # check use_helm flag
- inst_vnf_info = vnf_instance.instantiated_vnf_info
- if self._is_use_helm_flag(inst_vnf_info.additional_params):
- self._helm_uninstall(context, vnf_instance)
- return
- # initialize Kubernetes APIs
- k8s_client_dict = self.kubernetes.\
- get_k8s_client_dict(auth=auth_cred)
- # get V1DeleteOptions for deleting an API object
- body = {}
- vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
- context, vnf_instance.id)
- if terminate_vnf_req:
- if terminate_vnf_req.termination_type == 'GRACEFUL':
- grace_period_seconds = terminate_vnf_req.\
- graceful_termination_timeout
- elif terminate_vnf_req.termination_type == 'FORCEFUL':
- grace_period_seconds = 0
+ body = client.V1DeleteOptions(
+ propagation_policy='Foreground')
- body = client.V1DeleteOptions(
- propagation_policy='Foreground',
- grace_period_seconds=grace_period_seconds)
- else:
- body = client.V1DeleteOptions(
- propagation_policy='Foreground')
-
- # follow the order below to resolve dependency when deleting
- ordered_kind = [
- # 1.
- 'Deployment', 'Job', 'DaemonSet', 'StatefulSet',
- # 2.
- 'Pod',
- # 3.
- 'PersistentVolumeClaim', 'ConfigMap', 'Secret',
- 'PriorityClass',
- # 4.
- 'PersistentVolume',
- # 5.
- 'StorageClass',
- # 6. Except for 1 to 5 above, delete before `Namespace`
- 'Service', 'LimitRange', 'PodTemplate', 'Node',
- 'ResourceQuota', 'ServiceAccount', 'APIService',
- 'ReplicaSet', 'ControllerRevision',
- 'HorizontalPodAutoscaler', 'Lease', 'NetworkPolicy',
- 'ClusterRole', 'ClusterRoleBinding', 'Role', 'RoleBinding',
- 'VolumeAttachment',
- # 7. Delete `Namespace` finally
- 'Namespace'
- ]
- namespace = vnf_instance.vnf_metadata['namespace']
- for kind in ordered_kind:
- for vnf_resource in vnf_resources:
- obj_kind = vnf_resource.resource_type.\
- split(COMMA_CHARACTER)[1]
- if obj_kind == kind:
- self._delete_k8s_obj(
- kind=obj_kind,
- k8s_client_dict=k8s_client_dict,
- vnf_resource=vnf_resource,
- body=body, namespace=namespace)
+ # follow the order below to resolve dependency when deleting
+ ordered_kind = [
+ # 1.
+ 'Deployment', 'Job', 'DaemonSet', 'StatefulSet',
+ # 2.
+ 'Pod',
+ # 3.
+ 'PersistentVolumeClaim', 'ConfigMap', 'Secret',
+ 'PriorityClass',
+ # 4.
+ 'PersistentVolume',
+ # 5.
+ 'StorageClass',
+ # 6. Except for 1 to 5 above, delete before `Namespace`
+ 'Service', 'LimitRange', 'PodTemplate', 'Node',
+ 'ResourceQuota', 'ServiceAccount', 'APIService',
+ 'ReplicaSet', 'ControllerRevision',
+ 'HorizontalPodAutoscaler', 'Lease', 'NetworkPolicy',
+ 'ClusterRole', 'ClusterRoleBinding', 'Role', 'RoleBinding',
+ 'VolumeAttachment',
+ # 7. Delete `Namespace` finally
+ 'Namespace'
+ ]
+ namespace = vnf_instance.vnf_metadata['namespace']
+ for kind in ordered_kind:
+ for vnf_resource in vnf_resources:
+ obj_kind = vnf_resource.resource_type.\
+ split(COMMA_CHARACTER)[1]
+ if obj_kind == kind:
+ self._delete_k8s_obj(
+ kind=obj_kind,
+ k8s_client_dict=k8s_client_dict,
+ vnf_resource=vnf_resource,
+ body=body, namespace=namespace)
except Exception as e:
LOG.error('Deleting VNF got an error due to %s', e)
raise
finally:
self.clean_authenticate_vim(auth_cred, file_descriptor)
- def _delete_wait_legacy(self, vnf_id, auth_cred):
- """Delete wait function for legacy
-
- This function is used to checking a containerized VNF is deleted
- completely or not. We do it by get information of Kubernetes objects.
- When Tacker can not get any information about service, the VNF will be
- marked as deleted.
- """
- try:
- core_v1_api_client = self.kubernetes.get_core_v1_api_client(
- auth=auth_cred)
- app_v1_api_client = self.kubernetes.get_app_v1_api_client(
- auth=auth_cred)
- scaling_api_client = self.kubernetes.get_scaling_api_client(
- auth=auth_cred)
-
- deployment_names = vnf_id.split(COMMA_CHARACTER)
- keep_going = True
- stack_retries = self.STACK_RETRIES
- while keep_going and stack_retries > 0:
- count = 0
- for i in range(0, len(deployment_names), 2):
- namespace = deployment_names[i]
- deployment_name = deployment_names[i + 1]
- try:
- core_v1_api_client.read_namespaced_config_map(
- namespace=namespace,
- name=deployment_name)
- count = count + 1
- except Exception:
- pass
- try:
- core_v1_api_client.read_namespaced_service(
- namespace=namespace,
- name=deployment_name)
- count = count + 1
- except Exception:
- pass
- try:
- scaling_api_client.\
- read_namespaced_horizontal_pod_autoscaler(
- namespace=namespace,
- name=deployment_name)
- count = count + 1
- except Exception:
- pass
- try:
- app_v1_api_client.read_namespaced_deployment(
- namespace=namespace,
- name=deployment_name)
- count = count + 1
- except Exception:
- pass
- stack_retries = stack_retries - 1
- # If one of objects is still alive, keeps on waiting
- if count > 0:
- keep_going = True
- time.sleep(self.STACK_RETRY_WAIT)
- else:
- keep_going = False
- except Exception as e:
- LOG.error('Deleting wait VNF got an error due to %s', e)
- raise
-
def select_k8s_obj_read_api(self, k8s_client_dict, namespace, name,
kind, api_version):
"""select kubernetes read api and call"""
@@ -1051,106 +823,56 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
try:
- if not vnf_instance:
- # execute legacy delete_wait method
- self._delete_wait_legacy(vnf_id, auth_cred)
- else:
- vnf_resources = objects.VnfResourceList.\
- get_by_vnf_instance_id(context, vnf_instance.id)
- k8s_client_dict = self.kubernetes.\
- get_k8s_client_dict(auth=auth_cred)
- namespace = vnf_instance.vnf_metadata['namespace']
+ vnf_resources = objects.VnfResourceList.\
+ get_by_vnf_instance_id(context, vnf_instance.id)
+ k8s_client_dict = self.kubernetes.\
+ get_k8s_client_dict(auth=auth_cred)
+ namespace = vnf_instance.vnf_metadata['namespace']
- keep_going = True
- stack_retries = self.STACK_RETRIES
+ keep_going = True
+ stack_retries = self.STACK_RETRIES
- while keep_going and stack_retries > 0:
- count = 0
+ while keep_going and stack_retries > 0:
+ count = 0
- for vnf_resource in vnf_resources:
- name = vnf_resource.resource_name
- api_version = vnf_resource.resource_type.\
- split(COMMA_CHARACTER)[0]
- kind = vnf_resource.resource_type.\
- split(COMMA_CHARACTER)[1]
+ for vnf_resource in vnf_resources:
+ name = vnf_resource.resource_name
+ api_version = vnf_resource.resource_type.\
+ split(COMMA_CHARACTER)[0]
+ kind = vnf_resource.resource_type.\
+ split(COMMA_CHARACTER)[1]
- if not k8s_client_dict.get(api_version):
- continue
- try:
- self.select_k8s_obj_read_api(
- k8s_client_dict=k8s_client_dict,
- namespace=namespace,
- name=name,
- kind=kind,
- api_version=api_version)
- count = count + 1
- except Exception:
- pass
+ if not k8s_client_dict.get(api_version):
+ continue
+ try:
+ self.select_k8s_obj_read_api(
+ k8s_client_dict=k8s_client_dict,
+ namespace=namespace,
+ name=name,
+ kind=kind,
+ api_version=api_version)
+ count = count + 1
+ except Exception:
+ pass
- stack_retries = stack_retries - 1
- # If one of objects is still alive, keeps on waiting
- if count > 0:
- keep_going = True
- time.sleep(self.STACK_RETRY_WAIT)
- else:
- keep_going = False
+ stack_retries = stack_retries - 1
+ # If one of objects is still alive, keeps on waiting
+ if count > 0:
+ keep_going = True
+ time.sleep(self.STACK_RETRY_WAIT)
+ else:
+ keep_going = False
- # check use_helm flag
- inst_vnf_info = vnf_instance.instantiated_vnf_info
- if self._is_use_helm_flag(inst_vnf_info.additional_params):
- self._post_helm_uninstall(context, vnf_instance)
+ # check use_helm flag
+ inst_vnf_info = vnf_instance.instantiated_vnf_info
+ if self._is_use_helm_flag(inst_vnf_info.additional_params):
+ self._post_helm_uninstall(context, vnf_instance)
except Exception as e:
LOG.error('Deleting wait VNF got an error due to %s', e)
raise
finally:
self.clean_authenticate_vim(auth_cred, file_descriptor)
- def _scale_legacy(self, policy, auth_cred):
- LOG.debug("VNF are scaled by updating instance of deployment")
-
- app_v1_api_client = self.kubernetes.get_app_v1_api_client(
- auth=auth_cred)
- scaling_api_client = self.kubernetes.get_scaling_api_client(
- auth=auth_cred)
- deployment_names = policy['instance_id'].split(COMMA_CHARACTER)
- policy_name = policy['name']
- policy_action = policy['action']
-
- for i in range(0, len(deployment_names), 2):
- namespace = deployment_names[i]
- deployment_name = deployment_names[i + 1]
- deployment_info = app_v1_api_client.\
- read_namespaced_deployment(namespace=namespace,
- name=deployment_name)
- scaling_info = scaling_api_client.\
- read_namespaced_horizontal_pod_autoscaler(
- namespace=namespace,
- name=deployment_name)
-
- replicas = deployment_info.status.replicas
- scale_replicas = replicas
- vnf_scaling_name = deployment_info.metadata.labels.\
- get("scaling_name")
- if vnf_scaling_name == policy_name:
- if policy_action == 'out':
- scale_replicas = replicas + 1
- elif policy_action == 'in':
- scale_replicas = replicas - 1
-
- min_replicas = scaling_info.spec.min_replicas
- max_replicas = scaling_info.spec.max_replicas
- if (scale_replicas < min_replicas) or \
- (scale_replicas > max_replicas):
- LOG.debug("Scaling replicas is out of range. The number of"
- " replicas keeps %(number)s replicas",
- {'number': replicas})
- scale_replicas = replicas
- deployment_info.spec.replicas = scale_replicas
- app_v1_api_client.patch_namespaced_deployment_scale(
- namespace=namespace,
- name=deployment_name,
- body=deployment_info)
-
def _call_read_scale_api(self, app_v1_api_client, namespace, name, kind):
"""select kubernetes read scale api and call"""
def convert(name):
@@ -1328,106 +1050,62 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
# initialize Kubernetes APIs
auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
try:
- if not policy.get('vnf_instance_id'):
- # execute legacy scale method
- self._scale_legacy(policy, auth_cred)
- else:
- vnf_instance = objects.VnfInstance.get_by_id(
- context, policy['vnf_instance_id'])
- # check use_helm flag
- inst_vnf_info = vnf_instance.instantiated_vnf_info
- additional_params = inst_vnf_info.additional_params
- if self._is_use_helm_flag(additional_params):
- self._helm_scale(context, vnf_instance, policy)
- return
- namespace = vnf_instance.vnf_metadata['namespace']
- vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
- context, policy['vnf_instance_id'])
- app_v1_api_client = self.kubernetes.get_app_v1_api_client(
- auth=auth_cred)
- aspect_id = policy['name']
- vdu_defs = policy['vdu_defs']
- vdu_mapping = additional_params.get('vdu_mapping')
- kind, name, _, vdu_properties = self._get_scale_target_info(
- aspect_id, vdu_defs, vnf_resources, vdu_mapping)
+ vnf_instance = objects.VnfInstance.get_by_id(
+ context, policy['vnf_instance_id'])
+ # check use_helm flag
+ inst_vnf_info = vnf_instance.instantiated_vnf_info
+ additional_params = inst_vnf_info.additional_params
+ if self._is_use_helm_flag(additional_params):
+ self._helm_scale(context, vnf_instance, policy)
+ return
+ namespace = vnf_instance.vnf_metadata['namespace']
+ vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
+ context, policy['vnf_instance_id'])
+ app_v1_api_client = self.kubernetes.get_app_v1_api_client(
+ auth=auth_cred)
+ aspect_id = policy['name']
+ vdu_defs = policy['vdu_defs']
+ vdu_mapping = additional_params.get('vdu_mapping')
+ kind, name, _, vdu_properties = self._get_scale_target_info(
+ aspect_id, vdu_defs, vnf_resources, vdu_mapping)
- scale_info = self._call_read_scale_api(
- app_v1_api_client=app_v1_api_client,
- namespace=namespace,
- name=name,
- kind=kind)
+ scale_info = self._call_read_scale_api(
+ app_v1_api_client=app_v1_api_client,
+ namespace=namespace,
+ name=name,
+ kind=kind)
- current_replicas = scale_info.status.replicas
- vdu_profile = vdu_properties.get('vdu_profile')
- if policy['action'] == 'out':
- scale_replicas = current_replicas + policy['delta_num']
- elif policy['action'] == 'in':
- scale_replicas = current_replicas - policy['delta_num']
+ current_replicas = scale_info.status.replicas
+ vdu_profile = vdu_properties.get('vdu_profile')
+ if policy['action'] == 'out':
+ scale_replicas = current_replicas + policy['delta_num']
+ elif policy['action'] == 'in':
+ scale_replicas = current_replicas - policy['delta_num']
- max_replicas = vdu_profile.get('max_number_of_instances')
- min_replicas = vdu_profile.get('min_number_of_instances')
- if (scale_replicas < min_replicas) or \
- (scale_replicas > max_replicas):
- error_reason = (
- "The number of target replicas after"
- " scaling [{after_replicas}] is out of range").\
- format(
- after_replicas=scale_replicas)
- raise vnfm.CNFScaleFailed(reason=error_reason)
+ max_replicas = vdu_profile.get('max_number_of_instances')
+ min_replicas = vdu_profile.get('min_number_of_instances')
+ if (scale_replicas < min_replicas) or \
+ (scale_replicas > max_replicas):
+ error_reason = (
+ "The number of target replicas after"
+ " scaling [{after_replicas}] is out of range").\
+ format(
+ after_replicas=scale_replicas)
+ raise vnfm.CNFScaleFailed(reason=error_reason)
- scale_info.spec.replicas = scale_replicas
- self._call_patch_scale_api(
- app_v1_api_client=app_v1_api_client,
- namespace=namespace,
- name=name,
- kind=kind,
- body=scale_info)
+ scale_info.spec.replicas = scale_replicas
+ self._call_patch_scale_api(
+ app_v1_api_client=app_v1_api_client,
+ namespace=namespace,
+ name=name,
+ kind=kind,
+ body=scale_info)
except Exception as e:
LOG.error('Scaling VNF got an error due to %s', e)
raise
finally:
self.clean_authenticate_vim(auth_cred, file_descriptor)
- def _scale_wait_legacy(self, policy, auth_cred):
- core_v1_api_client = self.kubernetes.get_core_v1_api_client(
- auth=auth_cred)
- deployment_info = policy['instance_id'].split(",")
-
- pods_information = self._get_pods_information(
- core_v1_api_client=core_v1_api_client,
- deployment_info=deployment_info)
- status = self.get_pod_status(pods_information)
-
- stack_retries = self.STACK_RETRIES
- error_reason = None
- while status == 'Pending' and stack_retries > 0:
- time.sleep(self.STACK_RETRY_WAIT)
-
- pods_information = self._get_pods_information(
- core_v1_api_client=core_v1_api_client,
- deployment_info=deployment_info)
- status = self.get_pod_status(pods_information)
-
- # LOG.debug('status: %s', status)
- stack_retries = stack_retries - 1
-
- LOG.debug('VNF initializing status: %(service_name)s %(status)s',
- {'service_name': str(deployment_info), 'status': status})
-
- if stack_retries == 0 and status != 'Running':
- error_reason = _("Resource creation is not completed within"
- " {wait} seconds as creation of stack {stack}"
- " is not completed").format(
- wait=(self.STACK_RETRIES *
- self.STACK_RETRY_WAIT),
- stack=policy['instance_id'])
- LOG.error("VNF Creation failed: %(reason)s",
- {'reason': error_reason})
- raise vnfm.VNFCreateWaitFailed(reason=error_reason)
-
- elif stack_retries != 0 and status != 'Running':
- raise vnfm.VNFCreateWaitFailed(reason=error_reason)
-
def is_match_pod_naming_rule(self, rsc_kind, rsc_name, pod_name):
match_result = None
if rsc_kind == 'Pod':
@@ -1469,70 +1147,65 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
# initialize Kubernetes APIs
auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
try:
- if not policy.get('vnf_instance_id'):
- # execute legacy scale_wait method
- self._scale_wait_legacy(policy, auth_cred)
- else:
- vnf_instance = objects.VnfInstance.get_by_id(
- context, policy['vnf_instance_id'])
- namespace = vnf_instance.vnf_metadata['namespace']
- vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
- context, policy['vnf_instance_id'])
- core_v1_api_client = self.kubernetes.get_core_v1_api_client(
- auth=auth_cred)
- app_v1_api_client = self.kubernetes.get_app_v1_api_client(
- auth=auth_cred)
- aspect_id = policy['name']
- vdu_defs = policy['vdu_defs']
- inst_vnf_info = vnf_instance.instantiated_vnf_info
- additional_params = inst_vnf_info.additional_params
- vdu_mapping = additional_params.get('vdu_mapping')
- kind, name, _, _ = self._get_scale_target_info(
- aspect_id, vdu_defs, vnf_resources, vdu_mapping)
+ vnf_instance = objects.VnfInstance.get_by_id(
+ context, policy['vnf_instance_id'])
+ namespace = vnf_instance.vnf_metadata['namespace']
+ vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
+ context, policy['vnf_instance_id'])
+ core_v1_api_client = self.kubernetes.get_core_v1_api_client(
+ auth=auth_cred)
+ app_v1_api_client = self.kubernetes.get_app_v1_api_client(
+ auth=auth_cred)
+ aspect_id = policy['name']
+ vdu_defs = policy['vdu_defs']
+ inst_vnf_info = vnf_instance.instantiated_vnf_info
+ additional_params = inst_vnf_info.additional_params
+ vdu_mapping = additional_params.get('vdu_mapping')
+ kind, name, _, _ = self._get_scale_target_info(
+ aspect_id, vdu_defs, vnf_resources, vdu_mapping)
- scale_info = self._call_read_scale_api(
- app_v1_api_client=app_v1_api_client,
- namespace=namespace,
- name=name,
- kind=kind)
- status = 'Pending'
- stack_retries = self.STACK_RETRIES
- error_reason = None
- while status == 'Pending' and stack_retries > 0:
- pods_information = list()
- respone = core_v1_api_client.list_namespaced_pod(
- namespace=namespace)
- for pod in respone.items:
- match_result = self.is_match_pod_naming_rule(
- kind, name, pod.metadata.name)
- if match_result:
- pods_information.append(pod)
+ scale_info = self._call_read_scale_api(
+ app_v1_api_client=app_v1_api_client,
+ namespace=namespace,
+ name=name,
+ kind=kind)
+ status = 'Pending'
+ stack_retries = self.STACK_RETRIES
+ error_reason = None
+ while status == 'Pending' and stack_retries > 0:
+ pods_information = list()
+ respone = core_v1_api_client.list_namespaced_pod(
+ namespace=namespace)
+ for pod in respone.items:
+ match_result = self.is_match_pod_naming_rule(
+ kind, name, pod.metadata.name)
+ if match_result:
+ pods_information.append(pod)
- status = self.get_pod_status(pods_information)
- if status == 'Running' and \
- scale_info.spec.replicas != len(pods_information):
- status = 'Pending'
+ status = self.get_pod_status(pods_information)
+ if status == 'Running' and \
+ scale_info.spec.replicas != len(pods_information):
+ status = 'Pending'
- if status == 'Pending':
- stack_retries = stack_retries - 1
- time.sleep(self.STACK_RETRY_WAIT)
- elif status == 'Unknown':
- error_reason = (
- "CNF Scale failed caused by the Pod status"
- " is Unknown")
- raise vnfm.CNFScaleWaitFailed(reason=error_reason)
-
- if stack_retries == 0 and status != 'Running':
+ if status == 'Pending':
+ stack_retries = stack_retries - 1
+ time.sleep(self.STACK_RETRY_WAIT)
+ elif status == 'Unknown':
error_reason = (
- "CNF Scale failed to complete within"
- " {wait} seconds while waiting for the aspect_id"
- " {aspect_id} to be scaled").format(
- wait=(self.STACK_RETRIES *
- self.STACK_RETRY_WAIT),
- aspect_id=aspect_id)
- LOG.error("CNF Scale failed: %(reason)s",
- {'reason': error_reason})
+ "CNF Scale failed caused by the Pod status"
+ " is Unknown")
raise vnfm.CNFScaleWaitFailed(reason=error_reason)
+
+ if stack_retries == 0 and status != 'Running':
+ error_reason = (
+ "CNF Scale failed to complete within"
+ " {wait} seconds while waiting for the aspect_id"
+ " {aspect_id} to be scaled").format(
+ wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT),
+ aspect_id=aspect_id)
+ LOG.error("CNF Scale failed: %(reason)s",
+ {'reason': error_reason})
+ raise vnfm.CNFScaleWaitFailed(reason=error_reason)
except Exception as e:
LOG.error('Scaling wait CNF got an error due to %s', e)
raise
@@ -1569,9 +1242,6 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
file_path = vim_auth.pop('ca_cert_file')
self.kubernetes.close_tmp_file(file_descriptor, file_path)
- def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
- pass
-
def _is_use_helm_flag(self, additional_params):
if not additional_params:
return False
@@ -1896,50 +1566,42 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
auth_attr = vim_connection_info.access_info
use_helm_flag = self._is_use_helm_flag(
instantiate_vnf_req.additional_params)
- if not target_k8s_files and not use_helm_flag:
- # The case is based on TOSCA for CNF operation.
- # It is out of the scope of this patch.
- instance_id = self.create(
- None, context, vnf_instance, auth_attr)
- return instance_id
+ auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
+ k8s_client_dict = self.kubernetes.get_k8s_client_dict(auth_cred)
+ transformer = translate_outputs.Transformer(
+ None, None, None, k8s_client_dict)
+ deployment_dict_list = list()
+ if use_helm_flag:
+ k8s_objs = self._helm_install(
+ context, vnf_instance, vim_connection_info,
+ instantiate_vnf_req, vnf_package_path, transformer)
else:
- auth_cred, file_descriptor = self.get_auth_creds(auth_attr)
- k8s_client_dict = self.kubernetes.get_k8s_client_dict(auth_cred)
- transformer = translate_outputs.Transformer(
- None, None, None, k8s_client_dict)
- deployment_dict_list = list()
- if use_helm_flag:
- k8s_objs = self._helm_install(
- context, vnf_instance, vim_connection_info,
- instantiate_vnf_req, vnf_package_path, transformer)
+ k8s_objs = transformer.get_k8s_objs_from_yaml(
+ target_k8s_files, vnf_package_path, namespace)
+ k8s_objs = transformer.deploy_k8s(k8s_objs)
+ vnfd_dict['current_error_point'] = EP.POST_VIM_CONTROL
+ k8s_objs = self.create_wait_k8s(
+ k8s_objs, k8s_client_dict, vnf_instance)
+ for k8s_obj in k8s_objs:
+ deployment_dict = dict()
+ deployment_dict['namespace'] = k8s_obj.get('namespace')
+ if k8s_obj.get('object').metadata:
+ deployment_dict['name'] = k8s_obj.get('object').\
+ metadata.name
else:
- k8s_objs = transformer.\
- get_k8s_objs_from_yaml(target_k8s_files, vnf_package_path,
- namespace)
- k8s_objs = transformer.deploy_k8s(k8s_objs)
- vnfd_dict['current_error_point'] = EP.POST_VIM_CONTROL
- k8s_objs = self.create_wait_k8s(
- k8s_objs, k8s_client_dict, vnf_instance)
- for k8s_obj in k8s_objs:
- deployment_dict = dict()
- deployment_dict['namespace'] = k8s_obj.get('namespace')
- if k8s_obj.get('object').metadata:
- deployment_dict['name'] = k8s_obj.get('object').\
- metadata.name
- else:
- deployment_dict['name'] = ''
- deployment_dict['apiVersion'] = k8s_obj.get(
- 'object').api_version
- deployment_dict['kind'] = k8s_obj.get('object').kind
- deployment_dict['status'] = k8s_obj.get('status')
- deployment_dict_list.append(deployment_dict)
- deployment_str_list = [str(x) for x in deployment_dict_list]
- # all the deployment object will store into resource_info_str.
- # and the instance_id is created from all deployment_dict.
- resource_info_str = ';'.join(deployment_str_list)
- self.clean_authenticate_vim(auth_cred, file_descriptor)
- vnfd_dict['instance_id'] = resource_info_str
- return resource_info_str
+ deployment_dict['name'] = ''
+ deployment_dict['apiVersion'] = k8s_obj.get(
+ 'object').api_version
+ deployment_dict['kind'] = k8s_obj.get('object').kind
+ deployment_dict['status'] = k8s_obj.get('status')
+ deployment_dict_list.append(deployment_dict)
+ deployment_str_list = [str(x) for x in deployment_dict_list]
+ # all the deployment object will store into resource_info_str.
+ # and the instance_id is created from all deployment_dict.
+ resource_info_str = ';'.join(deployment_str_list)
+ self.clean_authenticate_vim(auth_cred, file_descriptor)
+ vnfd_dict['instance_id'] = resource_info_str
+ return resource_info_str
def _post_helm_install(self, context, vim_connection_info,
instantiate_vnf_req, transformer, namespace):
diff --git a/tacker/vnfm/infra_drivers/kubernetes/translate_template.py b/tacker/vnfm/infra_drivers/kubernetes/translate_template.py
deleted file mode 100644
index a0f3b060b..000000000
--- a/tacker/vnfm/infra_drivers/kubernetes/translate_template.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from toscaparser.utils import yamlparser
-
-from tacker.common import exceptions
-from tacker.common import log
-from tacker.extensions import common_services as cs
-from tacker.extensions import vnfm
-from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_inputs
-from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class TOSCAToKubernetes(object):
-
- def __init__(self, vnf, core_v1_api_client,
- app_v1_api_client, scaling_api_client):
- self.vnf = vnf
- self.core_v1_api_client = core_v1_api_client
- self.app_v1_api_client = app_v1_api_client
- self.scaling_api_client = scaling_api_client
- self.attributes = {}
- self.vnfd_yaml = None
-
- def generate_tosca_kube_objects(self):
- """Load TOSCA template and return tosca_kube_objects"""
-
- vnfd_dict = self.process_input()
- parser = translate_inputs.Parser(vnfd_dict)
- return parser.loader()
-
- def deploy_kubernetes_objects(self):
- """Translate tosca_kube_objects to Kubernetes objects and deploy them.
-
- Return a string that contains all deployment namespace and names
- """
-
- tosca_kube_objects = self.generate_tosca_kube_objects()
- transformer = translate_outputs.Transformer(
- core_v1_api_client=self.core_v1_api_client,
- app_v1_api_client=self.app_v1_api_client,
- scaling_api_client=self.scaling_api_client,
- k8s_client_dict=None
- )
- kubernetes_objects = transformer.transform(tosca_kube_objects)
- deployment_names = transformer.deploy(
- kubernetes_objects=kubernetes_objects)
- # return namespaces and service names for tracking resources
- return deployment_names
-
- def process_input(self):
- """Process input of vnfd template"""
-
- self.attributes = self.vnf['vnfd']['attributes'].copy()
- self.vnfd_yaml = self.attributes.pop('vnfd', None)
- if self.vnfd_yaml is None:
- LOG.error("VNFD is not provided, so no vnf is created !!")
- raise exceptions.InvalidInput("VNFD template is None.")
- LOG.debug('vnfd_yaml %s', self.vnfd_yaml)
- vnfd_dict = yamlparser.simple_ordered_parse(self.vnfd_yaml)
- LOG.debug('vnfd_dict %s', vnfd_dict)
-
- # Read parameter and process inputs
- if 'get_input' in str(vnfd_dict):
- self._process_parameterized_input(self.vnf['attributes'],
- vnfd_dict)
- return vnfd_dict
-
- @log.log
- def _update_params(self, original, paramvalues):
- for key, value in (original).items():
- if not isinstance(value, dict) or 'get_input' not in str(value):
- pass
- elif isinstance(value, dict):
- if 'get_input' in value:
- if value['get_input'] in paramvalues:
- original[key] = paramvalues[value['get_input']]
- else:
- LOG.error('Key missing Value: %s', key)
- raise cs.InputValuesMissing(key=key)
- else:
- self._update_params(value, paramvalues)
-
- @log.log
- def _process_parameterized_input(self, attrs, vnfd_dict):
- param_vattrs_yaml = attrs.pop('param_values', None)
- if param_vattrs_yaml:
- try:
- param_vattrs_dict = yaml.safe_load(param_vattrs_yaml)
- LOG.debug('param_vattrs_yaml', param_vattrs_dict)
- for node in vnfd_dict['topology_template']['node_templates'].\
- values():
- if 'get_input' in str(node):
- self._update_params(node, param_vattrs_dict)
- except Exception as e:
- LOG.error("Not Well Formed: %s", str(e))
- raise vnfm.ParamYAMLNotWellFormed(
- error_msg_details=str(e))
- else:
- self._update_params(vnfd_dict, param_vattrs_dict)
- else:
- raise cs.ParamYAMLInputMissing()
diff --git a/tacker/vnfm/infra_drivers/noop.py b/tacker/vnfm/infra_drivers/noop.py
index 467bc632c..935663185 100644
--- a/tacker/vnfm/infra_drivers/noop.py
+++ b/tacker/vnfm/infra_drivers/noop.py
@@ -74,9 +74,6 @@ class VnfNoop(abstract_driver.VnfAbstractDriver):
region_name=None):
return {'noop': {'id': uuidutils.generate_uuid(), 'type': 'noop'}}
- def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
- pass
-
def pre_instantiation_vnf(self, context, vnf_instance,
vim_connection_info, image_data):
pass
diff --git a/tacker/vnfm/infra_drivers/openstack/openstack.py b/tacker/vnfm/infra_drivers/openstack/openstack.py
index 89cfbe388..40f808623 100644
--- a/tacker/vnfm/infra_drivers/openstack/openstack.py
+++ b/tacker/vnfm/infra_drivers/openstack/openstack.py
@@ -34,7 +34,6 @@ from tacker._i18n import _
from tacker.common import exceptions
from tacker.common import log
from tacker.common import utils
-from tacker.db.common_services import common_services_db_plugin
from tacker.extensions import vnflcm
from tacker.extensions import vnfm
from tacker import manager
@@ -50,7 +49,6 @@ from tacker.vnfm.infra_drivers.openstack import glance_client as gc
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.infra_drivers.openstack import translate_template
from tacker.vnfm.infra_drivers.openstack import update_template as ut
-from tacker.vnfm.infra_drivers.openstack import vdu
from tacker.vnfm.infra_drivers import scale_driver
from tacker.vnfm.lcm_user_data.constants import USER_DATA_TIMEOUT
from tacker.vnfm.lcm_user_data import utils as user_data_utils
@@ -120,7 +118,6 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
self.IMAGE_RETRY_WAIT = 10
self.LOCK_RETRIES = 10
self.LOCK_RETRY_WAIT = 10
- self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
def get_type(self):
return 'openstack'
@@ -960,14 +957,6 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
except Exception:
raise vnfm.InfraDriverUnreachable(service="Heat API service")
- def heal_vdu(self, plugin, context, vnf_dict, heal_request_data_obj):
- try:
- heal_vdu = vdu.Vdu(context, vnf_dict, heal_request_data_obj)
- heal_vdu.heal_vdu()
- except Exception:
- LOG.error("VNF '%s' failed to heal", vnf_dict['id'])
- raise vnfm.VNFHealFailed(vnf_id=vnf_dict['id'])
-
@log.log
def pre_instantiation_vnf(
self, context, vnf_instance, vim_connection_info,
diff --git a/tacker/vnfm/infra_drivers/openstack/translate_template.py b/tacker/vnfm/infra_drivers/openstack/translate_template.py
index 24baf9300..5ac553865 100644
--- a/tacker/vnfm/infra_drivers/openstack/translate_template.py
+++ b/tacker/vnfm/infra_drivers/openstack/translate_template.py
@@ -24,7 +24,6 @@ from tacker.common import exceptions
from tacker.common import log
from tacker.extensions import common_services as cs
from tacker.extensions import vnfm
-from tacker.plugins.common import constants
from tacker.tosca import utils as toscautils
@@ -64,11 +63,9 @@ class TOSCAToHOT(object):
self.vnfd_yaml = None
self.unsupported_props = {}
self.heat_template_yaml = None
- self.monitoring_dict = None
self.nested_resources = dict()
self.fields = None
self.STACK_FLAVOR_EXTRA = cfg.CONF.openstack_vim.flavor_extra_specs
- self.appmonitoring_dict = None
self.grant_info = grant_info
self.inst_req_info = inst_req_info
@@ -86,13 +83,6 @@ class TOSCAToHOT(object):
self.fields['template'] = self.heat_template_yaml
if not self.vnf['attributes'].get('heat_template'):
self.vnf['attributes']['heat_template'] = self.fields['template']
- if self.monitoring_dict:
- self.vnf['attributes'][
- 'monitoring_policy'] = jsonutils.dump_as_bytes(
- self.monitoring_dict)
- if self.appmonitoring_dict:
- self.vnf['attributes']['app_monitoring_policy'] = \
- jsonutils.dump_as_bytes(self.appmonitoring_dict)
@log.log
def _get_vnfd(self):
@@ -151,96 +141,6 @@ class TOSCAToHOT(object):
else:
self._update_params(value, paramvalues, True)
- @log.log
- def _process_parameterized_input(self, dev_attrs, vnfd_dict):
- param_vattrs_yaml = dev_attrs.pop('param_values', None)
- if param_vattrs_yaml:
- try:
- param_vattrs_dict = yaml.safe_load(param_vattrs_yaml)
- LOG.debug('param_vattrs_yaml', param_vattrs_dict)
- except Exception as e:
- LOG.error("Not Well Formed: %s", str(e))
- raise vnfm.ParamYAMLNotWellFormed(
- error_msg_details=str(e))
- else:
- self._update_params(vnfd_dict, param_vattrs_dict)
- else:
- raise cs.ParamYAMLInputMissing()
-
- @log.log
- def _process_vdu_network_interfaces(self, vdu_id, vdu_dict, properties,
- template_dict):
-
- networks_list = []
- properties['networks'] = networks_list
- for network_param in vdu_dict['network_interfaces'].values():
- port = None
- if 'addresses' in network_param:
- ip_list = network_param.pop('addresses', [])
- if not isinstance(ip_list, list):
- raise vnfm.IPAddrInvalidInput()
- mgmt_flag = network_param.pop('management', False)
- port, template_dict =\
- self._handle_port_creation(vdu_id, network_param,
- template_dict,
- ip_list, mgmt_flag)
- if network_param.pop('management', False):
- port, template_dict = self._handle_port_creation(vdu_id,
- network_param,
- template_dict,
- [], True)
- if port is not None:
- network_param = {
- 'port': {'get_resource': port}
- }
- networks_list.append(dict(network_param))
- return vdu_dict, template_dict
-
- @log.log
- def _make_port_dict(self):
- port_dict = {'type': 'OS::Neutron::Port'}
- if self.unsupported_props:
- port_dict['properties'] = {
- 'value_specs': {
- 'port_security_enabled': False
- }
- }
- else:
- port_dict['properties'] = {
- 'port_security_enabled': False
- }
- port_dict['properties'].setdefault('fixed_ips', [])
- return port_dict
-
- @log.log
- def _make_mgmt_outputs_dict(self, vdu_id, port, template_dict):
- mgmt_ip = 'mgmt_ip-%s' % vdu_id
- outputs_dict = template_dict['outputs']
- outputs_dict[mgmt_ip] = {
- 'description': 'management ip address',
- 'value': {
- 'get_attr': [port, 'fixed_ips', 0, 'ip_address']
- }
- }
- template_dict['outputs'] = outputs_dict
- return template_dict
-
- @log.log
- def _handle_port_creation(self, vdu_id, network_param,
- template_dict, ip_list=None,
- mgmt_flag=False):
- ip_list = ip_list or []
- port = '%s-%s-port' % (vdu_id, network_param['network'])
- port_dict = self._make_port_dict()
- if mgmt_flag:
- template_dict = self._make_mgmt_outputs_dict(vdu_id, port,
- template_dict)
- for ip in ip_list:
- port_dict['properties']['fixed_ips'].append({"ip_address": ip})
- port_dict['properties'].update(network_param)
- template_dict['resources'][port] = port_dict
- return port, template_dict
-
@log.log
def _get_unsupported_resource_props(self, heat_client):
unsupported_resource_props = {}
@@ -266,12 +166,6 @@ class TOSCAToHOT(object):
LOG.error("Params not Well Formed: %s", str(e))
raise vnfm.ParamYAMLNotWellFormed(error_msg_details=str(e))
- appmonitoring_dict = \
- toscautils.get_vdu_applicationmonitoring(vnfd_dict)
-
- block_storage_details = toscautils.get_block_storage_details(
- vnfd_dict)
- toscautils.updateimports(vnfd_dict)
if 'substitution_mappings' in str(vnfd_dict):
toscautils.check_for_substitution_mappings(
vnfd_dict,
@@ -289,21 +183,6 @@ class TOSCAToHOT(object):
raise vnfm.ToscaParserFailed(error_msg_details=str(e))
unique_id = uuidutils.generate_uuid()
- metadata = toscautils.get_vdu_metadata(tosca, unique_id=unique_id)
- for policy in tosca.policies:
- if policy.entity_tpl['type'] == constants.POLICY_RESERVATION:
- metadata = toscautils.get_metadata_for_reservation(
- tosca, metadata)
- break
-
- alarm_resources = toscautils.pre_process_alarm_resources(
- self.vnf, tosca, metadata, unique_id=unique_id)
- monitoring_dict = toscautils.get_vdu_monitoring(tosca)
- mgmt_ports = toscautils.get_mgmt_ports(tosca)
- res_tpl = toscautils.get_resources_dict(tosca,
- self.STACK_FLAVOR_EXTRA)
- toscautils.post_process_template(tosca)
- scaling_policy_names = toscautils.get_scaling_policy(tosca)
try:
translator = tosca_translator.TOSCATranslator(tosca, parsed_params)
@@ -329,27 +208,15 @@ class TOSCAToHOT(object):
if self.nested_resources:
nested_tpl = toscautils.update_nested_scaling_resources(
- self.nested_resources,
- mgmt_ports, metadata, res_tpl, self.unsupported_props,
+ self.nested_resources, self.unsupported_props,
grant_info=grant_info, inst_req_info=inst_req_info)
self.fields['files'] = nested_tpl
for nested_resource_name in nested_tpl.keys():
self.vnf['attributes'][nested_resource_name] =\
nested_tpl[nested_resource_name]
- mgmt_ports.clear()
-
- if scaling_policy_names:
- scaling_group_dict = toscautils.get_scaling_group_dict(
- heat_template_yaml, scaling_policy_names)
- self.vnf['attributes']['scaling_group_names'] =\
- jsonutils.dump_as_bytes(scaling_group_dict)
-
- if self.vnf['attributes'].get('maintenance', None):
- toscautils.add_maintenance_resources(tosca, res_tpl)
heat_template_yaml = toscautils.post_process_heat_template(
- heat_template_yaml, mgmt_ports, metadata, alarm_resources,
- res_tpl, block_storage_details, self.unsupported_props,
+ heat_template_yaml, self.unsupported_props,
unique_id=unique_id, inst_req_info=inst_req_info,
grant_info=grant_info, tosca=tosca)
@@ -358,8 +225,7 @@ class TOSCAToHOT(object):
self.nested_resources[nested_resource_name] = \
toscautils.post_process_heat_template_for_scaling(
self.nested_resources[nested_resource_name],
- mgmt_ports, metadata, alarm_resources,
- res_tpl, block_storage_details, self.unsupported_props,
+ self.unsupported_props,
unique_id=unique_id, inst_req_info=inst_req_info,
grant_info=grant_info, tosca=tosca)
except Exception as e:
@@ -368,9 +234,6 @@ class TOSCAToHOT(object):
raise
self.heat_template_yaml = heat_template_yaml
- self.monitoring_dict = monitoring_dict
- self.metadata = metadata
- self.appmonitoring_dict = appmonitoring_dict
@log.log
def represent_odict(self, dump, tag, mapping, flow_style=None):
diff --git a/tacker/vnfm/infra_drivers/openstack/vdu.py b/tacker/vnfm/infra_drivers/openstack/vdu.py
deleted file mode 100644
index ef7041b04..000000000
--- a/tacker/vnfm/infra_drivers/openstack/vdu.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2018 NTT DATA
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from tacker.plugins.common import constants
-from tacker.vnfm.infra_drivers.openstack import heat_client as hc
-from tacker.vnfm import utils as vnfm_utils
-from tacker.vnfm import vim_client
-
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class Vdu(object):
- def __init__(self, context, vnf_dict, heal_request_data_obj):
- super(Vdu, self).__init__()
- self.context = context
- self.vnf_dict = vnf_dict
- self.heal_request_data_obj = heal_request_data_obj
- self.stack_id = self.heal_request_data_obj.stack_id
- vim_id = self.vnf_dict['vim_id']
- vim_res = vim_client.VimClient().get_vim(context, vim_id)
- placement_attr = vnf_dict.get('placement_attr', {})
- auth_attr = vim_res['vim_auth']
- region_name = placement_attr.get('region_name', None)
- self.heat_client = hc.HeatClient(auth_attr=auth_attr,
- region_name=region_name)
-
- def _get_resource_status(self, stack_id, rsc_name):
- # Get the status of VDU resource from heat
- vdu_resource = self.heat_client.resource_get(stack_id=stack_id,
- rsc_name=rsc_name)
- return vdu_resource.resource_status
-
- def _resource_mark_unhealthy(self):
- """Mark the resource unhealthy using heat."""
-
- additional_params = self.heal_request_data_obj.\
- legacy_additional_params
- for additional_param in additional_params:
- resource_name = additional_param.parameter
- res_status = self._get_resource_status(self.stack_id,
- resource_name)
- if res_status != 'CHECK_FAILED':
- self.heat_client.resource_mark_unhealthy(
- stack_id=self.stack_id,
- resource_name=resource_name, mark_unhealthy=True,
- resource_status_reason=additional_param.cause)
- LOG.debug("Heat stack '%s' resource '%s' marked as "
- "unhealthy", self.stack_id,
- resource_name)
- evt_details = (("HealVnfRequest invoked to mark resource "
- "'%s' to unhealthy.") % resource_name)
- vnfm_utils.log_events(self.context, self.vnf_dict,
- constants.RES_EVT_HEAL,
- evt_details)
- else:
- LOG.debug("Heat stack '%s' resource '%s' already mark "
- "unhealthy.", self.stack_id,
- resource_name)
-
- def heal_vdu(self):
- """Update stack using heat.
-
- This will re-create the resource which are mark as unhealthy.
- """
-
- # Mark all the resources as unhealthy
- self._resource_mark_unhealthy()
- self.heat_client.update(stack_id=self.stack_id,
- existing=True)
- LOG.debug("Heat stack '%s' update initiated to revive "
- "unhealthy resources.", self.stack_id)
- evt_details = (("HealVnfRequest invoked to update the stack "
- "'%s'") % self.stack_id)
- vnfm_utils.log_events(self.context, self.vnf_dict,
- constants.RES_EVT_HEAL, evt_details)
diff --git a/tacker/vnfm/mgmt_drivers/abstract_driver.py b/tacker/vnfm/mgmt_drivers/abstract_driver.py
deleted file mode 100644
index f4dd7b108..000000000
--- a/tacker/vnfm/mgmt_drivers/abstract_driver.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2013, 2014 Intel Corporation.
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from tacker.api import extensions
-
-
-class VnfMGMTAbstractDriver(extensions.PluginInterface, metaclass=abc.ABCMeta):
-
- @abc.abstractmethod
- def get_type(self):
- """Return one of predefined type of the hosting vnf drivers."""
- pass
-
- @abc.abstractmethod
- def get_name(self):
- """Return a symbolic name for the service VM plugin."""
- pass
-
- @abc.abstractmethod
- def get_description(self):
- pass
-
- def mgmt_create_pre(self, plugin, context, vnf):
- pass
-
- def mgmt_create_post(self, plugin, context, vnf):
- pass
-
- def mgmt_update_pre(self, plugin, context, vnf):
- pass
-
- def mgmt_update_post(self, plugin, context, vnf):
- pass
-
- def mgmt_delete_pre(self, plugin, context, vnf):
- pass
-
- def mgmt_delete_post(self, plugin, context, vnf):
- pass
-
- def mgmt_get_config(self, plugin, context, vnf):
- """Get a dict of objects.
-
- Returns dict of file-like objects which will be passed to hosting
- vnf.
- It depends on drivers how to use it.
- for nova case, it can be used for meta data, file injection or
- config drive
- i.e.
- metadata case: nova --meta =
- file injection case: nova --file :
- config drive case: nova --config-drive=true --file \
- :
- """
- return {}
-
- @abc.abstractmethod
- def mgmt_ip_address(self, plugin, context, vnf):
- pass
-
- @abc.abstractmethod
- def mgmt_call(self, plugin, context, vnf, kwargs):
- pass
diff --git a/tacker/vnfm/mgmt_drivers/noop.py b/tacker/vnfm/mgmt_drivers/noop.py
deleted file mode 100644
index fc7b09721..000000000
--- a/tacker/vnfm/mgmt_drivers/noop.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2013, 2014 Intel Corporation.
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-
-from tacker.vnfm.mgmt_drivers import abstract_driver
-
-
-LOG = logging.getLogger(__name__)
-
-
-class VnfMgmtNoop(abstract_driver.VnfMGMTAbstractDriver):
- def get_type(self):
- return 'noop'
-
- def get_name(self):
- return 'noop'
-
- def get_description(self):
- return 'Tacker VNFMgmt Noop Driver'
-
- def mgmt_ip_address(self, plugin, context, vnf):
- LOG.debug('mgmt_ip_address %s', vnf)
- return 'noop-mgmt-url'
-
- def mgmt_call(self, plugin, context, vnf, kwargs):
- LOG.debug('mgmt_call %(vnf)s %(kwargs)s',
- {'vnf': vnf, 'kwargs': kwargs})
diff --git a/tacker/vnfm/mgmt_drivers/openwrt/__init__.py b/tacker/vnfm/mgmt_drivers/openwrt/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/mgmt_drivers/openwrt/openwrt.py b/tacker/vnfm/mgmt_drivers/openwrt/openwrt.py
deleted file mode 100644
index 1aeafcb30..000000000
--- a/tacker/vnfm/mgmt_drivers/openwrt/openwrt.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2015 Intel Corporation.
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-import yaml
-
-from tacker._i18n import _
-from tacker.common import cmd_executer
-from tacker.common import exceptions
-from tacker.common import log
-from tacker.vnfm.mgmt_drivers import abstract_driver
-from tacker.vnfm.mgmt_drivers import constants as mgmt_constants
-
-
-LOG = logging.getLogger(__name__)
-OPTS = [
- cfg.StrOpt('user', default='root', help=_('User name to login openwrt')),
- cfg.StrOpt('password', default='', help=_('Password to login openwrt')),
-]
-cfg.CONF.register_opts(OPTS, 'openwrt')
-
-
-def config_opts():
- return [('openwrt', OPTS)]
-
-
-class VnfMgmtOpenWRT(abstract_driver.VnfMGMTAbstractDriver):
- def get_type(self):
- return 'openwrt'
-
- def get_name(self):
- return 'openwrt'
-
- def get_description(self):
- return 'Tacker VNFMgmt OpenWRT Driver'
-
- def mgmt_ip_address(self, plugin, context, vnf):
- LOG.debug('mgmt_ip_address %s', vnf)
- return vnf.get('mgmt_ip_address', '')
-
- @log.log
- def _config_service(self, mgmt_ip_address, service, config):
- user = cfg.CONF.openwrt.user
- password = cfg.CONF.openwrt.password
- package = service
- if service == "dhcp":
- package = "dnsmasq"
- try:
- cmd = "uci import %s; /etc/init.d/%s restart" % (service, package)
- LOG.debug('execute command: %(cmd)s on mgmt_ip_address '
- '%(mgmt_ip)s',
- {'cmd': cmd,
- 'mgmt_ip': mgmt_ip_address})
- commander = cmd_executer.RemoteCommandExecutor(
- user, password, mgmt_ip_address)
- commander.execute_command(cmd, input_data=config)
- except Exception as ex:
- LOG.error("While executing command on remote "
- "%(mgmt_ip)s: %(exception)s",
- {'mgmt_ip': mgmt_ip_address,
- 'exception': ex})
- raise exceptions.MgmtDriverException()
-
- @log.log
- def mgmt_call(self, plugin, context, vnf, kwargs):
- if (kwargs[mgmt_constants.KEY_ACTION] !=
- mgmt_constants.ACTION_UPDATE_VNF):
- return
- dev_attrs = vnf.get('attributes', {})
-
- mgmt_ip_address = jsonutils.loads(vnf.get('mgmt_ip_address', '{}'))
- if not mgmt_ip_address:
- return
-
- vdus_config = dev_attrs.get('config', '')
- config_yaml = yaml.safe_load(vdus_config)
- if not config_yaml:
- return
- vdus_config_dict = config_yaml.get('vdus', {})
- for vdu, vdu_dict in vdus_config_dict.items():
- config = vdu_dict.get('config', {})
- for key, conf_value in config.items():
- KNOWN_SERVICES = ('firewall', 'network', 'dhcp', 'qos')
- if key not in KNOWN_SERVICES:
- continue
- mgmt_ip_address = mgmt_ip_address.get(vdu, '')
- if not mgmt_ip_address:
- LOG.warning('tried to configure unknown mgmt '
- 'address on VNF %(vnf)s VDU %(vdu)s',
- {'vnf': vnf.get('name'),
- 'vdu': vdu})
- continue
-
- if isinstance(mgmt_ip_address, list):
- for ip_address in mgmt_ip_address:
- self._config_service(ip_address, key, conf_value)
- else:
- self._config_service(mgmt_ip_address, key, conf_value)
diff --git a/tacker/vnfm/monitor.py b/tacker/vnfm/monitor.py
deleted file mode 100644
index 49035fcc3..000000000
--- a/tacker/vnfm/monitor.py
+++ /dev/null
@@ -1,469 +0,0 @@
-# Copyright 2015 Intel Corporation.
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ast
-import copy
-import inspect
-import random
-import string
-import threading
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-
-from tacker._i18n import _
-from tacker.common import driver_manager
-from tacker.common import exceptions
-from tacker import context as t_context
-from tacker.plugins.common import constants
-from tacker.vnfm import utils as vnfm_utils
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-OPTS = [
- cfg.IntOpt('check_intvl',
- default=10,
- help=_("check interval for monitor")),
-]
-CONF.register_opts(OPTS, group='monitor')
-
-
-def config_opts():
- return [('monitor', OPTS),
- ('tacker', VNFMonitor.OPTS),
- ('tacker', VNFAlarmMonitor.OPTS),
- ('tacker', VNFAppMonitor.OPTS)]
-
-
-class VNFMonitor(object):
- """VNF Monitor."""
-
- _instance = None
- _hosting_vnfs = dict() # vnf_id => dict of parameters
- _status_check_intvl = 0
- _lock = threading.RLock()
-
- OPTS = [
- cfg.ListOpt(
- 'monitor_driver', default=['ping', 'http_ping'],
- help=_('Monitor driver to communicate with '
- 'Hosting VNF/logical service '
- 'instance tacker plugin will use')),
- ]
- cfg.CONF.register_opts(OPTS, 'tacker')
-
- def __new__(cls, boot_wait, check_intvl=None):
- if not cls._instance:
- cls._instance = super(VNFMonitor, cls).__new__(cls)
- return cls._instance
-
- def __init__(self, boot_wait, check_intvl=None):
- self._monitor_manager = driver_manager.DriverManager(
- 'tacker.tacker.monitor.drivers',
- cfg.CONF.tacker.monitor_driver)
-
- self.boot_wait = boot_wait
- if check_intvl is None:
- check_intvl = cfg.CONF.monitor.check_intvl
- self._status_check_intvl = check_intvl
- LOG.debug('Spawning VNF monitor thread')
- threading.Thread(target=self.__run__).start()
-
- def __run__(self):
- while(1):
- time.sleep(self._status_check_intvl)
-
- with self._lock:
- for hosting_vnf in VNFMonitor._hosting_vnfs.values():
- if hosting_vnf.get('dead', False) or (
- hosting_vnf['vnf']['status'] ==
- constants.PENDING_HEAL):
- LOG.debug(
- 'monitor skips for DEAD/PENDING_HEAL vnf %s',
- hosting_vnf)
- continue
- try:
- self.run_monitor(hosting_vnf)
- except Exception as ex:
- LOG.exception("Unknown exception: Monitoring failed "
- "for VNF '%s' due to '%s' ",
- hosting_vnf['id'], ex)
-
- @staticmethod
- def to_hosting_vnf(vnf_dict, action_cb):
- return {
- 'id': vnf_dict['id'],
- 'mgmt_ip_addresses': jsonutils.loads(
- vnf_dict['mgmt_ip_address']),
- 'action_cb': action_cb,
- 'vnf': vnf_dict,
- 'monitoring_policy': jsonutils.loads(
- vnf_dict['attributes']['monitoring_policy'])
- }
-
- def add_hosting_vnf(self, new_vnf):
- LOG.debug('Adding host %(id)s, Mgmt IP %(ips)s',
- {'id': new_vnf['id'],
- 'ips': new_vnf['mgmt_ip_addresses']})
- new_vnf['boot_at'] = timeutils.utcnow()
- with self._lock:
- VNFMonitor._hosting_vnfs[new_vnf['id']] = new_vnf
-
- attrib_dict = new_vnf['vnf']['attributes']
- mon_policy_dict = attrib_dict['monitoring_policy']
- evt_details = (("VNF added for monitoring. "
- "mon_policy_dict = %s,") % (mon_policy_dict))
- vnfm_utils.log_events(t_context.get_admin_context(),
- new_vnf['vnf'],
- constants.RES_EVT_MONITOR, evt_details)
-
- def delete_hosting_vnf(self, vnf_id):
- LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
- with self._lock:
- hosting_vnf = VNFMonitor._hosting_vnfs.pop(vnf_id, None)
- if hosting_vnf:
- LOG.debug('deleting vnf_id %(vnf_id)s, Mgmt IP %(ips)s',
- {'vnf_id': vnf_id,
- 'ips': hosting_vnf['mgmt_ip_addresses']})
-
- def update_hosting_vnf(self, updated_vnf_dict, evt_details=None):
- with self._lock:
- vnf_to_update = VNFMonitor._hosting_vnfs.get(
- updated_vnf_dict.get('id'))
- if vnf_to_update:
- updated_vnf = copy.deepcopy(updated_vnf_dict)
- vnf_to_update['vnf'] = updated_vnf
- vnf_to_update['mgmt_ip_addresses'] = jsonutils.loads(
- updated_vnf_dict['mgmt_ip_address'])
-
- if evt_details is not None:
- vnfm_utils.log_events(t_context.get_admin_context(),
- vnf_to_update['vnf'],
- constants.RES_EVT_HEAL,
- evt_details=evt_details)
-
- def run_monitor(self, hosting_vnf):
- mgmt_ips = hosting_vnf['mgmt_ip_addresses']
- vdupolicies = hosting_vnf['monitoring_policy']['vdus']
-
- vnf_delay = hosting_vnf['monitoring_policy'].get(
- 'monitoring_delay', self.boot_wait)
-
- for vdu in vdupolicies:
- if hosting_vnf.get('dead') or (
- hosting_vnf['vnf']['status']) == constants.PENDING_HEAL:
- return
-
- policy = vdupolicies[vdu]
- for driver in policy:
- params = policy[driver].get('monitoring_params', {})
-
- vdu_delay = params.get('monitoring_delay', vnf_delay)
-
- if not timeutils.is_older_than(hosting_vnf['boot_at'],
- vdu_delay):
- continue
-
- actions = policy[driver].get('actions', {})
- params['mgmt_ip'] = mgmt_ips[vdu]
-
- driver_return = self.monitor_call(driver,
- hosting_vnf['vnf'],
- params)
-
- LOG.debug('driver_return %s', driver_return)
-
- if driver_return in actions:
- action = actions[driver_return]
- hosting_vnf['action_cb'](action, vdu_name=vdu)
-
- def mark_dead(self, vnf_id):
- VNFMonitor._hosting_vnfs[vnf_id]['dead'] = True
-
- def _invoke(self, driver, **kwargs):
- method = inspect.stack()[1][3]
- return self._monitor_manager.invoke(
- driver, method, **kwargs)
-
- def monitor_get_config(self, vnf_dict):
- return self._invoke(
- vnf_dict, monitor=self, vnf=vnf_dict)
-
- def monitor_url(self, vnf_dict):
- return self._invoke(
- vnf_dict, monitor=self, vnf=vnf_dict)
-
- def monitor_call(self, driver, vnf_dict, kwargs):
- return self._invoke(driver,
- vnf=vnf_dict, kwargs=kwargs)
-
-
-class VNFAppMonitor(object):
- """VNF App monitor"""
- OPTS = [
- cfg.ListOpt(
- 'app_monitor_driver', default=['zabbix'],
- help=_('App monitoring driver to communicate with '
- 'Hosting VNF/logical service '
- 'instance tacker plugin will use')),
- ]
- cfg.CONF.register_opts(OPTS, 'tacker')
-
- def __init__(self):
- self._application_monitor_manager = driver_manager.DriverManager(
- 'tacker.tacker.app_monitor.drivers',
- cfg.CONF.tacker.app_monitor_driver)
-
- def _create_app_monitoring_dict(self, dev_attrs, mgmt_ip_address):
- app_policy = 'app_monitoring_policy'
- appmonitoring_dict = ast.literal_eval(
- dev_attrs[app_policy].decode('utf-8'))
- vdulist = appmonitoring_dict['vdus'].keys()
-
- for vduname in vdulist:
- temp = ast.literal_eval(mgmt_ip_address)
- appmonitoring_dict['vdus'][vduname]['mgmt_ip'] = temp[vduname]
- return appmonitoring_dict
-
- def create_app_dict(self, context, vnf_dict):
- dev_attrs = vnf_dict['attributes']
- mgmt_ip_address = vnf_dict['mgmt_ip_address'].decode("utf-8")
- return self._create_app_monitoring_dict(dev_attrs, mgmt_ip_address)
-
- def _invoke(self, driver, **kwargs):
- method = inspect.stack()[1][3]
- return self._application_monitor_manager.\
- invoke(driver, method, **kwargs)
-
- def add_to_appmonitor(self, applicationvnfdict, vnf_dict):
- vdunode = applicationvnfdict['vdus'].keys()
- driver = applicationvnfdict['vdus'][vdunode[0]]['name']
- kwargs = applicationvnfdict
- return self._invoke(driver, vnf=vnf_dict, kwargs=kwargs)
-
-
-class VNFAlarmMonitor(object):
- """VNF Alarm monitor"""
- OPTS = [
- cfg.ListOpt(
- 'alarm_monitor_driver', default=['ceilometer'],
- help=_('Alarm monitoring driver to communicate with '
- 'Hosting VNF/logical service '
- 'instance tacker plugin will use')),
- ]
- cfg.CONF.register_opts(OPTS, 'tacker')
-
- # get alarm here
- def __init__(self):
- self._alarm_monitor_manager = driver_manager.DriverManager(
- 'tacker.tacker.alarm_monitor.drivers',
- cfg.CONF.tacker.alarm_monitor_driver)
-
- def update_vnf_with_alarm(self, plugin, context, vnf, policy_dict):
- triggers = policy_dict['triggers']
- alarm_url = dict()
- for trigger_name, trigger_dict in triggers.items():
- params = dict()
- params['vnf_id'] = vnf['id']
- params['mon_policy_name'] = trigger_name
- driver = trigger_dict['event_type']['implementation']
- # TODO(Tung Doan) trigger_dict.get('actions') needs to be used
- policy_action = trigger_dict.get('action')
- if len(policy_action) == 0:
- vnfm_utils.log_events(t_context.get_admin_context(), vnf,
- constants.RES_EVT_MONITOR,
- "Alarm not set: policy action missing")
- return
- # Other backend policies with the construct (policy, action)
- # ex: (SP1, in), (SP1, out)
-
- def _refactor_backend_policy(bk_policy_name, bk_action_name):
- policy = '%(policy_name)s-%(action_name)s' % {
- 'policy_name': bk_policy_name,
- 'action_name': bk_action_name}
- return policy
-
- for index, policy_action_name in enumerate(policy_action):
- filters = {'name': policy_action_name}
- bkend_policies =\
- plugin.get_vnf_policies(context, vnf['id'], filters)
- if bkend_policies:
- bkend_policy = bkend_policies[0]
- if bkend_policy['type'] == constants.POLICY_SCALING:
- cp = trigger_dict['condition'].\
- get('comparison_operator')
- scaling_type = 'out' if cp == 'gt' else 'in'
- policy_action[index] = _refactor_backend_policy(
- policy_action_name, scaling_type)
-
- # Support multiple action. Ex: respawn % notify
- action_name = '%'.join(policy_action)
-
- params['mon_policy_action'] = action_name
- alarm_url[trigger_name] =\
- self.call_alarm_url(driver, vnf, params)
- details = "Alarm URL set successfully: %s" % alarm_url
- vnfm_utils.log_events(t_context.get_admin_context(), vnf,
- constants.RES_EVT_MONITOR, details)
- return alarm_url
-
- def process_alarm_for_vnf(self, vnf, trigger):
- """call in plugin"""
- params = trigger['params']
- mon_prop = trigger['trigger']
- alarm_dict = dict()
- alarm_dict['alarm_id'] = params['data'].get('alarm_id')
- alarm_dict['status'] = params['data'].get('current')
- trigger_name, trigger_dict = list(mon_prop.items())[0]
- driver = trigger_dict['event_type']['implementation']
- return self.process_alarm(driver, vnf, alarm_dict)
-
- def _invoke(self, driver, **kwargs):
- method = inspect.stack()[1][3]
- return self._alarm_monitor_manager.invoke(
- driver, method, **kwargs)
-
- def call_alarm_url(self, driver, vnf_dict, kwargs):
- return self._invoke(driver,
- vnf=vnf_dict, kwargs=kwargs)
-
- def process_alarm(self, driver, vnf_dict, kwargs):
- return self._invoke(driver,
- vnf=vnf_dict, kwargs=kwargs)
-
-
-class VNFReservationAlarmMonitor(VNFAlarmMonitor):
- """VNF Reservation Alarm monitor"""
-
- def update_vnf_with_reservation(self, plugin, context, vnf, policy_dict):
-
- alarm_url = dict()
-
- def create_alarm_action(action, action_list, scaling_type):
- params = dict()
- params['vnf_id'] = vnf['id']
- params['mon_policy_name'] = action
- driver = 'ceilometer'
-
- def _refactor_backend_policy(bk_policy_name, bk_action_name):
- policy = '%(policy_name)s%(action_name)s' % {
- 'policy_name': bk_policy_name,
- 'action_name': bk_action_name}
- return policy
-
- for index, policy_action_name in enumerate(action_list):
- filters = {'name': policy_action_name}
- bkend_policies = \
- plugin.get_vnf_policies(context, vnf['id'], filters)
- if bkend_policies:
- if constants.POLICY_SCALING in str(bkend_policies[0]):
- action_list[index] = _refactor_backend_policy(
- policy_action_name, scaling_type)
-
- # Support multiple action. Ex: respawn % notify
- action_name = '%'.join(action_list)
- params['mon_policy_action'] = action_name
- alarm_url[action] = \
- self.call_alarm_url(driver, vnf, params)
- details = "Alarm URL set successfully: %s" % alarm_url
- vnfm_utils.log_events(t_context.get_admin_context(), vnf,
- constants.RES_EVT_MONITOR,
- details)
-
- before_end_action = policy_dict['reservation']['before_end_actions']
- end_action = policy_dict['reservation']['end_actions']
- start_action = policy_dict['reservation']['start_actions']
-
- scaling_policies = \
- plugin.get_vnf_policies(
- context, vnf['id'], filters={
- 'type': constants.POLICY_SCALING})
-
- if len(scaling_policies) == 0:
- raise exceptions.VnfPolicyNotFound(
- policy=constants.POLICY_SCALING, vnf_id=vnf['id'])
-
- for scaling_policy in scaling_policies:
- # validating start_action for scale-out policy action
- if scaling_policy['name'] not in start_action:
- raise exceptions.Invalid(
- 'Not a valid template: start_action must contain'
- ' %s as scaling-out action' % scaling_policy['name'])
-
- # validating before_end and end_actions for scale-in policy action
- if scaling_policy['name'] not in before_end_action:
- if scaling_policy['name'] not in end_action:
- raise exceptions.Invalid(
- 'Not a valid template:'
- ' before_end_action or end_action'
- ' should contain scaling policy: %s'
- % scaling_policy['name'])
-
- for action in constants.RESERVATION_POLICY_ACTIONS:
- scaling_type = "-out" if action == 'start_actions' else "-in"
- create_alarm_action(action, policy_dict[
- 'reservation'][action], scaling_type)
-
- return alarm_url
-
- def process_alarm_for_vnf(self, vnf, trigger):
- """call in plugin"""
- params = trigger['params']
- alarm_dict = dict()
- alarm_dict['alarm_id'] = params['data'].get('alarm_id')
- alarm_dict['status'] = params['data'].get('current')
- driver = 'ceilometer'
- return self.process_alarm(driver, vnf, alarm_dict)
-
-
-class VNFMaintenanceAlarmMonitor(VNFAlarmMonitor):
- """VNF Maintenance Alarm monitor"""
-
- def update_vnf_with_maintenance(self, vnf, vdu_names):
- maintenance = dict()
- vdus = dict()
- params = dict()
- params['vnf_id'] = vnf['id']
- params['mon_policy_name'] = 'maintenance'
- params['mon_policy_action'] = vnf['tenant_id']
- driver = 'ceilometer'
-
- url = self.call_alarm_url(driver, vnf, params)
- maintenance['url'] = url[:url.rindex('/')]
- vdu_names.append('ALL')
- for vdu in vdu_names:
- access_key = ''.join(
- random.SystemRandom().choice(
- string.ascii_lowercase + string.digits)
- for _ in range(8))
- vdus[vdu] = access_key
- maintenance.update({'vdus': vdus})
- details = "Alarm URL set successfully: %s" % maintenance['url']
- vnfm_utils.log_events(t_context.get_admin_context(), vnf,
- constants.RES_EVT_MONITOR, details)
- return maintenance
-
- def process_alarm_for_vnf(self, vnf, trigger):
- """call in plugin"""
- params = trigger['params']
- alarm_dict = dict()
- alarm_dict['alarm_id'] = params['data'].get('alarm_id')
- alarm_dict['status'] = params['data'].get('current')
- driver = 'ceilometer'
- return self.process_alarm(driver, vnf, alarm_dict)
diff --git a/tacker/vnfm/monitor_drivers/__init__.py b/tacker/vnfm/monitor_drivers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/monitor_drivers/abstract_driver.py b/tacker/vnfm/monitor_drivers/abstract_driver.py
deleted file mode 100644
index 6195b9e11..000000000
--- a/tacker/vnfm/monitor_drivers/abstract_driver.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from tacker.api import extensions
-
-
-class VNFMonitorAbstractDriver(extensions.PluginInterface,
- metaclass=abc.ABCMeta):
-
- @abc.abstractmethod
- def get_type(self):
- """Return one of predefined type of the hosting vnf drivers."""
- pass
-
- @abc.abstractmethod
- def get_name(self):
- """Return a symbolic name for the VNF Monitor plugin."""
- pass
-
- @abc.abstractmethod
- def get_description(self):
- """Return description of VNF Monitor plugin."""
- pass
-
- def monitor_get_config(self, plugin, context, vnf):
- """Return dict of monitor configuration data.
-
- :param plugin:
- :param context:
- :param vnf:
- :returns: dict
- :returns: dict of monitor configuration data
- """
- return {}
-
- @abc.abstractmethod
- def monitor_url(self, plugin, context, vnf):
- """Return the url of vnf to monitor.
-
- :param plugin:
- :param context:
- :param vnf:
- :returns: string
- :returns: url of vnf to monitor
- """
- pass
-
- @abc.abstractmethod
- def monitor_call(self, vnf, kwargs):
- """Monitor.
-
- Return boolean value True if VNF is healthy
- or return an event string like 'failure' or 'calls-capacity-reached'
- for specific VNF health condition.
-
- :param vnf:
- :param kwargs:
- :returns: boolean
- :returns: True if VNF is healthy
- """
- pass
-
- def monitor_service_driver(self, plugin, context, vnf,
- service_instance):
- # use same monitor driver to communicate with service
- return self.get_name()
diff --git a/tacker/vnfm/monitor_drivers/ceilometer/__init__.py b/tacker/vnfm/monitor_drivers/ceilometer/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/monitor_drivers/ceilometer/ceilometer.py b/tacker/vnfm/monitor_drivers/ceilometer/ceilometer.py
deleted file mode 100644
index 2e91eef0a..000000000
--- a/tacker/vnfm/monitor_drivers/ceilometer/ceilometer.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import netaddr
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import random
-import string
-from tacker._i18n import _
-from tacker.common import utils
-from tacker.vnfm.monitor_drivers import abstract_driver
-
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
- cfg.HostAddressOpt('host', default=utils.get_hostname(),
- help=_('Address which drivers use to trigger')),
- cfg.PortOpt('port', default=9890,
- help=_('port number which drivers use to trigger'))
-]
-cfg.CONF.register_opts(OPTS, group='ceilometer')
-
-
-def config_opts():
- return [('ceilometer', OPTS)]
-
-
-ALARM_INFO = (
- ALARM_ACTIONS, OK_ACTIONS, REPEAT_ACTIONS, ALARM,
- INSUFFICIENT_DATA_ACTIONS, DESCRIPTION, ENABLED, TIME_CONSTRAINTS,
- SEVERITY,
-) = ('alarm_actions', 'ok_actions', 'repeat_actions', 'alarm',
- 'insufficient_data_actions', 'description', 'enabled', 'time_constraints',
- 'severity',
- )
-
-
-class VNFMonitorCeilometer(
- abstract_driver.VNFMonitorAbstractDriver):
- def get_type(self):
- return 'ceilometer'
-
- def get_name(self):
- return 'ceilometer'
-
- def get_description(self):
- return 'Tacker VNFMonitor Ceilometer Driver'
-
- def _create_alarm_url(self, vnf_id, mon_policy_name, mon_policy_action):
- # alarm_url = 'http://host:port/v1.0/vnfs/vnf-uuid/monitoring-policy
- # -name/action-name?key=8785'
- host = cfg.CONF.ceilometer.host
- port = cfg.CONF.ceilometer.port
- LOG.info("Tacker in heat listening on %(host)s:%(port)s",
- {'host': host,
- 'port': port})
- origin = "http://%(host)s:%(port)s/v1.0/vnfs" % {
- 'host': host, 'port': port}
- if netaddr.valid_ipv6(host):
- origin = "http://[%(host)s]:%(port)s/v1.0/vnfs" % {
- 'host': host, 'port': port}
- access_key = ''.join(
- random.SystemRandom().choice(
- string.ascii_lowercase + string.digits)
- for _ in range(8))
- alarm_url = "".join([origin, '/', vnf_id, '/', mon_policy_name, '/',
- mon_policy_action, '/', access_key])
- return alarm_url
-
- def call_alarm_url(self, vnf, kwargs):
- '''must be used after call heat-create in plugin'''
- return self._create_alarm_url(**kwargs)
-
- def _process_alarm(self, alarm_id, status):
- if alarm_id and status == ALARM:
- return True
-
- def process_alarm(self, vnf, kwargs):
- '''Check alarm state. if available, will be processed'''
- return self._process_alarm(**kwargs)
-
- def monitor_url(self, plugin, context, vnf):
- pass
-
- def monitor_call(self, vnf, kwargs):
- pass
diff --git a/tacker/vnfm/monitor_drivers/http_ping/__init__.py b/tacker/vnfm/monitor_drivers/http_ping/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/monitor_drivers/http_ping/http_ping.py b/tacker/vnfm/monitor_drivers/http_ping/http_ping.py
deleted file mode 100644
index 612213d54..000000000
--- a/tacker/vnfm/monitor_drivers/http_ping/http_ping.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import netaddr
-from urllib import error as urlerr
-from urllib import request as urlreq
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from tacker._i18n import _
-from tacker.common import log
-from tacker.vnfm.monitor_drivers import abstract_driver
-
-
-LOG = logging.getLogger(__name__)
-OPTS = [
- cfg.IntOpt('retry', default=5,
- help=_('Number of times to retry')),
- cfg.IntOpt('timeout', default=1,
- help=_('Number of seconds to wait for a response')),
- cfg.IntOpt('port', default=80,
- help=_('HTTP port number to send request'))
-]
-cfg.CONF.register_opts(OPTS, 'monitor_http_ping')
-
-
-def config_opts():
- return [('monitor_http_ping', OPTS)]
-
-
-class VNFMonitorHTTPPing(abstract_driver.VNFMonitorAbstractDriver):
- def get_type(self):
- return 'http_ping'
-
- def get_name(self):
- return 'HTTP ping'
-
- def get_description(self):
- return 'Tacker HTTP Ping Driver for VNF'
-
- def monitor_url(self, plugin, context, vnf):
- LOG.debug('monitor_url %s', vnf)
- return vnf.get('monitor_url', '')
-
- def _is_pingable(self, mgmt_ip='', retry=5, timeout=5, port=80, **kwargs):
- """Checks whether the server is reachable by using urllib.
-
- Waits for connectivity for `timeout` seconds,
- and if connection refused, it will retry `retry`
- times.
- :param mgmt_ip: IP to check
- :param retry: times to reconnect if connection refused
- :param timeout: seconds to wait for connection
- :param port: port number to check connectivity
- :return: bool - True or False depending on pingability.
- """
- url = 'http://' + mgmt_ip + ':' + str(port)
- if netaddr.valid_ipv6(mgmt_ip):
- url = 'http://[' + mgmt_ip + ']:' + str(port)
-
- for retry_index in range(int(retry)):
- try:
- urlreq.urlopen(url, timeout=timeout)
- return True
- except urlerr.URLError:
- LOG.error('Unable to reach to the url %s', url)
- return 'failure'
-
- @log.log
- def monitor_call(self, vnf, kwargs):
- if not kwargs['mgmt_ip']:
- return
-
- return self._is_pingable(**kwargs)
diff --git a/tacker/vnfm/monitor_drivers/ping/__init__.py b/tacker/vnfm/monitor_drivers/ping/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/monitor_drivers/ping/ping.py b/tacker/vnfm/monitor_drivers/ping/ping.py
deleted file mode 100644
index 18a3408f3..000000000
--- a/tacker/vnfm/monitor_drivers/ping/ping.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import netaddr
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from tacker._i18n import _
-from tacker.agent.linux import utils as linux_utils
-from tacker.common import log
-from tacker.vnfm.monitor_drivers import abstract_driver
-
-
-LOG = logging.getLogger(__name__)
-OPTS = [
- cfg.IntOpt('count', default=5,
- help=_('Number of ICMP packets to send')),
- cfg.FloatOpt('timeout', default=5,
- help=_('Number of seconds to wait for a response')),
- cfg.FloatOpt('interval', default=1,
- help=_('Number of seconds to wait between packets')),
- cfg.IntOpt('retry', default=1,
- help=_('Number of ping retries'))
-]
-cfg.CONF.register_opts(OPTS, 'monitor_ping')
-
-
-def config_opts():
- return [('monitor_ping', OPTS)]
-
-
-class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
- def get_type(self):
- return 'ping'
-
- def get_name(self):
- return 'ping'
-
- def get_description(self):
- return 'Tacker VNFMonitor Ping Driver'
-
- def monitor_url(self, plugin, context, vnf):
- LOG.debug('monitor_url %s', vnf)
- return vnf.get('monitor_url', '')
-
- def _is_pingable(self, mgmt_ip="", count=None, timeout=None,
- interval=None, retry=None, **kwargs):
- """Checks whether an IP address is reachable by pinging.
-
- Use linux utils to execute the ping (ICMP ECHO) command.
- Sends 5 packets with an interval of 1 seconds and timeout of 1
- seconds. Runtime error implies unreachability else IP is pingable.
- :param ip: IP to check
- :return: bool - True or string 'failure' depending on pingability.
- """
- cmd_ping = 'ping'
- if netaddr.valid_ipv6(mgmt_ip):
- cmd_ping = 'ping6'
-
- if not count:
- count = cfg.CONF.monitor_ping.count
- if not timeout:
- timeout = cfg.CONF.monitor_ping.timeout
- if not interval:
- interval = cfg.CONF.monitor_ping.interval
- if not retry:
- retry = cfg.CONF.monitor_ping.retry
-
- ping_cmd = [cmd_ping,
- '-c', count,
- '-W', timeout,
- '-i', interval,
- mgmt_ip]
-
- for retry_range in range(int(retry)):
- try:
- linux_utils.execute(ping_cmd, check_exit_code=True)
- return True
- except RuntimeError:
- LOG.error("Cannot ping ip address: %s", mgmt_ip)
- return 'failure'
-
- @log.log
- def monitor_call(self, vnf, kwargs):
- if not kwargs['mgmt_ip']:
- return
-
- return self._is_pingable(**kwargs)
diff --git a/tacker/vnfm/monitor_drivers/token.py b/tacker/vnfm/monitor_drivers/token.py
deleted file mode 100644
index 8fafdaec7..000000000
--- a/tacker/vnfm/monitor_drivers/token.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from keystoneauth1.identity import v3
-from keystoneauth1 import session
-
-
-class Token(object):
- def __init__(self, username, password, project_name,
- auth_url, user_domain_name, project_domain_name):
- self.username = username
- self.password = password
- self.auth_url = auth_url
- self.project_name = project_name
- self.user_domain_name = user_domain_name
- self.project_domain_name = project_domain_name
-
- def create_token(self):
- auth = v3.Password(auth_url=self.auth_url,
- username=self.username,
- password=self.password,
- project_name=self.project_name,
- user_domain_name=self.user_domain_name,
- project_domain_name=self.project_domain_name)
- sess = session.Session(auth=auth)
- token_id = sess.auth.get_token(sess)
- return token_id
diff --git a/tacker/vnfm/monitor_drivers/zabbix/__init__.py b/tacker/vnfm/monitor_drivers/zabbix/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/monitor_drivers/zabbix/zabbix.py b/tacker/vnfm/monitor_drivers/zabbix/zabbix.py
deleted file mode 100644
index 9577e60b1..000000000
--- a/tacker/vnfm/monitor_drivers/zabbix/zabbix.py
+++ /dev/null
@@ -1,416 +0,0 @@
-# All Rights Reserved.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-import requests
-import time
-
-import copy
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from tacker.vnfm.monitor_drivers import abstract_driver
-from tacker.vnfm.monitor_drivers.zabbix import zabbix_api as zapi
-
-
-LOG = logging.getLogger(__name__)
-
-
-class VNFMonitorZabbix(abstract_driver.VNFMonitorAbstractDriver):
- params = ['application', 'OS']
-
- def __init__(self):
- self.kwargs = None
- self.vnf = None
- self.vduname = []
- self.URL = None
- self.hostinfo = {}
- self.tenant_id = None
-
- def get_type(self):
- """Return one of predefined type of the hosting vnf drivers."""
- plugin_type = 'zabbix'
- return plugin_type
-
- def get_name(self):
- """Return a symbolic name for the VNF Monitor plugin."""
- plugin_name = 'zabbix'
- return plugin_name
-
- def get_description(self):
- """Return description of VNF Monitor plugin."""
- plugin_descript = 'Tacker VNFMonitor Zabbix Driver'
- return plugin_descript
-
- def monitor_get_config(self, plugin, context, vnf):
- """Return dict of monitor configuration data.
-
- :param plugin:
- :param context:
- :param vnf:
- :returns: dict
- :returns: dict of monitor configuration data
- """
- return {}
-
- def monitor_url(self, plugin, context, vnf):
- """Return the url of vnf to monitor.
-
- :param plugin:
- :param context:
- :param vnf:
- :returns: string
- :returns: url of vnf to monitor
- """
- pass
-
- def send_post(self, query):
- response = requests.post(self.URL, headers=zapi.HEADERS,
- data=jsonutils.dump_as_bytes(query))
- return dict(response.json())
-
- @staticmethod
- def check_error(response):
- try:
- if 'result' not in response:
- raise ValueError
- except ValueError:
- LOG.error('Cannot request error : %s', response['error']['data'])
-
- def create_graph(self, itemid, name, nodename):
- temp_graph_api = copy.deepcopy(zapi.dGRAPH_CREATE_API)
- gitems = [{'itemid': itemid, 'color': '00AA00'}]
- temp_graph_api['auth'] = \
- self.hostinfo[nodename]['zbx_info']['zabbix_token']
- temp_graph_api['params']['gitems'] = gitems
- temp_graph_api['params']['name'] = name
- response = self.send_post(temp_graph_api)
- VNFMonitorZabbix.check_error(response)
-
- def create_action(self):
- for vdu in self.vduname:
- temp_action_api = copy.deepcopy(zapi.dACTION_CREATE_API)
- temp_action_api['auth'] = \
- self.hostinfo[vdu]['zbx_info']['zabbix_token']
- tempname_api = temp_action_api['params']['operations'][0]
- temp_filter = temp_action_api['params']['filter']
- for info in (self.hostinfo[vdu]['actioninfo']):
- tempname_api['opcommand_hst'][0]['hostid'] = \
- self.hostinfo[vdu]['hostid']
- now = time.localtime()
- rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
- temp_name = "Trigger Action " + \
- str(
- vdu + rtime + " " +
- info['item'] + " " + info['action']
- )
- temp_action_api['params']['name'] = temp_name
-
- if (info['action'] == 'cmd') and \
- (info['item'] != 'os_agent_info'):
-
- tempname_api['opcommand']['command'] = info['cmd-action']
-
- elif (info['item'] == 'os_agent_info') \
- and (info['action'] == 'cmd'):
-
- tempname_api['opcommand']['authtype'] = 0
- tempname_api['opcommand']['username'] = \
- self.hostinfo[vdu]['appinfo']['ssh_username']
- tempname_api['opcommand']['password'] = \
- self.hostinfo[vdu]['appinfo']['ssh_password']
- tempname_api['opcommand']['type'] = 2
- tempname_api['opcommand']['command'] = info['cmd-action']
- tempname_api['opcommand']['port'] = 22
-
- temp_filter['conditions'][0]['value'] = info['trigger_id']
- response = self.send_post(temp_action_api)
- VNFMonitorZabbix.check_error(response)
- continue
-
- temp_filter['conditions'][0]['value'] = info['trigger_id']
- response = self.send_post(temp_action_api)
- VNFMonitorZabbix.check_error(response)
-
- def create_vdu_host(self):
- for vdu in self.vduname:
- temp_host_api = zapi.dHOST_CREATE_API
- temp_group_api = zapi.dGROUP_GET_API
- temp_host_api['auth'] = \
- self.hostinfo[vdu]['zbx_info']['zabbix_token']
- temp_group_api['auth'] = \
- self.hostinfo[vdu]['zbx_info']['zabbix_token']
- response = self.send_post(temp_group_api)
- gid = response['result'][0]['groupid']
- temp_host_api['params']['host'] = str(vdu)
- if type(self.hostinfo[vdu]['mgmt_ip']) is list:
- for vduip in (self.hostinfo[vdu]['mgmt_ip']):
- temp_host_api['params']['interfaces'][0]['ip'] = vduip
- temp_host_api['params']['templates'][0]['templateid'] = \
- self.hostinfo[vdu]['template_id'][0]
- temp_host_api['params']['groups'][0]['groupid'] = gid
- response = self.send_post(temp_host_api)
- else:
- temp_host_api['params']['interfaces'][0]['ip'] = \
- self.hostinfo[vdu]['mgmt_ip']
- temp_host_api['params']['templates'][0]['templateid'] = \
- self.hostinfo[vdu]['template_id'][0]
- temp_host_api['params']['groups'][0]['groupid'] = gid
- response = self.send_post(temp_host_api)
- if 'error' in response:
- now = time.localtime()
- rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
- temp_host_api['params']['host'] = str(vdu) + rtime
- response = self.send_post(temp_host_api)
- self.hostinfo[vdu]['hostid'] = response['result']['hostids'][0]
-
- def create_trigger(self, trigger_params, vduname):
- temp_trigger_api = copy.deepcopy(zapi.dTRIGGER_CREATE_API)
- temp_trigger_api['auth'] = \
- self.hostinfo[vduname]['zbx_info']['zabbix_token']
- temp_trigger_api['params'] = trigger_params
- temp_trigger_api['templateid'] = \
- str(
- self.hostinfo[vduname]['template_id'][0])
- response = self.send_post(temp_trigger_api)
- VNFMonitorZabbix.check_error(response)
- return response['result']
-
- def _create_trigger(self):
-
- trigger_params = []
- trig_act_pa = []
- for vdu in self.vduname:
- temp_trigger_list = copy.deepcopy(zapi.dTRIGGER_LIST)
-
- temp_vdu_name = self.hostinfo[vdu]['appinfo']['app_name']
- temp_vdu_port = self.hostinfo[vdu]['appinfo']['app_port']
- for para in VNFMonitorZabbix.params:
- for item in self.hostinfo[vdu]['parameters'][para]:
- action_list = copy.deepcopy(zapi.dACTION_LIST)
- temp_item = self.hostinfo[vdu]['parameters'][para][item]
-
- if ('app_name' != item)\
- and ('app_port' != item) \
- and ('ssh_username' != item) \
- and ('ssh_password' != item):
-
- if 'condition' \
- in temp_item:
- temp_con = temp_item['condition']
-
- if len(temp_con) == 2:
- temp_comparrision = temp_con[0]
- temp_comparrision_value = temp_con[1]
- temp_trigger_list[item][0]['expression'] += \
- self.hostinfo[vdu]['template_name'] + ':'\
- + str(
- zapi.dITEM_KEY_COMP[item].replace(
- '*', str(temp_vdu_name))) \
- + str(
- zapi.COMP_VALUE[temp_comparrision]) \
- + str(
- temp_comparrision_value)
-
- else:
- temp_comparrision = temp_con[0]
- if 'os_agent_info' == item:
- temp_trigger_list[item][0]['expression'] += \
- self.hostinfo[vdu]['template_name'] + ':' \
- + str(zapi.dITEM_KEY_COMP[item])
-
- else:
- temp_trigger_list[item][0]['expression'] += \
- self.hostinfo[vdu]['template_name'] + ':' \
- + str(
- zapi.dITEM_KEY_COMP[item].replace(
- '*', str(temp_vdu_port))) \
- + str(
- zapi.COMP_VALUE[temp_comparrision])
- if 'actionname' in temp_item:
- trig_act_pa.append(temp_trigger_list[item][0])
- response = self.create_trigger(trig_act_pa, vdu)
- del trig_act_pa[:]
- action_list['action'] = \
- temp_item['actionname']
- action_list['trigger_id'] = \
- response['triggerids'][0]
- action_list['item'] = item
- if 'cmd' == \
- temp_item['actionname']:
-
- action_list['cmd-action'] = \
- temp_item['cmd-action']
- self.hostinfo[vdu]['actioninfo'].append(
- action_list)
-
- else:
- trigger_params.append(
- temp_trigger_list[item][0])
-
- if len(trigger_params) != 0:
- self.create_trigger(trigger_params, vdu)
- del trigger_params[:]
-
- def create_item(self):
- # Create _ITEM
- for vdu in self.vduname:
- temp_item_api = copy.deepcopy(zapi.dITEM_CREATE_API)
- temp_item_api['auth'] = \
- self.hostinfo[vdu]['zbx_info']['zabbix_token']
- self.hostinfo[vdu]['appinfo'] = \
- copy.deepcopy(zapi.dAPP_INFO)
- temp_app = self.hostinfo[vdu]['parameters']['application']
- temp_item_api['params']['hostid'] = \
- self.hostinfo[vdu]['template_id'][0]
-
- for para in VNFMonitorZabbix.params:
- if 'application' == para:
- for app_info in temp_app:
- self.hostinfo[vdu]['appinfo'][app_info] = \
- temp_app[app_info]
-
- for item in self.hostinfo[vdu]['parameters'][para]:
- if ('app_name' != item) and ('app_port' != item) \
- and ('ssh_username' != item) \
- and ('ssh_password' != item):
-
- temp_item_api['params']['name'] = \
- zapi.dITEM_KEY_INFO[item]['name']
- temp_item_api['params']['value_type'] = \
- zapi.dITEM_KEY_INFO[item]['value_type']
-
- if item == 'app_status':
- temp = zapi.dITEM_KEY_INFO[item]['key_']
- temp_item_api['params']['key_'] = temp.replace(
- '*', str(
- self.hostinfo[vdu]['appinfo']['app_port']))
-
- elif item == 'app_memory':
- temp = zapi.dITEM_KEY_INFO[item]['key_']
- temp_item_api['params']['key_'] = temp.replace(
- '*',
- str(
- self.hostinfo[vdu]['appinfo']['app_name']))
-
- else:
- temp_item_api['params']['key_'] = \
- zapi.dITEM_KEY_INFO[item]['key_']
- response = self.send_post(temp_item_api)
- self.create_graph(
- response['result']['itemids'][0],
- temp_item_api['params']['name'], vdu)
- VNFMonitorZabbix.check_error(response)
-
- def create_template(self):
- temp_template_api = copy.deepcopy(zapi.dTEMPLATE_CREATE_API)
-
- for vdu in self.vduname:
- temp_template_api['params']['host'] = "Tacker Template " + str(vdu)
- temp_template_api['auth'] = \
- self.hostinfo[vdu]['zbx_info']['zabbix_token']
- response = self.send_post(temp_template_api)
-
- if 'error' in response:
- if "already exists." in response['error']['data']:
- now = time.localtime()
- rtime = str(now.tm_hour) + str(now.tm_min) + str(
- now.tm_sec)
- temp_template_api['params']['host'] = \
- "Tacker Template " + str(vdu) + rtime
- response = self.send_post(temp_template_api)
- VNFMonitorZabbix.check_error(response)
- self.hostinfo[vdu]['template_id'] = \
- response['result']['templateids']
- self.hostinfo[vdu]['template_name'] =\
- temp_template_api['params']['host']
-
- def add_host_to_zabbix(self):
-
- self.create_template()
- self.create_item()
- self._create_trigger()
- self.create_vdu_host()
- self.create_action()
-
- def get_token_from_zbxserver(self, node):
-
- temp_auth_api = copy.deepcopy(zapi.dAUTH_API)
- temp_auth_api['params']['user'] = \
- self.hostinfo[node]['zbx_info']['zabbix_user']
- temp_auth_api['params']['password'] = \
- self.hostinfo[node]['zbx_info']['zabbix_pass']
- zabbixip = \
- self.hostinfo[node]['zbx_info']['zabbix_ip']
- zabbixport = \
- self.hostinfo[node]['zbx_info']['zabbix_port']
- self.URL = "http://" + zabbixip + ":" + \
- str(zabbixport) + zapi.URL
- if netaddr.valid_ipv6(zabbixip):
- self.URL = "http://[" + zabbixip + "]:" + \
- str(zabbixport) + zapi.URL
- response = requests.post(
- self.URL,
- headers=zapi.HEADERS,
- data=jsonutils.dump_as_bytes(temp_auth_api)
- )
- response_dict = dict(response.json())
- VNFMonitorZabbix.check_error(response_dict)
- LOG.info('Success Connect Zabbix Server')
- return response_dict['result']
-
- def set_zbx_info(self, node):
- self.hostinfo[node]['zbx_info'] = \
- copy.deepcopy(zapi.dZBX_INFO)
- self.hostinfo[node]['zbx_info']['zabbix_user'] = \
- self.kwargs['vdus'][node]['zabbix_username']
- self.hostinfo[node]['zbx_info']['zabbix_pass'] = \
- self.kwargs['vdus'][node]['zabbix_password']
- self.hostinfo[node]['zbx_info']['zabbix_ip'] = \
- self.kwargs['vdus'][node]['zabbix_server_ip']
- self.hostinfo[node]['zbx_info']['zabbix_port'] = \
- self.kwargs['vdus'][node]['zabbix_server_port']
- self.hostinfo[node]['zbx_info']['zabbix_token'] = \
- self.get_token_from_zbxserver(node)
-
- def set_vdu_info(self):
- temp_vduname = self.kwargs['vdus'].keys()
- for node in temp_vduname:
- if 'application' in \
- self.kwargs['vdus'][node]['parameters'] \
- and 'OS'\
- in self.kwargs['vdus'][node]['parameters']:
- self.vduname.append(node)
- self.hostinfo[node] = copy.deepcopy(zapi.dVDU_INFO)
- self.set_zbx_info(node)
- self.hostinfo[node]['mgmt_ip'] = \
- self.kwargs['vdus'][node]['mgmt_ip']
- self.hostinfo[node]['parameters'] = \
- self.kwargs['vdus'][node]['parameters']
- self.hostinfo[node]['vdu_id'] = self.vnf['id']
-
- def add_to_appmonitor(self, vnf, kwargs):
-
- self.__init__()
- self.kwargs = kwargs
- self.vnf = vnf
- self.set_vdu_info()
- self.tenant_id = self.vnf['vnfd']['tenant_id']
- self.add_host_to_zabbix()
-
- def monitor_call(self, vnf, kwargs):
- pass
-
- def monitor_app_driver(self, plugin, context, vnf, service_instance):
- return self.get_name()
diff --git a/tacker/vnfm/monitor_drivers/zabbix/zabbix_api.py b/tacker/vnfm/monitor_drivers/zabbix/zabbix_api.py
deleted file mode 100644
index fb1015a5f..000000000
--- a/tacker/vnfm/monitor_drivers/zabbix/zabbix_api.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-URL = "/zabbix/api_jsonrpc.php"
-
-HEADERS = {'Content-Type': 'application/json-rpc'}
-dAUTH_API = {'jsonrpc': "2.0",
- 'method': 'user.login',
- 'params': {'user': None,
- 'password': None},
- 'id': 1,
- 'auth': None}
-
-COMP_VALUE = {'greater': '>',
- 'less': '<',
- 'and greater': '>=',
- 'and less': '<=',
- 'down': '=0'}
-
-
-dTEMPLATE_CREATE_API = {'jsonrpc': "2.0", 'method': "template.create",
- 'params': {'host': "", 'groups': {'groupid': 1},
- 'hosts': []},
- 'id': 1004, 'auth': None}
-
-dITEM_CREATE_API = {'jsonrpc': "2.0",
- 'method': "item.create",
- 'params': {'hostid': None,
- 'interfaceid': 'NULL',
- 'name': "",
- 'key_': "",
- 'type': 0,
- 'value_type': 3,
- 'delay': 1},
- 'id': 1,
- 'auth': None}
-
-
-dITEM_KEY_VALUE = {'os_agent_info': 'agent.ping',
- 'os_cpu_usage': 'system.cpu.util[,iowait]',
- 'os_cpu_load': 'system.cpu.load[percpu,avg1]',
- 'os_proc_value': 'proc.num[,,run]',
- 'app_status': 'net.tcp.port[ ,*]',
- 'app_memory': 'proc.mem[*,root]'}
-
-dITEM_KEY_COMP = {'os_agent_info': str(
- dITEM_KEY_VALUE['os_agent_info'] + '.nodata(15s)}=1'),
- 'os_cpu_usage': str(
- dITEM_KEY_VALUE['os_cpu_usage'] + '.avg(5s)}'),
- 'os_cpu_load': str(
- dITEM_KEY_VALUE['os_cpu_load'] + '.avg(5s)}'),
- 'os_proc_value': str(
- dITEM_KEY_VALUE['os_proc_value'] + '.avg(5s)}'),
- 'app_status': str(
- dITEM_KEY_VALUE['app_status'] + '.last(,5)}'),
- 'app_memory': str(
- dITEM_KEY_VALUE['app_memory'] + '.avg(5s)}')}
-
-dITEM_KEY_INFO = {'os_proc_value': {'name': 'process number',
- 'key_': str(
- dITEM_KEY_VALUE['os_proc_value']),
- 'value_type': 3},
- 'os_cpu_load': {'name': 'cpu load',
- 'key_': str(
- dITEM_KEY_VALUE['os_cpu_load']),
- 'value_type': 0},
- 'os_cpu_usage': {'name': 'cpu util usage',
- 'key_': str(
- dITEM_KEY_VALUE['os_cpu_usage']),
- 'value_type': 0},
- 'os_agent_info': {'name': 'Zabbix agent status check',
- 'key_': str(
- dITEM_KEY_VALUE['os_agent_info']),
- 'value_type': 0},
- 'app_status': {'name': ' service status check',
- 'key_': str(
- dITEM_KEY_VALUE['app_status']),
- 'value_type': 3},
- 'app_memory': {'name': ' service memory usage',
- 'key_': str(
- dITEM_KEY_VALUE['app_memory']),
- 'value_type': 3}}
-
-
-dTRIGGER_CREATE_API = {'jsonrpc': "2.0",
- 'method': "trigger.create",
- 'templateid': None,
- 'auth': None,
- 'id': 1004}
-
-dTRIGGER_INFO = {'itemname': None,
- 'cmdname': None,
- 'cmd-action': None}
-
-dTRIGGER_LIST = {'os_agent_info': [{'description': 'Zabbix agent on '
- '{HOST.NAME} is '
- 'unreachable '
- 'for 15 seconds',
- 'expression': '{', 'priority': 3}],
- 'app_status': [{'description': 'Service is down '
- 'on {HOST.NAME}',
- 'expression': '{', 'priority': 3}],
- 'app_memory': [{'description': 'Process Memory '
- 'is lacking '
- 'on {HOST.NAME}',
- 'expression': '{', 'priority': 3}],
- 'os_cpu_usage': [{'description': 'Disk I/O is '
- 'overloaded '
- 'on {HOST.NAME}',
- 'expression': '{', 'priority': 3}],
- 'os_cpu_load': [{'description': 'Processor load '
- 'is too high '
- 'on {HOST.NAME}',
- 'expression': '{', 'priority': 3}],
- 'os_proc_value': [{'description': 'Too many '
- 'processes running '
- 'on {HOST.NAME}',
- 'expression': '{', 'priority': 3}]}
-
-dACTION_CREATE_API = {'jsonrpc': "2.0",
- 'method': "action.create",
- 'params': {'name': '',
- 'eventsource': 0,
- 'status': 0,
- 'esc_period': 120,
- 'def_shortdata': "{TRIGGER.NAME}:"
- "{TRIGGER.STATUS}",
- 'def_longdata': "{TRIGGER.NAME}: "
- "{TR`IGGER.STATUS}\r\n"
- "Last value: "
- "{ITEM.LASTVALUE]"
- "\r\n\r\n{TRIGGER.URL}",
- "filter": {"evaltype": 0,
- "conditions": [{'conditiontype': 2,
- 'operator': 0,
- 'value': None}]},
- 'operations': [{'operationtype': 1,
- 'esc_period': 0,
- 'esc_step_from': 1,
- 'esc_step_to': 2,
- 'evaltype': 0,
- 'opcommand': {
- 'command': None,
- 'type': 0,
- 'execute_on': 0},
- 'opcommand_hst': [
- {'hostid': None}]
- }]},
- 'auth': None, 'id': 1}
-
-dHOST_CREATE_API = {'jsonrpc': "2.0",
- 'method': "host.create",
- 'params': {'host': 'ubuntu',
- 'interfaces': [
- {'type': 1,
- 'main': 1,
- 'useip': 1,
- 'dns': "",
- 'ip': None,
- 'port': "10050"}],
- 'templates': [{'templateid': None}],
- 'groups': [{'groupid': None}]},
- 'id': 4, 'auth': None}
-
-dGROUP_GET_API = {'jsonrpc': "2.0", 'method': "hostgroup.get",
- 'params': {'output': 'extend',
- 'filter': {'name': ["Zabbix servers", ]}},
- 'id': 1, 'auth': None}
-
-dGRAPH_CREATE_API = {'jsonrpc': '2.0', 'method': 'graph.create',
- 'params': {'name': None,
- 'width': 900,
- 'height': 200,
- 'gitems': []},
- 'auth': None, 'id': 1004}
-
-dAPP_INFO = {'app_port': None,
- 'app_name': None,
- 'ssh_username': None,
- 'ssh_password': None}
-
-dZBX_INFO = {'zabbix_user': None,
- 'zabbix_pass': None,
- 'zabbix_ip': None,
- 'zabbix_port': None,
- 'zabbix_token': None
- }
-
-dACTION_LIST = {'item': None,
- 'action': None,
- 'trigger_id': None,
- 'cmd-action': None}
-
-dVDU_INFO = {'template_id': None,
- 'template_name': None,
- 'hostid': None,
- 'group_id': None,
- 'mgmt_ip': None,
- 'vdu_id': None,
- 'parameters': None,
- 'actioninfo': [],
- 'appinfo': None,
- 'zbx_info': None
- }
diff --git a/tacker/vnfm/plugin.py b/tacker/vnfm/plugin.py
index 43b7895e4..2a5841190 100644
--- a/tacker/vnfm/plugin.py
+++ b/tacker/vnfm/plugin.py
@@ -14,30 +14,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import inspect
-import yaml
-
-import eventlet
from oslo_config import cfg
from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-from oslo_utils import uuidutils
-from toscaparser.tosca_template import ToscaTemplate
from tacker._i18n import _
-from tacker.api.v1 import attributes
from tacker.common import driver_manager
-from tacker.common import exceptions
-from tacker.common import utils
from tacker import context as t_context
from tacker.db.vnfm import vnfm_db
-from tacker.extensions import vnfm
-from tacker.plugins.common import constants
-from tacker.plugins import fenix
-from tacker.tosca import utils as toscautils
-from tacker.vnfm.mgmt_drivers import constants as mgmt_constants
-from tacker.vnfm import monitor
from tacker.vnfm import vim_client
@@ -46,72 +29,10 @@ CONF = cfg.CONF
def config_opts():
- return [('tacker', VNFMMgmtMixin.OPTS),
- ('tacker', VNFMPlugin.OPTS_INFRA_DRIVER),
- ('tacker', VNFMPlugin.OPTS_POLICY_ACTION)]
+ return [('tacker', VNFMPlugin.OPTS_INFRA_DRIVER)]
-class VNFMMgmtMixin(object):
- OPTS = [
- cfg.ListOpt(
- 'mgmt_driver', default=['noop', 'openwrt'],
- help=_('MGMT driver to communicate with '
- 'Hosting VNF/logical service '
- 'instance tacker plugin will use')),
- cfg.IntOpt('boot_wait', default=30,
- help=_('Time interval to wait for VM to boot'))
- ]
- cfg.CONF.register_opts(OPTS, 'tacker')
-
- def __init__(self):
- super(VNFMMgmtMixin, self).__init__()
- self._mgmt_manager = driver_manager.DriverManager(
- 'tacker.tacker.mgmt.drivers', cfg.CONF.tacker.mgmt_driver)
-
- def _invoke(self, vnf_dict, **kwargs):
- method = inspect.stack()[1][3]
- return self._mgmt_manager.invoke(
- self._mgmt_driver_name(vnf_dict), method, **kwargs)
-
- def mgmt_create_pre(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_create_post(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_update_pre(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_update_post(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_delete_pre(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_delete_post(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_get_config(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_ip_address(self, context, vnf_dict):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict)
-
- def mgmt_call(self, context, vnf_dict, kwargs):
- return self._invoke(
- vnf_dict, plugin=self, context=context, vnf=vnf_dict,
- kwargs=kwargs)
-
-
-class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
+class VNFMPlugin(vnfm_db.VNFMPluginDb):
"""VNFMPlugin which supports VNFM framework.
Plugin which supports Tacker framework
@@ -124,254 +45,21 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
]
cfg.CONF.register_opts(OPTS_INFRA_DRIVER, 'tacker')
- OPTS_POLICY_ACTION = [
- cfg.ListOpt(
- 'policy_action', default=['autoscaling', 'respawn',
- 'vdu_autoheal', 'log', 'log_and_kill'],
- help=_('Hosting vnf drivers tacker plugin will use')),
- ]
- cfg.CONF.register_opts(OPTS_POLICY_ACTION, 'tacker')
-
supported_extension_aliases = ['vnfm']
def __init__(self):
super(VNFMPlugin, self).__init__()
- self._pool = eventlet.GreenPool()
- self.boot_wait = cfg.CONF.tacker.boot_wait
self.vim_client = vim_client.VimClient()
self._vnf_manager = driver_manager.DriverManager(
'tacker.tacker.vnfm.drivers',
cfg.CONF.tacker.infra_driver)
- self._vnf_action = driver_manager.DriverManager(
- 'tacker.tacker.policy.actions',
- cfg.CONF.tacker.policy_action)
- self._vnf_monitor = monitor.VNFMonitor(self.boot_wait)
- self._vnf_alarm_monitor = monitor.VNFAlarmMonitor()
- self._vnf_reservation_monitor = monitor.VNFReservationAlarmMonitor()
- self._vnf_maintenance_monitor = monitor.VNFMaintenanceAlarmMonitor()
- self._vnf_app_monitor = monitor.VNFAppMonitor()
- self._vnf_maintenance_plugin = fenix.FenixPlugin()
- self._init_monitoring()
-
- def _init_monitoring(self):
- context = t_context.get_admin_context()
- vnfs = self.get_vnfs(context)
- for vnf in vnfs:
- # Add tenant_id in context object as it is required
- # to get VNF in monitoring.
- context.tenant_id = vnf['tenant_id']
- self.add_vnf_to_monitor(context, vnf)
+ # NOTE(ueha): Workaround to suppress the following Exception:
+ # oslo_db.sqlalchemy.enginefacade.AlreadyStartedError
+ _ = self.get_vnfs(t_context.get_admin_context())
def spawn_n(self, function, *args, **kwargs):
self._pool.spawn_n(function, *args, **kwargs)
- def create_vnfd(self, context, vnfd):
- vnfd_data = vnfd['vnfd']
- template = vnfd_data['attributes'].get('vnfd')
- if isinstance(template, dict):
- # TODO(sripriya) remove this yaml dump once db supports storing
- # json format of yaml files in a separate column instead of
- # key value string pairs in vnf attributes table
- vnfd_data['attributes']['vnfd'] = yaml.safe_dump(
- template)
- else:
- raise vnfm.InvalidAPIAttributeType(atype=type(template))
- if "tosca_definitions_version" not in template:
- raise exceptions.Invalid('Not a valid template: '
- 'tosca_definitions_version is missing.')
-
- LOG.debug('vnfd %s', vnfd_data)
-
- service_types = vnfd_data.get('service_types')
- if not attributes.is_attr_set(service_types):
- LOG.error('service type must be specified')
- raise vnfm.ServiceTypesNotSpecified()
- for service_type in service_types:
- # TODO(yamahata):
- # framework doesn't know what services are valid for now.
- # so doesn't check it here yet.
- pass
- if 'template_source' in vnfd_data:
- template_source = vnfd_data.get('template_source')
- else:
- template_source = 'onboarded'
- vnfd['vnfd']['template_source'] = template_source
-
- self._parse_template_input(vnfd)
- return super(VNFMPlugin, self).create_vnfd(
- context, vnfd)
-
- def _parse_template_input(self, vnfd):
- vnfd_dict = vnfd['vnfd']
- vnfd_yaml = vnfd_dict['attributes'].get('vnfd')
- if vnfd_yaml is None:
- return
-
- inner_vnfd_dict = yaml.safe_load(vnfd_yaml)
- LOG.debug('vnfd_dict: %s', inner_vnfd_dict)
-
- # Prepend the tacker_defs.yaml import file with the full
- # path to the file
- toscautils.updateimports(inner_vnfd_dict)
-
- try:
- tosca = ToscaTemplate(
- a_file=False,
- yaml_dict_tpl=inner_vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- except Exception as e:
- LOG.exception("tosca-parser error: %s", str(e))
- raise vnfm.ToscaParserFailed(error_msg_details=str(e))
-
- if ('description' not in vnfd_dict or
- vnfd_dict['description'] == ''):
- vnfd_dict['description'] = inner_vnfd_dict.get(
- 'description', '')
- if (('name' not in vnfd_dict or
- not len(vnfd_dict['name'])) and
- 'metadata' in inner_vnfd_dict):
- vnfd_dict['name'] = inner_vnfd_dict['metadata'].get(
- 'template_name', '')
-
- vnfd_dict['mgmt_driver'] = toscautils.get_mgmt_driver(
- tosca)
-
- if vnfd_dict['mgmt_driver'] not in cfg.CONF.tacker.mgmt_driver:
- LOG.error("Invalid mgmt_driver in TOSCA template")
- raise vnfm.InvalidMgmtDriver(
- mgmt_driver_name=vnfd_dict['mgmt_driver'])
-
- LOG.debug('vnfd %s', vnfd)
-
- def add_vnf_to_monitor(self, context, vnf_dict):
- dev_attrs = vnf_dict['attributes']
- mgmt_ip_address = vnf_dict['mgmt_ip_address']
- if 'monitoring_policy' in dev_attrs and mgmt_ip_address:
- def action_cb(action, **kwargs):
- LOG.debug('policy action: %s', action)
- self._vnf_action.invoke(
- action, 'execute_action', plugin=self, context=context,
- vnf_dict=hosting_vnf['vnf'], args=kwargs)
-
- hosting_vnf = self._vnf_monitor.to_hosting_vnf(
- vnf_dict, action_cb)
- LOG.debug('hosting_vnf: %s', hosting_vnf)
- self._vnf_monitor.add_hosting_vnf(hosting_vnf)
-
- def add_alarm_url_to_vnf(self, context, vnf_dict):
- vnfd_yaml = vnf_dict['vnfd']['attributes'].get('vnfd', '')
- vnfd_dict = yaml.safe_load(vnfd_yaml)
- if not (vnfd_dict and vnfd_dict.get('tosca_definitions_version')):
- return
- try:
- toscautils.updateimports(vnfd_dict)
- tosca_vnfd = ToscaTemplate(
- a_file=False,
- yaml_dict_tpl=vnfd_dict,
- local_defs=toscautils.tosca_tmpl_local_defs())
- except Exception as e:
- LOG.exception("tosca-parser error: %s", str(e))
- raise vnfm.ToscaParserFailed(error_msg_details=str(e))
- polices = vnfd_dict['topology_template'].get('policies', [])
- for policy_dict in polices:
- name, policy = list(policy_dict.items())[0]
- if policy['type'] in constants.POLICY_ALARMING:
- alarm_url =\
- self._vnf_alarm_monitor.update_vnf_with_alarm(
- self, context, vnf_dict, policy)
- vnf_dict['attributes']['alarming_policy'] = vnf_dict['id']
- vnf_dict['attributes'].update(alarm_url)
- elif policy['type'] in constants.POLICY_RESERVATION:
- alarm_url = \
- self._vnf_reservation_monitor.update_vnf_with_reservation(
- self, context, vnf_dict, policy)
- vnf_dict['attributes']['reservation_policy'] = vnf_dict['id']
- vnf_dict['attributes'].update(alarm_url)
- maintenance_vdus = toscautils.find_maintenance_vdus(tosca_vnfd)
- maintenance = \
- self._vnf_maintenance_monitor.update_vnf_with_maintenance(
- vnf_dict, maintenance_vdus)
- vnf_dict['attributes'].update({
- 'maintenance': jsonutils.dumps(maintenance['vdus'])})
- vnf_dict['attributes']['maintenance_url'] = maintenance['url']
-
- def add_vnf_to_appmonitor(self, context, vnf_dict):
- appmonitor = self._vnf_app_monitor.create_app_dict(context, vnf_dict)
- self._vnf_app_monitor.add_to_appmonitor(appmonitor, vnf_dict)
-
- def config_vnf(self, context, vnf_dict):
- config = vnf_dict['attributes'].get('config')
- if not config:
- self._vnf_maintenance_plugin.create_vnf_constraints(self, context,
- vnf_dict)
- return
- if isinstance(config, str):
- # TODO(dkushwaha) remove this load once db supports storing
- # json format of yaml files in a separate column instead of
- # key value string pairs in vnf attributes table.
- config = yaml.safe_load(config)
-
- eventlet.sleep(self.boot_wait) # wait for vm to be ready
- vnf_id = vnf_dict['id']
- update = {
- 'vnf': {
- 'id': vnf_id,
- 'attributes': {'config': config},
- }
- }
- self.update_vnf(context, vnf_id, update)
-
- def _get_infra_driver(self, context, vnf_info):
- vim_res = self.get_vim(context, vnf_info)
- return vim_res['vim_type'], vim_res['vim_auth']
-
- def _create_vnf_wait(self, context, vnf_dict, auth_attr, driver_name):
- vnf_id = vnf_dict['id']
- instance_id = self._instance_id(vnf_dict)
- create_failed = False
-
- try:
- self._vnf_manager.invoke(
- driver_name, 'create_wait', plugin=self, context=context,
- vnf_dict=vnf_dict, vnf_id=instance_id,
- auth_attr=auth_attr)
- except vnfm.VNFCreateWaitFailed as e:
- LOG.error("VNF Create failed for vnf_id %s", vnf_id)
- create_failed = True
- vnf_dict['status'] = constants.ERROR
- self.set_vnf_error_status_reason(context, vnf_id, str(e))
-
- if instance_id is None or create_failed:
- mgmt_ip_address = None
- else:
- # mgmt_ip_address = self.mgmt_ip_address(context, vnf_dict)
- # FIXME(yamahata):
- mgmt_ip_address = vnf_dict['mgmt_ip_address']
-
- self._create_vnf_post(
- context, vnf_id, instance_id, mgmt_ip_address, vnf_dict)
- self.mgmt_create_post(context, vnf_dict)
-
- if instance_id is None or create_failed:
- return
-
- vnf_dict['mgmt_ip_address'] = mgmt_ip_address
-
- kwargs = {
- mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_CREATE_VNF,
- mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
- }
- new_status = constants.ACTIVE
- try:
- self.mgmt_call(context, vnf_dict, kwargs)
- except exceptions.MgmtDriverException:
- LOG.error('VNF configuration failed')
- new_status = constants.ERROR
- self.set_vnf_error_status_reason(context, vnf_id,
- 'Unable to configure VDU')
- vnf_dict['status'] = new_status
- self._create_vnf_status(context, vnf_id, new_status)
-
def get_vim(self, context, vnf):
region_name = vnf.setdefault('placement_attr', {}).get(
'region_name', None)
@@ -380,717 +68,3 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
vnf['placement_attr']['vim_name'] = vim_res['vim_name']
vnf['vim_id'] = vim_res['vim_id']
return vim_res
-
- def _create_vnf(self, context, vnf, vim_auth, driver_name):
- vnf_dict = self._create_vnf_pre(
- context, vnf) if not vnf.get('id') else vnf
- vnf_id = vnf_dict['id']
- LOG.debug('vnf_dict %s', vnf_dict)
- if driver_name == 'openstack':
- self.mgmt_create_pre(context, vnf_dict)
- self.add_alarm_url_to_vnf(context, vnf_dict)
- vnf_dict['attributes']['maintenance_group'] = uuidutils.generate_uuid()
-
- try:
- instance_id = self._vnf_manager.invoke(
- driver_name, 'create', plugin=self,
- context=context, vnf=vnf_dict, auth_attr=vim_auth)
- except Exception:
- LOG.error('Fail to create vnf %s in infra_driver, '
- 'so delete this vnf',
- vnf_dict['id'])
- with excutils.save_and_reraise_exception():
- self._delete_vnf(context, vnf_id, force_delete=True)
-
- vnf_dict['instance_id'] = instance_id
- return vnf_dict
-
- def create_vnf(self, context, vnf):
- vnf_info = vnf['vnf']
- name = vnf_info['name']
-
- # if vnfd_template specified, create vnfd from template
- # create template dictionary structure same as needed in create_vnfd()
- if vnf_info.get('vnfd_template'):
- vnfd_name = utils.generate_resource_name(name, 'inline')
- vnfd = {'vnfd': {'attributes': {'vnfd': vnf_info['vnfd_template']},
- 'name': vnfd_name,
- 'template_source': 'inline',
- 'service_types': [{'service_type': 'vnfd'}]}}
- vnf_info['vnfd_id'] = self.create_vnfd(context, vnfd).get('id')
-
- infra_driver, vim_auth = self._get_infra_driver(context, vnf_info)
- if infra_driver not in self._vnf_manager:
- LOG.error('unknown vim driver '
- '%(infra_driver)s in %(drivers)s',
- {'infra_driver': infra_driver,
- 'drivers': cfg.CONF.tacker.infra_driver})
- raise vnfm.InvalidInfraDriver(vim_name=infra_driver)
-
- vnf_attributes = vnf_info['attributes']
- if vnf_attributes.get('param_values'):
- param = vnf_attributes['param_values']
- if isinstance(param, dict):
- # TODO(sripriya) remove this yaml dump once db supports storing
- # json format of yaml files in a separate column instead of
- # key value string pairs in vnf attributes table
- vnf_attributes['param_values'] = yaml.safe_dump(param)
- else:
- raise vnfm.InvalidAPIAttributeType(atype=type(param))
- if vnf_attributes.get('config'):
- config = vnf_attributes['config']
- if isinstance(config, dict):
- # TODO(sripriya) remove this yaml dump once db supports storing
- # json format of yaml files in a separate column instead of
- # key value string pairs in vnf attributes table
- vnf_attributes['config'] = yaml.safe_dump(config)
- else:
- raise vnfm.InvalidAPIAttributeType(atype=type(config))
-
- vnf_dict = self._create_vnf(context, vnf_info, vim_auth, infra_driver)
-
- def create_vnf_wait():
- self._create_vnf_wait(context, vnf_dict, vim_auth, infra_driver)
-
- if 'app_monitoring_policy' in vnf_dict['attributes']:
- self.add_vnf_to_appmonitor(context, vnf_dict)
- if vnf_dict['status'] is not constants.ERROR:
- self.add_vnf_to_monitor(context, vnf_dict)
-
- self.config_vnf(context, vnf_dict)
- self.spawn_n(create_vnf_wait)
- return vnf_dict
-
- # not for wsgi, but for service to create hosting vnf
- # the vnf is NOT added to monitor.
- def create_vnf_sync(self, context, vnf):
- infra_driver, vim_auth = self._get_infra_driver(context, vnf)
- vnf_dict = self._create_vnf(context, vnf, vim_auth, infra_driver)
- self._create_vnf_wait(context, vnf_dict, vim_auth, infra_driver)
- return vnf_dict
-
- def _heal_vnf_wait(self, context, vnf_dict, vim_auth, driver_name):
- kwargs = {
- mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_HEAL_VNF,
- mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
- }
- new_status = constants.ACTIVE
- placement_attr = vnf_dict['placement_attr']
- region_name = placement_attr.get('region_name')
-
- try:
- self._vnf_manager.invoke(
- driver_name, 'heal_wait', plugin=self,
- context=context, vnf_dict=vnf_dict, auth_attr=vim_auth,
- region_name=region_name)
- self.mgmt_call(context, vnf_dict, kwargs)
-
- except vnfm.VNFHealWaitFailed as e:
- with excutils.save_and_reraise_exception():
- new_status = constants.ERROR
- self._vnf_monitor.delete_hosting_vnf(vnf_dict['id'])
- self._vnf_maintenance_plugin.post(context, vnf_dict)
- self.set_vnf_error_status_reason(context, vnf_dict['id'],
- str(e))
- except exceptions.MgmtDriverException as e:
- LOG.error('VNF configuration failed')
- new_status = constants.ERROR
- self._vnf_monitor.delete_hosting_vnf(vnf_dict['id'])
- self._vnf_maintenance_plugin.post(context, vnf_dict)
- self.set_vnf_error_status_reason(context, vnf_dict['id'], str(e))
-
- del vnf_dict['heal_stack_id']
- vnf_dict['status'] = new_status
- self.mgmt_update_post(context, vnf_dict)
-
- # Update vnf status to 'ACTIVE' so that monitoring can be resumed.
- evt_details = ("Ends the heal vnf request for VNF '%s'" %
- vnf_dict['id'])
- self._vnf_monitor.update_hosting_vnf(vnf_dict, evt_details)
- self._vnf_maintenance_plugin.update_vnf_instances(self, context,
- vnf_dict)
- self._vnf_maintenance_plugin.post(context, vnf_dict)
- # _update_vnf_post() method updates vnf_status and mgmt_ip_address
- self._update_vnf_post(context, vnf_dict['id'],
- new_status, vnf_dict,
- constants.PENDING_HEAL,
- constants.RES_EVT_HEAL)
-
- def _update_vnf_wait(self, context, vnf_dict, vim_auth, driver_name):
- kwargs = {
- mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_UPDATE_VNF,
- mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
- }
- new_status = constants.ACTIVE
- placement_attr = vnf_dict['placement_attr']
- region_name = placement_attr.get('region_name')
-
- try:
- self._vnf_manager.invoke(
- driver_name, 'update_wait', plugin=self,
- context=context, vnf_dict=vnf_dict, auth_attr=vim_auth,
- region_name=region_name)
-
- self.mgmt_call(context, vnf_dict, kwargs)
- except vnfm.VNFUpdateWaitFailed as e:
- with excutils.save_and_reraise_exception():
- new_status = constants.ERROR
- self._vnf_monitor.delete_hosting_vnf(vnf_dict['id'])
- self.set_vnf_error_status_reason(context, vnf_dict['id'],
- str(e))
- except exceptions.MgmtDriverException as e:
- LOG.error('VNF configuration failed')
- new_status = constants.ERROR
- self._vnf_monitor.delete_hosting_vnf(vnf_dict['id'])
- self.set_vnf_error_status_reason(context, vnf_dict['id'], str(e))
- vnf_dict['status'] = new_status
- self.mgmt_update_post(context, vnf_dict)
- self._update_vnf_post(context, vnf_dict['id'], new_status,
- vnf_dict, constants.PENDING_UPDATE,
- constants.RES_EVT_UPDATE)
- self._vnf_maintenance_plugin.create_vnf_constraints(self, context,
- vnf_dict)
-
- def update_vnf(self, context, vnf_id, vnf):
- vnf_attributes = vnf['vnf']['attributes']
- if vnf_attributes.get('config'):
- config = vnf_attributes['config']
- if isinstance(config, dict):
- # TODO(sripriya) remove this yaml dump once db supports storing
- # json format of yaml files in a separate column instead of
- # key value string pairs in vnf attributes table
- vnf_attributes['config'] = yaml.safe_dump(config)
- else:
- raise vnfm.InvalidAPIAttributeType(atype=type(config))
-
- if vnf_attributes.get('param_values'):
- param = vnf_attributes['param_values']
- if isinstance(param, dict):
- # TODO(sripriya) remove this yaml dump once db supports storing
- # json format of yaml files in a separate column instead of
- # key value string pairs in vnf attributes table
- vnf_attributes['param_values'] = yaml.safe_dump(param)
- else:
- raise vnfm.InvalidAPIAttributeType(atype=type(param))
-
- vnf_dict = self._update_vnf_pre(context, vnf_id,
- constants.PENDING_UPDATE)
- driver_name, vim_auth = self._get_infra_driver(context, vnf_dict)
- instance_id = self._instance_id(vnf_dict)
-
- try:
- self.mgmt_update_pre(context, vnf_dict)
- self._vnf_manager.invoke(
- driver_name, 'update', plugin=self, context=context,
- vnf_id=instance_id, vnf_dict=vnf_dict,
- vnf=vnf, auth_attr=vim_auth)
- except vnfm.VNFUpdateInvalidInput:
- with excutils.save_and_reraise_exception():
- vnf_dict['status'] = constants.ACTIVE
- self._update_vnf_post(context, vnf_id,
- constants.ACTIVE,
- vnf_dict, constants.PENDING_UPDATE,
- constants.RES_EVT_UPDATE)
- except Exception as e:
- with excutils.save_and_reraise_exception():
- vnf_dict['status'] = constants.ERROR
- self._vnf_monitor.delete_hosting_vnf(vnf_id)
- self.set_vnf_error_status_reason(context, vnf_dict['id'],
- str(e))
- self.mgmt_update_post(context, vnf_dict)
- self._update_vnf_post(context, vnf_id,
- constants.ERROR,
- vnf_dict, constants.PENDING_UPDATE,
- constants.RES_EVT_UPDATE)
-
- self.spawn_n(self._update_vnf_wait, context, vnf_dict, vim_auth,
- driver_name)
- return vnf_dict
-
- def heal_vnf(self, context, vnf_id, heal_request_data_obj):
- vnf_dict = self._update_vnf_pre(context, vnf_id,
- constants.PENDING_HEAL)
- driver_name, vim_auth = self._get_infra_driver(context, vnf_dict)
- # Update vnf status to 'PENDING_HEAL' so that monitoring can
- # be paused.
- evt_details = ("Starts heal vnf request for VNF '%s'. "
- "Reason to Heal VNF: '%s'" % (vnf_dict['id'],
- heal_request_data_obj.cause))
- self._vnf_monitor.update_hosting_vnf(vnf_dict, evt_details)
-
- try:
- vnf_dict['heal_stack_id'] = heal_request_data_obj.stack_id
- self._vnf_maintenance_plugin.project_instance_pre(context,
- vnf_dict)
- self.mgmt_update_pre(context, vnf_dict)
- self._vnf_manager.invoke(
- driver_name, 'heal_vdu', plugin=self,
- context=context, vnf_dict=vnf_dict,
- heal_request_data_obj=heal_request_data_obj)
- except vnfm.VNFHealFailed as e:
- with excutils.save_and_reraise_exception():
- vnf_dict['status'] = constants.ERROR
- self._vnf_monitor.delete_hosting_vnf(vnf_id)
- self.set_vnf_error_status_reason(context, vnf_dict['id'],
- str(e))
- self.mgmt_update_post(context, vnf_dict)
- self._vnf_maintenance_plugin.post(context, vnf_dict)
- self._update_vnf_post(context, vnf_id,
- constants.ERROR,
- vnf_dict, constants.PENDING_HEAL,
- constants.RES_EVT_HEAL)
-
- self.spawn_n(self._heal_vnf_wait, context, vnf_dict, vim_auth,
- driver_name)
-
- return vnf_dict
-
- def _delete_vnf_wait(self, context, vnf_dict, auth_attr, driver_name):
- instance_id = self._instance_id(vnf_dict)
- e = None
- if instance_id:
- placement_attr = vnf_dict['placement_attr']
- region_name = placement_attr.get('region_name')
- try:
- self._vnf_manager.invoke(
- driver_name,
- 'delete_wait',
- plugin=self,
- context=context,
- vnf_id=instance_id,
- auth_attr=auth_attr,
- region_name=region_name)
- except Exception as e_:
- e = e_
- vnf_dict['status'] = constants.ERROR
- vnf_dict['error_reason'] = str(e)
- LOG.exception('_delete_vnf_wait')
- self.set_vnf_error_status_reason(context, vnf_dict['id'],
- vnf_dict['error_reason'])
-
- self._vnf_maintenance_plugin.delete_vnf_constraints(self, context,
- vnf_dict)
- self.mgmt_delete_post(context, vnf_dict)
- self._delete_vnf_post(context, vnf_dict, e)
-
- def _delete_vnf(self, context, vnf_id, force_delete=False):
-
- vnf_dict = self._delete_vnf_pre(context, vnf_id,
- force_delete=force_delete)
- driver_name, vim_auth = self._get_infra_driver(context, vnf_dict)
- self._vnf_monitor.delete_hosting_vnf(vnf_id)
- instance_id = self._instance_id(vnf_dict)
- placement_attr = vnf_dict['placement_attr']
- region_name = placement_attr.get('region_name')
- kwargs = {
- mgmt_constants.KEY_ACTION: mgmt_constants.ACTION_DELETE_VNF,
- mgmt_constants.KEY_KWARGS: {'vnf': vnf_dict},
- }
- try:
- self._vnf_maintenance_plugin.project_instance_pre(context,
- vnf_dict)
- self.mgmt_delete_pre(context, vnf_dict)
- self.mgmt_call(context, vnf_dict, kwargs)
- if instance_id:
- self._vnf_manager.invoke(driver_name,
- 'delete',
- plugin=self,
- context=context,
- vnf_id=instance_id,
- auth_attr=vim_auth,
- region_name=region_name)
- except Exception as e:
- # TODO(yamahata): when the device is already deleted. mask
- # the error, and delete row in db
- # Other case mark error
- with excutils.save_and_reraise_exception():
- if not force_delete:
- vnf_dict['status'] = constants.ERROR
- vnf_dict['error_reason'] = str(e)
- self.set_vnf_error_status_reason(context, vnf_dict['id'],
- vnf_dict['error_reason'])
- self.mgmt_delete_post(context, vnf_dict)
- self._delete_vnf_post(context, vnf_dict, e)
-
- if force_delete:
- self._delete_vnf_force(context, vnf_dict['id'])
- self.mgmt_delete_post(context, vnf_dict)
- self._delete_vnf_post(context, vnf_dict, None, force_delete=True)
- else:
- self.spawn_n(self._delete_vnf_wait, context, vnf_dict, vim_auth,
- driver_name)
-
- def delete_vnf(self, context, vnf_id, vnf=None):
-
- # Extract "force_delete" from request's body
- force_delete = False
- if vnf and vnf['vnf'].get('attributes').get('force'):
- force_delete = vnf['vnf'].get('attributes').get('force')
- if force_delete and not context.is_admin:
- LOG.error("force delete is admin only operation")
- raise exceptions.AdminRequired(reason="Admin only operation")
-
- self._delete_vnf(context, vnf_id, force_delete=force_delete)
-
- def _handle_vnf_scaling(self, context, policy):
- # validate
- def _validate_scaling_policy():
- type = policy['type']
-
- if type not in constants.POLICY_ACTIONS:
- raise exceptions.VnfPolicyTypeInvalid(
- type=type,
- valid_types=constants.POLICY_ACTIONS.keys(),
- policy=policy['name']
- )
- action = policy['action']
-
- if action not in constants.POLICY_ACTIONS[type]:
- raise exceptions.VnfPolicyActionInvalid(
- action=action,
- valid_actions=constants.POLICY_ACTIONS[type],
- policy=policy['name']
- )
-
- LOG.info("Policy %s is validated successfully", policy['name'])
-
- def _get_status():
- if policy['action'] == constants.ACTION_SCALE_IN:
- status = constants.PENDING_SCALE_IN
- else:
- status = constants.PENDING_SCALE_OUT
-
- return status
-
- # pre
- def _handle_vnf_scaling_pre():
- status = _get_status()
- result = self._update_vnf_scaling_status(context,
- policy,
- [constants.ACTIVE],
- status)
- LOG.debug("Policy %(policy)s vnf is at %(status)s",
- {'policy': policy['name'],
- 'status': status})
- self._vnf_maintenance_plugin.project_instance_pre(context,
- result)
- return result
-
- # post
- def _handle_vnf_scaling_post(new_status, mgmt_ip_address=None):
- status = _get_status()
- result = self._update_vnf_scaling_status(context,
- policy,
- [status],
- new_status,
- mgmt_ip_address)
- LOG.debug("Policy %(policy)s vnf is at %(status)s",
- {'policy': policy['name'],
- 'status': new_status})
- action = 'delete' if policy['action'] == 'in' else 'update'
- self._vnf_maintenance_plugin.update_vnf_instances(self, context,
- result,
- action=action)
- return result
-
- # action
- def _vnf_policy_action():
- try:
- last_event_id = self._vnf_manager.invoke(
- infra_driver,
- 'scale',
- plugin=self,
- context=context,
- auth_attr=vim_auth,
- policy=policy,
- region_name=region_name
- )
- LOG.debug("Policy %s action is started successfully",
- policy['name'])
- return last_event_id
- except Exception as e:
- LOG.error("Policy %s action is failed to start",
- policy)
- with excutils.save_and_reraise_exception():
- vnf['status'] = constants.ERROR
- self.set_vnf_error_status_reason(
- context, policy['vnf']['id'], str(e))
- _handle_vnf_scaling_post(constants.ERROR)
-
- # wait
- def _vnf_policy_action_wait():
- try:
- LOG.debug("Policy %s action is in progress",
- policy['name'])
- mgmt_ip_address = self._vnf_manager.invoke(
- infra_driver,
- 'scale_wait',
- plugin=self,
- context=context,
- auth_attr=vim_auth,
- policy=policy,
- region_name=region_name,
- last_event_id=last_event_id
- )
- LOG.debug("Policy %s action is completed successfully",
- policy['name'])
- _handle_vnf_scaling_post(constants.ACTIVE, mgmt_ip_address)
- # TODO(kanagaraj-manickam): Add support for config and mgmt
- except Exception as e:
- LOG.error("Policy %s action is failed to complete",
- policy['name'])
- with excutils.save_and_reraise_exception():
- self.set_vnf_error_status_reason(
- context, policy['vnf']['id'], str(e))
- _handle_vnf_scaling_post(constants.ERROR)
-
- _validate_scaling_policy()
-
- vnf = _handle_vnf_scaling_pre()
- policy['instance_id'] = vnf['instance_id']
-
- infra_driver, vim_auth = self._get_infra_driver(context, vnf)
- region_name = vnf.get('placement_attr', {}).get('region_name', None)
- last_event_id = _vnf_policy_action()
- self.spawn_n(_vnf_policy_action_wait)
-
- return policy
-
- def _make_policy_dict(self, vnf, name, policy):
- p = {}
- p['type'] = policy.get('type')
- p['properties'] = policy.get('properties') or policy.get(
- 'triggers') or policy.get('reservation')
- p['vnf'] = vnf
- p['name'] = name
- p['id'] = uuidutils.generate_uuid()
- return p
-
- def get_vnf_policies(
- self, context, vnf_id, filters=None, fields=None):
- vnf = self.get_vnf(context, vnf_id)
- vnfd_tmpl = yaml.safe_load(vnf['vnfd']['attributes']['vnfd'])
- policy_list = []
-
- polices = vnfd_tmpl['topology_template'].get('policies', [])
- for policy_dict in polices:
- for name, policy in policy_dict.items():
- def _add(policy):
- p = self._make_policy_dict(vnf, name, policy)
- p['name'] = name
- policy_list.append(p)
-
- # Check for filters
- if filters.get('name') or filters.get('type'):
- if name == filters.get('name'):
- _add(policy)
- break
- elif policy['type'] == filters.get('type'):
- _add(policy)
- break
- else:
- continue
-
- _add(policy)
-
- return policy_list
-
- def get_vnf_policy(
- self, context, policy_id, vnf_id, fields=None):
- policies = self.get_vnf_policies(context,
- vnf_id,
- filters={'name': policy_id})
- if policies:
- return policies[0]
- else:
- return None
-
- def create_vnf_scale(self, context, vnf_id, scale):
- policy_ = self.get_vnf_policy(context,
- scale['scale']['policy'],
- vnf_id)
- if not policy_:
- raise exceptions.VnfPolicyNotFound(policy=scale['scale']['policy'],
- vnf_id=vnf_id)
- policy_.update({'action': scale['scale']['type']})
- self._handle_vnf_scaling(context, policy_)
-
- return scale['scale']
-
- def get_vnf_policy_by_type(self, context, vnf_id, policy_type=None, fields=None): # noqa
- policies = self.get_vnf_policies(context,
- vnf_id,
- filters={'type': policy_type})
- if policies:
- return policies[0]
-
- raise exceptions.VnfPolicyTypeInvalid(
- type=policy_type, policy=None,
- valid_types=constants.VALID_POLICY_TYPES)
-
- def _validate_alarming_policy(self, context, vnf_id, trigger):
- # validate alarm status
-
- # Trigger will contain only one action in trigger['trigger'] as it
- # filtered in _get_vnf_triggers().
- # Getting action from trigger to decide which process_alarm_for_vnf
- # method will be called.
- if list(trigger['trigger'])[0]\
- in constants.RESERVATION_POLICY_ACTIONS:
- if not self._vnf_reservation_monitor.process_alarm_for_vnf(
- vnf_id, trigger):
- raise exceptions.AlarmUrlInvalid(vnf_id=vnf_id)
- else:
- if not self._vnf_alarm_monitor.process_alarm_for_vnf(
- vnf_id, trigger):
- raise exceptions.AlarmUrlInvalid(vnf_id=vnf_id)
-
- # validate policy action. if action is composite, split it.
- # ex: respawn%notify
- action = trigger['action_name']
- action_list = action.split('%')
- pl_action_dict = dict()
- pl_action_dict['policy_actions'] = dict()
- pl_action_dict['policy_actions']['def_actions'] = list()
- pl_action_dict['policy_actions']['custom_actions'] = dict()
- for action in action_list:
- # validate policy action. if action is composite, split it.
- # ex: SP1-in, SP1-out
- action_ = None
- if action in constants.DEFAULT_ALARM_ACTIONS:
- pl_action_dict['policy_actions']['def_actions'].append(action)
- policy_ = self.get_vnf_policy(context, action, vnf_id)
- if not policy_:
- sp_action = action.split('-')
- if len(sp_action) == 2:
- bk_policy_name = sp_action[0]
- bk_policy_action = sp_action[1]
- policies_ = self.get_vnf_policies(
- context, vnf_id, filters={'name': bk_policy_name})
- if policies_:
- policy_ = policies_[0]
- action_ = bk_policy_action
- if policy_:
- pl_action_dict['policy_actions']['custom_actions'].update(
- {policy_['id']: {'bckend_policy': policy_,
- 'bckend_action': action_}})
-
- LOG.debug("Trigger %s is validated successfully", trigger)
-
- return pl_action_dict
- # validate url
-
- def _get_vnf_triggers(self, context, vnf_id, filters=None, fields=None):
- if filters.get('name') in constants.RESERVATION_POLICY_ACTIONS:
- policy = self.get_vnf_policy_by_type(
- context, vnf_id, policy_type=constants.POLICY_RESERVATION)
- else:
- policy = self.get_vnf_policy_by_type(
- context, vnf_id, policy_type=constants.POLICY_ALARMING)
- triggers = policy['properties']
- vnf_trigger = dict()
- for trigger_name, trigger_dict in triggers.items():
- if trigger_name == filters.get('name'):
- vnf_trigger['trigger'] = {trigger_name: trigger_dict}
- vnf_trigger['vnf'] = policy['vnf']
- break
-
- return vnf_trigger
-
- def get_vnf_trigger(self, context, vnf_id, trigger_name):
- trigger = self._get_vnf_triggers(
- context, vnf_id, filters={'name': trigger_name})
- if not trigger:
- raise exceptions.TriggerNotFound(
- trigger_name=trigger_name,
- vnf_id=vnf_id
- )
- return trigger
-
- def _handle_vnf_monitoring(self, context, trigger):
-
- vnf_dict = trigger['vnf']
- # Multiple actions support
- if trigger.get('policy_actions'):
- policy_actions = trigger['policy_actions']
- if policy_actions.get('def_actions'):
- for action in policy_actions['def_actions']:
- self._vnf_action.invoke(
- action, 'execute_action', plugin=self, context=context,
- vnf_dict=vnf_dict, args={})
- if policy_actions.get('custom_actions'):
- custom_actions = policy_actions['custom_actions']
- for pl_action, pl_action_dict in custom_actions.items():
- bckend_policy = pl_action_dict['bckend_policy']
- bckend_action = pl_action_dict['bckend_action']
- bckend_policy_type = bckend_policy['type']
- if bckend_policy_type == constants.POLICY_SCALING:
- if vnf_dict['status'] != constants.ACTIVE:
- LOG.info(_("Scaling Policy action "
- "skipped due to status "
- "%(status)s for vnf %(vnfid)s") %
- {"status": vnf_dict['status'],
- "vnfid": vnf_dict['id']})
- return
- action = 'autoscaling'
- scale = {}
- scale.setdefault('scale', {})
- scale['scale']['type'] = bckend_action
- scale['scale']['policy'] = bckend_policy['name']
- self._vnf_action.invoke(
- action, 'execute_action', plugin=self,
- context=context, vnf_dict=vnf_dict, args=scale)
-
- def create_vnf_trigger(
- self, context, vnf_id, trigger):
- trigger_ = self.get_vnf_trigger(
- context, vnf_id, trigger['trigger']['policy_name'])
- # action_name before analyzing
- trigger_.update({'action_name': trigger['trigger']['action_name']})
- trigger_.update({'params': trigger['trigger']['params']})
- policy_actions = self._validate_alarming_policy(
- context, vnf_id, trigger_)
- if policy_actions:
- trigger_.update(policy_actions)
- self._handle_vnf_monitoring(context, trigger_)
- return trigger['trigger']
-
- def get_vnf_resources(self, context, vnf_id, fields=None, filters=None):
- vnf_info = self.get_vnf(context, vnf_id)
- infra_driver, vim_auth = self._get_infra_driver(context, vnf_info)
- region_name = vnf_info.get('placement_attr', {}).\
- get('region_name', None)
- if vnf_info['status'] == constants.ACTIVE:
- vnf_details = self._vnf_manager.invoke(infra_driver,
- 'get_resource_info',
- plugin=self,
- context=context,
- vnf_info=vnf_info,
- auth_attr=vim_auth,
- region_name=region_name)
- resources = [{'name': name,
- 'type': info.get('type'),
- 'id': info.get('id')}
- for name, info in vnf_details.items()]
- return resources
- # Raise exception when VNF.status != ACTIVE
- else:
- raise vnfm.VNFInactive(vnf_id=vnf_id,
- message=_(' Cannot fetch details'))
-
- def create_vnf_maintenance(self, context, vnf_id, maintenance):
- _maintenance = self._vnf_maintenance_plugin.validate_maintenance(
- maintenance.copy())
- vnf = self.get_vnf(context, vnf_id)
- _maintenance['vnf'] = vnf
- self._vnf_maintenance_plugin.handle_maintenance(
- self, context, _maintenance)
- policy_action = _maintenance.get('policy_action', '')
- if policy_action:
- self._vnf_action.invoke(
- policy_action['action'], 'execute_action', plugin=self,
- context=context, vnf_dict=vnf, args=policy_action['args'])
- else:
- self._vnf_maintenance_plugin.request(self, context, vnf,
- _maintenance)
- return maintenance['maintenance']
diff --git a/tacker/vnfm/policy_actions/__init__.py b/tacker/vnfm/policy_actions/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/policy_actions/abstract_action.py b/tacker/vnfm/policy_actions/abstract_action.py
deleted file mode 100644
index 056c9c1b6..000000000
--- a/tacker/vnfm/policy_actions/abstract_action.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-
-class AbstractPolicyAction(object, metaclass=abc.ABCMeta):
- @abc.abstractmethod
- def get_type(self):
- """Return one of predefined type of the hosting vnf drivers."""
- pass
-
- @abc.abstractmethod
- def get_name(self):
- """Return a symbolic name for the service VM plugin."""
- pass
-
- @abc.abstractmethod
- def get_description(self):
- pass
-
- @abc.abstractmethod
- def execute_action(self, plugin, context, vnf_dict, args):
- """args: policy is enabled to execute with additional arguments."""
- pass
diff --git a/tacker/vnfm/policy_actions/autoscaling/__init__.py b/tacker/vnfm/policy_actions/autoscaling/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/policy_actions/autoscaling/autoscaling.py b/tacker/vnfm/policy_actions/autoscaling/autoscaling.py
deleted file mode 100644
index 7e2a84109..000000000
--- a/tacker/vnfm/policy_actions/autoscaling/autoscaling.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from oslo_log import log as logging
-
-from tacker.plugins.common import constants
-from tacker.vnfm.policy_actions import abstract_action
-from tacker.vnfm import utils as vnfm_utils
-
-LOG = logging.getLogger(__name__)
-
-
-class VNFActionAutoscaling(abstract_action.AbstractPolicyAction):
- def get_type(self):
- return 'autoscaling'
-
- def get_name(self):
- return 'autoscaling'
-
- def get_description(self):
- return 'Tacker VNF auto-scaling policy'
-
- def execute_action(self, plugin, context, vnf_dict, args):
- vnf_id = vnf_dict['id']
- vnfm_utils.log_events(context, vnf_dict,
- constants.RES_EVT_MONITOR,
- "ActionAutoscalingHeat invoked")
- plugin.create_vnf_scale(context, vnf_id, args)
diff --git a/tacker/vnfm/policy_actions/log/__init__.py b/tacker/vnfm/policy_actions/log/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/policy_actions/log/log.py b/tacker/vnfm/policy_actions/log/log.py
deleted file mode 100644
index fb0d56c14..000000000
--- a/tacker/vnfm/policy_actions/log/log.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from oslo_log import log as logging
-
-from tacker.plugins.common import constants
-from tacker.vnfm.policy_actions import abstract_action
-from tacker.vnfm import utils as vnfm_utils
-
-LOG = logging.getLogger(__name__)
-
-
-class VNFActionLog(abstract_action.AbstractPolicyAction):
- def get_type(self):
- return 'log'
-
- def get_name(self):
- return 'log'
-
- def get_description(self):
- return 'Tacker VNF logging policy'
-
- def execute_action(self, plugin, context, vnf_dict, args):
- vnf_id = vnf_dict['id']
- LOG.error('vnf %s dead', vnf_id)
- vnfm_utils.log_events(context, vnf_dict,
- constants.RES_EVT_MONITOR,
- "ActionLogOnly invoked")
-
-
-class VNFActionLogAndKill(abstract_action.AbstractPolicyAction):
- def get_type(self):
- return 'log_and_kill'
-
- def get_name(self):
- return 'log_and_kill'
-
- def get_description(self):
- return 'Tacker VNF log_and_kill policy'
-
- def execute_action(self, plugin, context, vnf_dict, args):
- vnfm_utils.log_events(context, vnf_dict,
- constants.RES_EVT_MONITOR,
- "ActionLogAndKill invoked")
- vnf_id = vnf_dict['id']
- if plugin._mark_vnf_dead(vnf_dict['id']):
- if vnf_dict['attributes'].get('monitoring_policy'):
- plugin._vnf_monitor.mark_dead(vnf_dict['id'])
- plugin.delete_vnf(context, vnf_id)
- LOG.error('vnf %s dead', vnf_id)
diff --git a/tacker/vnfm/policy_actions/respawn/__init__.py b/tacker/vnfm/policy_actions/respawn/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/policy_actions/respawn/respawn.py b/tacker/vnfm/policy_actions/respawn/respawn.py
deleted file mode 100644
index 1a89d0b81..000000000
--- a/tacker/vnfm/policy_actions/respawn/respawn.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from oslo_log import log as logging
-
-from tacker.plugins.common import constants
-from tacker.vnfm.infra_drivers.openstack import heat_client as hc
-from tacker.vnfm.policy_actions import abstract_action
-from tacker.vnfm import utils as vnfm_utils
-from tacker.vnfm import vim_client
-
-LOG = logging.getLogger(__name__)
-
-
-class VNFActionRespawn(abstract_action.AbstractPolicyAction):
- def get_type(self):
- return 'respawn'
-
- def get_name(self):
- return 'respawn'
-
- def get_description(self):
- return 'Tacker VNF respawning policy'
-
- def execute_action(self, plugin, context, vnf_dict, args):
- vnf_id = vnf_dict['id']
- LOG.info('vnf %s is dead and needs to be respawned', vnf_id)
- attributes = vnf_dict['attributes']
- vim_id = vnf_dict['vim_id']
-
- def _update_failure_count():
- failure_count = int(attributes.get('failure_count', '0')) + 1
- failure_count_str = str(failure_count)
- LOG.debug("vnf %(vnf_id)s failure count %(failure_count)s",
- {'vnf_id': vnf_id, 'failure_count': failure_count_str})
- attributes['failure_count'] = failure_count_str
- attributes['dead_instance_id_' + failure_count_str] = vnf_dict[
- 'instance_id']
-
- def _fetch_vim(vim_uuid):
- vim_res = vim_client.VimClient().get_vim(context, vim_uuid)
- return vim_res
-
- def _delete_heat_stack(vim_auth):
- placement_attr = vnf_dict.get('placement_attr', {})
- region_name = placement_attr.get('region_name')
- heatclient = hc.HeatClient(auth_attr=vim_auth,
- region_name=region_name)
- heatclient.delete(vnf_dict['instance_id'])
- LOG.debug("Heat stack %s delete initiated",
- vnf_dict['instance_id'])
- vnfm_utils.log_events(context, vnf_dict,
- constants.RES_EVT_MONITOR,
- "ActionRespawnHeat invoked")
-
- def _respawn_vnf():
- update_vnf_dict = plugin.create_vnf_sync(context, vnf_dict)
- LOG.info('respawned new vnf %s', update_vnf_dict['id'])
- plugin.config_vnf(context, update_vnf_dict)
- return update_vnf_dict
-
- if plugin._mark_vnf_dead(vnf_dict['id']):
- _update_failure_count()
- vim_res = _fetch_vim(vim_id)
- if vnf_dict['attributes'].get('monitoring_policy'):
- plugin._vnf_monitor.mark_dead(vnf_dict['id'])
- _delete_heat_stack(vim_res['vim_auth'])
- updated_vnf = _respawn_vnf()
- plugin.add_vnf_to_monitor(context, updated_vnf)
- LOG.debug("VNF %s added to monitor thread",
- updated_vnf['id'])
- if vnf_dict['attributes'].get('alarming_policy'):
- _delete_heat_stack(vim_res['vim_auth'])
- vnf_dict['attributes'].pop('alarming_policy')
- _respawn_vnf()
diff --git a/tacker/vnfm/policy_actions/vdu_autoheal/__init__.py b/tacker/vnfm/policy_actions/vdu_autoheal/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tacker/vnfm/policy_actions/vdu_autoheal/vdu_autoheal.py b/tacker/vnfm/policy_actions/vdu_autoheal/vdu_autoheal.py
deleted file mode 100644
index 94f87d52f..000000000
--- a/tacker/vnfm/policy_actions/vdu_autoheal/vdu_autoheal.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from oslo_log import log as logging
-import yaml
-
-from tacker import objects
-from tacker.vnfm.policy_actions import abstract_action
-
-
-LOG = logging.getLogger(__name__)
-
-
-class VNFActionVduAutoheal(abstract_action.AbstractPolicyAction):
- def get_type(self):
- return 'vdu_autoheal'
-
- def get_name(self):
- return 'vdu_autoheal'
-
- def get_description(self):
- return 'Tacker VNF vdu_autoheal policy'
-
- def execute_action(self, plugin, context, vnf_dict, args):
- vdu_name = args.get('vdu_name')
- stack_id = args.get('stack_id', vnf_dict['instance_id'])
- heat_tpl = args.get('heat_tpl', 'heat_template')
- cause = args.get('cause', [])
- if vdu_name is None:
- LOG.error("VDU resource of vnf '%s' is not present for "
- "autoheal." % vnf_dict['id'])
- return
-
- def _get_vdu_resources():
- """Get all the resources linked to the VDU.
-
- Returns: resource list for eg. ['VDU1', CP1]
- """
- resource_list = [vdu_name]
- heat_template = yaml.safe_load(vnf_dict['attributes'].get(
- heat_tpl))
- vdu_resources = heat_template['resources'].get(vdu_name)
- cp_resources = vdu_resources['properties'].get('networks')
- for resource in cp_resources:
- resource_list.append(resource['port'].get('get_resource'))
-
- return resource_list
-
- if not cause or type(cause) is not list:
- cause = ["Unable to reach while monitoring resource: '%s'",
- "Failed to monitor VDU resource '%s'"]
- resource_list = _get_vdu_resources()
- additional_params = []
- for resource in resource_list:
- additional_params_obj = objects.HealVnfAdditionalParams(
- parameter=resource, cause=[cause[0] % resource])
- additional_params.append(additional_params_obj)
-
- heal_request_data_obj = objects.HealVnfRequest(
- stack_id=stack_id,
- cause=(cause[-1] % vdu_name),
- legacy_additional_params=additional_params)
-
- plugin.heal_vnf(context, vnf_dict['id'], heal_request_data_obj)
diff --git a/tacker/vnfm/utils.py b/tacker/vnfm/utils.py
deleted file mode 100644
index 7003baf2b..000000000
--- a/tacker/vnfm/utils.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2018, OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Borrowed from nova code base, more utilities will be added/borrowed as and
-# when needed.
-
-"""Utilities and helper functions."""
-
-from oslo_utils import timeutils
-
-from tacker.db.common_services import common_services_db_plugin
-from tacker.plugins.common import constants
-
-
-def log_events(context, vnf_dict, evt_type, evt_details):
- _cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()
- _cos_db_plg.create_event(context, res_id=vnf_dict['id'],
- res_type=constants.RES_TYPE_VNF,
- res_state=vnf_dict['status'],
- evt_type=evt_type,
- tstamp=timeutils.utcnow(),
- details=evt_details)
diff --git a/test-requirements.txt b/test-requirements.txt
index 9942e35e2..c56cde1a6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -18,7 +18,6 @@ tempest>=22.0.0 # Apache-2.0
testtools>=2.2.0 # MIT
WebTest>=2.0.27 # MIT
python-barbicanclient>=4.5.2 # Apache-2.0
-python-blazarclient>=1.0.1 # Apache-2.0
requests-mock>=1.2.0 # Apache-2.0
PyMySQL>=0.10.1 # MIT
freezegun>=1.2.2 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index f8479585c..18d42ba0e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,12 +30,6 @@ setenv = {[testenv]setenv}
commands =
stestr --test-path=./tacker/tests/functional run --slowest --concurrency 2 {posargs}
-[testenv:dsvm-functional-legacy]
-setenv = {[testenv]setenv}
-
-commands =
- stestr --test-path=./tacker/tests/functional/legacy run --slowest --concurrency 1 {posargs}
-
[testenv:dsvm-functional-sol]
setenv = {[testenv]setenv}