Placement enhancement enables AZ reselection

This patch enables the availability zone to be reselected by retrying
the stack operation when an "instantiate", "scale", or "heal" operation
in the v2 API fails because the availability zone is unavailable.

Note that a precondition for using these functions is to use
StandardUserData as the UserData class.

Implements: blueprint enhance-placement
Change-Id: Icc9eb7a8fffbf35d2e005a9703dcefe66f097584
This commit is contained in:
Ken Fujimoto 2022-12-14 01:38:09 +00:00
parent d300f6d750
commit cb069e5dbb
24 changed files with 1478 additions and 7 deletions

View File

@ -606,6 +606,30 @@
keycloak_http_port: 8080
keycloak_https_port: 8443
- job:
name: tacker-functional-devstack-multinode-sol-v2-az-retry
parent: tacker-functional-devstack-multinode-sol-v2
description: |
Multinodes job for retry of AZ selection in SOL V2 devstack-based functional tests
host-vars:
controller-tacker:
devstack_local_conf:
post-config:
$TACKER_CONF:
v2_nfvo:
test_grant_zone_list: az-1
v2_vnfm:
placement_fallback_best_effort: true
server_notification:
server_notification: true
devstack_services:
n-cpu: true
placement-client: true
tox_envlist: dsvm-functional-sol-v2-az-retry
vars:
setup_multi_az: true
controller_tacker_hostname: "{{ hostvars['controller-tacker']['ansible_hostname'] }}"
- job:
name: tacker-compliance-devstack-multinode-sol
parent: tacker-functional-devstack-multinode-legacy
@ -640,4 +664,5 @@
- tacker-functional-devstack-multinode-sol-multi-tenant
- tacker-functional-devstack-multinode-sol-kubernetes-multi-tenant
- tacker-functional-devstack-kubernetes-oidc-auth
- tacker-functional-devstack-multinode-sol-v2-az-retry
- tacker-compliance-devstack-multinode-sol

View File

@ -11,6 +11,8 @@
when: prometheus_setup is defined and prometheus_setup | bool
- role: setup-multi-tenant-vim
when: setup_multi_tenant is defined and setup_multi_tenant | bool
- role: setup-multi-az
when: setup_multi_az is defined and setup_multi_az | bool
- role: bindep
bindep_profile: test
bindep_dir: "{{ zuul_work_dir }}"

View File

@ -0,0 +1,3 @@
aggregate_name: aggregate-1
zone_name: az-1
flavor_name: sample4G

View File

@ -0,0 +1,17 @@
- block:
- name: Create OpenStack availability zone
shell: |
openstack --os-cloud devstack-admin aggregate create \
{{ aggregate_name }}
openstack --os-cloud devstack-admin aggregate set \
--zone {{ zone_name }} {{ aggregate_name }}
openstack --os-cloud devstack-admin aggregate add host \
{{ aggregate_name }} {{ controller_tacker_hostname }}
- name: Create OpenStack flavor
shell: |
openstack --os-cloud devstack-admin \
flavor create --ram 4096 --disk 4 --vcpus 2 {{ flavor_name }}
when:
- inventory_hostname == 'controller'

View File

@ -34,7 +34,7 @@ VNFM_OPTS = [
help=_('Max content length for list APIs.')),
cfg.IntOpt('openstack_vim_stack_create_timeout',
default=20,
help=_('Timeout (in minuts) of heat stack creation.')),
help=_('Timeout (in minutes) of heat stack creation.')),
cfg.IntOpt('kubernetes_vim_rsc_wait_timeout',
default=500,
help=_('Timeout (second) of k8s res creation.')),
@ -64,6 +64,20 @@ VNFM_OPTS = [
default=0, # 0 means no paging
help=_('Paged response size of the query result for '
'VNF PM job.')),
cfg.BoolOpt('placement_fallback_best_effort',
default=False,
help=_('If True, fallbackBestEffort setting is enabled '
'and run Availability Zone reselection.')),
cfg.IntOpt('placement_az_select_retry',
default=0, # 0 means unlimited number of retries
help=_('Number of retries to reselect Availability Zone.')),
cfg.StrOpt('placement_az_resource_error',
default=(r'Resource CREATE failed: ResourceInError: '
r'resources\.(.*)\.(.*): (.*)|'
r'Resource UPDATE failed: resources\.(.*): '
r'Resource CREATE failed: ResourceInError: '
r'resources\.(.*): (.*)'),
help=_('Error message for Availability Zone reselection.')),
# NOTE: This is for test use since it is convenient to be able to delete
# under development.
cfg.BoolOpt('test_enable_lcm_op_occ_delete',

View File

@ -126,6 +126,8 @@ def _make_affected_vnfc(vnfc, change_type, strgs):
changeType=change_type,
computeResource=vnfc.computeResource
)
if vnfc.obj_attr_is_set('zoneId'):
affected_vnfc.zoneId = vnfc.zoneId
if vnfc.obj_attr_is_set('metadata'):
affected_vnfc.metadata = vnfc.metadata
if vnfc.obj_attr_is_set('vnfcCpInfo'):

View File

@ -695,7 +695,9 @@ class VnfLcmDriverV2(object):
plc_const = objects.PlacementConstraintV1(
affinityOrAntiAffinity=key,
scope=scope.upper(),
resource=res_refs)
resource=res_refs,
fallbackBestEffort=(
CONF.v2_vnfm.placement_fallback_best_effort))
plc_consts.append(plc_const)
if plc_consts:

View File

@ -0,0 +1,52 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.sol_refactored.common import http_client
LOG = logging.getLogger(__name__)
class NovaClient(object):
    """Thin client for the Nova (compute) API, used for AZ queries.

    Authentication follows the same KeystonePasswordAuthHandle pattern
    as the other VIM clients, using credentials taken from the VIM
    connection info.
    """

    def __init__(self, vim_info):
        auth = http_client.KeystonePasswordAuthHandle(
            auth_url=vim_info.interfaceInfo['endpoint'],
            username=vim_info.accessInfo['username'],
            password=vim_info.accessInfo['password'],
            project_name=vim_info.accessInfo['project'],
            user_domain_name=vim_info.accessInfo['userDomain'],
            project_domain_name=vim_info.accessInfo['projectDomain']
        )
        self.client = http_client.HttpClient(auth,
                                             service_type='compute')

    def get_zone(self):
        """Return the set of availability zone names usable for retry.

        A zone qualifies when at least one of its hosts runs a
        'nova-compute' service and the zone itself is available.
        """
        path = "os-availability-zone/detail"
        resp, body = self.client.do_request(path, "GET",
                                            expected_status=[200])

        def _use_zone_for_retry(zone):
            # Each value of zone['hosts'] maps service name -> service
            # info, so membership test replaces the manual key loop.
            has_compute = any('nova-compute' in host_info
                              for host_info in zone['hosts'].values())
            return has_compute and zone['zoneState']['available']

        zone_list = {zone['zoneName'] for zone in body['availabilityZoneInfo']
                     if _use_zone_for_retry(zone)}
        return zone_list

View File

@ -17,6 +17,7 @@
import json
import os
import pickle
import re
import subprocess
import yaml
@ -29,6 +30,7 @@ from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.infra_drivers.openstack import heat_utils
from tacker.sol_refactored.infra_drivers.openstack import nova_utils
from tacker.sol_refactored.infra_drivers.openstack import userdata_default
from tacker.sol_refactored import objects
from tacker.sol_refactored.objects.v2 import fields as v2fields
@ -89,17 +91,28 @@ class Openstack(object):
def instantiate(self, req, inst, grant_req, grant, vnfd):
# make HOT
fields = self._make_hot(req, inst, grant_req, grant, vnfd)
vdu_ids = self._get_vdu_id_from_fields(fields)
# create or update stack
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
stack_name = heat_utils.get_stack_name(inst)
stack_id = heat_client.get_stack_id(stack_name)
if stack_id is None:
fields['stack_name'] = stack_name
stack_id = heat_client.create_stack(fields)
try:
stack_id = heat_client.create_stack(fields)
except sol_ex.StackOperationFailed as ex:
self._update_stack_retry(heat_client, fields, inst, stack_id,
ex, vim_info, vdu_ids)
stack_id = heat_client.get_stack_id(stack_name)
else:
heat_client.update_stack(f'{stack_name}/{stack_id}', fields)
try:
heat_client.update_stack(f'{stack_name}/{stack_id}', fields)
except sol_ex.StackOperationFailed as ex:
self._update_stack_retry(heat_client, fields, inst, stack_id,
ex, vim_info, vdu_ids)
# make instantiated_vnf_info
self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
@ -158,6 +171,7 @@ class Openstack(object):
def scale(self, req, inst, grant_req, grant, vnfd):
# make HOT
fields = self._make_hot(req, inst, grant_req, grant, vnfd)
vdu_ids = self._get_vdu_id_from_fields(fields)
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
@ -182,7 +196,14 @@ class Openstack(object):
# update stack
stack_name = heat_utils.get_stack_name(inst)
fields = self._update_fields(heat_client, stack_name, fields)
heat_client.update_stack(stack_name, fields)
try:
heat_client.update_stack(stack_name, fields)
except sol_ex.StackOperationFailed as ex:
if req.type == 'SCALE_OUT':
self._update_stack_retry(heat_client, fields, inst, None, ex,
vim_info, vdu_ids)
else:
raise ex
# make instantiated_vnf_info
self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
@ -285,7 +306,13 @@ class Openstack(object):
# stack delete and create
heat_client.delete_stack(stack_name)
stack_id = heat_client.create_stack(fields)
try:
stack_id = heat_client.create_stack(fields)
except sol_ex.StackOperationFailed as ex:
vdu_ids = self._get_vdu_id_from_grant_req(grant_req, inst)
self._update_stack_retry(heat_client, fields, inst, None,
ex, vim_info, vdu_ids)
stack_id = heat_client.get_stack_id(stack_name)
else:
# mark unhealthy to target resources.
# As the target resources has been already selected in
@ -311,7 +338,13 @@ class Openstack(object):
storage_info.virtualStorageDescId)
# update stack
heat_client.update_stack(stack_name, fields)
try:
heat_client.update_stack(stack_name, fields)
except sol_ex.StackOperationFailed as ex:
vdu_ids = self._get_vdu_id_from_grant_req(grant_req, inst)
self._update_stack_retry(heat_client, fields, inst, None,
ex, vim_info, vdu_ids)
stack_id = inst.instantiatedVnfInfo.metadata['stack_id']
# make instantiated_vnf_info
@ -608,6 +641,90 @@ class Openstack(object):
self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
heat_client, is_rollback=True)
def _update_stack_retry(self, heat_client, fields, inst, stack_id,
                        error_ex, vim_info, vdu_ids):
    """Retry a failed stack operation, reselecting availability zones.

    Called after a stack create/update raised StackOperationFailed.
    Replaces the AZ (``locationConstraints``) of the targeted VDUs that
    sit in the failed zone and re-runs ``update_stack`` until it
    succeeds or the retry budget is exhausted.

    :param heat_client: HeatClient used for the retries.
    :param fields: HOT fields; ``parameters.nfv.VDU`` holds per-VDU
        parameters including ``locationConstraints`` (the AZ).
    :param inst: VNF instance object.
    :param stack_id: stack id used to build the stack name (may be None).
    :param error_ex: the StackOperationFailed that triggered the retry;
        re-raised whenever the retry cannot proceed.
    :param vim_info: VIM connection info used to query Nova for AZs.
    :param vdu_ids: ids of the VDUs targeted by the current operation;
        only these may have their AZ replaced.
    :raises sol_ex.StackOperationFailed: when fallback is disabled, the
        failure is not AZ-related, no zone is left, or retries run out.
    """
    # AZ reselection is opt-in via config; otherwise just propagate.
    if not CONF.v2_vnfm.placement_fallback_best_effort:
        raise error_ex

    vdu_dict = fields['parameters']['nfv']['VDU']
    # Map the error detail to the AZ of the failing VDU; None means the
    # failure is not one we can recover from by moving zones.
    failed_zone = self._check_and_get_failed_zone(
        error_ex.detail, vdu_dict)
    if failed_zone is None:
        raise error_ex

    stack_name = heat_utils.get_stack_name(inst, stack_id)
    nova_client = nova_utils.NovaClient(vim_info)
    # Candidate zones: available AZs that run nova-compute.
    zone_list = nova_client.get_zone()
    # Zones already used by this instance, from the HOT parameters ...
    used_zone_list = {parameters.get('locationConstraints')
                      for parameters in vdu_dict.values()
                      if parameters.get('locationConstraints') is not None}
    # ... and from already-deployed VNFCs, if any exist yet.
    if (inst.obj_attr_is_set('instantiatedVnfInfo') and
            inst.instantiatedVnfInfo.obj_attr_is_set('vnfcResourceInfo')):
        used_zone_list |= {vnfc.metadata.get('zone') for vnfc
                           in inst.instantiatedVnfInfo.vnfcResourceInfo
                           if vnfc.metadata.get('zone') is not None}
    # Prefer zones not used yet; fall back to used zones minus the one
    # that just failed.
    available_zone_list = zone_list - used_zone_list
    used_zone_list.discard(failed_zone)
    # A configured value of 0 means "unlimited", approximated here by
    # one attempt per known zone.
    retry_count = (CONF.v2_vnfm.placement_az_select_retry
                   if CONF.v2_vnfm.placement_az_select_retry
                   else len(zone_list))
    while retry_count > 0:
        if available_zone_list:
            new_zone = available_zone_list.pop()
        elif used_zone_list:
            new_zone = used_zone_list.pop()
        else:
            message = ("Availability Zone reselection failed. "
                       "No Availability Zone available.")
            LOG.error(message)
            raise error_ex
        # Move every targeted VDU currently placed in the failed zone.
        for vdu_id, parameters in vdu_dict.items():
            if vdu_id in vdu_ids:
                if parameters.get('locationConstraints') == failed_zone:
                    parameters['locationConstraints'] = new_zone
        LOG.debug("stack fields: %s", fields)
        try:
            heat_client.update_stack(stack_name, fields)
            return
        except sol_ex.StackOperationFailed as ex:
            # The retry may fail in the new zone too; check whether the
            # new failure is still AZ-related and keep going if so.
            failed_zone = self._check_and_get_failed_zone(
                ex.detail, vdu_dict)
            if failed_zone is None:
                raise ex
            retry_count -= 1
            error_ex = ex
    else:
        # while-else: only reached when the retry budget hits zero.
        message = ("Availability Zone reselection failed. "
                   "Reached the retry count limit.")
        LOG.error(message)
        raise error_ex
def _check_and_get_failed_zone(self, ex_detail, vdu_dict):
    """Return the AZ of the VDU named in *ex_detail*, or None.

    None means the error detail does not match the configured
    AZ-resource-error pattern (not recoverable by AZ reselection), or
    the pattern matched but no ``resources.<vdu>-<idx>`` token could
    be extracted.
    """
    # Guard clause: not an AZ-related resource error at all.
    if not re.match(CONF.v2_vnfm.placement_az_resource_error, ex_detail):
        return None
    match_result = re.search(r'resources\.((.*)-([0-9]+))', ex_detail)
    if match_result is None:
        LOG.warning("CONF v2_vnfm.placement_az_resource_error is "
                    "invalid. Please check.")
        return None
    vdu_id = match_result.group(1)
    return vdu_dict.get(vdu_id, {}).get('locationConstraints')
def _get_vdu_id_from_fields(self, fields):
vdu_dict = fields['parameters']['nfv']['VDU']
return set(vdu_dict.keys())
def _get_vdu_id_from_grant_req(self, grant_req, inst):
    """Return indexed VDU ids of the compute resources being removed.

    Collects the resource ids of COMPUTE remove-resources from the
    grant request, then maps the matching VNFCs of the instance to
    their ``<vduId>-<vdu_idx>`` form via ``_rsc_with_idx``.
    """
    # A set gives O(1) membership tests below; the original list was
    # only ever used for `in` checks.
    target_res_ids = {res_def.resource.resourceId
                      for res_def in grant_req.removeResources
                      if res_def.type == 'COMPUTE'}
    return {_rsc_with_idx(vnfc.vduId, vnfc.metadata.get('vdu_idx'))
            for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo
            if vnfc.computeResource.resourceId in target_res_ids}
def _make_hot(self, req, inst, grant_req, grant, vnfd, is_rollback=False):
if grant_req.operation == v2fields.LcmOperationType.INSTANTIATE:
flavour_id = req.flavourId
@ -1100,6 +1217,7 @@ class Openstack(object):
nfv_dict['VDU'])
if zone is not None:
metadata['zone'] = zone
vnfc_res_info.zoneId = zone
def _make_instantiated_vnf_info(self, req, inst, grant_req, grant, vnfd,
heat_client, is_rollback=False, stack_id=None):

View File

@ -0,0 +1,180 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from tacker.tests.functional.sol_v2_common import paramgen
from tacker.tests.functional.sol_v2_common import test_vnflcm_basic_common
class AzRetryTest(test_vnflcm_basic_common.CommonVnfLcmTest):
    """Functional tests for AZ reselection retry with StandardUserData.

    Requires a devstack environment configured with
    ``placement_fallback_best_effort`` and the multi-AZ setup role
    (zone 'az-1' plus the default 'nova' zone) — see the matching Zuul
    job definition.
    """

    @classmethod
    def setUpClass(cls):
        super(AzRetryTest, cls).setUpClass()
        cur_dir = os.path.dirname(__file__)
        # tacker/tests/functional/sol_v2_az_retry(here)
        #       /etc
        image_dir = os.path.join(
            cur_dir, "../../etc/samples/etsi/nfv/common/Files/images")
        image_file = "cirros-0.5.2-x86_64-disk.img"
        image_path = os.path.abspath(os.path.join(image_dir, image_file))

        # tacker/tests/functional/sol_v2_az_retry(here)
        #       /sol_refactored
        userdata_dir = os.path.join(
            cur_dir, "../../../sol_refactored/infra_drivers/openstack")
        userdata_file = "userdata_standard.py"
        userdata_path = os.path.abspath(
            os.path.join(userdata_dir, userdata_file))

        # for update_stack_retry test
        pkg_path_1 = os.path.join(cur_dir,
            "../sol_v2_common/samples/userdata_standard_az_retry")
        cls.vnf_pkg_1, cls.vnfd_id_1 = cls.create_vnf_package(
            pkg_path_1, image_path=image_path, userdata_path=userdata_path)

    @classmethod
    def tearDownClass(cls):
        super(AzRetryTest, cls).tearDownClass()
        cls.delete_vnf_package(cls.vnf_pkg_1)

    def setUp(self):
        super().setUp()

    def _get_vdu_indexes(self, inst, vdu):
        # Set of 'vdu_idx' metadata values for all VNFCs of the given VDU.
        return {
            vnfc['metadata'].get('vdu_idx')
            for vnfc in inst['instantiatedVnfInfo']['vnfcResourceInfo']
            if vnfc['vduId'] == vdu
        }

    def _get_vnfc_by_vdu_index(self, inst, vdu, index):
        # Returns None implicitly when no VNFC matches.
        for vnfc in inst['instantiatedVnfInfo']['vnfcResourceInfo']:
            if (vnfc['vduId'] == vdu and
                    vnfc['metadata'].get('vdu_idx') == index):
                return vnfc

    def _get_vnfc_zone(self, inst, vdu, index):
        # Zone recorded in VNFC metadata by the openstack infra driver.
        vnfc = self._get_vnfc_by_vdu_index(inst, vdu, index)
        return vnfc['metadata'].get('zone')

    def _delete_instance(self, inst_id):
        # Delete may transiently return 409 right after an LCM operation
        # completes (lock not yet released); retry a few times.
        for _ in range(3):
            resp, body = self.delete_vnf_instance(inst_id)
            if resp.status_code == 204:  # OK
                return
            elif resp.status_code == 409:
                # may happen. there is a bit time between lcmocc become
                # COMPLETED and lock of terminate is freed.
                time.sleep(3)
            else:
                break
        self.assertTrue(False)

    def test_update_stack_retry(self):
        """Test _update_stack_retry function using StandardUserData

        * Note:
          This test focuses on recreating the VNFC in another AZ
          if the original AZ is not available.

        * About LCM operations:
          This test includes the following operations.
          If a VNFC already landed in the 'nova' zone, no further
          scale-out is run. Otherwise, scale out is repeated (up to
          4 times) and the availability zone is checked each time.
          - Create VNF instance
          - 1. Instantiate VNF instance
          - Show VNF instance / check
          - 2. Scale out operation
          - Show VNF instance / check
          - Terminate VNF instance
          - Delete VNF instance
        """
        net_ids = self.get_network_ids(['net0', 'net1', 'net_mgmt'])
        subnet_ids = self.get_subnet_ids(['subnet0', 'subnet1'])

        vdu_idx = 0
        expect_vdu_idx_num = {0}
        inst_result = []
        # Set to the maximum number of VNFC instances
        MAX_SCALE_COUNT = 4

        # Create VNF instance
        create_req = paramgen.sample6_create(self.vnfd_id_1)
        resp, body = self.create_vnf_instance(create_req)
        self.assertEqual(201, resp.status_code)
        inst_id = body['id']

        # 1. Instantiate VNF instance
        instantiate_req = paramgen.sample6_instantiate(
            net_ids, subnet_ids, self.auth_url)
        resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
        self.assertEqual(202, resp.status_code)
        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # Show VNF instance
        resp, inst = self.show_vnf_instance(inst_id)
        inst_result.append(inst)
        self.assertEqual(200, resp.status_code)

        # check number of VDUs and indexes
        self.assertEqual(expect_vdu_idx_num,
                         self._get_vdu_indexes(inst_result[vdu_idx], 'VDU1'))

        # Scale out until a VNFC lands in the fallback zone 'nova'
        # (proving AZ reselection happened) or the scale limit is hit.
        while (self._get_vnfc_zone(
                inst_result[vdu_idx], 'VDU1', vdu_idx) != 'nova'
                and vdu_idx < MAX_SCALE_COUNT):
            vdu_idx += 1
            expect_vdu_idx_num.add(vdu_idx)

            # 2. Scale out operation
            scale_out_req = paramgen.sample6_scale_out()
            resp, body = self.scale_vnf_instance(inst_id, scale_out_req)
            self.assertEqual(202, resp.status_code)
            lcmocc_id = os.path.basename(resp.headers['Location'])
            self.wait_lcmocc_complete(lcmocc_id)

            # Show VNF instance
            resp, inst = self.show_vnf_instance(inst_id)
            inst_result.append(inst)
            self.assertEqual(200, resp.status_code)

            # check number of VDUs and indexes
            self.assertEqual(
                expect_vdu_idx_num,
                self._get_vdu_indexes(inst_result[vdu_idx], 'VDU1'))

        # check zone of VDUs
        self.assertEqual('nova',
                         self._get_vnfc_zone(inst_result[vdu_idx], 'VDU1', vdu_idx))

        # Terminate VNF instance
        terminate_req = paramgen.sample6_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # Delete VNF instance
        self._delete_instance(inst_id)

View File

@ -1340,3 +1340,84 @@ def sample5_terminate():
return {
"terminationType": "FORCEFUL"
}
# sample6 is for retry AZ selection test of StandardUserData
#
def sample6_create(vnfd_id):
    """Create-request body for the sample6 (AZ retry) VNF instance."""
    req = {"vnfdId": vnfd_id}
    req["vnfInstanceName"] = "sample6"
    req["vnfInstanceDescription"] = "test for retry of AZ selection"
    return req
def sample6_terminate():
    """Terminate-request body (forceful termination) for sample6."""
    return dict(terminationType="FORCEFUL")
def sample6_instantiate(net_ids, subnet_ids, auth_url):
    """Instantiate-request body for sample6 (AZ selection retry test).

    ``subnet_ids`` is accepted for signature parity with the other
    sampleN_instantiate helpers; this flavour does not reference it.
    """
    cp_config = {
        "VDU1_CP1_1": {
            "cpProtocolData": [{
                "layerProtocol": "IP_OVER_ETHERNET",
                "ipOverEthernet": {
                    "ipAddresses": [{
                        "type": "IPV4",
                        "numDynamicAddresses": 1}]}}]}
    }
    ext_vl_1 = {
        "id": "ext_vl_id_net1",
        "resourceId": net_ids['net1'],
        "extCps": [
            {
                "cpdId": "VDU1_CP1",
                "cpConfig": cp_config
            }
        ]
    }
    vim_info = {
        "vimType": "ETSINFV.OPENSTACK_KEYSTONE.V_3",
        "vimId": uuidutils.generate_uuid(),
        "interfaceInfo": {"endpoint": auth_url},
        "accessInfo": {
            "username": "nfv_user",
            "region": "RegionOne",
            "password": "devstack",
            "project": "nfv",
            "projectDomain": "Default",
            "userDomain": "Default"
        }
    }
    return {
        "flavourId": "simple",
        "instantiationLevelId": "instantiation_level_1",
        "extVirtualLinks": [ext_vl_1],
        "extManagedVirtualLinks": [
            {
                "id": "ext_managed_vl_1",
                "vnfVirtualLinkDescId": "internalVL1",
                "resourceId": net_ids['net_mgmt']
            },
        ],
        "vimConnectionInfo": {"vim1": vim_info},
        "additionalParams": {
            "lcm-operation-user-data": "./UserData/userdata_standard.py",
            "lcm-operation-user-data-class": "StandardUserData"
        }
    }
def sample6_scale_out():
    """Scale-out request body (one step of VDU1_scale) for sample6."""
    additional_params = {
        "lcm-operation-user-data": "./UserData/userdata_standard.py",
        "lcm-operation-user-data-class": "StandardUserData"
    }
    return {
        "type": "SCALE_OUT",
        "aspectId": "VDU1_scale",
        "numberOfSteps": 1,
        "additionalParams": additional_params
    }

View File

@ -0,0 +1,52 @@
heat_template_version: 2013-05-23
description: 'VDU1 HOT for Sample VNF'
parameters:
flavor:
type: string
image-VDU1:
type: string
zone:
type: string
net1:
type: string
net2:
type: string
net3:
type: string
resources:
VDU1:
type: OS::Nova::Server
properties:
flavor: { get_param: flavor }
name: VDU1
image: { get_param: image-VDU1 }
networks:
- port:
get_resource: VDU1_CP1
# replace the following line to Port ID when extManagedVLs' Ports are
# specified in InstantiateVnfRequest
- port:
get_resource: VDU1_CP2
- port:
get_resource: VDU1_CP3
availability_zone: { get_param: zone }
# extVL without FixedIP or with numDynamicAddresses
VDU1_CP1:
type: OS::Neutron::Port
properties:
network: { get_param: net1 }
# CPs of internal VLs are deleted when extManagedVLs and port are
# specified in InstantiateVnfRequest
VDU1_CP2:
type: OS::Neutron::Port
properties:
network: { get_param: net2 }
VDU1_CP3:
type: OS::Neutron::Port
properties:
network: { get_param: net3 }

View File

@ -0,0 +1,46 @@
heat_template_version: 2013-05-23
description: 'For Test of AZ selection retry: sample6'
parameters:
nfv:
type: json
resources:
VDU1:
type: VDU1.yaml
properties:
flavor: { get_param: [ nfv, VDU, VDU1, computeFlavourId ] }
image-VDU1: { get_param: [ nfv, VDU, VDU1, vcImageId ] }
zone: { get_param: [ nfv, VDU, VDU1, locationConstraints] }
net1: { get_param: [ nfv, CP, VDU1_CP1, network ] }
net2: { get_resource: internalVL1 }
net3: { get_resource: internalVL2 }
# delete the following lines when extManagedVLs are specified in InstantiateVnfRequest
internalVL1:
type: OS::Neutron::Net
internalVL2:
type: OS::Neutron::Net
internalVL1_subnet:
type: OS::Neutron::Subnet
properties:
ip_version: 4
network:
get_resource: internalVL1
cidr: 192.168.3.0/24
internalVL2_subnet:
type: OS::Neutron::Subnet
properties:
ip_version: 4
network:
get_resource: internalVL2
cidr: 192.168.4.0/24
nfvi_node_affinity:
type: OS::Nova::ServerGroup
properties:
name: nfvi_node_affinity
policies: [ 'affinity' ]
outputs: {}

View File

@ -0,0 +1,265 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Simple deployment flavour for Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- v2_sample6_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external1_1: [ VDU1_CP1, virtual_link ]
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
interfaces:
Vnflcm:
instantiate_start:
implementation: sample-script
instantiate_end:
implementation: sample-script
terminate_start:
implementation: sample-script
terminate_end:
implementation: sample-script
scale_start:
implementation: sample-script
scale_end:
implementation: sample-script
heal_start:
implementation: sample-script
heal_end:
implementation: sample-script
change_external_connectivity_start:
implementation: sample-script
change_external_connectivity_end:
implementation: sample-script
modify_information_start:
implementation: sample-script
modify_information_end:
implementation: sample-script
artifacts:
sample-script:
description: Sample script
type: tosca.artifacts.Implementation.Python
file: ../Scripts/sample_script.py
VDU1:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: VDU1
description: VDU1 compute node
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 5
sw_image_data:
name: cirros-0.5.2-x86_64-disk
version: '0.5.2'
checksum:
algorithm: sha-256
hash: 932fcae93574e242dc3d772d5235061747dfe537668443a1f0567d893614b464
container_format: bare
disk_format: qcow2
min_disk: 0 GB
min_ram: 256 MB
size: 12 GB
capabilities:
virtual_compute:
properties:
requested_additional_capabilities:
properties:
requested_additional_capability_name: sample4G
support_mandatory: true
target_performance_parameters:
entry_schema: test
virtual_memory:
virtual_mem_size: 4096 MB
virtual_cpu:
num_virtual_cpu: 2
virtual_local_storage:
- size_of_storage: 4 GB
VDU1_CP1:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 0
requirements:
- virtual_binding: VDU1
VDU1_CP2:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 1
requirements:
- virtual_binding: VDU1
- virtual_link: internalVL1
VDU1_CP3:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 2
requirements:
- virtual_binding: VDU1
- virtual_link: internalVL2
internalVL1:
type: tosca.nodes.nfv.VnfVirtualLink
properties:
connectivity_type:
layer_protocols: [ ipv4 ]
description: External Managed Virtual link in the VNF
vl_profile:
max_bitrate_requirements:
root: 1048576
leaf: 1048576
min_bitrate_requirements:
root: 1048576
leaf: 1048576
virtual_link_protocol_data:
- associated_layer_protocol: ipv4
l3_protocol_data:
ip_version: ipv4
cidr: 192.168.3.0/24
internalVL2:
type: tosca.nodes.nfv.VnfVirtualLink
properties:
connectivity_type:
layer_protocols: [ ipv4 ]
description: External Managed Virtual link in the VNF
vl_profile:
max_bitrate_requirements:
root: 1048576
leaf: 1048576
min_bitrate_requirements:
root: 1048576
leaf: 1048576
virtual_link_protocol_data:
- associated_layer_protocol: ipv4
l3_protocol_data:
ip_version: ipv4
cidr: 192.168.4.0/24
groups:
affinityOrAntiAffinityGroup1:
type: tosca.groups.nfv.PlacementGroup
members: [ VDU1 ]
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
VDU1_scale:
name: VDU1_scale
description: VDU1 scaling aspect
max_scale_level: 2
step_deltas:
- delta_1
- VDU1_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU1 ]
- VDU1_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: VDU1_scale
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU1 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
VDU1_scale:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
VDU1_scale:
scale_level: 1
default_level: instantiation_level_1
- VDU1_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 2
targets: [ VDU1 ]
- internalVL1_instantiation_levels:
type: tosca.policies.nfv.VirtualLinkInstantiationLevels
properties:
levels:
instantiation_level_1:
bitrate_requirements:
root: 1048576
leaf: 1048576
instantiation_level_2:
bitrate_requirements:
root: 1048576
leaf: 1048576
targets: [ internalVL1 ]
- internalVL2_instantiation_levels:
type: tosca.policies.nfv.VirtualLinkInstantiationLevels
properties:
levels:
instantiation_level_1:
bitrate_requirements:
root: 1048576
leaf: 1048576
instantiation_level_2:
bitrate_requirements:
root: 1048576
leaf: 1048576
targets: [ internalVL2 ]
- policy_antiaffinity_group:
type: tosca.policies.nfv.AntiAffinityRule
targets: [ affinityOrAntiAffinityGroup1 ]
properties:
scope: nfvi_node

View File

@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- v2_sample6_types.yaml
- v2_sample6_df_simple.yaml
topology_template:
inputs:
selected_flavour:
type: string
description: VNF deployment flavour selected by the consumer. It is provided in the API
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_id: { get_input: selected_flavour }
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
provider: Company
product_name: Sample VNF
software_version: '1.0'
descriptor_version: '1.0'
vnfm_info:
- Tacker
requirements:
#- virtual_link_external # mapped in lower-level templates
#- virtual_link_internal # mapped in lower-level templates

View File

@ -0,0 +1,55 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: VNF type definition
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
node_types:
company.provider.VNF:
derived_from: tosca.nodes.nfv.VNF
properties:
descriptor_id:
type: string
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d7000000 ] ]
default: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
descriptor_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
provider:
type: string
constraints: [ valid_values: [ 'Company' ] ]
default: 'Company'
product_name:
type: string
constraints: [ valid_values: [ 'Sample VNF' ] ]
default: 'Sample VNF'
software_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
vnfm_info:
type: list
entry_schema:
type: string
constraints: [ valid_values: [ Tacker ] ]
default: [ Tacker ]
flavour_id:
type: string
constraints: [ valid_values: [ simple ] ]
default: simple
flavour_description:
type: string
default: "flavour"
requirements:
- virtual_link_external1:
capability: tosca.capabilities.nfv.VirtualLinkable
- virtual_link_external2:
capability: tosca.capabilities.nfv.VirtualLinkable
- virtual_link_internal:
capability: tosca.capabilities.nfv.VirtualLinkable
interfaces:
Vnflcm:
type: tosca.interfaces.nfv.Vnflcm

View File

@ -0,0 +1,46 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pickle
import sys
class FailScript(object):
    """Fail change_vnfpkg (or its rollback) when a flag file exists.

    The operation name doubles as the flag-file name under /tmp; the
    functional tests create that file to force a failure.
    """

    def __init__(self, vnfc_param):
        self.vnfc_param = vnfc_param

    def run(self):
        op = ('change_vnfpkg_rollback' if self.vnfc_param['is_rollback']
              else 'change_vnfpkg')
        if os.path.exists(f'/tmp/{op}'):
            raise Exception(f'test {op} error')
def main():
    """Read the pickled vnfc_param from stdin and run the fail check."""
    vnfc_param = pickle.load(sys.stdin.buffer)
    FailScript(vnfc_param).run()


if __name__ == "__main__":
    try:
        main()
        os._exit(0)
    except Exception as ex:
        sys.stderr.write(str(ex))
        sys.stderr.flush()
        # os._exit skips interpreter cleanup; exit code 1 signals failure
        # back to the caller that spawned this script.
        os._exit(1)

View File

@ -0,0 +1,68 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import pickle
import sys
class FailScript(object):
    """Define error method for each operation

    For example:

    def instantiate_start(self):
        if os.path.exists('/tmp/instantiate_start')
            raise Exception('test instantiate_start error')
    """

    def __init__(self, req, inst, grant_req, grant, csar_dir):
        self.req = req
        self.inst = inst
        self.grant_req = grant_req
        self.grant = grant
        self.csar_dir = csar_dir

    def _fail(self, method):
        # A flag file under /tmp named after the operation forces a
        # failure for that operation.
        if os.path.exists(f'/tmp/{method}'):
            raise Exception(f'test {method} error')

    def __getattr__(self, name):
        # Any undefined operation method resolves to a _fail check, so
        # every LCM hook is covered without listing them individually.
        return functools.partial(self._fail, name)
def main():
    """Unpickle the script input from stdin and invoke the checker
    named by its 'operation' key."""
    # NOTE: test-only script; the pickled payload comes from the
    # infra driver, not from untrusted input.
    params = pickle.load(sys.stdin.buffer)
    script = FailScript(
        params['request'],
        params['vnf_instance'],
        params['grant_request'],
        params['grant_response'],
        params['tmp_csar_dir'])
    getattr(script, params['operation'])()
if __name__ == "__main__":
    try:
        main()
        # os._exit skips interpreter cleanup; exit code 0 signals
        # success to the process that spawned this script.
        os._exit(0)
    except Exception as ex:
        # Report the failure reason on stderr and exit non-zero so the
        # caller treats the operation as failed.
        sys.stderr.write(str(ex))
        sys.stderr.flush()
        os._exit(1)

View File

@ -0,0 +1,4 @@
TOSCA-Meta-File-Version: 1.0
CSAR-Version: 1.1
Created-by: Onboarding portal
Entry-Definitions: Definitions/v2_sample6_top.vnfd.yaml

View File

@ -0,0 +1,70 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import shutil
import tempfile
from oslo_utils import uuidutils
from tacker.tests.functional.sol_v2_common import paramgen
from tacker.tests.functional.sol_v2_common import utils
# Build the sample VNF package zip for the AZ-retry functional tests
# and dump the LCM request bodies the tests load from disk.

# The package zip is named after this sample's directory.
zip_file_name = os.path.basename(os.path.abspath(".")) + '.zip'
tmp_dir = tempfile.mkdtemp()

# Fresh VNFD id for each generated package.
vnfd_id = uuidutils.generate_uuid()

# tacker/tests/etc...
# /functional/sol_v2_common/samples/sampleX
image_dir = "../../../../etc/samples/etsi/nfv/common/Files/images/"
image_file = "cirros-0.5.2-x86_64-disk.img"
image_path = os.path.abspath(image_dir + image_file)

# tacker/sol_refactored/infra_drivers/openstack/userdata_standard.py
# /tests/functional/sol_v2_common/samples/sampleX
userdata_dir = "../../../../../sol_refactored/infra_drivers/openstack/"
userdata_file = "userdata_standard.py"
userdata_path = os.path.abspath(userdata_dir + userdata_file)

# Zip the sample (embedding the image and the StandardUserData module,
# which is a precondition for AZ reselection) and copy it here.
utils.make_zip(".", tmp_dir, vnfd_id, image_path=image_path,
               userdata_path=userdata_path)

shutil.copy(os.path.join(tmp_dir, zip_file_name), ".")
shutil.rmtree(tmp_dir)

# Generate the request bodies for create/terminate/instantiate/scale.
create_req = paramgen.sample6_create(vnfd_id)
terminate_req = paramgen.sample6_terminate()
net_ids = utils.get_network_ids(['net0', 'net1', 'net_mgmt'])
subnet_ids = utils.get_subnet_ids(['subnet0', 'subnet1'])
instantiate_req = paramgen.sample6_instantiate(
    net_ids, subnet_ids, "http://localhost/identity/v3")
scale_out_req = paramgen.sample6_scale_out()

# Each request body is written as pretty-printed JSON next to this
# script for the functional tests to read.
with open("create_req", "w") as f:
    f.write(json.dumps(create_req, indent=2))
with open("terminate_req", "w") as f:
    f.write(json.dumps(terminate_req, indent=2))
with open("instantiate_req", "w") as f:
    f.write(json.dumps(instantiate_req, indent=2))
with open("scale_out_req", "w") as f:
    f.write(json.dumps(scale_out_req, indent=2))

View File

@ -898,6 +898,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
expected_placement_constraints = [{
'affinityOrAntiAffinity': 'ANTI_AFFINITY',
'fallbackBestEffort': False,
'scope': 'NFVI_NODE',
'resource': []}]
vdu_def_ids = (check_reses['COMPUTE']['VDU1'] +
@ -1081,6 +1082,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
expected_placement_constraints = [{
'affinityOrAntiAffinity': 'ANTI_AFFINITY',
'fallbackBestEffort': False,
'scope': 'NFVI_NODE',
'resource': []}]
vdu_def_ids = check_reses['COMPUTE']['VDU1']

View File

@ -2816,6 +2816,7 @@ _expected_inst_info_S = {
"resourceId": "res_id_VDU1_1",
"vimLevelResourceType": "OS::Nova::Server"
},
"zoneId": "zone1",
"vnfcCpInfo": [
{
"id": "VDU1_CP1-res_id_VDU1_1",
@ -2850,6 +2851,7 @@ _expected_inst_info_S = {
"resourceId": "res_id_VDU1_0",
"vimLevelResourceType": "OS::Nova::Server"
},
"zoneId": "zone1",
"vnfcCpInfo": [
{
"id": "VDU1_CP1-res_id_VDU1_0",
@ -3472,6 +3474,67 @@ _grant_req_example = {
}
}
# example for _update_stack_retry

# Heat stack "fields" as built for an instantiate operation:
# VDU1-0 is pinned to availability zone az-1 via locationConstraints;
# VDU2-0 and its volume carry no zone constraint.
_fields_example_instantiate = {
    'parameters': {
        'nfv': {
            'VDU': {
                'VDU1-0': {
                    'computeFlavourId': 'm1.tiny',
                    'vcImageId': 'cirros-0.5.2-x86_64-disk',
                    'locationConstraints': 'az-1'
                },
                'VDU2-0': {
                    'computeFlavourId': 'm1.tiny'
                },
                'VDU2-VirtualStorage-0': {
                    'vcImageId': '0fea3414-93c0-46f5-b042-857be40e9fc7'
                }
            }
        }
    }
}

# Heat stack "fields" for a scale-out operation: VDU1-1 and VDU1-2 are
# the newly added instances, both initially placed in az-1, while
# VDU1-0 already runs in az-2.
_fields_example_scale = {
    'parameters': {
        'nfv': {
            'VDU': {
                'VDU1-0': {
                    'computeFlavourId': 'm1.tiny',
                    'vcImageId': 'cirros-0.5.2-x86_64-disk',
                    'locationConstraints': 'az-2'
                },
                'VDU2-0': {
                    'computeFlavourId': 'm1.tiny',
                },
                'VDU2-VirtualStorage-0': {
                    'vcImageId': '0fea3414-93c0-46f5-b042-857be40e9fc7'
                },
                'VDU1-1': {
                    'computeFlavourId': 'm1.tiny',
                    'vcImageId': 'cirros-0.5.2-x86_64-disk',
                    'locationConstraints': 'az-1'
                },
                'VDU1-2': {
                    'computeFlavourId': 'm1.tiny',
                    'vcImageId': 'cirros-0.5.2-x86_64-disk',
                    'locationConstraints': 'az-1'
                }
            }
        }
    }
}

# Minimal instantiatedVnfInfo with one VNFC already running in az-2;
# used by the retry tests to mark az-2 as an in-use zone.
_update_retry_instantiated_vnfinfo = {
    "vnfcResourceInfo": [
        {
            "metadata": {
                "zone": "az-2"
            }
        }
    ]
}

CONF = config.CONF
@ -4036,3 +4099,270 @@ class TestOpenstack(base.BaseTestCase):
# check
result = inst.to_dict()["instantiatedVnfInfo"]
self._check_inst_info(_expected_inst_info_vnfc_updated, result)
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
def test_update_stack_retry_fallback_best_effort_false(
        self, mock_update_stack):
    """AZ reselection is skipped when the fallback option is off."""
    # prepare
    # placement_fallback_best_effort defaults to False; set it
    # explicitly for the unit test. In that case no retry happens.
    CONF.v2_vnfm.placement_fallback_best_effort = False
    sol_detail = ("Resource CREATE failed: ResourceInError: resources."
                  "VDU1-0.resources.VDU1: Went to status ERROR due to "
                  "\"Message: No valid host was found. , Code: 500\"")
    error_ex = sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                           sol_title="stack failed")

    # execute: the original error must be re-raised unchanged.
    raised = self.assertRaises(
        sol_ex.StackOperationFailed,
        self.driver._update_stack_retry, mock.Mock(), mock.Mock(),
        mock.Mock(), mock.Mock(), error_ex, mock.Mock(), mock.Mock())

    self.assertEqual(error_ex.detail, raised.detail)
    mock_update_stack.assert_not_called()
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
def test_update_stack_retry_other_detail(self, mock_update_stack):
    """No retry when the failure detail is not an AZ resource error."""
    # prepare
    CONF.v2_vnfm.placement_fallback_best_effort = True
    fields_example = copy.deepcopy(_fields_example_instantiate)
    # A detail that does not match the configured AZ error pattern.
    sol_detail = ("Resource CREATE failed: unit test")
    error_ex = sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                           sol_title="stack failed")

    # execute: the original error must be re-raised unchanged.
    raised = self.assertRaises(
        sol_ex.StackOperationFailed,
        self.driver._update_stack_retry, mock.Mock(), fields_example,
        mock.Mock(), mock.Mock(), error_ex, mock.Mock(), mock.Mock())

    self.assertEqual(error_ex.detail, raised.detail)
    mock_update_stack.assert_not_called()
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
def test_update_stack_retry_not_match_vdu_id(self, mock_update_stack):
    """No retry when the error detail does not yield a known VDU id."""
    # prepare
    CONF.v2_vnfm.placement_fallback_best_effort = True
    # Custom pattern for extracting the failed resource from the
    # error detail (overrides the default for this test).
    CONF.v2_vnfm.placement_az_resource_error = (
        r'Resource CREATE failed: ResourceInError: '
        r'error\.(.*)\.(.*): (.*)')
    fields_example = copy.deepcopy(_fields_example_instantiate)
    # NOTE(review): presumably the greedy groups capture a string that
    # is not one of the VDU ids in fields_example, so no retry target
    # is found — confirm against _update_stack_retry's matching logic.
    sol_detail = ("Resource CREATE failed: ResourceInError: error."
                  "VDU1-0.res.VDU1: Went to status ERROR due to "
                  "\"Message: No valid host was found. , Code: 500\"")
    error_ex = sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                           sol_title="stack failed")

    # execute: the original error is re-raised and no update happens.
    ex = self.assertRaises(
        sol_ex.StackOperationFailed,
        self.driver._update_stack_retry, mock.Mock(), fields_example,
        mock.Mock(), mock.Mock(), error_ex, mock.Mock(), mock.Mock())
    self.assertEqual(error_ex.detail, ex.detail)
    mock_update_stack.assert_not_called()
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
@mock.patch.object(openstack.nova_utils.NovaClient, 'get_zone')
def test_update_stack_retry_retry_out_no_zone(self, mock_get_zone,
                                              mock_update_stack):
    """Retry stops with the last error once no candidate zone remains.

    The retry budget (10) exceeds the number of zones (4), so the
    zone pool is exhausted before the budget.
    """
    # prepare
    CONF.v2_vnfm.placement_fallback_best_effort = True
    CONF.v2_vnfm.placement_az_select_retry = 10
    vim_info = objects.VimConnectionInfo.from_dict(
        _vim_connection_info_example)
    inst = objects.VnfInstanceV2(id=uuidutils.generate_uuid())
    fields_example = copy.deepcopy(_fields_example_instantiate)
    heat_client = openstack.heat_utils.HeatClient(vim_info)
    vdu_ids = {"VDU1-0", "VDU2-0", "VDU2-VirtualStorage-0"}
    sol_detail = ("Resource CREATE failed: ResourceInError: resources."
                  "VDU1-0.resources.VDU1: Went to status ERROR due to "
                  "\"Message: No valid host was found. , Code: 500\"")
    error_ex = sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                           sol_title="stack failed")
    mock_get_zone.return_value = {'az-1', 'az-2', 'az-3', 'az-4'}

    def _retry(stack_name, fields):
        # Every update attempt fails with the same "no valid host"
        # error, forcing another zone reselection until none are left.
        sol_detail = ("Resource UPDATE failed: resources.VDU1-0: "
                      "Resource CREATE failed: ResourceInError: "
                      "resources.VDU1: Went to status ERROR due to "
                      "\"Message: No valid host was found. , "
                      "Code: 500\"")
        raise sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                          sol_title="stack failed")
    mock_update_stack.side_effect = _retry

    # execute
    self.assertRaises(sol_ex.StackOperationFailed,
        self.driver._update_stack_retry, heat_client, fields_example,
        inst, STACK_ID, error_ex, vim_info, vdu_ids)
    # One update per remaining zone: presumably az-1 (where the first
    # failure occurred) is excluded, hence zones - 1 attempts.
    self.assertEqual(len(mock_get_zone.return_value) - 1,
                     mock_update_stack.call_count)
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
@mock.patch.object(openstack.nova_utils.NovaClient, 'get_zone')
def test_update_stack_retry_retry_out_retry_limit(self, mock_get_zone,
                                                  mock_update_stack):
    """Retry stops at placement_az_select_retry before zones run out.

    With 5 zones and a retry limit of 3, the limit is hit first.
    """
    # prepare
    CONF.v2_vnfm.placement_fallback_best_effort = True
    CONF.v2_vnfm.placement_az_select_retry = 3
    vim_info = objects.VimConnectionInfo.from_dict(
        _vim_connection_info_example)
    inst = objects.VnfInstanceV2(id=uuidutils.generate_uuid())
    fields_example = copy.deepcopy(_fields_example_instantiate)
    heat_client = openstack.heat_utils.HeatClient(vim_info)
    vdu_ids = {"VDU1-0", "VDU2-0", "VDU2-VirtualStorage-0"}
    sol_detail = ("Resource CREATE failed: ResourceInError: resources."
                  "VDU1-0.resources.VDU1: Went to status ERROR due to "
                  "\"Message: No valid host was found. , Code: 500\"")
    error_ex = sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                           sol_title="stack failed")
    mock_get_zone.return_value = {'az-1', 'az-2', 'az-3', 'az-4', 'az-5'}

    def _retry(stack_name, fields):
        # Every update attempt fails, so the retry loop keeps going
        # until the configured limit is reached.
        sol_detail = ("Resource UPDATE failed: resources.VDU1-0: "
                      "Resource CREATE failed: ResourceInError: "
                      "resources.VDU1: Went to status ERROR due to "
                      "\"Message: No valid host was found. , "
                      "Code: 500\"")
        raise sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                          sol_title="stack failed")
    mock_update_stack.side_effect = _retry

    # execute
    self.assertRaises(sol_ex.StackOperationFailed,
        self.driver._update_stack_retry, heat_client, fields_example,
        inst, STACK_ID, error_ex, vim_info, vdu_ids)
    # Exactly placement_az_select_retry update attempts were made.
    self.assertEqual(CONF.v2_vnfm.placement_az_select_retry,
                     mock_update_stack.call_count)
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
@mock.patch.object(openstack.nova_utils.NovaClient, 'get_zone')
def test_update_stack_retry_check_zone_value(self, mock_get_zone,
                                             mock_update_stack):
    """Failed VDUs are moved together to one new zone on each retry.

    The reselected zone must differ from VDU1-0's zone and from az-2,
    which an already-running VNFC occupies.
    """
    # prepare
    CONF.v2_vnfm.placement_fallback_best_effort = True
    vim_info = objects.VimConnectionInfo.from_dict(
        _vim_connection_info_example)
    # The existing VNFC already runs in az-2
    # (see _update_retry_instantiated_vnfinfo).
    inst = objects.VnfInstanceV2(
        id=uuidutils.generate_uuid(),
        instantiatedVnfInfo=(
            objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict(
                _update_retry_instantiated_vnfinfo))
    )
    fields_example = copy.deepcopy(_fields_example_scale)
    heat_client = openstack.heat_utils.HeatClient(vim_info)
    # VDUs added by the scale-out, both initially placed in az-1.
    vdu_ids = {"VDU1-1", "VDU1-2"}
    sol_detail = ("Resource CREATE failed: ResourceInError: resources."
                  "VDU1-1.resources.VDU1: Went to status ERROR due to "
                  "\"Message: No valid host was found. , Code: 500\"")
    error_ex = sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                          sol_title="stack failed")
    mock_get_zone.return_value = {'az-1', 'az-2', 'az-3', 'az-4'}
    use_zone_list = []

    def _retry(stack_name, fields):
        # Record the locationConstraints chosen for each attempt;
        # fail the first update, succeed on the second.
        vdu_dict = fields['parameters']['nfv']['VDU']
        use_zone = {vdu_id: parameters.get('locationConstraints')
                    for vdu_id, parameters in vdu_dict.items()
                    if parameters.get('locationConstraints') is not None}
        use_zone_list.append(use_zone)
        if mock_update_stack.call_count >= 2:
            return
        else:
            sol_detail = ("Resource UPDATE failed: resources.VDU1-1: "
                          "Resource CREATE failed: ResourceInError: "
                          "resources.VDU1: Went to status ERROR due to "
                          "\"Message: No valid host was found. , "
                          "Code: 500\"")
            raise sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                              sol_title="stack failed")
    mock_update_stack.side_effect = _retry
    # Zone occupied by the existing VNFC; must not be reselected here.
    used_zone = 'az-2'

    # execute
    self.driver._update_stack_retry(heat_client, fields_example, inst,
                                    STACK_ID, error_ex, vim_info, vdu_ids)
    self.assertEqual(2, mock_update_stack.call_count)
    # Both failed VDUs share the same reselected zone on each attempt.
    self.assertEqual(use_zone_list[0]['VDU1-1'],
                     use_zone_list[0]['VDU1-2'])
    self.assertEqual(use_zone_list[1]['VDU1-1'],
                     use_zone_list[1]['VDU1-2'])
    # The reselected zone differs from the untouched VDU1-0's zone.
    self.assertNotEqual(use_zone_list[0]['VDU1-0'],
                        use_zone_list[0]['VDU1-1'])
    self.assertNotEqual(use_zone_list[1]['VDU1-0'],
                        use_zone_list[1]['VDU1-1'])
    # And from the zone already used by the running VNFC.
    self.assertNotEqual(used_zone, use_zone_list[0]['VDU1-1'])
    self.assertNotEqual(used_zone, use_zone_list[1]['VDU1-1'])
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
@mock.patch.object(openstack.nova_utils.NovaClient, 'get_zone')
def test_update_stack_retry_use_used_zone(self, mock_get_zone,
                                          mock_update_stack):
    """A zone in use by running VNFCs is tried once fresh zones fail.

    NOTE(review): presumably unused zones are preferred first and az-2
    (occupied by the existing VNFC) is only reselected on the final,
    third attempt — confirm against _update_stack_retry's ordering.
    """
    # prepare
    CONF.v2_vnfm.placement_fallback_best_effort = True
    vim_info = objects.VimConnectionInfo.from_dict(
        _vim_connection_info_example)
    inst = objects.VnfInstanceV2(
        # required fields
        id=uuidutils.generate_uuid(),
        instantiatedVnfInfo=(
            objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict(
                _update_retry_instantiated_vnfinfo))
    )
    fields_example = copy.deepcopy(_fields_example_scale)
    heat_client = openstack.heat_utils.HeatClient(vim_info)
    # VDUs added by the scale-out, both initially placed in az-1.
    vdu_ids = {"VDU1-1", "VDU1-2"}
    sol_detail = ("Resource CREATE failed: ResourceInError: resources."
                  "VDU1-1.resources.VDU1: Went to status ERROR due to "
                  "\"Message: No valid host was found. , Code: 500\"")
    error_ex = sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                          sol_title="stack failed")
    mock_get_zone.return_value = {'az-1', 'az-2', 'az-3', 'az-4'}
    use_zone_list = []

    def _retry(stack_name, fields):
        # Record the chosen zones; fail the first two updates so the
        # third attempt must fall back to an already-used zone.
        vdu_dict = fields['parameters']['nfv']['VDU']
        use_zone = {vdu_id: parameters.get('locationConstraints')
                    for vdu_id, parameters in vdu_dict.items()
                    if parameters.get('locationConstraints') is not None}
        use_zone_list.append(use_zone)
        if mock_update_stack.call_count >= 3:
            return
        else:
            sol_detail = ("Resource UPDATE failed: resources.VDU1-1: "
                          "Resource CREATE failed: ResourceInError: "
                          "resources.VDU1: Went to status ERROR due to "
                          "\"Message: No valid host was found. , "
                          "Code: 500\"")
            raise sol_ex.StackOperationFailed(sol_detail=sol_detail,
                                              sol_title="stack failed")
    mock_update_stack.side_effect = _retry
    # az-2 is occupied by the existing VNFC (see
    # _update_retry_instantiated_vnfinfo) and is expected last.
    expected_zone = 'az-2'

    # execute
    self.driver._update_stack_retry(heat_client, fields_example, inst,
                                    STACK_ID, error_ex, vim_info, vdu_ids)
    self.assertEqual(3, mock_update_stack.call_count)
    # The final attempt reuses the occupied zone for both failed VDUs.
    self.assertEqual(expected_zone, use_zone_list[2]['VDU1-1'])
    self.assertEqual(use_zone_list[2]['VDU1-1'],
                     use_zone_list[2]['VDU1-2'])

View File

@ -90,6 +90,12 @@ setenv = {[testenv]setenv}
commands =
stestr --test-path=./tacker/tests/functional/sol_kubernetes_multi_tenant run --slowest --concurrency 1 {posargs}
[testenv:dsvm-functional-sol-v2-az-retry]
setenv = {[testenv]setenv}
commands =
stestr --test-path=./tacker/tests/functional/sol_v2_az_retry run --slowest --concurrency 1 {posargs}
[testenv:dsvm-compliance-sol-api]
passenv =
{[testenv]passenv}