Merge "Turn on FT voting for stable/victoria" into stable/victoria

Zuul 2021-01-25 02:59:58 +00:00 committed by Gerrit Code Review
commit 81c4b03f37
22 changed files with 231 additions and 77 deletions

View File

@@ -1,20 +1,23 @@
- nodeset:
name: openstack-2-nodes-focal
name: openstack-3-nodes-focal
nodes:
- name: controller
label: ubuntu-focal
- name: compute1
label: ubuntu-focal
- name: compute2
label: ubuntu-focal
groups:
# Nodes running the compute service
- name: compute
nodes:
- controller
- compute1
- compute2
# Nodes that are not the controller
- name: subnode
nodes:
- compute1
- compute2
# Switch node for multinode networking setup
- name: switch
nodes:
@@ -23,13 +26,14 @@
- name: peers
nodes:
- compute1
- compute2
- job:
name: tacker-functional-devstack-multinode-python3
parent: devstack
description: |
Base multinodes job for devstack-based functional tests
nodeset: openstack-2-nodes-focal
nodeset: openstack-3-nodes-focal
pre-run: playbooks/devstack/pre.yaml
run: playbooks/devstack/run.yaml
post-run: playbooks/devstack/post.yaml
@@ -76,6 +80,11 @@
Q_SERVICE_PLUGIN_CLASSES: router,neutron.services.metering.metering_plugin.MeteringPlugin,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,neutron.services.qos.qos_plugin.QoSPlugin,qos
Q_ML2_PLUGIN_EXT_DRIVERS: port_security,qos
L2_AGENT_EXTENSIONS: qos
devstack_local_conf:
post-config:
$NEUTRON_DHCP_CONF:
DEFAULT:
enable_isolated_metadata: True
test_matrix_configs: [neutron]
zuul_work_dir: src/opendev.org/openstack/tacker
host-vars:
@@ -110,7 +119,7 @@
n-api: true
n-api-meta: true
n-cond: true
n-cpu: true
n-cpu: false
n-novnc: true
n-sch: true
placement-api: true
@@ -149,6 +158,7 @@
devstack_localrc:
CELLSV2_SETUP: singleconductor
PHYSICAL_NETWORK: mgmtphysnet0
OVS_BRIDGE_MAPPINGS: public:br-ex,mgmtphysnet0:br-infra
DATABASE_TYPE: mysql
devstack_services:
q-agt: true
@@ -174,5 +184,4 @@
- release-notes-jobs-python3
check:
jobs:
- tacker-functional-devstack-multinode-python3:
voting: false
- tacker-functional-devstack-multinode-python3

View File

@@ -154,7 +154,7 @@ class ViewBuilder(base.BaseViewBuilder):
if isinstance(vnf_lcm_subscription.id, str):
decode_id = vnf_lcm_subscription.id
else:
decode_id = vnf_lcm_subscription.id.decode()
decode_id = vnf_lcm_subscription.id
return {
"_links": {
"self": {
@@ -168,12 +168,12 @@ class ViewBuilder(base.BaseViewBuilder):
if 'filter' in vnf_lcm_subscription:
filter_dict = json.loads(vnf_lcm_subscription.filter)
return {
'id': vnf_lcm_subscription.id.decode(),
'id': vnf_lcm_subscription.id,
'filter': filter_dict,
'callbackUri': vnf_lcm_subscription.callback_uri.decode(),
}
return {
'id': vnf_lcm_subscription.id.decode(),
'id': vnf_lcm_subscription.id,
'callbackUri': vnf_lcm_subscription.callback_uri.decode(),
}
else:
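Note: subscription ids now arrive as plain strings, so the .decode() calls above are dropped; only callback_uri is still decoded. A minimal standalone sketch of the same str/bytes-tolerant pattern (hypothetical helper, not Tacker code):

def _to_str(value):
    # Accept either bytes (legacy rows) or str and always return a str.
    return value.decode() if isinstance(value, bytes) else value

assert _to_str(b"subscription-id") == _to_str("subscription-id") == "subscription-id"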

View File

@@ -337,7 +337,7 @@ class VnfLcmController(wsgi.Controller):
placement_attr=placement_attr,
status=constants.INACTIVE,
error_reason=None,
deleted_at=datetime.min)
deleted_at=datetime.datetime.min)
context.session.add(vnf_db)
for key, value in attributes.items():
arg = vnfm_db.VNFAttribute(
@@ -423,6 +423,7 @@
vnf_product_name=vnfd.vnf_product_name,
vnf_software_version=vnfd.vnf_software_version,
vnfd_version=vnfd.vnfd_version,
vnf_pkg_id=vnfd.package_uuid,
tenant_id=request.context.project_id,
vnf_metadata=req_body.get('metadata'))
@@ -444,7 +445,10 @@
# roll back db changes
self._destroy_vnf(context, vnf_instance)
vnf_instance.destroy(context)
self._update_package_usage_state(context, vnf_package)
if 'vnf_package' not in locals():
LOG.error("vnf_package is not assigned")
else:
self._update_package_usage_state(context, vnf_package)
# create notification data
notification = {
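Note: the added guard covers rollbacks that fire before vnf_package is ever bound. A standalone sketch of the pattern with stand-in names (not the actual controller code):

import logging

LOG = logging.getLogger(__name__)

def _lookup_package(ok):
    # Stand-in for the package lookup that may fail before assignment.
    if not ok:
        raise RuntimeError("lookup failed")
    return {"usage_state": "IN_USE"}

def rollback(ok):
    try:
        vnf_package = _lookup_package(ok)
        raise RuntimeError("later failure")          # force the rollback path
    except RuntimeError:
        # Only touch vnf_package if the lookup above actually bound it.
        if 'vnf_package' not in locals():
            LOG.error("vnf_package is not assigned")
        else:
            vnf_package["usage_state"] = "NOT_IN_USE"

rollback(ok=False)   # failure before binding -> log only
rollback(ok=True)    # failure after binding  -> safe to update usage state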

View File

@@ -17,6 +17,8 @@
Tacker base exception handling.
"""
import re
from oslo_log import log as logging
import webob.exc
from webob import util as woutil
@@ -206,6 +208,18 @@ class DuplicateResourceName(TackerException):
class DuplicateEntity(Conflict):
message = _("%(_type)s already exist with given %(entry)s")
def __init__(self, *args, **kwargs):
# oslo.db does not parse duplicate key error correctly
# if MySQL server is >=8.0.19. (oslo.db BUG 1896916)
# TODO(kden): revert once the issue is resolved.
if len(kwargs["entry"]) == 1:
matched = re.match(
r"(?P<tbl>[^\.]+)\.uniq_(?P=tbl)0(?P<columns>.+)$",
kwargs["entry"][0])
if matched is not None:
kwargs["entry"] = matched.group("columns").split("0")
super(DuplicateEntity, self).__init__(*args, **kwargs)
class ValidationError(BadRequest):
message = "%(detail)s"
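Note: a tiny standalone check of the workaround regex above, using the kind of single-entry value oslo.db produces on MySQL >= 8.0.19 (the constraint name below is an assumed example):

import re

entry = ["vnf.uniq_vnf0tenant_id0name"]   # assumed sample duplicate-key entry
matched = re.match(
    r"(?P<tbl>[^\.]+)\.uniq_(?P=tbl)0(?P<columns>.+)$", entry[0])
if matched is not None:
    entry = matched.group("columns").split("0")
print(entry)   # ['tenant_id', 'name']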

View File

@@ -387,9 +387,8 @@ class Conductor(manager.Manager):
deploy_flavour.package_uuid = package_uuid
deploy_flavour.flavour_id = flavour['flavour_id']
deploy_flavour.flavour_description = flavour['flavour_description']
if flavour.get('instantiation_levels'):
deploy_flavour.instantiation_levels = \
flavour.get('instantiation_levels')
deploy_flavour.instantiation_levels = \
flavour.get('instantiation_levels')
deploy_flavour.create()
sw_images = flavour.get('sw_images')
@@ -1335,9 +1334,8 @@ class Conductor(manager.Manager):
p_list)
def _update_placement(self, context, vnf_dict, vnf_instance):
self.vnfm_plugin.update_placement_constraint(context,
vnf_dict,
vnf_instance)
self.vnfm_plugin.update_placement_constraint_heal(
context, vnf_dict, vnf_instance)
def _delete_placement(self, context, vnf_instance_id):
self.vnfm_plugin.delete_placement_constraint(context,
@@ -1382,8 +1380,8 @@ class Conductor(manager.Manager):
try:
LOG.debug("Update vnf lcm %s %s",
(vnf_lcm_op_occs_id,
operation_state))
vnf_lcm_op_occs_id,
operation_state)
vnf_notif = self._get_vnf_notify(context, vnf_lcm_op_occs_id)
vnf_notif.operation_state = operation_state
if operation_state == fields.LcmOccsOperationState.FAILED_TEMP:
@@ -1483,20 +1481,20 @@ class Conductor(manager.Manager):
# Notification shipping
for line in vnf_lcm_subscriptions:
notification['subscriptionId'] = line.id.decode()
notification['subscriptionId'] = line.id
if (notification.get('notificationType') ==
'VnfLcmOperationOccurrenceNotification'):
notification['_links'] = {}
notification['_links']['subscription'] = {}
notification['_links']['subscription']['href'] = \
CONF.vnf_lcm.endpoint_url + \
"/vnflcm/v1/subscriptions/" + line.id.decode()
"/vnflcm/v1/subscriptions/" + line.id
else:
notification['links'] = {}
notification['links']['subscription'] = {}
notification['links']['subscription']['href'] = \
CONF.vnf_lcm.endpoint_url + \
"/vnflcm/v1/subscriptions/" + line.id.decode()
"/vnflcm/v1/subscriptions/" + line.id
notification['timeStamp'] = datetime.datetime.utcnow(
).isoformat()
try:
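Note: the LOG.debug fix earlier in this file passes one argument per %s instead of a single tuple; a quick standalone illustration of why that matters:

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

op_id, state = "example-op-occ-id", "PROCESSING"   # hypothetical values
LOG.debug("Update vnf lcm %s %s", op_id, state)    # correct: one arg per %s
# A single tuple leaves the second %s without a value, so the record
# fails to format and logging reports an error instead of the message:
# LOG.debug("Update vnf lcm %s %s", (op_id, state))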

View File

@@ -1 +1 @@
2c5211036579
d25c7c865ce8

View File

@@ -0,0 +1,85 @@
# Copyright 2020 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# flake8: noqa: E402
"""mod_vnflcm_subscription
Revision ID: d25c7c865ce8
Revises: 2c5211036579
Create Date: 2020-10-15 14:27:04.946002
"""
# revision identifiers, used by Alembic.
revision = 'd25c7c865ce8'
down_revision = '2c5211036579'
from alembic import op
import sqlalchemy as sa
from tacker.db import types
from tacker.db import migration
def upgrade(active_plugins=None, options=None):
op.alter_column('vnf_lcm_filters', 'subscription_uuid',
type_=types.Uuid(length=36), existing_type=sa.String(length=255),
nullable=False)
sta_str = "json_unquote(json_extract('filter','$.operationTypes'))"
op.add_column(
'vnf_lcm_filters',
sa.Column('operation_types',
sa.LargeBinary(length=65536),
sa.Computed(sta_str)))
op.add_column(
'vnf_lcm_filters',
sa.Column('operation_types_len',
sa.Integer,
sa.Computed("ifnull(json_length('operation_types'),0)")))
op.drop_column('vnf_lcm_filters', 'operation_states')
op.drop_column('vnf_lcm_filters', 'operation_states_len')
op.alter_column('vnf_lcm_op_occs', 'operation_state',
type_=sa.String(length=16), existing_type=sa.String(length=255))
op.alter_column('vnf_lcm_op_occs', 'operation',
type_=sa.String(length=16), existing_type=sa.String(length=255))
op.add_column('vnf_lcm_op_occs',
sa.Column('is_cancel_pending', sa.Boolean, nullable=False)),
op.add_column('vnf_lcm_op_occs',
sa.Column('resource_changes', sa.JSON(), nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('error_point', sa.Integer, nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('changed_info', sa.JSON(), nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('created_at', sa.DateTime(), nullable=False))
op.add_column('vnf_lcm_op_occs',
sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('vnf_lcm_op_occs',
sa.Column('deleted_at', sa.DateTime(), nullable=True))
pass
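Note: a rough standalone way to preview the DDL those generated-column additions emit (assumes SQLAlchemy >= 1.3.11 for sa.Computed and the MySQL dialect; not part of the migration itself):

import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.schema import CreateTable

metadata = sa.MetaData()
preview = sa.Table(
    'vnf_lcm_filters_preview', metadata,
    sa.Column('filter', sa.JSON),
    sa.Column('operation_types', sa.LargeBinary(length=65536),
              sa.Computed("json_unquote(json_extract('filter','$.operationTypes'))")),
    sa.Column('operation_types_len', sa.Integer,
              sa.Computed("ifnull(json_length('operation_types'),0)")),
)
print(CreateTable(preview).compile(dialect=mysql.dialect()))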

View File

@@ -25,7 +25,6 @@ def register_all():
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('tacker.objects.heal_vnf_request')
__import__('tacker.objects.vnfd')
__import__('tacker.objects.vnf_package')
__import__('tacker.objects.vnf_package_vnfd')
__import__('tacker.objects.vnf_deployment_flavour')
@@ -44,5 +43,4 @@ def register_all():
__import__('tacker.objects.scale_vnf_request')
__import__('tacker.objects.grant')
__import__('tacker.objects.grant_request')
__import__('tacker.objects.vnfd')
__import__('tacker.objects.vnfd_attribute')

View File

@@ -235,7 +235,7 @@ def _vnf_lcm_subscriptions_create(context, values, filter):
callbackUri = values.callback_uri
if filter:
notification_type = filter.get('notificationTypes')
operation_type = filter.get('operationTypese')
operation_type = filter.get('operationTypes')
vnf_lcm_subscriptions_id = _vnf_lcm_subscriptions_id_get(
context,
@@ -244,7 +244,7 @@ def _vnf_lcm_subscriptions_create(context, values, filter):
operation_type=operation_type)
if vnf_lcm_subscriptions_id:
raise Exception("303" + vnf_lcm_subscriptions_id.id.decode())
raise Exception("303" + vnf_lcm_subscriptions_id)
_add_filter_data(context, values.id, filter)
@@ -253,7 +253,7 @@ def _vnf_lcm_subscriptions_create(context, values, filter):
callbackUri)
if vnf_lcm_subscriptions_id:
raise Exception("303" + vnf_lcm_subscriptions_id.id.decode())
raise Exception("303" + vnf_lcm_subscriptions_id.id)
_add_filter_data(context, values.id, {})
return values

View File

@@ -554,8 +554,6 @@ class VnfPackage(base.TackerObject, base.TackerPersistentObject,
# check if vnf package is used by any vnf instances.
query = context.session.query(
func.count(models.VnfInstance.id)).\
filter_by(
instantiation_state=fields.VnfInstanceState.INSTANTIATED).\
filter_by(tenant_id=self.tenant_id).\
filter_by(vnfd_id=self.vnfd.vnfd_id).\
filter_by(deleted=False)

View File

@@ -415,6 +415,9 @@ class FenixPlugin(object):
return attrs
instances = []
if not vnf_dict['instance_id']:
return instances
client = self._get_openstack_clients(context, vnf_dict)
resources = client.heat.resources.list(vnf_dict['instance_id'],
nested_depth=2)

View File

@@ -23,6 +23,7 @@ from keystoneclient import adapter
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from tempest.lib import base
@@ -32,9 +33,11 @@ from tacker.tests.functional import clients
from tacker.tests.utils import read_file
from tacker import version
from tackerclient.common import exceptions
from tackerclient.v1_0 import client as tacker_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -125,7 +128,7 @@ class BaseTackerTest(base.BaseTestCase):
@classmethod
def tackerclient(cls):
auth_session = cls.get_auth_session()
return tacker_client.Client(session=auth_session)
return tacker_client.Client(session=auth_session, retries=5)
@classmethod
def novaclient(cls):
@@ -204,11 +207,17 @@
def wait_until_vnf_status(self, vnf_id, target_status, timeout,
sleep_interval):
start_time = int(time.time())
status = None
while True:
vnf_result = self.client.show_vnf(vnf_id)
status = vnf_result['vnf']['status']
if (status == target_status) or (
(int(time.time()) - start_time) > timeout):
try:
vnf_result = self.client.show_vnf(vnf_id)
status = vnf_result['vnf']['status']
if status == target_status:
break
except exceptions.InternalServerError:
pass
if int(time.time()) - start_time > timeout:
break
time.sleep(sleep_interval)
@@ -233,18 +242,24 @@
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
def wait_until_vnf_delete(self, vnf_id, timeout):
def wait_until_vnf_delete(self, vnf_id, timeout, sleep_interval=1):
start_time = int(time.time())
while True:
status = None
try:
vnf_result = self.client.show_vnf(vnf_id)
time.sleep(1)
except Exception:
status = vnf_result['vnf']['status']
except exceptions.NotFound:
return
status = vnf_result['vnf']['status']
if (status != 'PENDING_DELETE') or ((
int(time.time()) - start_time) > timeout):
except Exception as e:
LOG.error("Failed to get vnf status: %s", str(e))
if status is not None and status != 'PENDING_DELETE':
raise Exception("Failed with status: %s" % status)
if int(time.time()) - start_time > timeout:
raise Exception("Timeout for deleting vnf %s.",
vnf_id)
time.sleep(sleep_interval)
def wait_until_vnf_dead(self, vnf_id, timeout, sleep_interval):
self.wait_until_vnf_status(vnf_id, 'DEAD', timeout,
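Note: the reworked wait helpers above reduce to a poll-until-status-or-timeout loop that tolerates transient API errors between attempts; a generic sketch of that pattern (illustrative helper, not part of the test base):

import time

def wait_for_status(show_fn, target_status, timeout, sleep_interval,
                    transient_excs=(Exception,)):
    # Poll show_fn() until it reports target_status or the timeout expires,
    # swallowing transient errors (e.g. intermittent 500s) between attempts.
    start_time = int(time.time())
    status = None
    while True:
        try:
            status = show_fn()['vnf']['status']
            if status == target_status:
                return status
        except transient_excs:
            pass
        if int(time.time()) - start_time > timeout:
            return status
        time.sleep(sleep_interval)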

View File

@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import time
import yaml
from oslo_config import cfg
@@ -50,6 +51,10 @@ class TestRemoteCommandExecutor(base.BaseTackerTest):
def _test_cmd_executor(self, vnfd_file, vnf_name):
vnf_id, mgmt_ip = self._test_create_vnf(vnfd_file, vnf_name)
# Wait for VM initialization. It takes time after the VM starts until
# sshd comes up. This is a bit ad hoc, but adopted for simplicity.
time.sleep(100)
# Login on VNF instance, and execute 'hostname' command to verify
# connection and command output.
usr = 'cirros'

View File

@@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import time
from keystoneauth1 import exceptions
from keystoneauth1.identity import v3
from keystoneauth1 import session
@@ -24,6 +26,8 @@ from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
KEYSTONE_CLIENT_RETRIES = 3
KEYSTONE_RETRY_WAIT = 5
class Keystone(object):
@@ -51,5 +55,17 @@ class Keystone(object):
verify = 'True' == kwargs.pop('cert_verify', 'False')
auth_plugin = v3.Password(**kwargs)
ses = self.get_session(auth_plugin=auth_plugin, verify=verify)
cli = client.Client('v3', session=ses)
client_retries = KEYSTONE_CLIENT_RETRIES
while client_retries > 0:
try:
cli = client.Client('v3', session=ses)
break
except exceptions.InternalServerError:
LOG.warning("keystone service responds with 500 "
"(InternalServerError).")
client_retries = client_retries - 1
if client_retries == 0:
raise
time.sleep(KEYSTONE_RETRY_WAIT)
return cli
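Note: the new Keystone client loop is a bounded retry with a fixed wait; the same idea in a generic standalone form (illustrative helper only):

import time

def retry(factory, retriable, attempts=3, wait=5):
    # Call factory() up to `attempts` times, sleeping `wait` seconds after a
    # retriable failure and re-raising once the attempts are exhausted.
    while True:
        try:
            return factory()
        except retriable:
            attempts -= 1
            if attempts == 0:
                raise
            time.sleep(wait)

# e.g. cli = retry(lambda: client.Client('v3', session=ses),
#                  exceptions.InternalServerError)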

View File

@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import time
import unittest
from oslo_serialization import jsonutils
@@ -111,11 +112,15 @@ class VnfTestAlarmMonitor(base.BaseTackerTest):
self.addCleanup(self.wait_until_vnf_delete, vnf_id,
constants.VNF_CIRROS_DELETE_TIMEOUT)
# TODO(kden): restore this test once Change #757537 is reverted.
@unittest.skip("Until ceilometer is restored")
def test_vnf_alarm_respawn(self):
self._test_vnf_tosca_alarm(
'sample-tosca-alarm-respawn.yaml',
'alarm and respawn-vnf')
# TODO(kden): restore this test once Change #757537 is reverted.
@unittest.skip("Until ceilometer is restored")
def test_vnf_alarm_scale(self):
self._test_vnf_tosca_alarm(
'sample-tosca-alarm-scale.yaml',

View File

@@ -12,15 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
import ast
import time
import datetime
import json
import testtools
import time
import yaml
from blazarclient import exception
from oslo_serialization import jsonutils
from tacker.plugins.common import constants as evt_constants
from tacker.tests import constants
from tacker.tests.functional import base
@@ -126,20 +125,10 @@ class VnfTestReservationMonitor(base.BaseTackerTest):
vnf = self.client.show_vnf(vnf_id)['vnf']
# {"VDU1": ["10.0.0.14", "10.0.0.5"]}
if scale_type == 'scaling-in' and vdu_count == 0:
try:
# After scaling-in the vnf['mgmt_ip_address'] will be the
# list containing null values. As vnf['mgmt_ip_address']
# is string so we can not access ip address list for VDU1
# so converting that into the dict using ast lib.
# If the list contains the null value then it will raise
# ValueError so on the basis of that we are confirming the
# scaling-in is successful.
ast.literal_eval(vnf['mgmt_ip_address'])
self.fail("Scaling-in should not contain "
"mgmt_ip_address")
except ValueError:
assert True, ("Management Ip address list for VDU1 "
"contains null values.")
self.assertFalse(jsonutils.loads(vnf.get('mgmt_ip_address',
'{}')),
"Once scaling-in completes, mgmt_ip_address"
" should be empty.")
elif scale_type == 'scaling-out':
self.assertEqual(vdu_count, len(json.loads(
vnf['mgmt_ip_address'])['VDU1']))
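Note: the new assertion relies on the deserialized mgmt_ip_address being falsy once scale-in removes the VDU1 addresses; a quick illustration (sample payloads are assumptions):

from oslo_serialization import jsonutils

assert not jsonutils.loads('{}')                   # nothing left: assertFalse passes
assert jsonutils.loads('{"VDU1": ["10.0.0.14"]}')  # addresses remain: assertFalse would fail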
@@ -193,7 +182,7 @@ class VnfTestReservationMonitor(base.BaseTackerTest):
blazarclient = self.blazarclient()
reservations = [{'disk_gb': 0,
'vcpus': 1, 'memory_mb': 1,
'amount': 1, 'affinity': False,
'amount': 2, 'affinity': True,
'resource_properties': '',
'resource_type': 'virtual:instance'}]
events = []
@@ -208,6 +197,7 @@ class VnfTestReservationMonitor(base.BaseTackerTest):
blazar_host = blazarclient.host.create(
hypervisor.hypervisor_hostname)
host_added = True
break
except exception.BlazarClientException:
pass

View File

@@ -437,7 +437,7 @@ class VnfPackageTest(base.BaseTackerTest):
id=self.package_id1, base_path=self.base_url),
"GET", body={}, headers={})
self.assertEqual(200, response[0].status_code)
self.assertEqual('12804503', response[0].headers['Content-Length'])
self.assertEqual('12804568', response[0].headers['Content-Length'])
def test_fetch_vnf_package_content_combined_download(self):
"""Combine two partial downloads using 'Range' requests for csar zip"""
@@ -458,7 +458,7 @@
zipf.writestr(file_path, data)
# Partial download 2
range_ = 'bytes=11-12804503'
range_ = 'bytes=11-12804568'
headers = {'Range': range_}
response_2 = self.http_client.do_request(
'{base_path}/{id}/package_content'.format(
@@ -471,7 +471,7 @@
size_2 = int(response_2[0].headers['Content-Length'])
total_size = size_1 + size_2
self.assertEqual(True, zipfile.is_zipfile(zip_file_path))
self.assertEqual(12804503, total_size)
self.assertEqual(12804568, total_size)
zip_file_path.close()
def test_fetch_vnf_package_artifacts(self):

View File

@@ -130,7 +130,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
self.subscription_authentication = json.dumps(
auth_params).encode()
self.id = uuidsentinel.lcm_subscription_id.encode()
self.id = uuidsentinel.lcm_subscription_id
self.callback_uri = 'https://localhost/callback'.encode()
def __getattr__(self, name):

View File

@@ -609,9 +609,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
# Vnf resource software images will be created during
# instantiation.
self.assertEqual(1, mock_resource_create.call_count)
# Invoke will be called 6 times, 3 for deleting the vnf
# resources and 3 during instantiation.
self.assertEqual(6, self._vnf_manager.invoke.call_count)
# Invoke will be called 7 times, 3 for deleting the vnf
# resources and 4 during instantiation.
self.assertEqual(7, self._vnf_manager.invoke.call_count)
expected_msg = ("Request received for healing vnf '%s' "
"is completed successfully")
mock_log.info.assert_called_with(expected_msg,

View File

@@ -307,7 +307,7 @@ def _process_query_metadata(metadata, policy, unique_id):
def _process_query_metadata_reservation(metadata, policy):
query_metadata = dict()
policy_actions = policy.entity_tpl['reservation'].keys()
policy_actions = list(policy.entity_tpl['reservation'].keys())
policy_actions.remove('properties')
for action in policy_actions:
query_template = [{
@@ -335,7 +335,7 @@ def _process_alarm_actions(vnf, policy):
def _process_alarm_actions_for_reservation(vnf, policy):
# process alarm url here
alarm_actions = dict()
policy_actions = policy.entity_tpl['reservation'].keys()
policy_actions = list(policy.entity_tpl['reservation'].keys())
policy_actions.remove('properties')
for action in policy_actions:
alarm_url = vnf['attributes'].get(action)
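Note: the list() wrappers above are a Python 3 fix, since dict.keys() now returns a view without remove(). A tiny illustration (keys are illustrative):

reservation = {'properties': {}, 'start_actions': [], 'end_actions': []}
policy_actions = list(reservation.keys())   # a keys() view has no remove() on py3
policy_actions.remove('properties')
print(policy_actions)                       # ['start_actions', 'end_actions']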

View File

@@ -303,10 +303,11 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
vim_connection_info, instantiate_vnf_req):
vnfd_dict = vnflcm_utils._get_vnfd_dict(context, vnf_instance.vnfd_id,
instantiate_vnf_req.flavour_id)
base_hot_dict, nested_hot_dict = vnflcm_utils. \
get_base_nest_hot_dict(context,
instantiate_vnf_req.flavour_id,
vnf_instance.vnfd_id)
base_hot_dict, nested_hot_dict = \
vnflcm_utils.get_base_nest_hot_dict(
context,
instantiate_vnf_req.flavour_id,
vnf_instance.vnfd_id)
vnf_package_path = None
if base_hot_dict is not None:
vnf_package_path = vnflcm_utils._get_vnf_package_path(
@@ -544,10 +545,22 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
instantiate_vnf_request = objects.InstantiateVnfRequest.\
from_vnf_instance(vnf_instance)
vnf_instance.instantiated_vnf_info.reinitialize()
vnf_instance.task_state = fields.VnfInstanceTaskState.INSTANTIATING
vnfd_dict = vnflcm_utils._get_vnfd_dict(
context, vnf_instance.vnfd_id, instantiate_vnf_request.flavour_id)
vnflcm_utils._build_instantiated_vnf_info(
vnfd_dict, instantiate_vnf_request, vnf_instance,
vim_connection_info.vim_id)
try:
self._instantiate_vnf(context, vnf_instance, vnf_dict,
vim_connection_info, instantiate_vnf_request)
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'post_vnf_instantiation',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info)
except Exception as exc:
with excutils.save_and_reraise_exception() as exc_ctxt:
exc_ctxt.reraise = False

View File

@@ -163,10 +163,11 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
if user_data_path is not None and user_data_class is not None:
LOG.info('Execute user data and create heat-stack.')
base_hot_dict, nested_hot_dict = vnflcm_utils. \
get_base_nest_hot_dict(context,
inst_req_info.flavour_id,
vnf_instance.vnfd_id)
base_hot_dict, nested_hot_dict = \
vnflcm_utils.get_base_nest_hot_dict(
context,
inst_req_info.flavour_id,
vnf_instance.vnfd_id)
if base_hot_dict is None:
error_reason = _("failed to get Base HOT.")
raise vnfm.LCMUserDataFailed(reason=error_reason)