NFP - Failure handling in Orchestrator

Added the following support:
1) Context managers,
   1.1) To be used with the Python 'with' statement.
   1.2) Support 'retry', 'ignore' and 'lock' functions.
         -> retry : retry a function up to n times
         -> ignore: ignore certain expected exceptions
         -> lock  : serialize a DB transaction under a lock
   1.3) NFP modules no longer need to handle every possible
        exception with explicit try-except branches.

2) Single-class exception handling:
   All exceptions raised by a module are caught by nfp/core,
   and the registered exception handler is invoked with all the
   relevant details (event, data, context, exception, ...).

3) Used the context manager 'retry' function with client methods
   (Neutronclient, Novaclient, etc.), especially for GET methods.
   E.g., GET_TOKEN is retried 'n' times to overcome temporary
   failures with keystone (see the sketch below).
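
Usage sketch (illustrative: the KeystoneClient construction and
'conf' are assumptions, not taken from this diff; the context-manager
API matches the nfp_context_manager module added further down):

    from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
    from gbpservice.nfp.orchestrator.openstack import openstack_driver

    # Hypothetical client setup; the heat driver below obtains its
    # keystoneclient elsewhere.
    keystoneclient = openstack_driver.KeystoneClient(conf)

    # Entering the shared manager returns it; retry() invokes the
    # call up to 'tries' times with exponential backoff (2s, 4s, ...)
    # and lets the final failure escape, which __exit__ re-raises as
    # a typed KeystoneException for the registered handler.
    with nfp_ctx_mgr.KeystoneContextManager as kcm:
        token = kcm.retry(keystoneclient.get_admin_token, tries=3)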

Change-Id: Ia821938b9f607799ebeaa1c0e2ddda74ebc96fd8
Partial-Bug: 1668198
mak-454
2017-02-27 15:39:39 +05:30
parent 0bce1217af
commit 4662f535f0
23 changed files with 2940 additions and 1634 deletions

View File

@@ -2,6 +2,6 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-120} \
${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice/neutron/tests/unit/nfp/core/} $LISTOPT $IDOPTION
${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice/neutron/tests/unit/nfp/orchestrator/} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

View File

@@ -13,8 +13,8 @@
import eventlet
from eventlet import greenpool
import sys
import threading
import time
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient
@@ -287,6 +287,7 @@ class NFPContext(object):
'active_threads': [],
'sc_node_count': 0,
'sc_gateway_type_nodes': [],
'network_functions': [],
'update': False}
if nfp_context_store.context:
nfp_context_store.context.update({sc_instance_id: context})
@@ -453,46 +454,55 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_id = self._create_network_function(context)
except Exception:
# NFPContext.clear_nfp_context(context.instance['id'])
exc_type, exc_value, exc_traceback = sys.exc_info()
message = "Traceback: %s" % (exc_value)
LOG.error(message)
network_function_id = ''
finally:
self._set_node_instance_network_function_map(
context.plugin_session, context.current_node['id'],
context.instance['id'], network_function_id)
except Exception as e:
NFPContext.clear_nfp_context(context.instance['id'])
raise e
self._wait_for_node_operation_completion(context,
network_function_id,
self._wait_for_node_operation_completion(
context, network_function_id,
nfp_constants.CREATE)
def _wait_for_node_operation_completion(self, context,
network_function_id,
def _wait_for_node_operation_completion(self, context, network_function_id,
operation):
# Check for NF status in a separate thread
LOG.debug("Spawning thread for nf ACTIVE poll operation: %s" % (
operation))
nfp_context = NFPContext.get_nfp_context(context.instance['id'])
nfp_context['sc_node_count'] -= 1
nfp_context['network_functions'].append(network_function_id)
# At last wait for the threads to complete, success/failure/timeout
if nfp_context['sc_node_count'] == 0:
network_functions = nfp_context['network_functions']
for network_function in network_functions:
LOG.debug("Spawning thread for nf ACTIVE poll")
if operation == nfp_constants.DELETE:
gth = nfp_context['thread_pool'].spawn(
self._wait_for_network_function_delete_completion,
context, network_function_id)
context, network_function)
else:
gth = nfp_context['thread_pool'].spawn(
self._wait_for_network_function_operation_completion,
context, network_function_id, operation=operation)
context, network_function, operation=operation)
nfp_context['active_threads'].append(gth)
LOG.debug("Active Threads count (%d), sc_node_count (%d)" % (
len(nfp_context['active_threads']), nfp_context['sc_node_count']))
nfp_context['sc_node_count'] -= 1
# At last wait for the threads to complete, success/failure/timeout
if nfp_context['sc_node_count'] == 0:
message = "Active Threads count (%d), sc_node_count (%d)" % (
len(nfp_context['active_threads']),
nfp_context['sc_node_count'])
LOG.debug(message)
nfp_context['thread_pool'].waitall()
# Get the results
for gth in nfp_context['active_threads']:
self._wait(gth, context)
NFPContext.clear_nfp_context(context.instance['id'])
else:
NFPContext.store_nfp_context(context.instance['id'], **nfp_context)
@@ -537,31 +547,29 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
context.plugin_session,
context.current_node['id'],
context.instance['id'])
if not network_function_map:
NFPContext.store_nfp_context(
context.instance['id'],
sc_gateway_type_nodes=[],
sc_node_count=nfp_context['sc_node_count'] - 1)
return
network_function_id = network_function_map.network_function_id
try:
self.nfp_notifier.delete_network_function(
context=context.plugin_context,
network_function_id=network_function_id)
except Exception as e:
NFPContext.clear_nfp_context(context.instance['id'])
LOG.exception(_LE("Delete Network service Failed"))
network_function_id = None
if network_function_map:
self._delete_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id'])
raise e
network_function_id = network_function_map.network_function_id
if network_function_id:
try:
self.nfp_notifier.delete_network_function(
context=context.plugin_context,
network_function_id=(
network_function_map.network_function_id))
except Exception:
# NFPContext.clear_nfp_context(context.instance['id'])
LOG.exception(_LE("Delete Network service Failed"))
exc_type, exc_value, exc_traceback = sys.exc_info()
message = "Traceback: %s" % (exc_value)
LOG.error(message)
self._update_ptg(context)
self._wait_for_node_operation_completion(context,
network_function_id,
self._wait_for_node_operation_completion(context, network_function_id,
nfp_constants.DELETE)
def update_policy_target_added(self, context, policy_target):
@@ -685,31 +693,21 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
def _wait_for_network_function_delete_completion(self, context,
network_function_id):
# [REVISIT: (akash) do we need to do error handling here]
if not network_function_id:
return
time_waited = 0
network_function = None
curr_time = start_time = int(time.time())
timeout = cfg.CONF.nfp_node_driver.service_delete_timeout
while curr_time - start_time < timeout:
curr_time = int(time.time())
while time_waited < cfg.CONF.nfp_node_driver.service_delete_timeout:
network_function = self.nfp_notifier.get_network_function(
context.plugin_context, network_function_id)
if network_function:
LOG.debug("Got %s nf result for NF: %s with status:%s,"
"time waited: %s" % (network_function_id, 'delete',
time_waited, network_function['status']))
if not network_function:
if not network_function or (
network_function['status'] == nfp_constants.ERROR):
break
eventlet.sleep(5)
time_waited = time_waited + 5
LOG.debug("Deleting sci nf mapping")
self._delete_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id'])
LOG.debug("sci nf mapping got deleted. NF got deldted.")
if network_function:
LOG.error(_LE("Delete network function %(network_function)s "
"failed"),
@@ -719,13 +717,14 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
def _wait_for_network_function_operation_completion(self, context,
network_function_id,
operation):
if not network_function_id:
raise NodeInstanceCreateFailed()
time_waited = 0
network_function = None
timeout = cfg.CONF.nfp_node_driver.service_create_timeout
curr_time = start_time = int(time.time())
while curr_time - start_time < timeout:
curr_time = int(time.time())
while time_waited < timeout:
network_function = self.nfp_notifier.get_network_function(
context.plugin_context, network_function_id)
LOG.debug("Got %s nf result for NF: %s with status:%s,"
@@ -1095,6 +1094,16 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
if not vip_ip:
raise VipNspNotSetonProvider()
if service_targets:
for provider_port in service_targets['provider_ports']:
provider_port['allowed_address_pairs'] = [
{'ip_address': vip_ip}]
port = {
'port': provider_port
}
context.core_plugin.update_port(
context.plugin_context, provider_port['id'], port)
provider = {
'pt': service_targets.get('provider_pt_objs', []),
'ptg': service_targets.get('provider_ptg', []),

View File

@@ -565,137 +565,6 @@ class NFPDBTestCase(SqlTestCase):
self.session,
mgmt_port_id)
def create_network_function_device_interface(self, attributes=None,
create_nfd=True):
if attributes is None:
nfd = (self.create_network_function_device()['id']
if create_nfd else None)
attributes = {
'tenant_id': 'tenant_id',
'plugged_in_port_id': {
'id': 'myid2_ha_port',
'port_model': nfp_constants.NEUTRON_PORT,
'port_classification': nfp_constants.MONITOR,
'port_role': nfp_constants.ACTIVE_PORT
},
'interface_position': 1,
'mapped_real_port_id': 'myid2',
'network_function_device_id': nfd
}
return self.nfp_db.create_network_function_device_interface(
self.session, attributes)
def test_create_network_function_device_interface(self):
attrs = {
'tenant_id': 'tenant_id',
'plugged_in_port_id': {
'id': 'myid2_ha_port',
'port_model': nfp_constants.NEUTRON_PORT,
'port_classification': nfp_constants.MONITOR,
'port_role': nfp_constants.ACTIVE_PORT
},
'interface_position': 1,
'mapped_real_port_id': 'myid2',
'network_function_device_id': (
self.create_network_function_device()['id'])
}
network_function_device_interface = (
self.create_network_function_device_interface(attrs))
for key in attrs:
if key == 'mgmt_port_id':
self.assertEqual(attrs[key]['id'],
network_function_device_interface[key])
continue
self.assertEqual(attrs[key],
network_function_device_interface[key])
self.assertIsNotNone(network_function_device_interface['id'])
def test_get_network_function_device_interface(self):
attrs_all = {
'tenant_id': 'tenant_id',
'plugged_in_port_id': {
'id': 'myid2_ha_port',
'port_model': nfp_constants.NEUTRON_PORT,
'port_classification': nfp_constants.MONITOR,
'port_role': nfp_constants.ACTIVE_PORT
},
'interface_position': 1,
'mapped_real_port_id': 'myid2',
'network_function_device_id': (
self.create_network_function_device()['id'])
}
network_function_device_interface = (
self.create_network_function_device_interface(attrs_all))
db_network_function_device_interface = (
self.nfp_db.get_network_function_device_interface(
self.session, network_function_device_interface['id']))
for key in attrs_all:
self.assertEqual(attrs_all[key],
db_network_function_device_interface[key])
def test_list_network_function_device_interface(self):
network_function_device_interface = (
self.create_network_function_device_interface())
network_function_device_interfaces = (
self.nfp_db.get_network_function_device_interfaces(
self.session))
self.assertEqual(1, len(network_function_device_interfaces))
self.assertEqual(network_function_device_interface['id'],
network_function_device_interfaces[0]['id'])
def test_list_network_function_device_interfaces_with_filters(self):
attrs = {
'tenant_id': 'tenant_id',
'plugged_in_port_id': {
'id': 'myid2_ha_port',
'port_model': nfp_constants.NEUTRON_PORT,
'port_classification': nfp_constants.MONITOR,
'port_role': nfp_constants.ACTIVE_PORT
},
'interface_position': 1,
'mapped_real_port_id': 'myid2',
'network_function_device_id': (
self.create_network_function_device()['id'])
}
network_function_device_interface = (
self.create_network_function_device_interface(attrs))
filters = {
'interface_position': [1]
}
network_function_device_interfaces = (
self.nfp_db.get_network_function_device_interfaces(
self.session, filters=filters))
self.assertEqual(1, len(network_function_device_interfaces))
self.assertEqual(network_function_device_interface['id'],
network_function_device_interfaces[0]['id'])
filters = {'interface_position': [100]}
network_function_device_interfaces = (
self.nfp_db.get_network_function_device_interfaces(
self.session, filters=filters))
self.assertEqual([], network_function_device_interfaces)
def test_update_network_function_device_interface(self):
network_function_device_interface = (
self.create_network_function_device_interface())
self.assertIsNotNone(network_function_device_interface['id'])
updated_nfdi = {'interface_position': 2}
nfdi = self.nfp_db.update_network_function_device_interface(
self.session, network_function_device_interface['id'],
updated_nfdi)
self.assertEqual(2, nfdi['interface_position'])
def test_delete_network_function_device_interface(self):
network_function_device_interface = (
self.create_network_function_device_interface())
self.assertIsNotNone(network_function_device_interface['id'])
self.nfp_db.delete_network_function_device_interface(
self.session, network_function_device_interface['id'])
self.assertRaises(nfp_exc.NetworkFunctionDeviceInterfaceNotFound,
self.nfp_db.get_network_function_device_interface,
self.session,
network_function_device_interface['id'])
def _get_gateway_details(self):
return dict(
id=str(uuid.uuid4()),

View File

@@ -85,6 +85,8 @@ class DummyEvent(object):
self.context = {}
self.desc = DummyDesc()
self.context = self.data
class Desc(object):
@@ -313,6 +315,7 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
mock_update_nfd.assert_called_with(ndo_handler.db_session,
orig_event_data['id'],
orig_event_data)
ndo_handler._controller.reset_mock()
@mock.patch.object(nfpdb.NFPDbBase, 'update_network_function_device')
def test_health_check(self, mock_update_nfd):
@@ -524,7 +527,6 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
def test_device_configuration_complete(self,
mock_update_nfd, mock_get_nfd):
ndo_handler = self._initialize_ndo_handler()
tmp_data = copy.deepcopy(self.event.data)
device = self.event.data
status = 'ACTIVE'
device = {'nfp_context': device}
@@ -549,16 +551,9 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
'id': 'device_id',
'reference_count': 0
}
ndo_handler.device_configuration_complete(self.event)
mock_update_nfd.assert_called_with(ndo_handler.db_session,
device[
'nfp_context'][
'network_function_device'][
'id'],
{'reference_count': (
reference_count)})
self.event.data = tmp_data
ndo_handler.device_configuration_complete(self.event)
ndo_handler._controller.reset_mock()
@mock.patch.object(nfpdb.NFPDbBase, 'get_network_function_device')
@mock.patch.object(nfpdb.NFPDbBase, 'update_network_function_device')
@@ -588,6 +583,7 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
event_id = 'DELETE_CONFIGURATION'
ndo_handler._create_event = mock.MagicMock(return_value=True)
delete_event_req.context = delete_event_req.data
ndo_handler.delete_network_function_device(delete_event_req)
ndo_handler._create_event.assert_called_with(
event_id=event_id,
@@ -627,16 +623,15 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
mock_get_nfd.return_value = {
'id': 'device_id',
'interfaces_in_use': 1
'interfaces_in_use': 1,
'reference_count': 1,
}
ndo_handler.unplug_interfaces(self.event)
orig_event_data['interfaces_in_use'] -= len(orig_event_data['ports'])
mock_update_nfd.assert_called_with(ndo_handler.db_session,
orig_event_data['id'],
{'interfaces_in_use': (
orig_event_data[
'interfaces_in_use'])})
orig_event_data['id'], mock.ANY)
orig_event_data = copy.deepcopy(self.event.data)
orig_event_data['status_description'] = (
ndo_handler.status_map['ACTIVE'])
@@ -742,6 +737,7 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
'network_function_device'][
'id'],
device)
ndo_handler._controller.reset_mock()
self.event.data = tmp_data
@@ -781,8 +777,11 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
device['nfp_context'][
'network_function_device'][
'id'],
{'reference_count': (
reference_count)})
{'status': 'ERROR',
'status_description':
'Configuring Device Failed.',
'id': 'vm-id'})
ndo_handler._controller.reset_mock()
self.event.data = tmp_data

View File

@@ -20,10 +20,10 @@ from oslo_config import cfg
from gbpservice.neutron.tests.unit.nfp.orchestrator.db import test_nfp_db
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import exceptions as nfp_exc
from gbpservice.nfp.core import context as nfp_core_context
from gbpservice.nfp.core import context as nfp_context
from gbpservice.nfp.core import controller # noqa
from gbpservice.nfp.core.event import Event as NFP_EVENT
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import nfp_context_manager
from gbpservice.nfp.lib import transport
from gbpservice.nfp.orchestrator.modules import (
service_orchestrator as nso)
@@ -33,6 +33,9 @@ from gbpservice.nfp.orchestrator.openstack import openstack_driver
import uuid as pyuuid
nfp_context_manager.sql_lock_support = False
def Event(**kwargs):
data = kwargs.get('data')
key = pyuuid.uuid4()
@@ -91,8 +94,7 @@ class NSORpcHandlerTestCase(NSOModuleTestCase):
"context", {'resource_owner_context':
{'tenant_id': 'tenant_id'}})
mock_create_network_function.assert_called_once_with(
"context", {'resource_owner_context':
{'tenant_id': 'tenant_id'}})
"context", mock.ANY)
@mock.patch.object(nso.ServiceOrchestrator,
"get_network_function")
@@ -291,13 +293,15 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
mock_get_admin_token,
mock_get_admin_tenant_id):
network_function = self.create_network_function()
nfp_core_context.get_nfp_context = mock.MagicMock(
return_value={})
nfp_context.init()
mock_get_admin_token.return_value = 'admin_token'
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
transport.parse_service_flavor_string = mock.MagicMock(
return_value={'device_type': 'VM',
'service_vendor': 'vyos'})
self.service_orchestrator._create_event = mock.MagicMock(
return_value='')
self.service_orchestrator.delete_network_function(
self.context, network_function['id'])
self.assertRaises(nfp_exc.NetworkFunctionNotFound,
@@ -327,8 +331,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
self.session, network_function_id)
mock_get_admin_token.return_value = 'admin_token'
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
nfp_core_context.get_nfp_context = mock.MagicMock(
return_value={})
nfp_context.init()
transport.parse_service_flavor_string = mock.MagicMock(
return_value={'device_type': 'VM',
'service_vendor': 'vyos'})
@@ -340,7 +344,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
def test_event_create_network_function_instance(self):
network_function_instance = self.create_network_function_instance()
network_function = self.nfp_db.get_network_function(self.session,
network_function = self.nfp_db.get_network_function(
self.session,
network_function_instance['network_function_id'])
network_function_port_info = [
{
@@ -374,25 +379,11 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'provider': {'pt': None}
}
test_event = Event(data=create_nfi_request)
nfp_logging.store_logging_context(path='create')
test_event.context = create_nfi_request
test_event.context['log_context'] = nfp_context.init_log_context()
self.service_orchestrator.create_network_function_instance(
test_event)
def test_event_handle_device_created(self):
nfd = self.create_network_function_device()
nfi = self.create_network_function_instance(create_nfd=False)
request_data = {
'network_function_instance_id': nfi['id'],
'network_function_device_id': nfd['id']
}
test_event = Event(data=request_data)
self.assertIsNone(nfi['network_function_device_id'])
self.service_orchestrator.handle_device_created(
test_event)
db_nfi = self.nfp_db.get_network_function_instance(
self.session, nfi['id'])
self.assertEqual(nfd['id'], db_nfi['network_function_device_id'])
@mock.patch.object(
nso.ServiceOrchestrator, "_create_event")
@mock.patch.object(
@@ -433,9 +424,9 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'network_function_instance_id': nfi['id'],
'network_function_device_id': nfd['id']
}
test_event = Event(data=request_data)
test_event = Event(data=request_data, context=request_data)
test_event.context['log_context'] = nfp_context.init_log_context()
self.assertIsNone(nfi['network_function_device_id'])
nfp_logging.store_logging_context(path='create')
self.service_orchestrator.handle_device_create_failed(
test_event)
db_nfi = self.nfp_db.get_network_function_instance(
@@ -468,6 +459,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
}
test_event = Event(data=request_data)
test_event.context = request_data
status = self.service_orchestrator.check_for_user_config_complete(
test_event)
mock_is_config_complete.assert_called_once_with(
@@ -491,7 +483,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
}
test_event = Event(data=request_data)
nfp_logging.store_logging_context(path='create')
test_event.context = request_data
status = self.service_orchestrator.check_for_user_config_complete(
test_event)
mock_is_config_complete.assert_called_once_with(
@@ -516,6 +508,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
}
test_event = Event(data=request_data)
test_event.context = request_data
status = self.service_orchestrator.check_for_user_config_complete(
test_event)
mock_is_config_complete.assert_called_once_with(
@@ -542,8 +535,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'config_policy_id': 'config_policy_id',
'network_function_id': network_function['id']
}
test_event = Event(data=request_data)
nfp_logging.store_logging_context(path='create')
test_event = Event(data=request_data, context=request_data)
test_event.context['log_context'] = nfp_context.init_log_context()
self.service_orchestrator.handle_user_config_failed(test_event)
db_nf = self.nfp_db.get_network_function(
self.session, network_function['id'])
@@ -566,6 +559,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'config_policy_id': 'config_policy_id',
'network_function_id': network_function['id']}
test_event = Event(data=request_data)
test_event.context = request_data
mock_service_type.return_value = 'firewall'
status = self.service_orchestrator.check_for_user_config_deleted(
test_event)
@@ -588,6 +582,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'config_policy_id': 'config_policy_id',
'network_function_id': network_function['id']}
test_event = Event(data=request_data)
test_event.context = request_data
status = self.service_orchestrator.check_for_user_config_deleted(
test_event)
mock_is_config_delete_complete.assert_called_once_with(
@@ -610,7 +605,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
'network_function_id': network_function['id'],
'action': 'update'}
test_event = Event(data=request_data)
nfp_logging.store_logging_context(path='create')
test_event.context = request_data
test_event.context['log_context'] = nfp_context.init_log_context()
status = self.service_orchestrator.check_for_user_config_deleted(
test_event)
mock_is_config_delete_complete.assert_called_once_with(
@@ -657,8 +653,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
network_function['network_function_instances'])
mock_get_admin_token.return_value = 'admin_token'
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
nfp_core_context.get_nfp_context = mock.MagicMock(
return_value={})
nfp_context.init()
self.service_orchestrator.delete_network_function(
self.context, network_function['id'])
db_nf = self.nfp_db.get_network_function(
@@ -675,6 +670,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
network_function['network_function_instances'])
data = {'network_function_instance': nfi}
test_event = Event(data=data)
test_event.context = data
self.service_orchestrator.delete_network_function_instance(
test_event)
db_nfi = self.nfp_db.get_network_function_instance(
@@ -773,13 +769,13 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
transport.parse_service_flavor_string = mock.MagicMock(
return_value={'device_type': 'VM',
'service_vendor': 'vyos'})
nfp_core_context.get_nfp_context = mock.MagicMock(
return_value={})
with mock.patch.object(
self.service_orchestrator.config_driver,
"handle_consumer_ptg_operations") as\
mock_handle_consumer_ptg_added:
mock_handle_consumer_ptg_added.return_value = 'stack_id'
nfp_context.init()
self.service_orchestrator.handle_consumer_ptg_added(
self.context, network_function_id, policy_target_group)
db_nf = self.nfp_db.get_network_function(
@@ -819,13 +815,12 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
transport.parse_service_flavor_string = mock.MagicMock(
return_value={'device_type': 'VM',
'service_vendor': 'vyos'})
nfp_core_context.get_nfp_context = mock.MagicMock(
return_value={})
with mock.patch.object(
self.service_orchestrator.config_driver,
"handle_consumer_ptg_operations") as\
mock_handle_consumer_ptg_removed:
mock_handle_consumer_ptg_removed.return_value = 'stack_id'
nfp_context.init()
self.service_orchestrator.handle_consumer_ptg_removed(
self.context, network_function_id, policy_target_group)
db_nf = self.nfp_db.get_network_function(

View File

@@ -117,7 +117,7 @@ APPLY_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20
UPDATE_USER_CONFIG_PREPARING_TO_START_SPACING = 10
UPDATE_USER_CONFIG_PREPARING_TO_START_MAXRETRY = 20
UPDATE_USER_CONFIG_STILL_IN_PROGRESS_MAXRETRY = 20
UPDATE_USER_CONFIG_STILL_IN_PROGRESS_MAXRETRY = 300
DELETE_USER_CONFIG_IN_PROGRESS_SPACING = 10
DELETE_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20

View File

@@ -0,0 +1,213 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import nfp_exceptions
LOG = nfp_logging.getLogger(__name__)
sql_lock_support = True
class ContextManager(object):
def __init__(self, session=None, suppress=tuple()):
# suppress tuple holds the kinds of exceptions
# that we do not want to re-raise
self.session = session
self.suppress = suppress
def __enter__(self):
pass
def __exit__(self, Exptype, expvalue, traceback):
if self.suppress and Exptype:
if Exptype in self.suppress:
return False
for exception in self.suppress:
if isinstance(Exptype, exception):
return False
if not self.suppress and traceback:
return True
else:
return False
def retry(self, method, *args, **kwargs):
tries = kwargs.pop('tries', 1)
delay = 2
backoff = 2
while tries > 1:
# Loop 'tries-1' times; the final attempt below runs
# outside the try-except so its exception propagates
try:
return method(*args, **kwargs)
except Exception:
msg = " %s retrying in %s seconds " % (self.__class__, delay)
LOG.error(msg)
time.sleep(delay)
tries -= 1
delay *= backoff
return method(*args, **kwargs)
class NfpDbContextManager(ContextManager):
def new(self, **kwargs):
return NfpDbContextManager(**kwargs)
def lock(self, session, method, *args, **kwargs):
if not sql_lock_support:
return method(session, *args, **kwargs)
with session.begin(subtransactions=True):
session.execute("SELECT GET_LOCK('nfp_db_lock', -1)")
ret = method(session, *args, **kwargs)
session.execute("SELECT RELEASE_LOCK('nfp_db_lock')")
return ret
def __enter__(self):
super(NfpDbContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpDbContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.DbException(Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpNovaContextManager(ContextManager):
def new(self, **kwargs):
return NfpNovaContextManager(**kwargs)
def __enter__(self):
super(NfpNovaContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpNovaContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.NovaException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpKeystoneContextManager(ContextManager):
def new(self, **kwargs):
return NfpKeystoneContextManager(**kwargs)
def __enter__(self):
super(NfpKeystoneContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpKeystoneContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.KeystoneException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpNeutronContextManager(ContextManager):
def new(self, **kwargs):
return NfpNeutronContextManager(**kwargs)
def __enter__(self):
super(NfpNeutronContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpNeutronContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.NeutronException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpHeatContextManager(ContextManager):
def new(self, **kwargs):
return NfpHeatContextManager(**kwargs)
def __enter__(self):
super(NfpHeatContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpHeatContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.HeatException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpGBPContextManager(ContextManager):
def new(self, **kwargs):
return NfpGBPContextManager(**kwargs)
def __enter__(self):
super(NfpGBPContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpGBPContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.GBPException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
# Create the respective instances once, so that there is no need
# to instantiate them again anywhere else
DbContextManager = NfpDbContextManager()
NovaContextManager = NfpNovaContextManager()
KeystoneContextManager = NfpKeystoneContextManager()
NeutronContextManager = NfpNeutronContextManager()
HeatContextManager = NfpHeatContextManager()
GBPContextManager = NfpGBPContextManager()
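
For reference, a sketch of the 'lock' and 'ignore' (suppress) usage
the module above enables. Hedged assumptions: 'session' is an open
SQLAlchemy session bound to MySQL (lock() relies on MySQL's
GET_LOCK/RELEASE_LOCK), and create_record/data are hypothetical:

    from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr

    data = {'id': 'nf-1'}  # hypothetical payload

    def create_record(session, data):
        # Hypothetical write, executed under the named SQL lock.
        session.add(data)

    # lock(): run the method inside a subtransaction serialized by
    # the global 'nfp_db_lock' named lock.
    with nfp_ctx_mgr.DbContextManager as dcm:
        dcm.lock(session, create_record, data)

    # ignore: a manager built with a suppress tuple swallows the
    # listed exceptions instead of wrapping them in DbException.
    with nfp_ctx_mgr.DbContextManager.new(suppress=(KeyError,)):
        value = {}['missing']  # KeyError is suppressed here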

View File

@@ -0,0 +1,41 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class GenericException(Exception):
def __init__(self, type, value, traceback):
super(GenericException, self).__init__(type, value)
class DbException(GenericException):
pass
class NeutronException(GenericException):
pass
class NovaException(GenericException):
pass
class KeystoneException(GenericException):
pass
class GBPException(GenericException):
pass
class HeatException(GenericException):
pass
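
A sketch of the single-point exception handling these wrappers make
possible. The handler signature and helper names are illustrative
assumptions; the actual nfp/core registration hook is not part of
this diff:

    from gbpservice.nfp.lib import nfp_exceptions

    def registered_exception_handler(event, data, context, exc):
        # Every context manager re-raises backend failures as a
        # GenericException subclass, so one handler can classify any
        # module failure without per-call try-except blocks.
        if isinstance(exc, nfp_exceptions.KeystoneException):
            requeue_event(event)        # hypothetical helper
        elif isinstance(exc, nfp_exceptions.GenericException):
            mark_event_failed(event)    # hypothetical helper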

View File

@@ -283,12 +283,14 @@ def get_response_from_configurator(conf):
message = ("get_notification ->"
"GET request failed. Reason : %s" % (rce))
LOG.error(message)
return []
return "get_notification -> GET request failed. Reason : %s" % (
rce)
except Exception as e:
message = ("get_notification ->"
"GET request failed. Reason : %s" % (e))
LOG.error(message)
return []
return "get_notification -> GET request failed. Reason : %s" % (
e)
elif conf.backend == UNIX_REST:
try:
@@ -304,13 +306,15 @@ def get_response_from_configurator(conf):
"GET request failed. Reason : %s" % (
rce))
LOG.error(message)
return []
return "get_notification -> GET request failed. Reason : %s" % (
rce)
except Exception as e:
message = ("get_notification ->"
"GET request failed. Reason : %s" % (
e))
LOG.error(message)
return []
return "get_notification -> GET request failed. Reason : %s" % (
e)
else:
rpc_cbs_data = []

View File

@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
from gbpservice.nfp.orchestrator.openstack import openstack_driver
from gbpservice.nfp.orchestrator.coal.networking import(
nfp_neutron_network_driver as neutron_nd
@@ -17,6 +19,7 @@ from gbpservice.nfp.orchestrator.coal.networking import(
class NFPGBPNetworkDriver(neutron_nd.NFPNeutronNetworkDriver):
def __init__(self, config):
self.config = config
super(NFPGBPNetworkDriver, self).__init__(config)
@@ -26,20 +29,28 @@ class NFPGBPNetworkDriver(neutron_nd.NFPNeutronNetworkDriver):
pass
def create_port(self, token, tenant_id, net_id, name=None):
port = self.network_handler.create_policy_target(token, tenant_id,
net_id, name)
with nfp_ctx_mgr.GBPContextManager as gcm:
port = gcm.retry(self.network_handler.create_policy_target,
token, tenant_id, net_id, name)
return port
def delete_port(self, token, port_id):
self.network_handler.delete_policy_target(token, port_id)
with nfp_ctx_mgr.GBPContextManager as gcm:
gcm.retry(
self.network_handler.delete_policy_target,
token, port_id)
def get_port_id(self, token, port_id):
pt = self.network_handler.get_policy_target(token, port_id)
with nfp_ctx_mgr.GBPContextManager as gcm:
pt = gcm.retry(
self.network_handler.get_policy_target, token, port_id)
return pt['port_id']
def update_port(self, token, port_id, port):
pt = self.network_handler.update_policy_target(token, port_id,
port)
with nfp_ctx_mgr.GBPContextManager as gcm:
pt = gcm.retry(
self.network_handler.update_policy_target,
token, port_id, port)
return pt['port_id']
def get_neutron_port_details(self, token, port_id):
@@ -59,22 +70,25 @@ class NFPGBPNetworkDriver(neutron_nd.NFPNeutronNetworkDriver):
return port_details
def get_networks(self, token, filters):
return self.network_handler.get_policy_target_groups(token,
filters=filters)
with nfp_ctx_mgr.GBPContextManager as gcm:
return gcm.retry(
self.network_handler.get_policy_target_groups,
token, filters=filters)
def set_promiscuos_mode(self, token, port_id, enable_port_security):
port_id = self.get_port_id(token, port_id)
# self.network_handler = openstack_driver.NeutronClient(self.config)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token,
port_id, enable_port_security)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(
token, port_id, enable_port_security)
# self.network_handler = openstack_driver.GBPClient(self.config)
def set_promiscuos_mode_fast(self, token, port_id, enable_port_security):
# self.network_handler = openstack_driver.NeutronClient(self.config)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token,
port_id, enable_port_security)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(
token, port_id, enable_port_security)
# self.network_handler = openstack_driver.GBPClient(self.config)
def get_service_profile(self, token, service_profile_id):
return self.network_handler.get_service_profile(token,
with nfp_ctx_mgr.GBPContextManager as gcm:
return gcm.retry(self.network_handler.get_service_profile, token,
service_profile_id)

View File

@@ -16,6 +16,7 @@ class NFPNetworkDriverBase(object):
Handles ports, operations on them
"""
def __init__(self):
pass

View File

@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
from gbpservice.nfp.orchestrator.openstack import openstack_driver
from gbpservice.nfp.orchestrator.coal.networking import(
nfp_network_driver_base as ndb
@@ -27,22 +29,29 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
pass
def create_port(self, token, tenant_id, net_id, name=None):
port = self.neutron_client.create_port(token, tenant_id, net_id,
with nfp_ctx_mgr.NeutronContextManager as ncm:
port = ncm.retry(
self.neutron_client.create_port,
token, tenant_id, net_id,
attrs={'name': name})
return port
def delete_port(self, token, port_id):
self.neutron_client.delete_port(token, port_id)
with nfp_ctx_mgr.NeutronContextManager as ncm:
ncm.retry(self.neutron_client.delete_port, token, port_id)
def get_port_id(self, token, port_id):
return port_id
def update_port(self, token, port_id, port):
port = self.neutron_client.update_port(token, port_id, **port)
with nfp_ctx_mgr.NeutronContextManager as ncm:
port = ncm.retry(self.neutron_client.update_port,
token, port_id, **port)
return port['port']
def get_port_and_subnet_details(self, token, port_id):
port = self.neutron_client.get_port(token, port_id)
with nfp_ctx_mgr.NeutronContextManager as ncm:
port = ncm.retry(self.neutron_client.get_port, token, port_id)
# ip
ip = port['port']['fixed_ips'][0]['ip_address']
@@ -52,14 +61,17 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
# gateway ip
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
subnet = self.neutron_client.get_subnet(token, subnet_id)
with nfp_ctx_mgr.NeutronContextManager as ncm:
subnet = ncm.retry(
self.neutron_client.get_subnet, token, subnet_id)
cidr = subnet['subnet']['cidr']
gateway_ip = subnet['subnet']['gateway_ip']
return (ip, mac, cidr, gateway_ip, port, subnet)
def get_port_details(self, token, port_id):
port = self.neutron_client.get_port(token, port_id)
with nfp_ctx_mgr.NeutronContextManager as ncm:
port = ncm.retry(self.neutron_client.get_port, token, port_id)
# ip
ip = port['port']['fixed_ips'][0]['ip_address']
@@ -69,18 +81,18 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
# gateway ip
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
subnet = self.neutron_client.get_subnet(token, subnet_id)
with nfp_ctx_mgr.NeutronContextManager as ncm:
subnet = ncm.retry(
self.neutron_client.get_subnet, token, subnet_id)
cidr = subnet['subnet']['cidr']
gateway_ip = subnet['subnet']['gateway_ip']
return (ip, mac, cidr, gateway_ip, port, subnet)
def set_promiscuos_mode(self, token, port_id, enable_port_security):
if not enable_port_security:
port_security = False
else:
port_security = True
self.neutron_client.update_port(token, port_id,
port_security = bool(enable_port_security)
with nfp_ctx_mgr.NeutronContextManager as ncm:
ncm.retry(self.neutron_client.update_port, token, port_id,
security_groups=[],
port_security_enabled=port_security)

View File

@@ -15,7 +15,6 @@ import copy
import time
from heatclient import exc as heat_exc
from keystoneclient import exceptions as k_exceptions
from neutron._i18n import _LE
from neutron._i18n import _LI
from neutron._i18n import _LW
@@ -23,14 +22,15 @@ from neutron.db import api as db_api
from neutron.plugins.common import constants as pconst
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import excutils
import yaml
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import utils
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
from gbpservice.nfp.lib import transport
from gbpservice.nfp.orchestrator.config_drivers.heat_client import HeatClient
from gbpservice.nfp.orchestrator.db import nfp_db as nfp_db
@@ -98,7 +98,9 @@ class HeatDriver(object):
self.keystone_conf = config.nfp_keystone_authtoken
keystone_version = self.keystone_conf.auth_version
self.v2client = self.keystoneclient._get_v2_keystone_admin_client()
with nfp_ctx_mgr.KeystoneContextManager as kcm:
self.v2client = kcm.retry(
self.keystoneclient._get_v2_keystone_admin_client, tries=3)
self.admin_id = self.v2client.users.find(
name=self.keystone_conf.admin_user).id
self.admin_role = self._get_role_by_name(
@@ -107,32 +109,27 @@ class HeatDriver(object):
self.v2client, "heat_stack_owner", keystone_version)
def _resource_owner_tenant_id(self):
auth_token = self.keystoneclient.get_scoped_keystone_token(
with nfp_ctx_mgr.KeystoneContextManager as kcm:
auth_token = kcm.retry(
self.keystoneclient.get_scoped_keystone_token,
self.keystone_conf.admin_user,
self.keystone_conf.admin_password,
self.keystone_conf.admin_tenant_name)
try:
tenant_id = self.keystoneclient.get_tenant_id(
auth_token, self.keystone_conf.admin_tenant_name)
self.keystone_conf.admin_tenant_name, tries=3)
tenant_id = kcm.retry(
self.keystoneclient.get_tenant_id,
auth_token, self.keystone_conf.admin_tenant_name, tries=3)
return tenant_id
except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('No tenant with name %(tenant)s exists.'),
{'tenant': self.keystone_conf.admin_tenant_name})
except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=True):
LOG.error(
_LE('Multiple tenants matches found for %(tenant)s'),
{'tenant': self.keystone_conf.admin_tenant_name})
def _get_resource_owner_context(self):
if cfg.CONF.heat_driver.is_service_admin_owned:
tenant_id = self._resource_owner_tenant_id()
auth_token = self.keystoneclient.get_scoped_keystone_token(
with nfp_ctx_mgr.KeystoneContextManager as kcm:
auth_token = kcm.retry(
self.keystoneclient.get_scoped_keystone_token,
self.keystone_conf.admin_user,
self.keystone_conf.admin_password,
self.keystone_conf.admin_tenant_name,
tenant_id)
tenant_id, tries=3)
return auth_token, tenant_id
def _get_role_by_name(self, keystone_client, name, keystone_version):
@@ -174,7 +171,9 @@ class HeatDriver(object):
if keystone_version == 'v2.0':
return self._assign_admin_user_to_project_v2_keystone(project_id)
else:
v3client = self.keystoneclient._get_v3_keystone_admin_client()
with nfp_ctx_mgr.KeystoneContextManager as kcm:
v3client = kcm.retry(
self.keystoneclient._get_v3_keystone_admin_client, tries=3)
admin_id = v3client.users.find(
name=self.keystone_conf.admin_user).id
admin_role = self._get_role_by_name(v3client, "admin",
@@ -190,11 +189,13 @@ class HeatDriver(object):
def keystone(self, user, pwd, tenant_name, tenant_id=None):
if tenant_id:
return self.keystoneclient.get_scoped_keystone_token(
user, pwd, tenant_name, tenant_id)
with nfp_ctx_mgr.KeystoneContextManager as kcm:
return kcm.retry(self.keystoneclient.get_scoped_keystone_token,
user, pwd, tenant_name, tenant_id, tries=3)
else:
return self.keystoneclient.get_scoped_keystone_token(
user, pwd, tenant_name)
with nfp_ctx_mgr.KeystoneContextManager as kcm:
return kcm.retry(self.keystoneclient.get_scoped_keystone_token,
user, pwd, tenant_name, tries=3)
def _get_heat_client(self, tenant_id, assign_admin=False):
# REVISIT(Akash) Need to discuss use cases why it is needed,
@@ -207,8 +208,8 @@ class HeatDriver(object):
LOG.exception(_LE("Failed to assign admin user to project"))
return None
'''
logging_context = nfp_logging.get_logging_context()
auth_token = logging_context['auth_token']
nfp_context = module_context.get()
auth_token = nfp_context['log_context']['auth_token']
timeout_mins, timeout_seconds = divmod(STACK_ACTION_WAIT_TIME, 60)
if timeout_seconds:
@@ -246,7 +247,9 @@ class HeatDriver(object):
'network_function_instance')
if network_function_instance:
for port in network_function_instance.get('port_info'):
port_info = db_handler.get_port_info(db_session, port)
with nfp_ctx_mgr.DbContextManager:
port_info = db_handler.get_port_info(db_session,
port)
if port_info['port_model'] != nfp_constants.GBP_PORT:
return
@@ -255,27 +258,30 @@ class HeatDriver(object):
nfp_context)
service_type = service_details['service_details']['service_type']
if service_type in [pconst.LOADBALANCER, pconst.LOADBALANCERV2]:
logging_context = nfp_logging.get_logging_context()
auth_token = logging_context['auth_token']
if service_type in [pconst.LOADBALANCER]:
auth_token = nfp_context['log_context']['auth_token']
provider_tenant_id = nfp_context['tenant_id']
provider = service_details['provider_ptg']
self._create_policy_target_for_vip(
auth_token, provider_tenant_id, provider, service_type)
def _get_provider_ptg_info(self, auth_token, sci_id):
servicechain_instance = self.gbp_client.get_servicechain_instance(
with nfp_ctx_mgr.GBPContextManager as gcm:
servicechain_instance = gcm.retry(
self.gbp_client.get_servicechain_instance,
auth_token, sci_id)
provider_ptg_id = servicechain_instance['provider_ptg_id']
provider_ptg = self.gbp_client.get_policy_target_group(
provider_ptg = gcm.retry(self.gbp_client.get_policy_target_group,
auth_token, provider_ptg_id)
return provider_ptg
def _pre_stack_cleanup(self, network_function):
logging_context = nfp_logging.get_logging_context()
auth_token = logging_context['auth_token']
nfp_context = module_context.get()
auth_token = nfp_context['log_context']['auth_token']
with nfp_ctx_mgr.GBPContextManager:
service_profile = self.gbp_client.get_service_profile(
auth_token, network_function['service_profile_id'])
service_type = service_profile['service_type']
service_details = transport.parse_service_flavor_string(
service_profile['service_flavor'])
@@ -283,7 +289,8 @@ class HeatDriver(object):
else False)
if (service_type in [pconst.LOADBALANCER, pconst.LOADBALANCERV2]) and (
not base_mode_support):
provider = self._get_provider_ptg_info(auth_token,
provider = self._get_provider_ptg_info(
auth_token,
network_function['service_chain_id'])
provider_tenant_id = provider['tenant_id']
self._update_policy_targets_for_vip(
@@ -298,7 +305,8 @@ class HeatDriver(object):
def _get_vip_pt(self, auth_token, vip_port_id):
vip_pt = None
filters = {'port_id': vip_port_id}
policy_targets = self.gbp_client.get_policy_targets(
with nfp_ctx_mgr.GBPContextManager as gcm:
policy_targets = gcm.retry(self.gbp_client.get_policy_targets,
auth_token,
filters=filters)
if policy_targets:
@@ -311,7 +319,9 @@ class HeatDriver(object):
lb_vip = None
lb_vip_name = None
provider_l2p_subnets = self.neutron_client.get_subnets(
with nfp_ctx_mgr.NeutronContextManager as ncm:
provider_l2p_subnets = ncm.retry(
self.neutron_client.get_subnets,
auth_token,
filters={'id': provider['subnets']})
for subnet in provider_l2p_subnets:
@@ -324,16 +334,21 @@ class HeatDriver(object):
{"provider_ptg": provider})
return lb_vip, lb_vip_name
if service_type == pconst.LOADBALANCER:
lb_pool_ids = self.neutron_client.get_pools(
with nfp_ctx_mgr.NeutronContextManager as ncm:
lb_pool_ids = ncm.retry(
self.neutron_client.get_pools,
auth_token,
filters={'subnet_id': [provider_subnet['id']]})
if lb_pool_ids and lb_pool_ids[0]['vip_id']:
lb_vip = self.neutron_client.get_vip(
lb_vip = ncm.retry(
self.neutron_client.get_vip,
auth_token, lb_pool_ids[0]['vip_id'])['vip']
lb_vip_name = ("service_target_vip_pt" +
lb_pool_ids[0]['vip_id'])
elif service_type == pconst.LOADBALANCERV2:
loadbalancers = self.neutron_client.get_loadbalancers(
with nfp_ctx_mgr.NeutronContextManager as ncm:
loadbalancers = ncm.retry(
self.neutron_client.get_loadbalancers,
auth_token,
filters={'vip_subnet_id': [provider_subnet['id']]})
if loadbalancers:
@@ -346,16 +361,36 @@ class HeatDriver(object):
lb_vip_name = 'vip-' + loadbalancer['id']
return lb_vip, lb_vip_name
def _get_lb_service_targets(self, auth_token, provider):
service_targets = []
if provider.get("policy_targets"):
filters = {'id': provider.get("policy_targets")}
else:
filters = {'policy_target_group_id': provider['id']}
with nfp_ctx_mgr.GBPContextManager as gcm:
policy_targets = gcm.retry(self.gbp_client.get_policy_targets,
auth_token,
filters=filters)
for policy_target in policy_targets:
if ('endpoint' in policy_target['name'] and
self._is_service_target(policy_target)):
service_targets.append(policy_target)
return service_targets
def _create_policy_target_for_vip(self, auth_token,
provider_tenant_id,
provider, service_type):
admin_token = self.keystoneclient.get_admin_token()
with nfp_ctx_mgr.KeystoneContextManager as kcm:
admin_token = kcm.retry(
self.keystoneclient.get_admin_token, tries=3)
lb_vip, vip_name = self._get_lb_vip(auth_token, provider, service_type)
service_targets = self._get_lb_service_targets(admin_token, provider)
if not (lb_vip and service_targets):
return None
vip_pt = self.gbp_client.create_policy_target(
with nfp_ctx_mgr.GBPContextManager as gcm:
vip_pt = gcm.retry(self.gbp_client.create_policy_target,
auth_token, provider_tenant_id, provider['id'],
vip_name, lb_vip['port_id'])
@@ -365,27 +400,38 @@ class HeatDriver(object):
service_target_port_id = service_target['port_id']
policy_target_info = {'cluster_id': vip_pt['id']}
self.gbp_client.update_policy_target(admin_token,
with nfp_ctx_mgr.GBPContextManager as gcm:
gcm.retry(self.gbp_client.update_policy_target,
admin_token,
service_target_id, policy_target_info)
service_target_port = self.neutron_client.get_port(
admin_token, service_target_port_id)['port']
with nfp_ctx_mgr.NeutronContextManager as ncm:
service_target_port = ncm.retry(self.neutron_client.get_port,
admin_token,
service_target_port_id)['port']
vip_ip = service_target_port[
'allowed_address_pairs'][0]['ip_address']
# Update allowed address pairs entry came through cluster_id
# updation with provider_port mac address.
updated_port = {
'allowed_address_pairs': [{'ip_address': vip_ip,
'allowed_address_pairs': [
{
'ip_address': vip_ip,
'mac_address': service_target_port['mac_address']}]
}
self.neutron_client.update_port(
admin_token, service_target_port_id, **updated_port)
with nfp_ctx_mgr.NeutronContextManager as ncm:
ncm.retry(self.neutron_client.update_port,
admin_token, service_target_port_id,
**updated_port)
def _update_policy_targets_for_vip(self, auth_token,
provider_tenant_id,
provider, service_type):
admin_token = self.keystoneclient.get_admin_token()
with nfp_ctx_mgr.KeystoneContextManager as kcm:
admin_token = kcm.retry(
self.keystoneclient.get_admin_token, tries=3)
lb_vip, vip_name = self._get_lb_vip(auth_token, provider, service_type)
service_targets = self._get_lb_service_targets(admin_token, provider)
if not (lb_vip and service_targets):
@@ -394,23 +440,25 @@ class HeatDriver(object):
for service_target in service_targets:
service_target_id = service_target['id']
policy_target_info = {'cluster_id': ''}
self.gbp_client.update_policy_target(admin_token,
with nfp_ctx_mgr.GBPContextManager as gcm:
gcm.retry(self.gbp_client.update_policy_target,
admin_token,
service_target_id, policy_target_info)
def _get_lb_service_targets(self, auth_token, provider):
service_targets = []
def _get_provider_pt(self, auth_token, provider):
if provider.get("policy_targets"):
filters = {'id': provider.get("policy_targets")}
else:
filters = {'policy_target_group_id': provider['id']}
policy_targets = self.gbp_client.get_policy_targets(
filters = {'policy_target_group': provider['id']}
with nfp_ctx_mgr.GBPContextManager as gcm:
policy_targets = gcm.retry(self.gbp_client.get_policy_targets,
auth_token,
filters=filters)
for policy_target in policy_targets:
if ('endpoint' in policy_target['name'] and
self._is_service_target(policy_target)):
service_targets.append(policy_target)
return service_targets
return policy_target
return None
def _is_service_target(self, policy_target):
if policy_target['name'] and (policy_target['name'].startswith(
@@ -424,7 +472,9 @@ class HeatDriver(object):
def _get_member_ips(self, auth_token, ptg):
member_addresses = []
if ptg.get("policy_targets"):
policy_targets = self.gbp_client.get_policy_targets(
with nfp_ctx_mgr.GBPContextManager as gcm:
policy_targets = gcm.retry(
self.gbp_client.get_policy_targets,
auth_token,
filters={'id': ptg.get("policy_targets")})
else:
@@ -433,7 +483,8 @@ class HeatDriver(object):
if not self._is_service_target(policy_target):
port_id = policy_target.get("port_id")
if port_id:
port = self.neutron_client.get_port(
with nfp_ctx_mgr.NeutronContextManager as ncm:
port = ncm.retry(self.neutron_client.get_port,
auth_token, port_id)['port']
ip_address = port.get('fixed_ips')[0].get("ip_address")
member_addresses.append(ip_address)
@@ -535,15 +586,34 @@ class HeatDriver(object):
is_template_aws_version,
"OS::Neutron::LBaaS::Pool"
)
healthmonitors = self._get_all_heat_resource_keys(
stack_template[resources_key],
is_template_aws_version,
"OS::Neutron::LBaaS::HealthMonitor"
)
if not pools:
return
for member_ip in member_ips:
# Add "depends_on" to make sure resources get created sequentially.
# First member should be created after
# all pools and healthmonitors creation completed.
# Other members should be created one by one.
prev_member = None
pools_and_hms = [] + pools + healthmonitors
for pool in pools:
for member_ip in member_ips:
member_name = 'mem-' + member_ip + '-' + pool
stack_template[resources_key][member_name] = (
member_template = (
self._generate_lbv2_member_template(
is_template_aws_version,
member_ip, stack_template, pool_name=pool))
if prev_member:
member_template.update({"depends_on": prev_member})
# No previous member means it's the first member
else:
member_template.update({"depends_on": pools_and_hms})
stack_template[resources_key][member_name] = member_template
prev_member = member_name
def _generate_pool_members(self, auth_token, stack_template,
config_param_values, provider_ptg,
@@ -567,17 +637,21 @@ class HeatDriver(object):
def _get_consumers_for_chain(self, auth_token, provider):
filters = {'id': provider['provided_policy_rule_sets']}
provided_prs = self.gbp_client.get_policy_rule_sets(
with nfp_ctx_mgr.GBPContextManager as gcm:
provided_prs = gcm.retry(self.gbp_client.get_policy_rule_sets,
auth_token, filters=filters)
redirect_prs = None
for prs in provided_prs:
filters = {'id': prs['policy_rules']}
policy_rules = self.gbp_client.get_policy_rules(
with nfp_ctx_mgr.GBPContextManager as gcm:
policy_rules = gcm.retry(self.gbp_client.get_policy_rules,
auth_token, filters=filters)
for policy_rule in policy_rules:
filters = {'id': policy_rule['policy_actions'],
'action_type': [gconst.GP_ACTION_REDIRECT]}
policy_actions = self.gbp_client.get_policy_actions(
with nfp_ctx_mgr.GBPContextManager as gcm:
policy_actions = gcm.retry(
self.gbp_client.get_policy_actions,
auth_token, filters=filters)
if policy_actions:
redirect_prs = prs
@@ -717,7 +791,9 @@ class HeatDriver(object):
stack_template['resources'], is_template_aws_version,
'OS::Neutron::FirewallPolicy')[0]
provider_l2p_subnets = self.neutron_client.get_subnets(
with nfp_ctx_mgr.NeutronContextManager as ncm:
provider_l2p_subnets = ncm.retry(
self.neutron_client.get_subnets,
auth_token,
filters={'id': provider['subnets']})
for subnet in provider_l2p_subnets:
@@ -738,7 +814,9 @@ class HeatDriver(object):
if consumer_ptgs:
filters = {'id': consumer_ptgs}
consumer_ptgs_details = self.gbp_client.get_policy_target_groups(
with nfp_ctx_mgr.GBPContextManager as gcm:
consumer_ptgs_details = gcm.retry(
self.gbp_client.get_policy_target_groups,
auth_token, filters)
# Revisit(Magesh): What is the name updated below ?? FW or Rule?
@@ -748,7 +826,8 @@ class HeatDriver(object):
continue
fw_template_properties.update({'name': consumer['id'][:3]})
for subnet_id in consumer['subnets']:
subnet = self.neutron_client.get_subnet(
with nfp_ctx_mgr.NeutronContextManager as ncm:
subnet = ncm.retry(self.neutron_client.get_subnet,
auth_token, subnet_id)['subnet']
if subnet['name'].startswith(APIC_OWNED_RES):
continue
@@ -760,7 +839,9 @@ class HeatDriver(object):
if consumer_eps:
filters = {'id': consumer_eps}
consumer_eps_details = self.gbp_client.get_external_policies(
with nfp_ctx_mgr.GBPContextManager as gcm:
consumer_eps_details = gcm.retry(
self.gbp_client.get_external_policies,
auth_token, filters)
for consumer_ep in consumer_eps_details:
fw_template_properties.update({'name': consumer_ep['id'][:3]})
@@ -800,14 +881,16 @@ class HeatDriver(object):
def _get_management_gw_ip(self, auth_token):
filters = {'name': [SVC_MGMT_PTG_NAME]}
svc_mgmt_ptgs = self.gbp_client.get_policy_target_groups(
with nfp_ctx_mgr.GBPContextManager as gcm:
svc_mgmt_ptgs = gcm.retry(self.gbp_client.get_policy_target_groups,
auth_token, filters)
if not svc_mgmt_ptgs:
LOG.error(_LE("Service Management Group is not created by Admin"))
return None
else:
mgmt_subnet_id = svc_mgmt_ptgs[0]['subnets'][0]
with nfp_ctx_mgr.NeutronContextManager as ncm:
mgmt_subnet = ncm.retry(self.neutron_client.get_subnet,
auth_token, mgmt_subnet_id)['subnet']
mgmt_gw_ip = mgmt_subnet['gateway_ip']
return mgmt_gw_ip
@@ -829,8 +912,9 @@ class HeatDriver(object):
else False)
network_function_id = nfp_context['network_function']['id']
# service_profile = service_details['service_profile']
service_chain_instance_id = service_details[
'servicechain_instance']['id']
consumer_port = service_details['consumer_port']
provider_port = service_details['provider_port']
mgmt_ip = service_details['mgmt_ip']
@@ -873,7 +957,9 @@ class HeatDriver(object):
if not mgmt_gw_ip:
return None
with nfp_ctx_mgr.GBPContextManager as gcm:
services_nsp = gcm.retry(
self.gbp_client.get_network_service_policies,
auth_token,
filters={'name': ['nfp_services_nsp']})
if not services_nsp:
@@ -888,12 +974,16 @@ class HeatDriver(object):
"name": "vpn_svc_external_access"}]
}
}
with nfp_ctx_mgr.GBPContextManager as gcm:
nsp = gcm.retry(
self.gbp_client.create_network_service_policy,
auth_token, fip_nsp)
else:
nsp = services_nsp[0]
with nfp_ctx_mgr.GBPContextManager as gcm:
stitching_pts = gcm.retry(
self.gbp_client.get_policy_targets,
auth_token,
filters={'port_id': [consumer_port['id']]})
if not stitching_pts:
@@ -903,15 +993,11 @@ class HeatDriver(object):
stitching_ptg_id = (
stitching_pts[0]['policy_target_group_id'])
with nfp_ctx_mgr.GBPContextManager as gcm:
gcm.retry(self.gbp_client.update_policy_target_group,
auth_token, stitching_ptg_id,
{'policy_target_group': {
'network_service_policy_id': nsp['id']}})
stitching_port_fip = self._get_consumer_fip(auth_token,
consumer_port)
@@ -1037,20 +1123,28 @@ class HeatDriver(object):
config_param_values['Subnet'] = (
provider_port['fixed_ips'][0]['subnet_id']
if consumer_port else None)
with nfp_ctx_mgr.GBPContextManager as gcm:
l2p = gcm.retry(self.gbp_client.get_l2_policy,
auth_token, provider['l2_policy_id'])
l3p = gcm.retry(self.gbp_client.get_l3_policy,
auth_token, l2p['l3_policy_id'])
config_param_values['RouterId'] = l3p['routers'][0]
mgmt_gw_ip = self._get_management_gw_ip(auth_token)
if not mgmt_gw_ip:
return None, None
with nfp_ctx_mgr.NeutronContextManager as ncm:
provider_cidr = ncm.retry(
self.neutron_client.get_subnet,
auth_token, provider_port['fixed_ips'][0][
'subnet_id'])['subnet']['cidr']
stitching_port_fip = self._get_consumer_fip(auth_token,
consumer_port)
if not stitching_port_fip:
return None, None
if not base_mode_support:
# stack_params['ServiceDescription'] = nf_desc
siteconn_keys = self._get_site_conn_keys(
stack_template[resources_key],
is_template_aws_version,
@@ -1082,7 +1176,9 @@ class HeatDriver(object):
return (stack_template, stack_params)
def _get_consumer_fip(self, token, consumer_port):
with nfp_ctx_mgr.NeutronContextManager as ncm:
ext_net = ncm.retry(
self.neutron_client.get_networks,
token, filters={'name': [INTERNET_OUT_EXT_NET_NAME]})
if not ext_net:
LOG.error(_LE("'internet_out_network_name' not configured"
@@ -1095,7 +1191,8 @@ class HeatDriver(object):
'floating_network_id': [ext_net[0]['id']]}
try:
# return floatingip of the stitching port -> consumer_port['id']
with nfp_ctx_mgr.NeutronContextManager as ncm:
return ncm.retry(self.neutron_client.get_floating_ips, token,
**filters)[0]['floating_ip_address']
except Exception:
LOG.error(_LE("Floating IP for VPN Service has either exhausted"
@@ -1110,7 +1207,9 @@ class HeatDriver(object):
nf_desc = None
common_desc = {'network_function_id': str(network_function['id'])}
provider_cidr = provider_subnet = None
with nfp_ctx_mgr.NeutronContextManager as ncm:
provider_l2p_subnets = ncm.retry(
self.neutron_client.get_subnets,
auth_token, filters={'id': provider['subnets']})
for subnet in provider_l2p_subnets:
if not subnet['name'].startswith(APIC_OWNED_RES):
@@ -1157,7 +1256,9 @@ class HeatDriver(object):
if not base_mode_support:
provider_port_mac = provider_port['mac_address']
with nfp_ctx_mgr.NeutronContextManager as ncm:
provider_cidr = ncm.retry(
self.neutron_client.get_subnet,
auth_token, provider_port['fixed_ips'][0][
'subnet_id'])['subnet']['cidr']
else:
@@ -1236,12 +1337,14 @@ class HeatDriver(object):
config_param_values['Subnet'] = (
provider_port['fixed_ips'][0]['subnet_id']
if consumer_port else None)
with nfp_ctx_mgr.GBPContextManager as gcm:
l2p = gcm.retry(self.gbp_client.get_l2_policy,
auth_token, provider['l2_policy_id'])
l3p = gcm.retry(self.gbp_client.get_l3_policy,
auth_token, l2p['l3_policy_id'])
config_param_values['RouterId'] = l3p['routers'][0]
with nfp_ctx_mgr.NeutronContextManager as ncm:
stitching_subnet = ncm.retry(self.neutron_client.get_subnet,
auth_token,
consumer['subnets'][0])['subnet']
stitching_cidr = stitching_subnet['cidr']
@@ -1249,7 +1352,9 @@ class HeatDriver(object):
if not mgmt_gw_ip:
return None, None
if not update:
with nfp_ctx_mgr.GBPContextManager as gcm:
services_nsp = gcm.retry(
self.gbp_client.get_network_service_policies,
auth_token,
filters={'name': ['nfp_services_nsp']})
if not services_nsp:
@@ -1264,12 +1369,16 @@ class HeatDriver(object):
"name": "vpn_svc_external_access"}]
}
}
with nfp_ctx_mgr.GBPContextManager as gcm:
nsp = gcm.retry(
self.gbp_client.create_network_service_policy,
auth_token, fip_nsp)
else:
nsp = services_nsp[0]
if not base_mode_support:
with nfp_ctx_mgr.GBPContextManager as gcm:
stitching_pts = gcm.retry(
self.gbp_client.get_policy_targets,
auth_token,
filters={'port_id': [consumer_port['id']]})
if not stitching_pts:
@@ -1280,13 +1389,17 @@ class HeatDriver(object):
stitching_pts[0]['policy_target_group_id'])
else:
stitching_ptg_id = consumer['id']
with nfp_ctx_mgr.GBPContextManager as gcm:
gcm.retry(self.gbp_client.update_policy_target_group,
auth_token, stitching_ptg_id,
{'policy_target_group': {
'network_service_policy_id': nsp['id']}})
if not base_mode_support:
with nfp_ctx_mgr.NeutronContextManager as ncm:
ext_net = ncm.retry(
self.neutron_client.get_networks,
auth_token,
filters={'name': [INTERNET_OUT_EXT_NET_NAME]})
if not ext_net:
LOG.error(_LE("'internet_out_network_name' not configured"
" in [heat_driver] or Network %(network)s is"
@@ -1295,7 +1408,9 @@ class HeatDriver(object):
return None, None
filters = {'port_id': [consumer_port['id']],
'floating_network_id': [ext_net[0]['id']]}
with nfp_ctx_mgr.NeutronContextManager as ncm:
floatingips = ncm.retry(
self.neutron_client.get_floating_ips,
auth_token, filters=filters)
if not floatingips:
LOG.error(_LE("Floating IP for VPN Service has been "
@@ -1385,8 +1500,11 @@ class HeatDriver(object):
network_function_instance = network_function_details.get(
'network_function_instance')
service_profile_id = network_function['service_profile_id']
with nfp_ctx_mgr.KeystoneContextManager as kcm:
admin_token = kcm.retry(
self.keystoneclient.get_admin_token, tries=3)
with nfp_ctx_mgr.GBPContextManager as gcm:
service_profile = gcm.retry(self.gbp_client.get_service_profile,
admin_token, service_profile_id)
service_details = transport.parse_service_flavor_string(
@@ -1400,20 +1518,24 @@ class HeatDriver(object):
config_policy_id = network_function['config_policy_id']
service_id = network_function['service_id']
with nfp_ctx_mgr.GBPContextManager as gcm:
servicechain_node = gcm.retry(
self.gbp_client.get_servicechain_node,
admin_token, service_id)
service_chain_id = network_function['service_chain_id']
servicechain_instance = gcm.retry(
self.gbp_client.get_servicechain_instance,
admin_token,
service_chain_id)
provider_ptg_id = servicechain_instance['provider_ptg_id']
consumer_ptg_id = servicechain_instance['consumer_ptg_id']
provider_ptg = gcm.retry(self.gbp_client.get_policy_target_group,
admin_token,
provider_ptg_id)
consumer_ptg = None
if consumer_ptg_id and consumer_ptg_id != 'N/A':
consumer_ptg = gcm.retry(
self.gbp_client.get_policy_target_group,
admin_token,
consumer_ptg_id)
@@ -1424,34 +1546,44 @@ class HeatDriver(object):
policy_target = None
if network_function_instance:
for port in network_function_instance.get('port_info'):
with nfp_ctx_mgr.DbContextManager:
port_info = db_handler.get_port_info(db_session, port)
port_classification = port_info['port_classification']
if port_info['port_model'] == nfp_constants.GBP_PORT:
policy_target_id = port_info['id']
with nfp_ctx_mgr.GBPContextManager as gcm:
port_id = gcm.retry(
self.gbp_client.get_policy_targets,
admin_token,
filters={'id': policy_target_id})[0]['port_id']
policy_target = gcm.retry(
self.gbp_client.get_policy_target,
admin_token, policy_target_id)
else:
port_id = port_info['id']
if port_classification == nfp_constants.CONSUMER:
with nfp_ctx_mgr.NeutronContextManager as ncm:
consumer_port = ncm.retry(self.neutron_client.get_port,
admin_token, port_id)['port']
if policy_target:
with nfp_ctx_mgr.GBPContextManager as gcm:
consumer_policy_target_group = (
gcm.retry(
self.gbp_client.get_policy_target_group,
admin_token,
policy_target['policy_target_group_id']))
elif port_classification == nfp_constants.PROVIDER:
LOG.info(_LI("provider info: %(p_info)s"),
{'p_info': port_id})
with nfp_ctx_mgr.NeutronContextManager as ncm:
provider_port = ncm.retry(self.neutron_client.get_port,
admin_token, port_id)['port']
if policy_target:
with nfp_ctx_mgr.GBPContextManager as gcm:
provider_policy_target_group = (
gcm.retry(
self.gbp_client.get_policy_target_group,
admin_token,
policy_target['policy_target_group_id']))
@@ -1569,8 +1701,8 @@ class HeatDriver(object):
heatclient = self._get_heat_client(tenant_id)
if not heatclient:
return failure_status
with nfp_ctx_mgr.HeatContextManager as hcm:
stack = hcm.retry(heatclient.get, stack_id)
if stack.stack_status == 'DELETE_FAILED':
return failure_status
elif stack.stack_status == 'CREATE_COMPLETE':
@@ -1590,10 +1722,6 @@ class HeatDriver(object):
'UPDATE_IN_PROGRESS', 'CREATE_IN_PROGRESS',
'DELETE_IN_PROGRESS']:
return intermediate_status
def check_config_complete(self, nfp_context):
success_status = "COMPLETED"
@@ -1606,8 +1734,8 @@ class HeatDriver(object):
heatclient = self._get_heat_client(provider_tenant_id)
if not heatclient:
return failure_status
with nfp_ctx_mgr.HeatContextManager as hcm:
stack = hcm.retry(heatclient.get, stack_id)
if stack.stack_status == 'DELETE_FAILED':
return failure_status
elif stack.stack_status == 'CREATE_COMPLETE':
@@ -1627,10 +1755,6 @@ class HeatDriver(object):
'UPDATE_IN_PROGRESS', 'CREATE_IN_PROGRESS',
'DELETE_IN_PROGRESS']:
return intermediate_status
def is_config_delete_complete(self, stack_id, tenant_id,
network_function=None):
@@ -1640,8 +1764,8 @@ class HeatDriver(object):
heatclient = self._get_heat_client(tenant_id)
if not heatclient:
return failure_status
with nfp_ctx_mgr.HeatContextManager as hcm:
stack = hcm.retry(heatclient.get, stack_id)
if stack.stack_status == 'DELETE_FAILED':
return failure_status
elif stack.stack_status == 'CREATE_COMPLETE':
@@ -1660,10 +1784,6 @@ class HeatDriver(object):
'UPDATE_IN_PROGRESS', 'CREATE_IN_PROGRESS',
'DELETE_IN_PROGRESS']:
return intermediate_status
def get_service_details_from_nfp_context(self, nfp_context):
network_function = nfp_context['network_function']
@@ -1741,16 +1861,9 @@ class HeatDriver(object):
if not stack_template and not stack_params:
return None
with nfp_ctx_mgr.HeatContextManager as hcm:
stack = hcm.retry(heatclient.create, stack_name,
stack_template, stack_params)
stack_id = stack['stack']['id']
LOG.info(_LI("Created stack with ID %(stack_id)s and "
"name %(stack_name)s for provider PTG %(provider)s"),
@@ -1799,15 +1912,9 @@ class HeatDriver(object):
# Heat does not accept space in stack name
stack_name = stack_name.replace(" ", "")
with nfp_ctx_mgr.HeatContextManager as hcm:
stack = hcm.retry(heatclient.create, stack_name,
stack_template, stack_params)
stack_id = stack['stack']['id']
LOG.info(_LI("Created stack with ID %(stack_id)s and "
@@ -1825,13 +1932,15 @@ class HeatDriver(object):
return None
if network_function:
self._pre_stack_cleanup(network_function)
with nfp_ctx_mgr.HeatContextManager as hcm:
hcm.retry(heatclient.delete, stack_id)
except Exception as err:
# Log the error and continue with VM delete in case of *aas
# cleanup failure
LOG.exception(_LE("Cleaning up the service chain stack failed "
"with Error: %(error)s"), {'error': err})
return None
return stack_id
def is_update_config_supported(self, service_type):
@@ -1869,40 +1978,19 @@ class HeatDriver(object):
return None
if stack_id:
with nfp_ctx_mgr.HeatContextManager as hcm:
hcm.retry(heatclient.update, stack_id,
stack_template, stack_params)
if not stack_id:
stack_name = ("stack_" + service_chain_instance['name'] +
service_chain_node['name'] +
service_chain_instance['id'][:8] +
service_chain_node['id'][:8] + '-' +
time.strftime("%Y%m%d%H%M%S"))
with nfp_ctx_mgr.HeatContextManager as hcm:
stack = hcm.retry(heatclient.create, stack_name,
stack_template, stack_params)
stack_id = stack["stack"]["id"]
return stack_id


@@ -0,0 +1,518 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Subnet(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'cidr': self.data.get('cidr'),
'id': self.data.get('id'),
'gateway_ip': self.data.get('gateway_ip'),
'name': self.data.get('name')
}
return self.data
class Port(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id'),
'ip_address': self.data.get('ip_address'),
'mac_address': self.data.get('mac_address'),
'mac': self.data.get('mac'),
'name': self.data.get('name'),
'fixed_ips': self.data.get('fixed_ips'),
'gateway_ip': self.data.get('gateway_ip'),
'neutron_port': self.data.get('neutron_port'),
'cidr': self.data.get('cidr')
}
return self.data
class Pt(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id'),
'port_id': self.data.get('port_id'),
'policy_target_group_id': self.data.get(
'policy_target_group_id'),
'group_default_gateway': self.data.get(
'group_default_gateway'),
'proxy_gateway': self.data.get(
'proxy_gateway')
}
return self.data
class Ptg(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id'),
'name': self.data.get('name'),
'provided_policy_rule_sets': self.data.get(
'provided_policy_rule_sets'),
'proxied_group_id': self.data.get(
'proxied_group_id'),
'policy_targets': self.data.get('policy_targets'),
'tenant_id': self.data.get('tenant_id'),
'subnets': self.data.get('subnets'),
'l2_policy_id': self.data.get('l2_policy_id')
}
return self.data
class NetworkFunctionDevice(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id'),
'interfaces_in_use': self.data.get('interfaces_in_use'),
'status': self.data.get('status'),
'mgmt_ip_address': self.data.get('mgmt_ip_address'),
'monitoring_port_id': self.data.get('monitoring_port_id'),
'reference_count': self.data.get('reference_count'),
'mgmt_port_id': self.data.get('mgmt_port_id'),
'tenant_id': self.data.get('tenant_id'),
}
return self.data
class NetworkFunctionInstance(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id'),
'status': self.data.get('status'),
'port_info': self.data.get('port_info'),
'network_function_device_id': self.data.get(
'network_function_device_id'),
'tenant_id': self.data.get('tenant_id'),
'name': self.data.get('name')
}
return self.data
class NetworkFunction(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'name': self.data.get('name'),
'status': self.data.get('status'),
'service_id': self.data.get('service_id'),
'config_policy_id': self.data.get('config_policy_id'),
'service_profile_id': self.data.get('service_profile_id'),
'service_chain_id': self.data.get('service_chain_id'),
'id': self.data.get('id'),
'tenant_id': self.data.get('tenant_id'),
'network_function_instances': self.data.get(
'network_function_instances'),
'description': self.data.get('description')
}
return self.data
class ResourceOwnerContext(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'admin_token': self.data.get('admin_token'),
'admin_tenant_id': self.data.get('admin_tenant_id'),
'tenant_name': self.data.get('tenant_name'),
'tenant': self.data.get('tenant')
}
return self.data
class Management(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'port': self.data.get('port')
}
return self.data
class Provider(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
context = {
'subnet': Subnet(
self.data.get('subnet')).purge(),
'port_model': self.data.get('port_model'),
'port_classification': self.data.get('port_classification')
}
if type(self.data.get('pt')) is list:
pt_list = []
for pt in self.data['pt']:
pt_list.append(Pt(pt).purge())
context['pt'] = pt_list
else:
context['pt'] = Pt(self.data.get('pt')).purge()
if type(self.data.get('ptg')) is list:
ptg_list = []
for ptg in self.data['ptg']:
ptg_list.append(Ptg(ptg).purge())
context['ptg'] = ptg_list
else:
context['ptg'] = Ptg(self.data.get('ptg')).purge()
if type(self.data.get('port')) is list:
port_list = []
for port in self.data['port']:
port_list.append(Port(port).purge())
context['port'] = port_list
else:
context['port'] = Port(self.data.get('port')).purge()
return context
return self.data
class Consumer(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
context = {
'subnet': Subnet(
self.data.get('subnet')).purge(),
'port_model': self.data.get('port_model'),
'port_classification': self.data.get('port_classification')
}
if type(self.data.get('pt')) is list:
pt_list = []
for pt in self.data['pt']:
pt_list.append(Pt(pt).purge())
context['pt'] = pt_list
else:
context['pt'] = Pt(self.data.get('pt')).purge()
if type(self.data.get('ptg')) is list:
ptg_list = []
for ptg in self.data['ptg']:
ptg_list.append(Ptg(ptg).purge())
context['ptg'] = ptg_list
else:
context['ptg'] = Ptg(self.data.get('ptg')).purge()
if type(self.data.get('port')) is list:
port_list = []
for port in self.data['port']:
port_list.append(Port(port).purge())
context['port'] = port_list
else:
context['port'] = Port(self.data.get('port')).purge()
return context
return self.data
class ScNodes(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
sc_service_profile = self.data.get('sc_service_profile')
context = {'sc_service_profile': {}}
if sc_service_profile:
context['sc_service_profile'][
'service_type'] = sc_service_profile.get('service_type')
return context
return self.data
class ServiceChainSpecs(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
sc_nodes = self.data.get('sc_nodes')
if type(sc_nodes) is list:
context = []
for sc_node in sc_nodes:
context.append(ScNodes(sc_node).purge())
return {
'sc_nodes': context
}
else:
return {
'sc_nodes': ScNodes(sc_nodes).purge()
}
return self.data
class ServiceChainInstance(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id'),
'config_param_values': self.data.get('config_param_values'),
'name': self.data.get('name')
}
return self.data
class ConsumingPtgsDetails(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
context = {}
context['ptg'] = Ptg(self.data.get('ptg')).purge()
subnets = self.data.get('subnets')
if type(subnets) is list:
subnet_ctxt = []
for subnet in subnets:
subnet_ctxt.append(Subnet(subnet).purge())
context['subnets'] = subnet_ctxt
else:
context['subnets'] = Subnet(subnets).purge()
return context
return self.data
class ServiceChainNode(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'service_profile_id': self.data.get('service_profile_id'),
'service_type': self.data.get('service_type'),
'config': self.data.get('config'),
'name': self.data.get('name'),
'id': self.data.get('id')
}
return self.data
class ServiceDetails(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'service_vendor': self.data.get('service_vendor'),
'service_type': self.data.get('service_type'),
'network_mode': self.data.get('network_mode'),
'image_name': self.data.get('image_name'),
'device_type': self.data.get('device_type'),
}
return self.data
class ConsumingEpsDetails(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id')
}
return self.data
class ServerGrpId(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'result': self.data.get('result')
}
return self.data
class ServiceProfile(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'id': self.data.get('id'),
'service_flavor': self.data.get('service_flavor'),
'service_type': self.data.get('service_type')
}
return self.data
class LogContext(object):
def __init__(self, data):
self.data = data
def purge(self):
if self.data:
return {
'meta_id': self.data.get('meta_id', '-'),
'nfi_id': self.data.get('nfi_id', '-'),
'nfd_id': self.data.get('nfd_id', '-'),
'path': self.data.get('path'),
'auth_token': self.data.get('auth_token'),
'namespace': self.data.get('namespace')
}
return self.data
class NfpContext(object):
def __init__(self, data):
self.data = data
def purge(self):
context = {
'active_nfd_ids': self.data.get('active_nfd_ids'),
'device_without_plugging': self.data.get(
'device_without_plugging'),
'id': self.data.get('id'), # event id
'key': self.data.get('key'), # event key
'admin_token': self.data.get('admin_token'),
'event_desc': self.data.get('event_desc'),
'config_policy_id': self.data.get('config_policy_id'),
'management_ptg_id': self.data.get('management_ptg_id'),
'network_function_mode': self.data.get('network_function_mode'),
'files': self.data.get('files'),
'base_mode_support': self.data.get('base_mode_support'),
'share_existing_device': self.data.get('share_existing_device'),
'tenant_id': self.data.get('tenant_id'),
'binding_key': self.data.get('binding_key'),
'provider_metadata': self.data.get('provider_metadata'),
'admin_tenant_id': self.data.get('admin_tenant_id'),
'is_nfi_in_graph': self.data.get('is_nfi_in_graph'),
'network_function_device': NetworkFunctionDevice(
self.data.get('network_function_device')).purge(),
'network_function_instance': NetworkFunctionInstance(
self.data.get('network_function_instance')).purge(),
'network_function': NetworkFunction(
self.data.get('network_function')).purge(),
'resource_owner_context': ResourceOwnerContext(
self.data.get('resource_owner_context')).purge(),
'management': Management(
self.data.get('management')).purge(),
'provider': Provider(
self.data.get('provider')).purge(),
'consumer': Consumer(
self.data.get('consumer')).purge(),
'service_chain_instance': ServiceChainInstance(
self.data.get('service_chain_instance')).purge(),
'service_details': ServiceDetails(
self.data.get('service_details')).purge(),
'service_chain_node': ServiceChainNode(
self.data.get('service_chain_node')).purge(),
'server_grp_id': ServerGrpId(
self.data.get('server_grp_id')).purge(),
'service_profile': ServiceProfile(
self.data.get('service_profile')).purge(),
'log_context': LogContext(self.data.get('log_context')).purge(),
'enable_port_security': self.data.get('enable_port_security')
}
service_chain_specs = self.data.get('service_chain_specs')
if type(service_chain_specs) is list:
ctxt = []
for sc_specs in service_chain_specs:
ctxt.append(ServiceChainSpecs(sc_specs).purge())
context['service_chain_specs'] = ctxt
else:
context['service_chain_specs'] = ServiceChainSpecs(
service_chain_specs).purge()
consuming_ptgs_details = self.data.get('consuming_ptgs_details')
if type(consuming_ptgs_details) is list:
ctxt = []
for ptgs_details in consuming_ptgs_details:
ctxt.append(ConsumingPtgsDetails(ptgs_details).purge())
context['consuming_ptgs_details'] = ctxt
else:
context['consuming_ptgs_details'] = ConsumingPtgsDetails(
consuming_ptgs_details).purge()
consuming_eps_details = self.data.get('consuming_eps_details')
if type(consuming_eps_details) is list:
ctxt = []
for eps_details in consuming_eps_details:
ctxt.append(ConsumingEpsDetails(eps_details).purge())
context['consuming_eps_details'] = ctxt
else:
context['consuming_eps_details'] = ConsumingEpsDetails(
consuming_eps_details).purge()
return context
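(Taken together, these wrapper classes whittle a full nfp context down to a whitelisted set of keys before it is logged or carried across events; purge() returns the trimmed dict, and falsy input such as None is passed back unchanged. A quick illustration with made-up data:)

    port = Port({'id': 'p1',
                 'mac_address': 'fa:16:3e:00:00:01',
                 'binding:host_id': 'compute-7'})  # non-whitelisted key
    print(port.purge())
    # {'id': 'p1', 'ip_address': None, 'mac_address': 'fa:16:3e:00:00:01',
    #  'mac': None, 'name': None, 'fixed_ips': None, 'gateway_ip': None,
    #  'neutron_port': None, 'cidr': None}
    print(Port(None).purge())  # None - falsy data is returned as-is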


@@ -138,7 +138,8 @@ class CommonDbMixin(object):
collection = self._apply_filters_to_query(collection, model, filters)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(
collection, model, limit,
sorts, marker_obj=marker_obj)
return collection


@@ -176,13 +176,23 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
def get_network_function_instances(self, session, filters=None,
fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
port_info = None
marker_obj = self._get_marker_obj(
'network_function_instances', limit, marker)
if filters:
port_info = filters.pop('port_info', None)
nfis = self._get_collection(
session, nfp_db_model.NetworkFunctionInstance,
self._make_network_function_instance_dict,
filters=filters, fields=fields, sorts=sorts, limit=limit,
marker_obj=marker_obj, page_reverse=page_reverse)
filtered_nfis = []
if port_info:
for nfi in nfis:
if port_info == nfi['port_info']:
filtered_nfis.append(nfi)
return filtered_nfis
return nfis
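(The `port_info` key is popped from `filters` because it cannot be pushed down into the SQL query like the scalar columns; it is matched in Python afterwards, and the match is whole-value equality against each instance's port_info. A hypothetical call:)

    # Hypothetical usage: returns only the NFIs whose port_info equals
    # the given list (an exact, order-sensitive comparison).
    nfis = nfp_db.get_network_function_instances(
        session,
        filters={'status': ['ACTIVE'],
                 'port_info': ['port-uuid-1', 'port-uuid-2']})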
def _set_mgmt_port_for_nfd(self, session, network_function_device_db,
network_function_device, is_update=False):
@@ -265,8 +275,8 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
if updated_provider_metadata_str:
updated_provider_metadata = jsonutils.loads(
updated_provider_metadata_str)
if (type(updated_provider_metadata) is dict and
updated_provider_metadata and provider_metadata):
updated_provider_metadata.update(provider_metadata)
provider_metadata_str = jsonutils.dumps(updated_provider_metadata)
else:
@@ -467,119 +477,6 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
return self._get_by_id(
session, nfp_db_model.NetworkInfo, network_id)
def _set_plugged_in_port_for_nfd_interface(self, session, nfd_interface_db,
interface, is_update=False):
plugged_in_port_id = interface.get('plugged_in_port_id')
if not plugged_in_port_id:
if not is_update:
nfd_interface_db.plugged_in_port_id = None
return
with session.begin(subtransactions=True):
port_info_db = nfp_db_model.PortInfo(
id=plugged_in_port_id['id'],
port_model=plugged_in_port_id['port_model'],
port_classification=plugged_in_port_id['port_classification'],
port_role=plugged_in_port_id['port_role'])
if is_update:
session.merge(port_info_db)
else:
session.add(port_info_db)
session.flush()
nfd_interface_db.plugged_in_port_id = port_info_db['id']
del interface['plugged_in_port_id']
def create_network_function_device_interface(self, session,
nfd_interface):
with session.begin(subtransactions=True):
mapped_real_port_id = nfd_interface.get('mapped_real_port_id')
nfd_interface_db = nfp_db_model.NetworkFunctionDeviceInterface(
id=(nfd_interface.get('id') or uuidutils.generate_uuid()),
tenant_id=nfd_interface['tenant_id'],
interface_position=nfd_interface['interface_position'],
mapped_real_port_id=mapped_real_port_id,
network_function_device_id=(
nfd_interface['network_function_device_id']))
self._set_plugged_in_port_for_nfd_interface(
session, nfd_interface_db, nfd_interface)
session.add(nfd_interface_db)
return self._make_network_function_device_interface_dict(
nfd_interface_db)
def update_network_function_device_interface(self, session,
nfd_interface_id,
updated_nfd_interface):
with session.begin(subtransactions=True):
nfd_interface_db = self._get_network_function_device_interface(
session, nfd_interface_id)
self._set_plugged_in_port_for_nfd_interface(
session, nfd_interface_db, updated_nfd_interface,
is_update=True)
nfd_interface_db.update(updated_nfd_interface)
return self._make_network_function_device_interface_dict(
nfd_interface_db)
def delete_network_function_device_interface(
self, session, network_function_device_interface_id):
with session.begin(subtransactions=True):
network_function_device_interface_db = (
self._get_network_function_device_interface(
session, network_function_device_interface_id))
if network_function_device_interface_db.plugged_in_port_id:
self.delete_port_info(
session,
network_function_device_interface_db.plugged_in_port_id)
session.delete(network_function_device_interface_db)
def _get_network_function_device_interface(self, session,
network_function_device_interface_id):
try:
return self._get_by_id(
session,
nfp_db_model.NetworkFunctionDeviceInterface,
network_function_device_interface_id)
except exc.NoResultFound:
raise nfp_exc.NetworkFunctionDeviceInterfaceNotFound(
network_function_device_interface_id=(
network_function_device_interface_id))
def get_network_function_device_interface(
self, session, network_function_device_interface_id,
fields=None):
network_function_device_interface = (
self._get_network_function_device_interface(
session, network_function_device_interface_id))
return self._make_network_function_device_interface_dict(
network_function_device_interface, fields)
def get_network_function_device_interfaces(self, session, filters=None,
fields=None, sorts=None,
limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(
'network_function_device_interfaces', limit, marker)
return self._get_collection(
session,
nfp_db_model.NetworkFunctionDeviceInterface,
self._make_network_function_device_interface_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def _make_network_function_device_interface_dict(self, nfd_interface,
fields=None):
res = {
'id': nfd_interface['id'],
'tenant_id': nfd_interface['tenant_id'],
'plugged_in_port_id': nfd_interface['plugged_in_port_id'],
'interface_position': nfd_interface['interface_position'],
'mapped_real_port_id': nfd_interface['mapped_real_port_id'],
'network_function_device_id': (
nfd_interface['network_function_device_id'])
}
return res
def _make_port_info_dict(self, port_info, fields):
res = {
'id': port_info['id'],


@@ -145,24 +145,6 @@ class NetworkFunctionDevice(BASE, model_base.HasId, model_base.HasTenant,
gateway_port = sa.Column(sa.String(36), nullable=True)
class NetworkFunctionDeviceInterface(BASE, model_base.HasId,
model_base.HasTenant):
"""Represents the Network Function Device"""
__tablename__ = 'nfp_network_function_device_interfaces'
plugged_in_port_id = sa.Column(sa.String(36),
sa.ForeignKey('nfp_port_infos.id',
ondelete='SET NULL'),
nullable=True)
interface_position = sa.Column(sa.Integer(), nullable=True)
mapped_real_port_id = sa.Column(sa.String(36), nullable=True)
network_function_device_id = sa.Column(
sa.String(36),
sa.ForeignKey('nfp_network_function_devices.id',
ondelete='SET NULL'),
nullable=True)
class ClusterInfo(BASE, model_base.HasId, model_base.HasTenant):
"""
This table contains info about the ports participating in


@@ -14,13 +14,13 @@ import ast
from collections import defaultdict
from neutron._i18n import _LE
from neutron._i18n import _LW
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import data_formatter as df
from gbpservice.nfp.common import exceptions
from gbpservice.nfp.core import executor as nfp_executor
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
from gbpservice.nfp.orchestrator.coal.networking import (
nfp_gbp_network_driver
)
@@ -70,26 +70,24 @@ class OrchestrationDriver(object):
self.config = config
def _get_admin_tenant_id(self, token=None):
        with nfp_ctx_mgr.KeystoneContextManager as kcm:
            if not token:
                token = kcm.retry(
                    self.identity_handler.get_admin_token, tries=3)
            admin_tenant_name = (
                self.config.nfp_keystone_authtoken.admin_tenant_name)
            admin_tenant_id = kcm.retry(self.identity_handler.get_tenant_id,
                                        token,
                                        admin_tenant_name, tries=3)
            return admin_tenant_id
def _get_token(self, device_data_token):
        with nfp_ctx_mgr.KeystoneContextManager as kcm:
            token = (device_data_token
                     if device_data_token
                     else kcm.retry(
                         self.identity_handler.get_admin_token, tries=3))
        return token
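(This is the GET_TOKEN retry called out in the commit message: a token supplied by the caller is used as-is, while the admin-token fetch is retried up to three times to ride out transient keystone failures. The same logic as a standalone sketch — the function name and the kcm parameter are illustrative:)

    def get_token_sketch(identity_handler, device_data_token, kcm):
        # Prefer a token the caller already holds; only the keystone
        # round-trip is worth retrying.
        if device_data_token:
            return device_data_token
        return kcm.retry(identity_handler.get_admin_token, tries=3)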
def _is_device_sharing_supported(self):
@@ -161,36 +159,26 @@ class OrchestrationDriver(object):
token = self._get_token(device_data.get('token'))
if not token:
return None
        with nfp_ctx_mgr.NovaContextManager as ncm:
            metadata = ncm.retry(
                self.compute_handler_nova.get_image_metadata,
                token,
                self._get_admin_tenant_id(token=token),
                image_name)
provider_metadata = self._verify_provider_metadata(
image_name, metadata)
if not provider_metadata:
return {}
return provider_metadata
def _get_provider_metadata_fast(self, token,
admin_tenant_id, image_name, device_data):
        with nfp_ctx_mgr.NovaContextManager as ncm:
            metadata = ncm.retry(
                self.compute_handler_nova.get_image_metadata,
                token,
                admin_tenant_id,
                image_name)
provider_metadata = self._verify_provider_metadata(
image_name, metadata)
if not provider_metadata:
return {}
return provider_metadata
@@ -356,20 +344,24 @@ class OrchestrationDriver(object):
pre_launch_executor.add_job('UPDATE_PROVIDER_METADATA',
self._update_provider_metadata_fast,
token, admin_tenant_id,
image_name, device_data,
result_store=provider_metadata_result)
pre_launch_executor.add_job('GET_INTERFACES_FOR_DEVICE_CREATE',
self._get_interfaces_for_device_create,
token, admin_tenant_id,
network_handler, device_data)
pre_launch_executor.add_job('GET_IMAGE_ID',
self.get_image_id,
self.compute_handler_nova, token,
admin_tenant_id,
image_name, result_store=image_id_result)
pre_launch_executor.fire()
interfaces, image_id, provider_metadata = (
self._validate_pre_launch_executor_results(
network_handler,
device_data,
image_name,
image_id_result,
@@ -424,7 +416,8 @@ class OrchestrationDriver(object):
create_instance_executor.fire()
instance_id, mgmt_neutron_port_info = (
self._validate_create_instance_executor_results(
network_handler,
device_data,
interfaces,
instance_id_result,
@@ -577,17 +570,11 @@ class OrchestrationDriver(object):
if not mgmt_neutron_port_info:
LOG.error(_LE('Failed to get management port details. '))
with nfp_ctx_mgr.NovaContextManager as ncm:
ncm.retry(self.compute_handler_nova.delete_instance,
token,
admin_tenant_id,
instance_id)
self._delete_interfaces(device_data, interfaces,
network_handler=network_handler)
return None, _
@@ -644,15 +631,13 @@ class OrchestrationDriver(object):
#
# this method will be invoked again
# once the device instance deletion is completed
with nfp_ctx_mgr.NovaContextManager.new(
suppress=(Exception,)) as ncm:
ncm.retry(self.compute_handler_nova.delete_instance,
token,
device_data['tenant_id'],
device_data['id'])
else:
# device instance deletion is done, delete remaining resources
try:
@@ -703,17 +688,11 @@ class OrchestrationDriver(object):
if not token:
return None
with nfp_ctx_mgr.NovaContextManager.new(suppress=(Exception,)) as ncm:
device = ncm.retry(self.compute_handler_nova.get_instance,
device_data['token'],
device_data['tenant_id'],
device_data['id'])
return device['status']
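(NovaContextManager.new(suppress=(Exception,)) builds a manager that swallows the listed exception types on exit — the 'ignore' behavior from the commit message — and is what replaces the old except/LOG.error blocks. One caveat: when an exception is suppressed, names bound inside the with block, like `device` above, remain unassigned, so callers should rely on them only when the block ran to completion. A sketch of the mechanics, assumed rather than the actual implementation:)

    class SuppressingContextManagerSketch(object):
        def __init__(self, suppress=()):
            self.suppress = suppress

        @classmethod
        def new(cls, suppress=()):
            return cls(suppress=suppress)

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, tb):
            # Returning True tells Python to swallow the in-flight exception.
            return exc_type is not None and issubclass(exc_type, self.suppress)

        def retry(self, func, *args, **kwargs):
            kwargs.pop('tries', None)  # retries elided in this sketch
            return func(*args, **kwargs)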
@@ -878,20 +857,15 @@ class OrchestrationDriver(object):
if provider_metadata.get('supports_hotplug') is False:
return True
        with nfp_ctx_mgr.NovaContextManager.new(
                suppress=(Exception,)) as ncm:
            for port in device_data['ports']:
                port_id = network_handler.get_port_id(token, port['id'])
                ncm.retry(self.compute_handler_nova.detach_interface,
                          token,
                          device_data['tenant_id'],
                          device_data['id'],
                          port_id)
            return True
@_set_network_handler
@@ -989,5 +963,6 @@ class OrchestrationDriver(object):
device_data, network_handler=network_handler)
if not device_data:
return None
return df.get_network_function_info(
device_data, resource_type)


@@ -12,6 +12,10 @@
from oslo_config import cfg as oslo_config
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.core import context
from gbpservice.nfp.orchestrator import context as module_context
context.NfpContext = module_context.NfpContext
openstack_opts = [
oslo_config.StrOpt('auth_host',


@@ -16,9 +16,11 @@ import oslo_messaging as messaging
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import topics as nsf_topics
from gbpservice.nfp.common import utils as nfp_utils
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core.event import Event
from gbpservice.nfp.core import module as nfp_api
from gbpservice.nfp.core.rpc import RpcAgent
from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
from gbpservice.nfp.lib import transport
from gbpservice.nfp.orchestrator.db import nfp_db as nfp_db
from gbpservice.nfp.orchestrator.drivers import orchestration_driver
@@ -91,12 +93,14 @@ class RpcHandler(object):
self.conf = conf
self._controller = controller
self.rpc_event_mapping = {
nfp_constants.HEALTHMONITOR_RESOURCE: [
'HEALTH_MONITOR_COMPLETE',
'DEVICE_NOT_REACHABLE',
'DEVICE_NOT_REACHABLE',
'PERIODIC_HM_DEVICE_REACHABLE',
'PERIODIC_HM_DEVICE_NOT_REACHABLE', ],
nfp_constants.GENERIC_CONFIG: [
'DEVICE_CONFIGURED',
'DELETE_CONFIGURATION_COMPLETED',
'DEVICE_CONFIGURATION_FAILED'],
}
@@ -146,13 +150,17 @@ class RpcHandler(object):
# RPC APIs status notification from Configurator
def network_function_notification(self, context, notification_data):
try:
nfp_context = module_context.init()
info = notification_data.get('info')
responses = notification_data.get('notification')
request_info = info.get('context')
operation = request_info.get('operation')
logging_context = request_info.get('logging_context', {})
# nfp_context = request_info.get('nfp_context')
nfp_context['log_context'] = logging_context
if 'nfp_context' in request_info:
nfp_context['event_desc'] = request_info[
'nfp_context'].get('event_desc', {})
for response in responses:
resource = response.get('resource')
@@ -196,9 +204,6 @@ class RpcHandler(object):
self._create_event(event_id=event_id,
event_data=event_data,
key=key)
except Exception as err:
LOG.error(_LE("Exception: in RPC handler: %(err)s"), {'err': err})
class DeviceOrchestrator(nfp_api.NfpEventHandler):
@@ -312,7 +317,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
def handle_event(self, event):
try:
event_data = event.context
NFD = event_data.get('network_function_device_id')
NF = event_data.get('network_function_id')
NFI = event_data.get('network_function_instance_id')
@@ -336,6 +341,10 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
'error': e})
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
raise e
def handle_exception(self, event, exception):
return ExceptionHandler.handle(self, event, exception)
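(This is the 'single class exception handling' from the commit message: handle_event re-raises whatever a handler throws — note the `raise e` above — and the core then calls handle_exception with the event and the exception, so per-step try/except branches disappear from the module. A sketch of what the dispatcher might look like; its internals are assumptions:)

    class ExceptionHandlerSketch(object):
        """Illustrative dispatcher; the real ExceptionHandler routes on
        event.id to event-specific recovery paths."""

        @staticmethod
        def handle(orchestrator, event, exception):
            # A real implementation would branch on event.id and, for
            # example, release locks or post a *_FAILED event before
            # completing the original one.
            return orchestrator._controller.event_complete(
                event, result='FAILED')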
# Helper functions
def _log_event_created(self, event_id, event_data):
@@ -373,12 +382,15 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
id=event_id,
data=event_data)
self._controller.post_event(ev)
nfp_context = module_context.get()
self._log_event_created(event_id, nfp_context)
else:
# Same module API, so calling corresponding function directly.
nfp_context = module_context.get()
event = self._controller.new_event(
id=event_id,
data=event_data,
context=nfp_context)
self.handle_event(event)
def _release_cnfd_lock(self, device):
@@ -387,8 +399,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
ev = self._controller.new_event(
id='CREATE_NETWORK_FUNCTION_DEVICE',
data=device, key=nf_id + nfi_id)
if device.get('binding_key'):
ev.binding_key = device.get('binding_key')
LOG.debug("Releasing tenant based lock for "
"CREATE_NETWORK_FUNCTION_DEVICE event with binding "
"key: %s" % ev.binding_key)
@@ -436,12 +448,15 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device['status_description'] = self.status_map.get(state)
def _get_port(self, port_id):
with nfp_ctx_mgr.DbContextManager:
return self.nsf_db.get_port_info(self.db_session, port_id)
def _get_ports(self, port_ids):
data_ports = []
for port_id in port_ids:
with nfp_ctx_mgr.DbContextManager:
port_info = self.nsf_db.get_port_info(self.db_session,
port_id)
data_ports.append(port_info)
return data_ports
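(DbContextManager is used bare, without an `as` binding, purely to bracket the DB call; per the commit message it supplies the 'lock' function for DB transactions. A rough sketch of that intent, assuming a process-wide lock — the real lock granularity may differ:)

    import threading

    _db_lock = threading.Lock()

    class DbContextManagerSketch(object):
        """Serialize DB access performed inside the with block."""

        def __enter__(self):
            _db_lock.acquire()
            return self

        def __exit__(self, exc_type, exc_value, tb):
            _db_lock.release()
            return False  # DB errors still propagate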
@@ -453,7 +468,9 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device_info['id'] = device_id
device_info['reference_count'] = 0
device_info['interfaces_in_use'] = 0
with nfp_ctx_mgr.DbContextManager:
device = self.nsf_db.create_network_function_device(
self.db_session,
device_info)
return device
@@ -463,29 +480,36 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
updated_device = copy.deepcopy(device)
updated_device.pop('reference_count', None)
updated_device.pop('interfaces_in_use', None)
with nfp_ctx_mgr.DbContextManager:
self.nsf_db.update_network_function_device(self.db_session,
updated_device['id'],
updated_device)
device.update(updated_device)
def _delete_network_function_device_db(self, device_id, device):
with nfp_ctx_mgr.DbContextManager:
self.nsf_db.delete_network_function_device(self.db_session,
device_id)
def _get_network_function_info(self, device_id):
nfi_filters = {'network_function_device_id': [device_id]}
with nfp_ctx_mgr.DbContextManager:
network_function_instances = (
self.nsf_db.get_network_function_instances(self.db_session,
nfi_filters))
network_function_ids = [nf['network_function_id']
for nf in network_function_instances]
network_functions = (
self.nsf_db.get_network_functions(
self.db_session,
{'id': network_function_ids}))
return network_functions
def _get_network_function_devices(self, filters=None):
with nfp_ctx_mgr.DbContextManager:
network_function_devices = (
self.nsf_db.get_network_function_devices(self.db_session,
filters))
for device in network_function_devices:
mgmt_port_id = device.pop('mgmt_port_id')
mgmt_port_id = self._get_port(mgmt_port_id)
@@ -497,23 +521,39 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
return network_function_devices
def _increment_device_ref_count(self, device):
with nfp_ctx_mgr.DbContextManager:
self.nsf_db.increment_network_function_device_count(
self.db_session,
device['id'],
'reference_count')
device['reference_count'] += 1
def _decrement_device_ref_count(self, device):
with nfp_ctx_mgr.DbContextManager:
self.nsf_db.decrement_network_function_device_count(
self.db_session,
device['id'],
'reference_count')
device['reference_count'] -= 1
def _increment_device_interface_count(self, device):
with nfp_ctx_mgr.DbContextManager:
self.nsf_db.increment_network_function_device_count(
self.db_session,
device['id'],
'interfaces_in_use',
len(device['ports']))
device['interfaces_in_use'] += len(device['ports'])
def _decrement_device_interface_count(self, device):
with nfp_ctx_mgr.DbContextManager:
self.nsf_db.decrement_network_function_device_count(
self.db_session,
device['id'],
'interfaces_in_use',
len(device['ports']))
device['interfaces_in_use'] -= len(device['ports'])
def _get_orchestration_driver(self, service_vendor):
@@ -542,7 +582,9 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
nsi_port_info = []
for port_id in network_function_instance.pop('port_info'):
with nfp_ctx_mgr.DbContextManager:
port_info = self.nsf_db.get_port_info(self.db_session,
port_id)
nsi_port_info.append(port_info)
device_data['ports'] = nsi_port_info
@@ -671,13 +713,25 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
def _post_create_nfd_events(self, event, nfp_context, device):
nfp_context['event_desc'] = event.desc.to_dict()
        # Update the nfi with nfd_id before device spawning,
        # to stop orchestration from moving further.
nfi = {
'network_function_device_id': device['id'],
}
with nfp_ctx_mgr.DbContextManager:
nfi = self.nsf_db.update_network_function_instance(
self.db_session,
device['network_function_instance_id'], nfi)
        # This event acts as a dummy event for nfp;
        # it will be used for non-hotplug sharing.
self._create_event(event_id='DEVICE_CREATED',
event_data=device)
self._create_event(event_id='DEVICE_SPAWNING',
event_data=nfp_context,
is_poll_event=True,
original_event=event,
max_times=nfp_constants.DEVICE_SPAWNING_MAXRETRY)
# Create path
def create_network_function_device(self, event):
@@ -687,7 +741,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
new service or it creates new device instance
"""
nfp_context = event.context
nfd_request = self._prepare_failure_case_device_data(nfp_context)
service_details = nfp_context['service_details']
@@ -720,9 +774,11 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device = self._create_nfd_entry(nfp_context, driver_device_info,
device_data, service_details)
self._increment_device_ref_count(device)
self._increment_device_interface_count(device)
nfd_id = device.get('network_function_device_id',
'-') if device else '-'
nfp_context['log_context']['nfd_id'] = nfd_id
self._update_nfp_context_with_ports(nfp_context, driver_device_info)
self._post_create_nfd_events(event, nfp_context, device)
@@ -731,26 +787,24 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
nf_id = nfp_context['network_function']['id']
nfi_id = nfp_context['network_function_instance']['id']
du_event = self._controller.new_event(id="DEVICE_UP",
key=nf_id + nfi_id)
hc_event = self._controller.new_event(
id="PERFORM_INITIAL_HEALTH_CHECK", key=nf_id + nfi_id,
data=nfp_context)
id="PERFORM_INITIAL_HEALTH_CHECK",
key=nf_id + nfi_id)
plug_int_event = self._controller.new_event(id="PLUG_INTERFACES",
key=nf_id + nfi_id)
GRAPH = ({
du_event: [hc_event, plug_int_event]})
self._controller.post_graph(
GRAPH, du_event, graph_str='HEALTH_MONITOR_GRAPH')
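(The GRAPH maps a parent event to the child events it waits on: DEVICE_UP fires its handler only after PERFORM_INITIAL_HEALTH_CHECK and PLUG_INTERFACES complete, and their outcomes arrive on the parent as event.result. That is why the graph parents start by scanning the results:)

    # Pattern used by graph parents (see device_up further down):
    for result in event.result:
        if result.result.lower() != 'success':
            return self._controller.event_complete(event, result='FAILED')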
@nfp_api.poll_event_desc(event='DEVICE_SPAWNING',
spacing=nfp_constants.DEVICE_SPAWNING_SPACING)
def check_device_is_up(self, event):
nfp_context = event.context
service_details = nfp_context['service_details']
network_function_device = nfp_context['network_function_device']
@@ -799,26 +853,21 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
id='CREATE_DEVICE_CONFIGURATION',
key=nf_id,
serialize=serialize,
binding_key=binding_key)
check_heat_config = self._controller.new_event(
id='SEND_USER_CONFIG',
key=nf_id)
user_config_event = self._controller.new_event(
id='INITIATE_USER_CONFIG',
key=nf_id,
serialize=serialize,
binding_key=binding_key)
device_configured_event = self._controller.new_event(
id='CONFIGURATION_COMPLETE',
key=nf_id)
device_periodic_hm_event = self._controller.new_event(
id='PERFORM_PERIODIC_HEALTH_CHECK',
key=nf_id + nfi_id)
# Start periodic health monitor after device configuration
@@ -832,39 +881,35 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
graph_str='DEVICE_CONFIGURATION_GRAPH')
def device_up(self, event, serialize_config=False):
nfp_context = event.context
# Get the results of PLUG_INTERFACES & PERFORM_INITIAL_HEALTH_CHECK
# events results.
nf_id = nfp_context['network_function']['id']
nfi_id = nfp_context['network_function_instance']['id']
event_key = nf_id + nfi_id
device = self._prepare_failure_case_device_data(nfp_context)
# Get the results of PLUG_INTERFACES & PERFORM_INITIAL_HEALTH_CHECK
# events results.
results = event.result
nfd_event = self._controller.new_event(
id='CREATE_NETWORK_FUNCTION_DEVICE',
key=event_key,
desc_dict=nfp_context.pop('event_desc'))
if 'binding_key' in nfp_context:
nfd_event.binding_key = nfp_context['binding_key']
LOG.debug("Releasing tenant based lock for "
"CREATE_NETWORK_FUNCTION_DEVICE event with binding "
"key: %s" % nfd_event.binding_key)
for result in results:
if result.result.lower() != 'success':
self._controller.event_complete(nfd_event)
# Release CNFD Event lock
self._release_cnfd_lock(device)
self._create_event(event_id='DEVICE_CREATE_FAILED',
event_data=device)
return self._controller.event_complete(event, result='FAILED')
network_function_device = nfp_context['network_function_device']
nfd_id = '-'
if network_function_device:
nfd_id = network_function_device.get('id', '-')
nfp_context['log_context']['nfd_id'] = nfd_id
# Update NFI to ACTIVE State
nfi = {
'status': nfp_constants.ACTIVE}
nfi = self.nsf_db.update_network_function_instance(
self.db_session, nfi_id, nfi)
self._update_network_function_device_db(
network_function_device, nfp_constants.ACTIVE)
@@ -874,14 +919,16 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
{'device_id': network_function_device['id']})
LOG.debug("Device detail:%s"
% network_function_device)
self._controller.event_complete(nfd_event)
# Release CNFD Event lock
self._release_cnfd_lock(device)
self._post_configure_device_graph(nfp_context,
serialize=serialize_config)
event.key = nf_id + nfi_id
self._controller.event_complete(event)
def prepare_health_check_device_info(self, event, periodicity):
nfp_context = event.data
nfp_context = event.context
service_details = nfp_context['service_details']
network_function_device = nfp_context['network_function_device']
@@ -941,6 +988,11 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
self._controller.event_complete(event, result="SUCCESS")
def perform_periodic_health_check(self, event):
event_results = event.result
for result in event_results:
if result.result.lower() != "success":
return self._controller.event_complete(event, result="FAILED")
device, orchestration_driver = (
self.prepare_health_check_device_info(event,
nfp_constants.FOREVER))
@@ -959,7 +1011,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
self._controller.event_complete(event, result="SUCCESS")
def perform_initial_health_check(self, event):
device, orchestration_driver = (
self.prepare_health_check_device_info(event,
nfp_constants.INITIAL))
@@ -969,7 +1020,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
if not hm_req:
self._controller.event_complete(event, result="FAILED")
return None
self.configurator_rpc.create_network_function_device_config(device,
hm_req)
LOG.debug("Health Check RPC sent to configurator for device: "
@@ -977,8 +1027,11 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device['id'], hm_req))
def _get_service_type(self, service_profile_id):
admin_token = self.keystoneclient.get_admin_token()
service_profile = self.gbpclient.get_service_profile(
with nfp_ctx_mgr.KeystoneContextManager as kcm:
admin_token = kcm.retry(
self.keystoneclient.get_admin_token, tries=3)
with nfp_ctx_mgr.GBPContextManager as gcm:
service_profile = gcm.retry(self.gbpclient.get_service_profile,
admin_token, service_profile_id)
return service_profile['service_type'].lower()
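This is the context-manager retry pattern that this change applies to Keystone, GBP, Neutron and Nova calls. Below is a minimal instance-based sketch of the idea, assuming a fixed sleep between attempts and a plain re-raise on the final failure; the real nfp_ctx_mgr managers are entered as shown above and also translate client-specific exceptions:

import time

class RetryContextManager(object):
    # Illustrative stand-in for nfp_ctx_mgr's context managers.

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Returning False lets an unhandled exception propagate to
        # the module's registered ExceptionHandler.
        return False

    def retry(self, func, *args, **kwargs):
        # Call func up to 'tries' times; re-raise on the final failure.
        tries = kwargs.pop('tries', 1)
        for attempt in range(tries):
            try:
                return func(*args, **kwargs)
            except Exception:
                if attempt == tries - 1:
                    raise
                time.sleep(2)  # assumed backoff interval

# Usage mirroring the call site above:
# with RetryContextManager() as kcm:
#     admin_token = kcm.retry(keystoneclient.get_admin_token, tries=3)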
@@ -998,9 +1051,14 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
'network_function_instance',
network_function_instance_id)
admin_token = self.keystoneclient.get_admin_token()
service_profile = self.gbpclient.get_service_profile(
admin_token, network_function['service_profile_id'])
with nfp_ctx_mgr.KeystoneContextManager as kcm:
admin_token = kcm.retry(
self.keystoneclient.get_admin_token, tries=3)
with nfp_ctx_mgr.GBPContextManager as gcm:
service_profile = gcm.retry(
self.gbpclient.get_service_profile,
admin_token,
network_function['service_profile_id'])
service_details = transport.parse_service_flavor_string(
service_profile['service_flavor'])
@@ -1041,19 +1099,24 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device['token'] = admin_token
device['tenant_id'] = (
network_function_details['admin_tenant_id'])
device['service_profile'] = service_profile
return device
def health_monitor_complete(self, event, result='SUCCESS'):
nfp_context = event.data['nfp_context']
# device = nfp_context['network_function_device']
# network_function = nfp_context['network_function']
# Invoke event_complete for original event which is
# PERFORM_INITIAL_HEALTH_CHECK
event_desc = nfp_context.pop('event_desc')
nfp_context.pop('id')
key = nfp_context.pop('key')
event = self._controller.new_event(id="PERFORM_INITIAL_HEALTH_CHECK",
event_desc = nfp_context.pop('event_desc', None)
nfp_context.pop('id', None)
key = nfp_context.pop('key', None)
self._controller.event_complete(event)
new_event = self._controller.new_event(
id="PERFORM_INITIAL_HEALTH_CHECK",
key=key, desc_dict=event_desc)
self._controller.event_complete(event, result=result)
self._controller.event_complete(new_event, result=result)
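The pop-and-recreate sequence above is the recurring pattern for completing an event that was posted earlier as part of a graph: the poster stashes the event's descriptor in nfp_context, and the completing handler rebuilds a stand-in from desc_dict and completes that. Condensed, with names taken from the code above:

# Sketch: complete a previously posted event from its saved descriptor.
event_desc = nfp_context.pop('event_desc', None)  # stashed by the poster
key = nfp_context.pop('key', None)
stand_in = controller.new_event(id="PERFORM_INITIAL_HEALTH_CHECK",
                                key=key, desc_dict=event_desc)
controller.event_complete(stand_in, result=result)  # unblocks the graph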
def plug_interfaces(self, event, is_event_call=True):
if is_event_call:
@@ -1088,7 +1151,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
# so, we should not generate CONFIGURE_DEVICE & should not update
# DB with HEALTH_CHECK_COMPLETED.
nfp_context = event.data
nfp_context = event.context
service_details = nfp_context['service_details']
network_function_device = nfp_context['network_function_device']
@@ -1104,7 +1167,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
orchestration_driver = self._get_orchestration_driver(
service_details['service_vendor'])
ports = self._make_ports_dict(
nfp_context.get('explicit_consumer', consumer),
provider, 'port')
@@ -1125,13 +1187,12 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
orchestration_driver.plug_network_function_device_interfaces(
device))
if _ifaces_plugged_in:
self._increment_device_interface_count(device)
# self._increment_device_interface_count(device)
# [REVISIT(mak)] - Check how incremented ref count can be
# updated in DB
self._controller.event_complete(event, result="SUCCESS")
else:
self._create_event(event_id="PLUG_INTERFACE_FAILED",
event_data=nfp_context,
is_internal_event=True)
self._controller.event_complete(event, result="FAILED")
@@ -1152,7 +1213,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device, config_params)
def create_device_configuration(self, event):
nfp_context = event.data
nfp_context = event.context
service_details = nfp_context['service_details']
consumer = nfp_context['consumer']
@@ -1233,43 +1294,35 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
self._controller.event_complete(event=event, result='SUCCESS')
def configuration_complete(self, event):
nfp_context = event.data
nfp_context = event.context
nf_id = nfp_context['network_function']['id']
event_results = event.result
for result in event_results:
if result.result.lower() != "success":
return self._controller.event_complete(event)
device = self._prepare_failure_case_device_data(nfp_context)
self._create_event(event_id='DEVICE_CREATE_FAILED',
event_data=device)
return self._controller.event_complete(event, result="FAILED")
sc_event = self._controller.new_event(id="SERVICE_CONFIGURED",
key=nf_id,
data=nfp_context)
key=nf_id)
self._controller.post_event(sc_event)
self._controller.event_complete(event)
self._controller.event_complete(event, result="SUCCESS")
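configuration_complete shows the standard shape of a graph-root handler in this module: iterate event.result (one entry per leaf event), fail fast if any leaf failed, otherwise post the follow-up event and complete with SUCCESS. Schematically:

# Assumed result objects each expose a .result string.
for result in event.result:
    if result.result.lower() != 'success':
        return controller.event_complete(event, result='FAILED')
controller.post_event(controller.new_event(id='SERVICE_CONFIGURED',
                                           key=nf_id))
controller.event_complete(event, result='SUCCESS')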
def device_configuration_complete(self, event, result='SUCCESS'):
nfp_context = event.data['nfp_context']
device = nfp_context['network_function_device']
if result.lower() == 'success':
self._update_network_function_device_db(
device, nfp_constants.ACTIVE)
LOG.info(_LI(
"Configuration completed for device with NFD:%(device_id)s. "
"Updated DB status to ACTIVE."),
{'device_id': device['id']})
LOG.debug("Device detail:%s" % device)
# Invoke event_complete for original event which is
# CREATE_DEVICE_CONFIGURATION
self._increment_device_ref_count(device)
event_desc = nfp_context.pop('event_desc')
key = nfp_context.pop('key')
event_desc = nfp_context.pop('event_desc', None)
key = nfp_context.pop('key', None)
self._controller.event_complete(event)
event = self._controller.new_event(id="CREATE_DEVICE_CONFIGURATION",
key=key, desc_dict=event_desc)
event.binding_key = nfp_context.pop('binding_key')
event.binding_key = nfp_context.pop('binding_key', None)
self._controller.event_complete(event, result=result)
def delete_network_function_device(self, event):
network_function_details = event.data
network_function_details = event.context
nfd = network_function_details['network_function_device']
if not nfd:
self._controller.event_complete(event, result="SUCCESS")
@@ -1295,15 +1348,16 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
is_internal_event=True)
nf_id = device['network_function_id']
dnfd_event = (
self._controller.new_event(id='DELETE_NETWORK_FUNCTION_DEVICE',
self._controller.new_event(
id='DELETE_NETWORK_FUNCTION_DEVICE',
key=nf_id,
binding_key=nf_id,
desc_dict=device.get(
'event_desc')))
desc_dict=device.get('event_desc')))
self._controller.event_complete(dnfd_event, result='FAILED')
# TODO(Mahesh): If driver returns ERROR, then we are not
# proceeding further.
# Stale vms will exist in this case. Need to handle this case where
# TODO(mak): If driver returns ERROR,
# then we are not proceeding further
# Stale vms will exist in this case.
# Need to handle this case where
# driver returned None, so don't initiate the configurator API; instead
# call unplug_interfaces and delete the device to remove the stale VMs.
return None
@@ -1315,6 +1369,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
def unplug_interfaces(self, event):
result = "SUCCESS"
device = event.data
self._decrement_device_ref_count(device)
orchestration_driver = self._get_orchestration_driver(
device['service_details']['service_vendor'])
@@ -1343,12 +1398,14 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
binding_key=nfd_id,
serialize=True))
self._controller.post_event(unplug_interfaces)
self._controller.event_complete(event)
def delete_device(self, event):
# Update status in DB, send DEVICE_DELETED event to NSO.
device = event.data
orchestration_driver = self._get_orchestration_driver(
device['service_details']['service_vendor'])
network_function = (
self.nsf_db.get_network_function(
self.db_session,
@@ -1360,15 +1417,16 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
data=device)
self._controller.post_event(chm_event)
self._decrement_device_ref_count(device)
orchestration_driver.delete_network_function_device(device)
self._create_event(event_id='DEVICE_BEING_DELETED',
self._create_event(
event_id='DEVICE_BEING_DELETED',
event_data=device,
is_poll_event=True,
original_event=event,
max_times=nfp_constants.DEVICE_BEING_DELETED_MAXRETRY)
@nfp_api.poll_event_desc(event='DEVICE_BEING_DELETED',
@nfp_api.poll_event_desc(
event='DEVICE_BEING_DELETED',
spacing=nfp_constants.DEVICE_BEING_DELETED_SPACING)
def check_device_deleted(self, event):
device = event.data
@@ -1434,14 +1492,14 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
return device
def handle_plug_interface_failed(self, event):
nfp_context = event.data
nfp_context = event.context
device = self._prepare_failure_case_device_data(nfp_context)
self._release_cnfd_lock(device)
status = nfp_constants.ACTIVE
# self._release_cnfd_lock(device)
status = nfp_context['network_function_device']['status']
desc = "Failed to plug interfaces"
self._update_network_function_device_db(device, status, desc)
self._create_event(event_id='DEVICE_CREATE_FAILED',
event_data=device)
# self._create_event(event_id='DEVICE_CREATE_FAILED',
# event_data=device)
def handle_device_not_reachable(self, event):
device = event.data
@@ -1449,8 +1507,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
desc = 'Device not reachable, Health Check Failed'
self._update_network_function_device_db(device, status, desc)
device['network_function_device_id'] = device['id']
self._create_event(event_id='DEVICE_CREATE_FAILED',
event_data=device)
# self._create_event(event_id='DEVICE_CREATE_FAILED',
# event_data=device)
self.health_monitor_complete(event, result='FAILED')
def periodic_hm_handle_device_reachable(self, event):
@@ -1466,15 +1524,16 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
self._update_network_function_device_db(device, status, desc)
def handle_device_config_failed(self, event):
# device = event.data
nfp_context = event.data['nfp_context']
device = nfp_context['network_function_device']
status = nfp_constants.ERROR
status = device['status']
desc = 'Configuring Device Failed.'
self._update_network_function_device_db(device, status, desc)
device['network_function_device_id'] = device['id']
self._create_event(event_id='DEVICE_CREATE_FAILED',
event_data=event.data)
# self._create_event(event_id='DEVICE_CREATE_FAILED',
# event_data=event.data)
LOG.debug("Device create failed for device: %s, with "
"data: %s" % (device['id'], device))
self.device_configuration_complete(event, result='FAILED')
@@ -1497,9 +1556,9 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
status = nfp_constants.ERROR
desc = 'Exception in driver, driver returned None'
self._update_network_function_device_db(device, status, desc)
device['network_function_device_id'] = device['id']
self._create_event(event_id='DEVICE_CREATE_FAILED',
event_data=device)
# device['network_function_device_id'] = device['id']
# self._create_event(event_id='DEVICE_CREATE_FAILED',
# event_data=device)
def update_config_params(self, event):
self._create_event(event_id='DEVICE_CONFIG_PARAMETERS_UPDATED',
@@ -1522,6 +1581,7 @@ class NDOConfiguratorRpcApi(object):
topic=nsf_topics.NFP_NDO_CONFIGURATOR_TOPIC)
def _get_request_info(self, device, operation):
nfp_context = module_context.get()
request_info = {
'nf_id': device['network_function_id'],
'nfi_id': (
@@ -1529,9 +1589,11 @@ class NDOConfiguratorRpcApi(object):
'nfd_id': device['id'],
'requester': nfp_constants.DEVICE_ORCHESTRATOR,
'operation': operation,
'logging_context': nfp_logging.get_logging_context(),
'logging_context': nfp_context['log_context'],
# So that notification callbacks can work on cached data
'nfp_context': device.get('nfp_context', None)
# 'orig_nfp_context': device.get('orig_nfp_context'),
'nfp_context': device.get('nfp_context', None),
'service_profile': device.get('service_profile'),
}
nfd_ip = device.get('mgmt_ip_address')
request_info.update({'device_ip': nfd_ip})
@@ -1544,7 +1606,7 @@ class NDOConfiguratorRpcApi(object):
config_params['info'] = {
'service_type': device_data['service_details']['service_type'],
'service_vendor': device_data['service_details']['service_vendor'],
'context': request_info
'context': request_info,
}
if device_data.get('service_feature'):
config_params['info'].update(
@@ -1564,7 +1626,6 @@ class NDOConfiguratorRpcApi(object):
config_params,
'CREATE',
True)
nfp_logging.clear_logging_context()
def delete_network_function_device_config(self, device_data,
config_params):
@@ -1577,4 +1638,231 @@ class NDOConfiguratorRpcApi(object):
config_params,
'DELETE',
True)
nfp_logging.clear_logging_context()
class ExceptionHandler(object):
@staticmethod
def event_method_mapping(event_id):
event_handler_mapping = {
"CREATE_NETWORK_FUNCTION_DEVICE": (
ExceptionHandler.create_network_function_device),
"DEVICE_SPAWNING": ExceptionHandler.device_spawning,
"PERFORM_INITIAL_HEALTH_CHECK":
ExceptionHandler.perform_initial_health_check,
"DEVICE_UP": ExceptionHandler.device_up,
"PLUG_INTERFACES": ExceptionHandler.plug_interfaces,
"HEALTH_MONITOR_COMPLETE":
ExceptionHandler.health_monitor_complete,
"CREATE_DEVICE_CONFIGURATION":
ExceptionHandler.create_device_configuration,
"DEVICE_CONFIGURED":
ExceptionHandler.device_configuration_complete,
"CONFIGURATION_COMPLETE": ExceptionHandler.configuration_complete,
"DELETE_NETWORK_FUNCTION_DEVICE": (
ExceptionHandler.delete_network_function_device),
"DELETE_CONFIGURATION":
ExceptionHandler.delete_device_configuration,
"DELETE_CONFIGURATION_COMPLETED": (
ExceptionHandler.delete_configuration_complete),
"UNPLUG_INTERFACES": ExceptionHandler.unplug_interfaces,
"DELETE_DEVICE": ExceptionHandler.delete_device,
"DEVICE_BEING_DELETED": ExceptionHandler.device_being_deleted,
"PERIODIC_HM_DEVICE_NOT_REACHABLE": (
ExceptionHandler.periodic_hm_handle_device_not_reachable),
"DEVICE_NOT_REACHABLE": (
ExceptionHandler.health_monitor_complete),
"DEVICE_CONFIGURATION_FAILED": (
ExceptionHandler.device_configuration_complete),
"PERFORM_PERIODIC_HEALTH_CHECK": (
ExceptionHandler.perform_periodic_health_check),
}
if event_id not in event_handler_mapping:
raise Exception("Invalid event ID")
else:
return event_handler_mapping[event_id]
@staticmethod
def handle(orchestrator, event, exception):
exc_type, exc_value, exc_traceback = sys.exc_info()
message = "Traceback: %s" % ''.join(traceback.format_exception(
exc_type, exc_value, exc_traceback))
LOG.error(message)
exception_handler = ExceptionHandler.event_method_mapping(event.id)
return exception_handler(orchestrator, event, exception)
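This handle method is the single-class exception handling described in the commit message: nfp core catches whatever a module handler raises and hands it here along with the failing event. A hedged sketch of the assumed core-side invocation (not core's actual code):

try:
    module_handler(event)
except Exception as exc:
    # sys.exc_info() inside handle() still sees this exception,
    # because the call is made from within the except block.
    ExceptionHandler.handle(orchestrator, event, exc)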
@staticmethod
def create_network_function_device(orchestrator, event, exception):
nfp_context = event.context
network_function = nfp_context['network_function']
# [REVISIT: AKASH] Updating the NF from device_orchestrator is the
# wrong way to do this; kept for now, to be corrected later
orchestrator.nsf_db.update_network_function(
orchestrator.db_session,
network_function['id'],
{'status': nfp_constants.ERROR})
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def perform_initial_health_check(orchestrator, event, exception):
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def device_up(orchestrator, event, exception):
nfp_context = event.context
network_function = nfp_context['network_function']
device = orchestrator._prepare_failure_case_device_data(nfp_context)
orchestrator._release_cnfd_lock(device)
orchestrator.nsf_db.update_network_function(
orchestrator.db_session,
network_function['id'],
{'status': nfp_constants.ERROR})
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def plug_interfaces(orchestrator, event, exception):
nfp_context = event.context
device = orchestrator._prepare_failure_case_device_data(nfp_context)
status = nfp_context['network_function_device']['status']
desc = "Failed to plug interfaces"
orchestrator._update_network_function_device_db(device, status, desc)
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def health_monitor_complete(orchestrator, event, exception):
nfp_context = event.data['nfp_context']
event_desc = nfp_context.pop('event_desc', None)
nfp_context.pop('id', None)
key = nfp_context.pop('key', None)
ev = orchestrator._controller.new_event(
id="PERFORM_INITIAL_HEALTH_CHECK",
key=key, desc_dict=event_desc)
orchestrator._controller.event_complete(ev, result='FAILED')
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def create_device_configuration(orchestrator, event, exception):
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def device_configuration_complete(orchestrator, event, exception):
nfp_context = event.data['nfp_context']
event_desc = nfp_context.pop('event_desc')
key = nfp_context.pop('key')
ev = orchestrator._controller.new_event(
id="CREATE_DEVICE_CONFIGURATION",
key=key, desc_dict=event_desc)
ev.binding_key = nfp_context.pop('binding_key')
orchestrator._controller.event_complete(ev, result='FAILED')
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def configuration_complete(orchestrator, event, exception):
nfp_context = event.context
network_function = nfp_context['network_function']
orchestrator.nsf_db.update_network_function(
orchestrator.db_session,
network_function['id'],
{'status': nfp_constants.ERROR})
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def delete_network_function_device(orchestrator, event, exception):
network_function_details = event.context
device = network_function_details['network_function_device']
status = nfp_constants.ERROR
desc = 'Exception in driver, driver returned None'
orchestrator._update_network_function_device_db(device, status, desc)
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def delete_device_configuration(orchestrator, event, exception):
device = event.data
status = nfp_constants.ERROR
desc = 'Exception in driver, driver returned None'
orchestrator._update_network_function_device_db(device, status, desc)
nf_id = device['network_function_id']
dnfd_event = (
orchestrator._controller.new_event(
id='DELETE_NETWORK_FUNCTION_DEVICE',
key=nf_id,
binding_key=nf_id,
desc_dict=device.get('event_desc')))
orchestrator._controller.event_complete(dnfd_event, result='FAILED')
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def delete_configuration_complete(orchestrator, event, exception):
device = event.data
status = nfp_constants.ERROR
desc = 'Exception in driver, driver returned None'
orchestrator._update_network_function_device_db(device, status, desc)
nf_id = device['network_function_id']
dnfd_event = (
orchestrator._controller.new_event(
id='DELETE_NETWORK_FUNCTION_DEVICE',
key=nf_id,
binding_key=nf_id,
desc_dict=device.get('event_desc')))
orchestrator._controller.event_complete(dnfd_event, result='FAILED')
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def unplug_interfaces(orchestrator, event, exception):
device = event.data
nf_id = device['network_function_id']
dnfd_event = (
orchestrator._controller.new_event(
id='DELETE_NETWORK_FUNCTION_DEVICE',
key=nf_id,
binding_key=nf_id,
desc_dict=device.get('event_desc')))
orchestrator._controller.event_complete(dnfd_event, result='FAILED')
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def delete_device(orchestrator, event, exception):
device = event.data
status = nfp_constants.ERROR
desc = 'Exception in driver, driver returned None'
orchestrator._update_network_function_device_db(device, status, desc)
nf_id = device['network_function_id']
dnfd_event = (
orchestrator._controller.new_event(
id='DELETE_NETWORK_FUNCTION_DEVICE',
key=nf_id,
binding_key=nf_id,
desc_dict=device.get('event_desc')))
orchestrator._controller.event_complete(dnfd_event, result='FAILED')
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def device_being_deleted(orchestrator, event, exception):
# Keep polling: DEVICE_BEING_DELETED retries until the device is
# actually deleted or the poll's max_times is exhausted.
return {'poll': True}
@staticmethod
def device_spawning(orchestrator, event, exception):
nfp_context = event.context
network_function = nfp_context['network_function']
device = orchestrator._prepare_failure_case_device_data(nfp_context)
status = nfp_constants.ERROR
desc = 'Exception in driver, driver returned None'
orchestrator._update_network_function_device_db(device, status, desc)
orchestrator._release_cnfd_lock(device)
orchestrator.nsf_db.update_network_function(
orchestrator.db_session,
network_function['id'],
{'status': nfp_constants.ERROR})
orchestrator._controller.event_complete(event, result='FAILED')
# Stop polling; the failure has been recorded and the event completed.
return {'poll': False}
@staticmethod
def periodic_hm_handle_device_not_reachable(orchestrator,
event, exception):
orchestrator._controller.event_complete(event, result='FAILED')
@staticmethod
def perform_periodic_health_check(orchestrator, event, exception):
orchestrator._controller.event_complete(event, result='FAILED')