NFP - Failure handling in Orchestrator
Added the following support:

1) Context managers
   1.1) To be used with the Python 'with' statement.
   1.2) Support 'retry', 'ignore' and 'lock' functions:
        -> retry: retry a function for n counts.
        -> ignore: ignore certain expected exceptions.
        -> lock: lock a DB transaction.
   1.3) NFP modules need not handle every possible exception with
        individual try-except branches.

2) Single-class exception handling: all exceptions from a module are
   caught by nfp/core and the registered exception handler is invoked
   with all the relevant details (event, data, context, exception, ...).

3) Used the context manager 'retry' function with client methods
   (Neutronclient, Novaclient, etc.), especially for GET methods.
   For example, GET_TOKEN is retried 'n' times to overcome temporary
   failures with Keystone.

Change-Id: Ia821938b9f607799ebeaa1c0e2ddda74ebc96fd8
Partial-Bug: 1668198
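In practice the orchestrator code wraps OpenStack client calls in these managers with the Python 'with' statement and routes the call through retry(). A minimal sketch of the pattern, assuming a keystone_client object with a get_scoped_token() method (placeholders for this illustration, not code from this change; the real call sites appear in the driver hunks below):

    from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr

    def get_admin_token(keystone_client, username, password, tenant_name):
        # Any exception that escapes the block is wrapped and re-raised as
        # nfp_exceptions.KeystoneException by the manager's __exit__().
        with nfp_ctx_mgr.KeystoneContextManager as kcm:
            # retry() re-invokes the call up to 'tries' times with a doubling
            # back-off (2s, 4s, ...) before letting the last attempt raise.
            return kcm.retry(keystone_client.get_scoped_token,
                             username, password, tenant_name, tries=3)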
@@ -2,6 +2,6 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-120} \
-${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice/neutron/tests/unit/nfp/core/} $LISTOPT $IDOPTION
+${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice/neutron/tests/unit/nfp/orchestrator/} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
@@ -13,8 +13,8 @@

import eventlet
from eventlet import greenpool
+import sys
import threading
-import time

from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient
@@ -287,6 +287,7 @@ class NFPContext(object):
'active_threads': [],
'sc_node_count': 0,
'sc_gateway_type_nodes': [],
+'network_functions': [],
'update': False}
if nfp_context_store.context:
nfp_context_store.context.update({sc_instance_id: context})
@@ -453,46 +454,55 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
context._plugin_context = self._get_resource_owner_context(
context._plugin_context)
network_function_id = self._create_network_function(context)
+except Exception:
+# NFPContext.clear_nfp_context(context.instance['id'])
+exc_type, exc_value, exc_traceback = sys.exc_info()
+message = "Traceback: %s" % (exc_value)
+LOG.error(message)
+network_function_id = ''
+
+finally:
self._set_node_instance_network_function_map(
context.plugin_session, context.current_node['id'],
context.instance['id'], network_function_id)
-except Exception as e:
-NFPContext.clear_nfp_context(context.instance['id'])
-raise e

-self._wait_for_node_operation_completion(context,
-network_function_id,
+self._wait_for_node_operation_completion(
+context, network_function_id,
nfp_constants.CREATE)

-def _wait_for_node_operation_completion(self, context,
-network_function_id,
+def _wait_for_node_operation_completion(self, context, network_function_id,
operation):
# Check for NF status in a separate thread
LOG.debug("Spawning thread for nf ACTIVE poll operation: %s" % (
operation))
nfp_context = NFPContext.get_nfp_context(context.instance['id'])
-if operation == nfp_constants.DELETE:
-gth = nfp_context['thread_pool'].spawn(
-self._wait_for_network_function_delete_completion,
-context, network_function_id)
-else:
-gth = nfp_context['thread_pool'].spawn(
-self._wait_for_network_function_operation_completion,
-context, network_function_id, operation=operation)
-
-nfp_context['active_threads'].append(gth)
-
-LOG.debug("Active Threads count (%d), sc_node_count (%d)" % (
-len(nfp_context['active_threads']), nfp_context['sc_node_count']))
-
nfp_context['sc_node_count'] -= 1
+nfp_context['network_functions'].append(network_function_id)
# At last wait for the threads to complete, success/failure/timeout
if nfp_context['sc_node_count'] == 0:
+network_functions = nfp_context['network_functions']
+for network_function in network_functions:
+LOG.debug("Spawning thread for nf ACTIVE poll")
+if operation == nfp_constants.DELETE:
+gth = nfp_context['thread_pool'].spawn(
+self._wait_for_network_function_delete_completion,
+context, network_function)
+else:
+gth = nfp_context['thread_pool'].spawn(
+self._wait_for_network_function_operation_completion,
+context, network_function, operation=operation)
+
+nfp_context['active_threads'].append(gth)
+
+message = "Active Threads count (%d), sc_node_count (%d)" % (
+len(nfp_context['active_threads']),
+nfp_context['sc_node_count'])
+LOG.debug(message)
nfp_context['thread_pool'].waitall()
# Get the results
for gth in nfp_context['active_threads']:
self._wait(gth, context)

NFPContext.clear_nfp_context(context.instance['id'])
else:
NFPContext.store_nfp_context(context.instance['id'], **nfp_context)
@@ -537,31 +547,29 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
context.plugin_session,
context.current_node['id'],
context.instance['id'])
-if not network_function_map:
+network_function_id = None
+if network_function_map:
-NFPContext.store_nfp_context(
-context.instance['id'],
-sc_gateway_type_nodes=[],
-sc_node_count=nfp_context['sc_node_count'] - 1)
-return
-
-network_function_id = network_function_map.network_function_id
-try:
-self.nfp_notifier.delete_network_function(
-context=context.plugin_context,
-network_function_id=network_function_id)
-except Exception as e:
-NFPContext.clear_nfp_context(context.instance['id'])
-LOG.exception(_LE("Delete Network service Failed"))
self._delete_node_instance_network_function_map(
context.plugin_session,
context.current_node['id'],
context.instance['id'])
-raise e
+network_function_id = network_function_map.network_function_id

+if network_function_id:
+try:
+self.nfp_notifier.delete_network_function(
+context=context.plugin_context,
+network_function_id=(
+network_function_map.network_function_id))
+except Exception:
+# NFPContext.clear_nfp_context(context.instance['id'])
+LOG.exception(_LE("Delete Network service Failed"))
+exc_type, exc_value, exc_traceback = sys.exc_info()
+message = "Traceback: %s" % (exc_value)
+LOG.error(message)

self._update_ptg(context)
-self._wait_for_node_operation_completion(context,
-network_function_id,
+self._wait_for_node_operation_completion(context, network_function_id,
nfp_constants.DELETE)

def update_policy_target_added(self, context, policy_target):
@@ -685,31 +693,21 @@ class NFPNodeDriver(driver_base.NodeDriverBase):

def _wait_for_network_function_delete_completion(self, context,
network_function_id):
+# [REVISIT: (akash) do we need to do error handling here]
+if not network_function_id:
+return
+
time_waited = 0
network_function = None
-curr_time = start_time = int(time.time())
-timeout = cfg.CONF.nfp_node_driver.service_delete_timeout
-
-while curr_time - start_time < timeout:
-curr_time = int(time.time())
+while time_waited < cfg.CONF.nfp_node_driver.service_delete_timeout:
network_function = self.nfp_notifier.get_network_function(
context.plugin_context, network_function_id)
-if network_function:
-LOG.debug("Got %s nf result for NF: %s with status:%s,"
-"time waited: %s" % (network_function_id, 'delete',
-time_waited, network_function['status']))
-if not network_function:
+if not network_function or (
+network_function['status'] == nfp_constants.ERROR):
break
eventlet.sleep(5)
time_waited = time_waited + 5

-LOG.debug("Deleting sci nf mapping")
-self._delete_node_instance_network_function_map(
-context.plugin_session,
-context.current_node['id'],
-context.instance['id'])
-LOG.debug("sci nf mapping got deleted. NF got deldted.")
-
if network_function:
LOG.error(_LE("Delete network function %(network_function)s "
"failed"),
@@ -719,13 +717,14 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
|
|||||||
def _wait_for_network_function_operation_completion(self, context,
|
def _wait_for_network_function_operation_completion(self, context,
|
||||||
network_function_id,
|
network_function_id,
|
||||||
operation):
|
operation):
|
||||||
|
if not network_function_id:
|
||||||
|
raise NodeInstanceCreateFailed()
|
||||||
|
|
||||||
time_waited = 0
|
time_waited = 0
|
||||||
network_function = None
|
network_function = None
|
||||||
timeout = cfg.CONF.nfp_node_driver.service_create_timeout
|
timeout = cfg.CONF.nfp_node_driver.service_create_timeout
|
||||||
curr_time = start_time = int(time.time())
|
|
||||||
|
|
||||||
while curr_time - start_time < timeout:
|
while time_waited < timeout:
|
||||||
curr_time = int(time.time())
|
|
||||||
network_function = self.nfp_notifier.get_network_function(
|
network_function = self.nfp_notifier.get_network_function(
|
||||||
context.plugin_context, network_function_id)
|
context.plugin_context, network_function_id)
|
||||||
LOG.debug("Got %s nf result for NF: %s with status:%s,"
|
LOG.debug("Got %s nf result for NF: %s with status:%s,"
|
||||||
@@ -1095,6 +1094,16 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
if not vip_ip:
raise VipNspNotSetonProvider()
-
+if service_targets:
+for provider_port in service_targets['provider_ports']:
+provider_port['allowed_address_pairs'] = [
+{'ip_address': vip_ip}]
+port = {
+'port': provider_port
+}
+context.core_plugin.update_port(
+context.plugin_context, provider_port['id'], port)
+
provider = {
'pt': service_targets.get('provider_pt_objs', []),
'ptg': service_targets.get('provider_ptg', []),
@@ -565,137 +565,6 @@ class NFPDBTestCase(SqlTestCase):
|
|||||||
self.session,
|
self.session,
|
||||||
mgmt_port_id)
|
mgmt_port_id)
|
||||||
|
|
||||||
def create_network_function_device_interface(self, attributes=None,
|
|
||||||
create_nfd=True):
|
|
||||||
if attributes is None:
|
|
||||||
nfd = (self.create_network_function_device()['id']
|
|
||||||
if create_nfd else None)
|
|
||||||
attributes = {
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'plugged_in_port_id': {
|
|
||||||
'id': 'myid2_ha_port',
|
|
||||||
'port_model': nfp_constants.NEUTRON_PORT,
|
|
||||||
'port_classification': nfp_constants.MONITOR,
|
|
||||||
'port_role': nfp_constants.ACTIVE_PORT
|
|
||||||
},
|
|
||||||
'interface_position': 1,
|
|
||||||
'mapped_real_port_id': 'myid2',
|
|
||||||
'network_function_device_id': nfd
|
|
||||||
}
|
|
||||||
return self.nfp_db.create_network_function_device_interface(
|
|
||||||
self.session, attributes)
|
|
||||||
|
|
||||||
def test_create_network_function_device_interface(self):
|
|
||||||
attrs = {
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'plugged_in_port_id': {
|
|
||||||
'id': 'myid2_ha_port',
|
|
||||||
'port_model': nfp_constants.NEUTRON_PORT,
|
|
||||||
'port_classification': nfp_constants.MONITOR,
|
|
||||||
'port_role': nfp_constants.ACTIVE_PORT
|
|
||||||
},
|
|
||||||
'interface_position': 1,
|
|
||||||
'mapped_real_port_id': 'myid2',
|
|
||||||
'network_function_device_id': (
|
|
||||||
self.create_network_function_device()['id'])
|
|
||||||
}
|
|
||||||
|
|
||||||
network_function_device_interface = (
|
|
||||||
self.create_network_function_device_interface(attrs))
|
|
||||||
for key in attrs:
|
|
||||||
if key == 'mgmt_port_id':
|
|
||||||
self.assertEqual(attrs[key]['id'],
|
|
||||||
network_function_device_interface[key])
|
|
||||||
continue
|
|
||||||
self.assertEqual(attrs[key],
|
|
||||||
network_function_device_interface[key])
|
|
||||||
self.assertIsNotNone(network_function_device_interface['id'])
|
|
||||||
|
|
||||||
def test_get_network_function_device_interface(self):
|
|
||||||
attrs_all = {
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'plugged_in_port_id': {
|
|
||||||
'id': 'myid2_ha_port',
|
|
||||||
'port_model': nfp_constants.NEUTRON_PORT,
|
|
||||||
'port_classification': nfp_constants.MONITOR,
|
|
||||||
'port_role': nfp_constants.ACTIVE_PORT
|
|
||||||
},
|
|
||||||
'interface_position': 1,
|
|
||||||
'mapped_real_port_id': 'myid2',
|
|
||||||
'network_function_device_id': (
|
|
||||||
self.create_network_function_device()['id'])
|
|
||||||
}
|
|
||||||
network_function_device_interface = (
|
|
||||||
self.create_network_function_device_interface(attrs_all))
|
|
||||||
db_network_function_device_interface = (
|
|
||||||
self.nfp_db.get_network_function_device_interface(
|
|
||||||
self.session, network_function_device_interface['id']))
|
|
||||||
for key in attrs_all:
|
|
||||||
self.assertEqual(attrs_all[key],
|
|
||||||
db_network_function_device_interface[key])
|
|
||||||
|
|
||||||
def test_list_network_function_device_interface(self):
|
|
||||||
network_function_device_interface = (
|
|
||||||
self.create_network_function_device_interface())
|
|
||||||
network_function_device_interfaces = (
|
|
||||||
self.nfp_db.get_network_function_device_interfaces(
|
|
||||||
self.session))
|
|
||||||
self.assertEqual(1, len(network_function_device_interfaces))
|
|
||||||
self.assertEqual(network_function_device_interface['id'],
|
|
||||||
network_function_device_interfaces[0]['id'])
|
|
||||||
|
|
||||||
def test_list_network_function_device_interfaces_with_filters(self):
|
|
||||||
attrs = {
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'plugged_in_port_id': {
|
|
||||||
'id': 'myid2_ha_port',
|
|
||||||
'port_model': nfp_constants.NEUTRON_PORT,
|
|
||||||
'port_classification': nfp_constants.MONITOR,
|
|
||||||
'port_role': nfp_constants.ACTIVE_PORT
|
|
||||||
},
|
|
||||||
'interface_position': 1,
|
|
||||||
'mapped_real_port_id': 'myid2',
|
|
||||||
'network_function_device_id': (
|
|
||||||
self.create_network_function_device()['id'])
|
|
||||||
}
|
|
||||||
network_function_device_interface = (
|
|
||||||
self.create_network_function_device_interface(attrs))
|
|
||||||
filters = {
|
|
||||||
'interface_position': [1]
|
|
||||||
}
|
|
||||||
network_function_device_interfaces = (
|
|
||||||
self.nfp_db.get_network_function_device_interfaces(
|
|
||||||
self.session, filters=filters))
|
|
||||||
self.assertEqual(1, len(network_function_device_interfaces))
|
|
||||||
self.assertEqual(network_function_device_interface['id'],
|
|
||||||
network_function_device_interfaces[0]['id'])
|
|
||||||
filters = {'interface_position': [100]}
|
|
||||||
network_function_device_interfaces = (
|
|
||||||
self.nfp_db.get_network_function_device_interfaces(
|
|
||||||
self.session, filters=filters))
|
|
||||||
self.assertEqual([], network_function_device_interfaces)
|
|
||||||
|
|
||||||
def test_update_network_function_device_interface(self):
|
|
||||||
network_function_device_interface = (
|
|
||||||
self.create_network_function_device_interface())
|
|
||||||
self.assertIsNotNone(network_function_device_interface['id'])
|
|
||||||
updated_nfdi = {'interface_position': 2}
|
|
||||||
nfdi = self.nfp_db.update_network_function_device_interface(
|
|
||||||
self.session, network_function_device_interface['id'],
|
|
||||||
updated_nfdi)
|
|
||||||
self.assertEqual(2, nfdi['interface_position'])
|
|
||||||
|
|
||||||
def test_delete_network_function_device_interface(self):
|
|
||||||
network_function_device_interface = (
|
|
||||||
self.create_network_function_device_interface())
|
|
||||||
self.assertIsNotNone(network_function_device_interface['id'])
|
|
||||||
self.nfp_db.delete_network_function_device_interface(
|
|
||||||
self.session, network_function_device_interface['id'])
|
|
||||||
self.assertRaises(nfp_exc.NetworkFunctionDeviceInterfaceNotFound,
|
|
||||||
self.nfp_db.get_network_function_device_interface,
|
|
||||||
self.session,
|
|
||||||
network_function_device_interface['id'])
|
|
||||||
|
|
||||||
def _get_gateway_details(self):
|
def _get_gateway_details(self):
|
||||||
return dict(
|
return dict(
|
||||||
id=str(uuid.uuid4()),
|
id=str(uuid.uuid4()),
|
||||||
|
@@ -85,6 +85,8 @@ class DummyEvent(object):
|
|||||||
self.context = {}
|
self.context = {}
|
||||||
self.desc = DummyDesc()
|
self.desc = DummyDesc()
|
||||||
|
|
||||||
|
self.context = self.data
|
||||||
|
|
||||||
|
|
||||||
class Desc(object):
|
class Desc(object):
|
||||||
|
|
||||||
@@ -313,6 +315,7 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
|||||||
mock_update_nfd.assert_called_with(ndo_handler.db_session,
|
mock_update_nfd.assert_called_with(ndo_handler.db_session,
|
||||||
orig_event_data['id'],
|
orig_event_data['id'],
|
||||||
orig_event_data)
|
orig_event_data)
|
||||||
|
ndo_handler._controller.reset_mock()
|
||||||
|
|
||||||
@mock.patch.object(nfpdb.NFPDbBase, 'update_network_function_device')
|
@mock.patch.object(nfpdb.NFPDbBase, 'update_network_function_device')
|
||||||
def test_health_check(self, mock_update_nfd):
|
def test_health_check(self, mock_update_nfd):
|
||||||
@@ -524,7 +527,6 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
|||||||
def test_device_configuration_complete(self,
|
def test_device_configuration_complete(self,
|
||||||
mock_update_nfd, mock_get_nfd):
|
mock_update_nfd, mock_get_nfd):
|
||||||
ndo_handler = self._initialize_ndo_handler()
|
ndo_handler = self._initialize_ndo_handler()
|
||||||
tmp_data = copy.deepcopy(self.event.data)
|
|
||||||
device = self.event.data
|
device = self.event.data
|
||||||
status = 'ACTIVE'
|
status = 'ACTIVE'
|
||||||
device = {'nfp_context': device}
|
device = {'nfp_context': device}
|
||||||
@@ -549,16 +551,9 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
|||||||
'id': 'device_id',
|
'id': 'device_id',
|
||||||
'reference_count': 0
|
'reference_count': 0
|
||||||
}
|
}
|
||||||
ndo_handler.device_configuration_complete(self.event)
|
|
||||||
mock_update_nfd.assert_called_with(ndo_handler.db_session,
|
|
||||||
device[
|
|
||||||
'nfp_context'][
|
|
||||||
'network_function_device'][
|
|
||||||
'id'],
|
|
||||||
{'reference_count': (
|
|
||||||
reference_count)})
|
|
||||||
|
|
||||||
self.event.data = tmp_data
|
ndo_handler.device_configuration_complete(self.event)
|
||||||
|
ndo_handler._controller.reset_mock()
|
||||||
|
|
||||||
@mock.patch.object(nfpdb.NFPDbBase, 'get_network_function_device')
|
@mock.patch.object(nfpdb.NFPDbBase, 'get_network_function_device')
|
||||||
@mock.patch.object(nfpdb.NFPDbBase, 'update_network_function_device')
|
@mock.patch.object(nfpdb.NFPDbBase, 'update_network_function_device')
|
||||||
@@ -588,6 +583,7 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
|||||||
event_id = 'DELETE_CONFIGURATION'
|
event_id = 'DELETE_CONFIGURATION'
|
||||||
ndo_handler._create_event = mock.MagicMock(return_value=True)
|
ndo_handler._create_event = mock.MagicMock(return_value=True)
|
||||||
|
|
||||||
|
delete_event_req.context = delete_event_req.data
|
||||||
ndo_handler.delete_network_function_device(delete_event_req)
|
ndo_handler.delete_network_function_device(delete_event_req)
|
||||||
ndo_handler._create_event.assert_called_with(
|
ndo_handler._create_event.assert_called_with(
|
||||||
event_id=event_id,
|
event_id=event_id,
|
||||||
@@ -627,16 +623,15 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
|||||||
|
|
||||||
mock_get_nfd.return_value = {
|
mock_get_nfd.return_value = {
|
||||||
'id': 'device_id',
|
'id': 'device_id',
|
||||||
'interfaces_in_use': 1
|
'interfaces_in_use': 1,
|
||||||
|
'reference_count': 1,
|
||||||
}
|
}
|
||||||
ndo_handler.unplug_interfaces(self.event)
|
ndo_handler.unplug_interfaces(self.event)
|
||||||
|
|
||||||
orig_event_data['interfaces_in_use'] -= len(orig_event_data['ports'])
|
orig_event_data['interfaces_in_use'] -= len(orig_event_data['ports'])
|
||||||
mock_update_nfd.assert_called_with(ndo_handler.db_session,
|
mock_update_nfd.assert_called_with(ndo_handler.db_session,
|
||||||
orig_event_data['id'],
|
orig_event_data['id'], mock.ANY)
|
||||||
{'interfaces_in_use': (
|
|
||||||
orig_event_data[
|
|
||||||
'interfaces_in_use'])})
|
|
||||||
orig_event_data = copy.deepcopy(self.event.data)
|
orig_event_data = copy.deepcopy(self.event.data)
|
||||||
orig_event_data['status_description'] = (
|
orig_event_data['status_description'] = (
|
||||||
ndo_handler.status_map['ACTIVE'])
|
ndo_handler.status_map['ACTIVE'])
|
||||||
@@ -742,6 +737,7 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
|||||||
'network_function_device'][
|
'network_function_device'][
|
||||||
'id'],
|
'id'],
|
||||||
device)
|
device)
|
||||||
|
ndo_handler._controller.reset_mock()
|
||||||
|
|
||||||
self.event.data = tmp_data
|
self.event.data = tmp_data
|
||||||
|
|
||||||
@@ -781,8 +777,11 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
|||||||
device['nfp_context'][
|
device['nfp_context'][
|
||||||
'network_function_device'][
|
'network_function_device'][
|
||||||
'id'],
|
'id'],
|
||||||
{'reference_count': (
|
{'status': 'ERROR',
|
||||||
reference_count)})
|
'status_description':
|
||||||
|
'Configuring Device Failed.',
|
||||||
|
'id': 'vm-id'})
|
||||||
|
ndo_handler._controller.reset_mock()
|
||||||
|
|
||||||
self.event.data = tmp_data
|
self.event.data = tmp_data
|
||||||
|
|
||||||
|
@@ -20,10 +20,10 @@ from oslo_config import cfg
|
|||||||
from gbpservice.neutron.tests.unit.nfp.orchestrator.db import test_nfp_db
|
from gbpservice.neutron.tests.unit.nfp.orchestrator.db import test_nfp_db
|
||||||
from gbpservice.nfp.common import constants as nfp_constants
|
from gbpservice.nfp.common import constants as nfp_constants
|
||||||
from gbpservice.nfp.common import exceptions as nfp_exc
|
from gbpservice.nfp.common import exceptions as nfp_exc
|
||||||
from gbpservice.nfp.core import context as nfp_core_context
|
from gbpservice.nfp.core import context as nfp_context
|
||||||
from gbpservice.nfp.core import controller # noqa
|
from gbpservice.nfp.core import controller # noqa
|
||||||
from gbpservice.nfp.core.event import Event as NFP_EVENT
|
from gbpservice.nfp.core.event import Event as NFP_EVENT
|
||||||
from gbpservice.nfp.core import log as nfp_logging
|
from gbpservice.nfp.lib import nfp_context_manager
|
||||||
from gbpservice.nfp.lib import transport
|
from gbpservice.nfp.lib import transport
|
||||||
from gbpservice.nfp.orchestrator.modules import (
|
from gbpservice.nfp.orchestrator.modules import (
|
||||||
service_orchestrator as nso)
|
service_orchestrator as nso)
|
||||||
@@ -33,6 +33,9 @@ from gbpservice.nfp.orchestrator.openstack import openstack_driver
|
|||||||
import uuid as pyuuid
|
import uuid as pyuuid
|
||||||
|
|
||||||
|
|
||||||
|
nfp_context_manager.sql_lock_support = False
|
||||||
|
|
||||||
|
|
||||||
def Event(**kwargs):
|
def Event(**kwargs):
|
||||||
data = kwargs.get('data')
|
data = kwargs.get('data')
|
||||||
key = pyuuid.uuid4()
|
key = pyuuid.uuid4()
|
||||||
@@ -89,10 +92,9 @@ class NSORpcHandlerTestCase(NSOModuleTestCase):
|
|||||||
with mock.patch.object(identity_client, "Client"):
|
with mock.patch.object(identity_client, "Client"):
|
||||||
self.rpc_handler.create_network_function(
|
self.rpc_handler.create_network_function(
|
||||||
"context", {'resource_owner_context':
|
"context", {'resource_owner_context':
|
||||||
{'tenant_id': 'tenant_id'}})
|
{'tenant_id': 'tenant_id'}})
|
||||||
mock_create_network_function.assert_called_once_with(
|
mock_create_network_function.assert_called_once_with(
|
||||||
"context", {'resource_owner_context':
|
"context", mock.ANY)
|
||||||
{'tenant_id': 'tenant_id'}})
|
|
||||||
|
|
||||||
@mock.patch.object(nso.ServiceOrchestrator,
|
@mock.patch.object(nso.ServiceOrchestrator,
|
||||||
"get_network_function")
|
"get_network_function")
|
||||||
@@ -291,13 +293,15 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
mock_get_admin_token,
|
mock_get_admin_token,
|
||||||
mock_get_admin_tenant_id):
|
mock_get_admin_tenant_id):
|
||||||
network_function = self.create_network_function()
|
network_function = self.create_network_function()
|
||||||
nfp_core_context.get_nfp_context = mock.MagicMock(
|
nfp_context.init()
|
||||||
return_value={})
|
|
||||||
mock_get_admin_token.return_value = 'admin_token'
|
mock_get_admin_token.return_value = 'admin_token'
|
||||||
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
|
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
|
||||||
transport.parse_service_flavor_string = mock.MagicMock(
|
transport.parse_service_flavor_string = mock.MagicMock(
|
||||||
return_value={'device_type': 'VM',
|
return_value={'device_type': 'VM',
|
||||||
'service_vendor': 'vyos'})
|
'service_vendor': 'vyos'})
|
||||||
|
self.service_orchestrator._create_event = mock.MagicMock(
|
||||||
|
return_value='')
|
||||||
|
|
||||||
self.service_orchestrator.delete_network_function(
|
self.service_orchestrator.delete_network_function(
|
||||||
self.context, network_function['id'])
|
self.context, network_function['id'])
|
||||||
self.assertRaises(nfp_exc.NetworkFunctionNotFound,
|
self.assertRaises(nfp_exc.NetworkFunctionNotFound,
|
||||||
@@ -327,8 +331,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
self.session, network_function_id)
|
self.session, network_function_id)
|
||||||
mock_get_admin_token.return_value = 'admin_token'
|
mock_get_admin_token.return_value = 'admin_token'
|
||||||
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
|
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
|
||||||
nfp_core_context.get_nfp_context = mock.MagicMock(
|
nfp_context.init()
|
||||||
return_value={})
|
|
||||||
transport.parse_service_flavor_string = mock.MagicMock(
|
transport.parse_service_flavor_string = mock.MagicMock(
|
||||||
return_value={'device_type': 'VM',
|
return_value={'device_type': 'VM',
|
||||||
'service_vendor': 'vyos'})
|
'service_vendor': 'vyos'})
|
||||||
@@ -340,7 +344,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
|
|
||||||
def test_event_create_network_function_instance(self):
|
def test_event_create_network_function_instance(self):
|
||||||
network_function_instance = self.create_network_function_instance()
|
network_function_instance = self.create_network_function_instance()
|
||||||
network_function = self.nfp_db.get_network_function(self.session,
|
network_function = self.nfp_db.get_network_function(
|
||||||
|
self.session,
|
||||||
network_function_instance['network_function_id'])
|
network_function_instance['network_function_id'])
|
||||||
network_function_port_info = [
|
network_function_port_info = [
|
||||||
{
|
{
|
||||||
@@ -374,25 +379,11 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'provider': {'pt': None}
|
'provider': {'pt': None}
|
||||||
}
|
}
|
||||||
test_event = Event(data=create_nfi_request)
|
test_event = Event(data=create_nfi_request)
|
||||||
nfp_logging.store_logging_context(path='create')
|
test_event.context = create_nfi_request
|
||||||
|
test_event.context['log_context'] = nfp_context.init_log_context()
|
||||||
self.service_orchestrator.create_network_function_instance(
|
self.service_orchestrator.create_network_function_instance(
|
||||||
test_event)
|
test_event)
|
||||||
|
|
||||||
def test_event_handle_device_created(self):
|
|
||||||
nfd = self.create_network_function_device()
|
|
||||||
nfi = self.create_network_function_instance(create_nfd=False)
|
|
||||||
request_data = {
|
|
||||||
'network_function_instance_id': nfi['id'],
|
|
||||||
'network_function_device_id': nfd['id']
|
|
||||||
}
|
|
||||||
test_event = Event(data=request_data)
|
|
||||||
self.assertIsNone(nfi['network_function_device_id'])
|
|
||||||
self.service_orchestrator.handle_device_created(
|
|
||||||
test_event)
|
|
||||||
db_nfi = self.nfp_db.get_network_function_instance(
|
|
||||||
self.session, nfi['id'])
|
|
||||||
self.assertEqual(nfd['id'], db_nfi['network_function_device_id'])
|
|
||||||
|
|
||||||
@mock.patch.object(
|
@mock.patch.object(
|
||||||
nso.ServiceOrchestrator, "_create_event")
|
nso.ServiceOrchestrator, "_create_event")
|
||||||
@mock.patch.object(
|
@mock.patch.object(
|
||||||
@@ -433,9 +424,9 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'network_function_instance_id': nfi['id'],
|
'network_function_instance_id': nfi['id'],
|
||||||
'network_function_device_id': nfd['id']
|
'network_function_device_id': nfd['id']
|
||||||
}
|
}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data, context=request_data)
|
||||||
|
test_event.context['log_context'] = nfp_context.init_log_context()
|
||||||
self.assertIsNone(nfi['network_function_device_id'])
|
self.assertIsNone(nfi['network_function_device_id'])
|
||||||
nfp_logging.store_logging_context(path='create')
|
|
||||||
self.service_orchestrator.handle_device_create_failed(
|
self.service_orchestrator.handle_device_create_failed(
|
||||||
test_event)
|
test_event)
|
||||||
db_nfi = self.nfp_db.get_network_function_instance(
|
db_nfi = self.nfp_db.get_network_function_instance(
|
||||||
@@ -468,6 +459,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
|
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
|
||||||
}
|
}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data)
|
||||||
|
test_event.context = request_data
|
||||||
status = self.service_orchestrator.check_for_user_config_complete(
|
status = self.service_orchestrator.check_for_user_config_complete(
|
||||||
test_event)
|
test_event)
|
||||||
mock_is_config_complete.assert_called_once_with(
|
mock_is_config_complete.assert_called_once_with(
|
||||||
@@ -491,7 +483,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
|
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
|
||||||
}
|
}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data)
|
||||||
nfp_logging.store_logging_context(path='create')
|
test_event.context = request_data
|
||||||
status = self.service_orchestrator.check_for_user_config_complete(
|
status = self.service_orchestrator.check_for_user_config_complete(
|
||||||
test_event)
|
test_event)
|
||||||
mock_is_config_complete.assert_called_once_with(
|
mock_is_config_complete.assert_called_once_with(
|
||||||
@@ -516,6 +508,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
|
'uuid': 'a1251c79-f661-440e-aab2-a1f401865daf:'}
|
||||||
}
|
}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data)
|
||||||
|
test_event.context = request_data
|
||||||
status = self.service_orchestrator.check_for_user_config_complete(
|
status = self.service_orchestrator.check_for_user_config_complete(
|
||||||
test_event)
|
test_event)
|
||||||
mock_is_config_complete.assert_called_once_with(
|
mock_is_config_complete.assert_called_once_with(
|
||||||
@@ -542,8 +535,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'config_policy_id': 'config_policy_id',
|
'config_policy_id': 'config_policy_id',
|
||||||
'network_function_id': network_function['id']
|
'network_function_id': network_function['id']
|
||||||
}
|
}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data, context=request_data)
|
||||||
nfp_logging.store_logging_context(path='create')
|
test_event.context['log_context'] = nfp_context.init_log_context()
|
||||||
self.service_orchestrator.handle_user_config_failed(test_event)
|
self.service_orchestrator.handle_user_config_failed(test_event)
|
||||||
db_nf = self.nfp_db.get_network_function(
|
db_nf = self.nfp_db.get_network_function(
|
||||||
self.session, network_function['id'])
|
self.session, network_function['id'])
|
||||||
@@ -566,6 +559,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'config_policy_id': 'config_policy_id',
|
'config_policy_id': 'config_policy_id',
|
||||||
'network_function_id': network_function['id']}
|
'network_function_id': network_function['id']}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data)
|
||||||
|
test_event.context = request_data
|
||||||
mock_service_type.return_value = 'firewall'
|
mock_service_type.return_value = 'firewall'
|
||||||
status = self.service_orchestrator.check_for_user_config_deleted(
|
status = self.service_orchestrator.check_for_user_config_deleted(
|
||||||
test_event)
|
test_event)
|
||||||
@@ -588,6 +582,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'config_policy_id': 'config_policy_id',
|
'config_policy_id': 'config_policy_id',
|
||||||
'network_function_id': network_function['id']}
|
'network_function_id': network_function['id']}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data)
|
||||||
|
test_event.context = request_data
|
||||||
status = self.service_orchestrator.check_for_user_config_deleted(
|
status = self.service_orchestrator.check_for_user_config_deleted(
|
||||||
test_event)
|
test_event)
|
||||||
mock_is_config_delete_complete.assert_called_once_with(
|
mock_is_config_delete_complete.assert_called_once_with(
|
||||||
@@ -610,7 +605,8 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
'network_function_id': network_function['id'],
|
'network_function_id': network_function['id'],
|
||||||
'action': 'update'}
|
'action': 'update'}
|
||||||
test_event = Event(data=request_data)
|
test_event = Event(data=request_data)
|
||||||
nfp_logging.store_logging_context(path='create')
|
test_event.context = request_data
|
||||||
|
test_event.context['log_context'] = nfp_context.init_log_context()
|
||||||
status = self.service_orchestrator.check_for_user_config_deleted(
|
status = self.service_orchestrator.check_for_user_config_deleted(
|
||||||
test_event)
|
test_event)
|
||||||
mock_is_config_delete_complete.assert_called_once_with(
|
mock_is_config_delete_complete.assert_called_once_with(
|
||||||
@@ -657,8 +653,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
network_function['network_function_instances'])
|
network_function['network_function_instances'])
|
||||||
mock_get_admin_token.return_value = 'admin_token'
|
mock_get_admin_token.return_value = 'admin_token'
|
||||||
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
|
mock_get_admin_tenant_id.return_value = 'admin_tenant_id'
|
||||||
nfp_core_context.get_nfp_context = mock.MagicMock(
|
nfp_context.init()
|
||||||
return_value={})
|
|
||||||
self.service_orchestrator.delete_network_function(
|
self.service_orchestrator.delete_network_function(
|
||||||
self.context, network_function['id'])
|
self.context, network_function['id'])
|
||||||
db_nf = self.nfp_db.get_network_function(
|
db_nf = self.nfp_db.get_network_function(
|
||||||
@@ -675,6 +670,7 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
network_function['network_function_instances'])
|
network_function['network_function_instances'])
|
||||||
data = {'network_function_instance': nfi}
|
data = {'network_function_instance': nfi}
|
||||||
test_event = Event(data=data)
|
test_event = Event(data=data)
|
||||||
|
test_event.context = data
|
||||||
self.service_orchestrator.delete_network_function_instance(
|
self.service_orchestrator.delete_network_function_instance(
|
||||||
test_event)
|
test_event)
|
||||||
db_nfi = self.nfp_db.get_network_function_instance(
|
db_nfi = self.nfp_db.get_network_function_instance(
|
||||||
@@ -773,13 +769,13 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
transport.parse_service_flavor_string = mock.MagicMock(
|
transport.parse_service_flavor_string = mock.MagicMock(
|
||||||
return_value={'device_type': 'VM',
|
return_value={'device_type': 'VM',
|
||||||
'service_vendor': 'vyos'})
|
'service_vendor': 'vyos'})
|
||||||
nfp_core_context.get_nfp_context = mock.MagicMock(
|
|
||||||
return_value={})
|
|
||||||
with mock.patch.object(
|
with mock.patch.object(
|
||||||
self.service_orchestrator.config_driver,
|
self.service_orchestrator.config_driver,
|
||||||
"handle_consumer_ptg_operations") as\
|
"handle_consumer_ptg_operations") as\
|
||||||
mock_handle_consumer_ptg_added:
|
mock_handle_consumer_ptg_added:
|
||||||
mock_handle_consumer_ptg_added.return_value = 'stack_id'
|
mock_handle_consumer_ptg_added.return_value = 'stack_id'
|
||||||
|
nfp_context.init()
|
||||||
|
|
||||||
self.service_orchestrator.handle_consumer_ptg_added(
|
self.service_orchestrator.handle_consumer_ptg_added(
|
||||||
self.context, network_function_id, policy_target_group)
|
self.context, network_function_id, policy_target_group)
|
||||||
db_nf = self.nfp_db.get_network_function(
|
db_nf = self.nfp_db.get_network_function(
|
||||||
@@ -819,13 +815,12 @@ class ServiceOrchestratorTestCase(NSOModuleTestCase):
|
|||||||
transport.parse_service_flavor_string = mock.MagicMock(
|
transport.parse_service_flavor_string = mock.MagicMock(
|
||||||
return_value={'device_type': 'VM',
|
return_value={'device_type': 'VM',
|
||||||
'service_vendor': 'vyos'})
|
'service_vendor': 'vyos'})
|
||||||
nfp_core_context.get_nfp_context = mock.MagicMock(
|
|
||||||
return_value={})
|
|
||||||
with mock.patch.object(
|
with mock.patch.object(
|
||||||
self.service_orchestrator.config_driver,
|
self.service_orchestrator.config_driver,
|
||||||
"handle_consumer_ptg_operations") as\
|
"handle_consumer_ptg_operations") as\
|
||||||
mock_handle_consumer_ptg_removed:
|
mock_handle_consumer_ptg_removed:
|
||||||
mock_handle_consumer_ptg_removed.return_value = 'stack_id'
|
mock_handle_consumer_ptg_removed.return_value = 'stack_id'
|
||||||
|
nfp_context.init()
|
||||||
self.service_orchestrator.handle_consumer_ptg_removed(
|
self.service_orchestrator.handle_consumer_ptg_removed(
|
||||||
self.context, network_function_id, policy_target_group)
|
self.context, network_function_id, policy_target_group)
|
||||||
db_nf = self.nfp_db.get_network_function(
|
db_nf = self.nfp_db.get_network_function(
|
||||||
|
@@ -117,7 +117,7 @@ APPLY_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20
UPDATE_USER_CONFIG_PREPARING_TO_START_SPACING = 10
UPDATE_USER_CONFIG_PREPARING_TO_START_MAXRETRY = 20

-UPDATE_USER_CONFIG_STILL_IN_PROGRESS_MAXRETRY = 20
+UPDATE_USER_CONFIG_STILL_IN_PROGRESS_MAXRETRY = 300

DELETE_USER_CONFIG_IN_PROGRESS_SPACING = 10
DELETE_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20
gbpservice/nfp/lib/nfp_context_manager.py (new file, 213 lines)
@@ -0,0 +1,213 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import time
+
+from gbpservice.nfp.core import log as nfp_logging
+from gbpservice.nfp.lib import nfp_exceptions
+
+LOG = nfp_logging.getLogger(__name__)
+
+sql_lock_support = True
+
+
+class ContextManager(object):
+
+def __init__(self, session=None, suppress=tuple()):
+# suppress tuple holds the kind of exceptions
+# the we don't have re-raise
+self.session = session
+self.suppress = suppress
+
+def __enter__(self):
+pass
+
+def __exit__(self, Exptype, expvalue, traceback):
+
+if self.suppress and Exptype:
+if Exptype in self.suppress:
+return False
+for exception in self.suppress:
+if isinstance(Exptype, exception):
+return False
+if not self.suppress and traceback:
+return True
+else:
+return False
+
+def retry(self, method, *args, **kwargs):
+tries = kwargs.pop('tries', 1)
+delay = 2
+backoff = 2
+while tries > 1:
+# Loop for 'tries-1' times and
+# the last time without any try-catch
+try:
+return method(*args, **kwargs)
+except Exception:
+msg = " %s retrying in %s seconds " % (self.__class__, delay)
+LOG.error(msg)
+
+time.sleep(delay)
+tries -= 1
+delay *= backoff
+return method(*args, **kwargs)
+
+
+class NfpDbContextManager(ContextManager):
+
+def new(self, **kwargs):
+return NfpDbContextManager(**kwargs)
+
+def lock(self, session, method, *args, **kwargs):
+if not sql_lock_support:
+return method(session, *args, **kwargs)
+with session.begin(subtransactions=True):
+session.execute("SELECT GET_LOCK('nfp_db_lock', -1)")
+ret = method(session, *args, **kwargs)
+session.execute("SELECT RELEASE_LOCK('nfp_db_lock')")
+return ret
+
+def __enter__(self):
+super(NfpDbContextManager, self).__enter__()
+return self
+
+def __exit__(self, Exptype, expvalue, traceback):
+if super(NfpDbContextManager, self).__exit__(
+Exptype, expvalue, traceback):
+raise nfp_exceptions.DbException(Exptype, str(expvalue), traceback)
+
+# By default exit method returns False, if False is returned
+# the with block re-raises the exception. To suppress that
+# True should be returned explicitly
+
+return True
+
+
+class NfpNovaContextManager(ContextManager):
+
+def new(self, **kwargs):
+return NfpNovaContextManager(**kwargs)
+
+def __enter__(self):
+super(NfpNovaContextManager, self).__enter__()
+return self
+
+def __exit__(self, Exptype, expvalue, traceback):
+if super(NfpNovaContextManager, self).__exit__(
+Exptype, expvalue, traceback):
+raise nfp_exceptions.NovaException(
+Exptype, str(expvalue), traceback)
+
+# By default exit method returns False, if False is returned
+# the with block re-raises the exception. To suppress that
+# True should be returned explicitly
+
+return True
+
+
+class NfpKeystoneContextManager(ContextManager):
+
+def new(self, **kwargs):
+return NfpKeystoneContextManager(**kwargs)
+
+def __enter__(self):
+super(NfpKeystoneContextManager, self).__enter__()
+return self
+
+def __exit__(self, Exptype, expvalue, traceback):
+if super(NfpKeystoneContextManager, self).__exit__(
+Exptype, expvalue, traceback):
+raise nfp_exceptions.KeystoneException(
+Exptype, str(expvalue), traceback)
+# By default exit method returns False, if False is returned
+# the with block re-raises the exception. To suppress that
+# True should be returned explicitly
+
+return True
+
+
+class NfpNeutronContextManager(ContextManager):
+
+def new(self, **kwargs):
+return NfpNeutronContextManager(**kwargs)
+
+def __enter__(self):
+super(NfpNeutronContextManager, self).__enter__()
+return self
+
+def __exit__(self, Exptype, expvalue, traceback):
+if super(NfpNeutronContextManager, self).__exit__(
+Exptype, expvalue, traceback):
+raise nfp_exceptions.NeutronException(
+Exptype, str(expvalue), traceback)
+
+# By default exit method returns False, if False is returned
+# the with block re-raises the exception. To suppress that
+# True should be returned explicitly
+
+return True
+
+
+class NfpHeatContextManager(ContextManager):
+
+def new(self, **kwargs):
+return NfpHeatContextManager(**kwargs)
+
+def __enter__(self):
+super(NfpHeatContextManager, self).__enter__()
+return self
+
+def __exit__(self, Exptype, expvalue, traceback):
+if super(NfpHeatContextManager, self).__exit__(
+Exptype, expvalue, traceback):
+raise nfp_exceptions.HeatException(
+Exptype, str(expvalue), traceback)
+
+# By default exit method returns False, if False is returned
+# the with block re-raises the exception. To suppress that
+# True should be returned explicitly
+
+return True
+
+
+class NfpGBPContextManager(ContextManager):
+
+def new(self, **kwargs):
+return NfpGBPContextManager(**kwargs)
+
+def __enter__(self):
+super(NfpGBPContextManager, self).__enter__()
+return self
+
+def __exit__(self, Exptype, expvalue, traceback):
+if super(NfpGBPContextManager, self).__exit__(
+Exptype, expvalue, traceback):
+raise nfp_exceptions.GBPException(
+Exptype, str(expvalue), traceback)
+
+# By default exit method returns False, if False is returned
+# the with block re-raises the exception. To suppress that
+# True should be returned explicitly
+
+return True
+
+# Create the respective instances once, so that no need
+# to instantiate them again any where
+
+DbContextManager = NfpDbContextManager()
+NovaContextManager = NfpNovaContextManager()
+KeystoneContextManager = NfpKeystoneContextManager()
+NeutronContextManager = NfpNeutronContextManager()
+HeatContextManager = NfpHeatContextManager()
+GBPContextManager = NfpGBPContextManager()
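Beyond retry(), the module above provides lock() (a DB call serialized under a MySQL named lock) and exception suppression through the suppress tuple, which is the 'ignore' behaviour mentioned in the commit message. A minimal usage sketch follows; the nfp_db handle and its update_network_function call are illustrative assumptions, not code from this change:

    from gbpservice.nfp.common import exceptions as nfp_exc
    from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr

    def mark_network_function_error(nfp_db, session, network_function_id):
        # lock() runs the DB call between SELECT GET_LOCK('nfp_db_lock', -1)
        # and SELECT RELEASE_LOCK('nfp_db_lock'), so concurrent orchestrator
        # workers serialize (it is a no-op when sql_lock_support is False).
        with nfp_ctx_mgr.DbContextManager as dcm:
            dcm.lock(session, nfp_db.update_network_function,
                     network_function_id, {'status': 'ERROR'})

    # 'ignore': a manager created with a suppress tuple swallows those
    # exception types in __exit__ instead of wrapping and re-raising them:
    # with nfp_ctx_mgr.DbContextManager.new(
    #         suppress=(nfp_exc.NetworkFunctionNotFound,)) as dcm:
    #     dcm.lock(session, nfp_db.get_network_function, network_function_id)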
gbpservice/nfp/lib/nfp_exceptions.py (new file, 41 lines)
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class GenericException(Exception):
+
+def __init__(self, type, value, traceback):
+super(GenericException, self).__init__(type, value)
+
+
+class DbException(GenericException):
+pass
+
+
+class NeutronException(GenericException):
+pass
+
+
+class NovaException(GenericException):
+pass
+
+
+class KeystoneException(GenericException):
+pass
+
+
+class GBPException(GenericException):
+pass
+
+
+class HeatException(GenericException):
+pass
@@ -132,8 +132,8 @@ class RestApi(object):
|
|||||||
Return:Http response
|
Return:Http response
|
||||||
"""
|
"""
|
||||||
url = self.url % (
|
url = self.url % (
|
||||||
self.rest_server_address,
|
self.rest_server_address,
|
||||||
self.rest_server_port, path)
|
self.rest_server_port, path)
|
||||||
data = jsonutils.dumps(body)
|
data = jsonutils.dumps(body)
|
||||||
try:
|
try:
|
||||||
headers = {"content-type": "application/json"}
|
headers = {"content-type": "application/json"}
|
||||||
@@ -234,13 +234,13 @@ def send_request_to_configurator(conf, context, body,
|
|||||||
resp, content = unix_rc.post(method_name,
|
resp, content = unix_rc.post(method_name,
|
||||||
body=body)
|
body=body)
|
||||||
message = ("%s -> POST response: (%s) body : %s " %
|
message = ("%s -> POST response: (%s) body : %s " %
|
||||||
(method_name, content, body))
|
(method_name, content, body))
|
||||||
LOG.debug(message)
|
LOG.debug(message)
|
||||||
elif method_type.lower() in [nfp_constants.UPDATE]:
|
elif method_type.lower() in [nfp_constants.UPDATE]:
|
||||||
resp, content = unix_rc.put(method_name,
|
resp, content = unix_rc.put(method_name,
|
||||||
body=body)
|
body=body)
|
||||||
message = ("%s -> PUT response: (%s) body : %s " %
|
message = ("%s -> PUT response: (%s) body : %s " %
|
||||||
(method_name, content, body))
|
(method_name, content, body))
|
||||||
LOG.debug(message)
|
LOG.debug(message)
|
||||||
else:
|
else:
|
||||||
message = ("%s api not supported" % (method_name))
|
message = ("%s api not supported" % (method_name))
|
||||||
@@ -283,12 +283,14 @@ def get_response_from_configurator(conf):
|
|||||||
message = ("get_notification ->"
|
message = ("get_notification ->"
|
||||||
"GET request failed. Reason : %s" % (rce))
|
"GET request failed. Reason : %s" % (rce))
|
||||||
LOG.error(message)
|
LOG.error(message)
|
||||||
return []
|
return "get_notification -> GET request failed. Reason : %s" % (
|
||||||
|
rce)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
message = ("get_notification ->"
|
message = ("get_notification ->"
|
||||||
"GET request failed. Reason : %s" % (e))
|
"GET request failed. Reason : %s" % (e))
|
||||||
LOG.error(message)
|
LOG.error(message)
|
||||||
return []
|
return "get_notification -> GET request failed. Reason : %s" % (
|
||||||
|
e)
|
||||||
|
|
||||||
elif conf.backend == UNIX_REST:
|
elif conf.backend == UNIX_REST:
|
||||||
try:
|
try:
|
||||||
@@ -304,13 +306,15 @@ def get_response_from_configurator(conf):
|
|||||||
"GET request failed. Reason : %s" % (
|
"GET request failed. Reason : %s" % (
|
||||||
rce))
|
rce))
|
||||||
LOG.error(message)
|
LOG.error(message)
|
||||||
return []
|
return "get_notification -> GET request failed. Reason : %s" % (
|
||||||
|
rce)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
message = ("get_notification ->"
|
message = ("get_notification ->"
|
||||||
"GET request failed. Reason : %s" % (
|
"GET request failed. Reason : %s" % (
|
||||||
e))
|
e))
|
||||||
LOG.error(message)
|
LOG.error(message)
|
||||||
return []
|
return "get_notification -> GET request failed. Reason : %s" % (
|
||||||
|
e)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
rpc_cbs_data = []
|
rpc_cbs_data = []
|
||||||
|
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.

+from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
+
from gbpservice.nfp.orchestrator.openstack import openstack_driver
from gbpservice.nfp.orchestrator.coal.networking import(
nfp_neutron_network_driver as neutron_nd
@@ -17,6 +19,7 @@ from gbpservice.nfp.orchestrator.coal.networking import(
|
|||||||
|
|
||||||
|
|
||||||
class NFPGBPNetworkDriver(neutron_nd.NFPNeutronNetworkDriver):
|
class NFPGBPNetworkDriver(neutron_nd.NFPNeutronNetworkDriver):
|
||||||
|
|
||||||
def __init__(self, config):
|
def __init__(self, config):
|
||||||
self.config = config
|
self.config = config
|
||||||
super(NFPGBPNetworkDriver, self).__init__(config)
|
super(NFPGBPNetworkDriver, self).__init__(config)
|
||||||
@@ -26,55 +29,66 @@ class NFPGBPNetworkDriver(neutron_nd.NFPNeutronNetworkDriver):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
def create_port(self, token, tenant_id, net_id, name=None):
|
def create_port(self, token, tenant_id, net_id, name=None):
|
||||||
port = self.network_handler.create_policy_target(token, tenant_id,
|
with nfp_ctx_mgr.GBPContextManager as gcm:
|
||||||
net_id, name)
|
port = gcm.retry(self.network_handler.create_policy_target,
|
||||||
|
token, tenant_id, net_id, name)
|
||||||
return port
|
return port
|
||||||
|
|
||||||
def delete_port(self, token, port_id):
|
def delete_port(self, token, port_id):
|
||||||
self.network_handler.delete_policy_target(token, port_id)
|
with nfp_ctx_mgr.GBPContextManager as gcm:
|
||||||
|
gcm.retry(
|
||||||
|
self.network_handler.delete_policy_target,
|
||||||
|
token, port_id)
|
||||||
|
|
||||||
def get_port_id(self, token, port_id):
|
def get_port_id(self, token, port_id):
|
||||||
pt = self.network_handler.get_policy_target(token, port_id)
|
with nfp_ctx_mgr.GBPContextManager as gcm:
|
||||||
|
pt = gcm.retry(
|
||||||
|
self.network_handler.get_policy_target, token, port_id)
|
||||||
return pt['port_id']
|
return pt['port_id']
|
||||||
|
|
||||||
def update_port(self, token, port_id, port):
|
def update_port(self, token, port_id, port):
|
||||||
pt = self.network_handler.update_policy_target(token, port_id,
|
with nfp_ctx_mgr.GBPContextManager as gcm:
|
||||||
port)
|
pt = gcm.retry(
|
||||||
|
self.network_handler.update_policy_target,
|
||||||
|
token, port_id, port)
|
||||||
return pt['port_id']
|
return pt['port_id']
|
||||||
|
|
||||||
def get_neutron_port_details(self, token, port_id):
|
def get_neutron_port_details(self, token, port_id):
|
||||||
#self.network_handler = openstack_driver.NeutronClient(self.config)
|
# self.network_handler = openstack_driver.NeutronClient(self.config)
|
||||||
port_details = (
|
port_details = (
|
||||||
super(NFPGBPNetworkDriver, self).get_port_and_subnet_details(
|
super(NFPGBPNetworkDriver, self).get_port_and_subnet_details(
|
||||||
token, port_id))
|
token, port_id))
|
||||||
#self.network_handler = openstack_driver.GBPClient(self.config)
|
# self.network_handler = openstack_driver.GBPClient(self.config)
|
||||||
return port_details
|
return port_details
|
||||||
|
|
||||||
def get_port_details(self, token, port_id):
|
def get_port_details(self, token, port_id):
|
||||||
_port_id = self.get_port_id(token, port_id)
|
_port_id = self.get_port_id(token, port_id)
|
||||||
#self.network_handler = openstack_driver.NeutronClient(self.config)
|
# self.network_handler = openstack_driver.NeutronClient(self.config)
|
||||||
port_details = super(NFPGBPNetworkDriver, self).get_port_details(
|
port_details = super(NFPGBPNetworkDriver, self).get_port_details(
|
||||||
token, _port_id)
|
token, _port_id)
|
||||||
#self.network_handler = openstack_driver.GBPClient(self.config)
|
# self.network_handler = openstack_driver.GBPClient(self.config)
|
||||||
return port_details
|
return port_details
|
||||||
|
|
||||||
def get_networks(self, token, filters):
|
def get_networks(self, token, filters):
|
||||||
return self.network_handler.get_policy_target_groups(token,
|
with nfp_ctx_mgr.GBPContextManager as gcm:
|
||||||
filters=filters)
|
return gcm.retry(
|
||||||
|
self.network_handler.get_policy_target_groups,
|
||||||
|
token, filters=filters)
|
||||||
|
|
||||||
def set_promiscuos_mode(self, token, port_id, enable_port_security):
|
def set_promiscuos_mode(self, token, port_id, enable_port_security):
|
||||||
port_id = self.get_port_id(token, port_id)
|
port_id = self.get_port_id(token, port_id)
|
||||||
#self.network_handler = openstack_driver.NeutronClient(self.config)
|
# self.network_handler = openstack_driver.NeutronClient(self.config)
|
||||||
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token,
|
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(
|
||||||
port_id, enable_port_security)
|
token, port_id, enable_port_security)
|
||||||
#self.network_handler = openstack_driver.GBPClient(self.config)
|
# self.network_handler = openstack_driver.GBPClient(self.config)
|
||||||
|
|
||||||
def set_promiscuos_mode_fast(self, token, port_id, enable_port_security):
|
def set_promiscuos_mode_fast(self, token, port_id, enable_port_security):
|
||||||
#self.network_handler = openstack_driver.NeutronClient(self.config)
|
# self.network_handler = openstack_driver.NeutronClient(self.config)
|
||||||
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token,
|
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(
|
||||||
port_id, enable_port_security)
|
token, port_id, enable_port_security)
|
||||||
#self.network_handler = openstack_driver.GBPClient(self.config)
|
# self.network_handler = openstack_driver.GBPClient(self.config)
|
||||||
|
|
||||||
def get_service_profile(self, token, service_profile_id):
|
def get_service_profile(self, token, service_profile_id):
|
||||||
return self.network_handler.get_service_profile(token,
|
with nfp_ctx_mgr.GBPContextManager as gcm:
|
||||||
service_profile_id)
|
return gcm.retry(self.network_handler.get_service_profile, token,
|
||||||
|
service_profile_id)
|
||||||
|
@@ -16,6 +16,7 @@ class NFPNetworkDriverBase(object):
|
|||||||
|
|
||||||
Handles ports, operations on them
|
Handles ports, operations on them
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
@@ -10,6 +10,8 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
|
||||||
|
|
||||||
from gbpservice.nfp.orchestrator.openstack import openstack_driver
|
from gbpservice.nfp.orchestrator.openstack import openstack_driver
|
||||||
from gbpservice.nfp.orchestrator.coal.networking import(
|
from gbpservice.nfp.orchestrator.coal.networking import(
|
||||||
nfp_network_driver_base as ndb
|
nfp_network_driver_base as ndb
|
||||||
@@ -27,22 +29,29 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
def create_port(self, token, tenant_id, net_id, name=None):
|
def create_port(self, token, tenant_id, net_id, name=None):
|
||||||
port = self.neutron_client.create_port(token, tenant_id, net_id,
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
attrs={'name': name})
|
port = ncm.retry(
|
||||||
return port
|
self.neutron_client.create_port,
|
||||||
|
token, tenant_id, net_id,
|
||||||
|
attrs={'name': name})
|
||||||
|
return port
|
||||||
|
|
||||||
def delete_port(self, token, port_id):
|
def delete_port(self, token, port_id):
|
||||||
self.neutron_client.delete_port(token, port_id)
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
|
ncm.retry(self.neutron_client.delete_port, token, port_id)
|
||||||
|
|
||||||
def get_port_id(self, token, port_id):
|
def get_port_id(self, token, port_id):
|
||||||
return port_id
|
return port_id
|
||||||
|
|
||||||
def update_port(self, token, port_id, port):
|
def update_port(self, token, port_id, port):
|
||||||
port = self.neutron_client.update_port(token, port_id, **port)
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
|
port = ncm.retry(self.neutron_client.update_port,
|
||||||
|
token, port_id, **port)
|
||||||
return port['port']
|
return port['port']
|
||||||
|
|
||||||
def get_port_and_subnet_details(self, token, port_id):
|
def get_port_and_subnet_details(self, token, port_id):
|
||||||
port = self.neutron_client.get_port(token, port_id)
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
|
port = ncm.retry(self.neutron_client.get_port, token, port_id)
|
||||||
|
|
||||||
# ip
|
# ip
|
||||||
ip = port['port']['fixed_ips'][0]['ip_address']
|
ip = port['port']['fixed_ips'][0]['ip_address']
|
||||||
@@ -52,14 +61,17 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
|
|||||||
|
|
||||||
# gateway ip
|
# gateway ip
|
||||||
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
|
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
|
||||||
subnet = self.neutron_client.get_subnet(token, subnet_id)
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
|
subnet = ncm.retry(
|
||||||
|
self.neutron_client.get_subnet, token, subnet_id)
|
||||||
cidr = subnet['subnet']['cidr']
|
cidr = subnet['subnet']['cidr']
|
||||||
gateway_ip = subnet['subnet']['gateway_ip']
|
gateway_ip = subnet['subnet']['gateway_ip']
|
||||||
|
|
||||||
return (ip, mac, cidr, gateway_ip, port, subnet)
|
return (ip, mac, cidr, gateway_ip, port, subnet)
|
||||||
|
|
||||||
def get_port_details(self, token, port_id):
|
def get_port_details(self, token, port_id):
|
||||||
port = self.neutron_client.get_port(token, port_id)
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
|
port = ncm.retry(self.neutron_client.get_port, token, port_id)
|
||||||
|
|
||||||
# ip
|
# ip
|
||||||
ip = port['port']['fixed_ips'][0]['ip_address']
|
ip = port['port']['fixed_ips'][0]['ip_address']
|
||||||
@@ -69,20 +81,20 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
|
|||||||
|
|
||||||
# gateway ip
|
# gateway ip
|
||||||
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
|
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
|
||||||
subnet = self.neutron_client.get_subnet(token, subnet_id)
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
|
subnet = ncm.retry(
|
||||||
|
self.neutron_client.get_subnet, token, subnet_id)
|
||||||
cidr = subnet['subnet']['cidr']
|
cidr = subnet['subnet']['cidr']
|
||||||
gateway_ip = subnet['subnet']['gateway_ip']
|
gateway_ip = subnet['subnet']['gateway_ip']
|
||||||
|
|
||||||
return (ip, mac, cidr, gateway_ip, port, subnet)
|
return (ip, mac, cidr, gateway_ip, port, subnet)
|
||||||
|
|
||||||
def set_promiscuos_mode(self, token, port_id, enable_port_security):
|
def set_promiscuos_mode(self, token, port_id, enable_port_security):
|
||||||
if not enable_port_security:
|
port_security = bool(enable_port_security)
|
||||||
port_security = False
|
with nfp_ctx_mgr.NeutronContextManager as ncm:
|
||||||
else:
|
ncm.retry(self.neutron_client.update_port, token, port_id,
|
||||||
port_security = True
|
security_groups=[],
|
||||||
self.neutron_client.update_port(token, port_id,
|
port_security_enabled=port_security)
|
||||||
security_groups=[],
|
|
||||||
port_security_enabled=port_security)
|
|
||||||
|
|
||||||
def get_service_profile(self, token, service_profile_id):
|
def get_service_profile(self, token, service_profile_id):
|
||||||
return {}
|
return {}
|
||||||
|
@@ -37,7 +37,7 @@ class HeatClient(object):
|
|||||||
self.timeout_mins = timeout_mins
|
self.timeout_mins = timeout_mins
|
||||||
# REVISIT(ashu): The base class is a old style class. We have to
|
# REVISIT(ashu): The base class is a old style class. We have to
|
||||||
# change when it is updated
|
# change when it is updated
|
||||||
#gbp_heat_api_client.HeatClient.__init__(
|
# gbp_heat_api_client.HeatClient.__init__(
|
||||||
# self, context, heat_uri, password, auth_token)
|
# self, context, heat_uri, password, auth_token)
|
||||||
|
|
||||||
def create(self, name, data, parameters=None):
|
def create(self, name, data, parameters=None):
|
||||||
@@ -65,7 +65,7 @@ class HeatClient(object):
|
|||||||
self.stacks.delete(stack_id)
|
self.stacks.delete(stack_id)
|
||||||
except heat_exc.HTTPNotFound:
|
except heat_exc.HTTPNotFound:
|
||||||
LOG.warning(_LW("Stack %(stack)s created by service chain driver "
|
LOG.warning(_LW("Stack %(stack)s created by service chain driver "
|
||||||
"is not found at cleanup"), {'stack': stack_id})
|
"is not found at cleanup"), {'stack': stack_id})
|
||||||
|
|
||||||
def get(self, stack_id):
|
def get(self, stack_id):
|
||||||
return self.stacks.get(stack_id)
|
return self.stacks.get(stack_id)
|
||||||
|
File diff suppressed because it is too large
Load Diff
518
gbpservice/nfp/orchestrator/context.py
Normal file
518
gbpservice/nfp/orchestrator/context.py
Normal file
@@ -0,0 +1,518 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
|
||||||
|
class Subnet(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'cidr': self.data.get('cidr'),
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'gateway_ip': self.data.get('gateway_ip'),
|
||||||
|
'name': self.data.get('name')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class Port(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'ip_address': self.data.get('ip_address'),
|
||||||
|
'mac_address': self.data.get('mac_address'),
|
||||||
|
'mac': self.data.get('mac'),
|
||||||
|
'name': self.data.get('name'),
|
||||||
|
'fixed_ips': self.data.get('fixed_ips'),
|
||||||
|
'gateway_ip': self.data.get('gateway_ip'),
|
||||||
|
'neutron_port': self.data.get('neutron_port'),
|
||||||
|
'cidr': self.data.get('cidr')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class Pt(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'port_id': self.data.get('port_id'),
|
||||||
|
'policy_target_group_id': self.data.get(
|
||||||
|
'policy_target_group_id'),
|
||||||
|
'group_default_gateway': self.data.get(
|
||||||
|
'group_default_gateway'),
|
||||||
|
'proxy_gateway': self.data.get(
|
||||||
|
'proxy_gateway')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class Ptg(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'name': self.data.get('name'),
|
||||||
|
'provided_policy_rule_sets': self.data.get(
|
||||||
|
'provided_policy_rule_sets'),
|
||||||
|
'proxied_group_id': self.data.get(
|
||||||
|
'proxied_group_id'),
|
||||||
|
'policy_targets': self.data.get('policy_targets'),
|
||||||
|
'tenant_id': self.data.get('tenant_id'),
|
||||||
|
'subnets': self.data.get('subnets'),
|
||||||
|
'l2_policy_id': self.data.get('l2_policy_id')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class NetworkFunctionDevice(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'interfaces_in_use': self.data.get('interfaces_in_use'),
|
||||||
|
'status': self.data.get('status'),
|
||||||
|
'mgmt_ip_address': self.data.get('mgmt_ip_address'),
|
||||||
|
'monitoring_port_id': self.data.get('monitoring_port_id'),
|
||||||
|
'reference_count': self.data.get('reference_count'),
|
||||||
|
'mgmt_port_id': self.data.get('mgmt_port_id'),
|
||||||
|
'tenant_id': self.data.get('tenant_id'),
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class NetworkFunctionInstance(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'status': self.data.get('status'),
|
||||||
|
'port_info': self.data.get('port_info'),
|
||||||
|
'network_function_device_id': self.data.get(
|
||||||
|
'network_function_device_id'),
|
||||||
|
'tenant_id': self.data.get('tenant_id'),
|
||||||
|
'name': self.data.get('name')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class NetworkFunction(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'name': self.data.get('name'),
|
||||||
|
'status': self.data.get('status'),
|
||||||
|
'service_id': self.data.get('service_id'),
|
||||||
|
'config_policy_id': self.data.get('config_policy_id'),
|
||||||
|
'service_profile_id': self.data.get('service_profile_id'),
|
||||||
|
'service_chain_id': self.data.get('service_chain_id'),
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'tenant_id': self.data.get('tenant_id'),
|
||||||
|
'network_function_instances': self.data.get(
|
||||||
|
'network_function_instances'),
|
||||||
|
'description': self.data.get('description')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ResourceOwnerContext(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'admin_token': self.data.get('admin_token'),
|
||||||
|
'admin_tenant_id': self.data.get('admin_tenant_id'),
|
||||||
|
'tenant_name': self.data.get('tenant_name'),
|
||||||
|
'tenant': self.data.get('tenant')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class Management(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'port': self.data.get('port')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class Provider(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
context = {
|
||||||
|
'subnet': Subnet(
|
||||||
|
self.data.get('subnet')).purge(),
|
||||||
|
'port_model': self.data.get('port_model'),
|
||||||
|
'port_classification': self.data.get('port_classification')
|
||||||
|
}
|
||||||
|
|
||||||
|
if type(self.data.get('pt')) is list:
|
||||||
|
pt_list = []
|
||||||
|
for pt in self.data['pt']:
|
||||||
|
pt_list.append(Pt(pt).purge())
|
||||||
|
context['pt'] = pt_list
|
||||||
|
else:
|
||||||
|
context['pt'] = Pt(self.data.get('pt')).purge()
|
||||||
|
|
||||||
|
if type(self.data.get('ptg')) is list:
|
||||||
|
ptg_list = []
|
||||||
|
for ptg in self.data['ptg']:
|
||||||
|
ptg_list.append(Ptg(ptg).purge())
|
||||||
|
context['ptg'] = ptg_list
|
||||||
|
else:
|
||||||
|
context['ptg'] = Ptg(self.data.get('ptg')).purge()
|
||||||
|
|
||||||
|
if type(self.data.get('port')) is list:
|
||||||
|
port_list = []
|
||||||
|
for port in self.data['port']:
|
||||||
|
port_list.append(Port(port).purge())
|
||||||
|
context['port'] = port_list
|
||||||
|
else:
|
||||||
|
context['port'] = Port(self.data.get('port')).purge()
|
||||||
|
|
||||||
|
return context
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class Consumer(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
context = {
|
||||||
|
'subnet': Subnet(
|
||||||
|
self.data.get('subnet')).purge(),
|
||||||
|
'port_model': self.data.get('port_model'),
|
||||||
|
'port_classification': self.data.get('port_classification')
|
||||||
|
}
|
||||||
|
if type(self.data.get('pt')) is list:
|
||||||
|
pt_list = []
|
||||||
|
for pt in self.data['pt']:
|
||||||
|
pt_list.append(Pt(pt).purge())
|
||||||
|
context['pt'] = pt_list
|
||||||
|
else:
|
||||||
|
context['pt'] = Pt(self.data.get('pt')).purge()
|
||||||
|
|
||||||
|
if type(self.data.get('ptg')) is list:
|
||||||
|
ptg_list = []
|
||||||
|
for ptg in self.data['ptg']:
|
||||||
|
ptg_list.append(Ptg(ptg).purge())
|
||||||
|
context['ptg'] = ptg_list
|
||||||
|
else:
|
||||||
|
context['ptg'] = Ptg(self.data.get('ptg')).purge()
|
||||||
|
|
||||||
|
if type(self.data.get('port')) is list:
|
||||||
|
port_list = []
|
||||||
|
for port in self.data['port']:
|
||||||
|
port_list.append(Port(port).purge())
|
||||||
|
context['port'] = port_list
|
||||||
|
else:
|
||||||
|
context['port'] = Port(self.data.get('port')).purge()
|
||||||
|
|
||||||
|
return context
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ScNodes(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
sc_service_profile = self.data.get('sc_service_profile')
|
||||||
|
context = {'sc_service_profile': {}}
|
||||||
|
if sc_service_profile:
|
||||||
|
context['sc_service_profile'][
|
||||||
|
'service_type'] = sc_service_profile.get('service_type')
|
||||||
|
return context
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceChainSpecs(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
sc_nodes = self.data.get('sc_nodes')
|
||||||
|
if type(sc_nodes) is list:
|
||||||
|
context = []
|
||||||
|
for sc_node in sc_nodes:
|
||||||
|
context.append(ScNodes(sc_node).purge())
|
||||||
|
return {
|
||||||
|
'sc_nodes': context
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return {
|
||||||
|
'sc_nodes': ScNodes(sc_nodes).purge()
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceChainInstance(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'config_param_values': self.data.get('config_param_values'),
|
||||||
|
'name': self.data.get('name')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ConsumingPtgsDetails(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
context = {}
|
||||||
|
context['ptg'] = Ptg(self.data.get('ptg')).purge()
|
||||||
|
subnets = self.data.get('subnets')
|
||||||
|
if type(subnets) is list:
|
||||||
|
subnet_ctxt = []
|
||||||
|
for subnet in subnets:
|
||||||
|
subnet_ctxt.append(Subnet(subnet).purge())
|
||||||
|
context['subnets'] = subnet_ctxt
|
||||||
|
else:
|
||||||
|
context['subnets'] = Subnet(subnets).purge()
|
||||||
|
return context
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceChainNode(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'service_profile_id': self.data.get('service_profile_id'),
|
||||||
|
'service_type': self.data.get('service_type'),
|
||||||
|
'config': self.data.get('config'),
|
||||||
|
'name': self.data.get('name'),
|
||||||
|
'id': self.data.get('id')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceDetails(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'service_vendor': self.data.get('service_vendor'),
|
||||||
|
'service_type': self.data.get('service_type'),
|
||||||
|
'network_mode': self.data.get('network_mode'),
|
||||||
|
'image_name': self.data.get('image_name'),
|
||||||
|
'device_type': self.data.get('device_type'),
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ConsumingEpsDetails(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ServerGrpId(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'result': self.data.get('result')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceProfile(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'id': self.data.get('id'),
|
||||||
|
'service_flavor': self.data.get('service_flavor'),
|
||||||
|
'service_type': self.data.get('service_type')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class LogContext(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
if self.data:
|
||||||
|
return {
|
||||||
|
'meta_id': self.data.get('meta_id', '-'),
|
||||||
|
'nfi_id': self.data.get('nfi_id', '-'),
|
||||||
|
'nfd_id': self.data.get('nfd_id', '-'),
|
||||||
|
'path': self.data.get('path'),
|
||||||
|
'auth_token': self.data.get('auth_token'),
|
||||||
|
'namespace': self.data.get('namespace')
|
||||||
|
}
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
|
||||||
|
class NfpContext(object):
|
||||||
|
|
||||||
|
def __init__(self, data):
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def purge(self):
|
||||||
|
|
||||||
|
context = {
|
||||||
|
'active_nfd_ids': self.data.get('active_nfd_ids'),
|
||||||
|
'device_without_plugging': self.data.get(
|
||||||
|
'device_without_plugging'),
|
||||||
|
'id': self.data.get('id'), # event id
|
||||||
|
'key': self.data.get('key'), # event key
|
||||||
|
'admin_token': self.data.get('admin_token'),
|
||||||
|
'event_desc': self.data.get('event_desc'),
|
||||||
|
'config_policy_id': self.data.get('config_policy_id'),
|
||||||
|
'management_ptg_id': self.data.get('management_ptg_id'),
|
||||||
|
'network_function_mode': self.data.get('network_function_mode'),
|
||||||
|
'files': self.data.get('files'),
|
||||||
|
'base_mode_support': self.data.get('base_mode_support'),
|
||||||
|
'share_existing_device': self.data.get('share_existing_device'),
|
||||||
|
'tenant_id': self.data.get('tenant_id'),
|
||||||
|
'binding_key': self.data.get('binding_key'),
|
||||||
|
'provider_metadata': self.data.get('provider_metadata'),
|
||||||
|
'admin_tenant_id': self.data.get('admin_tenant_id'),
|
||||||
|
'is_nfi_in_graph': self.data.get('is_nfi_in_graph'),
|
||||||
|
'network_function_device': NetworkFunctionDevice(
|
||||||
|
self.data.get('network_function_device')).purge(),
|
||||||
|
'network_function_instance': NetworkFunctionInstance(
|
||||||
|
self.data.get('network_function_instance')).purge(),
|
||||||
|
'network_function': NetworkFunction(
|
||||||
|
self.data.get('network_function')).purge(),
|
||||||
|
'resource_owner_context': ResourceOwnerContext(
|
||||||
|
self.data.get('resource_owner_context')).purge(),
|
||||||
|
'management': Management(
|
||||||
|
self.data.get('management')).purge(),
|
||||||
|
'provider': Provider(
|
||||||
|
self.data.get('provider')).purge(),
|
||||||
|
'consumer': Consumer(
|
||||||
|
self.data.get('consumer')).purge(),
|
||||||
|
'service_chain_instance': ServiceChainInstance(
|
||||||
|
self.data.get('service_chain_instance')).purge(),
|
||||||
|
'service_details': ServiceDetails(
|
||||||
|
self.data.get('service_details')).purge(),
|
||||||
|
'service_chain_node': ServiceChainNode(
|
||||||
|
self.data.get('service_chain_node')).purge(),
|
||||||
|
'server_grp_id': ServerGrpId(
|
||||||
|
self.data.get('server_grp_id')).purge(),
|
||||||
|
'service_profile': ServiceProfile(
|
||||||
|
self.data.get('service_profile')).purge(),
|
||||||
|
'log_context': LogContext(self.data.get('log_context')).purge(),
|
||||||
|
'enable_port_security': self.data.get('enable_port_security')
|
||||||
|
}
|
||||||
|
|
||||||
|
service_chain_specs = self.data.get('service_chain_specs')
|
||||||
|
if type(service_chain_specs) is list:
|
||||||
|
ctxt = []
|
||||||
|
for sc_specs in service_chain_specs:
|
||||||
|
ctxt.append(ServiceChainSpecs(sc_specs).purge())
|
||||||
|
context['service_chain_specs'] = ctxt
|
||||||
|
else:
|
||||||
|
context['service_chain_specs'] = ServiceChainSpecs(
|
||||||
|
service_chain_specs).purge()
|
||||||
|
|
||||||
|
consuming_ptgs_details = self.data.get('consuming_ptgs_details')
|
||||||
|
if type(consuming_ptgs_details) is list:
|
||||||
|
ctxt = []
|
||||||
|
for ptgs_details in consuming_ptgs_details:
|
||||||
|
ctxt.append(ConsumingPtgsDetails(ptgs_details).purge())
|
||||||
|
context['consuming_ptgs_details'] = ctxt
|
||||||
|
else:
|
||||||
|
context['consuming_ptgs_details'] = ConsumingPtgsDetails(
|
||||||
|
consuming_ptgs_details).purge()
|
||||||
|
|
||||||
|
consuming_eps_details = self.data.get('consuming_eps_details')
|
||||||
|
if type(consuming_eps_details) is list:
|
||||||
|
ctxt = []
|
||||||
|
for eps_details in consuming_eps_details:
|
||||||
|
ctxt.append(ConsumingEpsDetails(eps_details).purge())
|
||||||
|
context['consuming_eps_details'] = ctxt
|
||||||
|
else:
|
||||||
|
context['consuming_eps_details'] = ConsumingEpsDetails(
|
||||||
|
consuming_eps_details).purge()
|
||||||
|
|
||||||
|
return context
|
@@ -121,7 +121,7 @@ class CommonDbMixin(object):
|
|||||||
def _apply_dict_extend_functions(self, resource_type,
|
def _apply_dict_extend_functions(self, resource_type,
|
||||||
response, db_object):
|
response, db_object):
|
||||||
for func in self._dict_extend_functions.get(
|
for func in self._dict_extend_functions.get(
|
||||||
resource_type, []):
|
resource_type, []):
|
||||||
args = (response, db_object)
|
args = (response, db_object)
|
||||||
if isinstance(func, basestring):
|
if isinstance(func, basestring):
|
||||||
func = getattr(self, func, None)
|
func = getattr(self, func, None)
|
||||||
@@ -138,8 +138,9 @@ class CommonDbMixin(object):
|
|||||||
collection = self._apply_filters_to_query(collection, model, filters)
|
collection = self._apply_filters_to_query(collection, model, filters)
|
||||||
if limit and page_reverse and sorts:
|
if limit and page_reverse and sorts:
|
||||||
sorts = [(s[0], not s[1]) for s in sorts]
|
sorts = [(s[0], not s[1]) for s in sorts]
|
||||||
collection = sqlalchemyutils.paginate_query(collection, model, limit,
|
collection = sqlalchemyutils.paginate_query(
|
||||||
sorts, marker_obj=marker_obj)
|
collection, model, limit,
|
||||||
|
sorts, marker_obj=marker_obj)
|
||||||
return collection
|
return collection
|
||||||
|
|
||||||
def _get_collection(self, session, model, dict_func, filters=None,
|
def _get_collection(self, session, model, dict_func, filters=None,
|
||||||
|
@@ -176,13 +176,23 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
def get_network_function_instances(self, session, filters=None,
|
def get_network_function_instances(self, session, filters=None,
|
||||||
fields=None, sorts=None, limit=None,
|
fields=None, sorts=None, limit=None,
|
||||||
marker=None, page_reverse=False):
|
marker=None, page_reverse=False):
|
||||||
|
port_info = None
|
||||||
marker_obj = self._get_marker_obj(
|
marker_obj = self._get_marker_obj(
|
||||||
'network_function_instances', limit, marker)
|
'network_function_instances', limit, marker)
|
||||||
return self._get_collection(
|
if filters:
|
||||||
|
port_info = filters.pop('port_info', None)
|
||||||
|
nfis = self._get_collection(
|
||||||
session, nfp_db_model.NetworkFunctionInstance,
|
session, nfp_db_model.NetworkFunctionInstance,
|
||||||
self._make_network_function_instance_dict,
|
self._make_network_function_instance_dict,
|
||||||
filters=filters, fields=fields, sorts=sorts, limit=limit,
|
filters=filters, fields=fields, sorts=sorts, limit=limit,
|
||||||
marker_obj=marker_obj, page_reverse=page_reverse)
|
marker_obj=marker_obj, page_reverse=page_reverse)
|
||||||
|
filtered_nfis = []
|
||||||
|
if port_info:
|
||||||
|
for nfi in nfis:
|
||||||
|
if port_info == nfi['port_info']:
|
||||||
|
filtered_nfis.append(nfi)
|
||||||
|
return filtered_nfis
|
||||||
|
return nfis
|
||||||
|
|
||||||
def _set_mgmt_port_for_nfd(self, session, network_function_device_db,
|
def _set_mgmt_port_for_nfd(self, session, network_function_device_db,
|
||||||
network_function_device, is_update=False):
|
network_function_device, is_update=False):
|
||||||
@@ -249,9 +259,9 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
del network_function_device['monitoring_port_network']
|
del network_function_device['monitoring_port_network']
|
||||||
|
|
||||||
def _set_provider_metadata_for_nfd(self, session,
|
def _set_provider_metadata_for_nfd(self, session,
|
||||||
network_function_device_db,
|
network_function_device_db,
|
||||||
network_function_device,
|
network_function_device,
|
||||||
is_update=False):
|
is_update=False):
|
||||||
nfd_db = network_function_device_db
|
nfd_db = network_function_device_db
|
||||||
provider_metadata = nfd_db['provider_metadata']
|
provider_metadata = nfd_db['provider_metadata']
|
||||||
|
|
||||||
@@ -259,14 +269,14 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
if provider_metadata:
|
if provider_metadata:
|
||||||
provider_metadata = jsonutils.loads(provider_metadata)
|
provider_metadata = jsonutils.loads(provider_metadata)
|
||||||
updated_provider_metadata_str = network_function_device.pop(
|
updated_provider_metadata_str = network_function_device.pop(
|
||||||
'provider_metadata', {})
|
'provider_metadata', {})
|
||||||
if not updated_provider_metadata_str:
|
if not updated_provider_metadata_str:
|
||||||
return
|
return
|
||||||
if updated_provider_metadata_str:
|
if updated_provider_metadata_str:
|
||||||
updated_provider_metadata = jsonutils.loads(
|
updated_provider_metadata = jsonutils.loads(
|
||||||
updated_provider_metadata_str)
|
updated_provider_metadata_str)
|
||||||
if (type(updated_provider_metadata) is dict
|
if (type(updated_provider_metadata) is dict and
|
||||||
and updated_provider_metadata and provider_metadata):
|
updated_provider_metadata and provider_metadata):
|
||||||
updated_provider_metadata.update(provider_metadata)
|
updated_provider_metadata.update(provider_metadata)
|
||||||
provider_metadata_str = jsonutils.dumps(updated_provider_metadata)
|
provider_metadata_str = jsonutils.dumps(updated_provider_metadata)
|
||||||
else:
|
else:
|
||||||
@@ -318,15 +328,15 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
|
|
||||||
def update_network_function_device(self, session,
|
def update_network_function_device(self, session,
|
||||||
network_function_device_id,
|
network_function_device_id,
|
||||||
updated_network_function_device):
|
updated_network_function_device):
|
||||||
with session.begin(subtransactions=True):
|
with session.begin(subtransactions=True):
|
||||||
network_function_device_db = self._get_network_function_device(
|
network_function_device_db = self._get_network_function_device(
|
||||||
session, network_function_device_id)
|
session, network_function_device_id)
|
||||||
if updated_network_function_device.get('provider_metadata'):
|
if updated_network_function_device.get('provider_metadata'):
|
||||||
updated_network_function_device[
|
updated_network_function_device[
|
||||||
'provider_metadata'] = jsonutils.dumps(
|
'provider_metadata'] = jsonutils.dumps(
|
||||||
updated_network_function_device[
|
updated_network_function_device[
|
||||||
'provider_metadata'])
|
'provider_metadata'])
|
||||||
if updated_network_function_device.get('mgmt_port_id'):
|
if updated_network_function_device.get('mgmt_port_id'):
|
||||||
self._set_mgmt_port_for_nfd(
|
self._set_mgmt_port_for_nfd(
|
||||||
session,
|
session,
|
||||||
@@ -347,9 +357,9 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
updated_network_function_device,
|
updated_network_function_device,
|
||||||
is_update=True)
|
is_update=True)
|
||||||
self._set_provider_metadata_for_nfd(
|
self._set_provider_metadata_for_nfd(
|
||||||
session, network_function_device_db,
|
session, network_function_device_db,
|
||||||
updated_network_function_device,
|
updated_network_function_device,
|
||||||
is_update=True)
|
is_update=True)
|
||||||
mgmt_port_id = (
|
mgmt_port_id = (
|
||||||
updated_network_function_device.pop('mgmt_port_id', None))
|
updated_network_function_device.pop('mgmt_port_id', None))
|
||||||
if mgmt_port_id:
|
if mgmt_port_id:
|
||||||
@@ -365,7 +375,7 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
network_function_device_db.update(updated_network_function_device)
|
network_function_device_db.update(updated_network_function_device)
|
||||||
updated_network_function_device['mgmt_port_id'] = mgmt_port_id
|
updated_network_function_device['mgmt_port_id'] = mgmt_port_id
|
||||||
updated_network_function_device[
|
updated_network_function_device[
|
||||||
'monitoring_port_id'] = monitoring_port_id
|
'monitoring_port_id'] = monitoring_port_id
|
||||||
|
|
||||||
return self._make_network_function_device_dict(
|
return self._make_network_function_device_dict(
|
||||||
network_function_device_db)
|
network_function_device_db)
|
||||||
@@ -418,10 +428,10 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
value = network_function_device[field_name]
|
value = network_function_device[field_name]
|
||||||
value += updated_value
|
value += updated_value
|
||||||
update_device = (
|
update_device = (
|
||||||
{field_name: value})
|
{field_name: value})
|
||||||
self.update_network_function_device(session,
|
self.update_network_function_device(session,
|
||||||
network_function_device_id,
|
network_function_device_id,
|
||||||
update_device)
|
update_device)
|
||||||
|
|
||||||
def decrement_network_function_device_count(self, session,
|
def decrement_network_function_device_count(self, session,
|
||||||
network_function_device_id,
|
network_function_device_id,
|
||||||
@@ -433,10 +443,10 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
value = network_function_device[field_name]
|
value = network_function_device[field_name]
|
||||||
value -= updated_value
|
value -= updated_value
|
||||||
update_device = (
|
update_device = (
|
||||||
{field_name: value})
|
{field_name: value})
|
||||||
self.update_network_function_device(session,
|
self.update_network_function_device(session,
|
||||||
network_function_device_id,
|
network_function_device_id,
|
||||||
update_device)
|
update_device)
|
||||||
|
|
||||||
def get_port_info(self, session, port_id, fields=None):
|
def get_port_info(self, session, port_id, fields=None):
|
||||||
port_info = self._get_port_info(session, port_id)
|
port_info = self._get_port_info(session, port_id)
|
||||||
@@ -467,119 +477,6 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
return self._get_by_id(
|
return self._get_by_id(
|
||||||
session, nfp_db_model.NetworkInfo, network_id)
|
session, nfp_db_model.NetworkInfo, network_id)
|
||||||
|
|
||||||
def _set_plugged_in_port_for_nfd_interface(self, session, nfd_interface_db,
|
|
||||||
interface, is_update=False):
|
|
||||||
plugged_in_port_id = interface.get('plugged_in_port_id')
|
|
||||||
if not plugged_in_port_id:
|
|
||||||
if not is_update:
|
|
||||||
nfd_interface_db.plugged_in_port_id = None
|
|
||||||
return
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
port_info_db = nfp_db_model.PortInfo(
|
|
||||||
id=plugged_in_port_id['id'],
|
|
||||||
port_model=plugged_in_port_id['port_model'],
|
|
||||||
port_classification=plugged_in_port_id['port_classification'],
|
|
||||||
port_role=plugged_in_port_id['port_role'])
|
|
||||||
if is_update:
|
|
||||||
session.merge(port_info_db)
|
|
||||||
else:
|
|
||||||
session.add(port_info_db)
|
|
||||||
session.flush()
|
|
||||||
nfd_interface_db.plugged_in_port_id = port_info_db['id']
|
|
||||||
del interface['plugged_in_port_id']
|
|
||||||
|
|
||||||
def create_network_function_device_interface(self, session,
|
|
||||||
nfd_interface):
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
mapped_real_port_id = nfd_interface.get('mapped_real_port_id')
|
|
||||||
nfd_interface_db = nfp_db_model.NetworkFunctionDeviceInterface(
|
|
||||||
id=(nfd_interface.get('id') or uuidutils.generate_uuid()),
|
|
||||||
tenant_id=nfd_interface['tenant_id'],
|
|
||||||
interface_position=nfd_interface['interface_position'],
|
|
||||||
mapped_real_port_id=mapped_real_port_id,
|
|
||||||
network_function_device_id=(
|
|
||||||
nfd_interface['network_function_device_id']))
|
|
||||||
self._set_plugged_in_port_for_nfd_interface(
|
|
||||||
session, nfd_interface_db, nfd_interface)
|
|
||||||
session.add(nfd_interface_db)
|
|
||||||
|
|
||||||
return self._make_network_function_device_interface_dict(
|
|
||||||
nfd_interface_db)
|
|
||||||
|
|
||||||
def update_network_function_device_interface(self, session,
|
|
||||||
nfd_interface_id,
|
|
||||||
updated_nfd_interface):
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
nfd_interface_db = self._get_network_function_device_interface(
|
|
||||||
session, nfd_interface_id)
|
|
||||||
self._set_plugged_in_port_for_nfd_interface(
|
|
||||||
session, nfd_interface_db, updated_nfd_interface,
|
|
||||||
is_update=True)
|
|
||||||
nfd_interface_db.update(updated_nfd_interface)
|
|
||||||
return self._make_network_function_device_interface_dict(
|
|
||||||
nfd_interface_db)
|
|
||||||
|
|
||||||
def delete_network_function_device_interface(
|
|
||||||
self, session, network_function_device_interface_id):
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
network_function_device_interface_db = (
|
|
||||||
self._get_network_function_device_interface(
|
|
||||||
session, network_function_device_interface_id))
|
|
||||||
if network_function_device_interface_db.plugged_in_port_id:
|
|
||||||
self.delete_port_info(
|
|
||||||
session,
|
|
||||||
network_function_device_interface_db.plugged_in_port_id)
|
|
||||||
session.delete(network_function_device_interface_db)
|
|
||||||
|
|
||||||
def _get_network_function_device_interface(self, session,
|
|
||||||
network_function_device_interface_id):
|
|
||||||
try:
|
|
||||||
return self._get_by_id(
|
|
||||||
session,
|
|
||||||
nfp_db_model.NetworkFunctionDeviceInterface,
|
|
||||||
network_function_device_interface_id)
|
|
||||||
except exc.NoResultFound:
|
|
||||||
raise nfp_exc.NetworkFunctionDeviceInterfaceNotFound(
|
|
||||||
network_function_device_interface_id=(
|
|
||||||
network_function_device_interface_id))
|
|
||||||
|
|
||||||
def get_network_function_device_interface(
|
|
||||||
self, session, network_function_device_interface_id,
|
|
||||||
fields=None):
|
|
||||||
network_function_device_interface = (
|
|
||||||
self._get_network_function_device_interface(
|
|
||||||
session, network_function_device_interface_id))
|
|
||||||
return self._make_network_function_device_interface_dict(
|
|
||||||
network_function_device_interface, fields)
|
|
||||||
|
|
||||||
def get_network_function_device_interfaces(self, session, filters=None,
|
|
||||||
fields=None, sorts=None,
|
|
||||||
limit=None, marker=None,
|
|
||||||
page_reverse=False):
|
|
||||||
marker_obj = self._get_marker_obj(
|
|
||||||
'network_function_device_interfaces', limit, marker)
|
|
||||||
return self._get_collection(
|
|
||||||
session,
|
|
||||||
nfp_db_model.NetworkFunctionDeviceInterface,
|
|
||||||
self._make_network_function_device_interface_dict,
|
|
||||||
filters=filters, fields=fields,
|
|
||||||
sorts=sorts, limit=limit,
|
|
||||||
marker_obj=marker_obj,
|
|
||||||
page_reverse=page_reverse)
|
|
||||||
|
|
||||||
def _make_network_function_device_interface_dict(self, nfd_interface,
|
|
||||||
fields=None):
|
|
||||||
res = {
|
|
||||||
'id': nfd_interface['id'],
|
|
||||||
'tenant_id': nfd_interface['tenant_id'],
|
|
||||||
'plugged_in_port_id': nfd_interface['plugged_in_port_id'],
|
|
||||||
'interface_position': nfd_interface['interface_position'],
|
|
||||||
'mapped_real_port_id': nfd_interface['mapped_real_port_id'],
|
|
||||||
'network_function_device_id': (
|
|
||||||
nfd_interface['network_function_device_id'])
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
|
|
||||||
def _make_port_info_dict(self, port_info, fields):
|
def _make_port_info_dict(self, port_info, fields):
|
||||||
res = {
|
res = {
|
||||||
'id': port_info['id'],
|
'id': port_info['id'],
|
||||||
@@ -665,21 +562,21 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
with session.begin(subtransactions=True):
|
with session.begin(subtransactions=True):
|
||||||
for cluster_info in cluster_infos:
|
for cluster_info in cluster_infos:
|
||||||
cluster_info = nfp_db_model.ClusterInfo(
|
cluster_info = nfp_db_model.ClusterInfo(
|
||||||
id=cluster_info['id'],
|
id=cluster_info['id'],
|
||||||
tenant_id=cluster_info['tenant_id'],
|
tenant_id=cluster_info['tenant_id'],
|
||||||
network_function_device_id=cluster_info[
|
network_function_device_id=cluster_info[
|
||||||
'network_function_device_id'],
|
'network_function_device_id'],
|
||||||
cluster_group=cluster_info['cluster_group'],
|
cluster_group=cluster_info['cluster_group'],
|
||||||
virtual_ip=cluster_info['virtual_ip'],
|
virtual_ip=cluster_info['virtual_ip'],
|
||||||
multicast_ip=cluster_info.get('multicast_ip', None),
|
multicast_ip=cluster_info.get('multicast_ip', None),
|
||||||
cluster_name=cluster_info.get('cluster_name', None))
|
cluster_name=cluster_info.get('cluster_name', None))
|
||||||
session.add(cluster_info)
|
session.add(cluster_info)
|
||||||
|
|
||||||
def get_cluster_info(self, session, _id):
|
def get_cluster_info(self, session, _id):
|
||||||
try:
|
try:
|
||||||
return self._get_by_id(
|
return self._get_by_id(
|
||||||
session,
|
session,
|
||||||
nfp_db_model.ClusterInfo, _id)
|
nfp_db_model.ClusterInfo, _id)
|
||||||
except exc.NoResultFound:
|
except exc.NoResultFound:
|
||||||
raise nfp_exc.ClusterInfoNotFound(id=_id)
|
raise nfp_exc.ClusterInfoNotFound(id=_id)
|
||||||
|
|
||||||
@@ -687,7 +584,7 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
sorts=None, limit=None, marker=None,
|
sorts=None, limit=None, marker=None,
|
||||||
page_reverse=False):
|
page_reverse=False):
|
||||||
marker_obj = self._get_marker_obj(
|
marker_obj = self._get_marker_obj(
|
||||||
'nfd_cluster_mapping_info', limit, marker)
|
'nfd_cluster_mapping_info', limit, marker)
|
||||||
return self._get_collection(session,
|
return self._get_collection(session,
|
||||||
nfp_db_model.ClusterInfo,
|
nfp_db_model.ClusterInfo,
|
||||||
self._get_cluster_info_dict,
|
self._get_cluster_info_dict,
|
||||||
@@ -719,27 +616,27 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
|
|
||||||
def add_service_gateway_details(self, session, service_gw_details):
|
def add_service_gateway_details(self, session, service_gw_details):
|
||||||
primary_gw_vip_pt, secondary_gw_vip_pt = self._get_vip_pt_ids(
|
primary_gw_vip_pt, secondary_gw_vip_pt = self._get_vip_pt_ids(
|
||||||
service_gw_details.get('gateway_vips'))
|
service_gw_details.get('gateway_vips'))
|
||||||
if isinstance(service_gw_details['primary_instance_gw_pt'], dict):
|
if isinstance(service_gw_details['primary_instance_gw_pt'], dict):
|
||||||
primary_instance_gw_pt = service_gw_details[
|
primary_instance_gw_pt = service_gw_details[
|
||||||
'primary_instance_gw_pt']['id']
|
'primary_instance_gw_pt']['id']
|
||||||
secondary_instance_gw_pt = service_gw_details.get(
|
secondary_instance_gw_pt = service_gw_details.get(
|
||||||
'secondary_instance_gw_pt', {}).get('id')
|
'secondary_instance_gw_pt', {}).get('id')
|
||||||
else:
|
else:
|
||||||
primary_instance_gw_pt = service_gw_details[
|
primary_instance_gw_pt = service_gw_details[
|
||||||
'primary_instance_gw_pt']
|
'primary_instance_gw_pt']
|
||||||
secondary_instance_gw_pt = service_gw_details.get(
|
secondary_instance_gw_pt = service_gw_details.get(
|
||||||
'secondary_instance_gw_pt')
|
'secondary_instance_gw_pt')
|
||||||
with session.begin(subtransactions=True):
|
with session.begin(subtransactions=True):
|
||||||
gw_detail = nfp_db_model.ServiceGatewayDetails(
|
gw_detail = nfp_db_model.ServiceGatewayDetails(
|
||||||
id=service_gw_details['id'],
|
id=service_gw_details['id'],
|
||||||
network_function_id=service_gw_details[
|
network_function_id=service_gw_details[
|
||||||
'network_function_id'],
|
'network_function_id'],
|
||||||
gateway_ptg=service_gw_details['gw_ptg'],
|
gateway_ptg=service_gw_details['gw_ptg'],
|
||||||
primary_instance_gw_pt=primary_instance_gw_pt,
|
primary_instance_gw_pt=primary_instance_gw_pt,
|
||||||
secondary_instance_gw_pt=secondary_instance_gw_pt,
|
secondary_instance_gw_pt=secondary_instance_gw_pt,
|
||||||
primary_gw_vip_pt=primary_gw_vip_pt,
|
primary_gw_vip_pt=primary_gw_vip_pt,
|
||||||
secondary_gw_vip_pt=secondary_gw_vip_pt
|
secondary_gw_vip_pt=secondary_gw_vip_pt
|
||||||
)
|
)
|
||||||
session.add(gw_detail)
|
session.add(gw_detail)
|
||||||
return gw_detail
|
return gw_detail
|
||||||
@@ -762,7 +659,7 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
try:
|
try:
|
||||||
with session.begin(subtransactions=True):
|
with session.begin(subtransactions=True):
|
||||||
return self._get_gw_info_dict(session.query(svc_gw).filter(
|
return self._get_gw_info_dict(session.query(svc_gw).filter(
|
||||||
svc_gw.gateway_ptg == _id).all())
|
svc_gw.gateway_ptg == _id).all())
|
||||||
except exc.NoResultFound:
|
except exc.NoResultFound:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
@@ -771,7 +668,7 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
|
|||||||
try:
|
try:
|
||||||
with session.begin(subtransactions=True):
|
with session.begin(subtransactions=True):
|
||||||
return self._get_gw_info_dict(session.query(svc_gw).filter(
|
return self._get_gw_info_dict(session.query(svc_gw).filter(
|
||||||
svc_gw.network_function_id == nf_id).one())
|
svc_gw.network_function_id == nf_id).one())
|
||||||
except exc.NoResultFound:
|
except exc.NoResultFound:
|
||||||
LOG.warning(_LW("Gateway detail doesn't exist for Network Function"
|
LOG.warning(_LW("Gateway detail doesn't exist for Network Function"
|
||||||
" %s ") % nf_id)
|
" %s ") % nf_id)
|
||||||
|
@@ -80,7 +80,7 @@ class NSIPortAssociation(BASE):
|
|||||||
|
|
||||||
|
|
||||||
class NetworkFunctionInstance(BASE, model_base.HasId, model_base.HasTenant,
|
class NetworkFunctionInstance(BASE, model_base.HasId, model_base.HasTenant,
|
||||||
HasStatusDescription):
|
HasStatusDescription):
|
||||||
"""Represents the Network Function Instance"""
|
"""Represents the Network Function Instance"""
|
||||||
__tablename__ = 'nfp_network_function_instances'
|
__tablename__ = 'nfp_network_function_instances'
|
||||||
|
|
||||||
@@ -101,7 +101,7 @@ class NetworkFunctionInstance(BASE, model_base.HasId, model_base.HasTenant,
|
|||||||
|
|
||||||
|
|
||||||
class NetworkFunction(BASE, model_base.HasId, model_base.HasTenant,
|
class NetworkFunction(BASE, model_base.HasId, model_base.HasTenant,
|
||||||
HasStatusDescription):
|
HasStatusDescription):
|
||||||
"""Represents the Network Function object"""
|
"""Represents the Network Function object"""
|
||||||
__tablename__ = 'nfp_network_functions'
|
__tablename__ = 'nfp_network_functions'
|
||||||
|
|
||||||
@@ -118,7 +118,7 @@ class NetworkFunction(BASE, model_base.HasId, model_base.HasTenant,
|
|||||||
|
|
||||||
|
|
||||||
class NetworkFunctionDevice(BASE, model_base.HasId, model_base.HasTenant,
|
class NetworkFunctionDevice(BASE, model_base.HasId, model_base.HasTenant,
|
||||||
HasStatusDescription):
|
HasStatusDescription):
|
||||||
"""Represents the Network Function Device"""
|
"""Represents the Network Function Device"""
|
||||||
__tablename__ = 'nfp_network_function_devices'
|
__tablename__ = 'nfp_network_function_devices'
|
||||||
|
|
||||||
@@ -127,15 +127,15 @@ class NetworkFunctionDevice(BASE, model_base.HasId, model_base.HasTenant,
|
|||||||
mgmt_ip_address = sa.Column(sa.String(36), nullable=True)
|
mgmt_ip_address = sa.Column(sa.String(36), nullable=True)
|
||||||
mgmt_port_id = sa.Column(sa.String(36),
|
mgmt_port_id = sa.Column(sa.String(36),
|
||||||
sa.ForeignKey('nfp_port_infos.id',
|
sa.ForeignKey('nfp_port_infos.id',
|
||||||
ondelete= 'SET NULL'),
|
ondelete='SET NULL'),
|
||||||
nullable=True)
|
nullable=True)
|
||||||
monitoring_port_id = sa.Column(sa.String(36),
|
monitoring_port_id = sa.Column(sa.String(36),
|
||||||
sa.ForeignKey('nfp_port_infos.id',
|
sa.ForeignKey('nfp_port_infos.id',
|
||||||
ondelete= 'SET NULL'),
|
ondelete='SET NULL'),
|
||||||
nullable=True)
|
nullable=True)
|
||||||
monitoring_port_network = sa.Column(sa.String(36),
|
monitoring_port_network = sa.Column(sa.String(36),
|
||||||
sa.ForeignKey('nfp_network_infos.id',
|
sa.ForeignKey('nfp_network_infos.id',
|
||||||
ondelete= 'SET NULL'),
|
ondelete='SET NULL'),
|
||||||
nullable=True)
|
nullable=True)
|
||||||
service_vendor = sa.Column(sa.String(36), nullable=False, index=True)
|
service_vendor = sa.Column(sa.String(36), nullable=False, index=True)
|
||||||
max_interfaces = sa.Column(sa.Integer(), nullable=False)
|
max_interfaces = sa.Column(sa.Integer(), nullable=False)
|
||||||
@@ -145,24 +145,6 @@ class NetworkFunctionDevice(BASE, model_base.HasId, model_base.HasTenant,
|
|||||||
gateway_port = sa.Column(sa.String(36), nullable=True)
|
gateway_port = sa.Column(sa.String(36), nullable=True)
|
||||||
|
|
||||||
|
|
||||||
class NetworkFunctionDeviceInterface(BASE, model_base.HasId,
|
|
||||||
model_base.HasTenant):
|
|
||||||
"""Represents the Network Function Device"""
|
|
||||||
__tablename__ = 'nfp_network_function_device_interfaces'
|
|
||||||
|
|
||||||
plugged_in_port_id = sa.Column(sa.String(36),
|
|
||||||
sa.ForeignKey('nfp_port_infos.id',
|
|
||||||
ondelete='SET NULL'),
|
|
||||||
nullable=True)
|
|
||||||
interface_position = sa.Column(sa.Integer(), nullable=True)
|
|
||||||
mapped_real_port_id = sa.Column(sa.String(36), nullable=True)
|
|
||||||
network_function_device_id = sa.Column(
|
|
||||||
sa.String(36),
|
|
||||||
sa.ForeignKey('nfp_network_function_devices.id',
|
|
||||||
ondelete='SET NULL'),
|
|
||||||
nullable=True)
|
|
||||||
|
|
||||||
|
|
||||||
class ClusterInfo(BASE, model_base.HasId, model_base.HasTenant):
|
class ClusterInfo(BASE, model_base.HasId, model_base.HasTenant):
|
||||||
"""
|
"""
|
||||||
This table contains info about the ports participating in
|
This table contains info about the ports participating in
|
||||||
@@ -179,8 +161,8 @@ class ClusterInfo(BASE, model_base.HasId, model_base.HasTenant):
|
|||||||
class ServiceGatewayDetails(BASE, model_base.HasId):
|
class ServiceGatewayDetails(BASE, model_base.HasId):
|
||||||
__tablename__ = 'nfp_service_gateway_info'
|
__tablename__ = 'nfp_service_gateway_info'
|
||||||
network_function_id = sa.Column(sa.String(36), sa.ForeignKey(
|
network_function_id = sa.Column(sa.String(36), sa.ForeignKey(
|
||||||
'nfp_network_functions.id', ondelete='CASCADE'), nullable=False,
|
'nfp_network_functions.id', ondelete='CASCADE'), nullable=False,
|
||||||
primary_key=True)
|
primary_key=True)
|
||||||
gateway_ptg = sa.Column(sa.String(36), nullable=False)
|
gateway_ptg = sa.Column(sa.String(36), nullable=False)
|
||||||
primary_instance_gw_pt = sa.Column(sa.String(36), nullable=True)
|
primary_instance_gw_pt = sa.Column(sa.String(36), nullable=True)
|
||||||
secondary_instance_gw_pt = sa.Column(sa.String(36), nullable=True)
|
secondary_instance_gw_pt = sa.Column(sa.String(36), nullable=True)
|
||||||
|
@@ -14,13 +14,13 @@ import ast
|
|||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
from neutron._i18n import _LE
|
from neutron._i18n import _LE
|
||||||
from neutron._i18n import _LW
|
from neutron._i18n import _LW
|
||||||
from oslo_utils import excutils
|
|
||||||
|
|
||||||
from gbpservice.nfp.common import constants as nfp_constants
|
from gbpservice.nfp.common import constants as nfp_constants
|
||||||
from gbpservice.nfp.common import data_formatter as df
|
from gbpservice.nfp.common import data_formatter as df
|
||||||
from gbpservice.nfp.common import exceptions
|
from gbpservice.nfp.common import exceptions
|
||||||
from gbpservice.nfp.core import executor as nfp_executor
|
from gbpservice.nfp.core import executor as nfp_executor
|
||||||
from gbpservice.nfp.core import log as nfp_logging
|
from gbpservice.nfp.core import log as nfp_logging
|
||||||
|
from gbpservice.nfp.lib import nfp_context_manager as nfp_ctx_mgr
|
||||||
from gbpservice.nfp.orchestrator.coal.networking import (
|
from gbpservice.nfp.orchestrator.coal.networking import (
|
||||||
nfp_gbp_network_driver
|
nfp_gbp_network_driver
|
||||||
)
|
)
|
||||||
@@ -70,27 +70,25 @@ class OrchestrationDriver(object):
         self.config = config

     def _get_admin_tenant_id(self, token=None):
-        try:
+        with nfp_ctx_mgr.KeystoneContextManager as kcm:
             if not token:
-                token = self.identity_handler.get_admin_token()
-            admin_tenant_id = self.identity_handler.get_tenant_id(
-                token,
+                token = kcm.retry(
+                    self.identity_handler.get_admin_token, tries=3)
+            admin_tenant_name = (
                 self.config.nfp_keystone_authtoken.admin_tenant_name)
+            admin_tenant_id = kcm.retry(self.identity_handler.get_tenant_id,
+                                        token,
+                                        admin_tenant_name, tries=3)
             return admin_tenant_id
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Failed to get admin's tenant ID"))

     def _get_token(self, device_data_token):

-        try:
+        with nfp_ctx_mgr.KeystoneContextManager as kcm:
             token = (device_data_token
                      if device_data_token
-                     else self.identity_handler.get_admin_token())
-        except Exception:
-            LOG.error(_LE('Failed to get token'))
-            return None
-        return token
+                     else kcm.retry(
+                         self.identity_handler.get_admin_token, tries=3))
+            return token

     def _is_device_sharing_supported(self):
         return False
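Note: the KeystoneContextManager / kcm.retry(func, ..., tries=3) calls above come from the new gbpservice.nfp.lib.nfp_context_manager module. The snippet below is only a minimal sketch of how such a retry-capable context manager could behave; it is an assumption for illustration, not the module's actual code.

import time


class KeystoneRetryManager(object):
    """Illustrative stand-in for a retry-capable context manager.

    Sketch only; not the real gbpservice.nfp.lib.nfp_context_manager code.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False  # let exceptions propagate to the registered handler

    def retry(self, func, *args, **kwargs):
        # Call func(*args) up to 'tries' times; re-raise the last error.
        tries = kwargs.pop('tries', 1)
        for attempt in range(1, tries + 1):
            try:
                return func(*args, **kwargs)
            except Exception:
                if attempt == tries:
                    raise
                time.sleep(2)


# Usage shaped like the diff (get_admin_token here is hypothetical):
# with KeystoneRetryManager() as kcm:
#     token = kcm.retry(get_admin_token, tries=3)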
@@ -161,36 +159,26 @@ class OrchestrationDriver(object):
         token = self._get_token(device_data.get('token'))
         if not token:
             return None
-        try:
-            metadata = self.compute_handler_nova.get_image_metadata(
+        with nfp_ctx_mgr.NovaContextManager as ncm:
+            metadata = ncm.retry(self.compute_handler_nova.get_image_metadata,
                 token,
                 self._get_admin_tenant_id(token=token),
                 image_name)
-        except Exception as e:
-            LOG.error(_LE('Failed to get image metadata for image '
-                          'name: %(image_name)s. Error: %(error)s'),
-                      {'image_name': image_name, 'error': e})
-            return None
-        provider_metadata = self._verify_provider_metadata(image_name,
-                                                           metadata)
+            provider_metadata = self._verify_provider_metadata(
+                image_name, metadata)
         if not provider_metadata:
             return {}
         return provider_metadata

     def _get_provider_metadata_fast(self, token,
                                     admin_tenant_id, image_name, device_data):
-        try:
-            metadata = self.compute_handler_nova.get_image_metadata(
+        with nfp_ctx_mgr.NovaContextManager as ncm:
+            metadata = ncm.retry(self.compute_handler_nova.get_image_metadata,
                 token,
                 admin_tenant_id,
                 image_name)
-        except Exception as e:
-            LOG.error(_LE('Failed to get image metadata for image '
-                          'name: %(image_name)s. Error: %(error)s'),
-                      {'image_name': image_name, 'error': e})
-            return None
-        provider_metadata = self._verify_provider_metadata(image_name,
-                                                           metadata)
+            provider_metadata = self._verify_provider_metadata(
+                image_name, metadata)
         if not provider_metadata:
             return {}
         return provider_metadata
@@ -209,7 +197,7 @@ class OrchestrationDriver(object):
         try:
             image_name = self._get_image_name(device_data)
             provider_metadata = self._get_provider_metadata(device_data,
                                                             image_name)
             LOG.debug("Provider metadata, specified in image: %s"
                       % provider_metadata)
             if provider_metadata:
@@ -229,7 +217,7 @@ class OrchestrationDriver(object):
         return provider_metadata

     def _update_provider_metadata_fast(self, token, admin_tenant_id,
                                        image_name, device_data):
         provider_metadata = None
         try:
             provider_metadata = self._get_provider_metadata_fast(
@@ -249,7 +237,7 @@ class OrchestrationDriver(object):
         except Exception:
             LOG.error(_LE("Error while getting metadata for image name: "
                           "%(image_name)s, proceeding with default values"),
                       {'image_name': image_name})
         return provider_metadata

     def _get_image_name(self, device_data):
@@ -355,25 +343,29 @@ class OrchestrationDriver(object):
         provider_metadata_result = {}

         pre_launch_executor.add_job('UPDATE_PROVIDER_METADATA',
                                     self._update_provider_metadata_fast,
-                                    token, admin_tenant_id, image_name, device_data,
-                                    result_store=provider_metadata_result)
+                                    token, admin_tenant_id,
+                                    image_name, device_data,
+                                    result_store=provider_metadata_result)
         pre_launch_executor.add_job('GET_INTERFACES_FOR_DEVICE_CREATE',
                                     self._get_interfaces_for_device_create,
-                                    token, admin_tenant_id, network_handler, device_data)
+                                    token, admin_tenant_id,
+                                    network_handler, device_data)
         pre_launch_executor.add_job('GET_IMAGE_ID',
                                     self.get_image_id,
-                                    self.compute_handler_nova, token, admin_tenant_id,
-                                    image_name, result_store=image_id_result)
+                                    self.compute_handler_nova, token,
+                                    admin_tenant_id,
+                                    image_name, result_store=image_id_result)

         pre_launch_executor.fire()

         interfaces, image_id, provider_metadata = (
-            self._validate_pre_launch_executor_results(network_handler,
-                                                       device_data,
-                                                       image_name,
-                                                       image_id_result,
-                                                       provider_metadata_result))
+            self._validate_pre_launch_executor_results(
+                network_handler,
+                device_data,
+                image_name,
+                image_id_result,
+                provider_metadata_result))
         if not interfaces:
             return None

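Note: the add_job / fire / result_store pattern above runs the pre-launch steps concurrently and drops each job's return value into a caller-supplied dict. The sketch below is an illustrative stand-in for that pattern; it is not the real gbpservice.nfp.core.executor implementation, and threading is used here only to keep the example self-contained.

import threading


class SimpleTaskExecutor(object):
    """Illustrative parallel job executor with optional result stores."""

    def __init__(self):
        self._jobs = []

    def add_job(self, name, func, *args, **kwargs):
        # result_store, if given, is a dict the caller can read after fire().
        result_store = kwargs.pop('result_store', None)
        self._jobs.append((name, func, args, kwargs, result_store))

    def _run(self, name, func, args, kwargs, result_store):
        try:
            result = func(*args, **kwargs)
        except Exception as exc:
            result = None
            print("job %s failed: %s" % (name, exc))
        if result_store is not None:
            result_store['result'] = result

    def fire(self):
        # Start every job, then wait for all of them to finish.
        threads = [threading.Thread(target=self._run, args=job)
                   for job in self._jobs]
        for t in threads:
            t.start()
        for t in threads:
            t.join()


# Usage shaped like the diff (get_image_id here is hypothetical):
# image_id_result = {}
# executor = SimpleTaskExecutor()
# executor.add_job('GET_IMAGE_ID', get_image_id, 'image-name',
#                  result_store=image_id_result)
# executor.fire()
# image_id = image_id_result.get('result')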
@@ -386,10 +378,10 @@ class OrchestrationDriver(object):
             interfaces_to_attach.append({'port': interface['port_id']})
             if provider_metadata.get('supports_hotplug') is False:
                 self._update_interfaces_for_non_hotplug_support(
                     network_handler,
                     interfaces,
                     interfaces_to_attach,
                     device_data)
         except Exception as e:
             LOG.error(_LE('Failed to fetch list of interfaces to attach'
                           ' for device creation %(error)s'), {'error': e})
@@ -405,30 +397,31 @@ class OrchestrationDriver(object):
         volume_support = device_data['volume_support']
         volume_size = device_data['volume_size']
         create_instance_executor.add_job(
             'CREATE_INSTANCE', self.create_instance,
             self.compute_handler_nova, token,
             admin_tenant_id, image_id, flavor,
             interfaces_to_attach, instance_name,
             volume_support, volume_size,
             files=device_data.get('files'),
             user_data=device_data.get('user_data'),
             result_store=instance_id_result)

         create_instance_executor.add_job(
             'GET_NEUTRON_PORT_DETAILS',
             self.get_neutron_port_details,
             network_handler, token,
             management_interface['port_id'],
             result_store=port_details_result)

         create_instance_executor.fire()

         instance_id, mgmt_neutron_port_info = (
-            self._validate_create_instance_executor_results(network_handler,
-                                                            device_data,
-                                                            interfaces,
-                                                            instance_id_result,
-                                                            port_details_result))
+            self._validate_create_instance_executor_results(
+                network_handler,
+                device_data,
+                interfaces,
+                instance_id_result,
+                port_details_result))
         if not instance_id:
             return None

@@ -527,29 +520,29 @@ class OrchestrationDriver(object):
         enable_port_security = device_data.get('enable_port_security')
         if not device_data['interfaces_to_attach']:
             for port in device_data['ports']:
                 if (port['port_classification'] ==
                         nfp_constants.PROVIDER):
                     if (device_data['service_details'][
                             'service_type'].lower()
                             in [nfp_constants.FIREWALL.lower(),
                                 nfp_constants.VPN.lower()]):
                         network_handler.set_promiscuos_mode(
                             token, port['id'], enable_port_security)
                     port_id = network_handler.get_port_id(
                         token, port['id'])
                     interfaces_to_attach.append({'port': port_id})
             for port in device_data['ports']:
                 if (port['port_classification'] ==
                         nfp_constants.CONSUMER):
                     if (device_data['service_details'][
                             'service_type'].lower()
                             in [nfp_constants.FIREWALL.lower(),
                                 nfp_constants.VPN.lower()]):
                         network_handler.set_promiscuos_mode(
                             token, port['id'], enable_port_security)
                     port_id = network_handler.get_port_id(
                         token, port['id'])
                     interfaces_to_attach.append({'port': port_id})
         else:
             for interface in device_data['interfaces_to_attach']:
                 interfaces_to_attach.append(
@@ -557,11 +550,11 @@ class OrchestrationDriver(object):
                 interfaces.append({'id': interface['id']})

     def _validate_create_instance_executor_results(self,
                                                    network_handler,
                                                    device_data,
                                                    interfaces,
                                                    instance_id_result,
                                                    port_details_result):
         token = device_data['token']
         admin_tenant_id = device_data['admin_tenant_id']
         instance_id = instance_id_result.get('result', None)
@@ -577,17 +570,11 @@ class OrchestrationDriver(object):

         if not mgmt_neutron_port_info:
             LOG.error(_LE('Failed to get management port details. '))
-            try:
-                self.compute_handler_nova.delete_instance(
+            with nfp_ctx_mgr.NovaContextManager as ncm:
+                ncm.retry(self.compute_handler_nova.delete_instance,
                     token,
                     admin_tenant_id,
                     instance_id)
-            except Exception as e:
-                LOG.error(_LE('Failed to delete %(device_type)s instance.'
-                              'Error: %(error)s'),
-                          {'device_type': (
-                              device_data['service_details']['device_type']),
-                           'error': e})
             self._delete_interfaces(device_data, interfaces,
                                     network_handler=network_handler)
             return None, _
@@ -644,15 +631,13 @@ class OrchestrationDriver(object):
             #
             # this method will be invoked again
             # once the device instance deletion is completed
-            try:
-                self.compute_handler_nova.delete_instance(
-                    token,
-                    device_data['tenant_id'],
-                    device_data['id'])
-            except Exception:
-                LOG.error(_LE('Failed to delete %(instance)s instance'),
-                          {'instance':
-                           device_data['service_details']['device_type']})
+            with nfp_ctx_mgr.NovaContextManager.new(
+                    suppress=(Exception,)) as ncm:
+
+                ncm.retry(self.compute_handler_nova.delete_instance,
+                          token,
+                          device_data['tenant_id'],
+                          device_data['id'])
         else:
             # device instance deletion is done, delete remaining resources
             try:
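Note: NovaContextManager.new(suppress=(Exception,)) replaces the old try/except blocks that only logged the failure. Below is a hedged sketch of what a 'suppress' option could map to in a context manager's __exit__; the real nfp_context_manager may implement this differently.

class SuppressingContextManager(object):
    """Illustrative only: swallow the exception types passed as 'suppress'."""

    def __init__(self, suppress=()):
        self.suppress = tuple(suppress)

    @classmethod
    def new(cls, suppress=()):
        return cls(suppress=suppress)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # A truthy return value tells Python to suppress the exception,
        # so callers fall through instead of handling it themselves.
        return exc_type is not None and issubclass(exc_type, self.suppress)


# with SuppressingContextManager.new(suppress=(Exception,)) as cm:
#     raise RuntimeError('ignored')  # swallowed; execution continues after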
@@ -703,19 +688,13 @@ class OrchestrationDriver(object):
         if not token:
             return None

-        try:
-            device = self.compute_handler_nova.get_instance(
+        with nfp_ctx_mgr.NovaContextManager.new(suppress=(Exception,)) as ncm:
+            device = ncm.retry(self.compute_handler_nova.get_instance,
                 device_data['token'],
                 device_data['tenant_id'],
                 device_data['id'])
-        except Exception:
-            if ignore_failure:
-                return None
-            LOG.error(_LE('Failed to get %(instance)s instance details'),
-                      {'instance': device_data['service_details']['device_type']})
-            return None  # TODO(RPM): should we raise an Exception here?

         return device['status']

     @_set_network_handler
     def plug_network_function_device_interfaces(self, device_data,
@@ -868,9 +847,9 @@ class OrchestrationDriver(object):
         if image_name:
             provider_metadata = (
                 self._update_provider_metadata_fast(token,
                                                     device_data['tenant_id'],
                                                     image_name,
                                                     device_data))

             if not provider_metadata:
                 LOG.debug('Failed to get provider metadata for'
@@ -878,20 +857,15 @@ class OrchestrationDriver(object):

         if provider_metadata.get('supports_hotplug') is False:
             return True
-        try:
+        with nfp_ctx_mgr.NovaContextManager.new(suppress=(Exception,)) as ncm:
             for port in device_data['ports']:
                 port_id = network_handler.get_port_id(token, port['id'])
-                self.compute_handler_nova.detach_interface(
+                ncm.retry(self.compute_handler_nova.detach_interface,
                     token,
                     device_data['tenant_id'],
                     device_data['id'],
                     port_id)

-        except Exception as e:
-            LOG.error(_LE('Failed to unplug interface(s) from the device.'
-                          'Error: %(error)s'), {'error': e})
-            return None
-        else:
             return True

     @_set_network_handler
@@ -986,8 +960,9 @@ class OrchestrationDriver(object):

         if is_delete:
             device_data = self.get_delete_device_data(
                 device_data, network_handler=network_handler)
             if not device_data:
                 return None

         return df.get_network_function_info(
             device_data, resource_type)
@@ -12,6 +12,10 @@
 from oslo_config import cfg as oslo_config

 from gbpservice.nfp.common import constants as nfp_constants
+from gbpservice.nfp.core import context
+from gbpservice.nfp.orchestrator import context as module_context

+context.NfpContext = module_context.NfpContext
+
 openstack_opts = [
     oslo_config.StrOpt('auth_host',
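Note: the two new imports and the context.NfpContext = module_context.NfpContext assignment rebind the attribute on the core context module, so code that later imports gbpservice.nfp.core.context picks up the orchestrator's context class. A minimal, self-contained illustration of that rebinding pattern (placeholder names, not the NFP modules):

import types

# Placeholder stand-ins for the core and orchestrator context modules.
core_context = types.ModuleType('core_context')
orchestrator_context = types.ModuleType('orchestrator_context')


class CoreNfpContext(object):
    """Default context class published by the core module."""


class OrchestratorNfpContext(object):
    """Richer context class the orchestrator wants everyone to use."""


core_context.NfpContext = CoreNfpContext
orchestrator_context.NfpContext = OrchestratorNfpContext

# Mirrors 'context.NfpContext = module_context.NfpContext' from the diff:
core_context.NfpContext = orchestrator_context.NfpContext

# Any code that later reads core_context.NfpContext now gets the
# orchestrator's class.
assert core_context.NfpContext is OrchestratorNfpContext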
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -39,7 +39,7 @@ class OpenstackApi(object):
                                           config.bind_port))
         self.username = username or config.nfp_keystone_authtoken.admin_user
         self.password = password or (
             config.nfp_keystone_authtoken.admin_password)
         self.tenant_name = (tenant_name or
                             config.nfp_keystone_authtoken.admin_tenant_name)
         self.token = None
@@ -600,13 +600,13 @@ class NeutronClient(OpenstackApi):
             # 'security_group_id': 'c90c7b29-f653-4c41-ae1a-0290dc64e020'}
             sg_rule_info = {"security_group_rule": attrs}
             return neutron.create_security_group_rule(
                 body=sg_rule_info)['security_group_rule']
         except Exception as ex:
             err = ("Failed to get security groups from"
                    " Openstack Neutron service's response"
                    " KeyError :: %s" % (ex))
             LOG.error(err)
-            #raise Exception(err)
+            # raise Exception(err)

     def get_ports(self, token, filters=None):
         """ List Ports
@@ -938,7 +938,7 @@ class NeutronClient(OpenstackApi):
             neutron = neutron_client.Client(token=token,
                                             endpoint_url=self.network_service)
             loadbalancers = neutron.list_loadbalancers(**filters).get(
                 'loadbalancers', [])
             return loadbalancers
         except Exception as ex:
             err = ("Failed to read pool list from"
@@ -1272,7 +1272,7 @@ class GBPClient(OpenstackApi):
             gbp = gbp_client.Client(token=token,
                                     endpoint_url=self.network_service)
             return gbp.create_network_service_policy(
                 body=network_service_policy_info)['network_service_policy']
         except Exception as ex:
             err = ("Failed to create network service policy "
                    "Error :: %s" % (ex))
@@ -1294,7 +1294,7 @@
                                     endpoint_url=self.network_service)
             filters = filters if filters is not None else {}
             return gbp.list_network_service_policies(**filters)[
                 'network_service_policies']
         except Exception as ex:
             err = ("Failed to list network service policies. Reason %s" % ex)
             LOG.error(err)