NFP - Refactor to support extensions such as network device cluster
Refactor orchestrator to support the network function device cluster framework and more granular events.

Change-Id: Ifebd7a0c8ef2c4a5de6821e3e0b918dbfcb5820a
Co-Authored-By: ashutosh mishra <mca.ashu4@gmail.com>
Closes-Bug: 1645235

committed by: Subrahmanyam Ongole
parent: b6fc1a39c7
commit: be792291c1
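
The recurring pattern in this refactor: every configurator agent's driver lookup gains a third "service_feature" component, which is how cluster-aware driver variants get selected. A minimal sketch of the keying scheme follows (illustrative only, not code from the patch; the empty-string default mirrors the agent_info.get('service_feature', '') calls in the hunks below):

    # Driver registry keyed by service_type + service_vendor + service_feature.
    drivers = {}

    def register_driver(service_type, service_vendor, service_feature, driver_obj):
        drivers[service_type + service_vendor + service_feature] = driver_obj

    def get_driver(service_type, service_vendor, service_feature=''):
        # Before this patch the key was service_type + service_vendor only.
        return drivers[service_type + service_vendor + service_feature]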
@@ -21,6 +21,11 @@ log_forward_ip_address=
 log_forward_port=514
 log_level=debug

+[CONFIG_AGENTS]
+# Python path for top level directory that contain
+# config agents.
+agents=gbpservice.contrib.nfp.configurator.agents
+
 [CONFIG_DRIVERS]
 # Python path for top level directory that contain
 # config drivers.
@@ -14,6 +14,8 @@ import ast
 import copy

 from gbpservice.contrib.nfp.config_orchestrator.common import common
+from gbpservice.nfp.common import constants as const
+from gbpservice.nfp.common import data_formatter as df
 from gbpservice.nfp.core import log as nfp_logging
 from gbpservice.nfp.lib import transport

@@ -25,7 +27,7 @@ import oslo_messaging as messaging
 LOG = nfp_logging.getLogger(__name__)

 """
-RPC handler for Firwrall service
+RPC handler for Firewall service
 """


@@ -84,6 +86,7 @@ class FwAgent(firewall_db.Firewall_db_mixin):
     def _prepare_resource_context_dicts(self, **kwargs):
         # Prepare context_dict
         context = kwargs.get('context')
+        context_resource_data = kwargs.pop('context_resource_data')
         ctx_dict = context.to_dict()
         # Collecting db entry required by configurator.
         # Addind service_info to neutron context and sending
@@ -91,15 +94,30 @@ class FwAgent(firewall_db.Firewall_db_mixin):
         db = self._context(**kwargs)
         rsrc_ctx_dict = copy.deepcopy(ctx_dict)
         rsrc_ctx_dict.update({'service_info': db})
+        rsrc_ctx_dict.update({'resource_data': context_resource_data})
         return ctx_dict, rsrc_ctx_dict

+    def _get_resource_data(self, description, resource_type):
+        resource_data = df.get_network_function_info(description,
+                                                     resource_type)
+        return resource_data
+
+    def _update_request_data(self, body, description):
+        pass
+
     def _data_wrapper(self, context, firewall, host, nf, reason):
         # Hardcoding the position for fetching data since we are owning
         # its positional change
         description = ast.literal_eval((nf['description'].split('\n'))[1])
+        description.update({'tenant_id': firewall['tenant_id']})
+        context_resource_data = self._get_resource_data(description,
+                                                        const.FIREWALL)
         fw_mac = description['provider_ptg_info'][0]
+        # REVISIT(dpak): We need to avoid resource description
+        # dependency in OTC and instead use neutron context description.
         firewall.update({'description': str(description)})
         kwargs = {'context': context,
+                  'context_resource_data': context_resource_data,
                   'firewall_policy_id': firewall[
                       'firewall_policy_id'],
                   'description': str(description),
@@ -119,6 +137,7 @@ class FwAgent(firewall_db.Firewall_db_mixin):
         body = common.prepare_request_data(nfp_context, resource,
                                            resource_type, resource_data,
                                            description['service_vendor'])
+        self._update_request_data(body, description)
         return body

     def _fetch_nf_from_resource_desc(self, desc):
@@ -15,6 +15,7 @@ import copy

 from gbpservice.contrib.nfp.config_orchestrator.common import common
 from gbpservice.nfp.common import constants as const
+from gbpservice.nfp.common import data_formatter as df
 from gbpservice.nfp.core import log as nfp_logging
 from gbpservice.nfp.lib import transport

@@ -100,6 +101,7 @@ class LbAgent(loadbalancer_db.LoadBalancerPluginDb):
     def _prepare_resource_context_dicts(self, **kwargs):
         # Prepare context_dict
         context = kwargs.get('context')
+        context_resource_data = kwargs.pop('context_resource_data')
         ctx_dict = context.to_dict()
         # Collecting db entry required by configurator.
         # Addind service_info to neutron context and sending
@@ -107,11 +109,25 @@ class LbAgent(loadbalancer_db.LoadBalancerPluginDb):
         db = self._context(**kwargs)
         rsrc_ctx_dict = copy.deepcopy(ctx_dict)
         rsrc_ctx_dict.update({'service_info': db})
+        rsrc_ctx_dict.update({'resource_data': context_resource_data})
         return ctx_dict, rsrc_ctx_dict

+    def _get_resource_data(self, description, resource_type):
+        resource_data = df.get_network_function_info(description,
+                                                     resource_type)
+        return resource_data
+
+    def _update_request_data(self, body, description):
+        pass
+
     def _data_wrapper(self, context, tenant_id, name, reason, nf, **kwargs):
         nfp_context = {}
         description = ast.literal_eval((nf['description'].split('\n'))[1])
+        description.update({'tenant_id': tenant_id})
+        # REVISIT(dpak): We need to avoid resource description
+        # dependency in OTC and instead use neutron context description.
+        context_resource_data = self._get_resource_data(description,
+                                                        const.LOADBALANCER)
         if name.lower() == 'pool_health_monitor':
             pool_id = kwargs.get('pool_id')
             kwargs['health_monitor'].update({'description': str(description)})
@@ -130,7 +146,8 @@ class LbAgent(loadbalancer_db.LoadBalancerPluginDb):
         args = {'tenant_id': tenant_id,
                 'pool_id': pool_id,
                 'context': context,
-                'description': str(description)}
+                'description': str(description),
+                'context_resource_data': context_resource_data}
         ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
             **args)

@@ -145,6 +162,7 @@ class LbAgent(loadbalancer_db.LoadBalancerPluginDb):
         body = common.prepare_request_data(nfp_context, resource,
                                            resource_type, resource_data,
                                            description['service_vendor'])
+        self._update_request_data(body, description)
         return body

     def _post(self, context, tenant_id, name, nf, **kwargs):
@@ -16,6 +16,7 @@ import copy
 from gbpservice.contrib.nfp.config_orchestrator.common import common
 from gbpservice.contrib.nfp.config_orchestrator.common import lbv2_constants
 from gbpservice.nfp.common import constants as const
+from gbpservice.nfp.common import data_formatter as df
 from gbpservice.nfp.core import log as nfp_logging
 from gbpservice.nfp.lib import transport

@@ -93,7 +94,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
         kwargs['tenant_id'] = context.tenant_id
         core_db = self._get_core_context(context, kwargs['tenant_id'])
         # REVISIT(jiahao): _get_lb_context() fails for flavor_id, disable it
-        # for now. Sent the whole core_db to cofigurator
+        # for now. Sent the whole core_db to configurator
         # lb_db = self._get_lb_context(**kwargs)
         # db = self._filter_service_info_with_resource(lb_db, core_db)
         db = core_db
@@ -102,6 +103,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     def _prepare_resource_context_dicts(self, **kwargs):
         # Prepare context_dict
         context = kwargs.get('context')
+        context_resource_data = kwargs.pop('context_resource_data')
         ctx_dict = context.to_dict()
         # Collecting db entry required by configurator.
         # Addind service_info to neutron context and sending
@@ -109,11 +111,17 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
         db = self._context(**kwargs)
         rsrc_ctx_dict = copy.deepcopy(ctx_dict)
         rsrc_ctx_dict.update({'service_info': db})
+        rsrc_ctx_dict.update({'resource_data': context_resource_data})
         return ctx_dict, rsrc_ctx_dict

     def _data_wrapper(self, context, tenant_id, name, reason, nf, **kwargs):
         nfp_context = {}
         description = ast.literal_eval((nf['description'].split('\n'))[1])
+        description.update({'tenant_id': tenant_id})
+        context_resource_data = df.get_network_function_info(
+            description, const.LOADBALANCERV2)
+        # REVISIT(dpak): We need to avoid resource description
+        # dependency in OTC and instead use neutron context description.
         if name.lower() == 'loadbalancer':
             lb_id = kwargs['loadbalancer']['id']
             kwargs['loadbalancer'].update({'description': str(description)})
@@ -142,7 +150,8 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
         args = {'tenant_id': tenant_id,
                 'lb_id': lb_id,
                 'context': context,
-                'description': str(description)}
+                'description': str(description),
+                'context_resource_data': context_resource_data}

         ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(**args)

@@ -13,6 +13,9 @@
 import ast
 import copy
 from gbpservice.contrib.nfp.config_orchestrator.common import common
+from gbpservice.nfp.common import constants as const
+from gbpservice.nfp.common import data_formatter as df
+from gbpservice.nfp.common import utils
 from gbpservice.nfp.core import log as nfp_logging
 from gbpservice.nfp.lib import transport

@@ -38,14 +41,6 @@ class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin):
         self._sc = sc
         self._db_inst = super(VpnAgent, self)

-    def _get_dict_desc_from_string(self, vpn_svc):
-        svc_desc = vpn_svc.split(";")
-        desc = {}
-        for ele in svc_desc:
-            s_ele = ele.split("=")
-            desc.update({s_ele[0]: s_ele[1]})
-        return desc
-
     def _get_vpn_context(self, context, tenant_id, vpnservice_id,
                          ikepolicy_id, ipsecpolicy_id,
                          ipsec_site_conn_id, desc):
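
The helper removed above parsed the ';'-separated 'key=value' VPN service description inline; the patch replaces it with the shared utils.get_vpn_description_from_nf(nf), which also returns the raw string. A sketch of the format (illustrative; the key names are assumptions, since the old helpers indexed tokens by position rather than name):

    def parse_vpn_description(desc_str):
        # 'fip=192.168.20.75;tunnel_local_cidr=11.0.1.0/24' ->
        # {'fip': '192.168.20.75', 'tunnel_local_cidr': '11.0.1.0/24'}
        desc = {}
        for ele in desc_str.split(';'):
            key, value = ele.split('=')
            desc[key] = value
        return desc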
@@ -85,7 +80,8 @@ class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin):
         return None

     def _prepare_resource_context_dicts(self, context, tenant_id,
-                                        resource, resource_data):
+                                        resource, resource_data,
+                                        context_resource_data):
         # Prepare context_dict
         ctx_dict = context.to_dict()
         # Collecting db entry required by configurator.
@@ -95,15 +91,28 @@ class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin):
                                    resource_data)
         rsrc_ctx_dict = copy.deepcopy(ctx_dict)
         rsrc_ctx_dict.update({'service_info': db})
+        rsrc_ctx_dict.update({'resource_data': context_resource_data})
         return ctx_dict, rsrc_ctx_dict

+    def _get_resource_data(self, description, resource_type):
+        resource_data = df.get_network_function_info(description,
+                                                     resource_type)
+        return resource_data
+
+    def _update_request_data(self, body, description):
+        pass
+
     def _data_wrapper(self, context, tenant_id, nf, **kwargs):
         nfp_context = {}
-        str_description = nf['description'].split('\n')[1]
-        description = self._get_dict_desc_from_string(
-            str_description)
+        description, str_description = (
+            utils.get_vpn_description_from_nf(nf))
+        description.update({'tenant_id': tenant_id})
+        context_resource_data = self._get_resource_data(description,
+                                                        const.VPN)
         resource = kwargs['rsrc_type']
         resource_data = kwargs['resource']
+        # REVISIT(dpak): We need to avoid resource description
+        # dependency in OTC and instead use neutron context description.
         resource_data['description'] = str_description
         if resource.lower() == 'ipsec_site_connection':
             nfp_context = {'network_function_id': nf['id'],
@@ -112,7 +121,8 @@ class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin):

         ctx_dict, rsrc_ctx_dict = self.\
             _prepare_resource_context_dicts(context, tenant_id,
-                                            resource, resource_data)
+                                            resource, resource_data,
+                                            context_resource_data)
         nfp_context.update({'neutron_context': ctx_dict,
                             'requester': 'nas_service',
                             'logging_context':
@@ -122,6 +132,7 @@ class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin):
         body = common.prepare_request_data(nfp_context, resource,
                                            resource_type, kwargs,
                                            description['service_vendor'])
+        self._update_request_data(body, description)
         return body

     def _fetch_nf_from_resource_desc(self, desc):
@@ -192,13 +192,14 @@ class AgentBaseEventHandler(nfp_api.NfpEventHandler):
         context = agent_info['context']
         service_vendor = agent_info['service_vendor']
         service_type = agent_info['resource_type']
+        service_feature = agent_info['service_feature']
         if not is_generic_config:
             sa_req_list[0]['resource_data']['context'] = sa_req_list[
                 0]['resource_data'].pop('neutron_context')

         # Get the service driver and invoke its method
-        driver = self._get_driver(service_type, service_vendor)
+        driver = self._get_driver(service_type, service_vendor,
+                                  service_feature)

         # Service driver should return "success" on successful API
         # processing. All other return values and exceptions are
@@ -182,12 +182,12 @@ class FWaasEventHandler(nfp_api.NfpEventHandler):
         self.plugin_rpc = FwaasRpcSender(sc, self.host,
                                          self.drivers, self.rpcmgr)

-    def _get_driver(self, service_vendor):
+    def _get_driver(self, service_vendor, service_feature):
         """ Retrieves driver object given the service type

         """

-        driver_id = const.SERVICE_TYPE + service_vendor
+        driver_id = const.SERVICE_TYPE + service_vendor + service_feature
         return self.drivers[driver_id]

     def _is_firewall_rule_exists(self, fw):
@@ -222,8 +222,10 @@ class FWaasEventHandler(nfp_api.NfpEventHandler):
         # the API context alongside other relevant information like
         # service vendor and type. Agent info is constructed inside
         # the demuxer library.
-        service_vendor = ev.data['context']['agent_info']['service_vendor']
-        driver = self._get_driver(service_vendor)
+        agent_info = ev.data['context']['agent_info']
+        service_vendor = agent_info['service_vendor']
+        service_feature = agent_info.get('service_feature', '')
+        driver = self._get_driver(service_vendor, service_feature)

         self.method = getattr(driver, "%s" % (ev.id.lower()))
         self.invoke_driver_for_plugin_api(ev)
@@ -370,6 +372,8 @@ def load_drivers(conf):
         driver_obj = driver_name(conf=conf)
         drivers[service_type] = driver_obj

+    msg = ("Firewall loaded drivers: %s" % drivers)
+    LOG.info(msg)
     return drivers

@@ -16,6 +16,7 @@ from gbpservice.contrib.nfp.configurator.agents import agent_base
 from gbpservice.contrib.nfp.configurator.lib import (
     generic_config_constants as gen_cfg_const)
 from gbpservice.contrib.nfp.configurator.lib import constants as common_const
+from gbpservice.contrib.nfp.configurator.lib import data_parser
 from gbpservice.contrib.nfp.configurator.lib import utils
 from gbpservice.nfp.core import event as nfp_event
 from gbpservice.nfp.core import log as nfp_logging
@@ -49,6 +50,7 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):

         """

+        self.parse = data_parser.DataParser()
         super(GenericConfigRpcManager, self).__init__(sc, conf)

     def _send_event(self, context, resource_data, event_id, event_key=None):
@@ -142,7 +144,7 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):
         self._send_event(context,
                          resource_data,
                          gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR,
-                         resource_data['vmid'])
+                         resource_data['nfds'][0]['vmid'])

     def clear_healthmonitor(self, context, resource_data):
         """Enqueues event for worker to process clear healthmonitor request.
@@ -158,7 +160,7 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):
         self._send_event(context,
                          resource_data,
                          gen_cfg_const.EVENT_CLEAR_HEALTHMONITOR,
-                         resource_data['vmid'])
+                         resource_data['nfds'][0]['vmid'])


 class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
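
Event keys and health-monitor fields now come from the first element of an 'nfds' (network function devices) list instead of flat keys, which is what lets one request describe a device cluster. A sketch of the payload shape these handlers expect (illustrative; the exact field set and the 'initial'/'forever' literals are assumptions standing in for gen_cfg_const.INITIAL/FOREVER):

    resource_data = {
        'tenant_id': 'some-tenant-id',
        'nfds': [
            {'vmid': 'vm-1', 'mgmt_ip': '11.0.0.4', 'periodicity': 'initial'},
            {'vmid': 'vm-2', 'mgmt_ip': '11.0.0.5', 'periodicity': 'forever'},
        ],
    }

    event_key = resource_data['nfds'][0]['vmid']   # was resource_data['vmid']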
@@ -175,7 +177,7 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
                                                          sc, drivers, rpcmgr)
         self.sc = sc

-    def _get_driver(self, service_type, service_vendor):
+    def _get_driver(self, service_type, service_vendor, service_feature):
         """Retrieves service driver object based on service type input.

         Currently, service drivers are identified with service type. Support
@@ -189,7 +191,7 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,

         """

-        return self.drivers[service_type + service_vendor]
+        return self.drivers[service_type + service_vendor + service_feature]

     def handle_event(self, ev):
         """Processes the generated events in worker context.
@@ -218,7 +220,7 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
         # Process HM poll events
         elif ev.id == gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR:
             resource_data = ev.data.get('resource_data')
-            periodicity = resource_data.get('periodicity')
+            periodicity = resource_data['nfds'][0]['periodicity']
             EV_CONF_HM_MAXRETRY = (
                 gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY)
             if periodicity == gen_cfg_const.INITIAL:
@@ -246,6 +248,7 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
         context = agent_info['context']
         service_type = agent_info['resource_type']
         service_vendor = agent_info['service_vendor']
+        service_feature = agent_info.get('service_feature', '')

         try:
             msg = ("Worker process with ID: %s starting "
@@ -253,7 +256,8 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
                    % (os.getpid(), ev.id, str(service_type)))
             LOG.debug(msg)

-            driver = self._get_driver(service_type, service_vendor)
+            driver = self._get_driver(service_type, service_vendor,
+                                      service_feature)

             # Invoke service driver methods based on event type received
             result = getattr(driver, "%s" % ev.id.lower())(context,
@@ -265,12 +269,14 @@ class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
             result = common_const.FAILED

         if ev.id == gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR:
-            if (resource_data.get('periodicity') == gen_cfg_const.INITIAL and
+            if (resource_data['nfds'][0][
+                    'periodicity'] == gen_cfg_const.INITIAL and
                     result == common_const.SUCCESS):
                 notification_data = self._prepare_notification_data(ev, result)
                 self.notify._notification(notification_data)
                 return {'poll': False}
-            elif resource_data.get('periodicity') == gen_cfg_const.FOREVER:
+            elif resource_data['nfds'][0][
+                    'periodicity'] == gen_cfg_const.FOREVER:
                 if result == common_const.FAILED:
                     """If health monitoring fails continuously for 5 times
                        send fail notification to orchestrator
@@ -419,6 +425,8 @@ def load_drivers(conf):
         driver_obj = driver_name(conf=conf)
         drivers[service_type] = driver_obj

+    msg = ("Generic config agent loaded drivers: %s" % drivers)
+    LOG.info(msg)
     return drivers

@@ -408,7 +408,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
         """
         self.context = context.get_admin_context_without_session()

-    def _get_driver(self, service_vendor):
+    def _get_driver(self, service_vendor, service_feature):
         """Retrieves service driver instance based on service type
         and service vendor.

@@ -417,7 +417,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
         Returns: Service driver instance

         """
-        driver = lb_constants.SERVICE_TYPE + service_vendor
+        driver = lb_constants.SERVICE_TYPE + service_vendor + service_feature
         return self.drivers[driver]

     def handle_event(self, ev):
@@ -458,7 +458,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
             """
             pass
         else:
-            msg = ("Successfully handled event %s" % (ev.id))
+            msg = ("Completed handling event %s" % (ev.id))
             LOG.info(msg)
         self.sc.event_complete(ev)

@@ -468,7 +468,8 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
         vip = data['vip']
         agent_info = ev.data['context'].pop('agent_info')
         service_vendor = agent_info['service_vendor']
-        driver = self._get_driver(service_vendor)
+        service_feature = agent_info['service_feature']
+        driver = self._get_driver(service_vendor, service_feature)

         try:
             if operation == lb_constants.CREATE:
@@ -481,7 +482,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
                 self.plugin_rpc.vip_deleted(vip,
                                             lb_constants.ACTIVE, agent_info)
                 return  # Don't update object status for delete operation
-        except Exception:
+        except Exception as e:
             if operation == lb_constants.DELETE:
                 msg = ("Failed to delete vip %s" % (vip['id']))
                 self.plugin_rpc.vip_deleted(vip,
@@ -491,6 +492,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
                 self.plugin_rpc.update_status('vip', vip['id'],
                                               lb_constants.ERROR,
                                               agent_info, vip)
+                raise e
         else:
             self.plugin_rpc.update_status('vip', vip['id'],
                                           lb_constants.ACTIVE,
@@ -511,10 +513,11 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
         pool = data['pool']
         agent_info = context.pop('agent_info')
         service_vendor = agent_info['service_vendor']
+        service_feature = agent_info['service_feature']
         try:
             if operation == lb_constants.CREATE:
                 driver_name = data['driver_name']
-                driver_id = driver_name + service_vendor
+                driver_id = driver_name + service_vendor + service_feature
                 if (driver_id) not in self.drivers.keys():
                     msg = ('No device driver on agent: %s.' % (driver_name))
                     LOG.error(msg)
@@ -527,14 +530,16 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
                 LBaaSEventHandler.instance_mapping[pool['id']] = driver_name
             elif operation == lb_constants.UPDATE:
                 old_pool = data['old_pool']
-                driver = self._get_driver(service_vendor)  # pool['id'])
+                driver = self._get_driver(service_vendor,
+                                          service_feature)  # pool['id'])
                 driver.update_pool(old_pool, pool, context)
             elif operation == lb_constants.DELETE:
-                driver = self._get_driver(service_vendor)  # pool['id'])
+                driver = self._get_driver(service_vendor,
+                                          service_feature)  # pool['id'])
                 driver.delete_pool(pool, context)
                 del LBaaSEventHandler.instance_mapping[pool['id']]
                 return  # Don't update object status for delete operation
-        except Exception:
+        except Exception as e:
             if operation == lb_constants.DELETE:
                 msg = ("Failed to delete pool %s" % (pool['id']))
                 LOG.warn(msg)
@@ -543,6 +548,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
                 self.plugin_rpc.update_status('pool', pool['id'],
                                               lb_constants.ERROR,
                                               agent_info, pool)
+                raise e
         else:
             self.plugin_rpc.update_status('pool', pool['id'],
                                           lb_constants.ACTIVE,
@@ -563,7 +569,9 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
         member = data['member']
         agent_info = ev.data['context'].pop('agent_info')
         service_vendor = agent_info['service_vendor']
-        driver = self._get_driver(service_vendor)  # member['pool_id'])
+        service_feature = agent_info['service_feature']
+        driver = self._get_driver(service_vendor,
+                                  service_feature)  # member['pool_id'])
         try:
             if operation == lb_constants.CREATE:
                 driver.create_member(member, context)
@@ -573,7 +581,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
             elif operation == lb_constants.DELETE:
                 driver.delete_member(member, context)
                 return  # Don't update object status for delete operation
-        except Exception:
+        except Exception as e:
             if operation == lb_constants.DELETE:
                 msg = ("Failed to delete member %s" % (member['id']))
                 LOG.warn(msg)
@@ -581,6 +589,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
                 self.plugin_rpc.update_status('member', member['id'],
                                               lb_constants.ERROR,
                                               agent_info, member)
+                raise e
         else:
             self.plugin_rpc.update_status('member', member['id'],
                                           lb_constants.ACTIVE,
@@ -602,7 +611,8 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
         health_monitor = data['health_monitor']
         pool_id = data['pool_id']
         service_vendor = agent_info['service_vendor']
-        driver = self._get_driver(service_vendor)  # (pool_id)
+        service_feature = agent_info['service_feature']
+        driver = self._get_driver(service_vendor, service_feature)  # (pool_id)
         assoc_id = {'pool_id': pool_id,
                     'monitor_id': health_monitor['id']}
         try:
@@ -618,7 +628,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
                 driver.delete_pool_health_monitor(health_monitor, pool_id,
                                                   context)
                 return  # Don't update object status for delete operation
-        except Exception:
+        except Exception as e:
             if operation == lb_constants.DELETE:
                 msg = ("Failed to delete pool health monitor."
                        " assoc_id: %s" % (assoc_id))
@@ -627,6 +637,7 @@ class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
                 self.plugin_rpc.update_status(
                     'health_monitor', assoc_id, lb_constants.ERROR,
                     agent_info, health_monitor)
+                raise e
         else:
             self.plugin_rpc.update_status(
                 'health_monitor', assoc_id, lb_constants.ACTIVE,
@@ -180,9 +180,9 @@ class VPNaasEventHandler(nfp_api.NfpEventHandler):
         self._drivers = drivers
         self._plugin_rpc = VpnaasRpcSender(self._sc)

-    def _get_driver(self, service_vendor):
+    def _get_driver(self, service_vendor, service_feature):

-        driver_id = const.SERVICE_TYPE + service_vendor
+        driver_id = const.SERVICE_TYPE + service_vendor + service_feature
         return self._drivers[driver_id]

     def handle_event(self, ev):
@@ -203,9 +203,10 @@ class VPNaasEventHandler(nfp_api.NfpEventHandler):
                    % (os.getpid(),
                       ev.id, const.VPN_GENERIC_CONFIG_RPC_TOPIC))
             LOG.debug(msg)
-            service_vendor = (
-                ev.data['context']['agent_info']['service_vendor'])
-            driver = self._get_driver(service_vendor)
+            agent_info = ev.data['context']['agent_info']
+            service_vendor = agent_info['service_vendor']
+            service_feature = agent_info['service_feature']
+            driver = self._get_driver(service_vendor, service_feature)
             setattr(VPNaasEventHandler, "service_driver", driver)
             self._vpnservice_updated(ev, driver)
         except Exception as err:
@@ -277,7 +278,6 @@ class VPNaasEventHandler(nfp_api.NfpEventHandler):
         Returns: None
         """
         try:
-
             return self.service_driver.check_status(context, svc_context)
         except Exception as err:
             msg = ("Failed to sync ipsec connection information. %s."
@@ -21,6 +21,14 @@ from gbpservice.nfp.core import log as nfp_logging
 LOG = nfp_logging.getLogger(__name__)


+def set_class_attr(**kwargs):
+    def f(class_obj):
+        for key, value in kwargs.items():
+            setattr(class_obj, key.lower(), value.lower())
+        return class_obj
+    return f
+
+
 class BaseDriver(object):
     """ Implements common functions for drivers.

@@ -45,6 +53,9 @@ class BaseDriver(object):
         Returns: SUCCESS/FAILED

         """

+        resource_data = self.parse.parse_data(const.HEALTHMONITOR,
+                                              resource_data)
         ip = resource_data.get('mgmt_ip')
         port = str(self.port)
         command = 'nc ' + ip + ' ' + port + ' -z'
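
A quick illustration of what the new set_class_attr decorator does (illustrative only; ExampleDriver and the literal values are not from the patch). Both the attribute names and the values are lowercased, so decorated driver classes expose service_type/service_vendor attributes without declaring them by hand:

    def set_class_attr(**kwargs):
        def f(class_obj):
            for key, value in kwargs.items():
                setattr(class_obj, key.lower(), value.lower())
            return class_obj
        return f

    @set_class_attr(SERVICE_TYPE='FIREWALL', SERVICE_VENDOR='VYOS')
    class ExampleDriver(object):
        pass

    assert ExampleDriver.service_type == 'firewall'
    assert ExampleDriver.service_vendor == 'vyos'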
@@ -10,7 +10,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import ast
 import requests

 from gbpservice.nfp.core import log as nfp_logging
@@ -21,6 +20,7 @@ from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
 from gbpservice.contrib.nfp.configurator.drivers.firewall.vyos import (
     vyos_fw_constants as const)
 from gbpservice.contrib.nfp.configurator.lib import constants as common_const
+from gbpservice.contrib.nfp.configurator.lib import data_parser
 from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const

 LOG = nfp_logging.getLogger(__name__)
@@ -85,7 +85,7 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
     """

     def __init__(self):
-        pass
+        self.parse = data_parser.DataParser()

     def _configure_static_ips(self, resource_data):
         """ Configure static IPs for provider and stitching interfaces
@@ -107,10 +107,9 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
             stitching_ip=resource_data.get('stitching_ip'),
             stitching_cidr=resource_data.get('stitching_cidr'),
             stitching_mac=resource_data.get('stitching_mac'),
-            provider_interface_position=resource_data.get(
-                'provider_interface_index'),
-            stitching_interface_position=resource_data.get(
-                'stitching_interface_index'))
+            monitoring_ip=resource_data.get('monitoring_ip'),
+            monitoring_cidr=resource_data.get('monitoring_cidr'),
+            monitoring_mac=resource_data.get('monitoring_mac'))
         mgmt_ip = resource_data['mgmt_ip']

         url = const.request_url % (mgmt_ip,
@@ -158,6 +157,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):

         """

+        resource_data = self.parse.parse_data(common_const.INTERFACES,
+                                              resource_data)
         mgmt_ip = resource_data['mgmt_ip']

         try:
@@ -287,6 +288,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):

         """

+        resource_data = self.parse.parse_data(common_const.INTERFACES,
+                                              resource_data)
         try:
             result_static_ips = self._clear_static_ips(resource_data)
         except Exception as err:
@@ -346,9 +349,16 @@ class FwGenericConfigDriver(base_driver.BaseDriver):

         """

+        forward_routes = resource_data.get('forward_route')
+        resource_data = self.parse.parse_data(common_const.ROUTES,
+                                              resource_data)
         mgmt_ip = resource_data.get('mgmt_ip')
-        source_cidrs = resource_data.get('source_cidrs')
-        gateway_ip = resource_data.get('gateway_ip')
+        gateway_ip = resource_data.get('stitching_gw_ip')
+        if not forward_routes:
+            source_cidrs = [resource_data.get('stitching_cidr')]
+        else:
+            source_cidrs = [resource_data.get('provider_cidr'),
+                            resource_data.get('stitching_cidr')]

         url = const.request_url % (mgmt_ip, self.port,
                                    'add-source-route')
@@ -395,8 +405,11 @@ class FwGenericConfigDriver(base_driver.BaseDriver):

         """

+        resource_data = self.parse.parse_data(common_const.ROUTES,
+                                              resource_data)
         mgmt_ip = resource_data.get('mgmt_ip')
-        source_cidrs = resource_data.get('source_cidrs')
+        source_cidrs = [resource_data.get('provider_cidr'),
+                        resource_data.get('stitching_cidr')]

         url = const.request_url % (mgmt_ip, self.port,
                                    'delete-source-route')
@@ -430,6 +443,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
         return err_msg


+@base_driver.set_class_attr(SERVICE_TYPE=fw_const.SERVICE_TYPE,
+                            SERVICE_VENDOR=const.VYOS)
 class FwaasDriver(FwGenericConfigDriver):
     """ Firewall as a service driver for handling firewall
     service configuration requests.
@@ -440,9 +455,6 @@ class FwaasDriver(FwGenericConfigDriver):

     """

-    service_type = fw_const.SERVICE_TYPE
-    service_vendor = const.VYOS
-
     def __init__(self, conf):
         self.conf = conf
         self.timeout = const.REST_TIMEOUT
@@ -451,32 +463,6 @@ class FwaasDriver(FwGenericConfigDriver):
         self.port = const.CONFIGURATION_SERVER_PORT
         super(FwaasDriver, self).__init__()

-    def _get_firewall_attribute(self, firewall):
-        """ Retrieves management IP from the firewall resource received
-
-        :param firewall: firewall dictionary containing rules
-        and other objects
-
-        Returns: management IP
-
-        """
-
-        description = ast.literal_eval(firewall["description"])
-        if not description.get('vm_management_ip'):
-            msg = ("Failed to find vm_management_ip.")
-            LOG.debug(msg)
-            raise
-
-        if not description.get('service_vendor'):
-            msg = ("Failed to find service_vendor.")
-            LOG.debug(msg)
-            raise
-
-        msg = ("Found vm_management_ip %s."
-               % description['vm_management_ip'])
-        LOG.debug(msg)
-        return description['vm_management_ip']
-
     def create_firewall(self, context, firewall, host):
         """ Implements firewall creation

@@ -490,10 +476,12 @@ class FwaasDriver(FwGenericConfigDriver):

         """

+        resource_data = self.parse.parse_data(common_const.FIREWALL, context)
+
         msg = ("Processing create firewall request in FWaaS Driver "
                "for Firewall ID: %s." % firewall['id'])
         LOG.debug(msg)
-        mgmt_ip = self._get_firewall_attribute(firewall)
+        mgmt_ip = resource_data.get('mgmt_ip')
         url = const.request_url % (mgmt_ip,
                                    self.port,
                                    'configure-firewall-rule')
@@ -536,7 +524,8 @@ class FwaasDriver(FwGenericConfigDriver):

         """

-        mgmt_ip = self._get_firewall_attribute(firewall)
+        resource_data = self.parse.parse_data(common_const.FIREWALL, context)
+        mgmt_ip = resource_data.get('mgmt_ip')
         url = const.request_url % (mgmt_ip,
                                    self.port,
                                    'update-firewall-rule')
@@ -578,7 +567,8 @@ class FwaasDriver(FwGenericConfigDriver):

         """

-        mgmt_ip = self._get_firewall_attribute(firewall)
+        resource_data = self.parse.parse_data(common_const.FIREWALL, context)
+        mgmt_ip = resource_data.get('mgmt_ip')
         url = const.request_url % (mgmt_ip,
                                    self.port,
                                    'delete-firewall-rule')
@@ -10,12 +10,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import ast
-
 from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
 from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.\
     haproxy import (haproxy_rest_client)
 from gbpservice.contrib.nfp.configurator.lib import constants as common_const
+from gbpservice.contrib.nfp.configurator.lib import data_parser
 from gbpservice.contrib.nfp.configurator.lib import lb_constants
 from gbpservice.nfp.core import log as nfp_logging

@@ -24,12 +23,13 @@ LOG = nfp_logging.getLogger(__name__)
 DRIVER_NAME = 'loadbalancer'


-class LbGenericConfigDriver(object):
+class LbGenericConfigDriver(base_driver.BaseDriver):
     """ Loadbalancer generic configuration driver class for handling device
     configuration requests.
     """

     def __init__(self):
-        pass
+        self.parse = data_parser.DataParser()

     def configure_interfaces(self, context, resource_data):
         """ Configure interfaces for the service VM.
@@ -43,6 +43,8 @@ class LbGenericConfigDriver(object):

         """

+        resource_data = self.parse.parse_data(common_const.INTERFACES,
+                                              resource_data)
         mgmt_ip = resource_data['mgmt_ip']

         try:
@@ -71,14 +73,13 @@ class LbGenericConfigDriver(object):
         return lb_constants.STATUS_SUCCESS


-class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
+@base_driver.set_class_attr(SERVICE_TYPE=lb_constants.SERVICE_TYPE,
+                            SERVICE_VENDOR=common_const.HAPROXY)
+class HaproxyOnVmDriver(LbGenericConfigDriver):
     """Main driver which gets registered with LB agent and Generic Config agent
     in configurator and these agents pass all *aaS neutron and generic
     config requests to this class.
     """

-    service_type = 'loadbalancer'
-    service_vendor = 'haproxy'
     pool_to_device = {}

     def __init__(self, plugin_rpc=None, conf=None):
@@ -96,7 +97,11 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
         return client

     def _get_device_for_pool(self, pool_id, context):
-        device = HaproxyOnVmDriver.pool_to_device.get(pool_id, None)
+        resource_data = self.parse.parse_data(common_const.LOADBALANCER,
+                                              context)
+        role = resource_data.get('role', '')
+        key = pool_id + role
+        device = HaproxyOnVmDriver.pool_to_device.get(key, None)
         if device is not None:
             return device

@@ -106,16 +111,11 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
         if vip is None:
             return None
         else:
-            vip_desc = ast.literal_eval(vip['description'])
-            device = vip_desc['floating_ip']
+            device = resource_data['mgmt_ip']
             if device:
-                HaproxyOnVmDriver.pool_to_device[pool_id] = device
+                HaproxyOnVmDriver.pool_to_device[key] = device
             return device

-    def _get_interface_mac(self, vip):
-        vip_desc = ast.literal_eval(vip['description'])
-        return vip_desc['provider_interface_mac']
-
     def _expand_expected_codes(self, codes):
         """Expand the expected code string in set of codes.

@@ -135,7 +135,7 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
             retval.add(code)
         return retval

-    def _prepare_haproxy_frontend(self, vip):
+    def _prepare_haproxy_frontend(self, vip, resource_data):
         vip_ip = vip['address']
         vip_port_number = vip['protocol_port']
         protocol = vip['protocol']
@@ -156,7 +156,7 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
         try:
             if protocol == lb_constants.PROTOCOL_HTTP:
                 frontend['option'].update({'forwardfor': True})
-            provider_interface_mac = self._get_interface_mac(vip)
+            provider_interface_mac = resource_data['provider_mac']
             frontend.update({'provider_interface_mac': provider_interface_mac})
         except Exception as e:
             raise e
@@ -371,10 +371,10 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):

         return backend

-    def _create_vip(self, vip, device_addr):
+    def _create_vip(self, vip, device_addr, resource_data):
         try:
             client = self._get_rest_client(device_addr)
-            frontend = self._prepare_haproxy_frontend(vip)
+            frontend = self._prepare_haproxy_frontend(vip, resource_data)
             body = {"frnt:%s" % vip['id']: frontend}
             client.create_resource("frontend", body)
         except Exception as e:
@@ -487,6 +487,8 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
         return stats

     def create_vip(self, vip, context):
+        resource_data = self.parse.parse_data(common_const.LOADBALANCER,
+                                              context)
         msg = ("Handling create vip [vip=%s]" % (vip))
         LOG.info(msg)
         try:
@@ -501,7 +503,7 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
                 self._create_pool_health_monitor(hm,
                                                  vip['pool_id'], device_addr)

-            self._create_vip(vip, device_addr)
+            self._create_vip(vip, device_addr, resource_data)
         except Exception as e:
             msg = ("Failed to create vip %s. %s"
                    % (vip['id'], str(e).capitalize()))
@@ -512,6 +514,8 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
         LOG.info(msg)

     def update_vip(self, old_vip, vip, context):
+        resource_data = self.parse.parse_data(common_const.LOADBALANCER,
+                                              context)
         msg = ("Handling update vip [old_vip=%s, vip=%s]" % (old_vip, vip))
         LOG.info(msg)
         try:
@@ -539,11 +543,11 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
                                                              context)
                     pool = logical_device['pool']
                     self._create_pool(pool, device_addr)
-                    self._create_vip(vip, device_addr)
+                    self._create_vip(vip, device_addr, resource_data)
                     return

                 client = self._get_rest_client(device_addr)
-                body = self._prepare_haproxy_frontend(vip)
+                body = self._prepare_haproxy_frontend(vip, resource_data)
                 client.update_resource("frontend/frnt:%s" % vip['id'], body)
         except Exception as e:
             msg = ("Failed to update vip %s. %s"
@@ -604,12 +608,9 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
         msg = ("Handling delete pool [pool=%s]" % (pool))
         LOG.info(msg)
         try:
-            device = HaproxyOnVmDriver.pool_to_device.get(pool['id'], None)
-            # if pool is not known, do nothing
-            if device is None:
-                return
-
             device_addr = self._get_device_for_pool(pool['id'], context)
+            if device_addr is None:
+                return
             if (pool['vip_id'] and
                     device_addr):
                 self._delete_pool(pool, device_addr)
@@ -626,7 +627,8 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
         msg = ("Handling create member [member=%s] " % (member))
         LOG.info(msg)
         try:
-            device_addr = self._get_device_for_pool(member['pool_id'], context)
+            device_addr = self._get_device_for_pool(member['pool_id'],
+                                                    context)
             if device_addr is not None:
                 self._create_member(member, device_addr, context)
         except Exception as e:
@@ -648,7 +650,8 @@ class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver):
             if device_addr is not None:
                 self._delete_member(old_member, device_addr)

-            device_addr = self._get_device_for_pool(member['pool_id'], context)
+            device_addr = self._get_device_for_pool(member['pool_id'],
+                                                    context)
             if device_addr is not None:
                 self._create_member(member, device_addr, context)
         except Exception as e:
@@ -131,7 +131,7 @@ class OctaviaDataModelBuilder(object):
         if not amphorae:
             raise exceptions.IncompleteData(
                 "Amphora information is missing")
-        # REVISIT(jiahao): vrrp_group, topology, server_group_id are not
+        # REVISIT(jiahao): cluster_group, topology, affinity_group_id are not
         # included yet
         args.update({
             'vip': vip,
@@ -18,6 +18,7 @@ from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
 from gbpservice.contrib.nfp.configurator.drivers.vpn.vyos import (
     vyos_vpn_constants as const)
 from gbpservice.contrib.nfp.configurator.lib import constants as common_const
+from gbpservice.contrib.nfp.configurator.lib import data_parser
 from gbpservice.contrib.nfp.configurator.lib import vpn_constants as vpn_const
 from gbpservice.nfp.core import log as nfp_logging

@@ -262,7 +263,7 @@ class VPNServiceValidator(object):
         local_cidr = tokens[1].split('=')[1]
         return local_cidr

-    def validate(self, context, vpnsvc):
+    def validate(self, context, resource_data):
         """
         Get the vpn services for this tenant
         Check for overlapping lcidr - (not allowed)
@@ -273,7 +274,9 @@ class VPNServiceValidator(object):

         Returns: None
         """
-        lcidr = self._get_local_cidr(vpnsvc)
+
+        vpnsvc = resource_data.get('resource')
+        lcidr = resource_data.get('provider_cidr')
         filters = {'tenant_id': [context['tenant_id']]}
         t_vpnsvcs = self.agent.get_vpn_services(
             context, filters=filters)
@@ -303,6 +306,7 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):

     def __init__(self):
         self.timeout = const.REST_TIMEOUT
+        self.parse = data_parser.DataParser()

     def _configure_static_ips(self, resource_data):
         """ Configure static IPs for provider and stitching interfaces
@@ -323,11 +327,7 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):
             provider_mac=resource_data.get('provider_mac'),
             stitching_ip=resource_data.get('stitching_ip'),
             stitching_cidr=resource_data.get('stitching_cidr'),
-            stitching_mac=resource_data.get('stitching_mac'),
-            provider_interface_position=resource_data.get(
-                'provider_interface_index'),
-            stitching_interface_position=resource_data.get(
-                'stitching_interface_index'))
+            stitching_mac=resource_data.get('stitching_mac'))
         mgmt_ip = resource_data['mgmt_ip']

         url = const.request_url % (mgmt_ip,
@@ -385,6 +385,9 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):
         Returns: SUCCESS/Failure message with reason.

         """

+        resource_data = self.parse.parse_data(common_const.INTERFACES,
+                                              resource_data)
         mgmt_ip = resource_data['mgmt_ip']

         try:
@@ -536,6 +539,8 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):

         """

+        resource_data = self.parse.parse_data(common_const.INTERFACES,
+                                              resource_data)
         try:
             result_static_ips = self._clear_static_ips(resource_data)
         except Exception as err:
@@ -605,15 +610,22 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):
         Returns: SUCCESS/Failure message with reason.

         """

+        forward_routes = resource_data.get('forward_route')
+        resource_data = self.parse.parse_data(common_const.ROUTES,
+                                              resource_data)
         mgmt_ip = resource_data.get('mgmt_ip')
-        source_cidrs = resource_data.get('source_cidrs')
-        gateway_ip = resource_data.get('gateway_ip')
+        gateway_ip = resource_data.get('stitching_gw_ip')
+        if not forward_routes:
+            source_cidrs = [resource_data.get('stitching_cidr')]
+        else:
+            source_cidrs = [resource_data.get('provider_cidr'),
+                            resource_data.get('stitching_cidr')]

         stitching_url = const.request_url % (mgmt_ip,
                                              const.CONFIGURATION_SERVER_PORT,
                                              'add-stitching-route')
         st_data = jsonutils.dumps({'gateway_ip': gateway_ip})

         try:
             resp = requests.post(
                 stitching_url, data=st_data, timeout=self.timeout)
@@ -685,14 +697,17 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):

         """
         # clear the static stitching gateway route
+        resource_data = self.parse.parse_data(common_const.ROUTES,
+                                              resource_data)
         mgmt_ip = resource_data.get('mgmt_ip')
-        source_cidrs = resource_data.get('source_cidrs')
+        source_cidrs = [resource_data.get('provider_cidr'),
+                        resource_data.get('stitching_cidr')]

         stitching_url = const.request_url % (mgmt_ip,
                                              const.CONFIGURATION_SERVER_PORT,
                                              'delete-stitching-route')
         st_data = jsonutils.dumps(
-            {'gateway_ip': resource_data.get('gateway_ip')})
+            {'gateway_ip': resource_data.get('stitching_gw_ip')})
         try:
             resp = requests.post(
                 stitching_url, data=st_data, timeout=self.timeout)
@@ -740,15 +755,14 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):
                "Response Content: %r" % (resp.status_code, resp.content))


+@base_driver.set_class_attr(SERVICE_TYPE=vpn_const.SERVICE_TYPE,
+                            SERVICE_VENDOR=const.SERVICE_VENDOR)
 class VpnaasIpsecDriver(VpnGenericConfigDriver):
     """
     Driver class for implementing VPN IPSEC configuration
     requests from VPNaas Plugin.
     """

-    service_type = vpn_const.SERVICE_TYPE
-    service_vendor = const.SERVICE_VENDOR
-
     def __init__(self, conf):
         self.conf = conf
         self.port = const.CONFIGURATION_SERVER_PORT
@@ -820,37 +834,6 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
             if item['id'] == conn['id']:
                 item['status'] = vpn_const.STATE_INIT

-    def _get_fip_from_vpnsvc(self, vpn_svc):
-        svc_desc = vpn_svc['description']
-        tokens = svc_desc.split(';')
-        fip = tokens[0].split('=')[1]
-        return fip
-
-    def _get_fip(self, svc_context):
-        return self._get_fip_from_vpnsvc(svc_context['service'])
-
-    def _get_ipsec_tunnel_local_cidr_from_vpnsvc(self, vpn_svc):
-        svc_desc = vpn_svc['description']
-        tokens = svc_desc.split(';')
-        tunnel_local_cidr = tokens[1].split('=')[1]
-        return tunnel_local_cidr
-
-    def _get_ipsec_tunnel_local_cidr(self, svc_context):
-        return self._get_ipsec_tunnel_local_cidr_from_vpnsvc(
-            svc_context['service'])
-
-    def _get_stitching_fixed_ip(self, conn):
-        desc = conn['description']
-        tokens = desc.split(';')
-        fixed_ip = tokens[3].split('=')[1]
-        return fixed_ip
-
-    def _get_user_access_ip(self, conn):
-        desc = conn['description']
-        tokens = desc.split(';')
-        access_ip = tokens[2].split('=')[1]
-        return access_ip
-
     def _ipsec_conn_correct_enc_algo(self, conn):
         ike_enc_algo = conn['ikepolicy']['encryption_algorithm']
         ipsec_enc_algo = conn['ipsecpolicy']['encryption_algorithm']
@@ -881,7 +864,23 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
             filters['peer_address'] = peer_address
         return filters

-    def _ipsec_create_conn(self, context, mgmt_fip, conn):
+    def _get_ipsec_tunnel_local_cidr_from_vpnsvc(self, vpn_svc):
+        svc_desc = vpn_svc['description']
+        tokens = svc_desc.split(';')
+        tunnel_local_cidr = tokens[1].split('=')[1]
+
+        standby_fip = None
+        try:
+            standby_fip = tokens[9].split('=')[1]
+        except Exception:
+            pass
+        return tunnel_local_cidr, standby_fip
+
+    def _get_ipsec_tunnel_local_cidr(self, svc_context):
+        return self._get_ipsec_tunnel_local_cidr_from_vpnsvc(
+            svc_context['service'])
+
+    def _ipsec_create_conn(self, context, mgmt_fip, resource_data):
         """
         Get the context for this ipsec conn and make POST to the service VM.
         :param context: Dictionary which holds all the required data for
@@ -892,14 +891,20 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
         Returns: None
         """

+        conn = resource_data.get('resource')
         svc_context = self.agent.get_vpn_servicecontext(
             context, self._get_filters(conn_id=conn['id']))[0]
-        tunnel_local_cidr = self._get_ipsec_tunnel_local_cidr(svc_context)
-
+        tunnel_local_cidr, standby_fip = self._get_ipsec_tunnel_local_cidr(
+            svc_context)
+        if standby_fip:
+            svc_context['siteconns'][0]['connection']['standby_fip'] = (
+                standby_fip)
         conn = svc_context['siteconns'][0]['connection']
         svc_context['siteconns'][0]['connection']['stitching_fixed_ip'] = (
-            self._get_stitching_fixed_ip(conn))
+            resource_data['stitching_ip'])
         svc_context['siteconns'][0]['connection']['access_ip'] = (
-            self._get_user_access_ip(conn))
+            resource_data['stitching_floating_ip'])
         msg = "IPSec: Pushing ipsec configuration %s" % conn
         LOG.info(msg)
         conn['tunnel_local_cidr'] = tunnel_local_cidr
@@ -921,7 +926,8 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
         svc_context = self.agent.get_vpn_servicecontext(
             context, self._get_filters(conn_id=conn['id']))[0]

-        tunnel_local_cidr = self._get_ipsec_tunnel_local_cidr(svc_context)
+        tunnel_local_cidr, _ = self._get_ipsec_tunnel_local_cidr(
+            svc_context)

         tunnel = {}
         tunnel['peer_address'] = conn['peer_address']
@@ -1013,7 +1019,7 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
                 context, conn, msg)

     def _ipsec_delete_tunnel(self, context, mgmt_fip,
-                             conn):
+                             resource_data):
         """
         Make DELETE to the service VM to delete the tunnel.

@@ -1025,7 +1031,8 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
         Returns: None
         """

-        lcidr = self._get_ipsec_tunnel_local_cidr_from_vpnsvc(conn)
+        conn = resource_data.get('resource')
+        lcidr = resource_data['provider_cidr']

         tunnel = {}
         tunnel['peer_address'] = conn['peer_address']
@@ -1076,7 +1083,7 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
         """

         c_state = None
-        lcidr = self._get_ipsec_tunnel_local_cidr(svc_context)
+        lcidr, _ = self._get_ipsec_tunnel_local_cidr(svc_context)
         if conn['status'] == vpn_const.STATE_INIT:
             tunnel = {
                 'peer_address': conn['peer_address'],
@@ -1098,19 +1105,11 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
                 return c_state, True
             return c_state, False

-    def _get_vm_mgmt_ip_from_desc(self, desc):
-        svc_desc = desc['description']
-        tokens = svc_desc.split(';')
-        vm_mgmt_ip = tokens[0].split('=')[1]
-        return vm_mgmt_ip
-
     def create_vpn_service(self, context, resource_data):

-        svc = resource_data.get('resource')
-        msg = "Validating VPN service %s " % svc
+        msg = "Validating VPN service %s " % resource_data.get('resource')
         LOG.info(msg)
         validator = VPNServiceValidator(self.agent)
-        validator.validate(context, svc)
+        validator.validate(context, resource_data)

     def create_ipsec_conn(self, context, resource_data):
         """
@@ -1124,7 +1123,7 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
         """

         conn = resource_data.get('resource')
-        mgmt_fip = self._get_vm_mgmt_ip_from_desc(conn)
+        mgmt_fip = resource_data['mgmt_ip']
         msg = "IPsec: create siteconnection %s" % conn
         LOG.info(msg)
         """
@@ -1140,7 +1139,7 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
         . Add peer for different peer
         b) First conn, create complete ipsec profile
         """
-        t_lcidr = self._get_ipsec_tunnel_local_cidr_from_vpnsvc(conn)
+        t_lcidr = resource_data['provider_cidr']
         if t_lcidr in conn['peer_cidrs']:
             msg = ("IPSec: Tunnel remote cidr %s conflicts "
                    "with local cidr." % t_lcidr)
@@ -1161,7 +1160,7 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
             LOG.error(msg)
         try:
             if not tenant_conns:
-                self._ipsec_create_conn(context, mgmt_fip, conn)
+                self._ipsec_create_conn(context, mgmt_fip, resource_data)
             else:
                 """
                 Check if this conn has overlapping peer
@@ -1203,14 +1202,14 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):
         conn = resource_data.get('resource')
         msg = "IPsec: delete siteconnection %s" % conn
         LOG.info(msg)
-        mgmt_fip = self._get_vm_mgmt_ip_from_desc(conn)
+        mgmt_fip = resource_data['mgmt_ip']

         tenant_conns = self._ipsec_get_tenant_conns(
             context, mgmt_fip, conn, on_delete=True)
         try:
             if tenant_conns:
                 self._ipsec_delete_tunnel(
-                    context, mgmt_fip, conn)
+                    context, mgmt_fip, resource_data)
             else:
                 self._ipsec_delete_connection(
                     context, mgmt_fip, conn)
@@ -1228,7 +1227,13 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):

         Returns: None
         """
-        fip = self._get_fip(svc_context)
+
+        vpn_desc = self.parse.parse_data(common_const.VPN, context)
+        if type(vpn_desc) == list:
+            fip = vpn_desc[0]['mgmt_ip']
+        else:
+            fip = vpn_desc['mgmt_ip']
+
         conn = svc_context['siteconns'][0]['connection']

         try:
@@ -1255,6 +1260,10 @@ class VpnaasIpsecDriver(VpnGenericConfigDriver):

         Returns: None
         """

+        vpn_desc = self.parse.parse_data(common_const.VPN, context)
+        resource_data.update(vpn_desc)
+
         msg = ("Handling VPN service update notification '%s'",
                resource_data.get('reason', ''))
         LOG.info(msg)
@@ -27,8 +27,6 @@ NOTIFICATION_QUEUE = 'configurator-notifications'
FIREWALL = 'firewall'
VPN = 'vpn'
LOADBALANCER = 'loadbalancer'
HEALTHMONITOR = 'healthmonitor'
VPN = 'vpn'
VYOS = 'vyos'
LOADBALANCERV2 = 'loadbalancerv2'
HAPROXY = 'haproxy'
@@ -40,6 +38,10 @@ POST = 'post'
PUT = 'put'
UNHANDLED = "UNHANDLED"

HEALTHMONITOR = 'healthmonitor'
INTERFACES = 'interfaces'
ROUTES = 'routes'

SUCCESS_CODES = [200, 201, 202, 203, 204]
ERROR_CODES = [400, 404, 500]

@@ -50,5 +52,5 @@ STATUS_ERROR = "ERROR"
STATUS_SUCCESS = "SUCCESS"
UNHANDLED = "UNHANDLED"

AGENTS_PKG = 'gbpservice.contrib.nfp.configurator.agents'
AGENTS_PKG = ['gbpservice.contrib.nfp.configurator.agents']
CONFIGURATOR_RPC_TOPIC = 'configurator'

gbpservice/contrib/nfp/configurator/lib/data_parser.py (new file, 96 lines)
@@ -0,0 +1,96 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from gbpservice.nfp.common import constants as const
from gbpservice.nfp.core import log as nfp_logging

LOG = nfp_logging.getLogger(__name__)


class DataParser(object):
''' A library to parse device and service configuration and
transform them into a dictionary of key-value pairs

'''

def __init__(self):
pass

def parse_data(self, resource, data):
''' Parser function exposed to the configurator modules.

:param resource: Resource name (HEALTHMONITOR/INTERFACES/ROUTES/
FIREWALL/LOADBALANCER/LOADBALANCERV2/VPN)
:param data: Resource data dictionary in case of device configuration
and context in case of service configuration

Returns: a dictionary if nfds/nfs contains a single element else
a list of dictionaries where each dictionary corresponds
to each element in nfds/nfs
'''

config_data_list = []

if data.get('nfds'):
tenant_id = data['tenant_id']
nf_config_list = data['nfds']
elif data.get('resource_data'):
tenant_id = data['resource_data']['tenant_id']
nf_config_list = data['resource_data']['nfs']
else:
msg = ("The given schema of data dictionary is not supported "
"by the data parser library. Returning the input. "
"Input data is: %s" % data)
LOG.debug(msg)
return data

for nf_config in nf_config_list:
self.resource_data = {}
self.resource_data.update({
'tenant_id': tenant_id,
'role': nf_config['role'],
'mgmt_ip': nf_config['svc_mgmt_fixed_ip']})

self._parse_config_data(nf_config, resource)
config_data_list.append(copy.deepcopy(self.resource_data))

return (config_data_list[0]
if len(config_data_list) == 1
else config_data_list)

def _parse_config_data(self, nfd, resource):
if resource.lower() == const.HEALTHMONITOR_RESOURCE:
return self.resource_data.update(
{'periodicity': nfd['periodicity'],
'vmid': nfd['vmid']})

networks = nfd['networks']
for network in networks:
prefix = network['type']
port = network['ports'][0]
self.resource_data.update({
(prefix + '_cidr'): network['cidr'],
(prefix + '_ip'): port['fixed_ip'],
(prefix + '_floating_ip'): port['floating_ip'],
(prefix + '_mac'): port['mac'],
(prefix + '_gw_ip'): network['gw_ip']})

vips = nfd.get('vips')
if not vips:
return
for vip in vips:
prefix = vip['type'] + '_vip'
self.resource_data.update({
(prefix + '_ip'): vip['ip'],
(prefix + '_mac'): vip['mac']})
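
A minimal usage sketch of the new parser (not part of the commit; sample values are invented): it feeds parse_data() a device-configuration payload in the 'nfds' shape shown above and reads back the flattened keys.

# Illustrative only: exercising DataParser.parse_data() with a fake payload.
from gbpservice.contrib.nfp.configurator.lib.data_parser import DataParser

sample = {
    'tenant_id': 'ac33b4c2d80f485a86ea515c09c74949',
    'nfds': [{'role': 'master',
              'svc_mgmt_fixed_ip': '11.0.0.37',
              'networks': [{'type': 'provider',
                            'cidr': '11.0.1.0/24',
                            'gw_ip': '',
                            'ports': [{'fixed_ip': '11.0.1.1',
                                       'floating_ip': '',
                                       'mac': 'fa:16:3e:d9:4c:33'}]}]}]}

parsed = DataParser().parse_data('interfaces', sample)
# A single nfd yields a dict, e.g.:
# parsed['mgmt_ip'] == '11.0.0.37', parsed['provider_cidr'] == '11.0.1.0/24'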
@@ -77,10 +77,7 @@ class ServiceAgentDemuxer(object):
# Get service type based on the fact that for some request data
# formats the 'type' key is absent. Check for invalid types
service_type = request_data['info'].get('service_type').lower()
if (service_type not in const.supported_service_types):
return const.invalid_service_type
else:
return service_type
return service_type

def get_service_agent_info(self, operation, resource_type,
request_data, is_generic_config):
@@ -112,6 +109,10 @@ class ServiceAgentDemuxer(object):
if str(service_vendor) == 'None':
service_vendor = vendor_map[resource_type]

service_feature = request_data['info'].get('service_feature')
if not service_feature:
service_feature = ''

for config_data in request_data['config']:
sa_info = {}

@@ -144,7 +145,10 @@ class ServiceAgentDemuxer(object):
else:
if is_nfp_svc:
resource_type = const.NFP_SERVICE
method = resource_type_to_method_map[resource_type]
try:
method = resource_type_to_method_map[resource_type]
except Exception:
method = 'handle_config'

sa_info.update({'method': method,
'resource_data': data,
@@ -152,6 +156,7 @@ class ServiceAgentDemuxer(object):
# This is the API context
'context': context,
'service_vendor': service_vendor.lower(),
'service_feature': service_feature,
'resource_type': resource_type.lower(),
'resource': resource.lower()},
'is_generic_config': is_generic_config})

@@ -23,6 +23,6 @@ MAX_FAIL_COUNT = 28  # 5 secs delay * 28 = 140 secs
INITIAL = 'initial'
FOREVER = 'forever'

#POLLING EVENTS SPACING AND MAXRETRIES
# POLLING EVENTS SPACING AND MAXRETRIES
EVENT_CONFIGURE_HEALTHMONITOR_SPACING = 10
EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY = 40
EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY = 50

@@ -1,102 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Defines schema of the resource. This file contains all the generic config
resources. Schema name defined here must be same as resource name in the
request_data.
Format of request data for network function device configuration API:

request_data {
info {
version: <v1/v2/v3>
}
config [
{
'resource': <healthmonitor/routes/interfaces>,
'kwargs': <resource parameters>
},
{
'resource': <healthmonitor/routes/interfaces>,
'kwargs': <resource parameters>
}, ...
]
}

"""


""" Request data schema.
"""

request_data = {'info': {},
'config': []
}


""" Request data info schema.
This is a key of request data which also needs to be validated.
"""

request_data_info = {'context': "",
'service_type': "",
'service_vendor': ""}


""" Request data config schema.
This is a key of request data which also needs to be validated.
"""

request_data_config = {'resource': "",
'resource_data': ""
}


"""Interface resource schema.
This resource is used by orchestrator to attach/detach interfaces after
service vm is launched successfully.
"""

interfaces = {'mgmt_ip': "",
'provider_ip': "",
'provider_cidr': "",
'provider_interface_index': "",
'stitching_ip': "",
'stitching_cidr': "",
'stitching_interface_index': "",
'provider_mac': "",
'stitching_mac': ""
}


"""Routes resource schema.
This resource is used by orchestrator to configure routes after service
vm is launched successfully.
"""

routes = {'mgmt_ip': "",
'source_cidrs': "",
'destination_cidr': "",
'gateway_ip': "",
'provider_mac': "",
'provider_interface_index': "",
}


"""Health monitor resource schema.
This resource is used by orchestrator to check service vm health after
service vm is launched successfully.
"""

healthmonitor = {'vmid': "",
'mgmt_ip': "",
'periodicity': "",
}
@@ -1,102 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from gbpservice.nfp.core import log as nfp_logging

from gbpservice.contrib.nfp.configurator.lib import constants as const
import gbpservice.contrib.nfp.configurator.lib.schema as schema
LOG = nfp_logging.getLogger(__name__)


class SchemaValidator(object):
""" Validates request data against standard resource schemas given in schema.py

Validation is focused on keys. It cross checks if resources in
request_data has all the keys given in the schema of that resource.
"""

def decode(self, request_data, is_generic_config):
""" Validate request data against resource schema.

:param: request_data

Returns: True - If schema validation is successful.
False - If schema validation fails.

"""
try:
if not self.validate_schema(request_data, schema.request_data):
return False

if ('service_type' in request_data['info'] and
'service_vendor' in request_data['info'] and
'context' in request_data['info']):
pass
elif not self.validate_schema(request_data['info'],
schema.request_data_info):
return False

for config in request_data['config']:
if not self.validate_schema(config,
schema.request_data_config):
return False

resource_type = config['resource']
resource = config['resource_data']

"""Do not validate kwargs for
1) *aaS apis
2) generic config of loadbalancer for resource
interfaces and routes
"""
if (not is_generic_config or
(request_data['info'][
'service_type'] in [const.LOADBALANCER,
const.LOADBALANCERV2] and
resource_type != const.HEALTHMONITOR)):
continue

resource_schema = getattr(schema, resource_type)
if not self.validate_schema(resource, resource_schema):
return False
except Exception as e:
LOG.error(e)
return False

return True

def validate_schema(self, resource, resource_schema):
""" Validate resource against resource_schema

:param resource
:param resource_schema

Returns: True/False
"""
diff = set(resource_schema.keys()) - set(resource.keys())

# If resource has unexpected extra keywords
if len(resource.keys()) > len(resource_schema.keys()):
diff = set(resource.keys()) - set(resource_schema.keys())
msg = ("FAILED: resource=%s has unexpected extra keys=%s,"
" expected keys are= %s " % (resource, list(diff),
resource_schema.keys()))
LOG.error(msg)
return False
elif len(diff) == 0:
return True
else:
msg = ("FAILED: resource=%s does not contain keys=%s,"
" expected keys are= %s " % (resource, list(diff),
resource_schema.keys()))
LOG.error(msg)
return False
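
For reference, a short sketch of how this key-based check behaves (the module is removed by this commit; values are never inspected, only key sets are compared):

# Illustrative only: validate_schema() compares keys, not values.
validator = SchemaValidator()
validator.validate_schema(
    {'vmid': 'x', 'mgmt_ip': '127.0.0.1', 'periodicity': 'initial'},
    {'vmid': '', 'mgmt_ip': '', 'periodicity': ''})   # -> True, same keys
validator.validate_schema(
    {'vmid': 'x'},
    {'vmid': '', 'mgmt_ip': ''})                      # -> False, missing key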
@@ -76,12 +76,14 @@ class ConfiguratorUtils(object):
continue
if hasattr(class_obj, 'service_vendor'):
key += class_obj.service_vendor
if hasattr(class_obj, 'service_feature'):
key += class_obj.service_feature
if key:
driver_objects[key] = class_obj

return driver_objects

def load_agents(self, pkg):
def load_agents(self):
"""Load all the agents inside pkg.

@param pkg : package
@@ -91,24 +93,26 @@ class ConfiguratorUtils(object):

"""
imported_service_agents = []
base_agent = __import__(pkg,
globals(), locals(), ['agents'], -1)
agents_dir = base_agent.__path__[0]
syspath = sys.path
sys.path = [agents_dir] + syspath
try:
files = os.listdir(agents_dir)
except OSError:
msg = ("Failed to read files from dir %s" % (agents_dir))
LOG.error(msg)
files = []
pkgs = self.conf.CONFIG_AGENTS.agents
for pkg in pkgs:
base_agent = __import__(pkg,
globals(), locals(), ['agents'], -1)
agents_dir = base_agent.__path__[0]
syspath = sys.path
sys.path = [agents_dir] + syspath
try:
files = os.listdir(agents_dir)
except OSError:
msg = ("Failed to read files from dir %s" % (agents_dir))
LOG.error(msg)
files = []

for fname in files:
if fname.endswith(".py") and fname != '__init__.py':
agent = __import__(pkg, globals(),
locals(), [fname[:-3]], -1)
imported_service_agents += [
eval('agent.%s' % (fname[:-3]))]
# modules += [__import__(fname[:-3])]
sys.path = syspath
for fname in files:
if fname.endswith(".py") and fname != '__init__.py':
agent = __import__(pkg, globals(),
locals(), [fname[:-3]], -1)
imported_service_agents += [
eval('agent.%s' % (fname[:-3]))]
# modules += [__import__(fname[:-3])]
sys.path = syspath
return imported_service_agents

@@ -32,3 +32,12 @@ nfp_configurator_config_drivers_opts = [

oslo_config.CONF.register_opts(nfp_configurator_config_drivers_opts,
"CONFIG_DRIVERS")

nfp_configurator_config_agents_opts = [
oslo_config.ListOpt(
'agents',
default=['gbpservice.contrib.nfp.configurator.agents'],
help='Config agents directory')]

oslo_config.CONF.register_opts(nfp_configurator_config_agents_opts,
"CONFIG_AGENTS")

@@ -14,7 +14,6 @@ from oslo_log import helpers as log_helpers

from gbpservice.contrib.nfp.configurator.lib import constants as const
from gbpservice.contrib.nfp.configurator.lib import demuxer
from gbpservice.contrib.nfp.configurator.lib import schema_validator
from gbpservice.contrib.nfp.configurator.lib import utils
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import rpc
@@ -42,7 +41,6 @@ class ConfiguratorRpcManager(object):
self.cm = cm
self.conf = conf
self.demuxer = demuxer
self.sv = schema_validator.SchemaValidator()

def _get_service_agent_instance(self, service_type):
"""Provides service agent instance based on service type.
@@ -69,9 +67,6 @@ class ConfiguratorRpcManager(object):
Returns: None

"""
if not self.sv.decode(request_data, is_generic_config):
msg = ("Decode failed for request_data=%s" % (request_data))
raise Exception(msg)

# Retrieves service type from RPC data
service_type = self.demuxer.get_service_type(request_data)
@@ -427,7 +422,7 @@ def get_configurator_module_instance(sc, conf):
conf_utils = utils.ConfiguratorUtils(conf)

# Loads all the service agents under AGENT_PKG module path
cm.imported_sas = conf_utils.load_agents(const.AGENTS_PKG)
cm.imported_sas = conf_utils.load_agents()
msg = ("Configurator loaded service agents from %s location."
% (cm.imported_sas))
LOG.info(msg)

@@ -32,7 +32,7 @@ BASE_VPN_VERSION = '1.0'
AGENT_TYPE_VPN = 'NFP Vpn agent'
ACTIVE = 'ACTIVE'
ERROR = 'ERROR'
TIMEOUT = 20
TIMEOUT = 80


class VPNAgentHostingServiceNotFound(exceptions.NeutronException):

@@ -16,7 +16,7 @@ RUN pip install\
python-keystoneclient\
oslo.config==3.6.0\
oslo.log==2.4.0\
oslo.messaging==4.2.0\
oslo.messaging==4.6.1\
oslo.db==4.4.0\
oslo.policy\
pecan==1.0.4\

@@ -1,6 +1,4 @@
#!/bin/sh

# nfp_pecan serves as REST server inside nfp_controller
mkdir -p /var/run/nfp
PIDFILE=/var/run/nfp/nfp_pecan.pid
tmp_pidfile=$(tempfile -s .nfp_pecan.init)

@@ -62,18 +62,18 @@ class GenericConfigRpcManagerTestCase(base.BaseTestCase):

agent, sc = self._get_GenericConfigRpcManager_object()
arg_dict = {'context': self.fo.context,
'resource_data': self.fo.kwargs}
'resource_data': self.fo._fake_resource_data()}
with mock.patch.object(
sc, 'new_event', return_value='foo') as mock_sc_event, (
mock.patch.object(sc, 'post_event')) as mock_sc_rpc_event:
call_method = getattr(agent, method.lower())

call_method(self.fo.context, self.fo.kwargs)
call_method(self.fo.context, self.fo._fake_resource_data())

if 'HEALTHMONITOR' in method:
mock_sc_event.assert_called_with(id=method,
data=arg_dict,
key=self.fo.kwargs['vmid'])
key=self.fo.vmid)
else:
mock_sc_event.assert_called_with(id=method,
data=arg_dict, key=None)
@@ -229,10 +229,10 @@ class GenericConfigEventHandlerTestCase(base.BaseTestCase):
mock_delete_src_routes.assert_called_with(
self.fo.context, resource_data)
elif const.EVENT_CONFIGURE_HEALTHMONITOR in ev.id:
if periodicity == const.EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY:
if periodicity == const.INITIAL:
mock_hm_poll_event.assert_called_with(
ev, max_times=(
const.EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY))
ev,
max_times=const.EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY)
elif periodicity == const.FOREVER:
mock_hm_poll_event.assert_called_with(ev)
elif ev.id == const.EVENT_CLEAR_HEALTHMONITOR:
@@ -250,7 +250,7 @@ class GenericConfigEventHandlerTestCase(base.BaseTestCase):

"""

agent, sc = self._get_GenericConfigEventHandler_object()
agent, _ = self._get_GenericConfigEventHandler_object()
driver = mock.Mock()

with mock.patch.object(
@@ -334,7 +334,8 @@ class GenericConfigEventHandlerTestCase(base.BaseTestCase):
"""

ev = fo.FakeEventGenericConfig()
ev.data['resource_data'].update({'periodicity': const.FOREVER})
ev.data['resource_data']['nfds'][0].update(
{'periodicity': const.FOREVER})
ev.id = 'CONFIGURE_HEALTHMONITOR forever'
self._test_handle_event(ev)


@@ -124,7 +124,8 @@ class FwGenericConfigDriverTestCase(base.BaseTestCase):
self.driver.configure_routes(self.fo.context, self.kwargs)

data = list()
data.append(self.fo.data_for_add_src_route)
data.append(self.fo.data_for_add_src_route[0])
data.append(self.fo.data_for_add_src_route[1])
data = jsonutils.dumps(data)
mock_post.assert_called_with(
self.fo.get_url_for_api('add_src_route'),
@@ -146,7 +147,8 @@ class FwGenericConfigDriverTestCase(base.BaseTestCase):
self.fo.context, self.kwargs)

data = list()
data.append(self.fo.data_for_del_src_route)
data.append(self.fo.data_for_del_src_route[0])
data.append(self.fo.data_for_del_src_route[1])
data = jsonutils.dumps(data)
mock_delete.assert_called_with(
self.fo.get_url_for_api('del_src_route'),
@@ -186,7 +188,7 @@ class FwaasDriverTestCase(base.BaseTestCase):
mock.patch.object(
self.resp, 'json', return_value=self.fake_resp_dict)):
mock_post.configure_mock(status_code=200)
self.driver.create_firewall(self.fo.context,
self.driver.create_firewall(self.fo.firewall_api_context(),
self.fo.firewall, self.fo.host)
mock_post.assert_called_with(self.fo.get_url_for_api('config_fw'),
data=self.firewall,
@@ -204,7 +206,7 @@ class FwaasDriverTestCase(base.BaseTestCase):
requests, 'put', return_value=self.resp) as mock_put, (
mock.patch.object(
self.resp, 'json', return_value=self.fake_resp_dict)):
self.driver.update_firewall(self.fo.context,
self.driver.update_firewall(self.fo.firewall_api_context(),
self.fo.firewall, self.fo.host)
mock_put.assert_called_with(self.fo.get_url_for_api('update_fw'),
data=self.firewall,
@@ -222,7 +224,7 @@ class FwaasDriverTestCase(base.BaseTestCase):
requests, 'delete', return_value=self.resp) as mock_delete, (
mock.patch.object(
self.resp, 'json', return_value=self.fake_resp_dict)):
self.driver.delete_firewall(self.fo.context,
self.driver.delete_firewall(self.fo.firewall_api_context(),
self.fo.firewall, self.fo.host)
mock_delete.assert_called_with(
self.fo.get_url_for_api('delete_fw'),

@@ -98,7 +98,7 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):

mock_request.status_code = 200
if method_name == 'DELETE_VIP':
driver.delete_vip(self.fo.vip, self.fo.context)
driver.delete_vip(self.fo.vip, self.fo.lb_api_context())
mock_request.assert_called_with(
'DELETE',
data=None,
@@ -106,7 +106,7 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
timeout=self.data.timeout,
url=self.data.delete_vip_url)
elif method_name == 'CREATE_VIP':
driver.create_vip(self.fo.vip, self.fo.context)
driver.create_vip(self.fo.vip, self.fo.lb_api_context())
data = jsonutils.dumps(self.data.create_vip_data)
mock_request.assert_called_with(
'POST',
@@ -120,7 +120,7 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
driver.update_vip(
self.fo.old_vip,
self.fo.vip,
self.fo.context)
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_vip_data)
mock_request.assert_called_with(
'PUT',
@@ -129,14 +129,14 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
timeout=self.data.timeout,
url=self.data.update_vip_url)
elif method_name == 'CREATE_POOL':
driver.create_pool(self.fo.pool, self.fo.context)
driver.create_pool(self.fo.pool, self.fo.lb_api_context())
elif method_name == 'DELETE_POOL':
driver.delete_pool(self.fo.pool, self.fo.context)
driver.delete_pool(self.fo.pool, self.fo.lb_api_context())
elif method_name == 'UPDATE_POOL':
driver.update_pool(
self.fo.old_pool,
self.fo.pool,
self.fo.context)
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_pool_data)
mock_request.assert_called_with(
'PUT',
@@ -145,7 +145,8 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
timeout=self.data.timeout,
url=self.data.update_pool_url)
elif method_name == 'CREATE_MEMBER':
driver.create_member(self.fo.member[0], self.fo.context)
driver.create_member(self.fo.member[0],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.create_member_data)
mock_request.assert_called_with(
'PUT',
@@ -154,7 +155,8 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
timeout=self.data.timeout,
url=self.data.create_member_url)
elif method_name == 'DELETE_MEMBER':
driver.delete_member(self.fo.member[0], self.fo.context)
driver.delete_member(self.fo.member[0],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.delete_member_data)
mock_request.assert_called_with(
'PUT',
@@ -166,7 +168,7 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
driver.update_member(
self.fo.old_member[0],
self.fo.member[0],
self.fo.context)
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_member_data)
mock_request.assert_called_with(
'PUT',
@@ -177,7 +179,7 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
elif method_name == 'CREATE_POOL_HEALTH_MONITOR':
driver.create_pool_health_monitor(
self.fo.hm[0], self.fo._get_pool_object()[0]['id'],
self.fo.context)
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.create_hm_data)
mock_request.assert_called_with(
'PUT',
@@ -188,7 +190,7 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
elif method_name == 'DELETE_POOL_HEALTH_MONITOR':
driver.delete_pool_health_monitor(
self.fo.hm[0], self.fo._get_pool_object()[0]['id'],
self.fo.context)
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.delete_hm_data)
mock_request.assert_called_with(
'PUT',
@@ -200,7 +202,7 @@ class HaproxyOnVmDriverTestCase(base.BaseTestCase):
driver.update_pool_health_monitor(
self.fo.old_hm[0],
self.fo.hm[0], self.fo._get_pool_object()[0]['id'],
self.fo.context)
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_hm_data)
mock_request.assert_called_with(
'PUT',

@@ -277,8 +277,8 @@ class VPNSvcValidatorTestCase(base.BaseTestCase):
"""

context = self.test_dict.make_service_context()
svc = self.test_dict._create_vpnservice_obj()['resource']
description = str(svc['description'])
svc = self.test_dict._create_vpnservice_obj()
description = str(svc['resource']['description'])
description = description.split(';')
description[1] = 'tunnel_local_cidr=12.0.6.0/24'
description = ";".join(description)
@@ -299,7 +299,7 @@ class VPNSvcValidatorTestCase(base.BaseTestCase):
with mock.patch.object(self.plugin_rpc, "update_status") as mock_valid:
self.valid_obj.validate(
context,
self.test_dict._create_vpnservice_obj()['resource'])
self.test_dict._create_vpnservice_obj())
mock_valid.assert_called_with(
context,
self.test_dict.vpn_vpnsvc_active)

@@ -1,178 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import gbpservice.contrib.nfp.configurator.lib.schema as schema
import gbpservice.contrib.nfp.configurator.lib.schema_validator as sv
from neutron.tests import base


class SchemaResources(object):
"""SchemaResources is a helper class which contains all the dummy resources
needed for TestSchemaValidator class
"""
resource_healthmonitor = 'healthmonitor'
resource_interfaces = 'interfaces'
resource_routes = 'routes'

request_data = {'info': {'context': "",
'service_type': "",
'service_vendor': ""},
'config': [{'resource': resource_healthmonitor,
'resource_data': {}
}]
}

request_data_info = {'context': "",
'service_type': "",
'service_vendor': ""}

request_data_config = {'resource': resource_healthmonitor,
'resource_data': {}
}

interfaces = {'mgmt_ip': '127.0.0.1',
'provider_ip': '11.0.0.4',
'provider_cidr': '11.0.0.0/24',
'provider_interface_index': '2',
'stitching_ip': '33.0.0.4',
'stitching_cidr': '33.0.0.0/24',
'stitching_interface_index': '3',
'provider_mac': 'e1:6d:af:23:b8:91',
'stitching_mac': 'e1:6d:af:23:b8:11',
}

routes = {'mgmt_ip': '127.0.0.1',
'source_cidrs': '11.0.0.0/24',
'destination_cidr': '22.0.0.0/24',
'gateway_ip': '11.0.0.1',
'provider_mac': 'e1:6d:af:23:b8:91',
'provider_interface_index': '2',
}

healthmonitor = {'vmid': '6350c0fd-07f8-46ff-b797-62acd2371234',
'mgmt_ip': '127.0.0.1',
'periodicity': 'initial'
}


class TestSchemaValidator(base.BaseTestCase):
"""TestSchemaValidator is a test class to test schema_validator.py using
unittest framework
"""

def __init__(self, *args, **kwargs):
super(TestSchemaValidator, self).__init__(*args, **kwargs)
self.sv = sv.SchemaValidator()
self.sr = SchemaResources()

def make_request_data(self, resource, kwargs):
"""Prepares request data

:param resource - resource_name
:param kwargs - kwargs

Returns: request_data

"""

request_data = {'info': {'context': "",
'service_type': "",
'service_vendor': ""},
'config': [{'resource': resource,
'resource_data': kwargs
}]
}
return request_data

def test_validate_schema_for_request_data(self):
"""Test case to test validate_schema() of schema_validator.py for
'request_data' schema
"""
result = self.sv.validate_schema(self.sr.request_data,
schema.request_data)
self.assertTrue(result)

def test_validate_schema_for_request_data_info(self):
"""Test case to test validate_schema() of schema_validator.py for
'request_data_info' schema
"""
result = self.sv.validate_schema(self.sr.request_data_info,
schema.request_data_info)
self.assertTrue(result)

def test_validate_schema_for_request_data_config(self):
"""Test case to test validate_schema() of schema_validator.py for
'request_data_config' schema
"""
result = self.sv.validate_schema(self.sr.request_data_config,
schema.request_data_config)
self.assertTrue(result)

def test_validate_schema_for_interfaces(self):
"""Test case to test validate_schema() of schema_validator.py for
'interfaces' schema
"""
result = self.sv.validate_schema(self.sr.interfaces,
schema.interfaces)
self.assertTrue(result)

def test_validate_schema_for_routes(self):
"""Test case to test validate_schema() of schema_validator.py for
'routes' schema
"""
result = self.sv.validate_schema(self.sr.routes,
schema.routes)
self.assertTrue(result)

def test_validate_schema_for_healthmonitor(self):
"""Test case to test validate_schema() of schema_validator.py for
'healthmonitor' schema
"""
result = self.sv.validate_schema(self.sr.healthmonitor,
schema.healthmonitor)
self.assertTrue(result)

def test_decode_for_interfaces(self):
"""Test case to test decode() of schema_validator.py for request_data
with resource 'interfaces'
"""
request_data = self.make_request_data(self.sr.resource_interfaces,
self.sr.interfaces)
result = self.sv.decode(request_data, True)
self.assertTrue(result)

def test_decode_for_routes(self):
"""Test case to test decode() of schema_validator.py for request_data
with resource 'routes'
"""
request_data = self.make_request_data(self.sr.resource_routes,
self.sr.routes)
result = self.sv.decode(request_data, True)
self.assertTrue(result)

def test_decode_for_healthmonitor(self):
"""Test case to test decode() of schema_validator.py for request_data
with resource 'healthmonitor'
"""
request_data = self.make_request_data(self.sr.resource_healthmonitor,
self.sr.healthmonitor)
result = self.sv.decode(request_data, True)
self.assertTrue(result)

def test_decode_for_neutron_apis(self):
"""Test case to test decode() of schema_validator.py for *aaS apis
"""
request_data = self.make_request_data('firewall',
{})
request_data['info']['service_type'] = 'firewall'
result = self.sv.decode(request_data, False)
self.assertTrue(result)
@@ -60,7 +60,7 @@ class ConfiguratorRpcManagerTestCase(base.BaseTestCase):

"""

sc, rpc_mgr = self._get_ConfiguratorRpcManager_object()
_, rpc_mgr = self._get_ConfiguratorRpcManager_object()
agent = mock.Mock()

request_data = {'batch': {
@@ -81,10 +81,10 @@ class ConfiguratorRpcManagerTestCase(base.BaseTestCase):
else self.fo.fake_request_data_generic_single()))}
}
if batch:
request_data_actual, request_data_expected = (
request_data_actual, _ = (
request_data['batch'].values())
else:
request_data_actual, request_data_expected = (
request_data_actual, _ = (
request_data['single'].values())

with mock.patch.object(rpc_mgr,
@@ -99,19 +99,6 @@ class ConfiguratorRpcManagerTestCase(base.BaseTestCase):
rpc_mgr.delete_network_function_device_config(
self.fo.context, request_data_actual)

context = request_data_expected['info']['context']

agent_info = {}
agent_info.update(
{'resource': request_data_expected['config'][0][
'resource'],
'resource_type': request_data_expected['info'][
'service_type'],
'service_vendor': request_data_expected['info'][
'service_vendor'],
'context': context,
'notification_data': {}
})
notification_data = dict()
sa_req_list = self.fo.fake_sa_req_list()

@@ -141,7 +128,7 @@ class ConfiguratorRpcManagerTestCase(base.BaseTestCase):

"""

sc, rpc_mgr = self._get_ConfiguratorRpcManager_object()
_, rpc_mgr = self._get_ConfiguratorRpcManager_object()
agent = mock.Mock()
method = {'CREATE': 'create_network_function_config',
'UPDATE': 'update_network_function_config',
@@ -153,8 +140,7 @@ class ConfiguratorRpcManagerTestCase(base.BaseTestCase):
mock.patch.object(agent, 'process_request')) as mock_request:

getattr(rpc_mgr, method[operation.split('_')[0]])(
self.fo.fw_context,
request_data)
self.fo.firewall_api_context(), request_data)

notification_data = dict()
data = self.fo.fake_sa_req_list_fw()

@@ -20,31 +20,24 @@ class FakeObjects(object):
empty_dict = {}
context = 'APIcontext'
neutron_context = {'neutron context for *aaS': {}}
fw_context = {
'agent_info': {
'resource': 'firewall',
'service_vendor': 'vyos',
'context': {'requester': 'device_orch',
'logging_context': {}},
'resource_type': 'firewall'},
'notification_data': {}, 'service_info': {},
'resource': 'firewall'}
firewall = 'firewall'
host = 'host'
conf = 'conf'
kwargs = {'vmid': 'vmid'}
vmid = 'b238e3f12fb64ebcbda2b3330700bf00'
rpcmgr = 'rpcmgr'
drivers = 'drivers'
provider_interface_position = 'provider_interface_position'
data_for_interface = dict(provider_mac="00:0a:95:9d:68:16",
stitching_mac="00:0a:95:9d:68:16")
data_for_add_src_route = {'source_cidr': "1.2.3.4/24",
'gateway_ip': "1.2.3.4"}
data_for_del_src_route = {'source_cidr': '1.2.3.4/24'}
data_for_interface = dict(provider_mac="fa:16:3e:d9:4c:33",
stitching_mac="fa:16:3e:da:ca:4d")
data_for_add_src_route = [{'source_cidr': "11.0.1.0/24",
'gateway_ip': "192.168.0.1"},
{'source_cidr': "192.168.0.0/28",
'gateway_ip': "192.168.0.1"}]
data_for_del_src_route = [{'source_cidr': '11.0.1.0/24'},
{'source_cidr': '192.168.0.0/28'}]
timeout = 180

def get_url_for_api(self, api):
url = 'http://172.24.4.5:8888/'
url = 'http://11.0.0.37:8888/'
api_url_map = {
'log_forward': 'configure-rsyslog-as-client',
'add_static_ip': 'add_static_ip',
@@ -65,12 +58,46 @@ class FakeObjects(object):
return dict(
provider_ip="11.0.1.1",
provider_cidr="11.0.1.0/24",
provider_mac="00:0a:95:9d:68:16",
provider_mac="fa:16:3e:d9:4c:33",
stitching_ip="192.168.0.3",
stitching_cidr="192.168.0.0/28",
stitching_mac="00:0a:95:9d:68:16",
provider_interface_position="2",
stitching_interface_position="3")
stitching_mac="fa:16:3e:da:ca:4d",
monitoring_cidr=None,
monitoring_ip=None,
monitoring_mac=None)

def firewall_api_context(self):
fw_context = {
'agent_info': {
'resource': 'firewall',
'service_vendor': 'vyos',
'context': {'requester': 'device_orch',
'logging_context': {}},
'resource_type': 'firewall'},
'notification_data': {}, 'service_info': {},
"resource_data": {
"forward_route": True,
"tenant_id": "ac33b4c2d80f485a86ea515c09c74949",
"nfs": [{
"role": "master",
"svc_mgmt_fixed_ip": "11.0.0.37",
"networks": [
{"cidr": "11.0.1.0/24",
"gw_ip": "",
"type": "provider",
"ports": [{
"mac": "fa:16:3e:d9:4c:33",
"fixed_ip": "11.0.1.1",
"floating_ip": ""}]},
{"cidr": "192.168.0.0/28",
"gw_ip": "192.168.0.1 ",
"type": "stitching",
"ports": [{
"mac": "fa:16:3e:da:ca:4d",
"fixed_ip": "192.168.0.3",
"floating_ip": ""}]}
]}]}}
return fw_context
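
As an illustrative cross-check (not part of the commit), the nested 'resource_data'/'nfs' context above is exactly the service-configuration shape the new data_parser module flattens:

# Illustrative only: DataParser flattens the fixture returned above.
parsed = DataParser().parse_data('firewall',
                                 FakeObjects().firewall_api_context())
# e.g. parsed['mgmt_ip'] == '11.0.0.37',
#      parsed['provider_cidr'] == '11.0.1.0/24'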

def fake_request_data_generic_bulk(self):
""" A sample bulk request data for generic APIs
@@ -92,27 +119,51 @@ class FakeObjects(object):
"config": [{
"resource": "interfaces",
"resource_data": {
"stitching_interface_index": 3,
"stitching_mac": "fa:16:3e:da:ca:4d",
"provider_ip": "11.0.1.1",
"mgmt_ip": "11.0.0.37",
"provider_interface_index": 2,
"stitching_cidr": "192.168.0.0/28",
"provider_mac": "fa:16:3e:d9:4c:33",
"provider_cidr": "11.0.1.0/24",
"stitching_ip": "192.168.0.3",
}
}, {
"forward_route": True,
"tenant_id": "ac33b4c2d80f485a86ea515c09c74949",
"nfds": [{
"role": "master",
"svc_mgmt_fixed_ip": "11.0.0.37",
"networks": [
{"cidr": "11.0.1.0/24",
"gw_ip": "",
"type": "provider",
"ports": [{
"mac": "fa:16:3e:d9:4c:33",
"fixed_ip": "11.0.1.1",
"floating_ip": ""}]},
{"cidr": "192.168.0.0/28",
"gw_ip": "192.168.0.1 ",
"type": "stitching",
"ports": [{
"mac": "fa:16:3e:da:ca:4d",
"fixed_ip": "192.168.0.3",
"floating_ip": ""}]}
]}]}},
{
"resource": "routes",
"resource_data": {
"provider_interface_index": 2,
"provider_mac": "fa:16:3e:d9:4c:33",
"gateway_ip": "192.168.0.1",
"destination_cidr": "192.168.0.0/28",
"mgmt_ip": "11.0.0.37",
"source_cidrs": ["11.0.1.0/24", "192.168.0.0/28"]
}
}]
"forward_route": True,
"tenant_id": "ac33b4c2d80f485a86ea515c09c74949",
"nfds": [{
"role": "master",
"svc_mgmt_fixed_ip": "11.0.0.37",
"networks": [
{"cidr": "11.0.1.0/24",
"gw_ip": "",
"type": "provider",
"ports": [{
"mac": "fa:16:3e:d9:4c:33",
"fixed_ip": "11.0.1.1",
"floating_ip": ""}]},
{"cidr": "192.168.0.0/28",
"gw_ip": "192.168.0.1 ",
"type": "stitching",
"ports": [{
"mac": "fa:16:3e:da:ca:4d",
"fixed_ip": "192.168.0.3",
"floating_ip": ""}]}
]}]}}]
}
return request_data

@@ -170,6 +221,7 @@ class FakeObjects(object):
request_data = [{
"agent_info": {
"service_vendor": "vyos",
"service_feature": "",
"resource": "firewall",
"context": {
"requester": "device_orch",
@@ -216,55 +268,86 @@ class FakeObjects(object):

"""

sa_req_list = [
{
"agent_info": {
"service_vendor": "vyos",
"resource": "interfaces",
"context": {
"requester": "device_orch",
"logging_context": {}
},
"resource_type": "firewall"
},
"method": "configure_interfaces",
"resource_data": {
"stitching_interface_index": 3,
"stitching_mac": "fa:16:3e:da:ca:4d",
"provider_ip": "11.0.1.1",
"mgmt_ip": "11.0.0.37",
"provider_interface_index": 2,
"stitching_cidr": "192.168.0.0/28",
"provider_mac": "fa:16:3e:d9:4c:33",
"provider_cidr": "11.0.1.0/24",
"stitching_ip": "192.168.0.3"
},
"is_generic_config": True
},
{
"agent_info": {
"service_vendor": "vyos",
"resource": "routes",
"context": {
"requester": "device_orch",
"logging_context": {}
},
"resource_type": "firewall"
},
"method": "configure_routes",
"resource_data": {
"mgmt_ip": "11.0.0.37",
"gateway_ip": "192.168.0.1",
"provider_mac": "fa:16:3e:d9:4c:33",
"destination_cidr": "192.168.0.0/28",
"provider_interface_index": 2,
"source_cidrs": [
"11.0.1.0/24",
"192.168.0.0/28"
]
},
"is_generic_config": True
sa_req_list = [{
'agent_info': {
'service_vendor': 'vyos',
'service_feature': '',
'resource': 'interfaces',
'resource_type': 'firewall',
'context': {
'logging_context': {},
'requester': 'device_orch'
}
},
'method': 'configure_interfaces',
'resource_data': {
'forward_route': True,
'tenant_id': 'ac33b4c2d80f485a86ea515c09c74949',
'nfds': [{
'role': 'master',
'networks': [{
'cidr': '11.0.1.0/24',
'gw_ip': '',
'type': 'provider',
'ports': [{
'fixed_ip': '11.0.1.1',
'mac': 'fa:16:3e:d9:4c:33',
'floating_ip': ''
}]
}, {
'cidr': '192.168.0.0/28',
'gw_ip': '192.168.0.1 ',
'type': 'stitching',
'ports': [{
'mac': 'fa:16:3e:da:ca:4d',
'floating_ip': '',
'fixed_ip': '192.168.0.3'
}]
}],
'svc_mgmt_fixed_ip': '11.0.0.37'
}]
},
'is_generic_config': True
}, {
'agent_info': {
'service_vendor': 'vyos',
'service_feature': '',
'resource': 'routes',
'resource_type': 'firewall',
'context': {
'logging_context': {},
'requester': 'device_orch'
}
},
'method': 'configure_routes',
'resource_data': {
'forward_route': True,
'tenant_id': 'ac33b4c2d80f485a86ea515c09c74949',
'nfds': [{
'role': 'master',
'networks': [{
'cidr': '11.0.1.0/24',
'gw_ip': '',
'type': 'provider',
'ports': [{
'fixed_ip': '11.0.1.1',
'mac': 'fa:16:3e:d9:4c:33',
'floating_ip': ''
}]
}, {
'cidr': '192.168.0.0/28',
'gw_ip': '192.168.0.1 ',
'type': 'stitching',
'ports': [{
'mac': 'fa:16:3e:da:ca:4d',
'floating_ip': '',
'fixed_ip': '192.168.0.3'
}]
}],
'svc_mgmt_fixed_ip': '11.0.0.37'
}]
},
'is_generic_config': True}]

return sa_req_list

@@ -276,21 +359,35 @@ class FakeObjects(object):
"""

resource_data = {
'fake_resource_data': 'data',
'forward_route': True,
'tenant_id': 'ac33b4c2d80f485a86ea515c09c74949',
'fail_count': 0,
'nfds': [{
'role': 'master',
'networks': [{
'cidr': '11.0.1.0/24',
'gw_ip': '',
'type': 'provider',
'ports': [{
'fixed_ip': '11.0.1.1',
'mac': 'fa:16:3e:d9:4c:33',
'floating_ip': ''
}]
}, {
'cidr': '192.168.0.0/28',
'gw_ip': '192.168.0.1',
'type': 'stitching',
'ports': [{
'mac': 'fa:16:3e:da:ca:4d',
'floating_ip': '',
'fixed_ip': '192.168.0.3'
}]
}],
'svc_mgmt_fixed_ip': '11.0.0.37',
'periodicity': 'initial',
'provider_ip': '11.0.1.1',
'provider_cidr': '11.0.1.0/24',
'provider_mac': '00:0a:95:9d:68:16',
'stitching_ip': '192.168.0.3',
'stitching_cidr': '192.168.0.0/28',
'destination_cidr': '192.168.0.0/28',
'stitching_mac': '00:0a:95:9d:68:16',
'provider_interface_index': '2',
'stitching_interface_index': '3',
'mgmt_ip': '172.24.4.5',
'source_cidrs': ['1.2.3.4/24'],
'gateway_ip': '1.2.3.4'
}
'vmid': 'b238e3f12fb64ebcbda2b3330700bf00'
}]}

return resource_data

def _fake_firewall_obj(self):

@@ -65,6 +65,39 @@ class FakeObjects(object):
'subnets': self._get_subnets_object()}}
return context_logical_device

def lb_api_context(self):
context = {
'agent_info': {
'resource': 'firewall',
'service_vendor': 'vyos',
'context': {'requester': 'device_orch',
'logging_context': {}},
'resource_type': 'firewall'},
'notification_data': {}, 'service_info': {},
"resource_data": {
"forward_route": True,
"tenant_id": "ac33b4c2d80f485a86ea515c09c74949",
"nfs": [{
"role": "master",
"svc_mgmt_fixed_ip": "11.0.0.37",
"networks": [
{"cidr": "11.0.1.0/24",
"gw_ip": "",
"type": "provider",
"ports": [{
"mac": "fa:16:3e:d9:4c:33",
"fixed_ip": "11.0.1.1",
"floating_ip": ""}]},
{"cidr": "192.168.0.0/28",
"gw_ip": "192.168.0.1 ",
"type": "stitching",
"ports": [{
"mac": "fa:16:3e:da:ca:4d",
"fixed_ip": "192.168.0.3",
"floating_ip": ""}]}
]}]}}
return context

def get_request_data_for_vip(self):
"""Returns request data needed for create_vip method.

@@ -420,6 +453,7 @@ class FakeEvent(object):
'context': {'notification_data': {},
'resource': 'context_resource',
'agent_info': {'service_vendor': '',
'service_feature': '',
'context': {},
'resource': ''
}
@@ -462,7 +496,7 @@ class AssertionData(object):
"mode": "tcp",
"default_backend":
"bck:6350c0fd-07f8-46ff-b797-62acd23760de",
"provider_interface_mac": "aa:bb:cc:dd:ee:ff"
"provider_interface_mac": "fa:16:3e:d9:4c:33"
}
}

@@ -474,7 +508,7 @@ class AssertionData(object):
"mode": "tcp",
"default_backend":
"bck:6350c0fd-07f8-46ff-b797-62acd23760de",
"provider_interface_mac": "aa:bb:cc:dd:ee:ff"
"provider_interface_mac": "fa:16:3e:d9:4c:33"
}

update_vip_url = ('http://192.168.100.149:1234/frontend/frnt:'

@@ -70,8 +70,11 @@ class VPNTestData(object):
self.data_for_interface = {"stitching_mac": "00:0a:95:9d:68:25",
"provider_mac": "00:0a:95:9d:68:16"}
self.data_for_add_src_route = [{"source_cidr": "1.2.3.4/24",
"gateway_ip": "1.2.3.4/24"},
{"source_cidr": "1.2.3.4/24",
"gateway_ip": "1.2.3.4/24"}]
self.data_for_del_src_route = [{"source_cidr": "1.2.3.4/24"}]
self.data_for_del_src_route = [{"source_cidr": "1.2.3.4/24"},
{"source_cidr": "1.2.3.4/24"}]
self.conn_id = 'ac3a0e54-cdf2-4ea7-ac2f-7c0225ab9af6'
self.data_ = {"local_cidr": "11.0.6.0/24",
"peer_address": "1.103.2.2",
@@ -358,8 +361,28 @@ class VPNTestData(object):
'user_name': u'neutron',
'agent_info': {'context': {},
'resource': {}},

}
"resource_data": {
"forward_route": True,
"tenant_id": "ac33b4c2d80f485a86ea515c09c74949",
"nfs": [{
"role": "master",
"svc_mgmt_fixed_ip": "192.168.20.75",
"networks": [
{"cidr": "11.0.2.0/24",
"gw_ip": "",
"type": "provider",
"ports": [{
"mac": "fa:16:3e:d9:4c:33",
"fixed_ip": "11.0.1.1",
"floating_ip": ""}]},
{"cidr": "192.168.0.0/28",
"gw_ip": "192.168.0.1 ",
"type": "stitching",
"ports": [{
"mac": "fa:16:3e:da:ca:4d",
"fixed_ip": "192.168.0.3",
"floating_ip": ""}]}
]}]}}

def make_service_context(self, operation_type=None):
'''
@@ -535,7 +558,38 @@ class VPNTestData(object):
A sample keyword arguments for configurator
Returns: resource_data
'''
resource_data = {'service_type': 'vpn',

resource_data = {
'forward_route': True,
'tenant_id': 'ac33b4c2d80f485a86ea515c09c74949',
'fail_count': 0,
'nfds': [{
'role': 'master',
'networks': [{
'cidr': '1.2.3.4/24',
'gw_ip': '',
'type': 'provider',
'ports': [{
'fixed_ip': '1.2.3.4/24',
'mac': '00:0a:95:9d:68:16',
'floating_ip': ''
}]
}, {
'cidr': '1.2.3.4/24',
'gw_ip': '1.2.3.4/24',
'type': 'stitching',
'ports': [{
'mac': '00:0a:95:9d:68:25',
'floating_ip': '',
'fixed_ip': '1.2.3.4/24'
}]
}],
'svc_mgmt_fixed_ip': '192.168.20.75',
'periodicity': 'initial',
'vmid': 'b238e3f12fb64ebcbda2b3330700bf00'
}]}

'''resource_data = {'service_type': 'vpn',
'vm_mgmt_ip': '192.168.20.75',
'mgmt_ip': '192.168.20.75',
'source_cidrs': ['1.2.3.4/24'],
@@ -552,7 +606,7 @@ class VPNTestData(object):
'provider_mac': '00:0a:95:9d:68:16',
'stitching_mac': '00:0a:95:9d:68:25',
'context': {'notification_data': 'hello'}
}
}'''
return resource_data


@@ -1009,9 +1009,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
raise VipNspNotSetonProvider()

provider = {
'pt': service_targets.get('provider_pt_objs', [None])[0],
'ptg': service_targets.get('provider_ptg', [None])[0],
'port': service_targets.get('provider_ports', [None])[0],
'pt': service_targets.get('provider_pt_objs', []),
'ptg': service_targets.get('provider_ptg', []),
'port': service_targets.get('provider_ports', []),
'subnet': service_targets.get('provider_subnet', None),
'port_model': nfp_constants.GBP_PORT,
'port_classification': nfp_constants.PROVIDER}
@@ -1021,11 +1021,11 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
consumer_ports = None

if service_targets.get('consumer_pt_objs'):
consumer_pt = service_targets.get('consumer_pt_objs')[0]
consumer_pt = service_targets.get('consumer_pt_objs')
if service_targets.get('consumer_ptg'):
consumer_ptg = service_targets.get('consumer_ptg')[0]
consumer_ptg = service_targets.get('consumer_ptg')
if service_targets.get('consumer_ports'):
consumer_ports = service_targets.get('consumer_ports')[0]
consumer_ports = service_targets.get('consumer_ports')

consumer = {
'pt': consumer_pt,

@@ -235,7 +235,8 @@ class Test_Process_Model(unittest.TestCase):
controller._manager.manager_run()
pids = controller._manager._resource_map.keys()
self.assertTrue(len(pids) == 2)
self.assertFalse(old_childs[0] in pids)
if pid not in old_childs:
self.assertFalse(old_childs[0] in pids)
self.assertTrue(old_childs[1] in pids)

def test_post_event_with_no_handler(self):

@@ -16,6 +16,7 @@ import unittest

from oslo_config import cfg

from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import exceptions
from gbpservice.nfp.orchestrator.drivers import (
orchestration_driver
@@ -225,7 +226,7 @@ class OrchestrationDriverTestCase(unittest.TestCase):
{'id': '4',
'port_model': 'neutron',
'port_classification': 'consumer'}],
'vendor_data': {},
'provider_metadata': {},
'token': str(pyuuid.uuid4()),
'tenant_id': str(pyuuid.uuid4())}

@@ -261,7 +262,7 @@ class OrchestrationDriverTestCase(unittest.TestCase):
'service_type': 'firewall',
'service_vendor': 'vyos',
'network_mode': 'gbp'},
'vendor_data': {},
'provider_metadata': {},
'ports': [{'id': '3',
'port_model': 'gbp',
'port_classification': 'provider'},
@@ -286,10 +287,11 @@ class OrchestrationDriverTestCase(unittest.TestCase):
'mgmt_ip_address': 'a.b.c.d'}

self.assertIsInstance(
driver.get_network_function_device_healthcheck_info(device_data),
driver.get_network_function_device_config(device_data,
nfp_constants.HEALTHMONITOR_RESOURCE),
dict, msg='')

def test_get_network_function_device_config_info(self):
def test_get_network_function_device_config(self):
driver = orchestration_driver.OrchestrationDriver(
cfg.CONF,
supports_device_sharing=True,
@@ -315,6 +317,7 @@ class OrchestrationDriverTestCase(unittest.TestCase):
'port_model': 'gbp',
'port_classification': 'provider'}]}

reply = driver.get_network_function_device_config_info(device_data)
reply = driver.get_network_function_device_config(device_data,
nfp_constants.GENERIC_CONFIG)
self.assertIsInstance(reply, dict, msg='')
self.assertTrue('config' in reply)

@@ -633,7 +633,9 @@ class DummyDictionaries(object):
network_function_details = {
'network_function': {
'status': 'ACTIVE',
'description': '',
'description': '\nuser_access_ip=\'192.168.203.12\';'
'fixed_ip=\'11.0.3.4\';'
'tunnel_local_cidr=\'11.0.3.0/24\'',
'config_policy_id': '57d6b523-ae89-41cd-9b63-9bfb054a20b6',
'tenant_id': 'ee27b1d0d7f04ac390ee7ec4b2fd5b13',
'network_function_instances': [

@@ -30,7 +30,10 @@ with mock.patch('oslo_config.cfg.CONF.register_opts') as opt:


class DummyController(object):
def event_complete(self, event):
def event_complete(self, event, result=None, return_value=None):
return

def new_event(self, id, data, key):
return


@@ -107,6 +110,10 @@ class HaproxyDummyDriver(object):
def get_network_function_device_status(self):
pass

def get_network_function_device_config(self, device, config,
is_delete=False):
pass


class DummyExtensionManager(object):
drivers = 'dummy-driver'
@@ -313,16 +320,13 @@ class DeviceOrchestratorTestCase(unittest.TestCase):

ndo_handler.configurator_rpc.create_network_function_device_config = (
mock.MagicMock(return_value=101))
orchestration_driver.get_network_function_device_healthcheck_info = (
orchestration_driver.get_network_function_device_config = (
mock.MagicMock(return_value=param_req))

status = 'HEALTH_CHECK_PENDING'
self.event.data['management'] = {'port': {'ip_address': '127.0.0.1'}}
self.event.desc = Desc()
orig_event_data = {}
orig_event_data['id'] = self.event.data['id']
orig_event_data['status'] = status
orig_event_data['status_description'] = ndo_handler.status_map[status]
orig_event_data['mgmt_ip_address'] = self.event.data[
'management']['port']['ip_address']
orig_event_data['service_details'] = self.event.data['service_details']
@@ -334,11 +338,9 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
orig_event_data['nfp_context'] = {'event_desc': event_desc,
'id': self.event.id,
'key': self.event.key}

orig_event_data['tenant_id'] = self.event.data[
'resource_owner_context']['admin_tenant_id']
ndo_handler.perform_health_check(self.event)
mock_update_nfd.assert_called_with(ndo_handler.db_session,
orig_event_data['id'],
orig_event_data)
ndo_handler.configurator_rpc.create_network_function_device_config.\
assert_called_with(orig_event_data, param_req)

@@ -384,11 +386,17 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
@mock.patch.object(nfpdb.NFPDbBase, 'update_network_function_device')
def test_create_device_configuration(self, mock_update_nfd):
ndo_handler = self._initialize_ndo_handler()
config_params = {'param1': 'value1', 'parama2': 'value2'}
orchestration_driver.get_create_network_function_device_config_info = (
config_params = {
'param1': 'value1',
'parama2': 'value2',
'config': [{'resource_data': {'forward_route': True}}]}
orchestration_driver.get_network_function_device_config = (
mock.MagicMock(return_value=config_params))
ndo_handler.configurator_rpc.create_network_function_device_config = (
mock.MagicMock(return_value=True))
ndo_handler._create_event = mock.MagicMock(return_value=True)
ndo_handler._controller.event_complete = mock.MagicMock(
return_value=None)
self.event.data['management'] = {'port': {'ip_address': '127.0.0.1'}}
self.event.data['provider']['port'] = {
'ip_address': '127.0.0.1', 'mac_address': 'xx:xx:xx:xx'}
|
||||
@@ -398,7 +406,10 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
||||
'cidr': '11.0.0.0/24', 'gateway_ip': '11.0.0.1'}
|
||||
self.event.data['consumer']['subnet'] = {
|
||||
'cidr': '11.0.0.0/24', 'gateway_ip': '11.0.0.1'}
|
||||
|
||||
self.event.data['network_function_device'][
|
||||
'mgmt_ip_address'] = self.event.data['management']['port'][
|
||||
'ip_address']
|
||||
self.event.data['service_chain_specs'] = []
|
||||
self.event.desc = Desc()
|
||||
device = {}
|
||||
device['mgmt_ip_address'] = self.event.data[
|
||||
@@ -416,6 +427,8 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
||||
'id': self.event.id, 'key': self.event.key,
|
||||
'network_function_device': self.event.data[
|
||||
'network_function_device']}
|
||||
device['tenant_id'] = self.event.data[
|
||||
'resource_owner_context']['admin_tenant_id']
|
||||
device.update({
|
||||
'provider_mac': self.event.data['provider']['port']['mac_address'],
|
||||
'network_function_instance_id': self.event.data[
|
||||
@@ -511,7 +524,7 @@ class DeviceOrchestratorTestCase(unittest.TestCase):
|
||||
ndo_handler = self._initialize_ndo_handler()
|
||||
config_params = {'param1': 'value1', 'parama2': 'value2'}
|
||||
self.event = DummyEvent(101, 'ACTIVE')
|
||||
orchestration_driver.get_network_function_device_config_info = (
|
||||
orchestration_driver.get_network_function_device_config = (
|
||||
mock.MagicMock(return_value=config_params))
|
||||
ndo_handler.configurator_rpc.delete_network_function_device_config = (
|
||||
mock.MagicMock(return_value=True))
|
||||
|
||||
@@ -68,9 +68,6 @@ class NFPNodeDriverTestCase(
|
||||
"ClientAddressPoolCidr": {
|
||||
"type": "string", "description": "Pool"
|
||||
},
|
||||
"ServiceDescription": {
|
||||
"type": "string", "description": "fip;tunnel_local-cidr"
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"SSLVPNConnection": {
|
||||
@@ -97,9 +94,6 @@ class NFPNodeDriverTestCase(
|
||||
"get_param": "Subnet"
|
||||
},
|
||||
"admin_state_up": 'true',
|
||||
"description": {
|
||||
"get_param": "ServiceDescription"
|
||||
},
|
||||
"name": "VPNService"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ nfp_modules_path=gbpservice.nfp.orchestrator.modules
|
||||
backend=rpc
|
||||
# supported service vendors
|
||||
supported_vendors=vyos,nfp,haproxy,haproxy_lbaasv2
|
||||
monitoring_ptg_l3policy_id=l3policy-id
|
||||
|
||||
[PROXY_AGENT]
|
||||
# Number of worker process to be spawned.
|
||||
|
||||
@@ -12,7 +12,9 @@
|
||||
|
||||
FIREWALL = 'firewall'
|
||||
LOADBALANCER = 'loadbalancer'
|
||||
LOADBALANCERV2 = 'loadbalancerv2'
|
||||
VPN = 'vpn'
|
||||
GENERIC_CONFIG = 'generic_config'
|
||||
|
||||
GBP_MODE = "gbp"
|
||||
NEUTRON_MODE = "neutron"
|
||||
@@ -26,6 +28,7 @@ GBP_NETWORK = "gbp_group"
|
||||
|
||||
PROVIDER = "provider"
|
||||
CONSUMER = "consumer"
|
||||
STITCHING = "stitching"
|
||||
MANAGEMENT = "management"
|
||||
MONITOR = "monitoring"
|
||||
GATEWAY_TYPE = "gateway"
|
||||
@@ -108,10 +111,10 @@ DELETE_USER_CONFIG_IN_PROGRESS_SPACING = 10
|
||||
DELETE_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20
|
||||
|
||||
CHECK_USER_CONFIG_COMPLETE_SPACING = 10
|
||||
CHECK_USER_CONFIG_COMPLETE_MAXRETRY = 40
|
||||
CHECK_USER_CONFIG_COMPLETE_MAXRETRY = 20
|
||||
|
||||
PULL_NOTIFICATIONS_SPACING = 10
|
||||
|
||||
#nfp_node_deriver_config
|
||||
SERVICE_CREATE_TIMEOUT = 600
|
||||
SERVICE_DELETE_TIMEOUT = 120
|
||||
SERVICE_DELETE_TIMEOUT = 300
|
||||
|
||||
153
gbpservice/nfp/common/data_formatter.py
Normal file
@@ -0,0 +1,153 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from gbpservice.nfp.common import constants as const

''' The generic data format that is common for device and
service configuration.

'''

NFP_DATA_FORMAT = {
'config': [{
'resource': '',
'resource_data': {
'tenant_id': '',
'nfds': [{
'role': 'master',
'svc_mgmt_fixed_ip': '',
'networks': [{
'type': '',
'cidr': '',
'gw_ip': '',
'ports': [{
'fixed_ip': '',
'floating_ip': '',
'mac': ''}] # ports
}] # networks
}] # nfds
} # resource_data
}] # config
} # NFP_DATA_FORMAT


def _fill_service_specific_info(nfd, device_data, **kwargs):
''' Service specific data formatting is done here.

:param nfd: A partly built nested dict from NFP_DATA_FORMAT
:param device_data: Device data dictionary
:param kwargs: service specific arguments

Returns: nfd dict

'''

network_schema = kwargs.get('network_schema')
resource_type = kwargs.get('resource_type')
provider_network = nfd['networks'][0]
provider_port = provider_network['ports'][0]

if resource_type == const.FIREWALL:
nfd['svc_mgmt_fixed_ip'] = device_data.get('vm_management_ip')
provider_port['mac'] = device_data.get('provider_ptg_info')[0]
elif resource_type == const.VPN:
stitching_network = nfd['networks'][1]
stitching_port = stitching_network['ports'][0]
nfd['svc_mgmt_fixed_ip'] = device_data.get('fip')
provider_network['cidr'] = device_data.get('tunnel_local_cidr')
stitching_port['fixed_ip'] = device_data.get('fixed_ip')
stitching_port['floating_ip'] = device_data.get('user_access_ip')
stitching_network['cidr'] = device_data.get('stitching_cidr')
stitching_network['gw_ip'] = device_data.get('stitching_gateway')
management_network = copy.deepcopy(network_schema)
management_network['type'] = const.MANAGEMENT
management_network['gw_ip'] = device_data.get('mgmt_gw_ip')
nfd['networks'].append(management_network)
elif resource_type in [const.LOADBALANCER, const.LOADBALANCERV2]:
nfd['svc_mgmt_fixed_ip'] = device_data.get('floating_ip')
provider_port['mac'] = device_data.get('provider_interface_mac')
return nfd


def get_network_function_info(device_data, resource_type):
''' Returns a generic configuration format for both device
and service configuration.

:param device_data: Data to be formatted. Type: dict
:param resource_type: (healthmonitor/device_config/firewall/
vpn/loadbalancer/loadbalancerv2)

Return: dictionary

'''

SERVICE_TYPES = [const.FIREWALL, const.VPN,
const.LOADBALANCER, const.LOADBALANCERV2]
config = copy.deepcopy(NFP_DATA_FORMAT)

mgmt_ip = device_data.get('mgmt_ip_address')
tenant_id = device_data.get('tenant_id')
provider_ip = device_data.get('provider_ip')
provider_mac = device_data.get('provider_mac')
provider_cidr = device_data.get('provider_cidr')
stitching_ip = device_data.get('consumer_ip')
stitching_mac = device_data.get('consumer_mac')
stitching_cidr = device_data.get('consumer_cidr')
stitching_gateway_ip = device_data.get('consumer_gateway_ip')

resource_data = config['config'][0]['resource_data']
resource_data['tenant_id'] = tenant_id

nfd = resource_data['nfds'][0]
nfd['role'] = 'master'
nfd['svc_mgmt_fixed_ip'] = mgmt_ip

if resource_type == const.HEALTHMONITOR_RESOURCE:
nfd['periodicity'] = 'initial'
nfd['vmid'] = device_data['id']
config['config'][0]['resource'] = const.HEALTHMONITOR_RESOURCE
return config

provider_network = nfd['networks'][0]
network_schema = copy.deepcopy(provider_network)
provider_network['type'] = const.PROVIDER
provider_network['cidr'] = provider_cidr
provider_network['gw_ip'] = ''
stitching_network = copy.deepcopy(network_schema)
stitching_network['type'] = const.STITCHING
stitching_network['cidr'] = stitching_cidr
stitching_network['gw_ip'] = stitching_gateway_ip
nfd['networks'].append(stitching_network)

provider_port = provider_network['ports'][0]
provider_port['fixed_ip'] = provider_ip
provider_port['floating_ip'] = ''
provider_port['mac'] = provider_mac
stitching_port = stitching_network['ports'][0]
stitching_port['fixed_ip'] = stitching_ip
stitching_port['floating_ip'] = ''
stitching_port['mac'] = stitching_mac

if resource_type in SERVICE_TYPES:
nfd = _fill_service_specific_info(nfd, device_data,
network_schema=network_schema,
resource_type=resource_type)
resource_data['nfs'] = resource_data.pop('nfds')
return config['config'][0]['resource_data']

config['config'][0]['resource'] = const.INTERFACE_RESOURCE
config['config'].append(config['config'][0].copy())
config['config'][1]['resource'] = const.ROUTES_RESOURCE

return config
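
For illustration, a minimal usage sketch of the new formatter; everything below is a placeholder rather than part of this change, and the keys simply mirror the ones the function reads above.

# Hypothetical caller of data_formatter.get_network_function_info;
# all identifiers and addresses are made up for illustration.
from gbpservice.nfp.common import constants as const
from gbpservice.nfp.common import data_formatter as df

device_data = {
    'id': 'nfd-uuid',
    'tenant_id': 'tenant-uuid',
    'mgmt_ip_address': '192.168.20.5',
    'provider_ip': '11.0.3.4',
    'provider_mac': 'fa:16:3e:aa:bb:cc',
    'provider_cidr': '11.0.3.0/24',
    'consumer_ip': '12.0.0.4',
    'consumer_mac': 'fa:16:3e:dd:ee:ff',
    'consumer_cidr': '12.0.0.0/24',
    'consumer_gateway_ip': '12.0.0.1',
    'vm_management_ip': '192.168.20.5',
    'provider_ptg_info': ['fa:16:3e:aa:bb:cc'],
}

# Health monitor request: returns the full {'config': [...]} envelope.
hm_config = df.get_network_function_info(
    device_data, const.HEALTHMONITOR_RESOURCE)

# Service request (e.g. firewall): returns only resource_data, with
# 'nfds' renamed to 'nfs' by the service branch above.
fw_data = df.get_network_function_info(device_data, const.FIREWALL)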
55
gbpservice/nfp/common/utils.py
Normal file
@@ -0,0 +1,55 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from gbpservice.nfp.common import constants as nfp_constants

NEUTRON_ML2_CONF = "/etc/neutron/plugins/ml2/ml2_conf.ini"


def _parse_service_flavor_string(service_flavor_str):
service_details = {}
if ',' not in service_flavor_str:
service_details['device_type'] = 'nova'
service_details['service_vendor'] = service_flavor_str
else:
service_flavor_dict = dict(item.split('=') for item
in service_flavor_str.split(','))
service_details = {key.strip(): value.strip() for key, value
in service_flavor_dict.iteritems()}
return service_details


def _get_dict_desc_from_string(vpn_svc):
svc_desc = vpn_svc.split(";")
desc = {}
for ele in svc_desc:
s_ele = ele.split("=")
desc.update({s_ele[0]: s_ele[1]})
return desc


def get_vpn_description_from_nf(network_function):
str_description = network_function['description'].split('\n')[1]
description = _get_dict_desc_from_string(
str_description)
return description, str_description


def is_vpn_in_service_chain(sc_specs):
for spec in sc_specs:
nodes = spec['sc_nodes']
for node in nodes:
service_type = node['sc_service_profile']['service_type']
if service_type.lower() == nfp_constants.VPN:
return True
return False
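
Illustrative inputs and outputs for the parsing helpers above; the values are made up, the behavior follows the code as written.

# Vendor-only flavor string: device_type defaults to 'nova'.
_parse_service_flavor_string('vyos')
# => {'device_type': 'nova', 'service_vendor': 'vyos'}

# Comma-separated key=value flavor string.
_parse_service_flavor_string('service_vendor=vyos,device_type=nova')
# => {'service_vendor': 'vyos', 'device_type': 'nova'}

# Semicolon-delimited description, as stored on a VPN network function.
_get_dict_desc_from_string(
    "fip=192.168.20.5;tunnel_local_cidr=11.0.3.0/24;fixed_ip=11.0.3.4")
# => {'fip': '192.168.20.5', 'tunnel_local_cidr': '11.0.3.0/24',
#     'fixed_ip': '11.0.3.4'}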
@@ -62,15 +62,17 @@ class NFPGBPNetworkDriver(neutron_nd.NFPNeutronNetworkDriver):
return self.network_handler.get_policy_target_groups(token,
filters=filters)

def set_promiscuos_mode(self, token, port_id):
def set_promiscuos_mode(self, token, port_id, enable_port_security):
port_id = self.get_port_id(token, port_id)
#self.network_handler = openstack_driver.NeutronClient(self.config)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token, port_id)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token,
port_id, enable_port_security)
#self.network_handler = openstack_driver.GBPClient(self.config)

def set_promiscuos_mode_fast(self, token, port_id):
def set_promiscuos_mode_fast(self, token, port_id, enable_port_security):
#self.network_handler = openstack_driver.NeutronClient(self.config)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token, port_id)
super(NFPGBPNetworkDriver, self).set_promiscuos_mode(token,
port_id, enable_port_security)
#self.network_handler = openstack_driver.GBPClient(self.config)

def get_service_profile(self, token, service_profile_id):

@@ -34,5 +34,5 @@ class NFPNetworkDriverBase(object):
def get_port_details(self, token, port_id):
pass

def set_promiscuos_mode(self, token, port_id):
def set_promiscuos_mode(self, token, port_id, enable_port_security):
pass

@@ -21,6 +21,7 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
def __init__(self, config):
# self.network_handler = openstack_driver.NeutronClient(config)
self.neutron_client = openstack_driver.NeutronClient(config)
self.config = config

def setup_traffic_steering(self):
pass
@@ -72,12 +73,16 @@ class NFPNeutronNetworkDriver(ndb.NFPNetworkDriverBase):
cidr = subnet['subnet']['cidr']
gateway_ip = subnet['subnet']['gateway_ip']

return (ip, mac, cidr, gateway_ip)
return (ip, mac, cidr, gateway_ip, port, subnet)

def set_promiscuos_mode(self, token, port_id):
def set_promiscuos_mode(self, token, port_id, enable_port_security):
if not enable_port_security:
port_security = False
else:
port_security = True
self.neutron_client.update_port(token, port_id,
security_groups=[],
port_security_enabled=False)
port_security_enabled=port_security)

def get_service_profile(self, token, service_profile_id):
return {}
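
The effect of the new enable_port_security argument, condensed into a standalone sketch; the update_port call mirrors the hunk above, while the function name itself is illustrative.

# Clear the port's security groups; keep Neutron port security
# enabled only when the caller asks for it.
def set_promiscuous_sketch(neutron_client, token, port_id,
                           enable_port_security):
    neutron_client.update_port(token, port_id,
                               security_groups=[],
                               port_security_enabled=bool(
                                   enable_port_security))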
@@ -29,6 +29,7 @@ import yaml
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import utils
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import transport
from gbpservice.nfp.orchestrator.config_drivers.heat_client import HeatClient
@@ -71,7 +72,6 @@ cfg.CONF.register_opts(HEAT_DRIVER_OPTS,

SC_METADATA = ('{"sc_instance":"%s", "floating_ip": "%s", '
'"provider_interface_mac": "%s", '
'"standby_provider_interface_mac": "%s",'
'"network_function_id": "%s",'
'"service_vendor": "%s"}')

@@ -369,7 +369,7 @@ class HeatDriver(object):
provider_pt_id = provider_pt['id']

policy_target_info = {'cluster_id': ''}
vip_pt = self._get_vip_pt(auth_token, lb_vip['port_id'])
vip_pt = self._get_vip_pt(auth_token, lb_vip.get('port_id'))
if vip_pt:
self.gbp_client.update_policy_target(auth_token, vip_pt['id'],
policy_target_info)
@@ -836,17 +836,133 @@ class HeatDriver(object):
keys.append(key)
return keys

def _get_resource_desc(self, nfp_context, service_details):
# This function prepares the description corresponding to service_type
# with required parameters, which NCO sends to NFP controller
device_type = service_details['service_details']['device_type']
base_mode_support = (True if device_type == 'None'
else False)

network_function_id = nfp_context['network_function']['id']
service_chain_instance_id = service_details['servicechain_instance'][
'id']
consumer_port = service_details['consumer_port']
provider_port = service_details['provider_port']
mgmt_ip = service_details['mgmt_ip']

auth_token = nfp_context['resource_owner_context']['admin_token']
tenant_id = nfp_context['tenant_id']

service_type = service_details['service_details']['service_type']
service_vendor = service_details['service_details']['service_vendor']
nf_desc = ''

if not base_mode_support:
provider_port_mac = provider_port['mac_address']
provider_cidr = service_details['provider_subnet']['cidr']
else:
return

if service_type == pconst.LOADBALANCER:
nf_desc = str((SC_METADATA % (service_chain_instance_id,
mgmt_ip,
provider_port_mac,
network_function_id,
service_vendor)))
elif service_type == pconst.LOADBALANCERV2:
nf_desc = str((SC_METADATA % (service_chain_instance_id,
mgmt_ip,
provider_port_mac,
network_function_id,
service_vendor)))
elif service_type == pconst.FIREWALL:
firewall_desc = {'vm_management_ip': mgmt_ip,
'provider_ptg_info': [provider_port_mac],
'provider_cidr': provider_cidr,
'service_vendor': service_vendor,
'network_function_id': network_function_id}
nf_desc = str(firewall_desc)
elif service_type == pconst.VPN:
stitching_cidr = service_details['consumer_subnet']['cidr']
mgmt_gw_ip = self._get_management_gw_ip(auth_token)
if not mgmt_gw_ip:
return None

services_nsp = self.gbp_client.get_network_service_policies(
auth_token,
filters={'name': ['nfp_services_nsp']})
if not services_nsp:
fip_nsp = {
'network_service_policy': {
'name': 'nfp_services_nsp',
'description': 'nfp_implicit_resource',
'shared': False,
'tenant_id': tenant_id,
'network_service_params': [
{"type": "ip_pool", "value": "nat_pool",
"name": "vpn_svc_external_access"}]
}
}
nsp = self.gbp_client.create_network_service_policy(
auth_token, fip_nsp)
else:
nsp = services_nsp[0]

stitching_pts = self.gbp_client.get_policy_targets(
auth_token,
filters={'port_id': [consumer_port['id']]})
if not stitching_pts:
LOG.error(_LE("Policy target is not created for the "
"stitching port"))
return None
stitching_ptg_id = (
stitching_pts[0]['policy_target_group_id'])

try:
self.gbp_client.update_policy_target_group(
auth_token, stitching_ptg_id,
{'policy_target_group': {
'network_service_policy_id': nsp['id']}})
except Exception:
LOG.error(_LE("Problem in accessing external segment or "
"nat_pool; it seems they have not been created"))
return None

stitching_port_fip = self._get_consumer_fip(auth_token,
consumer_port['id'])
if not stitching_port_fip:
return None
desc = ('fip=' + mgmt_ip +
";tunnel_local_cidr=" +
provider_cidr + ";user_access_ip=" +
stitching_port_fip + ";fixed_ip=" +
consumer_port['fixed_ips'][0]['ip_address'] +
';service_vendor=' + service_vendor +
';stitching_cidr=' + stitching_cidr +
';stitching_gateway=' + service_details[
'consumer_subnet']['gateway_ip'] +
';mgmt_gw_ip=' + mgmt_gw_ip +
';network_function_id=' + network_function_id)
nf_desc = str(desc)

return nf_desc
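
The VPN description assembled here is the same semicolon-delimited key=value string that utils._get_dict_desc_from_string parses back from line two of the network function description. A round-trip sketch with made-up addresses:

# Round trip for the description format built above; addresses are
# placeholders. get_vpn_description_from_nf reads the second line of
# network_function['description'].
from gbpservice.nfp.common import utils

nf = {'description': '\n' + (
    'fip=192.168.20.5;tunnel_local_cidr=11.0.3.0/24;'
    'user_access_ip=172.24.4.10;fixed_ip=12.0.0.4;'
    'service_vendor=vyos;stitching_cidr=12.0.0.0/24;'
    'stitching_gateway=12.0.0.1;mgmt_gw_ip=192.168.20.1;'
    'network_function_id=nf-uuid')}

desc_dict, desc_str = utils.get_vpn_description_from_nf(nf)
assert desc_dict['tunnel_local_cidr'] == '11.0.3.0/24'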
def get_neutron_resource_description(self, nfp_context):
service_details = self.get_service_details_from_nfp_context(
nfp_context)

nf_desc = self._get_resource_desc(nfp_context, service_details)
return nf_desc

def _create_node_config_data(self, auth_token, tenant_id,
service_chain_node, service_chain_instance,
provider, provider_port, consumer,
consumer_port, network_function,
mgmt_ip, service_details):

nf_desc = None
common_desc = {'network_function_id': network_function['id']}
common_desc = {'network_function_id': str(network_function['id'])}

service_type = service_details['service_details']['service_type']
service_vendor = service_details['service_details']['service_vendor']
device_type = service_details['service_details']['device_type']
base_mode_support = (True if device_type == 'None'
else False)
@@ -881,13 +997,7 @@ class HeatDriver(object):
else 'properties')

if not base_mode_support:
provider_port_mac = provider_port['mac_address']
provider_cidr = service_details['provider_subnet']['cidr']
provider_subnet = service_details['provider_subnet']
else:
provider_port_mac = ''
provider_cidr = ''
standby_provider_port_mac = None

if service_type == pconst.LOADBALANCER:
self._generate_pool_members(
@@ -898,13 +1008,6 @@ class HeatDriver(object):
if not base_mode_support:
config_param_values[
'service_chain_metadata'] = str(common_desc)
nf_desc = str((SC_METADATA % (service_chain_instance['id'],
mgmt_ip,
provider_port_mac,
standby_provider_port_mac,
network_function['id'],
service_vendor)))

lb_pool_key = self._get_heat_resource_key(
stack_template[resources_key],
is_template_aws_version,
@@ -921,12 +1024,6 @@ class HeatDriver(object):
if not base_mode_support:
config_param_values[
'service_chain_metadata'] = str(common_desc)
nf_desc = str((SC_METADATA % (service_chain_instance['id'],
mgmt_ip,
provider_port_mac,
standby_provider_port_mac,
network_function['id'],
service_vendor)))

lb_loadbalancer_key = self._get_heat_resource_key(
stack_template[resources_key],
@@ -944,12 +1041,6 @@ class HeatDriver(object):
self._modify_fw_resources_name(
stack_template, provider, is_template_aws_version)
if not base_mode_support:
firewall_desc = {'vm_management_ip': mgmt_ip,
'provider_ptg_info': [provider_port_mac],
'provider_cidr': provider_cidr,
'service_vendor': service_vendor,
'network_function_id': network_function[
'id']}

fw_key = self._get_heat_resource_key(
stack_template[resources_key],
@@ -957,8 +1048,6 @@ class HeatDriver(object):
'OS::Neutron::Firewall')
stack_template[resources_key][fw_key][properties_key][
'description'] = str(common_desc)

nf_desc = str(firewall_desc)
elif service_type == pconst.VPN:
config_param_values['Subnet'] = (
provider_port['fixed_ips'][0]['subnet_id']
@@ -968,86 +1057,15 @@ class HeatDriver(object):
l3p = self.gbp_client.get_l3_policy(
auth_token, l2p['l3_policy_id'])
config_param_values['RouterId'] = l3p['routers'][0]
stitching_cidr = service_details['consumer_subnet']['cidr']
mgmt_gw_ip = self._get_management_gw_ip(auth_token)
if not mgmt_gw_ip:
return None, None

services_nsp = self.gbp_client.get_network_service_policies(
auth_token,
filters={'name': ['nfp_services_nsp']})
if not services_nsp:
fip_nsp = {
'network_service_policy': {
'name': 'nfp_services_nsp',
'description': 'nfp_implicit_resource',
'shared': False,
'tenant_id': tenant_id,
'network_service_params': [
{"type": "ip_pool", "value": "nat_pool",
"name": "vpn_svc_external_access"}]
}
}
nsp = self.gbp_client.create_network_service_policy(
auth_token, fip_nsp)
else:
nsp = services_nsp[0]
stitching_port_fip = self._get_consumer_fip(auth_token,
consumer_port['id'])
if not stitching_port_fip:
return None
if not base_mode_support:
stitching_pts = self.gbp_client.get_policy_targets(
auth_token,
filters={'port_id': [consumer_port['id']]})
if not stitching_pts:
LOG.error(_LE("Policy target is not created for the "
"stitching port"))
return None, None
stitching_ptg_id = (
stitching_pts[0]['policy_target_group_id'])
else:
stitching_ptg_id = consumer['id']
try:
self.gbp_client.update_policy_target_group(
auth_token, stitching_ptg_id,
{'policy_target_group': {
'network_service_policy_id': nsp['id']}})
except Exception:
LOG.error(_LE("Problem in accessing external segment or "
"nat_pool; it seems they have not been created"))
return None, None
stitching_port_fip = ""

if not base_mode_support:
floatingips = (
self.neutron_client.get_floating_ips(auth_token))
if not floatingips:
LOG.error(_LE("Floating IP for VPN Service has been "
"disassociated manually"))
return None, None

for fip in floatingips:
if consumer_port['id'] == fip['port_id']:
stitching_port_fip = fip['floating_ip_address']
break
if not stitching_port_fip:
LOG.error(_LE("Floating IP retrieval has failed."))
return None, None

try:
desc = ('fip=' + mgmt_ip +
";tunnel_local_cidr=" +
provider_cidr + ";user_access_ip=" +
stitching_port_fip + ";fixed_ip=" +
consumer_port['fixed_ips'][0]['ip_address'] +
';service_vendor=' + service_vendor +
';stitching_cidr=' + stitching_cidr +
';stitching_gateway=' + service_details[
'consumer_subnet']['gateway_ip'] +
';mgmt_gw_ip=' + mgmt_gw_ip +
';network_function_id=' + network_function['id'])
except Exception:
LOG.error(_LE("Problem in preparing description, some of "
"the fields might not have been initialized"))
return None, None
stack_params['ServiceDescription'] = desc
siteconn_keys = self._get_site_conn_keys(
stack_template[resources_key],
is_template_aws_version,
@@ -1060,14 +1078,14 @@ class HeatDriver(object):
stack_template[resources_key],
is_template_aws_version,
'OS::Neutron::VPNService')
vpn_description, _ = (
utils.get_vpn_description_from_nf(network_function))
vpnsvc_desc = {'fip': vpn_description['user_access_ip'],
'ip': vpn_description['fixed_ip'],
'cidr': vpn_description['tunnel_local_cidr']}
vpnsvc_desc.update(common_desc)
stack_template[resources_key][vpnservice_key][properties_key][
'description'] = str(common_desc)

nf_desc = str(desc)

if nf_desc:
network_function['description'] = network_function[
'description'] + '\n' + nf_desc
'description'] = str(vpnsvc_desc)

for parameter in stack_template.get(parameters_key) or []:
if parameter in config_param_values:
@@ -1078,6 +1096,24 @@ class HeatDriver(object):
{'stack_data': stack_template, 'params': stack_params})
return (stack_template, stack_params)

def _get_consumer_fip(self, token, consumer_port):
stitching_port_fip = None
floatingips = (
self.neutron_client.get_floating_ips(token))
if not floatingips:
LOG.error(_LE("Floating IP for VPN Service has been "
"disassociated manually"))
return None

for fip in floatingips:
if consumer_port == fip['port_id']:
stitching_port_fip = fip['floating_ip_address']
break
if not stitching_port_fip:
LOG.error(_LE("Floating IP retrieval has failed."))
return None
return stitching_port_fip

def _update_node_config(self, auth_token, tenant_id, service_profile,
service_chain_node, service_chain_instance,
provider, consumer_port, network_function,
@@ -1139,7 +1175,6 @@ class HeatDriver(object):
else:
provider_port_mac = ''
provider_cidr = ''
standby_provider_port_mac = None

service_vendor = service_details['service_vendor']
if service_type == pconst.LOADBALANCER:
@@ -1154,7 +1189,6 @@ class HeatDriver(object):
nf_desc = str((SC_METADATA % (service_chain_instance['id'],
mgmt_ip,
provider_port_mac,
standby_provider_port_mac,
network_function['id'],
service_vendor)))

@@ -1177,7 +1211,6 @@ class HeatDriver(object):
nf_desc = str((SC_METADATA % (service_chain_instance['id'],
mgmt_ip,
provider_port_mac,
standby_provider_port_mac,
network_function['id'],
service_vendor)))

@@ -1288,11 +1321,11 @@ class HeatDriver(object):
'gateway_ip'] +
';mgmt_gw_ip=' + mgmt_gw_ip +
';network_function_id=' + network_function['id'])
except Exception:
except Exception as e:
LOG.error(_LE("Problem in preparing description, some of "
"the fields might not have been initialized"))
"the fields might not have been initialized. "
"Error: %(error)s"), {'error': e})
return None, None
stack_params['ServiceDescription'] = desc
siteconn_keys = self._get_site_conn_keys(
stack_template[resources_key],
is_template_aws_version,
@@ -1630,12 +1663,11 @@ class HeatDriver(object):

def get_service_details_from_nfp_context(self, nfp_context):
network_function = nfp_context['network_function']
# network_function_instance = nfp_context['network_function_instance']
service_details = nfp_context['service_details']
mgmt_ip = nfp_context['management']['port']['ip_address']
mgmt_ip = ''
if nfp_context.get('network_function_device'):
mgmt_ip = nfp_context['network_function_device']['mgmt_ip_address']
config_policy_id = network_function['config_policy_id']
# service_id = network_function['service_id']
# service_chain_id = network_function['service_chain_id']
servicechain_instance = nfp_context['service_chain_instance']
servicechain_node = nfp_context['service_chain_node']

@@ -1728,7 +1760,6 @@ class HeatDriver(object):
nfp_context)

network_function = nfp_context['network_function']
# service_profile = service_details['service_profile']
service_chain_node = service_details['servicechain_node']
service_chain_instance = service_details['servicechain_instance']
provider = service_details['provider_ptg']

@@ -18,6 +18,7 @@ from neutron._i18n import _LW
from oslo_utils import excutils

from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import data_formatter as df
from gbpservice.nfp.common import exceptions
from gbpservice.nfp.core import executor as nfp_executor
from gbpservice.nfp.core import log as nfp_logging
@@ -143,21 +144,21 @@ class OrchestrationDriver(object):
device_data, interface,
network_handler=network_handler)

def _verify_vendor_data(self, image_name, metadata):
vendor_data = {}
def _verify_provider_metadata(self, image_name, metadata):
provider_metadata = {}
try:
for attr in metadata:
if attr in nfp_constants.METADATA_SUPPORTED_ATTRIBUTES:
vendor_data[attr] = ast.literal_eval(metadata[attr])
provider_metadata[attr] = ast.literal_eval(metadata[attr])
except Exception as e:
LOG.error(_LE('Wrong metadata: %(metadata)s provided for '
'image name: %(image_name)s. Error: %(error)s'),
{'image_name': image_name, 'metadata': metadata,
'error': e})
return None
return vendor_data
return provider_metadata

def _get_vendor_data(self, device_data, image_name):
def _get_provider_metadata(self, device_data, image_name):
token = self._get_token(device_data.get('token'))
if not token:
return None
@@ -171,12 +172,13 @@ class OrchestrationDriver(object):
'name: %(image_name)s. Error: %(error)s'),
{'image_name': image_name, 'error': e})
return None
vendor_data = self._verify_vendor_data(image_name, metadata)
if not vendor_data:
provider_metadata = self._verify_provider_metadata(image_name,
metadata)
if not provider_metadata:
return {}
return vendor_data
return provider_metadata
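
For context, _verify_provider_metadata whitelists image attributes and runs ast.literal_eval on each value. An illustrative glance image property set follows, assuming METADATA_SUPPORTED_ATTRIBUTES names the two attributes this diff consumes; the exact attribute strings are an assumption.

# Hypothetical image metadata as received from glance; values are
# strings that ast.literal_eval converts to Python objects.
metadata = {
    'maximum_interfaces': '8',
    'supports_hotplug': 'False',
}
# Assuming both names are in METADATA_SUPPORTED_ATTRIBUTES, the
# verified result would be:
# {'maximum_interfaces': 8, 'supports_hotplug': False}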
def _get_vendor_data_fast(self, token,
def _get_provider_metadata_fast(self, token,
admin_tenant_id, image_name, device_data):
try:
metadata = self.compute_handler_nova.get_image_metadata(
@@ -188,67 +190,71 @@ class OrchestrationDriver(object):
'name: %(image_name)s. Error: %(error)s'),
{'image_name': image_name, 'error': e})
return None
vendor_data = self._verify_vendor_data(image_name, metadata)
if not vendor_data:
provider_metadata = self._verify_provider_metadata(image_name,
metadata)
if not provider_metadata:
return {}
return vendor_data
return provider_metadata

def _update_self_with_vendor_data(self, vendor_data, attr):
def _update_self_with_provider_metadata(self, provider_metadata, attr):
attr_value = getattr(self, attr)
if attr in vendor_data:
setattr(self, attr, vendor_data[attr])
if attr in provider_metadata:
setattr(self, attr, provider_metadata[attr])
else:
LOG.info(_LI("Vendor data specified in image, doesn't contains "
"%(attr)s value, proceeding with default value "
"%(default)s"),
LOG.info(_LI("Provider metadata specified in image doesn't"
" contain %(attr)s value, proceeding with default"
" value %(default)s"),
{'attr': attr, 'default': attr_value})

def _update_vendor_data(self, device_data, token=None):
vendor_data = {}
def _update_provider_metadata(self, device_data, token=None):
provider_metadata = {}
try:
image_name = self._get_image_name(device_data)
vendor_data = self._get_vendor_data(device_data, image_name)
LOG.info(_LI("Vendor data, specified in image: %(vendor_data)s"),
{'vendor_data': vendor_data})
if vendor_data:
self._update_self_with_vendor_data(
vendor_data,
provider_metadata = self._get_provider_metadata(device_data,
image_name)
LOG.info(_LI("Provider metadata, specified in image:"
" %(provider_metadata)s"),
{'provider_metadata': provider_metadata})
if provider_metadata:
self._update_self_with_provider_metadata(
provider_metadata,
nfp_constants.MAXIMUM_INTERFACES)
self._update_self_with_vendor_data(
vendor_data,
self._update_self_with_provider_metadata(
provider_metadata,
nfp_constants.SUPPORTS_HOTPLUG)
else:
LOG.info(_LI("No vendor data specified in image, "
"proceeding with default values"))
LOG.info(_LI("No provider metadata specified in image,"
" proceeding with default values"))
except Exception:
LOG.error(_LE("Error while getting metadata for image name:"
"%(image_name)s, proceeding with default values"),
{'image_name': image_name})
return vendor_data
return provider_metadata

def _update_vendor_data_fast(self, token, admin_tenant_id,
image_name, device_data):
vendor_data = None
def _update_provider_metadata_fast(self, token, admin_tenant_id,
image_name, device_data):
provider_metadata = None
try:
vendor_data = self._get_vendor_data_fast(
provider_metadata = self._get_provider_metadata_fast(
token, admin_tenant_id, image_name, device_data)
LOG.info(_LI("Vendor data, specified in image: %(vendor_data)s"),
{'vendor_data': vendor_data})
if vendor_data:
self._update_self_with_vendor_data(
vendor_data,
LOG.info(_LI("Provider metadata, specified in image:"
" %(provider_metadata)s"),
{'provider_metadata': provider_metadata})
if provider_metadata:
self._update_self_with_provider_metadata(
provider_metadata,
nfp_constants.MAXIMUM_INTERFACES)
self._update_self_with_vendor_data(
vendor_data,
self._update_self_with_provider_metadata(
provider_metadata,
nfp_constants.SUPPORTS_HOTPLUG)
else:
LOG.info(_LI("No vendor data specified in image, "
"proceeding with default values"))
LOG.info(_LI("No provider metadata specified in image,"
" proceeding with default values"))
except Exception:
LOG.error(_LE("Error while getting metadata for image name: "
"%(image_name)s, proceeding with default values"),
{'image_name': image_name})
return vendor_data
return provider_metadata

def _get_image_name(self, device_data):
if device_data['service_details'].get('image_name'):
@@ -292,12 +298,12 @@ class OrchestrationDriver(object):
def create_instance(self, nova, token, admin_tenant_id,
image_id, flavor, interfaces_to_attach,
instance_name, volume_support,
volume_size, files=None):
volume_size, files=None, server_grp_id=None):
try:
instance_id = nova.create_instance(
token, admin_tenant_id,
image_id, flavor, interfaces_to_attach, instance_name,
volume_support, volume_size, files=files)
token, admin_tenant_id, image_id, flavor,
interfaces_to_attach, instance_name, volume_support,
volume_size, files=files, server_grp_id=server_grp_id)
return instance_id
except Exception as e:
LOG.error(_LE('Failed to create instance.'
@@ -340,6 +346,106 @@ class OrchestrationDriver(object):
:raises: exceptions.IncompleteData,
exceptions.ComputePolicyNotSupported
"""
self._validate_create_nfd_data(device_data)

token = device_data['token']
admin_tenant_id = device_data['admin_tenant_id']
image_name = self._get_image_name(device_data)

pre_launch_executor = nfp_executor.TaskExecutor(jobs=3)

image_id_result = {}
provider_metadata_result = {}

pre_launch_executor.add_job('UPDATE_PROVIDER_METADATA',
self._update_provider_metadata_fast,
token, admin_tenant_id, image_name, device_data,
result_store=provider_metadata_result)
pre_launch_executor.add_job('GET_INTERFACES_FOR_DEVICE_CREATE',
self._get_interfaces_for_device_create,
token, admin_tenant_id, network_handler, device_data)
pre_launch_executor.add_job('GET_IMAGE_ID',
self.get_image_id,
self.compute_handler_nova, token, admin_tenant_id,
image_name, result_store=image_id_result)

pre_launch_executor.fire()

interfaces, image_id, provider_metadata = (
self._validate_pre_launch_executor_results(network_handler,
device_data,
image_name,
image_id_result,
provider_metadata_result))
if not interfaces:
return None

management_interface = interfaces[0]
flavor = self._get_service_instance_flavor(device_data)

interfaces_to_attach = []
try:
for interface in interfaces:
interfaces_to_attach.append({'port': interface['port_id']})
if provider_metadata.get('supports_hotplug') is False:
self._update_interfaces_for_non_hotplug_support(
network_handler,
interfaces,
interfaces_to_attach,
device_data)
except Exception as e:
LOG.error(_LE('Failed to fetch list of interfaces to attach'
' for device creation %(error)s'), {'error': e})
self._delete_interfaces(device_data, interfaces,
network_handler=network_handler)
return None

instance_name = device_data['name']

create_instance_executor = nfp_executor.TaskExecutor(jobs=3)
instance_id_result = {}
port_details_result = {}
volume_support = device_data['volume_support']
volume_size = device_data['volume_size']
create_instance_executor.add_job(
'CREATE_INSTANCE', self.create_instance,
self.compute_handler_nova, token,
admin_tenant_id, image_id, flavor,
interfaces_to_attach, instance_name,
volume_support, volume_size,
files=device_data.get('files'),
result_store=instance_id_result)

create_instance_executor.add_job(
'GET_NEUTRON_PORT_DETAILS',
self.get_neutron_port_details,
network_handler, token,
management_interface['port_id'],
result_store=port_details_result)

create_instance_executor.fire()

instance_id, mgmt_neutron_port_info = (
self._validate_create_instance_executor_results(network_handler,
device_data,
interfaces,
instance_id_result,
port_details_result))
if not instance_id:
return None

mgmt_ip_address = mgmt_neutron_port_info['ip_address']
return {'id': instance_id,
'name': instance_name,
'provider_metadata': provider_metadata,
'mgmt_ip_address': mgmt_ip_address,
'mgmt_port_id': interfaces[0],
'mgmt_neutron_port_info': mgmt_neutron_port_info,
'max_interfaces': self.maximum_interfaces,
'interfaces_in_use': len(interfaces_to_attach),
'description': ''} # TODO(RPM): what should be the description
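
The refactored create path fans its blocking calls out through nfp_executor.TaskExecutor, and each job writes its output into a caller-supplied result_store dict. Below is a minimal stand-in that illustrates only this calling convention; TaskExecutor's real implementation is not shown in this diff.

# Sketch of the add_job/result_store contract used above; the real
# TaskExecutor runs jobs concurrently, this stand-in runs them inline.
class SketchTaskExecutor(object):
    def __init__(self, jobs):
        self._jobs = []  # 'jobs' (worker count) is ignored here

    def add_job(self, name, func, *args, **kwargs):
        self._jobs.append((func, args, kwargs))

    def fire(self):
        for func, args, kwargs in self._jobs:
            store = kwargs.pop('result_store', None)
            result = func(*args, **kwargs)
            if store is not None:
                store['result'] = result

image_id_result = {}
executor = SketchTaskExecutor(jobs=3)
executor.add_job('GET_IMAGE_ID', lambda: 'image-uuid',
                 result_store=image_id_result)
executor.fire()
assert image_id_result.get('result') == 'image-uuid'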
|
||||
|
||||
def _validate_create_nfd_data(self, device_data):
|
||||
if (
|
||||
any(key not in device_data
|
||||
for key in ['service_details',
|
||||
@@ -374,48 +480,38 @@ class OrchestrationDriver(object):
|
||||
raise exceptions.ComputePolicyNotSupported(
|
||||
compute_policy=device_data['service_details']['device_type'])
|
||||
|
||||
token = device_data['token']
|
||||
admin_tenant_id = device_data['admin_tenant_id']
|
||||
image_name = self._get_image_name(device_data)
|
||||
|
||||
executor = nfp_executor.TaskExecutor(jobs=3)
|
||||
|
||||
image_id_result = {}
|
||||
vendor_data_result = {}
|
||||
|
||||
executor.add_job('UPDATE_VENDOR_DATA',
|
||||
self._update_vendor_data_fast,
|
||||
token, admin_tenant_id, image_name, device_data,
|
||||
result_store=vendor_data_result)
|
||||
executor.add_job('GET_INTERFACES_FOR_DEVICE_CREATE',
|
||||
self._get_interfaces_for_device_create,
|
||||
token, admin_tenant_id, network_handler, device_data)
|
||||
executor.add_job('GET_IMAGE_ID',
|
||||
self.get_image_id,
|
||||
self.compute_handler_nova, token, admin_tenant_id,
|
||||
image_name, result_store=image_id_result)
|
||||
|
||||
executor.fire()
|
||||
|
||||
def _validate_pre_launch_executor_results(self, network_handler,
|
||||
device_data,
|
||||
image_name,
|
||||
image_id_result,
|
||||
provider_metadata_result,
|
||||
server_grp_id_result=None):
|
||||
interfaces = device_data.pop('interfaces', None)
|
||||
if not interfaces:
|
||||
LOG.exception(_LE('Failed to get interfaces for device creation.'))
|
||||
return None
|
||||
else:
|
||||
management_interface = interfaces[0]
|
||||
return None, _, _
|
||||
|
||||
image_id = image_id_result.get('result', None)
|
||||
if not image_id:
|
||||
LOG.error(_LE('Failed to get image id for device creation.'))
|
||||
self._delete_interfaces(device_data, interfaces,
|
||||
network_handler=network_handler)
|
||||
return None
|
||||
return None, _, _
|
||||
|
||||
vendor_data = vendor_data_result.get('result', None)
|
||||
if not vendor_data:
|
||||
LOG.warning(_LW('Failed to get vendor data for device creation.'))
|
||||
vendor_data = {}
|
||||
if server_grp_id_result and not server_grp_id_result.get('result'):
|
||||
LOG.error(_LE('Validation failed for Nova anti-affinity '
|
||||
'server group.'))
|
||||
return None, _, _
|
||||
|
||||
provider_metadata = provider_metadata_result.get('result', None)
|
||||
if not provider_metadata:
|
||||
LOG.warning(_LW('Failed to get provider metadata for'
|
||||
' device creation.'))
|
||||
provider_metadata = {}
|
||||
|
||||
return interfaces, image_id, provider_metadata
|
||||
|
||||
def _get_service_instance_flavor(self, device_data):
|
||||
if device_data['service_details'].get('flavor'):
|
||||
flavor = device_data['service_details']['flavor']
|
||||
else:
|
||||
@@ -423,80 +519,60 @@ class OrchestrationDriver(object):
|
||||
"service flavor field, using default "
|
||||
"flavor: m1.medium"))
|
||||
flavor = 'm1.medium'
|
||||
return flavor
|
||||
|
||||
interfaces_to_attach = []
|
||||
try:
|
||||
for interface in interfaces:
|
||||
interfaces_to_attach.append({'port': interface['port_id']})
|
||||
if vendor_data.get('supports_hotplug') is False:
|
||||
if not device_data['interfaces_to_attach']:
|
||||
for port in device_data['ports']:
|
||||
if (port['port_classification'] ==
|
||||
nfp_constants.PROVIDER):
|
||||
if (device_data['service_details'][
|
||||
'service_type'].lower()
|
||||
in [nfp_constants.FIREWALL.lower(),
|
||||
nfp_constants.VPN.lower()]):
|
||||
network_handler.set_promiscuos_mode(
|
||||
token, port['id'])
|
||||
port_id = network_handler.get_port_id(
|
||||
token, port['id'])
|
||||
interfaces_to_attach.append({'port': port_id})
|
||||
for port in device_data['ports']:
|
||||
if (port['port_classification'] ==
|
||||
nfp_constants.CONSUMER):
|
||||
if (device_data['service_details'][
|
||||
'service_type'].lower()
|
||||
in [nfp_constants.FIREWALL.lower(),
|
||||
nfp_constants.VPN.lower()]):
|
||||
network_handler.set_promiscuos_mode(
|
||||
token, port['id'])
|
||||
port_id = network_handler.get_port_id(
|
||||
token, port['id'])
|
||||
interfaces_to_attach.append({'port': port_id})
|
||||
else:
|
||||
for interface in device_data['interfaces_to_attach']:
|
||||
interfaces_to_attach.append(
|
||||
{'port': interface['port']})
|
||||
interfaces.append({'id': interface['id']})
|
||||
|
||||
except Exception as e:
|
||||
LOG.error(_LE('Failed to fetch list of interfaces to attach'
|
||||
' for device creation %(error)s'), {'error': e})
|
||||
self._delete_interfaces(device_data, interfaces,
|
||||
network_handler=network_handler)
|
||||
return None
|
||||
|
||||
instance_name = device_data['name']
|
||||
instance_id_result = {}
|
||||
port_details_result = {}
|
||||
volume_support = device_data['volume_support']
|
||||
volume_size = device_data['volume_size']
|
||||
executor.add_job('CREATE_INSTANCE',
|
||||
self.create_instance,
|
||||
self.compute_handler_nova,
|
||||
token, admin_tenant_id, image_id, flavor,
|
||||
interfaces_to_attach, instance_name,
|
||||
volume_support, volume_size,
|
||||
files=device_data.get('files'),
|
||||
result_store=instance_id_result)
|
||||
|
||||
executor.add_job('GET_NEUTRON_PORT_DETAILS',
|
||||
self.get_neutron_port_details,
|
||||
network_handler, token,
|
||||
management_interface['port_id'],
|
||||
result_store=port_details_result)
|
||||
|
||||
executor.fire()
|
||||
def _update_interfaces_for_non_hotplug_support(self, network_handler,
|
||||
interfaces,
|
||||
interfaces_to_attach,
|
||||
device_data):
|
||||
token = device_data['token']
|
||||
enable_port_security = device_data.get('enable_port_security')
|
||||
if not device_data['interfaces_to_attach']:
|
||||
for port in device_data['ports']:
|
||||
if (port['port_classification'] ==
|
||||
nfp_constants.PROVIDER):
|
||||
if (device_data['service_details'][
|
||||
'service_type'].lower()
|
||||
in [nfp_constants.FIREWALL.lower(),
|
||||
nfp_constants.VPN.lower()]):
|
||||
network_handler.set_promiscuos_mode(
|
||||
token, port['id'], enable_port_security)
|
||||
port_id = network_handler.get_port_id(
|
||||
token, port['id'])
|
||||
interfaces_to_attach.append({'port': port_id})
|
||||
for port in device_data['ports']:
|
||||
if (port['port_classification'] ==
|
||||
nfp_constants.CONSUMER):
|
||||
if (device_data['service_details'][
|
||||
'service_type'].lower()
|
||||
in [nfp_constants.FIREWALL.lower(),
|
||||
nfp_constants.VPN.lower()]):
|
||||
network_handler.set_promiscuos_mode(
|
||||
token, port['id'], enable_port_security)
|
||||
port_id = network_handler.get_port_id(
|
||||
token, port['id'])
|
||||
interfaces_to_attach.append({'port': port_id})
|
||||
else:
|
||||
for interface in device_data['interfaces_to_attach']:
|
||||
interfaces_to_attach.append(
|
||||
{'port': interface['port']})
|
||||
interfaces.append({'id': interface['id']})
|
||||
|
||||
def _validate_create_instance_executor_results(self,
|
||||
network_handler,
|
||||
device_data,
|
||||
interfaces,
|
||||
instance_id_result,
|
||||
port_details_result):
|
||||
token = device_data['token']
|
||||
admin_tenant_id = device_data['admin_tenant_id']
|
||||
instance_id = instance_id_result.get('result', None)
|
||||
if not instance_id:
|
||||
LOG.error(_LE('Failed to create %(device_type)s instance.'))
|
||||
self._delete_interfaces(device_data, interfaces,
|
||||
network_handler=network_handler)
|
||||
return None
|
||||
return None, _
|
||||
|
||||
mgmt_ip_address = None
|
||||
mgmt_neutron_port_info = port_details_result.get('result', None)
|
||||
|
||||
if not mgmt_neutron_port_info:
|
||||
@@ -514,18 +590,8 @@ class OrchestrationDriver(object):
|
||||
'error': e})
|
||||
self._delete_interfaces(device_data, interfaces,
|
||||
network_handler=network_handler)
|
||||
return None
|
||||
|
||||
mgmt_ip_address = mgmt_neutron_port_info['ip_address']
|
||||
return {'id': instance_id,
|
||||
'name': instance_name,
|
||||
'vendor_data': vendor_data,
|
||||
'mgmt_ip_address': mgmt_ip_address,
|
||||
'mgmt_port_id': interfaces[0],
|
||||
'mgmt_neutron_port_info': mgmt_neutron_port_info,
|
||||
'max_interfaces': self.maximum_interfaces,
|
||||
'interfaces_in_use': len(interfaces_to_attach),
|
||||
'description': ''} # TODO(RPM): what should be the description
|
||||
return None, _
|
||||
return instance_id, mgmt_neutron_port_info
|
||||
|
||||
@_set_network_handler
|
||||
def delete_network_function_device(self, device_data,
|
||||
@@ -697,9 +763,10 @@ class OrchestrationDriver(object):
|
||||
|
||||
token = device_data['token']
|
||||
tenant_id = device_data['tenant_id']
|
||||
vendor_data = device_data['vendor_data']
|
||||
provider_metadata = device_data['provider_metadata']
|
||||
enable_port_security = device_data.get('enable_port_security')
|
||||
|
||||
if vendor_data.get('supports_hotplug') is False:
|
||||
if provider_metadata.get('supports_hotplug') is False:
|
||||
return True
|
||||
try:
|
||||
executor = nfp_executor.TaskExecutor(jobs=10)
|
||||
@@ -714,7 +781,7 @@ class OrchestrationDriver(object):
|
||||
executor.add_job(
|
||||
'SET_PROMISCUOS_MODE',
|
||||
network_handler.set_promiscuos_mode_fast,
|
||||
token, port['id'])
|
||||
token, port['id'], enable_port_security)
|
||||
executor.add_job(
|
||||
'ATTACH_INTERFACE',
|
||||
self.compute_handler_nova.attach_interface,
|
||||
@@ -733,7 +800,7 @@ class OrchestrationDriver(object):
|
||||
executor.add_job(
|
||||
'SET_PROMISCUOS_MODE',
|
||||
network_handler.set_promiscuos_mode_fast,
|
||||
token, port['id'])
|
||||
token, port['id'], enable_port_security)
|
||||
executor.add_job(
|
||||
'ATTACH_INTERFACE',
|
||||
self.compute_handler_nova.attach_interface,
|
||||
@@ -749,36 +816,6 @@ class OrchestrationDriver(object):
|
||||
else:
|
||||
return True
|
||||
|
||||
def _set_promiscous_mode(self, token, service_type,
|
||||
port_ids, network_handler=None):
|
||||
for port_id in port_ids:
|
||||
if (service_type.lower() in [nfp_constants.FIREWALL.lower(),
|
||||
nfp_constants.VPN.lower()]):
|
||||
network_handler.set_promiscuos_mode(token, port_id)
|
||||
|
||||
def _get_data_port_ids(self, token, ports, service_type,
|
||||
network_handler=None, set_promiscous_mode=False):
|
||||
# return data_port_ids in sequential format i.e.
|
||||
# provider port_id, then consumer port_id
|
||||
data_port_ids = []
|
||||
|
||||
for port in ports:
|
||||
if port['port_classification'] == nfp_constants.PROVIDER:
|
||||
provider_port_id = network_handler.get_port_id(token,
|
||||
port['id'])
|
||||
data_port_ids.append(provider_port_id)
|
||||
break
|
||||
for port in ports:
|
||||
if port['port_classification'] == nfp_constants.CONSUMER:
|
||||
consumer_port_id = network_handler.get_port_id(token,
|
||||
port['id'])
|
||||
data_port_ids.append(consumer_port_id)
|
||||
|
||||
if set_promiscous_mode:
|
||||
self._set_promiscous_mode(token, service_type, data_port_ids,
|
||||
network_handler)
|
||||
return data_port_ids

@_set_network_handler
def unplug_network_function_device_interfaces(self, device_data,
network_handler=None):
@@ -827,18 +864,19 @@ class OrchestrationDriver(object):
return None

image_name = self._get_image_name(device_data)
vendor_data = {}
provider_metadata = {}
if image_name:
vendor_data = (
self._update_vendor_data_fast(token,
device_data['tenant_id'],
image_name,
device_data))
provider_metadata = (
self._update_provider_metadata_fast(token,
device_data['tenant_id'],
image_name,
device_data))

if not vendor_data:
LOG.warning(_LW('Failed to get vendor data for device deletion.'))
if not provider_metadata:
LOG.warning(_LW('Failed to get provider metadata for'
' device deletion.'))

if vendor_data.get('supports_hotplug') is False:
if provider_metadata.get('supports_hotplug') is False:
return True
try:
for port in device_data['ports']:
@@ -856,75 +894,20 @@ class OrchestrationDriver(object):
else:
return True

def get_network_function_device_healthcheck_info(self, device_data):
""" Get the health check information for NFD

:param device_data: NFD
:type device_data: dict

:returns: dict -- It has the following scheme
{
'config': [
{
'resource': 'healthmonitor',
'resource_data': {
...
}
}
]
}

:raises: exceptions.IncompleteData
"""
if (
any(key not in device_data
for key in ['id',
'mgmt_ip_address'])
):
raise exceptions.IncompleteData()

return {
'config': [
{
'resource': nfp_constants.HEALTHMONITOR_RESOURCE,
'resource_data': {
'vmid': device_data['id'],
'mgmt_ip': device_data['mgmt_ip_address'],
'periodicity': 'initial'
}
}
]
}

@_set_network_handler
def get_network_function_device_config_info(self, device_data,
network_handler=None):
def get_delete_device_data(self, device_data, network_handler=None):
""" Get the configuration information for NFD

:param device_data: NFD
:type device_data: dict

:returns: None -- On Failure
:returns: dict -- It has the following scheme
{
'config': [
{
'resource': 'interfaces',
'resource_data': {
...
}
},
{
'resource': 'routes',
'resource_data': {
...
}
}
]
}
:returns: dict

:raises: exceptions.IncompleteData

"""

if (
any(key not in device_data
for key in ['service_details',
@@ -946,7 +929,9 @@ class OrchestrationDriver(object):
'port_classification',
'port_model'])
):
raise exceptions.IncompleteData()
LOG.error(_LE('Incomplete device data received for delete '
'network function device.'))
return None

token = self._get_token(device_data.get('token'))
if not token:
@@ -963,7 +948,7 @@ class OrchestrationDriver(object):
for port in device_data['ports']:
if port['port_classification'] == nfp_constants.PROVIDER:
try:
(provider_ip, provider_mac, provider_cidr, dummy) = (
(provider_ip, provider_mac, provider_cidr, dummy, _, _) = (
network_handler.get_port_details(token, port['id'])
)
except Exception:
@@ -973,7 +958,7 @@ class OrchestrationDriver(object):
elif port['port_classification'] == nfp_constants.CONSUMER:
try:
(consumer_ip, consumer_mac, consumer_cidr,
consumer_gateway_ip) = (
consumer_gateway_ip, _, _) = (
network_handler.get_port_details(token, port['id'])
)
except Exception:
@@ -981,106 +966,28 @@ class OrchestrationDriver(object):
' for get device config info operation'))
return None

return {
'config': [
{
'resource': nfp_constants.INTERFACE_RESOURCE,
'resource_data': {
'mgmt_ip': device_data['mgmt_ip_address'],
'provider_ip': provider_ip,
'provider_cidr': provider_cidr,
'provider_interface_index': 2,
'stitching_ip': consumer_ip,
'stitching_cidr': consumer_cidr,
'stitching_interface_index': 3,
'provider_mac': provider_mac,
'stitching_mac': consumer_mac,
}
},
{
'resource': nfp_constants.ROUTES_RESOURCE,
'resource_data': {
'mgmt_ip': device_data['mgmt_ip_address'],
'source_cidrs': ([provider_cidr, consumer_cidr]
if consumer_cidr
else [provider_cidr]),
'destination_cidr': consumer_cidr,
'provider_mac': provider_mac,
'gateway_ip': consumer_gateway_ip,
'provider_interface_index': 2
}
}
]
}
device_data.update({
'provider_ip': provider_ip, 'provider_mac': provider_mac,
'provider_cidr': provider_cidr, 'consumer_ip': consumer_ip,
'consumer_mac': consumer_mac, 'consumer_cidr': consumer_cidr,
'consumer_gateway_ip': consumer_gateway_ip})

return device_data

@_set_network_handler
def get_create_network_function_device_config_info(self, device_data,
network_handler=None):
def get_network_function_device_config(self, device_data,
resource_type, is_delete=False,
network_handler=None):
""" Get the configuration information for NFD

:param device_data: NFD
:type device_data: dict
:returns: dict

:returns: None -- On Failure
:returns: dict -- It has the following scheme
{
'config': [
{
'resource': 'interfaces',
'resource_data': {
...
}
},
{
'resource': 'routes',
'resource_data': {
...
}
}
]
}

:raises: exceptions.IncompleteData
"""

mgmt_ip = device_data.get('mgmt_ip', None)
provider_ip = device_data.get('provider_ip', None)
provider_mac = device_data.get('provider_mac', None)
provider_cidr = device_data.get('provider_cidr', None)
consumer_ip = device_data.get('consumer_ip', None)
consumer_mac = device_data.get('consumer_mac', None)
consumer_cidr = device_data.get('consumer_cidr', None)
consumer_gateway_ip = device_data.get('consumer_gateway_ip', None)

return {
'config': [
{
'resource': nfp_constants.INTERFACE_RESOURCE,
'resource_data': {
'mgmt_ip': mgmt_ip,
'provider_ip': provider_ip,
'provider_cidr': provider_cidr,
'provider_interface_index': 2,
'stitching_ip': consumer_ip,
'stitching_cidr': consumer_cidr,
'stitching_interface_index': 3,
'provider_mac': provider_mac,
'stitching_mac': consumer_mac,
},

},
{
'resource': nfp_constants.ROUTES_RESOURCE,
'resource_data': {
'mgmt_ip': mgmt_ip,
'source_cidrs': ([provider_cidr, consumer_cidr]
if consumer_cidr
else [provider_cidr]),
'destination_cidr': consumer_cidr,
'provider_mac': provider_mac,
'gateway_ip': consumer_gateway_ip,
'provider_interface_index': 2
}
}
]
}
if is_delete:
device_data = self.get_delete_device_data(
device_data, network_handler=network_handler)
if not device_data:
return None
return df.get_network_function_info(
device_data, resource_type)
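With this change the three special-purpose getters collapse into a single entry point keyed by resource type, and the payload itself comes from the shared data formatter (df). A minimal sketch of the new calling convention, with a stub in place of the real driver and the constants assumed to be plain strings:

    # Sketch only: a stub mirroring the new single entry point; the real
    # driver delegates to df.get_network_function_info(...).
    HEALTHMONITOR_RESOURCE = 'healthmonitor'
    GENERIC_CONFIG = 'generic_config'

    class DriverStub(object):
        def get_network_function_device_config(self, device_data,
                                               resource_type,
                                               is_delete=False):
            if is_delete:
                # delete path folds in port details first, as
                # get_delete_device_data() does above
                device_data = dict(device_data, deleted=True)
            return {'resource_type': resource_type,
                    'resource_data': device_data}

    driver = DriverStub()
    device = {'id': 'nfd-1', 'mgmt_ip': '192.168.20.5'}
    hm_req = driver.get_network_function_device_config(
        device, HEALTHMONITOR_RESOURCE)
    delete_req = driver.get_network_function_device_config(
        device, GENERIC_CONFIG, is_delete=True)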

@@ -46,7 +46,10 @@ nfp_orchestrator_opts = [
'supported_vendors',
default=[nfp_constants.VYOS_VENDOR, nfp_constants.HAPROXY_VENDOR,
nfp_constants.HAPROXY_LBAASV2, nfp_constants.NFP_VENDOR],
help="Supported service vendors for nfp")]
help="Supported service vendors for nfp"),
oslo_config.StrOpt('monitoring_ptg_l3policy_id',
default='')
]

oslo_config.CONF.register_opts(nfp_orchestrator_opts, 'orchestrator')

@@ -54,7 +57,7 @@ device_orchestrator_opts = [
oslo_config.BoolOpt('volume_support',
default=False, help='cinder volume support'),
oslo_config.StrOpt('volume_size',
default='2', help='cinder volume size'),
default='2', help='cinder volume size')
]

oslo_config.CONF.register_opts(device_orchestrator_opts, 'device_orchestrator')
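Once registered, the new options read back like any other oslo.config values; a minimal sketch, assuming the register_opts calls above have already run:

    # Minimal sketch of consuming the options registered above.
    from oslo_config import cfg

    conf = cfg.CONF
    # Valid only after register_opts(..., 'orchestrator') and
    # register_opts(..., 'device_orchestrator'):
    l3policy_id = conf.orchestrator.monitoring_ptg_l3policy_id  # '' default
    volume_size = conf.device_orchestrator.volume_size          # '2' (GB)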

@@ -16,6 +16,7 @@ import oslo_messaging as messaging

from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import topics as nsf_topics
from gbpservice.nfp.common import utils as nfp_utils
from gbpservice.nfp.core.event import Event
from gbpservice.nfp.core import module as nfp_api
from gbpservice.nfp.core.rpc import RpcAgent
@@ -82,15 +83,12 @@ class RpcHandler(object):
self.conf = conf
self._controller = controller
self.rpc_event_mapping = {
'healthmonitor': ['HEALTH_MONITOR_COMPLETE',
'DEVICE_NOT_REACHABLE',
'DEVICE_NOT_REACHABLE'],
'interfaces': ['DEVICE_CONFIGURED',
'DELETE_CONFIGURATION_COMPLETED',
'DEVICE_CONFIGURATION_FAILED'],
'routes': ['DEVICE_CONFIGURED',
'DELETE_CONFIGURATION_COMPLETED',
'DEVICE_CONFIGURATION_FAILED'],
nfp_constants.HEALTHMONITOR_RESOURCE: ['HEALTH_MONITOR_COMPLETE',
'DEVICE_NOT_REACHABLE',
'DEVICE_NOT_REACHABLE'],
nfp_constants.GENERIC_CONFIG: ['DEVICE_CONFIGURED',
'DELETE_CONFIGURATION_COMPLETED',
'DEVICE_CONFIGURATION_FAILED'],
}
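The lists are indexed by outcome: [0] success, [1] delete completed (deletion errors are ignored), [2] failure, and any resource other than the health monitor is first collapsed to GENERIC_CONFIG. A sketch of the dispatch this implies, with the constants assumed to be plain strings:

    # Sketch of the dispatch implied above; constants assumed to be strings.
    HEALTHMONITOR_RESOURCE = 'healthmonitor'
    GENERIC_CONFIG = 'generic_config'

    rpc_event_mapping = {
        HEALTHMONITOR_RESOURCE: ['HEALTH_MONITOR_COMPLETE',
                                 'DEVICE_NOT_REACHABLE',
                                 'DEVICE_NOT_REACHABLE'],
        GENERIC_CONFIG: ['DEVICE_CONFIGURED',
                         'DELETE_CONFIGURATION_COMPLETED',
                         'DEVICE_CONFIGURATION_FAILED'],
    }

    def pick_event(resource, operation, status_code):
        if resource != HEALTHMONITOR_RESOURCE:
            resource = GENERIC_CONFIG   # routes/interfaces collapse here
        if status_code.lower() != 'success':
            if operation == 'delete':   # deletion errors are ignored
                return rpc_event_mapping[resource][1]
            return rpc_event_mapping[resource][2]
        return rpc_event_mapping[resource][0]

    assert pick_event('routes', 'create', 'success') == 'DEVICE_CONFIGURED'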

def _log_event_created(self, event_id, event_data):
@@ -129,6 +127,8 @@ class RpcHandler(object):
resource = response.get('resource')
data = response.get('data')
result = data.get('status_code')
if resource != nfp_constants.HEALTHMONITOR_RESOURCE:
resource = nfp_constants.GENERIC_CONFIG

is_delete_request = True if operation == 'delete' else False

@@ -138,6 +138,8 @@ class RpcHandler(object):
event_id = self.rpc_event_mapping[resource][0]

if result.lower() != 'success':
LOG.info(_LI("NDO RPC handler response data: %(data)s") % {
'data': data})
if is_delete_request:
# Ignore any deletion errors, generate SUCCESS event
event_id = self.rpc_event_mapping[resource][1]
@@ -540,6 +542,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device_data['service_details'] = service_details
device_data['service_details']['network_mode'] = nfp_constants.GBP_MODE
device_data['service_vendor'] = service_details['service_vendor']
device_data['server_grp_id'] = nfp_context.get('server_grp_id')
device_data['interfaces_to_attach'] = (
nfp_context.get('interfaces_to_attach'))

@@ -547,6 +550,53 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
device_data['files'] = nfp_context['files']
return device_data

def _create_nfd_entry(self, nfp_context, driver_device_info,
device_data, service_details):
nfp_context['provider_metadata'] = driver_device_info.get(
'provider_metadata')
# Update nfp_context management with newly created mgmt port
management = nfp_context['management']
management['port'] = driver_device_info[
'mgmt_neutron_port_info']['neutron_port']
management['port']['ip_address'] = management[
'port']['fixed_ips'][0]['ip_address']
management['subnet'] = driver_device_info[
'mgmt_neutron_port_info']['neutron_subnet']

# Update newly created device with required params
device = self._update_device_data(driver_device_info, device_data)
device['network_function_device_id'] = device['id']

name = '%s_%s_%s_%s' % (
device['provider_name'],
service_details['service_type'],
nfp_context['resource_owner_context']['tenant_name'][:6],
device['network_function_device_id'][:3])
device['name'] = name
# Create DB entry with status as DEVICE_SPAWNING
network_function_device = (
self._create_network_function_device_db(device,
'DEVICE_SPAWNING'))

nfp_context['network_function_device'] = network_function_device
return device

def _update_nfp_context_with_ports(self, nfp_context, device):
# REVISIT(mak) Wrong but nfp_db method needs in this format
network_function_device = nfp_context['network_function_device']
network_function_device['mgmt_port_id'] = device['mgmt_port_id']

def _post_create_nfd_events(self, event, nfp_context, device):

nfp_context['event_desc'] = event.desc.to_dict()
self._create_event(event_id='DEVICE_SPAWNING',
event_data=nfp_context,
is_poll_event=True,
original_event=event,
max_times=nfp_constants.DEVICE_SPAWNING_MAXRETRY)
self._create_event(event_id='DEVICE_CREATED',
event_data=device)
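DEVICE_SPAWNING is a poll event bounded by DEVICE_SPAWNING_MAXRETRY. A simplified model of how such a bounded poll behaves (the real controller schedules the handler; a plain loop stands in for it here):

    # Simplified model of a bounded poll event; the dict constants mirror
    # the NFP core's STOP_POLLING / CONTINUE_POLLING return values.
    STOP_POLLING = {'poll': False}
    CONTINUE_POLLING = {'poll': True}

    def run_poll_event(handler, event_data, max_times):
        for _ in range(max_times):
            if handler(event_data) == STOP_POLLING:
                return True   # device became ACTIVE in time
        return False          # poll budget exhausted; treated as failure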

# Create path
def create_network_function_device(self, event):
""" Returns device instance for a new service
@@ -555,8 +605,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
new service or it creates new device instance
"""

device = None

nfp_context = event.data
nfd_request = self._prepare_failure_case_device_data(nfp_context)
service_details = nfp_context['service_details']
@@ -587,41 +635,11 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
self._controller.event_complete(event)
return None

nfp_context['vendor_data'] = driver_device_info.get('vendor_data')
management = nfp_context['management']
management['port'] = driver_device_info[
'mgmt_neutron_port_info']['neutron_port']
management['port']['ip_address'] = management[
'port']['fixed_ips'][0]['ip_address']
management['subnet'] = driver_device_info[
'mgmt_neutron_port_info']['neutron_subnet']
device = self._create_nfd_entry(nfp_context, driver_device_info,
device_data, service_details)
self._update_nfp_context_with_ports(nfp_context, driver_device_info)

# Update newly created device with required params
device = self._update_device_data(driver_device_info, device_data)
device['network_function_device_id'] = device['id']

name = '%s_%s_%s_%s' % (
device['provider_name'],
service_details['service_type'],
nfp_context['resource_owner_context']['tenant_name'][:6],
device['network_function_device_id'][:3])
device['name'] = name
# Create DB entry with status as DEVICE_SPAWNING
network_function_device = (
self._create_network_function_device_db(device,
'DEVICE_SPAWNING'))

# REVISIT(mak) Wrong but nfp_db method needs in this format
network_function_device['mgmt_port_id'] = device['mgmt_port_id']
nfp_context['network_function_device'] = network_function_device
nfp_context['event_desc'] = event.desc.to_dict()
self._create_event(event_id='DEVICE_SPAWNING',
event_data=nfp_context,
is_poll_event=True,
original_event=event,
max_times=nfp_constants.DEVICE_SPAWNING_MAXRETRY)
self._create_event(event_id='DEVICE_CREATED',
event_data=device)
self._post_create_nfd_events(event, nfp_context, device)

def _post_device_up_event_graph(self, nfp_context):
nf_id = nfp_context['network_function']['id']
@@ -666,20 +684,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
orchestration_driver.get_network_function_device_status(device))

if is_device_up == nfp_constants.ACTIVE:
# [REVISIT(mak)] - Update interfaces count here before
# sending health monitor rpc in PERFORM_HEALTH_CHECK event.
# [REVISIT(mak)] to handle a very corner case where
# PLUG_INTERFACES completes later than HEALTHMONITOR.
# till proper fix is identified.
provider = nfp_context['provider']['ptg']
consumer = nfp_context['consumer']['ptg']
network_function_device = nfp_context['network_function_device']

if provider:
network_function_device['interfaces_in_use'] += 1
if consumer:
network_function_device['interfaces_in_use'] += 1

self._post_device_up_event_graph(nfp_context)

return STOP_POLLING
@@ -711,7 +715,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
key=nf_id,
data=nfp_context)
user_config_event = self._controller.new_event(
id='APPLY_USER_CONFIG',
id='INITIATE_USER_CONFIG',
key=nf_id,
serialize=serialize,
binding_key=binding_key,
@@ -763,7 +767,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
self._controller.event_complete(nfd_event)
self._post_configure_device_graph(nfp_context,
serialize=serialize_config)
# event.key = event_key
self._controller.event_complete(event)

def perform_health_check(self, event):
@@ -774,6 +777,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
network_function = nfp_context['network_function']
network_function_instance = nfp_context['network_function_instance']
mgmt_ip_address = nfp_context['management']['port']['ip_address']
tenant_id = nfp_context['resource_owner_context']['admin_tenant_id']

# The driver tells which protocol / port to monitor ??
orchestration_driver = self._get_orchestration_driver(
@@ -781,6 +785,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
nfp_context['event_desc'] = event.desc.to_dict()
device = {
'id': network_function_device['id'],
'tenant_id': tenant_id,
'mgmt_ip_address': mgmt_ip_address,
'service_details': service_details,
'network_function_id': network_function['id'],
@@ -790,8 +795,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
}

hm_req = (
orchestration_driver.get_network_function_device_healthcheck_info(
device))
orchestration_driver.get_network_function_device_config(
device, nfp_constants.HEALTHMONITOR_RESOURCE))
if not hm_req:
self._controller.event_complete(event, result="FAILED")
return None
@@ -802,10 +807,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
"%s with health check parameters: %s" % (
device['id'], hm_req))

device['status'] = 'HEALTH_CHECK_PENDING'
self._update_network_function_device_db(device,
'HEALTH_CHECK_PENDING')

def _get_service_type(self, service_profile_id):
admin_token = self.keystoneclient.get_admin_token()
service_profile = self.gbpclient.get_service_profile(
@@ -914,7 +915,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
def plug_interfaces_fast(self, event):

# In this case, the event will be
# happening in paralell with HEALTHMONITORIN,
# happening in parallel with HEALTHMONITORING,
# so, we should not generate CONFIGURE_DEVICE & should not update
# DB with HEALTH_CHECK_COMPLETED.
@@ -936,12 +937,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
service_details['service_vendor'])

ports = self._make_ports_dict(consumer, provider, 'port')
# Modify interface_in_use as dummy interfaces_in_use has been included
# for health check
if provider:
network_function_device['interfaces_in_use'] -= 1
if consumer:
network_function_device['interfaces_in_use'] -= 1

device = {
'id': network_function_device['id'],
@@ -951,7 +946,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
'tenant_id': tenant_id,
'interfaces_in_use': network_function_device['interfaces_in_use'],
'status': network_function_device['status'],
'vendor_data': nfp_context['vendor_data']}
'provider_metadata': nfp_context['provider_metadata']}

_ifaces_plugged_in = (
orchestration_driver.plug_network_function_device_interfaces(
@@ -972,8 +967,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
orchestration_driver = self._get_orchestration_driver(
device['service_details']['service_vendor'])
config_params = (
orchestration_driver.get_network_function_device_config_info(
device))
orchestration_driver.get_network_function_device_config(
device, nfp_constants.GENERIC_CONFIG))
if not config_params:
self._create_event(event_id='DRIVER_ERROR',
event_data=device,
@@ -993,6 +988,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
network_function = nfp_context['network_function']
network_function_instance = nfp_context['network_function_instance']
network_function_device = nfp_context['network_function_device']
tenant_id = nfp_context['resource_owner_context']['admin_tenant_id']

binding_key = service_details[
'service_vendor'].lower() + network_function['id']
@@ -1000,7 +996,9 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
orchestration_driver = self._get_orchestration_driver(
service_details['service_vendor'])
device = {
'mgmt_ip': management['port']['ip_address'],
'tenant_id': tenant_id,
'mgmt_ip_address': management['port']['ip_address'],
'mgmt_ip': network_function_device['mgmt_ip_address'],
'provider_ip': provider['port']['ip_address'],
'provider_cidr': provider['subnet']['cidr'],
'provider_mac': provider['port']['mac_address'],
@@ -1013,14 +1011,10 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
'consumer_gateway_ip': consumer[
'subnet']['gateway_ip']})

config_params = (
orchestration_driver.
get_create_network_function_device_config_info(
device))
nfp_context['event_desc'] = event.desc.to_dict()
device.update({
'id': network_function_device['id'],
'mgmt_ip_address': management['port']['ip_address'],
'mgmt_ip_address': network_function_device['mgmt_ip_address'],
'service_details': service_details,
'network_function_id': network_function['id'],
'network_function_instance_id': network_function_instance['id'],
@@ -1030,12 +1024,26 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
'network_function_device': network_function_device,
'binding_key': binding_key}})

config_params = (
orchestration_driver.
get_network_function_device_config(
device, nfp_constants.GENERIC_CONFIG))

if not config_params:
self._create_event(event_id='DRIVER_ERROR',
event_data=device,
is_internal_event=True)
self._controller.event_complete(event, result="FAILED")
return None
# Set forward_route as False in resource_data for configurator to
# handle routes differently, when vpn is in service chain
if nfp_utils.is_vpn_in_service_chain(
nfp_context['service_chain_specs']):
config_params['config'][0][
'resource_data']['forward_route'] = False
else:
config_params['config'][0][
'resource_data']['forward_route'] = True
# Sends RPC to configurator to create generic config
self.configurator_rpc.create_network_function_device_config(
device, config_params)

@@ -1097,8 +1105,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
orchestration_driver = self._get_orchestration_driver(
device['service_details']['service_vendor'])
config_params = (
orchestration_driver.get_network_function_device_config_info(
device))
orchestration_driver.get_network_function_device_config(
device, nfp_constants.GENERIC_CONFIG, is_delete=True))
if not config_params:
self._create_event(event_id='DRIVER_ERROR',
event_data=device,
@@ -1172,19 +1180,27 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
status = orchestration_driver.get_network_function_device_status(
device, ignore_failure=True)
if not status:
device_id = device['id']
del device['id']
orchestration_driver.delete_network_function_device(device)
self._delete_network_function_device_db(device_id, device)
nf_id = device['network_function_id']
dnfd_event = (
self._controller.new_event(id='DELETE_NETWORK_FUNCTION_DEVICE',
key=nf_id,
binding_key=nf_id,
desc_dict=device[
'event_desc']))
self._controller.event_complete(dnfd_event, result='SUCCESS')
return STOP_POLLING

try:
device_id = device['id']
del device['id']
orchestration_driver.delete_network_function_device(device)
self._delete_network_function_device_db(device_id, device)
if device.get('event_desc'):
nf_id = device['network_function_id']
dnfd_event = (
self._controller.new_event(
id='DELETE_NETWORK_FUNCTION_DEVICE',
key=nf_id,
binding_key=nf_id,
desc_dict=device['event_desc']))
self._controller.event_complete(
dnfd_event, result='SUCCESS')
return STOP_POLLING
except Exception as exc:
device['id'] = device_id
msg = "Exception - %s - in DEVICE_BEING_DELETED" % exc
LOG.error(msg)
return CONTINUE_POLLING
else:
return CONTINUE_POLLING
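The guarded branch above relies on the event-descriptor round trip used throughout this refactor: a creator stashes event.desc as a dict, and a later handler rebuilds the event from desc_dict just to complete it. A minimal sketch with stand-in classes:

    # Minimal sketch of the descriptor round trip; the controller below
    # is a stand-in for the NFP core controller.
    class EventStub(object):
        def __init__(self, id, key, desc_dict=None):
            self.id, self.key, self.desc = id, key, desc_dict

    class ControllerStub(object):
        def new_event(self, **kwargs):
            return EventStub(**kwargs)

        def event_complete(self, event, result=None):
            print('completed %s with %s' % (event.id, result))

    controller = ControllerStub()
    device = {'network_function_id': 'nf-1',
              'event_desc': {'uuid': 'evt-42'}}  # stashed by the creator
    if device.get('event_desc'):
        dnfd_event = controller.new_event(
            id='DELETE_NETWORK_FUNCTION_DEVICE',
            key=device['network_function_id'],
            desc_dict=device['event_desc'])
        controller.event_complete(dnfd_event, result='SUCCESS')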

@@ -1241,7 +1257,6 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
self.health_monitor_complete(event, result='FAILED')

def handle_device_config_failed(self, event):
#device = event.data
nfp_context = event.data['nfp_context']

device = nfp_context['network_function_device']
@@ -1302,7 +1317,6 @@ class NDOConfiguratorRpcApi(object):
'operation': operation,
'logging_context': nfp_logging.get_logging_context(),
# So that notification callbacks can work on cached data
#'orig_nfp_context': device.get('orig_nfp_context'),
'nfp_context': device.get('nfp_context', None)
}
nfd_ip = device['mgmt_ip_address']
@@ -1318,6 +1332,11 @@ class NDOConfiguratorRpcApi(object):
'service_vendor': device_data['service_details']['service_vendor'],
'context': request_info
}
if device_data.get('service_feature'):
config_params['info'].update(
{'service_feature': device_data.get('service_feature')})
if config_params.get('service_info'):
config_params['info'].update(config_params.pop('service_info'))

def create_network_function_device_config(self, device_data,
config_params):

@@ -69,6 +69,8 @@ def events_init(controller, config, service_orchestrator):
'POLICY_TARGET_ADD', 'POLICY_TARGET_REMOVE',
'CONSUMER_ADD', 'CONSUMER_REMOVE',
'APPLY_USER_CONFIG_IN_PROGRESS',
'INITIATE_USER_CONFIG',
'UPDATE_NETWORK_FUNCTION_DESCRIPTION',
'UPDATE_USER_CONFIG_PREPARING_TO_START',
'UPDATE_USER_CONFIG_IN_PROGRESS',
'UPDATE_USER_CONFIG_STILL_IN_PROGRESS',
@@ -76,7 +78,7 @@ def events_init(controller, config, service_orchestrator):
'CONFIG_APPLIED', 'USER_CONFIG_APPLIED', 'USER_CONFIG_DELETED',
'USER_CONFIG_DELETE_FAILED', 'USER_CONFIG_UPDATE_FAILED',
'USER_CONFIG_FAILED', 'CHECK_USER_CONFIG_COMPLETE',
'SERVICE_CONFIGURED',
'SERVICE_CONFIGURED', 'CREATE_NETWORK_FUNCTION_INSTANCE_DB',
'DELETE_NETWORK_FUNCTION_DB']
events_to_register = []
for event in events:
@@ -264,9 +266,9 @@ class RpcHandlerConfigurator(object):
}

def _log_event_created(self, event_id, event_data):
LOG.debug("Service Orchestrator, RPC Handler for configurator,"
"Created event, %s(event_name)s with "
"event data: %(event_data)s",
LOG.info(_LI("Service Orchestrator, RPC Handler for configurator,"
"Created event, %(event_name)s with "
"event data: %(event_data)s"),
{'event_name': event_id, 'event_data': event_data})

def _create_event(self, event_id, event_data=None,
@@ -436,6 +438,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
"POLICY_TARGET_REMOVE": self.policy_target_remove_user_config,
"CONSUMER_ADD": self.consumer_ptg_add_user_config,
"CONSUMER_REMOVE": self.consumer_ptg_remove_user_config,
"INITIATE_USER_CONFIG": self.initiate_user_config,
"UPDATE_NETWORK_FUNCTION_DESCRIPTION": (
self.update_network_function_description),
"APPLY_USER_CONFIG_IN_PROGRESS": (
self.apply_user_config_in_progress),
"CHECK_USER_CONFIG_COMPLETE": (
@@ -455,6 +460,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
"USER_CONFIG_UPDATE_FAILED": self.handle_update_user_config_failed,
"USER_CONFIG_FAILED": self.handle_user_config_failed,
"SERVICE_CONFIGURED": self.handle_service_configured,
"CREATE_NETWORK_FUNCTION_INSTANCE_DB": (
self.create_network_function_instance_db),
"DELETE_NETWORK_FUNCTION_DB": self.delete_network_function_db
}
if event_id not in event_handler_mapping:
@@ -805,7 +812,12 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
service_config_str)
else:
# Create an event to perform Network service instance
self.create_network_function_instance_db(nfp_context)
ev = self._controller.new_event(
id='CREATE_NETWORK_FUNCTION_INSTANCE_DB',
data=nfp_context,
key=network_function['id'])
self._controller.post_event(ev)
# self.create_network_function_instance_db(nfp_context)

nfp_logging.clear_logging_context()
return network_function
@@ -919,7 +931,21 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
is_poll_event=True, original_event=event,
max_times=nfp_constants.DELETE_USER_CONFIG_IN_PROGRESS_MAXRETRY)

def create_network_function_instance_db(self, nfp_context):
def _update_nfp_context(self, nfp_context):
provider = nfp_context['provider']
consumer = nfp_context['consumer']
provider['pt'] = provider['pt'][0]
provider['ptg'] = provider['ptg'][0]
provider['port'] = provider['port'][0]
if consumer['pt']:
consumer['pt'] = consumer['pt'][0]
if consumer['ptg']:
consumer['ptg'] = consumer['ptg'][0]
if consumer['port']:
consumer['port'] = consumer['port'][0]

def create_network_function_instance_db(self, event):
nfp_context = event.data

network_function = nfp_context['network_function']
service_details = nfp_context['service_details']
@@ -948,6 +974,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):

nfp_context['network_function_instance'] = nfi_db
LOG.info(_LI("[Event:CreateService]"))

self._update_nfp_context(nfp_context)

ev = self._controller.new_event(
id='CREATE_NETWORK_FUNCTION_INSTANCE',
data=nfp_context,
@@ -1094,7 +1123,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
original_event=event,
max_times=nfp_constants.APPLY_USER_CONFIG_IN_PROGRESS_MAXRETRY)

def apply_user_config(self, event):
def initiate_user_config(self, event):
# Split the user config creation in 2 steps,
# get, update the description in network function and
# apply user config
event_results = event.result
for c_event in event_results:
if c_event.id == "SEND_USER_CONFIG" and (
@@ -1103,9 +1135,47 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
event, result="SUCCESS")
return
nfp_context = event.data
nfp_core_context.store_nfp_context(nfp_context)
network_function = nfp_context['network_function']
ev = self._controller.new_event(
id='UPDATE_NETWORK_FUNCTION_DESCRIPTION',
data=nfp_context,
key=network_function['id'])
self._controller.post_event(ev)

def update_network_function_description(self, event):
# 1) Generate and store resource description as nf description,
# which needs to send to configurator
# 2) Create apply user config event
nfp_context = event.data
network_function = nfp_context['network_function']
network_function['description'] = str(network_function['description'])
neutron_resource_desc = (
self.config_driver.get_neutron_resource_description(
nfp_context))
if not neutron_resource_desc:
LOG.error(_LE("Preparing neutron resource description failed in "
"config driver, marking user config as Failed for "
"network function: %(nf)s"),
{'nf': network_function})
self._create_event('USER_CONFIG_FAILED',
event_data=nfp_context, is_internal_event=True)
self._controller.event_complete(event, result='FAILED')
return
nf_desc = network_function[
'description'] + '\n' + neutron_resource_desc
nfp_context['network_function'].update({'description': nf_desc})
self.db_handler.update_network_function(
self.db_session, network_function['id'], {'description': nf_desc})
ev = self._controller.new_event(
id='APPLY_USER_CONFIG',
data=nfp_context,
key=network_function['id'])
self._controller.post_event(ev)
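The single APPLY_USER_CONFIG step is now a chain of granular events: INITIATE_USER_CONFIG, then UPDATE_NETWORK_FUNCTION_DESCRIPTION, then APPLY_USER_CONFIG. A simplified model of the sequencing (handlers reduced to plain callables; the real chain runs through controller.post_event):

    # Simplified model of the new user-config chain:
    # INITIATE_USER_CONFIG -> UPDATE_NETWORK_FUNCTION_DESCRIPTION
    #                      -> APPLY_USER_CONFIG -> CHECK_USER_CONFIG_COMPLETE
    def run_user_config_chain(nfp_context, handlers):
        for event_id in ('INITIATE_USER_CONFIG',
                         'UPDATE_NETWORK_FUNCTION_DESCRIPTION',
                         'APPLY_USER_CONFIG'):
            # Each handler posts the next event; modeled as a plain call.
            if handlers[event_id](nfp_context) == 'FAILED':
                return 'USER_CONFIG_FAILED'
        return 'CHECK_USER_CONFIG_COMPLETE'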

def apply_user_config(self, event):
nfp_context = event.data
nfp_core_context.store_nfp_context(nfp_context)
network_function = nfp_context['network_function']
nfp_context['config_policy_id'] = self.config_driver.apply_heat_config(
nfp_context) # Heat driver to launch stack
nfp_context['network_function_id'] = network_function['id']
@@ -1118,11 +1188,11 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):

LOG.debug("handle_device_active config_policy_id: %s"
% (nfp_context['config_policy_id']))

nfp_context['network_function'].update({
'config_policy_id': nfp_context['config_policy_id'],
'description': network_function['description']})

nfp_context['network_function'].update(
{'config_policy_id': nfp_context['config_policy_id']})
self.db_handler.update_network_function(
self.db_session, network_function['id'],
{'config_policy_id': nfp_context['config_policy_id']})
nfp_context['event_desc'] = event.desc.to_dict()
self._create_event(
'CHECK_USER_CONFIG_COMPLETE',
@@ -1131,11 +1201,6 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
original_event=event,
max_times=nfp_constants.CHECK_USER_CONFIG_COMPLETE_MAXRETRY)

self.db_handler.update_network_function(
self.db_session, network_function['id'],
{'config_policy_id': nfp_context['config_policy_id'],
'description': network_function['description']})

def handle_update_user_config(self, event):
'''
Handler to apply any updates in user config.
@@ -1215,7 +1280,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
'config_policy_id': config_id,
'tenant_id': network_function['tenant_id'],
'network_function_id': network_function['id'],
'network_function_details': network_function_details
'network_function_details': network_function_details,
'operation': request_data['operation']
}
if not config_id:
event_id = ('USER_CONFIG_UPDATE_FAILED'
@@ -1252,6 +1318,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
# Trigger RPC to notify the Create_Service caller with status

def handle_driver_error(self, network_function_id):
LOG.error(_LE("Error occurred while processing network function "
"CRUD operations, marking network function: %(nf_id)s "
"as ERROR to initiate cleanup."),
{'nf_id': network_function_id})
network_function_details = self.get_network_function_details(
network_function_id)
network_function_id = network_function_details.get(
@@ -1389,7 +1459,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
# Complete the original event APPLY_USER_CONFIG here
event_desc = nfp_context.pop('event_desc')
apply_config_event = self._controller.new_event(
id='APPLY_USER_CONFIG',
id='INITIATE_USER_CONFIG',
key=network_function['id'],
desc_dict=event_desc)
apply_config_event.binding_key = binding_key
@@ -1401,7 +1471,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
# Complete the original event DEVICE_ACTIVE here
event_desc = nfp_context.pop('event_desc')
apply_config_event = self._controller.new_event(
id='APPLY_USER_CONFIG',
id='INITIATE_USER_CONFIG',
key=network_function['id'],
desc_dict=event_desc)
apply_config_event.binding_key = binding_key
@@ -2020,7 +2090,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
def get_network_function_context(self, network_function_id):
network_function_details = self.get_network_function_details(
network_function_id)

network_function_device = (
network_function_details['network_function_device'])
ports_info = []
for id in network_function_details[
'network_function_instance']['port_info']:
@@ -2028,16 +2099,15 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
ports_info.append(port_info)

mngmt_port_info = None
mgmt_port_id = network_function_details[
'network_function_device']['mgmt_port_id']
if mgmt_port_id is not None:
mngmt_port_info = self.get_port_info(mgmt_port_id)

monitor_port_id = network_function_details[
'network_function_device']['monitoring_port_id']
monitor_port_info = None
if monitor_port_id is not None:
monitor_port_info = self.get_port_info(monitor_port_id)
if network_function_device:
mgmt_port_id = network_function_device['mgmt_port_id']
if mgmt_port_id is not None:
mngmt_port_info = self.get_port_info(mgmt_port_id)

monitor_port_id = network_function_device['monitoring_port_id']
if monitor_port_id is not None:
monitor_port_info = self.get_port_info(monitor_port_id)

nf_context = {'network_function_details': network_function_details,
'ports_info': ports_info,

@@ -15,6 +15,7 @@ from keystoneclient.v2_0 import client as identity_client
from keystoneclient.v3 import client as keyclientv3
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from novaclient import exceptions as nova_exc

from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
@@ -368,11 +369,71 @@ class NovaClient(OpenstackApi):
LOG.error(err)
raise Exception(err)

def delete_affinity_group(self, token, tenant_id, nf_id):
""" Deletes a server group

:param token: A scoped token
:param tenant_id: Tenant UUID
:param nf_id: Network Function UUID

Returns: None

"""

nova_version = 2.15
nova = nova_client.Client(nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)

try:
affinity_group = nova.server_groups.find(name=nf_id)
affinity_group_id = affinity_group.to_dict()['id']
nova.server_groups.delete(affinity_group_id)
msg = ("Successfully deleted Nova Server Anti-Affinity "
"Group: %s" % nf_id)
LOG.info(msg)
except nova_exc.NotFound:
pass
except Exception as err:
msg = ("Failed to delete Nova Server Anti-Affinity Group "
"with name %s. Error: %s" % (nf_id, err))
LOG.error(msg)

def create_affinity_group(self, token, tenant_id, nf_id):
""" Creates a server group

:param token: A scoped token
:param tenant_id: Tenant UUID
:param nf_id: Network Function UUID

Returns: Nova server-group json object

"""

nova_version = 2.15
kwargs = dict(name=nf_id, policies=['soft-anti-affinity'])
nova = nova_client.Client(nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)

try:
affinity_group = nova.server_groups.create(**kwargs)
affinity_group_id = affinity_group.to_dict()['id']
msg = ("Successfully created Nova Server Anti-Affinity "
"Group: %s" % nf_id)
LOG.info(msg)
return affinity_group_id
except Exception as err:
msg = ("Failed to create Nova Server Anti-Affinity Group. "
"Error: %s" % err)
LOG.error(msg)
return None
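Together with the create_instance change below, the intended sequence is: create the group named after the network function, then pass its id as a scheduler hint. A hedged sketch of that flow; 'api' is a NovaClient-style wrapper as above and all ids are placeholders:

    # Sketch only: shows the intended group-then-hint sequence; 'api'
    # and the ids are hypothetical stand-ins.
    def boot_with_anti_affinity(api, token, tenant_id, nf_id,
                                image_id, flavor, port_ids):
        server_grp_id = api.create_affinity_group(token, tenant_id, nf_id)
        try:
            # create_instance() turns server_grp_id into
            # scheduler_hints={"group": server_grp_id}, so every member
            # of this network function falls under soft-anti-affinity.
            return api.create_instance(
                token, tenant_id, image_id, flavor, port_ids,
                name='nfd-' + nf_id, volume_support=False,
                volume_size='2', server_grp_id=server_grp_id)
        except Exception:
            api.delete_affinity_group(token, tenant_id, nf_id)
            raise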

def create_instance(self, token, tenant_id, image_id, flavor,
nw_port_id_list, name, volume_support,
volume_size, secgroup_name=None,
metadata=None, files=None, config_drive=False,
userdata=None, key_name='', different_hosts=None,
userdata=None, key_name='', server_grp_id=None,
):
""" Launch a VM with given details

@@ -389,7 +450,7 @@ class NovaClient(OpenstackApi):
"src": <file_contents>}]
:param userdata: user data file name
:param key_name: Nova keypair name
:param different_hosts: Different host filter (List)
:param server_grp_id: Nova server group UUID
:param volume_support: volume support to launch instance
:param volume_size: cinder volume size in GB
:return: VM instance UUID
@@ -424,8 +485,8 @@ class NovaClient(OpenstackApi):
]
kwargs.update(block_device_mapping_v2=block_device_mapping_v2)

if different_hosts:
kwargs.update(scheduler_hints={"different_host": different_hosts})
if server_grp_id:
kwargs.update(scheduler_hints={"group": server_grp_id})
if key_name != '':
kwargs.update(key_name=key_name)
if config_drive is True:
@@ -862,7 +923,8 @@ class GBPClient(OpenstackApi):
raise Exception(err)

def create_policy_target(self, token, tenant_id,
policy_target_group_id, name, port_id=None):
policy_target_group_id, name, port_id=None,
description=''):
""" Creates a GBP Policy Target

:param token: A scoped token
@@ -881,6 +943,8 @@ class GBPClient(OpenstackApi):
policy_target_info['policy_target'].update({'name': name})
if port_id:
policy_target_info["policy_target"]["port_id"] = port_id
if description:
policy_target_info["policy_target"]["description"] = description

try:
gbp = gbp_client.Client(token=token,
@@ -984,7 +1048,8 @@ class GBPClient(OpenstackApi):
LOG.error(err)
raise Exception(err)

def create_l2_policy(self, token, tenant_id, name, l3_policy_id=None):
def create_l2_policy(self, token, tenant_id, name, l3_policy_id=None,
description=''):

l2_policy_info = {
"l2_policy": {
@@ -994,6 +1059,8 @@ class GBPClient(OpenstackApi):
}
if l3_policy_id:
l2_policy_info["l2_policy"].update({'l3_policy_id': l3_policy_id})
if description:
l2_policy_info["l2_policy"].update({'description': description})

try:
gbp = gbp_client.Client(token=token,

@@ -25,6 +25,7 @@ import yaml

LOG = logging.getLogger(__name__)
SUCCESS = 'SUCCESS'
FAILED = 'FAILED'

notifications = []
FW_SCRIPT_PATH = ("/usr/local/lib/python2.7/dist-packages/" +
@@ -102,19 +103,31 @@ class Controller(rest.RestController):
notification_data = []

for config_data in config_datas:
resource = config_data['resource']
if resource == 'healthmonitor':
self._configure_healthmonitor(config_data)
elif resource == 'interfaces':
self._configure_interfaces(config_data)
elif resource == 'routes':
self._add_routes(config_data)
elif (config_data['resource'] in ['ansible', 'heat',
'custom_json']):
self._apply_user_config(config_data)
notification_data.append(
{'resource': config_data['resource'],
'data': {'status_code': SUCCESS}})
try:
resource = config_data['resource']
if resource == 'healthmonitor':
self._configure_healthmonitor(config_data)
elif resource == 'interfaces':
self._configure_interfaces(config_data)
elif resource == 'routes':
self._add_routes(config_data)
elif (config_data['resource'] in ['ansible', 'heat',
'custom_json']):
self._apply_user_config(config_data)
else:
status_msg = 'Unsupported resource'
notification_data.append(
{'resource': resource,
'data': {'status_code': FAILED,
'status_msg': status_msg}})
notification_data.append(
{'resource': config_data['resource'],
'data': {'status_code': SUCCESS}})
except Exception as ex:
notification_data.append(
{'resource': resource,
'data': {'status_code': FAILED,
'status_msg': str(ex)}})
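For reference, an illustrative sample of the notification_data this loop can produce, one entry per config_data, mixing the three outcomes (values are made up):

    # Example payloads produced by the loop above (illustrative only).
    notification_data = [
        {'resource': 'interfaces',
         'data': {'status_code': 'SUCCESS'}},
        {'resource': 'unknown_resource',
         'data': {'status_code': 'FAILED',
                  'status_msg': 'Unsupported resource'}},
        {'resource': 'routes',
         'data': {'status_code': 'FAILED',
                  'status_msg': 'Some of the interfaces do not have '
                                'IP Address'}},
    ]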

context = body['info']['context']
self._push_notification(context, notification_data,
@@ -147,40 +160,63 @@ class Controller(rest.RestController):
"data : %(interface_data)s ") %
{'interface_data': config_data})

def get_source_cidrs_and_gateway_ip(self, route_info):
nfds = route_info['resource_data']['nfds']
source_cidrs = []
for nfd in nfds:
for network in nfd['networks']:
source_cidrs.append(network['cidr'])
if network['type'] == 'stitching':
gateway_ip = network['gw_ip']
return source_cidrs, gateway_ip

def _add_routes(self, route_info):
LOG.info(_LI("Configuring routes with configuration "
"data : %(route_data)s ") %
{'route_data': route_info['resource_data']})
source_cidrs = route_info['resource_data']['source_cidrs']
gateway_ip = route_info['resource_data']['gateway_ip']
source_cidrs, gateway_ip = self.get_source_cidrs_and_gateway_ip(
route_info)
default_route_commands = []
for cidr in source_cidrs:
source_interface = self._get_if_name_by_cidr(cidr)
try:
source_interface = self._get_if_name_by_cidr(cidr)
except Exception:
raise Exception("Some of the interfaces do not have "
"IP Address")
try:
interface_number_string = source_interface.split("eth", 1)[1]
except IndexError:
LOG.error(_LE("Retrieved wrong interface %(interface)s for "
"configuring routes") %
{'interface': source_interface})
routing_table_number = 20 + int(interface_number_string)
ip_rule_command = "ip rule add from %s table %s" % (
cidr, routing_table_number)
out1 = subprocess.Popen(ip_rule_command, shell=True,
stdout=subprocess.PIPE).stdout.read()
ip_rule_command = "ip rule add to %s table main" % (cidr)
out2 = subprocess.Popen(ip_rule_command, shell=True,
stdout=subprocess.PIPE).stdout.read()
ip_route_command = "ip route add table %s default via %s" % (
routing_table_number, gateway_ip)
default_route_commands.append(ip_route_command)
output = "%s\n%s" % (out1, out2)
LOG.info(_LI("Static route configuration result: %(output)s") %
{'output': output})
try:
routing_table_number = 20 + int(interface_number_string)

ip_rule_command = "ip rule add from %s table %s" % (
cidr, routing_table_number)
out1 = subprocess.Popen(ip_rule_command, shell=True,
stdout=subprocess.PIPE).stdout.read()
ip_rule_command = "ip rule add to %s table main" % (cidr)
out2 = subprocess.Popen(ip_rule_command, shell=True,
stdout=subprocess.PIPE).stdout.read()
ip_route_command = "ip route add table %s default via %s" % (
routing_table_number, gateway_ip)
default_route_commands.append(ip_route_command)
output = "%s\n%s" % (out1, out2)
LOG.info(_LI("Static route configuration result: %(output)s") %
{'output': output})
except Exception as ex:
raise Exception("Failed to add static routes: %(ex)s" % {
'ex': str(ex)})
for command in default_route_commands:
out = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE).stdout.read()
LOG.info(_LI("Static route configuration result: %(output)s") %
{'output': out})
try:
out = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE).stdout.read()
LOG.info(_LI("Static route configuration result: %(output)s") %
{'output': out})
except Exception as ex:
raise Exception("Failed to add static routes: %(ex)s" % {
'ex': str(ex)})
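Per source CIDR the method boils down to three commands against a dedicated routing table (20 plus the ethN index). A small sketch of the command generation, assuming eth-style names as returned by _get_if_name_by_cidr:

    # Sketch: generate the policy-routing commands for one source CIDR.
    # Table number = 20 + ethN index, as in _add_routes above.
    def routing_commands(cidr, source_interface, gateway_ip):
        table = 20 + int(source_interface.split("eth", 1)[1])
        return [
            "ip rule add from %s table %s" % (cidr, table),  # egress table
            "ip rule add to %s table main" % cidr,           # replies: main
            "ip route add table %s default via %s" % (table, gateway_ip),
        ]

    assert routing_commands('11.0.1.0/24', 'eth2', '192.168.0.1') == [
        'ip rule add from 11.0.1.0/24 table 22',
        'ip rule add to 11.0.1.0/24 table main',
        'ip route add table 22 default via 192.168.0.1']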

def _get_if_name_by_cidr(self, cidr):
interfaces = netifaces.interfaces()