LBaaS V1 removal from NFP for Newton support.

1) Removed LBaaS V1 code from NFP.
2) LBaaS V2 is now the default LB service.
3) Renamed the 'haproxy_lbaasv2' vendor to 'haproxy' (see the sketch below).
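
The vendor rename in 3) changes the vendor string carried by LBaaS service
profiles. A minimal, purely illustrative sketch (the profile fields and values
shown here are assumptions, not taken from this change):

# Hypothetical LBaaS V2 service profile after this change.
lb_profile = {
    'service_type': 'LOADBALANCERV2',  # assumed profile field/value
    'vendor': 'haproxy',  # previously 'haproxy_lbaasv2'
}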

Change-Id: Ieb0db3ce00f36f69d2587db278f1f9adc8de8666
pkharat 2017-03-19 17:34:02 +05:30 committed by DhuldevValekar
parent 6fc3115e14
commit 8c2a736870
35 changed files with 175 additions and 5076 deletions


@@ -11,13 +11,11 @@
# under the License.
FW_NFP_CONFIGAGENT_TOPIC = 'nfp-firewall-agent'
LB_NFP_CONFIGAGENT_TOPIC = 'nfp-lbaas-agent'
LBV2_NFP_CONFIGAGENT_TOPIC = 'nfp-lbaasv2-agent'
VPN_NFP_CONFIGAGENT_TOPIC = 'nfp-vpn_agent'
NFP_NSO_TOPIC = "nfp-service-orchestrator"
FW_NFP_PLUGIN_TOPIC = 'q-firewall-plugin'
LB_NFP_PLUGIN_TOPIC = 'n-lbaas-plugin'
LBV2_NFP_PLUGIN_TOPIC = 'n-lbaasv2-plugin'
VPN_NFP_PLUGIN_TOPIC = 'vpn_plugin'
DEVICE_ORCH_TOPIC = 'nfp-configurator-ndo'
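
With the V1 topics removed above, callers that previously cast to
LB_NFP_PLUGIN_TOPIC now target the V2 topic. A minimal sketch, modeled on the
notifier pattern removed further below; the V2 RPC version constant name and
the argument values are assumptions:

# Hedged sketch: status-update cast to the remaining LBaaS V2 plugin topic.
rpcClient = transport.RPCClient(a_topics.LBV2_NFP_PLUGIN_TOPIC)
rpcClient.cctxt = rpcClient.client.prepare(
    version=const.LOADBALANCERV2_RPC_API_VERSION)  # assumed constant name
rpcClient.cctxt.cast(context, 'update_status',
                     obj_type='loadbalancer',  # illustrative values
                     obj_id=loadbalancer_id,
                     status='ACTIVE')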


@@ -1,367 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
from gbpservice._i18n import _LI
from gbpservice.contrib.nfp.config_orchestrator.common import common
from gbpservice.nfp.common import constants as const
from gbpservice.nfp.common import data_formatter as df
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import transport
from neutron_lbaas.db.loadbalancer import loadbalancer_db
from oslo_log import helpers as log_helpers
import oslo_messaging as messaging
LOG = nfp_logging.getLogger(__name__)
"""
RPC handler for Loadbalancer service
"""
class LbAgent(loadbalancer_db.LoadBalancerPluginDb):
RPC_API_VERSION = const.LOADBALANCER_RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, conf, sc):
super(LbAgent, self).__init__()
self._conf = conf
self._sc = sc
self._db_inst = super(LbAgent, self)
def _get_pools(self, **kwargs):
context = kwargs.get('context')
filters = {'tenant_id': [kwargs.get('tenant_id')],
'id': [kwargs.get('pool_id')]}
args = {'context': context, 'filters': filters}
pools = self._db_inst.get_pools(**args)
for pool in pools:
pool['description'] = kwargs.get('description')
return pools
def _get_vips(self, **kwargs):
context = kwargs.get('context')
filters = {'tenant_id': [kwargs.get('tenant_id')],
'pool_id': [kwargs.get('pool_id')]}
args = {'context': context, 'filters': filters}
vips = self._db_inst.get_vips(**args)
for vip in vips:
vip['description'] = kwargs.get('description')
return vips
def _get_members(self, **kwargs):
context = kwargs.get('context')
filters = {'tenant_id': [kwargs.get('tenant_id')],
'pool_id': [kwargs.get('pool_id')]}
args = {'context': context, 'filters': filters}
members = self._db_inst.get_members(**args)
for member in members:
member.update({'description': kwargs.get('description')})
return members
def _get_health_monitors(self, **kwargs):
context = kwargs.get('context')
filters = {'tenant_id': [kwargs.get('tenant_id')],
'pool_id': [kwargs.get('pool_id')]}
args = {'context': context, 'filters': filters}
health_monitors = self._db_inst.get_health_monitors(**args)
for health_monitor in health_monitors:
health_monitor.update({'description': kwargs.get('description')})
return health_monitors
def _get_lb_context(self, **kwargs):
pools = self._get_pools(**kwargs)
vips = self._get_vips(**kwargs)
members = self._get_members(**kwargs)
healthmonitors = self._get_health_monitors(**kwargs)
return {'pools': pools,
'vips': vips,
'members': members,
'health_monitors': healthmonitors}
def _context(self, **kwargs):
context = kwargs.get('context')
if context.is_admin:
kwargs['tenant_id'] = context.tenant_id
lb_db = self._get_lb_context(**kwargs)
return lb_db
def _prepare_resource_context_dicts(self, **kwargs):
# Prepare context_dict
context = kwargs.get('context')
context_resource_data = kwargs.pop('context_resource_data')
ctx_dict = context.to_dict()
# Collecting db entry required by configurator.
# Addind service_info to neutron context and sending
# dictionary format to the configurator.
db = self._context(**kwargs)
rsrc_ctx_dict = copy.deepcopy(ctx_dict)
rsrc_ctx_dict.update({'service_info': db})
rsrc_ctx_dict.update({'resource_data': context_resource_data})
return ctx_dict, rsrc_ctx_dict
def _get_resource_data(self, description, resource_type):
resource_data = df.get_network_function_info(description,
resource_type)
return resource_data
def _update_request_data(self, body, description):
pass
def _data_wrapper(self, context, tenant_id, name, reason, nf, **kwargs):
nfp_context = {}
description = ast.literal_eval((nf['description'].split('\n'))[1])
description.update({'tenant_id': tenant_id})
# REVISIT(dpak): We need to avoid resource description
# dependency in OTC and instead use neutron context description.
context_resource_data = self._get_resource_data(description,
const.LOADBALANCER)
if name.lower() == 'pool_health_monitor':
pool_id = kwargs.get('pool_id')
kwargs['health_monitor'].update({'description': str(description)})
elif name.lower() == 'pool':
pool_id = kwargs['pool'].get('id')
kwargs['pool']['description'] = str(description)
elif name.lower() == 'vip':
pool_id = kwargs['vip'].get('pool_id')
kwargs['vip']['description'] = str(description)
nfp_context = {'network_function_id': nf['id'],
'vip_id': kwargs['vip']['id']}
else:
kwargs[name.lower()].update({'description': str(description)})
pool_id = kwargs[name.lower()].get('pool_id')
args = {'tenant_id': tenant_id,
'pool_id': pool_id,
'context': context,
'description': str(description),
'context_resource_data': context_resource_data}
ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
**args)
nfp_context.update({'neutron_context': ctx_dict,
'requester': 'nas_service',
'logging_context':
module_context.get()['log_context']})
resource_type = 'loadbalancer'
resource = name
resource_data = {'neutron_context': rsrc_ctx_dict}
resource_data.update(**kwargs)
body = common.prepare_request_data(nfp_context, resource,
resource_type, resource_data,
description['service_vendor'])
self._update_request_data(body, description)
return body
def _post(self, context, tenant_id, name, nf, **kwargs):
body = self._data_wrapper(context, tenant_id, name,
'CREATE', nf, **kwargs)
transport.send_request_to_configurator(self._conf,
context, body, "CREATE")
def _put(self, context, tenant_id, name, nf, **kwargs):
body = self._data_wrapper(context, tenant_id, name,
'UPDATE', nf, **kwargs)
transport.send_request_to_configurator(self._conf,
context, body, "UPDATE")
def _delete(self, context, tenant_id, name, nf, **kwargs):
body = self._data_wrapper(context, tenant_id, name,
'DELETE', nf, **kwargs)
transport.send_request_to_configurator(self._conf,
context, body, "DELETE")
def _get_pool(self, context, pool_id):
pool = None
try:
pool = self._db_inst.get_pool(context, pool_id)
except Exception as e:
msg = ("%s" % (e))
LOG.error(msg)
return pool
def _fetch_nf_from_resource_desc(self, desc):
desc_dict = ast.literal_eval(desc)
nf_id = desc_dict['network_function_id']
return nf_id
@log_helpers.log_method_call
def create_vip(self, context, vip):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC CREATE VIP for VIP:%(vip)s"),
{'vip': vip})
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(vip["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._post(context, vip['tenant_id'], 'vip', nf, vip=vip)
@log_helpers.log_method_call
def update_vip(self, context, old_vip, vip):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC UPDATE VIP for VIP:%(vip)s "
"and OLD_VIP:%(old_vip)s"),
{'vip': vip,
'old_vip': old_vip})
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(vip["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._put(context, vip['tenant_id'], 'vip', nf, old_vip=old_vip,
vip=vip)
@log_helpers.log_method_call
def delete_vip(self, context, vip):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC DELETE VIP for VIP:%(vip)s"),
{'vip': vip})
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(vip["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._delete(context, vip['tenant_id'], 'vip', nf, vip=vip)
@log_helpers.log_method_call
def create_pool(self, context, pool, driver_name):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC CREATE POOL for Pool:%(pool)s"),
{'pool': pool})
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._post(
context, pool['tenant_id'],
'pool', nf, pool=pool, driver_name=driver_name)
@log_helpers.log_method_call
def update_pool(self, context, old_pool, pool):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC UPDATE POOL with Pool: %(pool)s "
"and Old_Pool:%(old_pool)s"),
{'pool': pool,
'old_pool': old_pool})
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._put(context, pool['tenant_id'], 'pool', nf, old_pool=old_pool,
pool=pool)
@log_helpers.log_method_call
def delete_pool(self, context, pool):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC DELETE POOL for Pool:%(pool)s"),
{'pool': pool})
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._delete(context, pool['tenant_id'], 'pool', nf, pool=pool)
@log_helpers.log_method_call
def create_member(self, context, member):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC CREATE MEMBER for Member:%(member)s"),
{'member': member})
# Fetch pool from pool_id
pool = self._get_pool(context, member['pool_id'])
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._post(context, member['tenant_id'], 'member', nf, member=member)
@log_helpers.log_method_call
def update_member(self, context, old_member, member):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC UPDATE MEMBER with Member:%(member)s "
"and Old_Member:%(old_member)s"),
{'member': member,
'old_member': old_member})
# Fetch pool from pool_id
pool = self._get_pool(context, member['pool_id'])
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._put(context, member['tenant_id'], 'member', nf,
old_member=old_member, member=member)
@log_helpers.log_method_call
def delete_member(self, context, member):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC DELETE MEMBER for Member:%(member)s"),
{'member': member})
# Fetch pool from pool_id
pool = self._get_pool(context, member['pool_id'])
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._delete(
context, member['tenant_id'], 'member',
nf, member=member)
@log_helpers.log_method_call
def create_pool_health_monitor(self, context, health_monitor, pool_id):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC CREATE POOL HEALTH MONITOR for HM:%(hm)s"),
{'hm': health_monitor})
# Fetch pool from pool_id
pool = self._get_pool(context, pool_id)
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._post(context, health_monitor[
'tenant_id'], 'pool_health_monitor',
nf, health_monitor=health_monitor, pool_id=pool_id)
@log_helpers.log_method_call
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC UPDATE POOL HEALTH MONITOR with "
"HM:%(hm)s and Old_HM:%(old_hm)s"),
{'hm': health_monitor,
'old_hm': old_health_monitor})
# Fetch pool from pool_id
pool = self._get_pool(context, pool_id)
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._put(context, health_monitor[
'tenant_id'], 'pool_health_monitor',
nf, old_health_monitor=old_health_monitor,
health_monitor=health_monitor, pool_id=pool_id)
@log_helpers.log_method_call
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
nfp_context = module_context.init()
LOG.info(_LI("Received RPC DELETE POOL HEALTH MONITOR for HM:%(hm)s"),
{'hm': health_monitor})
# Fetch pool from pool_id
pool = self._get_pool(context, pool_id)
# Fetch nf_id from description of the resource
nf_id = self._fetch_nf_from_resource_desc(pool["description"])
nfp_context['log_context']['meta_id'] = nf_id
nf = common.get_network_function_details(context, nf_id)
self._delete(
context, health_monitor['tenant_id'], 'pool_health_monitor',
nf, health_monitor=health_monitor, pool_id=pool_id)


@@ -115,74 +115,6 @@ class FirewallNotifier(object):
firewall_id=firewall_id)
class LoadbalancerNotifier(object):
def __init__(self, conf, sc):
self._sc = sc
self._conf = conf
def update_status(self, context, notification_data):
nfp_context = module_context.init()
notification = notification_data['notification'][0]
request_info = notification_data.get('info')
request_context = request_info.get('context')
logging_context = request_context.get('logging_context', {})
nfp_context['log_context'] = logging_context
resource_data = notification['data']
obj_type = resource_data['obj_type']
obj_id = resource_data['obj_id']
status = resource_data['status']
LOG.info(_LI("Received LB's update_status API. Making an "
"update_status RPC call to plugin for %(obj_type)s:"
"%(obj_id)s with status:%(status)s"),
{'obj_type': obj_type,
'obj_id': obj_id,
'status': status})
# RPC call to plugin to update status of the resource
rpcClient = transport.RPCClient(a_topics.LB_NFP_PLUGIN_TOPIC)
rpcClient.cctxt = rpcClient.client.prepare(
version=const.LOADBALANCER_RPC_API_VERSION)
rpcClient.cctxt.cast(context, 'update_status',
obj_type=obj_type,
obj_id=obj_id,
status=status)
def update_pool_stats(self, context, notification_data):
nfp_context = module_context.init()
notification = notification_data['notification'][0]
request_info = notification_data.get('info')
request_context = request_info.get('context')
logging_context = request_context.get('logging_context', {})
nfp_context['log_context'] = logging_context
resource_data = notification['data']
pool_id = resource_data['pool_id']
stats = resource_data['stats']
host = resource_data['host']
LOG.info(_LI("Received LB's update_pool_stats API, making an "
"update_pool_stats RPC cast to plugin for updating "
"pool stats for pool: %(pool)s"),
{'pool': pool_id})
# RPC cast to plugin to update stats of pool
rpcClient = transport.RPCClient(a_topics.LB_NFP_PLUGIN_TOPIC)
rpcClient.cctxt = rpcClient.client.prepare(
version=const.LOADBALANCER_RPC_API_VERSION)
rpcClient.cctxt.cast(context, 'update_pool_stats',
pool_id=pool_id,
stats=stats,
host=host)
def vip_deleted(self, context, notification_data):
pass
class LoadbalancerV2Notifier(object):
def __init__(self, conf, sc):
@@ -273,7 +205,6 @@ class VpnNotifier(object):
ServicetypeToHandlerMap = {'firewall': FirewallNotifier,
'loadbalancer': LoadbalancerNotifier,
'loadbalancerv2': LoadbalancerV2Notifier,
'vpn': VpnNotifier}


@@ -0,0 +1,45 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg as oslo_config
from gbpservice.nfp.core import context
from gbpservice.nfp.orchestrator import context as module_context
context.NfpContext = module_context.NfpContext
openstack_opts = [
oslo_config.StrOpt('auth_host',
default='localhost',
help='OpenStack controller IP address'),
# REVISIT: In future, use nfp_user with admin role instead of admin_user
oslo_config.StrOpt('admin_user',
help='Admin user name to create service VMs'),
oslo_config.StrOpt('admin_password',
help='Admin password to create service VMs'),
# REVISIT: In future, use nfp_tenant_name instead of admin_tenant_name
oslo_config.StrOpt('admin_tenant_name',
help='Admin tenant name to create service VMs'),
oslo_config.StrOpt('admin_tenant_id',
help='Admin tenant ID to create service VMs'),
oslo_config.StrOpt('auth_protocol',
default='http', help='Auth protocol used.'),
oslo_config.IntOpt('auth_port',
default=5000, help='Auth port used.'),
oslo_config.IntOpt('bind_port',
default=9696, help='Bind port used.'),
oslo_config.StrOpt('auth_version',
default='v2.0', help='Auth version used.'),
oslo_config.StrOpt('auth_uri',
default='', help='Auth URI.'),
]
oslo_config.CONF.register_opts(openstack_opts, "nfp_keystone_authtoken")
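
Once registered, these options are read from the [nfp_keystone_authtoken]
section of the service configuration file. A minimal usage sketch; the auth
URL assembly is illustrative, not part of this change:

# Hedged sketch: consuming the options registered above.
keystone_conf = oslo_config.CONF.nfp_keystone_authtoken
auth_url = '%s://%s:%s/%s' % (keystone_conf.auth_protocol,
                              keystone_conf.auth_host,
                              keystone_conf.auth_port,
                              keystone_conf.auth_version)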


@@ -14,8 +14,6 @@ from gbpservice.contrib.nfp.config_orchestrator.common import (
topics as a_topics)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import (
firewall as fw)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import (
loadbalancer as lb)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import (
loadbalancerv2 as lbv2)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import vpn
@@ -35,25 +33,6 @@ def rpc_init(sc, conf):
manager=fwrpcmgr
)
lb_report_state = {
'binary': 'NCO',
'host': cfg.CONF.host,
'topic': a_topics.LB_NFP_CONFIGAGENT_TOPIC,
'plugin_topic': a_topics.LB_NFP_PLUGIN_TOPIC,
'agent_type': 'NFP Loadbalancer agent',
'configurations': {'device_drivers': ['loadbalancer']},
'start_flag': True,
'report_interval': 10
}
lbrpcmgr = lb.LbAgent(conf, sc)
lbagent = RpcAgent(
sc,
host=cfg.CONF.host,
topic=a_topics.LB_NFP_CONFIGAGENT_TOPIC,
manager=lbrpcmgr,
report_state=lb_report_state
)
lbv2_report_state = {
'binary': 'NCO',
'host': cfg.CONF.host,
@@ -100,7 +79,7 @@ def rpc_init(sc, conf):
manager=rpchandler,
)
sc.register_rpc_agents([fwagent, lbagent, lbv2agent, vpnagent, rpcagent])
sc.register_rpc_agents([fwagent, lbv2agent, vpnagent, rpcagent])
def nfp_module_init(sc, conf):


@@ -1,884 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from gbpservice._i18n import _LI
from gbpservice.contrib.nfp.configurator.agents import agent_base
from gbpservice.contrib.nfp.configurator.lib import data_filter
from gbpservice.contrib.nfp.configurator.lib import lb_constants
from gbpservice.contrib.nfp.configurator.lib import utils
from gbpservice.nfp.core import event as nfp_event
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import module as nfp_api
from neutron import context
LOG = nfp_logging.getLogger(__name__)
class LBaasRpcSender(data_filter.Filter):
"""Implements LBaaS response path to Neutron plugin.
Methods of this class are invoked by LBaasEventHandler class
and also by driver class for sending response from driver to
the LBaaS Neutron plugin.
"""
def __init__(self, sc):
self.notify = agent_base.AgentBaseNotification(sc)
def get_logical_device(self, pool_id, context):
""" Calls data filter library to get logical device from pool_id.
:param pool_id: Neutron LBaaS pool id
:param context: RPC context
Returns: logical_device
"""
LOG.info(_LI("Sending RPC call 'Get Logical Device' "
"for pool: %(pool_id)s"),
{'pool_id': pool_id})
return self.call(
context,
self.make_msg(
'get_logical_device',
pool_id=pool_id
)
)
def update_status(self, obj_type, obj_id, status, agent_info, obj=None):
""" Enqueues the response from LBaaS operation to neutron plugin.
:param obj_type: Neutron LBaaS object type
:param obj_id: Neutron LBaaS object id
:param status: Neutron LBaaS object status to be set
:param agent_info: Agent info which carries context which is needed
in config_orch to send response back to *aaS plugin
:param obj: Neutron LBaaS object
Returns: None
"""
msg = {'info': {'service_type': lb_constants.SERVICE_TYPE,
'context': agent_info['context']},
'notification': [{'resource': agent_info['resource'],
'data': {'obj_type': obj_type,
'obj_id': obj_id,
'notification_type': 'update_status',
'status': status,
obj_type: obj}}]
}
LOG.info(_LI("Sending Notification 'Update Status' "
"for resource: %(resource)s with status:"
"%(status)s"),
{'resource': agent_info['resource'],
'status': status})
self.notify._notification(msg)
def update_pool_stats(self, pool_id, stats, context, pool=None):
""" Enqueues the response from LBaaS operation to neutron plugin.
:param pool_id: Neutron LBaaS pool id
:param stats: statistics of that pool
:param context: RPC context
Returns: None
"""
msg = {'info': {'service_type': lb_constants.SERVICE_TYPE,
'context': context.to_dict()},
'notification': [{'resource': 'pool',
'data': {'pool_id': pool_id,
'stats': stats,
'notification_type': (
'update_pool_stats'),
'pool': pool_id}}]
}
LOG.info(_LI("Sending Notification 'Update Pool Stats' "
"for pool: %(pool_id)s with stats:%(stats)s"),
{'pool_id': pool_id,
'stats': stats})
self.notify._notification(msg)
def vip_deleted(self, vip, status, agent_info):
""" Enqueues the response from LBaaS operation to neutron plugin.
:param vip: Neutron LBaaS vip resource
:param status: Neutron LBaaS vip resource status
:param agent_info: Agent info which carries context which is needed
in config_orch to send response back to *aaS plugin
Returns: None
"""
msg = {'info': {'service_type': lb_constants.SERVICE_TYPE,
'context': agent_info['context']},
'notification': [{'resource': agent_info['resource'],
'data': {'vip_id': vip['id'],
'vip': vip,
'notification_type': 'vip_deleted',
'status': status}}]
}
LOG.info(_LI("Sending Notification 'VIP Deleted' "
"for vip: %(vip_id)s with status:%(status)s"),
{'vip_id': vip['id'],
'status': status})
self.notify._notification(msg)
class LBaaSRpcManager(agent_base.AgentBaseRPCManager):
"""Implements APIs invoked by configurator for processing RPC messages.
RPC client of configurator module receives RPC messages from REST server
and invokes the API of this class. The instance of this class is registered
with configurator module using register_service_agent API. Configurator
module identifies the service agent object based on service type and
invokes one of the methods of this class to configure the device.
"""
def __init__(self, sc, conf):
"""Instantiates child and parent class objects.
:param sc: Service Controller object that is used for interfacing
with core service controller.
:param conf: Configuration object that is used for configuration
parameter access.
"""
super(LBaaSRpcManager, self).__init__(sc, conf)
def _send_event(self, event_id, data, serialize=False, binding_key=None,
key=None):
"""Posts an event to framework.
:param event_id: Unique identifier for the event
:param data: event data
:param serialize: boolean value used to serialize the event
:param binding_key: binding key to be used for serialization
:param key: event key
Returns: None
"""
ev = self.sc.new_event(id=event_id, data=data)
ev.key = key
ev.serialize = serialize
ev.binding_key = binding_key
self.sc.post_event(ev)
def create_vip(self, context, vip):
"""Enqueues event for worker to process create vip request.
:param context: RPC context
:param vip: vip resource to be created
Returns: None
"""
LOG.info(_LI("Received request 'Create VIP' for VIP:%(vip_id)s "),
{'vip_id': vip['id']})
arg_dict = {'context': context,
'vip': vip,
}
self._send_event(lb_constants.EVENT_CREATE_VIP, arg_dict,
serialize=True, binding_key=vip['pool_id'],
key=vip['id'])
def update_vip(self, context, old_vip, vip):
"""Enqueues event for worker to process update vip request.
:param context: RPC context
:param old_vip: old vip resource to be updated
:param vip: new vip resource
Returns: None
"""
old_val, new_val = self.get_diff_of_dict(old_vip, vip)
arg_dict = {'context': context,
'old_vip': old_vip,
'vip': vip,
}
LOG.info(_LI("Received request 'Update VIP' for VIP:%(vip_id)s "
"with new Param:%(new_val)s and old Param:%(old_val)s"),
{'vip_id': vip['id'],
'new_val': new_val,
'old_val': old_val})
self._send_event(lb_constants.EVENT_UPDATE_VIP, arg_dict,
serialize=True, binding_key=vip['pool_id'],
key=vip['id'])
def delete_vip(self, context, vip):
"""Enqueues event for worker to process delete vip request.
:param context: RPC context
:param vip: vip resource to be deleted
Returns: None
"""
arg_dict = {'context': context,
'vip': vip,
}
LOG.info(_LI("Received 'Delete VIP' request for VIP:%(vip_id)s "),
{'vip_id': vip['id']})
self._send_event(lb_constants.EVENT_DELETE_VIP, arg_dict,
serialize=True, binding_key=vip['pool_id'],
key=vip['id'])
def create_pool(self, context, pool, driver_name):
"""Enqueues event for worker to process create pool request.
:param context: RPC context
:param pool: pool resource to be created
:param driver_name: service vendor driver name
Returns: None
"""
arg_dict = {'context': context,
'pool': pool,
'driver_name': driver_name,
}
LOG.info(_LI("Received request 'Create Pool' for Pool:%(pool_id)s "),
{'pool_id': pool['id']})
self._send_event(lb_constants.EVENT_CREATE_POOL, arg_dict,
serialize=True, binding_key=pool['id'],
key=pool['id'])
def update_pool(self, context, old_pool, pool):
"""Enqueues event for worker to process update pool request.
:param context: RPC context
:param old_pool: old pool resource to be updated
:param pool: new pool resource
Returns: None
"""
old_val, new_val = self.get_diff_of_dict(old_pool, pool)
arg_dict = {'context': context,
'old_pool': old_pool,
'pool': pool,
}
LOG.info(_LI("Received request 'Update Pool' for Pool:%(pool_id)s "
"with new Param:%(new_val)s and old Param:%(old_val)s"),
{'pool_id': pool['id'],
'new_val': new_val,
'old_val': old_val})
self._send_event(lb_constants.EVENT_UPDATE_POOL, arg_dict,
serialize=True, binding_key=pool['id'],
key=pool['id'])
def delete_pool(self, context, pool):
"""Enqueues event for worker to process delete pool request.
:param context: RPC context
:param pool: pool resource to be deleted
Returns: None
"""
arg_dict = {'context': context,
'pool': pool,
}
LOG.info(_LI("Received request 'Delete Pool' for Pool:%(pool_id)s "),
{'pool_id': pool['id']})
self._send_event(lb_constants.EVENT_DELETE_POOL, arg_dict,
serialize=True, binding_key=pool['id'],
key=pool['id'])
def create_member(self, context, member):
"""Enqueues event for worker to process create member request.
:param context: RPC context
:param member: member resource to be created
Returns: None
"""
arg_dict = {'context': context,
'member': member,
}
LOG.info(_LI("Received request 'Create Member' for Pool:"
"%(pool_id)s "),
{'pool_id': member['pool_id']})
self._send_event(lb_constants.EVENT_CREATE_MEMBER, arg_dict,
serialize=True, binding_key=member['pool_id'],
key=member['id'])
def update_member(self, context, old_member, member):
"""Enqueues event for worker to process update member request.
:param context: RPC context
:param old_member: old member resource to be updated
:param member: new member resource
Returns: None
"""
old_val, new_val = self.get_diff_of_dict(old_member, member)
arg_dict = {'context': context,
'old_member': old_member,
'member': member,
}
LOG.info(_LI("Received request 'Update Member' for Member:"
"%(member_id)s in Pool:%(pool_id)s with new Param:"
"%(new_val)s and old Param:%(old_val)s"),
{'member_id': member['id'],
'pool_id': member['pool_id'],
'new_val': new_val,
'old_val': old_val})
self._send_event(lb_constants.EVENT_UPDATE_MEMBER, arg_dict,
serialize=True, binding_key=member['pool_id'],
key=member['id'])
def delete_member(self, context, member):
"""Enqueues event for worker to process delete member request.
:param context: RPC context
:param member: member resource to be deleted
Returns: None
"""
arg_dict = {'context': context,
'member': member,
}
LOG.info(_LI("Received request 'Delete Member' for Pool:%(pool_id)s "),
{'pool_id': member['pool_id']})
self._send_event(lb_constants.EVENT_DELETE_MEMBER, arg_dict,
serialize=True, binding_key=member['pool_id'],
key=member['id'])
def create_pool_health_monitor(self, context, health_monitor, pool_id):
"""Enqueues event for worker to process create health monitor request.
:param context: RPC context
:param health_monitor: health_monitor resource to be created
:param pool_id: pool_id to which health monitor is associated
Returns: None
"""
arg_dict = {'context': context,
'health_monitor': health_monitor,
'pool_id': pool_id,
}
LOG.info(_LI("Received request 'Create Pool Health Monitor' for "
"Pool:%(pool_id)s and Health monitor:%(hm)s"),
{'pool_id': pool_id,
'hm': health_monitor['id']})
self._send_event(lb_constants.EVENT_CREATE_POOL_HEALTH_MONITOR,
arg_dict, serialize=True, binding_key=pool_id,
key=health_monitor['id'])
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
"""Enqueues event for worker to process update health monitor request.
:param context: RPC context
:param old_health_monitor: health_monitor resource to be updated
:param health_monitor: new health_monitor resource
:param pool_id: pool_id to which health monitor is associated
Returns: None
"""
old_val, new_val = self.get_diff_of_dict(
old_health_monitor, health_monitor)
arg_dict = {'context': context,
'old_health_monitor': old_health_monitor,
'health_monitor': health_monitor,
'pool_id': pool_id,
}
LOG.info(_LI("Received request 'Update Pool Health Monitor' for "
"Pool Health Monitor:%(hm_id)s with new Param:"
"%(new_val)s and old Param:%(old_val)s"),
{'hm_id': health_monitor['id'],
'new_val': new_val,
'old_val': old_val})
self._send_event(lb_constants.EVENT_UPDATE_POOL_HEALTH_MONITOR,
arg_dict, serialize=True, binding_key=pool_id,
key=health_monitor['id'])
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
"""Enqueues event for worker to process delete health monitor request.
:param context: RPC context
:param health_monitor: health_monitor resource to be deleted
:param pool_id: pool_id to which health monitor is associated
Returns: None
"""
arg_dict = {'context': context,
'health_monitor': health_monitor,
'pool_id': pool_id,
}
LOG.info(_LI("Received request 'Delete Pool Health Monitor' for "
"Pool:%(pool_id)s and Health monitor:%(hm)s"),
{'pool_id': pool_id,
'hm': health_monitor['id']})
self._send_event(lb_constants.EVENT_DELETE_POOL_HEALTH_MONITOR,
arg_dict, serialize=True, binding_key=pool_id,
key=health_monitor['id'])
def agent_updated(self, context, payload):
"""Enqueues event for worker to process agent updated request.
:param context: RPC context
:param payload: payload
Returns: None
"""
arg_dict = {'context': context,
'payload': payload}
LOG.info(_LI("Received request 'Agent Updated' "))
self._send_event(lb_constants.EVENT_AGENT_UPDATED, arg_dict)
class LBaaSEventHandler(agent_base.AgentBaseEventHandler,
nfp_api.NfpEventHandler):
"""Implements event handlers and their helper methods.
Object of this class is registered with the event class of core service
controller. Based on the event key, handle_event method of this class is
invoked by core service controller.
"""
instance_mapping = {}
def __init__(self, sc, drivers, rpcmgr):
self.sc = sc
self.drivers = drivers
self.rpcmgr = rpcmgr
self.plugin_rpc = LBaasRpcSender(sc)
"""REVISIT (pritam):
Remove neutron context dependency. For now, because the config
agent needs a context in notifications and internal poll events
like collect_stats() do not carry one, a context is created here;
this should be removed in the future.
"""
self.context = context.get_admin_context_without_session()
def _get_driver(self, service_vendor, service_feature):
"""Retrieves service driver instance based on service type
and service vendor.
:param service_vendor: service vendor
Returns: Service driver instance
"""
driver = lb_constants.SERVICE_TYPE + service_vendor + service_feature
return self.drivers[driver]
def handle_event(self, ev):
"""Processes generated events in worker context.
Processes following events.
- create pool
- update pool
- delete pool
- create vip
- update vip
- delete vip
- create member
- update member
- delete member
- create pool health monitor
- update pool health monitor
- delete pool health monitor
- agent updated
Enqueues responses into notification queue.
Returns: None
"""
msg = ("Starting handling event '%s' " % (ev.id))
LOG.info(msg)
try:
method = getattr(self, "_%s" % (ev.id.lower()))
method(ev)
except Exception as err:
msg = ("Failed to handle event %s. Reason is %s"
% (ev.id, str(err).capitalize()))
LOG.error(msg)
finally:
if ev.id == lb_constants.EVENT_COLLECT_STATS:
"""Do not say event done for this event as it is
to be executed forever
"""
pass
else:
msg = ("Completed handling event '%s'" % (ev.id))
LOG.info(msg)
self.sc.event_complete(ev)
def _handle_event_vip(self, ev, operation):
data = ev.data
context = data['context']
vip = data['vip']
agent_info = ev.data['context'].pop('agent_info')
service_vendor = agent_info['service_vendor']
service_feature = agent_info['service_feature']
driver = self._get_driver(service_vendor, service_feature)
try:
if operation == lb_constants.CREATE:
driver.create_vip(vip, context)
elif operation == lb_constants.UPDATE:
old_vip = data['old_vip']
driver.update_vip(old_vip, vip, context)
elif operation == lb_constants.DELETE:
driver.delete_vip(vip, context)
self.plugin_rpc.vip_deleted(vip,
lb_constants.ACTIVE, agent_info)
return # Don't update object status for delete operation
except Exception as e:
if operation == lb_constants.DELETE:
msg = ("Failed to delete vip %s" % (vip['id']))
self.plugin_rpc.vip_deleted(vip,
lb_constants.ACTIVE, agent_info)
LOG.warn(msg)
else:
self.plugin_rpc.update_status('vip', vip['id'],
lb_constants.ERROR,
agent_info, vip)
raise e
else:
self.plugin_rpc.update_status('vip', vip['id'],
lb_constants.ACTIVE,
agent_info, vip)
def _create_vip(self, ev):
self._handle_event_vip(ev, lb_constants.CREATE)
def _update_vip(self, ev):
self._handle_event_vip(ev, lb_constants.UPDATE)
def _delete_vip(self, ev):
self._handle_event_vip(ev, lb_constants.DELETE)
def _handle_event_pool(self, ev, operation):
data = ev.data
context = data['context']
pool = data['pool']
agent_info = context.pop('agent_info')
service_vendor = agent_info['service_vendor']
service_feature = agent_info['service_feature']
try:
if operation == lb_constants.CREATE:
driver_name = data['driver_name']
driver_id = driver_name + service_vendor + service_feature
if (driver_id) not in self.drivers.keys():
msg = ('No device driver on agent: %s.' % (driver_name))
LOG.error(msg)
self.plugin_rpc.update_status('pool', pool['id'],
lb_constants.ERROR,
agent_info, pool)
return
driver = self.drivers[driver_id]
driver.create_pool(pool, context)
LBaaSEventHandler.instance_mapping[pool['id']] = driver_name
elif operation == lb_constants.UPDATE:
old_pool = data['old_pool']
driver = self._get_driver(service_vendor,
service_feature) # pool['id'])
driver.update_pool(old_pool, pool, context)
elif operation == lb_constants.DELETE:
driver = self._get_driver(service_vendor,
service_feature) # pool['id'])
driver.delete_pool(pool, context)
del LBaaSEventHandler.instance_mapping[pool['id']]
return # Don't update object status for delete operation
except Exception as e:
if operation == lb_constants.DELETE:
msg = ("Failed to delete pool %s" % (pool['id']))
LOG.warn(msg)
del LBaaSEventHandler.instance_mapping[pool['id']]
else:
self.plugin_rpc.update_status('pool', pool['id'],
lb_constants.ERROR,
agent_info, pool)
raise e
else:
self.plugin_rpc.update_status('pool', pool['id'],
lb_constants.ACTIVE,
agent_info, pool)
def _create_pool(self, ev):
self._handle_event_pool(ev, lb_constants.CREATE)
def _update_pool(self, ev):
self._handle_event_pool(ev, lb_constants.UPDATE)
def _delete_pool(self, ev):
self._handle_event_pool(ev, lb_constants.DELETE)
def _handle_event_member(self, ev, operation):
data = ev.data
context = data['context']
member = data['member']
agent_info = ev.data['context'].pop('agent_info')
service_vendor = agent_info['service_vendor']
service_feature = agent_info['service_feature']
driver = self._get_driver(service_vendor,
service_feature) # member['pool_id'])
try:
if operation == lb_constants.CREATE:
driver.create_member(member, context)
elif operation == lb_constants.UPDATE:
old_member = data['old_member']
driver.update_member(old_member, member, context)
elif operation == lb_constants.DELETE:
driver.delete_member(member, context)
return # Don't update object status for delete operation
except Exception as e:
if operation == lb_constants.DELETE:
msg = ("Failed to delete member %s" % (member['id']))
LOG.warn(msg)
else:
self.plugin_rpc.update_status('member', member['id'],
lb_constants.ERROR,
agent_info, member)
raise e
else:
self.plugin_rpc.update_status('member', member['id'],
lb_constants.ACTIVE,
agent_info, member)
def _create_member(self, ev):
self._handle_event_member(ev, lb_constants.CREATE)
def _update_member(self, ev):
self._handle_event_member(ev, lb_constants.UPDATE)
def _delete_member(self, ev):
self._handle_event_member(ev, lb_constants.DELETE)
def _handle_event_pool_health_monitor(self, ev, operation):
data = ev.data
context = data['context']
agent_info = context.pop('agent_info')
health_monitor = data['health_monitor']
pool_id = data['pool_id']
service_vendor = agent_info['service_vendor']
service_feature = agent_info['service_feature']
driver = self._get_driver(service_vendor, service_feature) # (pool_id)
assoc_id = {'pool_id': pool_id,
'monitor_id': health_monitor['id']}
try:
if operation == lb_constants.CREATE:
driver.create_pool_health_monitor(health_monitor, pool_id,
context)
elif operation == lb_constants.UPDATE:
old_health_monitor = data['old_health_monitor']
driver.update_pool_health_monitor(old_health_monitor,
health_monitor, pool_id,
context)
elif operation == lb_constants.DELETE:
driver.delete_pool_health_monitor(health_monitor, pool_id,
context)
return # Don't update object status for delete operation
except Exception as e:
if operation == lb_constants.DELETE:
msg = ("Failed to delete pool health monitor."
" assoc_id: %s" % (assoc_id))
LOG.warn(msg)
else:
self.plugin_rpc.update_status(
'health_monitor', assoc_id, lb_constants.ERROR,
agent_info, health_monitor)
raise e
else:
self.plugin_rpc.update_status(
'health_monitor', assoc_id, lb_constants.ACTIVE,
agent_info, health_monitor)
def _create_pool_health_monitor(self, ev):
self._handle_event_pool_health_monitor(ev, lb_constants.CREATE)
def _update_pool_health_monitor(self, ev):
self._handle_event_pool_health_monitor(ev, lb_constants.UPDATE)
def _delete_pool_health_monitor(self, ev):
self._handle_event_pool_health_monitor(ev, lb_constants.DELETE)
def _agent_updated(self, ev):
""" REVISIT(pritam): Implement this method """
return None
def _collect_stats(self, ev):
self.sc.poll_event(ev)
@nfp_api.poll_event_desc(event=lb_constants.EVENT_COLLECT_STATS,
spacing=60)
def collect_stats(self, ev):
for pool_id, driver_name in LBaaSEventHandler.instance_mapping.items():
driver_id = lb_constants.SERVICE_TYPE + driver_name
driver = self.drivers[driver_id]
try:
stats = driver.get_stats(pool_id)
if stats:
self.plugin_rpc.update_pool_stats(pool_id, stats,
self.context)
except Exception:
msg = ("Error updating statistics on pool %s" % (pool_id))
LOG.error(msg)
def events_init(sc, drivers, rpcmgr):
"""Registers events with core service controller.
All the events will come to handle_event method of class instance
registered in 'handler' field.
:param drivers: Driver instances registered with the service agent
:param rpcmgr: Instance to receive all the RPC messages from configurator
module.
Returns: None
"""
ev_ids = [lb_constants.EVENT_CREATE_POOL, lb_constants.EVENT_UPDATE_POOL,
lb_constants.EVENT_DELETE_POOL,
lb_constants.EVENT_CREATE_VIP, lb_constants.EVENT_UPDATE_VIP,
lb_constants.EVENT_DELETE_VIP,
lb_constants.EVENT_CREATE_MEMBER,
lb_constants.EVENT_UPDATE_MEMBER,
lb_constants.EVENT_DELETE_MEMBER,
lb_constants.EVENT_CREATE_POOL_HEALTH_MONITOR,
lb_constants.EVENT_UPDATE_POOL_HEALTH_MONITOR,
lb_constants.EVENT_DELETE_POOL_HEALTH_MONITOR,
lb_constants.EVENT_AGENT_UPDATED,
lb_constants.EVENT_COLLECT_STATS
]
evs = []
for ev_id in ev_ids:
ev = nfp_event.Event(id=ev_id, handler=LBaaSEventHandler(sc, drivers,
rpcmgr))
evs.append(ev)
sc.register_events(evs)
def load_drivers(sc, conf):
"""Imports all the driver files.
Returns: Dictionary of driver objects with a specified service type and/or
vendor name
"""
cutils = utils.ConfiguratorUtils(conf)
drivers = cutils.load_drivers(lb_constants.SERVICE_TYPE)
plugin_rpc = LBaasRpcSender(sc)
for service_type, dobj in six.iteritems(drivers):
'''LB Driver constructor needs plugin_rpc as a param'''
instantiated_dobj = dobj(plugin_rpc=plugin_rpc, conf=conf)
drivers[service_type] = instantiated_dobj
return drivers
def register_service_agent(cm, sc, conf, rpcmgr):
"""Registers Loadbalaner service agent with configurator module.
:param cm: Instance of configurator module
:param sc: Instance of core service controller
:param conf: Instance of oslo configuration
:param rpcmgr: Instance containing RPC methods which are invoked by
configurator module on corresponding RPC message arrival
"""
service_type = lb_constants.SERVICE_TYPE
cm.register_service_agent(service_type, rpcmgr)
def init_agent(cm, sc, conf):
"""Initializes Loadbalaner agent.
:param cm: Instance of configuration module
:param sc: Instance of core service controller
:param conf: Instance of oslo configuration
"""
try:
drivers = load_drivers(sc, conf)
except Exception as err:
msg = ("Loadbalaner agent failed to load service drivers. Reason:%s"
% (str(err).capitalize()))
LOG.error(msg)
raise err
else:
msg = ("Loadbalaner agent loaded service"
" drivers successfully.")
LOG.debug(msg)
rpcmgr = LBaaSRpcManager(sc, conf)
try:
events_init(sc, drivers, rpcmgr)
except Exception as err:
msg = ("Loadbalaner agent failed to initialize events. Reason:%s"
% (str(err).capitalize()))
LOG.error(msg)
raise err
else:
msg = ("Loadbalaner agent initialized"
" events successfully.")
LOG.debug(msg)
try:
register_service_agent(cm, sc, conf, rpcmgr)
except Exception as err:
msg = ("Failed to register Loadbalaner agent with"
" configurator module. Reason:%s" % (str(err).capitalize()))
LOG.error(msg)
raise err
else:
msg = ("Loadbalaner agent registered with configuration"
" module successfully.")
LOG.debug(msg)
def _start_collect_stats(sc):
"""Enqueues poll event for worker to collect pool stats periodically.
Agent keeps map of pool_id:driver. As part of handling this event,
stats for pool_id is requested from agent inside service vm
"""
arg_dict = {}
ev = sc.new_event(id=lb_constants.EVENT_COLLECT_STATS, data=arg_dict)
sc.post_event(ev)
def init_agent_complete(cm, sc, conf):
# _start_collect_stats(sc)
msg = ("Initialization of loadbalancer agent completed.")
LOG.info(msg)


@@ -1,759 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.\
haproxy import (haproxy_rest_client)
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.nfp.configurator.lib import data_parser
from gbpservice.contrib.nfp.configurator.lib import lb_constants
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
DRIVER_NAME = 'loadbalancer'
class LbGenericConfigDriver(base_driver.BaseDriver):
""" Loadbalancer generic configuration driver class for handling device
configuration requests.
"""
def __init__(self):
self.parse = data_parser.DataParser()
def configure_interfaces(self, context, resource_data):
""" Configure interfaces for the service VM.
Internally it configures log forwarding in service vm
:param context: neutron context
:param resource_data: resource data containing service vm
related details
Returns: SUCCESS/FAILED with reason.
"""
resource_data = self.parse.parse_data(common_const.INTERFACES,
resource_data)
mgmt_ip = resource_data['mgmt_ip']
try:
result_log_forward = self._configure_log_forwarding(
lb_constants.REQUEST_URL, mgmt_ip,
self.port)
except Exception as err:
msg = ("Failed to configure log forwarding for service at %s. "
"Error: %s" % (mgmt_ip, err))
LOG.error(msg)
else:
if result_log_forward == common_const.UNHANDLED:
pass
elif result_log_forward != lb_constants.STATUS_SUCCESS:
msg = ("Failed to configure log forwarding for service at %s. "
"Error: %s" % (mgmt_ip, err))
LOG.error(msg)
# Failure in log forward configuration won't break chain
# creation. However, error will be logged for detecting
# failure.
else:
msg = ("Configured log forwarding for service at %s."
% (mgmt_ip))
LOG.info(msg)
return lb_constants.STATUS_SUCCESS
@base_driver.set_class_attr(SERVICE_TYPE=lb_constants.SERVICE_TYPE,
SERVICE_VENDOR=common_const.HAPROXY)
class HaproxyOnVmDriver(LbGenericConfigDriver):
"""Main driver which gets registered with LB agent and Generic Config agent
in configurator and these agents pass all *aaS neutron and generic
config requests to this class.
"""
pool_to_device = {}
def __init__(self, plugin_rpc=None, conf=None):
self.plugin_rpc = plugin_rpc
self.conf = conf
self.timeout = 60
self.port = lb_constants.HAPROXY_AGENT_LISTEN_PORT
super(HaproxyOnVmDriver, self).__init__()
def _get_rest_client(self, ip_addr):
client = haproxy_rest_client.HttpRequests(
ip_addr, self.port,
lb_constants.REQUEST_RETRIES,
lb_constants.REQUEST_TIMEOUT)
return client
def _get_device_for_pool(self, pool_id, context):
resource_data = self.parse.parse_data(common_const.LOADBALANCER,
context)
role = resource_data.get('role', '')
key = pool_id + role
device = HaproxyOnVmDriver.pool_to_device.get(key, None)
if device is not None:
return device
logical_device = self.plugin_rpc.get_logical_device(pool_id,
context)
vip = logical_device.get('vip', None)
if vip is None:
return None
else:
device = resource_data['mgmt_ip']
if device:
HaproxyOnVmDriver.pool_to_device[key] = device
return device
def _expand_expected_codes(self, codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(str(i) for i in xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
def _prepare_haproxy_frontend(self, vip, resource_data):
vip_ip = vip['address']
vip_port_number = vip['protocol_port']
protocol = vip['protocol']
frontend = {
'option': {},
'bind': '%s:%d' % (vip_ip, vip_port_number),
'mode': lb_constants.PROTOCOL_MAP[protocol],
'default_backend': "bck:%s" % vip['pool_id']
}
if vip['connection_limit'] >= 0:
frontend.update({'maxconn': '%s' % vip['connection_limit']})
if protocol in [lb_constants.PROTOCOL_HTTP,
lb_constants.PROTOCOL_HTTPS]:
frontend['option'].update({'httplog': True})
else:
frontend['option'].update({'tcplog': True})
try:
if protocol == lb_constants.PROTOCOL_HTTP:
frontend['option'].update({'forwardfor': True})
provider_interface_mac = resource_data['provider_mac']
frontend.update({'provider_interface_mac': provider_interface_mac})
except Exception as e:
raise e
return frontend
def _prepare_haproxy_backend(self, pool, context):
logical_device = self.plugin_rpc.get_logical_device(pool['id'],
context)
protocol = pool['protocol']
lb_method = pool['lb_method']
monitor = None
for monitor in logical_device['healthmonitors']:
break
server_addon = ''
backend = {
'mode': '%s' % lb_constants.PROTOCOL_MAP[protocol],
'balance': '%s' % lb_constants.BALANCE_MAP.get(
lb_method, 'roundrobin'),
'option': {},
'timeout': {},
'server': {}
}
try:
if protocol == lb_constants.PROTOCOL_HTTP:
backend['option'].update({'forwardfor': True})
# health monitor options
if monitor:
# server addon options
server_addon = ('check inter %(delay)ds fall %(max_retries)d'
% monitor)
backend['timeout'].update({'check': '%ds'
% monitor['timeout']})
if monitor['type'] in (lb_constants.HEALTH_MONITOR_HTTP,
lb_constants.HEALTH_MONITOR_HTTPS):
backend['option'].update(
{'httpchk': '%(http_method)s %(url_path)s' % monitor})
backend.update({'http-check expect': 'rstatus %s'
% '|'.join(
self._expand_expected_codes(
monitor['expected_codes']))})
if monitor['type'] == lb_constants.HEALTH_MONITOR_HTTPS:
backend['option'].update({'ssl-hello-chk': True})
# session persistence options
vip = logical_device['vip']
persistence = vip.get('session_persistence')
if persistence:
if persistence['type'] == 'SOURCE_IP':
backend.update({'stick-table type': 'ip size 10k'})
backend.update({'stick on': 'src'})
elif persistence['type'] == 'HTTP_COOKIE':
backend.update({'cookie': 'SRV insert indirect nocache'})
elif (persistence['type'] == 'APP_COOKIE' and
persistence.get('cookie_name')):
backend.update({'appsession': '%s len 56 timeout 3h' %
persistence['cookie_name']})
# server options
for member in logical_device['members']:
backend['server'].update(
{"srvr:%s" % member['id']: [
'%(address)s:%(protocol_port)s' % member,
'weight %(weight)s' % member, server_addon]}
)
if (vip.get('session_persistence') and
vip['session_persistence']['type'] == 'HTTP_COOKIE'):
backend['server']['srvr:%s' % member['id']].append(
'cookie %d'
% logical_device['members'].index(member))
return backend
except Exception as e:
raise e
def _prepare_haproxy_backend_with_member(self, member, backend, context):
logical_device = self.plugin_rpc.get_logical_device(member['pool_id'],
context)
vip = logical_device['vip']
monitor = None
# choose the first monitor
for monitor in logical_device['healthmonitors']:
break
# update backend with the new server
if monitor:
server_addon = ('check inter %(delay)ds fall %(max_retries)d'
% monitor)
else:
server_addon = ''
try:
backend['server'].update(
{'srvr:%s' % member['id']: [
'%(address)s:%(protocol_port)s' % member,
'weight %(weight)s' % member, server_addon]})
except Exception as e:
raise e
if (vip.get('session_persistence') and
vip['session_persistence']['type'] == 'HTTP_COOKIE'):
backend['server']['srvr:%s' % member['id']].append(
'cookie %d' % logical_device['members'].index(member))
return backend
def _prepare_backend_adding_health_monitor_to_pool(self, health_monitor,
pool_id,
backend):
# server addon options
server_addon = ('check inter %(delay)ds fall %(max_retries)d'
% health_monitor)
for server in backend['server'].itervalues():
total_lines = len(server)
for index, line in enumerate(server):
if 'check' in line:
server[index] = server_addon
break
elif total_lines == index + 1:
server.append(server_addon)
try:
backend['timeout'].update({'check': '%ds'
% health_monitor['timeout']})
if health_monitor['type'] in (lb_constants.HEALTH_MONITOR_HTTP,
lb_constants.HEALTH_MONITOR_HTTPS):
backend['option'].update(
{'httpchk': ('%(http_method)s %(url_path)s'
% health_monitor)})
backend.update({'http-check expect': 'rstatus %s' % (
'|'.join(self._expand_expected_codes(
health_monitor['expected_codes'])))})
if health_monitor['type'] == lb_constants.HEALTH_MONITOR_HTTPS:
backend['option'].update({'ssl-hello-chk': True})
except Exception as e:
raise e
return backend
def _prepare_backend_deleting_health_monitor_from_pool(self,
health_monitor,
pool_id,
backend, context):
logical_device = self.plugin_rpc.get_logical_device(pool_id, context)
remaining_hms_type = []
for monitor in logical_device['healthmonitors']:
if health_monitor['type'] != monitor['type']:
remaining_hms_type.append(monitor['type'])
# Remove the HTTP/HTTPS-specific configuration, but only if no
# other HTTP or HTTPS monitor remains on the pool.
try:
if (lb_constants.HEALTH_MONITOR_HTTP not in remaining_hms_type and
lb_constants.HEALTH_MONITOR_HTTPS not in remaining_hms_type and
health_monitor['type'] in
(lb_constants.HEALTH_MONITOR_HTTP,
lb_constants.HEALTH_MONITOR_HTTPS)):
del backend['option']['httpchk']
del backend['http-check expect']
if health_monitor['type'] == lb_constants.HEALTH_MONITOR_HTTPS:
del backend['option']['ssl-hello-chk']
server_addon = ('check inter %(delay)ds fall %(max_retries)d'
% health_monitor)
for server in backend['server'].itervalues():
for index, line in enumerate(server):
if 'check' in line:
if len(logical_device['healthmonitors']) == 0:
del server[index]
else:
server[index] = server_addon
break
if len(logical_device['healthmonitors']) == 0:
del backend['timeout']['check']
except Exception as e:
raise e
return backend
def _prepare_backend_updating_health_monitor_for_pool(self, health_monitor,
pool_id,
backend):
# update the backend by updating the health monitor
# server addon options
server_addon = ('check inter %(delay)ds fall %(max_retries)d'
% health_monitor)
for server in backend['server'].itervalues():
health_chk_index_in_srvr_list = 0
for line in server:
if 'check' in line:
server[health_chk_index_in_srvr_list] = server_addon
break
else:
health_chk_index_in_srvr_list += 1
try:
backend['timeout'].update({'check': '%ds'
% health_monitor['timeout']})
if health_monitor['type'] in (lb_constants.HEALTH_MONITOR_HTTP,
lb_constants.HEALTH_MONITOR_HTTPS):
backend['option'].update(
{'httpchk': ('%(http_method)s %(url_path)s'
% health_monitor)})
backend.update({'http-check expect': 'rstatus %s' % '|'.join(
self._expand_expected_codes(
health_monitor['expected_codes']))})
if health_monitor['type'] == lb_constants.HEALTH_MONITOR_HTTPS:
backend['option'].update({'ssl-hello-chk': True})
except Exception as e:
raise e
return backend
def _create_vip(self, vip, device_addr, resource_data):
try:
client = self._get_rest_client(device_addr)
frontend = self._prepare_haproxy_frontend(vip, resource_data)
body = {"frnt:%s" % vip['id']: frontend}
client.create_resource("frontend", body)
except Exception as e:
raise e
def _delete_vip(self, vip, device_addr):
try:
client = self._get_rest_client(device_addr)
client.delete_resource("frontend/frnt:%s" % vip['id'])
except Exception as e:
raise e
def _create_pool(self, pool, device_addr, context):
try:
client = self._get_rest_client(device_addr)
backend = self._prepare_haproxy_backend(pool, context)
body = {'bck:%s' % pool['id']: backend}
client.create_resource("backend", body)
except Exception as e:
raise e
def _delete_pool(self, pool, device_addr):
try:
client = self._get_rest_client(device_addr)
client.delete_resource("backend/bck:%s" % pool['id'])
except Exception as e:
raise e
def _create_member(self, member, device_addr, context):
try:
client = self._get_rest_client(device_addr)
backend = client.get_resource("backend/bck:%s"
% member['pool_id'])
backend = self._prepare_haproxy_backend_with_member(
member, backend, context)
client.update_resource("backend/bck:%s" % member['pool_id'],
backend)
except Exception as e:
raise e
def _delete_member(self, member, device_addr):
try:
client = self._get_rest_client(device_addr)
backend = client.get_resource("backend/bck:%s"
% member['pool_id'])
# update the backend after removing this member's server entry
del backend['server']['srvr:%s' % member['id']]
client.update_resource("backend/bck:%s" % member['pool_id'],
backend)
except Exception as e:
raise e
def _create_pool_health_monitor(self, hm, pool_id, device_addr):
try:
client = self._get_rest_client(device_addr)
backend = client.get_resource("backend/bck:%s" % pool_id)
backend = self._prepare_backend_adding_health_monitor_to_pool(
hm,
pool_id,
backend)
client.update_resource("backend/bck:%s" % pool_id, backend)
except Exception as e:
raise e
def _delete_pool_health_monitor(self, hm, pool_id,
device_addr, context):
try:
client = self._get_rest_client(device_addr)
backend = client.get_resource("backend/bck:%s" % pool_id)
backend = self._prepare_backend_deleting_health_monitor_from_pool(
hm,
pool_id,
backend,
context)
client.update_resource("backend/bck:%s" % pool_id, backend)
except Exception as e:
raise e
@classmethod
def get_name(cls):
return DRIVER_NAME
def get_stats(self, pool_id):
stats = {}
try:
# if pool is not known, do nothing
device = HaproxyOnVmDriver.pool_to_device.get(pool_id, None)
if device is None:
return stats
device_addr = self._get_device_for_pool(pool_id, None)
# create REST client object
client = self._get_rest_client(device_addr)
stats = client.get_resource('stats/%s' % pool_id)
for key, value in stats.get('members', {}).items():
if key.find(":") != -1:
member_id = key[key.find(":") + 1:]
del stats['members'][key]
stats['members'][member_id] = value
except Exception as e:
msg = ("Failed to get stats. %s"
% str(e).capitalize())
LOG.error(msg)
raise e
return stats
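For clarity, the member-key normalization above simply strips the REST prefix; a tiny illustration with a made-up id:
# 'srvr:<uuid>' as returned by the REST API becomes a bare member id.
key = 'srvr:62acd237-07f8-46ff-b797-c0fd63507760'
member_id = key[key.find(":") + 1:]
# member_id == '62acd237-07f8-46ff-b797-c0fd63507760'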
def create_vip(self, vip, context):
resource_data = self.parse.parse_data(common_const.LOADBALANCER,
context)
msg = ("Handling 'Create VIP' for VIP:%s with Pool:%s"
"and tenant:%s"
% (vip['id'], vip['pool_id'], vip['tenant_id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(vip['pool_id'], context)
logical_device = self.plugin_rpc.get_logical_device(vip['pool_id'],
context)
self._create_pool(logical_device['pool'], device_addr, context)
for member in logical_device['members']:
self._create_member(member, device_addr, context)
for hm in logical_device['healthmonitors']:
self._create_pool_health_monitor(hm,
vip['pool_id'], device_addr)
self._create_vip(vip, device_addr, resource_data)
except Exception as e:
msg = ("Failed to create vip %s. %s"
% (vip['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Created vip %s." % vip['id'])
LOG.info(msg)
def update_vip(self, old_vip, vip, context):
resource_data = self.parse.parse_data(common_const.LOADBALANCER,
context)
msg = ("Handling 'Update VIP' for VIP:%s and Old_VIP:%s" % (
vip['id'], old_vip['id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(old_vip['pool_id'],
context)
# nothing to do if the old VIP was never associated with a
# pool or was never created on a device
if (not old_vip['pool_id'] or
device_addr is None):
return
# check whether the VIP's pool has changed
if vip['pool_id'] != old_vip['pool_id']:
msg = ("VIP pool id changed to %s. Deleting old VIP:%s "
% (vip['pool_id'], old_vip['id']))
LOG.info(msg)
# Delete the old VIP
self._delete_vip(old_vip, device_addr)
# Create the new VIP along with pool
logical_device = self.plugin_rpc.get_logical_device(
vip['pool_id'],
context)
pool = logical_device['pool']
self._create_pool(pool, device_addr, context)
self._create_vip(vip, device_addr, resource_data)
return
client = self._get_rest_client(device_addr)
body = self._prepare_haproxy_frontend(vip, resource_data)
client.update_resource("frontend/frnt:%s" % vip['id'], body)
except Exception as e:
msg = ("Failed to update vip %s. %s"
% (vip['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Updated VIP:%s." % vip['id'])
LOG.info(msg)
def delete_vip(self, vip, context):
msg = ("Handling 'Delete VIP' for VIP:%s" % (vip['id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(vip['pool_id'], context)
logical_device = self.plugin_rpc.get_logical_device(vip['pool_id'],
context)
self._delete_vip(vip, device_addr)
pool = logical_device['pool']
self._delete_pool(pool, device_addr)
except Exception as e:
msg = ("Failed to delete vip %s. %s"
% (vip['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Deleted vip %s." % vip['id'])
LOG.info(msg)
def create_pool(self, pool, context):
# nothing to do here because a pool needs a vip to be useful
msg = ("Handled 'Create Pool' for Pool:%s" % (pool['id']))
LOG.info(msg)
def update_pool(self, old_pool, pool, context):
msg = ("Handling 'Update Pool' for Pool:%s and Old_Pool:%s"
% (pool['id'], old_pool['id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(pool['id'], context)
if (pool['vip_id'] and
device_addr is not None):
client = self._get_rest_client(device_addr)
backend = self._prepare_haproxy_backend(pool, context)
body = backend
client.update_resource("backend/bck:%s" % pool['id'], body)
except Exception as e:
msg = ("Failed to update pool from %s to %s. %s"
% (old_pool['id'], pool['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Updated pool from %s to %s."
% (old_pool['id'], pool['id']))
LOG.info(msg)
def delete_pool(self, pool, context):
msg = ("Handling 'Delete Pool' for Pool:%s" % (pool['id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(pool['id'], context)
if device_addr is None:
return
if (pool['vip_id'] and
device_addr):
self._delete_pool(pool, device_addr)
except Exception as e:
msg = ("Failed to delete pool: %s. %s"
% (pool['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Deleted pool:%s." % pool['id'])
LOG.info(msg)
def create_member(self, member, context):
msg = ("Handling 'Create Member' for Member:%s with Pool:%s "
% (member['id'], member['pool_id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(member['pool_id'],
context)
if device_addr is not None:
self._create_member(member, device_addr, context)
except Exception as e:
msg = ("Failed to create member %s. %s"
% (member['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Created member %s." % member['id'])
LOG.info(msg)
def update_member(self, old_member, member, context):
msg = ("Handling 'Update Member' for Member:%s with Old_Member:%s"
% (member['id'], old_member['id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(old_member['pool_id'],
context)
if device_addr is not None:
self._delete_member(old_member, device_addr)
device_addr = self._get_device_for_pool(member['pool_id'],
context)
if device_addr is not None:
self._create_member(member, device_addr, context)
except Exception as e:
msg = ("Failed to update member %s. %s"
% (member['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("updated member %s." % member['id'])
LOG.info(msg)
def delete_member(self, member, context):
msg = ("Handling 'Delete Member' for Member:%s " % (member['id']))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(member['pool_id'],
context)
if device_addr is not None:
self._delete_member(member, device_addr)
except Exception as e:
msg = ("Failed to delete member %s. %s"
% (member['id'], str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Deleted member %s." % member['id'])
LOG.info(msg)
def create_pool_health_monitor(self, health_monitor, pool_id, context):
msg = ("Handling 'Create Pool Health Monitor' for "
"Healthmonitor:%s and Pool:%s"
% (health_monitor['id'], pool_id))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(pool_id, context)
if device_addr is not None:
self._create_pool_health_monitor(health_monitor, pool_id,
device_addr)
except Exception as e:
msg = ("Failed to create pool health monitor: %s with "
"pool ID: %s. %s"
% (str(health_monitor), pool_id, str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Created pool health monitor:%s with Pool: %s"
% (health_monitor['id'], pool_id))
LOG.info(msg)
def update_pool_health_monitor(self, old_health_monitor, health_monitor,
pool_id, context):
msg = ("Handling 'Update Pool Health Monitor' for HM:%s "
"with Old_HM:%s and Pool:%s"
% (health_monitor['id'], old_health_monitor['id'], pool_id))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(pool_id, context)
if device_addr is not None:
client = self._get_rest_client(device_addr)
backend = client.get_resource("backend/bck:%s" % pool_id)
# update the backend with the modified health monitor's
# server addon options
backend = (
self._prepare_backend_updating_health_monitor_for_pool(
health_monitor,
pool_id,
backend))
client.update_resource("backend/bck:%s" % pool_id, backend)
except Exception as e:
msg = ("Failed to update health monitor from %s to "
"%s for pool: %s. %s"
% (str(old_health_monitor), str(health_monitor),
pool_id, str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Updated health monitor from %s to %s for Pool:%s"
% (old_health_monitor['id'],
health_monitor['id'], pool_id))
LOG.info(msg)
def delete_pool_health_monitor(self, health_monitor, pool_id, context):
msg = ("Handling 'Delete Pool Health Monitor' for HM:%s Pool:%s"
% (health_monitor['id'], pool_id))
LOG.info(msg)
try:
device_addr = self._get_device_for_pool(pool_id, context)
if device_addr is not None:
self._delete_pool_health_monitor(health_monitor, pool_id,
device_addr, context)
except Exception as e:
msg = ("Failed to delete pool health monitor: %s with "
"pool ID: %s. %s"
% (str(health_monitor), pool_id, str(e).capitalize()))
LOG.error(msg)
raise e
else:
msg = ("Deleted pool health monitor: %s for Pool:%s"
% (health_monitor['id'], pool_id))
LOG.info(msg)

View File

@ -1,97 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json as jsonutils
import requests
import urlparse
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
class HttpRequests(object):
"""Encapsulates Python requests module
Uses the python-requests library to perform API requests to the REST server
"""
def __init__(self, host, port, retries=0, request_timeout=30):
self._host = host
self._port = port
self._retries = retries
self._request_timeout = request_timeout
self.rest_server_url = 'http://' + self._host + ':' + str(self._port)
self.pool = requests.Session()
def do_request(self, method, url=None, headers=None, data=None,
timeout=30):
response = None
try:
response = self.pool.request(method, url=url,
headers=headers, data=data,
timeout=timeout)
except Exception as e:
msg = ("[Request:%s, URL:%s, Body:%s] Failed.Reason:%s"
% (method, url, data, e))
LOG.error(msg)
raise Exception(msg)
return response
def request(self, method, uri, body=None,
content_type="application/json"):
headers = {"Content-Type": content_type}
url = urlparse.urljoin(self.rest_server_url, uri)
response = self.do_request(method, url=url, headers=headers,
data=body,
timeout=self._request_timeout)
if response is None:
msg = ("[Request:%s, URL:%s, Body:%s] Failed.HTTP response is None"
".Request timed out" % (method, url, body))
LOG.error(msg)
raise Exception(msg)
status = response.status_code
# Not Found (404) is OK for DELETE. Ignore it here
if method == 'DELETE' and status == 404:
return
elif status not in (200, 201, 204):
# requests.codes.ok = 200, requests.codes.created = 201,
# requests.codes.no_content = 204
msg = ("[Request:%s, URL:%s, Body:%s] Failed with status:%s"
% (method, url, body, status))
LOG.error(msg)
raise Exception(msg)
else:
msg = ("[Request:%s, URL:%s, Body:%s] executed successfully"
% (method, url, body))
LOG.debug(msg)
response.body = response.content
return response
def create_resource(self, resource_path, resource_data):
response = self.request("POST", resource_path,
jsonutils.dumps(resource_data))
return response.json()
def update_resource(self, resource_path, resource_data):
response = self.request("PUT", resource_path,
jsonutils.dumps(resource_data))
return response.json()
def delete_resource(self, resource_path):
return self.request("DELETE", resource_path)
def get_resource(self, resource_path):
response = self.request("GET", resource_path)
return response.json()
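A minimal usage sketch for this client; the host, port, and ids here are hypothetical, while the resource-path shapes ('backend/bck:<pool-id>', 'frontend/frnt:<vip-id>') are the ones the driver above uses:
client = HttpRequests(host='192.168.20.75', port=1234)
# GET the current backend section, mutate it, then PUT it back.
backend = client.get_resource('backend/bck:<pool-id>')
client.update_resource('backend/bck:<pool-id>', backend)
# DELETE treats a 404 as success and returns None.
client.delete_resource('frontend/frnt:<vip-id>')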

View File

@ -28,7 +28,6 @@ from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.haproxy.rest_api_driver import HaproxyAmphoraLoadBalancerDriver
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.nfp.configurator.lib import data_parser
from gbpservice.contrib.nfp.configurator.lib import lb_constants
from gbpservice.contrib.nfp.configurator.lib import lbv2_constants
from gbpservice.nfp.common import exceptions
from gbpservice.nfp.core import log as nfp_logging
@ -68,7 +67,7 @@ class LbGenericConfigDriver(object):
try:
result_log_forward = self._configure_log_forwarding(
lb_constants.REQUEST_URL, mgmt_ip,
lbv2_constants.REQUEST_URL, mgmt_ip,
self.port)
except Exception as err:
msg = ("Failed to configure log forwarding for service at %s. "
@ -78,7 +77,7 @@ class LbGenericConfigDriver(object):
else:
if result_log_forward == common_const.UNHANDLED:
pass
elif result_log_forward != lb_constants.STATUS_SUCCESS:
elif result_log_forward != lbv2_constants.STATUS_SUCCESS:
msg = ("Failed to configure log forwarding for service at %s. "
% mgmt_ip)
LOG.error(msg)
@ -88,7 +87,7 @@ class LbGenericConfigDriver(object):
"Result: %s" % (mgmt_ip, result_log_forward))
LOG.info(msg)
return lb_constants.STATUS_SUCCESS
return lbv2_constants.STATUS_SUCCESS
# As we use the rest client and amphora image from Octavia,

View File

@ -10,6 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
SERVICE_VENDOR = 'haproxy_lbaasv2'
SERVICE_VENDOR = 'haproxy'
DRIVER_NAME = 'loadbalancerv2'
CONFIGURATION_SERVER_PORT = '9443'

View File

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
supported_service_types = ['firewall', 'vpn', 'loadbalancer', 'loadbalancerv2']
supported_service_types = ['firewall', 'vpn', 'loadbalancerv2']
NFP_SERVICE_LIST = ['heat', 'ansible']
invalid_service_type = 'invalid'
NFP_SERVICE = 'nfp_service'
@ -26,11 +26,9 @@ RABBITMQ_HOST = '127.0.0.1' # send notifications to 'RABBITMQ_HOST'
NOTIFICATION_QUEUE = 'configurator-notifications'
FIREWALL = 'firewall'
VPN = 'vpn'
LOADBALANCER = 'loadbalancer'
VYOS = 'vyos'
LOADBALANCERV2 = 'loadbalancerv2'
HAPROXY = 'haproxy'
HAPROXY_LBAASV2 = 'haproxy_lbaasv2'
HAPROXY_LBAASV2 = 'haproxy'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'

View File

@ -12,9 +12,6 @@
import copy
from gbpservice.contrib.nfp.configurator.lib import (
filter_constants as constants)
class Filter(object):
""" Filter class which provides data asked in a specific format.
@ -240,56 +237,3 @@ class Filter(object):
"""
return vpnservices.values()
def _get_logical_device(self, context, filters):
""" Get logical device from context after applying filter.
Logical device here means the pool and its related resources, like
vip, member, health monitor, etc.
:param context
e.g. context = {'service_info': {'pools': [pools],
'members': [members],
'vips': [vips],
'health_monitors': [health_monitors],
'ports': [ports],
'subnets': [subnets],
}
}
:param filters: e.g. {'pool_id': pool_id}
Returns: logical device
"""
service_info = context['service_info']
pool_id = filters.get('pool_id')
pool = self.get_record(service_info['pools'], 'id', pool_id)
retval = {}
retval['pool'] = pool # self._make_pool_dict(pool)
if 'vip_id' in pool and pool['vip_id'] is not None:
vip = self.get_record(
service_info['vips'], 'id', pool['vip_id'])
retval['vip'] = vip # self._make_vip_dict(vip)
pool_members = pool['members']
retval['members'] = []
for pm in pool_members:
member = self.get_record(service_info['members'], 'id', pm)
if (member['status'] in constants.ACTIVE_PENDING_STATUSES or
member['status'] == constants.INACTIVE):
retval['members'].append(member)
pool_health_monitors = pool['health_monitors_status']
retval['healthmonitors'] = []
for phm in pool_health_monitors:
if phm['status'] in constants.ACTIVE_PENDING_STATUSES:
health_monitor = self.get_record(
service_info['health_monitors'],
'id', phm['monitor_id'])
retval['healthmonitors'].append(health_monitor)
retval['driver'] = pool['provider']
return retval
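A sketch of a call and the structure it returns; 'flt' and the id are hypothetical, and only the keys are taken from the code above:
# 'flt' is a Filter instance; 'context' carries the service_info
# dict described in the docstring above.
filters = {'pool_id': '6350c0fd-07f8-46ff-b797-62acd23760de'}
logical_device = flt._get_logical_device(context, filters)
# The result resembles:
# {'pool': {...},
#  'vip': {...},              # present only when the pool has a VIP
#  'members': [...],          # members in ACTIVE/PENDING or INACTIVE status
#  'healthmonitors': [...],   # monitors in ACTIVE/PENDING statuses
#  'driver': 'haproxy'}       # taken from pool['provider']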

View File

@ -101,7 +101,6 @@ class ServiceAgentDemuxer(object):
sa_info_list = []
vendor_map = {const.FIREWALL: const.VYOS,
const.LOADBALANCER: const.HAPROXY,
const.VPN: const.VYOS,
const.LOADBALANCERV2: const.HAPROXY_LBAASV2}
@ -119,8 +118,6 @@ class ServiceAgentDemuxer(object):
resource_type_to_method_map = {
const.FIREWALL: (operation + '_' + config_data['resource']),
const.VPN: ('vpnservice_updated'),
const.LOADBALANCER: (operation + '_' + config_data[
'resource']),
const.LOADBALANCERV2: (operation + '_' + config_data[
'resource']),
const.NFP_SERVICE: ('run' + '_' + const.NFP_SERVICE),
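The method-name derivation above is plain string composition; a small sketch with hypothetical inputs:
operation = 'create'                  # one of create/update/delete
config_data = {'resource': 'member'}
method = operation + '_' + config_data['resource']
# method == 'create_member', the handler invoked on the LBaaS V2 agent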

View File

@ -1,99 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
SERVICE_TYPE = 'loadbalancer'
NEUTRON = 'neutron'
LBAAS_AGENT_RPC_TOPIC = 'lbaas_agent'
LBAAS_GENERIC_CONFIG_RPC_TOPIC = 'lbaas_generic_config'
LBAAS_PLUGIN_RPC_TOPIC = 'n-lbaas-plugin'
AGENT_TYPE_LOADBALANCER = 'OC Loadbalancer agent'
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
CREATED = "CREATED"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
STATUS_SUCCESS = "SUCCESS"
ACTIVE_PENDING_STATUSES = (
ACTIVE,
PENDING_CREATE,
PENDING_UPDATE
)
""" HTTP request/response """
HAPROXY_AGENT_LISTEN_PORT = 1234
REQUEST_URL = "http://%s:%s/%s"
HTTP_REQ_METHOD_POST = 'POST'
HTTP_REQ_METHOD_GET = 'GET'
HTTP_REQ_METHOD_PUT = 'PUT'
HTTP_REQ_METHOD_DELETE = 'DELETE'
CONTENT_TYPE_HEADER = 'Content-type'
JSON_CONTENT_TYPE = 'application/json'
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'
LBAAS = 'lbaas'
PROTOCOL_MAP = {
PROTOCOL_TCP: 'tcp',
PROTOCOL_HTTP: 'http',
PROTOCOL_HTTPS: 'https',
}
BALANCE_MAP = {
LB_METHOD_ROUND_ROBIN: 'roundrobin',
LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
LB_METHOD_SOURCE_IP: 'source'
}
REQUEST_RETRIES = 0
REQUEST_TIMEOUT = 120
# Operations
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
""" Event ids """
EVENT_CREATE_POOL = 'CREATE_POOL'
EVENT_UPDATE_POOL = 'UPDATE_POOL'
EVENT_DELETE_POOL = 'DELETE_POOL'
EVENT_CREATE_VIP = 'CREATE_VIP'
EVENT_UPDATE_VIP = 'UPDATE_VIP'
EVENT_DELETE_VIP = 'DELETE_VIP'
EVENT_CREATE_MEMBER = 'CREATE_MEMBER'
EVENT_UPDATE_MEMBER = 'UPDATE_MEMBER'
EVENT_DELETE_MEMBER = 'DELETE_MEMBER'
EVENT_CREATE_POOL_HEALTH_MONITOR = 'CREATE_POOL_HEALTH_MONITOR'
EVENT_UPDATE_POOL_HEALTH_MONITOR = 'UPDATE_POOL_HEALTH_MONITOR'
EVENT_DELETE_POOL_HEALTH_MONITOR = 'DELETE_POOL_HEALTH_MONITOR'
EVENT_AGENT_UPDATED = 'AGENT_UPDATED'
EVENT_COLLECT_STATS = 'COLLECT_STATS'

View File

@ -48,6 +48,7 @@ PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
STATUS_SUCCESS = "SUCCESS"
ACTIVE_PENDING_STATUSES = (
ACTIVE,
@ -55,6 +56,8 @@ ACTIVE_PENDING_STATUSES = (
PENDING_UPDATE
)
REQUEST_URL = "http://%s:%s/%s"
# Constants to extend status strings in neutron.plugins.common.constants
ONLINE = 'ONLINE'
OFFLINE = 'OFFLINE'

View File

@ -1,145 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.contrib.nfp.config_orchestrator.common import topics
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.haproxy\
import haproxy_lb_driver
from neutron.api.v2 import attributes
from neutron.plugins.common import constants
from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db
from neutron_lbaas.extensions import loadbalancer
from neutron_lbaas.services.loadbalancer.drivers.common import (
agent_driver_base as adb
)
from neutron_lib import constants as n_constants
from neutron_lib import exceptions as n_exc
from oslo_db import exception
from oslo_utils import excutils
from oslo_utils import uuidutils
class HaproxyOnVMPluginDriver(adb.AgentDriverBase):
device_driver = haproxy_lb_driver.DRIVER_NAME
def __init__(self, plugin):
# Monkey patch LB agent topic and LB agent type
adb.l_const.LOADBALANCER_AGENT = topics.LB_NFP_CONFIGAGENT_TOPIC
adb.q_const.AGENT_TYPE_LOADBALANCER = 'NFP Loadbalancer agent'
super(HaproxyOnVMPluginDriver, self).__init__(plugin)
def _nfp_create_port_for_vip(self, context, vip_db, subnet_id, ip_address):
# resolve subnet and create port
subnet = self._core_plugin.get_subnet(context, subnet_id)
fixed_ip = {'subnet_id': subnet['id']}
if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
fixed_ip['ip_address'] = ip_address
if subnet.get('gateway_ip') == ip_address:
raise n_exc.IpAddressInUse(net_id=subnet['network_id'],
ip_address=ip_address)
port_data = {
'tenant_id': vip_db.tenant_id,
'name': 'vip-' + vip_db.id,
'network_id': subnet['network_id'],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': n_constants.DEVICE_OWNER_LOADBALANCER,
'fixed_ips': [fixed_ip]
}
port = self._core_plugin.create_port(context, {'port': port_data})
vip_db.port_id = port['id']
with context.session.begin(subtransactions=True):
vip = self._get_resource(context, lb_db.Vip, vip_db.id)
vip.update({'port_id': port['id']})
context.session.flush()
# explicitly sync session with db
# context.session.flush()
vip_db = self._get_resource(context, lb_db.Vip, vip_db.id)
lb_db.LoadBalancerPluginDb._create_port_for_vip = _nfp_create_port_for_vip
def nfp_create_vip(self, context, vip):
v = vip['vip']
tenant_id = v['tenant_id']
with context.session.begin(subtransactions=True):
if v['pool_id']:
pool = self._get_resource(context, lb_db.Pool, v['pool_id'])
# validate that the pool has same tenant
if pool['tenant_id'] != tenant_id:
raise n_exc.NotAuthorized()
# validate that the pool has same protocol
if pool['protocol'] != v['protocol']:
raise loadbalancer.ProtocolMismatch(
vip_proto=v['protocol'],
pool_proto=pool['protocol'])
if pool['status'] == constants.PENDING_DELETE:
raise loadbalancer.StateInvalid(state=pool['status'],
id=pool['id'])
vip_db = lb_db.Vip(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=v['name'],
description=v['description'],
port_id=None,
protocol_port=v['protocol_port'],
protocol=v['protocol'],
pool_id=v['pool_id'],
connection_limit=v['connection_limit'],
admin_state_up=v['admin_state_up'],
status=constants.PENDING_CREATE)
session_info = v['session_persistence']
if session_info:
s_p = self._create_session_persistence_db(
session_info,
vip_db['id'])
vip_db.session_persistence = s_p
try:
context.session.add(vip_db)
context.session.flush()
except exception.DBDuplicateEntry:
raise loadbalancer.VipExists(pool_id=v['pool_id'])
try:
# create a port to reserve address for IPAM
# do it outside the transaction to avoid rpc calls
self._create_port_for_vip(
context, vip_db, v['subnet_id'], v.get('address'))
except Exception:
# catch any kind of exceptions
with excutils.save_and_reraise_exception():
context.session.delete(vip_db)
context.session.flush()
if v['pool_id']:
# fetching pool again
pool = self._get_resource(context, lb_db.Pool, v['pool_id'])
# (NOTE): we rely on the fact that pool didn't change between
# above block and here
vip_db['pool_id'] = v['pool_id']
pool['vip_id'] = vip_db['id']
vip_db = self._get_resource(context, lb_db.Vip, vip_db['id'])
return self._make_vip_dict(vip_db)
lb_db.LoadBalancerPluginDb.create_vip = nfp_create_vip
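The assignments above rebind methods on neutron-lbaas's LoadBalancerPluginDb at import time. A self-contained sketch of that monkey-patch idiom, with hypothetical class and function names:
class PluginDb(object):
    def create_vip(self, context, vip):
        return 'stock behaviour'

def patched_create_vip(self, context, vip):
    return 'nfp behaviour'

# Rebinding the class attribute patches every instance, present and future.
PluginDb.create_vip = patched_create_vip
assert PluginDb().create_vip(None, None) == 'nfp behaviour'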

View File

@ -1,65 +0,0 @@
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description": "Template to test Haproxy Loadbalacer service",
"Parameters": {
"Subnet": {
"Description": "Pool Subnet CIDR, on which VIP port should be created",
"Type": "String"
},
"vip_ip": {
"Description": "VIP IP Address",
"Type": "String"
},
"service_chain_metadata": {
"Description": "sc metadata",
"Type": "String"
}
},
"Resources" : {
"HttpHM": {
"Type": "OS::Neutron::HealthMonitor",
"Properties": {
"admin_state_up": true,
"delay": 20,
"expected_codes": "200",
"http_method": "GET",
"max_retries": 3,
"timeout": 10,
"type": "HTTP",
"url_path": "/"
}
},
"HaproxyPool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"admin_state_up": true,
"description": "Haproxy pool from teplate",
"lb_method": "ROUND_ROBIN",
"monitors": [{"Ref":"HttpHM"}],
"name": "Haproxy pool",
"provider": "loadbalancer",
"protocol": "HTTP",
"subnet_id": {"Ref":"Subnet"},
"vip": {
"subnet": {"Ref":"Subnet"},
"address": {"Ref":"vip_ip"},
"name": "Haproxy vip",
"description": {"Ref":"service_chain_metadata"},
"protocol_port": 80,
"connection_limit": -1,
"admin_state_up": true
}
}
},
"HaproxyLb": {
"Type": "OS::Neutron::LoadBalancer",
"Properties": {
"pool_id": {"Ref":"HaproxyPool"},
"protocol_port": 80
}
}
}
}

View File

@ -1,76 +0,0 @@
#!/bin/bash
# This is a utility script to move from LBaaSV1 to LBaaSV2 and vice versa.
# It can be used only when NFP LBaaSV1 or LBaaSV2 is already installed.
# Script usage:
# bash lb_version_change.sh <lb version which you want to move to>
# e.g. bash lb_version_change.sh v1 ---> move from v2 to v1
# or bash lb_version_change.sh v2 ---> move from v1 to v2
move_to_v2() { #LBaaSV1 to LBaaSV2
exists=$(gbp service-profile-show LB-V2)
if [[ "$exists" ]] ; then
echo "It is already LBaaSV2 version on the system."
exit
fi
# Change service plugin
sudo sed -i "s/lbaas/lbaasv2/g" /etc/neutron/neutron.conf
# Change service provider
sudo sed -i "s/LOADBALANCER:loadbalancer:gbpservice.contrib.nfp.service_plugins.loadbalancer.drivers.nfp_lbaas_plugin_driver.HaproxyOnVMPluginDriver/LOADBALANCERV2:loadbalancerv2:gbpservice.contrib.nfp.service_plugins.loadbalancer.drivers.nfp_lbaasv2_plugin_driver.HaproxyOnVMPluginDriver/g" /etc/neutron/neutron_lbaas.conf
gbp service-profile-delete LB
gbp service-profile-create --servicetype LOADBALANCERV2 --insertion-mode l3 --shared True --service-flavor service_vendor=haproxy_lbaasv2,device_type=nova --vendor NFP LB-V2
echo "---- Please follow below steps now ----"
echo "1) Restart neutron service 'q-svc'"
echo "2) If LBaaSV2 image is not there then please upload using command "
echo " glance image-create --name haproxy_lbaasv2 --disk-format qcow2 --container-format bare --visibility public --file <image file location>"
}
move_to_v1() { #LBaaSV2 to LBaaSV1
exists=$(gbp service-profile-show LB)
if [[ "$exists" ]] ; then
echo "It is already LBaaSV1 version on the system."
exit
fi
# Change service plugin
sudo sed -i "s/lbaasv2/lbaas/g" /etc/neutron/neutron.conf
# Change service provider
sudo sed -i "s/LOADBALANCERV2:loadbalancerv2:gbpservice.contrib.nfp.service_plugins.loadbalancer.drivers.nfp_lbaasv2_plugin_driver.HaproxyOnVMPluginDriver/LOADBALANCER:loadbalancer:gbpservice.contrib.nfp.service_plugins.loadbalancer.drivers.nfp_lbaas_plugin_driver.HaproxyOnVMPluginDriver/g" /etc/neutron/neutron_lbaas.conf
gbp service-profile-delete LB-V2
gbp service-profile-create --servicetype LOADBALANCER --insertion-mode l3 --shared True --service-flavor service_vendor=haproxy,device_type=nova --vendor NFP LB
echo "---- Please follow below steps now ----"
echo "1) Restart neutron service 'q-svc'"
echo "2) If LBaaSV1 image is not there then please upload using command - "
echo " glance image-create --name haproxy --disk-format qcow2 --container-format bare --visibility public --file <image file location>"
}
usage() {
echo -e "\nUsage: bash lbass_version_change.sh <v2/v1>"
}
case $1 in
"v2")
move_to_v2
;;
"v1")
move_to_v1
;;
*)
usage
;;
esac

View File

@ -16,8 +16,6 @@ import uuid
from gbpservice.contrib.nfp.config_orchestrator.common import common
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import (
firewall)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import (
loadbalancer)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import vpn
from gbpservice.contrib.nfp.config_orchestrator.handlers.notification import (
handler as notif_handler)
@ -116,16 +114,6 @@ class GeneralConfigStructure(object):
return True
return False
def verify_loadbalancer_header_data(self, data, resource=None):
if all(k in data for k in ["neutron_context", "requester"]):
if resource == "vip":
if not all(k in data for k in ["network_function_id",
"vip_id"]):
return False
if data['requester'] == 'nas_service':
return True
return False
def verify_vpn_header_data(self, data, resource=None):
if all(k in data for k in ["neutron_context", "requester"]):
if resource == "ipsec_site_connection":
@ -136,19 +124,6 @@ class GeneralConfigStructure(object):
return True
return False
def verify_loadbalancer_structure(self, blob_data, resource):
if all(k in blob_data for k in ["neutron_context", resource]):
context = blob_data["neutron_context"]
try:
if context['service_info']:
data = context['service_info']
if all(k in data for k in ["pools", "vips", "members",
"health_monitors"]):
return True
except AttributeError:
return False
return False
def verify_vpn_structure(self, blob_data, resource):
if all(k in blob_data for k in ["neutron_context", "resource",
"rsrc_id", "reason"]):
@ -254,336 +229,6 @@ class FirewallTestCase(base.BaseTestCase):
self.fw_handler.delete_firewall(self.context, self.fw, self.host)
class LoadBalanceTestCase(base.BaseTestCase):
def setUp(self):
super(LoadBalanceTestCase, self).setUp()
self.conf = Conf()
self.lb_handler = loadbalancer.LbAgent(self.conf, 'sc')
self.context = TestContext().get_context()
import_path = ("neutron_lbaas.db.loadbalancer.loadbalancer_db."
"LoadBalancerPluginDb")
self.import_gp_api = import_path + '.get_pools'
self.import_gv_api = import_path + '.get_vips'
self.import_gm_api = import_path + '.get_members'
self.import_ghm_api = import_path + '.get_health_monitors'
self.import_lib = 'gbpservice.nfp.lib.transport'
self._call = 'oslo_messaging.rpc.client._CallContext.call'
self._get_pool = import_path + '.get_pool'
def _cast_loadbalancer(self, conf, context, body,
method_type, device_config=False,
network_function_event=False):
g_cnfg = GeneralConfigStructure()
try:
resource = body['config'][0]['resource']
if resource == 'pool_health_monitor':
resource = 'health_monitor'
self.assertTrue(g_cnfg._check_general_structure(
body, 'loadbalancer', resource))
except Exception:
self.assertTrue(False)
def _call_to_get_network_function_desc(self):
data = call_network_function_info()
data['network_function']['description'] = ("\n" + str(
{'service_vendor': 'xyz'}))
return data['network_function']
def _call_data(self, context, method, **kwargs):
if method.lower() == "get_network_function_details":
data = call_network_function_info()
data['network_function']['description'] = "\n" + str(
{'service_vendor': 'xyz'})
return data['network_function']
return []
def _loadbalancer_data(self, resource):
data = {'tenant_id': str(uuid.uuid4()),
'id': str(uuid.uuid4())
}
if resource.lower() not in ['member', 'health_monitor']:
desc = str({'network_function_id': str(uuid.uuid4())})
data.update({'description': desc})
if resource.lower() == 'vip':
data.update({'pool_id': str(uuid.uuid4())})
return data
def _get_mocked_pool(self, context, pool_id):
return {'id': pool_id,
'description': str({'network_function_id': str(uuid.uuid4())})}
def test_create_vip(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
vip = self._loadbalancer_data('vip')
self.lb_handler.create_vip(self.context, vip)
def test_update_vip(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
old_vip = self._loadbalancer_data('vip')
vip = self._loadbalancer_data('vip')
self.lb_handler.update_vip(self.context, old_vip, vip)
def test_delete_vip(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
vip = self._loadbalancer_data('vip')
self.lb_handler.delete_vip(self.context, vip)
def test_create_pool(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
pool = self._loadbalancer_data('pool')
driver_name = "dummy"
self.lb_handler.create_pool(self.context, pool, driver_name)
def test_update_pool(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
old_pool = self._loadbalancer_data('pool')
pool = self._loadbalancer_data('pool')
self.lb_handler.update_pool(self.context, old_pool, pool)
def test_delete_pool(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
pool = self._loadbalancer_data('pool')
self.lb_handler.delete_pool(self.context, pool)
def test_create_member(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
self._get_pool) as mock_pool, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
mock_pool.side_effect = self._get_mocked_pool
member = self._loadbalancer_data('member')
member.update({'pool_id': str(uuid.uuid4())})
self.lb_handler.create_member(self.context, member)
def test_update_member(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
self._get_pool) as mock_pool, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
mock_pool.side_effect = self._get_mocked_pool
old_member = self._loadbalancer_data('member')
member = self._loadbalancer_data('member')
pool_id = str(uuid.uuid4())
old_member.update({'pool_id': pool_id})
member.update({'pool_id': pool_id})
self.lb_handler.update_member(self.context, old_member, member)
def test_delete_member(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
self._get_pool) as mock_pool, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
mock_pool.side_effect = self._get_mocked_pool
member = self._loadbalancer_data('member')
member.update({'pool_id': str(uuid.uuid4())})
self.lb_handler.delete_member(self.context, member)
def test_create_pool_health_monitor(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
self._get_pool) as mock_pool, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
mock_pool.side_effect = self._get_mocked_pool
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
hm = self._loadbalancer_data('health_monitor')
pool_id = str(uuid.uuid4())
self.lb_handler.create_pool_health_monitor(
self.context, hm, pool_id)
def test_update_pool_health_monitor(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
self._get_pool) as mock_pool, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
mock_pool.side_effect = self._get_mocked_pool
old_hm = self._loadbalancer_data('health_monitor')
hm = self._loadbalancer_data('health_monitor')
pool_id = str(uuid.uuid4())
self.lb_handler.update_pool_health_monitor(
self.context, old_hm, hm, pool_id)
def test_delete_pool_health_monitor(self):
import_send = self.import_lib + '.send_request_to_configurator'
with mock.patch(self.import_gp_api) as gp, mock.patch(
self.import_gv_api) as gv, mock.patch(
self.import_gm_api) as gm, mock.patch(
self.import_ghm_api) as ghm, mock.patch(
self._call) as mock_call, mock.patch(
self._get_pool) as mock_pool, mock.patch(
import_send) as mock_send:
gp.return_value = []
gv.return_value = []
gm.return_value = []
ghm.return_value = []
network_function_desc = self._call_to_get_network_function_desc()
common.get_network_function_details = mock.MagicMock(
return_value=network_function_desc)
mock_call.side_effect = self._call_data
mock_send.side_effect = self._cast_loadbalancer
mock_pool.side_effect = self._get_mocked_pool
hm = self._loadbalancer_data('health_monitor')
pool_id = str(uuid.uuid4())
self.lb_handler.delete_pool_health_monitor(
self.context, hm, pool_id)
class VPNTestCase(base.BaseTestCase):
def setUp(self):
@ -762,64 +407,6 @@ class FirewallNotifierTestCase(base.BaseTestCase):
notification_data)
class LoadbalancerNotifierTestCase(base.BaseTestCase):
class Controller(object):
def new_event(self, **kwargs):
return
def post_event(self, event):
return
def setUp(self):
super(LoadbalancerNotifierTestCase, self).setUp()
self.conf = Conf()
self.n_handler = notif_handler.NaasNotificationHandler(
self.conf, self.Controller())
self.context = TestContext().get_context()
def _get_rpc_client(self):
class Context(object):
def cast(self, context, method, host='', pool_id='',
stats='', body=''):
return {}
class RPCClient(object):
def __init__(self):
self.cctxt = Context()
return RPCClient()
def get_notification_data(self):
return {'info': {'service_type': 'loadbalancer',
'context': {'logging_context': {}}},
'notification': [{'data': {'obj_type': 'lb',
'obj_id': '123',
'status': 'set_firewall_status',
'notification_type':
'update_status',
'host': 'localhost'}
}]
}
def test_update_status(self):
notification_data = self.get_notification_data()
rpc_client = self._get_rpc_client()
transport.RPCClient = mock.MagicMock(return_value=rpc_client)
self.n_handler.handle_notification(self.context,
notification_data)
def test_update_pool_stats(self):
notification_data = self.get_notification_data()
notification_data['notification'][0]['data'][
'notification_type'] = 'update_pool_stats'
rpc_client = self._get_rpc_client()
transport.RPCClient = mock.MagicMock(return_value=rpc_client)
self.n_handler.handle_notification(self.context,
notification_data)
class VpnNotifierTestCase(base.BaseTestCase):
class Controller(object):

View File

@ -1,588 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from gbpservice.contrib.nfp.configurator.agents import loadbalancer_v1 as lb
from gbpservice.contrib.nfp.configurator.lib import demuxer
from gbpservice.contrib.nfp.configurator.modules import configurator
from gbpservice.contrib.tests.unit.nfp.configurator.test_data import (
lb_test_data as test_data)
from neutron.tests import base
class LBaasRpcSenderTest(base.BaseTestCase):
"""Implements test cases for LBaasRpcSender class methods of
loadbalancer agent.
"""
@mock.patch(__name__ + '.test_data.FakeObjects.conf')
@mock.patch(__name__ + '.test_data.FakeObjects.sc')
def _get_configurator_rpc_manager_object(self, sc, conf):
""" Retrieves RPC manager object of configurator.
:param sc: mocked service controller object of process model framework
:param conf: mocked OSLO configuration file
Returns: object of RPC manager of configurator, and mock object of
service controller and oslo configurator.
"""
cm = configurator.ConfiguratorModule(sc)
dmx = demuxer.ServiceAgentDemuxer()
rpc_mgr = configurator.ConfiguratorRpcManager(sc, cm, conf, dmx)
return sc, conf, rpc_mgr
def test_update_status(self):
"""Implements test case for update_status method
of loadbalancer agent's LBaasRpcSender class.
Returns: none
"""
sc, conf, rpc_mgr = self._get_configurator_rpc_manager_object()
agent = lb.LBaasRpcSender(sc)
agent_info = {'context': 'context', 'resource': 'pool'}
agent.update_status('pool', 'object_id',
'status', agent_info, 'pool')
def test_update_pool_stats(self):
"""Implements test case for update_pool_stats method
of loadbalancer agent's LBaasRpcSender class.
Returns: none
"""
sc, conf, rpc_mgr = self._get_configurator_rpc_manager_object()
agent = lb.LBaasRpcSender(sc)
context = test_data.Context()
agent.update_pool_stats('pool_id', 'stats', context)
def test_get_logical_device(self):
"""Implements test case for get_logical_device method
of loadbalancer agent's LBaasRpcSender class.
Returns: none
"""
sc, conf, rpc_mgr = self._get_configurator_rpc_manager_object()
agent = lb.LBaasRpcSender(sc)
agent.get_logical_device(
'6350c0fd-07f8-46ff-b797-62acd23760de',
test_data.FakeObjects()._get_context_logical_device())
class LBaaSRpcManagerTest(base.BaseTestCase):
"""Implements test cases for LBaaSRpcManager class methods of
loadbalancer agent.
"""
def __init__(self, *args, **kwargs):
super(LBaaSRpcManagerTest, self).__init__(*args, **kwargs)
self.fo = test_data.FakeObjects()
self.foo = test_data.Foo()
self.arg_dict_vip = {
'context': self.fo.context,
'vip': self.fo._get_vip_object()[0],
}
self.arg_dict_vip_update = {
'context': self.fo.context,
'vip': self.fo._get_vip_object()[0],
'old_vip': self.fo._get_vip_object()[0],
}
self.arg_dict_pool_create = {
'context': self.fo.context,
'pool': self.fo._get_pool_object()[0],
'driver_name': 'loadbalancer',
}
self.arg_dict_pool_update = {
'context': self.fo.context,
'pool': self.fo._get_pool_object()[0],
'old_pool': self.fo._get_pool_object()[0]}
self.arg_dict_pool_delete = {
'context': self.fo.context,
'pool': self.fo._get_pool_object()[0],
}
self.arg_dict_member = {
'context': self.fo.context,
'member': self.fo._get_member_object()[0],
}
self.arg_dict_member_update = {
'context': self.fo.context,
'member': self.fo._get_member_object()[0],
'old_member': self.fo._get_member_object()[0],
}
self.arg_dict_health_monitor = {
'context': self.fo.context,
'health_monitor': self.fo._get_hm_object()[0],
'pool_id': self.fo._get_pool_object()[0]['id'],
}
self.arg_dict_health_monitor_update = {
'context': self.fo.context,
'health_monitor': self.fo._get_hm_object()[0],
'old_health_monitor': self.fo._get_hm_object()[0],
'pool_id': self.fo._get_pool_object()[0]['id'],
}
@mock.patch(__name__ + '.test_data.FakeObjects.conf')
@mock.patch(__name__ + '.test_data.FakeObjects.sc')
def _get_configurator_rpc_manager_object(self, sc, conf):
""" Retrieves RPC manager object of configurator.
:param sc: mocked service controller object of process model framework
:param conf: mocked OSLO configuration file
Returns: object of RPC manager of configurator, and mock object of
service controller and oslo configurator.
"""
cm = configurator.ConfiguratorModule(sc)
dmx = demuxer.ServiceAgentDemuxer()
rpc_mgr = configurator.ConfiguratorRpcManager(sc, cm, conf, dmx)
return sc, conf, rpc_mgr
def _get_lbaas_rpc_manager_object(self, conf, sc):
""" Retrieves RPC manager object of loadbalancer agent.
:param sc: mocked service controller object of process model framework
:param conf: mocked OSLO configuration file
Returns: objects of RPC manager, service controller of
loadbalancer agent
"""
agent = lb.LBaaSRpcManager(sc, conf)
return agent, sc
def _test_rpc_manager(self, operation, request_data, args):
""" Tests all create/update/delete operation of RPC manager of
loadbalancer agent.
Returns: none
"""
sc, conf, rpc_mgr = self._get_configurator_rpc_manager_object()
agent, sc = self._get_lbaas_rpc_manager_object(conf, sc)
method = self.fo.method
with mock.patch.object(
sc, 'new_event', return_value=self.foo) as mock_sc_new_event, (
mock.patch.object(
sc, 'post_event')) as mock_sc_post_event, (
mock.patch.object(
rpc_mgr, '_get_service_agent_instance', return_value=agent)):
getattr(rpc_mgr, method[operation])(self.fo.context, request_data)
mock_sc_new_event.assert_called_with(id=operation, data=args)
mock_sc_post_event.assert_called_with(self.foo)
def test_create_vip_rpc_manager(self):
"""Implements test case for create vip method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'CREATE_VIP',
self.fo.get_request_data_for_vip(),
self.arg_dict_vip)
def test_delete_vip_rpc_manager(self):
"""Implements test case for delete vip method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'DELETE_VIP',
self.fo.get_request_data_for_vip(),
self.arg_dict_vip)
def test_update_vip_rpc_manager(self):
"""Implements test case for update vip method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'UPDATE_VIP',
self.fo.get_request_data_for_vip_update(),
self.arg_dict_vip_update)
def test_create_pool_rpc_manager(self):
"""Implements test case for create pool method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'CREATE_POOL',
self.fo.get_request_data_for_create_pool(),
self.arg_dict_pool_create)
def test_delete_pool_rpc_manager(self):
"""Implements test case for delete pool method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'DELETE_POOL',
self.fo.get_request_data_for_delete_pool(),
self.arg_dict_pool_delete)
def test_update_pool_rpc_manager(self):
"""Implements test case for update pool method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'UPDATE_POOL',
self.fo.get_request_data_for_update_pool(),
self.arg_dict_pool_update)
def test_create_member_rpc_manager(self):
"""Implements test case for create member method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'CREATE_MEMBER',
self.fo.get_request_data_for_member(),
self.arg_dict_member)
def test_delete_member_rpc_manager(self):
"""Implements test case for delete member method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'DELETE_MEMBER',
self.fo.get_request_data_for_member(),
self.arg_dict_member)
def test_update_member_rpc_manager(self):
"""Implements test case for update member method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'UPDATE_MEMBER',
self.fo.get_request_data_for_update_member(),
self.arg_dict_member_update)
def test_CREATE_POOL_HEALTH_MONITOR_rpc_manager(self):
"""Implements test case for create pool_health_monitor method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'CREATE_POOL_HEALTH_MONITOR',
self.fo.get_request_data_for_pool_hm(),
self.arg_dict_health_monitor)
def test_DELETE_POOL_HEALTH_MONITOR_rpc_manager(self):
"""Implements test case for delete pool_health_monitor method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'DELETE_POOL_HEALTH_MONITOR',
self.fo.get_request_data_for_pool_hm(),
self.arg_dict_health_monitor)
def test_UPDATE_POOL_HEALTH_MONITOR_rpc_manager(self):
"""Implements test case for update pool_health_monitor method
of loadbalancer agent's RPC manager.
Returns: none
"""
self._test_rpc_manager(
'UPDATE_POOL_HEALTH_MONITOR',
self.fo.get_request_data_for_update_pool_hm(),
self.arg_dict_health_monitor_update)
class LBaasEventHandlerTestCase(base.BaseTestCase):
"""Implement test cases for LBaaSEventHandler class methods of
loadbalancer agent.
"""
def __init__(self, *args, **kwargs):
super(LBaasEventHandlerTestCase, self).__init__(*args, **kwargs)
self.fo = test_data.FakeObjects()
self.ev = test_data.FakeEvent()
self.drivers = {'loadbalancer': mock.Mock()}
def _get_lb_handler_objects(self, sc, drivers, rpcmgr):
""" Retrieves EventHandler object of loadbalancer agent.
:param sc: mocked service controller object of process model framework
:param drivers: mocked drivers object of loadbalancer object
:param rpcmgr: mocked RPC manager object loadbalancer object
Returns: objects of LBaaSEventHandler of loadbalancer agent
"""
agent = lb.LBaaSEventHandler(sc, drivers, rpcmgr)
return agent
@mock.patch(__name__ + '.test_data.FakeObjects.rpcmgr')
@mock.patch(__name__ + '.test_data.FakeObjects.sc')
def _test_handle_event(self, sc, rpcmgr):
""" Tests all create/update/delete operation of LBaaSEventHandler of
loadbalancer agent.
Returns: none
"""
agent = self._get_lb_handler_objects(sc, self.drivers, rpcmgr)
driver = self.drivers['loadbalancer']
with mock.patch.object(
agent, '_get_driver', return_value=driver), (
mock.patch.object(
driver, 'create_vip')) as mock_create_vip, (
mock.patch.object(
driver, 'delete_vip')) as mock_delete_vip, (
mock.patch.object(
driver, 'update_vip')) as mock_update_vip, (
mock.patch.object(
driver, 'create_pool')) as mock_create_pool, (
mock.patch.object(
driver, 'delete_pool')) as mock_delete_pool, (
mock.patch.object(
driver, 'update_pool')) as mock_update_pool, (
mock.patch.object(
driver, 'create_member')) as mock_create_member, (
mock.patch.object(
driver, 'delete_member')) as mock_delete_member, (
mock.patch.object(
driver, 'update_member')) as mock_update_member, (
mock.patch.object(
driver, 'create_pool_health_monitor')) as mock_create_poolhm, (
mock.patch.object(
driver, 'delete_pool_health_monitor')) as mock_delete_poolhm, (
mock.patch.object(
driver, 'update_pool_health_monitor')) as mock_update_poolhm:
vip = self.fo._get_vip_object()[0]
old_vip = self.fo._get_vip_object()[0]
pool = self.fo._get_pool_object()[0]
old_pool = self.fo._get_pool_object()[0]
member = self.fo._get_member_object()[0]
old_member = self.fo._get_member_object()[0]
hm = self.fo._get_hm_object()[0]
old_hm = self.fo._get_hm_object()[0]
pool_id = '6350c0fd-07f8-46ff-b797-62acd23760de'
agent.handle_event(self.ev)
if self.ev.id == 'CREATE_VIP':
mock_create_vip.assert_called_with(vip, self.fo.vip_context)
elif self.ev.id == 'DELETE_VIP':
mock_delete_vip.assert_called_with(vip, self.fo.vip_context)
elif self.ev.id == 'UPDATE_VIP':
mock_update_vip.assert_called_with(
old_vip, vip, self.fo.vip_context)
elif self.ev.id == 'CREATE_POOL':
mock_create_pool.assert_called_with(
pool, self.fo.vip_context)
elif self.ev.id == 'DELETE_POOL':
mock_delete_pool.assert_called_with(
pool, self.fo.vip_context)
elif self.ev.id == 'UPDATE_POOL':
mock_update_pool.assert_called_with(
old_pool, pool, self.fo.vip_context)
elif self.ev.id == 'CREATE_MEMBER':
mock_create_member.assert_called_with(
member, self.fo.context_test)
elif self.ev.id == 'DELETE_MEMBER':
mock_delete_member.assert_called_with(
member, self.fo.context_test)
elif self.ev.id == 'UPDATE_MEMBER':
mock_update_member.assert_called_with(
old_member, member, self.fo.context_test)
elif self.ev.id == 'CREATE_POOL_HEALTH_MONITOR':
mock_create_poolhm.assert_called_with(
hm, pool_id, self.fo.context_test)
elif self.ev.id == 'DELETE_POOL_HEALTH_MONITOR':
mock_delete_poolhm.assert_called_with(
hm, pool_id, self.fo.context_test)
elif self.ev.id == 'UPDATE_POOL_HEALTH_MONITOR':
mock_update_poolhm.assert_called_with(
old_hm, hm, pool_id, self.fo.context_test)
def test_create_vip_event_handler(self):
"""Implements test case for create vip method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'CREATE_VIP'
self._test_handle_event()
def test_delete_vip_event_handler(self):
"""Implements test case for delete vip method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'DELETE_VIP'
self._test_handle_event()
def test_update_vip_event_handler(self):
"""Implements test case for update vip method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'UPDATE_VIP'
self._test_handle_event()
def test_create_pool_event_handler(self):
"""Implements test case for create pool method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'CREATE_POOL'
self._test_handle_event()
def test_delete_pool_event_handler(self):
"""Implements test case for delete pool method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'DELETE_POOL'
self._test_handle_event()
def test_update_pool_event_handler(self):
"""Implements test case for update pool method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'UPDATE_POOL'
self._test_handle_event()
def test_create_member_event_handler(self):
"""Implements test case for create member method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'CREATE_MEMBER'
self._test_handle_event()
def test_delete_member_event_handler(self):
"""Implements test case for delete member method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'DELETE_MEMBER'
self._test_handle_event()
def test_update_member_event_handler(self):
"""Implements test case for update member method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'UPDATE_MEMBER'
self._test_handle_event()
def test_create_pool_hm_event_handler(self):
"""Implements test case for create pool_health_monitor method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'CREATE_POOL_HEALTH_MONITOR'
self._test_handle_event()
def test_delete_pool_hm_event_handler(self):
"""Implements test case for delete pool_health_monitor method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'DELETE_POOL_HEALTH_MONITOR'
self._test_handle_event()
def test_update_pool_hm_event_handler(self):
"""Implements test case for update pool_health_monitor method
of loadbalancer agent's LBaaSEventHandler class.
Returns: none
"""
self.ev.id = 'UPDATE_POOL_HEALTH_MONITOR'
self._test_handle_event()
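Note: every removed test above funnels through the same mock pattern; a standalone sketch of it (illustrative only, not part of this change):

import mock

driver = mock.Mock()
# handle_event forwards the event payload to the matching driver
# method; assert_called_with then verifies the exact arguments,
# as in the CREATE_VIP branch of _test_handle_event above.
driver.create_vip({'id': 'vip-1'}, {'notification_data': {}})
driver.create_vip.assert_called_with({'id': 'vip-1'},
                                     {'notification_data': {}})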


@ -1,323 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from gbpservice.contrib.nfp.configurator.agents import loadbalancer_v1 as lb
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.\
haproxy import (haproxy_lb_driver as lb_driver)
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.\
haproxy import (haproxy_rest_client as _rest_client)
from gbpservice.contrib.tests.unit.nfp.configurator.test_data import (
lb_test_data as test_data)
from neutron.tests import base
from oslo_serialization import jsonutils
class HaproxyOnVmDriverTestCase(base.BaseTestCase):
""" Implements test cases for haproxy loadbalancer driver. """
def __init__(self, *args, **kwargs):
super(HaproxyOnVmDriverTestCase, self).__init__(*args, **kwargs)
self.fo = test_data.FakeObjects()
self.data = test_data.AssertionData()
self.driver = lb_driver.HaproxyOnVmDriver()
self.resp = mock.Mock()
self.fake_resp_dict = {'status': True,
'config_success': True,
'delete_success': True}
self.fo.vip = self.fo._get_vip_object()[0]
self.fo.old_vip = self.fo._get_vip_object()[0]
self.fo.pool = self.fo._get_pool_object()[0]
self.fo.old_pool = self.fo._get_pool_object()[0]
self.fo.hm = self.fo._get_hm_object()
self.fo.old_hm = self.fo._get_hm_object()
self.fo.member = self.fo._get_member_object()
self.fo.old_member = self.fo._get_member_object()
self.vip = self.fo.vip
self.resp.status_code = 200
self.get_resource = {
'server': {
'resource': [],
'srvr:4910851f-4af7-4592-ad04-08b508c6fa21': []},
'timeout': {}}
@mock.patch(__name__ + '.test_data.FakeObjects.rpcmgr')
@mock.patch(__name__ + '.test_data.FakeObjects.drivers')
@mock.patch(__name__ + '.test_data.FakeObjects.sc')
def _get_lb_handler_objects(self, sc, drivers, rpcmgr):
""" Retrieves EventHandler object of loadbalancer agent.
:param sc: mocked service controller object of process model framework
:param drivers: mocked drivers object of loadbalancer agent
:param rpcmgr: mocked RPC manager object of loadbalancer agent
Returns: objects of LBaaSEventHandler of loadbalancer agent
"""
agent = lb.LBaaSEventHandler(sc, drivers, rpcmgr)
return agent
def _test_lbaasdriver(self, method_name):
""" Tests all create/update/delete operation of loadbalancer driver.
Returns: none
"""
agent = self._get_lb_handler_objects()
driver = lb_driver.HaproxyOnVmDriver(agent.plugin_rpc)
rest_client = _rest_client.HttpRequests(
self.data.url, self.data.port)
logical_device_return_value = {
'vip': self.fo.vip,
'old_vip': self.fo.old_vip,
'pool': self.fo.pool,
'healthmonitors': self.fo.hm,
'members': self.fo.member}
with mock.patch.object(
agent.plugin_rpc, 'get_logical_device',
return_value=logical_device_return_value), (
mock.patch.object(
driver, '_get_rest_client', return_value=rest_client)), (
mock.patch.object(
rest_client.pool, 'request',
return_value=self.resp)) as mock_request, (
mock.patch.object(
rest_client, 'get_resource',
return_value=self.get_resource)) as mock_get_resource:
mock_request.status_code = 200
if method_name == 'DELETE_VIP':
driver.delete_vip(self.fo.vip, self.fo.lb_api_context())
mock_request.assert_called_with(
'DELETE',
data=None,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.delete_vip_url)
elif method_name == 'CREATE_VIP':
driver.create_vip(self.fo.vip, self.fo.lb_api_context())
data = jsonutils.dumps(self.data.create_vip_data)
mock_request.assert_called_with(
'POST',
data=data,
headers=self.data.header,
timeout=30,
url=self.data.create_vip_url)
mock_get_resource.assert_called_with(
self.data.create_vip_resources)
elif method_name == 'UPDATE_VIP':
driver.update_vip(
self.fo.old_vip,
self.fo.vip,
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_vip_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.update_vip_url)
elif method_name == 'CREATE_POOL':
driver.create_pool(self.fo.pool, self.fo.lb_api_context())
elif method_name == 'DELETE_POOL':
driver.delete_pool(self.fo.pool, self.fo.lb_api_context())
elif method_name == 'UPDATE_POOL':
driver.update_pool(
self.fo.old_pool,
self.fo.pool,
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_pool_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.update_pool_url)
elif method_name == 'CREATE_MEMBER':
driver.create_member(self.fo.member[0],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.create_member_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.create_member_url)
elif method_name == 'DELETE_MEMBER':
driver.delete_member(self.fo.member[0],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.delete_member_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.delete_member_url)
elif method_name == 'UPDATE_MEMBER':
driver.update_member(
self.fo.old_member[0],
self.fo.member[0],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_member_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.update_member_url)
elif method_name == 'CREATE_POOL_HEALTH_MONITOR':
driver.create_pool_health_monitor(
self.fo.hm[0], self.fo._get_pool_object()[0]['id'],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.create_hm_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.create_hm_url)
elif method_name == 'DELETE_POOL_HEALTH_MONITOR':
driver.delete_pool_health_monitor(
self.fo.hm[0], self.fo._get_pool_object()[0]['id'],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.delete_hm_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.delete_hm_url)
elif method_name == 'UPDATE_POOL_HEALTH_MONITOR':
driver.update_pool_health_monitor(
self.fo.old_hm[0],
self.fo.hm[0], self.fo._get_pool_object()[0]['id'],
self.fo.lb_api_context())
data = jsonutils.dumps(self.data.update_hm_data)
mock_request.assert_called_with(
'PUT',
data=data,
headers=self.data.header,
timeout=self.data.timeout,
url=self.data.update_hm_url)
def test_vip_create_lbaasdriver(self):
"""Implements test case for create vip method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('CREATE_VIP')
def test_vip_delete_lbaasdriver(self):
"""Implements test case for delete vip method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('DELETE_VIP')
def test_vip_update_lbaasdriver(self):
"""Implements test case for update vip method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('UPDATE_VIP')
def test_pool_create_lbaasdriver(self):
"""Implements test case for create pool method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('CREATE_POOL')
def test_pool_delete_lbaasdriver(self):
"""Implements test case for delete vip method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('DELETE_POOL')
def test_pool_update_lbaasdriver(self):
"""Implements test case for update vip method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('UPDATE_POOL')
def test_member_create_lbaasdriver(self):
"""Implements test case for create member method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('CREATE_MEMBER')
def test_member_delete_lbaasdriver(self):
"""Implements test case for delete member method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('DELETE_MEMBER')
def test_member_update_lbaasdriver(self):
"""Implements test case for update member method of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('UPDATE_MEMBER')
def test_pool_health_monitor_create_lbaasdriver(self):
"""Implements test case for create pool_health_monitor method of
loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('CREATE_POOL_HEALTH_MONITOR')
def test_pool_health_monitor_delete_lbaasdriver(self):
"""Implements test case for delete pool_health_monitor method
of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('DELETE_POOL_HEALTH_MONITOR')
def test_pool_health_monitor_update_lbaasdriver(self):
"""Implements test case for update pool_health_monitor method
of loadbalancer driver.
Returns: none
"""
self._test_lbaasdriver('UPDATE_POOL_HEALTH_MONITOR')
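Note: the driver tests above all assert on the serialized REST payload; a minimal standalone sketch of that comparison (illustrative only):

from oslo_serialization import jsonutils

# The test serializes the expected payload exactly as the driver does,
# then compares the resulting string via assert_called_with.
expected = {"timeout": {}, "server": {"resource": []}}
data = jsonutils.dumps(expected)
assert jsonutils.loads(data) == expected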


@ -252,94 +252,11 @@ class BaseTestCase(base.BaseTestCase):
"description": ";".join(self.desc1)
}]
self.pools = [{
"status": "ACTIVE",
"lb_method": "ROUND_ROBIN",
"protocol": "TCP",
"description": "",
"health_monitors": [],
"members":
["4910851f-4af7-4592-ad04-08b508c6fa21",
"76d2a5fc-b39f-4419-9f33-3b21cf16fe47"],
"status_description": None,
"id": "6350c0fd-07f8-46ff-797-62acd23760de",
"vip_id": "7a755739-1bbb-4211-9130-b6c82d9169a5",
"name": "lb-pool",
"admin_state_up": True,
"subnet_id": "b31cdafe-bdf3-4c19-b768-34d623d77d6c",
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"health_monitors_status": [],
"provider": "haproxy"
}]
self.vips = [{
"status": "ACTIVE",
"protocol": "TCP",
"description": "",
"address": "42.0.0.14",
"protocol_port": 22,
"port_id": "cfd9fcc0-c27b-478b-985e-8dd73f2c16e8",
"id": "7a755739-1bbb-4211-9130-b6c82d9169a5",
"status_description": None,
"name": "lb-vip",
"admin_state_up": True,
"subnet_id": "b31cdafe-bdf3-4c19-b768-34d623d77d6c",
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"connection_limit": -1,
"pool_id": "6350c0fd-07f8-46ff-b797-62acd23760de",
"session_persistence": None
}]
self.health_monitors = [{
"admin_state_up": True,
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"delay": 10,
"max_retries": 3,
"timeout": 10,
"pools": [],
"type": "PING",
"id": "c30d8a88-c719-4b93-aa64-c58efb397d86"
}]
self.members = [{
"admin_state_up": True,
"status": "ACTIVE",
"status_description": None,
"weight": 1,
"address": "42.0.0.11",
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"protocol_port": 80,
"id": "4910851f-4af7-4592-ad04-08b508c6fa21",
"pool_id": "6350c0fd-07f8-46ff-b797-62acd23760de"
},
{
"admin_state_up": True,
"status": "ACTIVE",
"status_description": None,
"weight": 1,
"address": "42.0.0.13",
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"protocol_port": 22,
"id": "76d2a5fc-b39f-4419-9f33-3b21cf16fe47",
"pool_id": "6350c0fd-07f8-46ff-b797-62acd23760de"
}]
# update the below lists as per the future requirements
self.firewalls = []
self.firewall_policies = []
self.firewall_rules = []
def _test_get_lb_info(self):
"""Prepares LB service_info needed for LB context
Returns: LB service info
"""
self.service_info['pools'] = self.pools
self.service_info['members'] = self.members
self.service_info['vips'] = self.vips
self.service_info['health_monitors'] = self.health_monitors
self.service_info['ports'] = self.ports
self.service_info['subnets'] = self.subnets
return self.service_info
def _test_get_vpn_info(self):
"""Prepares VPN service_info needed for VPN context


@ -13,7 +13,6 @@
import filter_base
from gbpservice.contrib.nfp.configurator.lib import data_filter
import mock
class FilterTest(filter_base.BaseTestCase):
@ -51,16 +50,6 @@ class FilterTest(filter_base.BaseTestCase):
self.context['service_info'] = service_info
return self.context
def _make_lb_service_context(self):
"""Make the context for the lb service
Returns: lb service context
"""
service_info = self._test_get_lb_info()
self.context['service_info'] = service_info
return self.context
def _make_fw_service_context(self):
"""Make the context for the fw service
@ -71,30 +60,6 @@ class FilterTest(filter_base.BaseTestCase):
self.context['service_info'] = service_info
return self.context
def test_make_msg(self):
"""Test make_msg() of data_filter.py
"""
retval = self.filter_obj.make_msg('get_logical_device',
pool_id=self.pools[0]['id'])
self.assertEqual(retval, {'method': 'get_logical_device',
'args': {'pool_id': self.pools[0]['id']}})
def test_make_msg_empty(self):
"""Test make_msg() of data_filter.py
"""
retval = self.filter_obj.make_msg('get_logical_device')
self.assertEqual(retval, {'args': {}, 'method': 'get_logical_device'})
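Note: a condensed sketch of the make_msg() contract the two removed tests above covered (illustrative only, matching their assertions):

def make_msg(method, **kwargs):
    # Wraps an RPC method name and its keyword arguments into the
    # message dict that data_filter expects.
    return {'method': method, 'args': kwargs}

assert make_msg('get_logical_device', pool_id='p1') == {
    'method': 'get_logical_device', 'args': {'pool_id': 'p1'}}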
def test_call(self):
"""Test call() of data_filter.py
"""
with mock.patch.object(self.filter_obj, "call") as call_mock:
call_mock.return_value = True
retval = self._make_test(self._make_lb_service_context(),
'get_logical_device',
pool_id=[self.pools[0]['id']])
self.assertTrue(retval)
def test_get_vpn_service_with_tenantid(self):
"""Test get_vpn_services() of data_filter.py by passing
only tenant_id in filters
@ -126,22 +91,6 @@ class FilterTest(filter_base.BaseTestCase):
peer_address=[self.ipsec_site_connections[0]['peer_address']])
self.assertEqual(retval, self.ipsec_site_connections)
def test_get_logical_device(self):
"""Test get_logical_device() of data_filter.py
"""
retval = self._make_test(self._make_lb_service_context(),
'get_logical_device',
pool_id=self.pools[0]['id'])
self.ports[0]['fixed_ips'] = self.subnets[1]
self.vips[0]['port'] = self.ports[0]
expected = {'pool': self.pools[0],
'vip': self.vips[0],
'members': self.members[0],
'healthmonitors': {}
}
self.assertNotEqual(retval, expected)
def test_get_vpn_servicecontext_ipsec_service_type(self):
"""Test get_vpn_servicecontext() of data_filter.py
based on ipsec service type


@ -1,578 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This class provides data that is needed for calling methods of
agent and driver.
"""
class Foo(object):
key = 'key'
serialize = 'serialize'
binding_key = 'binding_key'
class Context(object):
def to_dict(self):
return {}
class FakeObjects(object):
sc = 'sc'
context = {'notification_data': {},
'resource': 'context_resource'}
context_pool = {'notification_data': {},
'resource': 'vip'}
conf = 'conf'
rpcmgr = 'rpcmgr'
nqueue = 'nqueue'
drivers = ['haproxy']
vip_context = {'notification_data': {}, 'resource': 'context_resource'}
context_test = {'notification_data': {}, 'resource': 'context_resource'}
method = {'CREATE_VIP': 'create_network_function_config',
'DELETE_VIP': 'delete_network_function_config',
'UPDATE_VIP': 'update_network_function_config',
'CREATE_POOL': 'create_network_function_config',
'DELETE_POOL': 'delete_network_function_config',
'UPDATE_POOL': 'update_network_function_config',
'CREATE_MEMBER': 'create_network_function_config',
'DELETE_MEMBER': 'delete_network_function_config',
'UPDATE_MEMBER': 'update_network_function_config',
'CREATE_POOL_HEALTH_MONITOR': 'create_network_function_config',
'DELETE_POOL_HEALTH_MONITOR': 'delete_network_function_config',
'UPDATE_POOL_HEALTH_MONITOR': 'update_network_function_config'}
def _get_context_logical_device(self):
context_logical_device = {
'service_info': {
'pools': self._get_pool_object(),
'vips': self._get_vip_object(),
'members': self._get_member_object(),
'health_monitors': self._get_hm_object(),
'ports': self._get_ports_object(),
'subnets': self._get_subnets_object()}}
return context_logical_device
def lb_api_context(self):
context = {
'agent_info': {
'resource': 'firewall',
'service_vendor': 'vyos',
'context': {'requester': 'device_orch',
'logging_context': {}},
'resource_type': 'firewall'},
'notification_data': {}, 'service_info': {},
"resource_data": {
"forward_route": True,
"tenant_id": "ac33b4c2d80f485a86ea515c09c74949",
"nfs": [{
"role": "master",
"svc_mgmt_fixed_ip": "11.0.0.37",
"networks": [
{"cidr": "11.0.1.0/24",
"gw_ip": "",
"type": "provider",
"ports": [{
"mac": "fa:16:3e:d9:4c:33",
"fixed_ip": "11.0.1.1",
"floating_ip": ""}]},
{"cidr": "192.168.0.0/28",
"gw_ip": "192.168.0.1 ",
"type": "stitching",
"ports": [{
"mac": "fa:16:3e:da:ca:4d",
"fixed_ip": "192.168.0.3",
"floating_ip": ""}]}
]}]}}
return context
def get_request_data_for_vip(self):
"""Returns request data needed for create_vip method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "vip",
"resource_data": {
"neutron_context": self.context,
"vip": self._get_vip_object()[0]
}}]}
return request_data
def get_request_data_for_vip_update(self):
"""Returns request data needed for update_vip method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "vip",
"resource_data": {
"neutron_context": self.context,
"vip": self._get_vip_object()[0],
"old_vip": self._get_vip_object()[0]
}}]}
return request_data
def get_request_data_for_create_pool(self):
"""Returns request data needed for create_pool method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "pool",
"resource_data": {
"neutron_context": self.context,
"pool": self._get_pool_object()[0],
"driver_name": "loadbalancer"
}}]}
return request_data
def get_request_data_for_delete_pool(self):
"""Returns request data needed for delete_pool method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "pool",
"resource_data": {
"neutron_context": self.context,
"pool": self._get_pool_object()[0]
}}]}
return request_data
def get_request_data_for_update_pool(self):
"""Returns request data needed for update_pool method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "pool",
"resource_data": {
"neutron_context": self.context,
"pool": self._get_pool_object()[0],
"old_pool": self._get_pool_object()[0]
}}]}
return request_data
def get_request_data_for_member(self):
"""Returns request data needed for create_member method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "member",
"resource_data": {
"neutron_context": self.context,
"member": self._get_member_object()[0],
}}]}
return request_data
def get_request_data_for_pool_hm(self):
"""Returns request data needed for create_pool_health_monitor method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "pool_health_monitor",
"resource_data": {
"neutron_context": self.context,
"health_monitor": self._get_hm_object()[0],
"pool_id": self._get_pool_object()[0]['id']
}}]}
return request_data
def get_request_data_for_update_pool_hm(self):
"""Returns request data needed for update_pool_health_monitor method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "pool_health_monitor",
"resource_data": {
"neutron_context": self.context,
"health_monitor": self._get_hm_object()[0],
"pool_id": self._get_pool_object()[0]['id'],
"old_health_monitor": self._get_hm_object()[0]
}}]}
return request_data
def get_request_data_for_update_member(self):
"""Returns request data needed for update_member method.
Returns: request_data
"""
request_data = {
"info": {'context': {"logging_context": {}},
'service_type': "loadbalancer",
'service_vendor': "haproxy",
},
"config": [{
"resource": "member",
"resource_data": {
"neutron_context": self.context,
"member": self._get_member_object()[0],
"old_member": self._get_member_object()[0]
}}]}
return request_data
def _get_vip_object(self):
"""Returns objects that contains vip related information.
Returns: vip
"""
vip = [{"status": "ACTIVE",
"protocol": "TCP",
"description": '{"floating_ip": "192.168.100.149",'
'"provider_interface_mac":'
'"aa:bb:cc:dd:ee:ff"}',
"address": "42.0.0.14",
"protocol_port": 22,
"port_id": "cfd9fcc0-c27b-478b-985e-8dd73f2c16e8",
"id": "7a755739-1bbb-4211-9130-b6c82d9169a5",
"status_description": None,
"name": "lb-vip",
"admin_state_up": True,
"subnet_id": "b31cdafe-bdf3-4c19-b768-34d623d77d6c",
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"connection_limit": -1,
"pool_id": "6350c0fd-07f8-46ff-b797-62acd23760de",
"session_persistence": None}]
return vip
def _get_pool_object(self):
"""Returns objects that contains pool related information.
Returns: pool
"""
pool = [{"status": "ACTIVE",
"lb_method": "ROUND_ROBIN",
"protocol": "TCP",
"description": "",
"health_monitors": [],
"members":
[
"4910851f-4af7-4592-ad04-08b508c6fa21"
],
"status_description": None,
"id": "6350c0fd-07f8-46ff-b797-62acd23760de",
"vip_id": "7a755739-1bbb-4211-9130-b6c82d9169a5",
"name": "lb-pool",
"admin_state_up": True,
"subnet_id": "b31cdafe-bdf3-4c19-b768-34d623d77d6c",
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"health_monitors_status": [],
"provider": "haproxy"}]
return pool
def _get_member_object(self):
"""Returns objects that contains member related information.
Returns: member
"""
member = [{
"admin_state_up": True,
"status": "ACTIVE",
"status_description": None,
"weight": 1,
"address": "42.0.0.11",
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"protocol_port": 80,
"id": "4910851f-4af7-4592-ad04-08b508c6fa21",
"pool_id": "6350c0fd-07f8-46ff-b797-62acd23760de"}]
return member
def _get_hm_object(self):
"""Returns objects that contains health_monitor related information.
Returns: hm
"""
hm = [{
"admin_state_up": True,
"tenant_id": "f6b09b7a590642d8ac6de73df0ab0686",
"delay": 10,
"max_retries": 3,
"timeout": 10,
"pools": [],
"type": "PING",
"id": "c30d8a88-c719-4b93-aa64-c58efb397d86"
}]
return hm
def _get_ports_object(self):
"""Returns objects that contains health_monitor related information.
Returns: hm
"""
ports = [{"status": "ACTIVE",
"name": "",
"allowed_address_pairs": [],
"admin_state_up": True,
"network_id": "92f423a7-f44e-4726-b453-c8a1369a3ad0",
"tenant_id": "5e67167662f94fd5987e12a68ea6c1d8",
"extra_dhcp_opts": [],
"binding:vnic_type": "normal",
"device_owner": "network:dhcp",
"mac_address": "fa:16:3e:01:19:11",
"fixed_ips": [
{
"subnet_id": "2670bdcd-1bcf-4b97-858d-ab0d621983cc",
"ip_address": "11.0.0.3"
},
{
"subnet_id": "94aee832-935b-4e23-8f90-b6a81b0195b1",
"ip_address": "192.168.0.2"
}
],
"id": "cfd9fcc0-c27b-478b-985e-8dd73f2c16e8",
"security_groups": [],
"device_id": ("dhcpf986c817-fd54-5bae-a8e4-e473b69100d2-"
"92f423a7-f44e-4726-b453-c8a1369a3ad0")
}]
return ports
def _get_subnets_object(self):
"""Returns objects that contains health_monitor related information.
Returns: hm
"""
subnets = [{
"name": "apic_owned_ew-consumer",
"enable_dhcp": True,
"network_id": "0ced2567-47a0-4b67-be52-0e9695e8b0e6",
"tenant_id": "5e67167662f94fd5987e12a68ea6c1d8",
"dns_nameservers": [],
"gateway_ip": "11.0.3.1",
"ipv6_ra_mode": None,
"allocation_pools": [
{
"start": "11.0.3.2",
"end": "11.0.3.254"
}
],
"host_routes": [],
"ip_version": 4,
"ipv6_address_mode": None,
"cidr": "11.0.3.0/24",
"id": "ea9ff596-51bc-4381-8aff-ee9f0ef7e319"
}]
return subnets
""" This class provides all the data needed for event.
"""
class FakeEvent(object):
def __init__(self):
fo = FakeObjects()
kwargs = {'key': 'value'}
self.data = {
'context': {'notification_data': {},
'resource': 'context_resource',
'agent_info': {'service_vendor': '',
'service_feature': '',
'context': {},
'resource': ''
}
},
'vip': fo._get_vip_object()[0],
'old_vip': fo._get_vip_object()[0],
'pool': fo._get_pool_object()[0],
'old_pool': fo._get_pool_object()[0],
'member': fo._get_member_object()[0],
'old_member': fo._get_member_object()[0],
'health_monitor': fo._get_hm_object()[0],
'old_health_monitor': fo._get_hm_object()[0],
'pool_id': '6350c0fd-07f8-46ff-b797-62acd23760de',
'driver_name': 'loadbalancer',
'host': 'host',
'kwargs': kwargs,
}
""" This class provides assertion data for HaproxyOnVmDriverTestCase.
It holds the values the mocked methods are expected to be called with,
as verified via assert_called_with; these values correspond to the
input data used to invoke the mocked methods.
"""
class AssertionData(object):
url = '192.168.100.149'
port = '1234'
header = {'Content-Type': 'application/json'}
timeout = 30
delete_vip_url = ('http://192.168.100.149:1234/backend/'
'bck:6350c0fd-07f8-46ff-b797-62acd23760de')
create_vip_data = {"frnt:7a755739-1bbb-4211-9130-b6c82d9169a5":
{"option": {"tcplog": True},
"bind": "42.0.0.14:22",
"mode": "tcp",
"default_backend":
"bck:6350c0fd-07f8-46ff-b797-62acd23760de",
"provider_interface_mac": "fa:16:3e:d9:4c:33"
}
}
create_vip_url = 'http://192.168.100.149:1234/frontend'
create_vip_resources = 'backend/bck:6350c0fd-07f8-46ff-b797-62acd23760de'
update_vip_data = {"option": {"tcplog": True},
"bind": "42.0.0.14:22",
"mode": "tcp",
"default_backend":
"bck:6350c0fd-07f8-46ff-b797-62acd23760de",
"provider_interface_mac": "fa:16:3e:d9:4c:33"
}
update_vip_url = ('http://192.168.100.149:1234/frontend/frnt:'
'7a755739-1bbb-4211-9130-b6c82d9169a5')
update_pool_data = {"mode": "tcp",
"balance": "roundrobin",
"option": {},
"timeout": {"check": "10s"},
"server": {
"srvr:4910851f-4af7-4592-ad04-08b508c6fa21":
["42.0.0.11:80", "weight 1",
"check inter 10s fall 3"]
},
}
update_pool_url = ('http://192.168.100.149:1234/backend/bck:'
'6350c0fd-07f8-46ff-b797-62acd23760de')
create_member_data = {"timeout": {},
"server":
{
"srvr:4910851f-4af7-4592-ad04-08b508c6fa21":
["42.0.0.11:80", "weight 1",
"check inter 10s fall 3"],
"resource": []
}
}
create_member_url = ('http://192.168.100.149:1234/backend/bck:'
'6350c0fd-07f8-46ff-b797-62acd23760de')
delete_member_data = {"timeout": {},
"server": {"resource": []}
}
delete_member_url = ('http://192.168.100.149:1234/backend/bck:'
'6350c0fd-07f8-46ff-b797-62acd23760de')
update_member_data = create_member_data
update_member_url = ('http://192.168.100.149:1234/backend/bck:'
'6350c0fd-07f8-46ff-b797-62acd23760de')
create_hm_data = {"timeout": {"check": "10s"},
"server":
{
"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": [],
"resource": []
}
}
create_hm_url = ('http://192.168.100.149:1234/backend/bck:'
'6350c0fd-07f8-46ff-b797-62acd23760de')
delete_hm_data = {"timeout": {},
"server":
{
"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": [],
"resource": []
}
}
delete_hm_url = ('http://192.168.100.149:1234/backend/bck:'
'6350c0fd-07f8-46ff-b797-62acd23760de')
update_hm_data = create_hm_data
update_hm_url = ('http://192.168.100.149:1234/backend/bck:'
'6350c0fd-07f8-46ff-b797-62acd23760de')
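Note: the URLs in AssertionData follow one scheme; a standalone sketch of how they are composed (the host comes from the VIP description's floating_ip, and 'bck:'/'frnt:' prefix backend and frontend resources):

url, port = '192.168.100.149', '1234'
pool_id = '6350c0fd-07f8-46ff-b797-62acd23760de'
backend_url = 'http://%s:%s/backend/bck:%s' % (url, port, pool_id)
assert backend_url == ('http://192.168.100.149:1234/backend/'
                       'bck:6350c0fd-07f8-46ff-b797-62acd23760de')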


@ -298,15 +298,13 @@ class NFPContext(object):
class NFPNodeDriver(driver_base.NodeDriverBase):
SUPPORTED_SERVICE_TYPES = [
pconst.LOADBALANCER, pconst.FIREWALL, pconst.VPN,
pconst.FIREWALL, pconst.VPN,
pconst.LOADBALANCERV2]
vendor_name = nfp_constants.NFP_VENDOR.upper()
required_heat_resources = {
pconst.LOADBALANCERV2: ['OS::Neutron::LBaaS::LoadBalancer',
'OS::Neutron::LBaaS::Listener',
'OS::Neutron::LBaaS::Pool'],
pconst.LOADBALANCER: ['OS::Neutron::LoadBalancer',
'OS::Neutron::Pool'],
pconst.FIREWALL: ['OS::Neutron::Firewall',
'OS::Neutron::FirewallPolicy'],
pconst.VPN: ['OS::Neutron::VPNService'],
@ -574,8 +572,7 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
nfp_constants.DELETE)
def update_policy_target_added(self, context, policy_target):
if context.current_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
if context.current_profile['service_type'] == pconst.LOADBALANCERV2:
if self._is_service_target(policy_target):
return
context._plugin_context = self._get_resource_owner_context(
@ -594,8 +591,7 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
operation=nfp_constants.UPDATE)
def update_policy_target_removed(self, context, policy_target):
if context.current_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
if context.current_profile['service_type'] == pconst.LOADBALANCERV2:
if self._is_service_target(policy_target):
return
context._plugin_context = self._get_resource_owner_context(
@ -987,13 +983,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
allowed_chain_combinations = [
[pconst.VPN],
[pconst.VPN, pconst.FIREWALL],
[pconst.VPN, pconst.LOADBALANCER],
[pconst.VPN, pconst.FIREWALL, pconst.LOADBALANCER],
[pconst.VPN, pconst.FIREWALL, pconst.LOADBALANCERV2],
[pconst.FIREWALL],
[pconst.FIREWALL, pconst.LOADBALANCER],
[pconst.FIREWALL, pconst.LOADBALANCERV2],
[pconst.LOADBALANCER],
[pconst.LOADBALANCERV2]]
if service_type_list_in_chain not in allowed_chain_combinations:
@ -1086,8 +1078,7 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
self._get_consumers_for_provider(context,
service_targets['provider_ptg'][0])
if context.current_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
if context.current_profile['service_type'] == pconst.LOADBALANCERV2:
config_param_values = sc_instance.get('config_param_values', {})
if config_param_values:
config_param_values = jsonutils.loads(config_param_values)
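Note: after this change the chain validation reduces to the list below; a standalone sketch with plain strings standing in for the pconst constants (illustrative only):

allowed_chain_combinations = [
    ['VPN'],
    ['VPN', 'FIREWALL'],
    ['VPN', 'FIREWALL', 'LOADBALANCERV2'],
    ['FIREWALL'],
    ['FIREWALL', 'LOADBALANCERV2'],
    ['LOADBALANCERV2'],
]
# v1 chains are no longer accepted:
assert ['FIREWALL', 'LOADBALANCER'] not in allowed_chain_combinations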


@ -20,70 +20,80 @@ class DummyDictionaries(object):
testing the heat_driver test cases.
"""
DEFAULT_LB_CONFIG = {
u'heat_template_version': u'2013-05-23',
u'description': u'Configuration for Haproxy Neutron LB service',
u'parameters': {
u'Subnet': {
u'type': u'string',
u'description': u'Pool Subnet-CIDR, on which VIP port created'
DEFAULT_LBV2_CONFIG = {
"heat_template_version": "2015-10-15",
"description": "Configuration for Haproxy Neutron LB V2 service",
"parameters": {
"lb_port": {
"type": "number",
"default": 80,
"description": "Port used by the load balancer"
},
u'vip_ip': {
u'type': u'string',
u'description': u'VIP IP Address'
"app_port": {
"type": "number",
"default": 80,
"description": "Port used by the servers"
},
u'service_chain_metadata': {
u'type': u'string',
u'description': u'sc metadata'
"Subnet": {
"type": "string",
"description": "Subnet on which the LB will be located"
},
"vip_ip": {
"type": "string",
"description": "VIP IP Address"
},
"service_chain_metadata": {
"type": "string",
"description": "sc metadata"
}
},
u'resources': {
u'LoadBalancerPool': {
u'type': u'OS::Neutron::Pool',
u'properties': {
u'lb_method': u'ROUND_ROBIN',
u'protocol': u'TCP',
u'name': u'Haproxy pool-lb-chain',
u'admin_state_up': True,
u'subnet_id': {
u'get_param': u'Subnet'
},
u'vip': {
u'subnet': {
u'get_param': u'Subnet'
},
u'description': {
u'get_param': u'service_chain_metadata'
},
u'admin_state_up': True,
u'connection_limit': -1,
u'address': {
u'get_param': u'vip_ip'
},
u'protocol_port': 3939,
u'name': u'LoadBalancerPool vip-lb-chain'
},
u'provider': u'haproxy_on_vm',
u'monitors': [{u'get_resource': u'HealthMonitor'}],
u'description': u'Haproxy pool from template'
"resources": {
"monitor": {
"type": "OS::Neutron::LBaaS::HealthMonitor",
"properties": {
"delay": 3,
"type": "HTTP",
"timeout": 3,
"max_retries": 3,
"pool": {
"get_resource": "pool"
}
}
},
u'HealthMonitor': {
u'type': u'OS::Neutron::HealthMonitor',
u'properties': {
u'delay': 20,
u'max_retries': 5,
u'type': u'PING',
u'timeout': 10,
u'admin_state_up': True
"pool": {
"type": "OS::Neutron::LBaaS::Pool",
"properties": {
"lb_algorithm": "ROUND_ROBIN",
"protocol": "HTTP",
"listener": {
"get_resource": "listener"
}
}
},
u'LoadBalancer': {
u'type': u'OS::Neutron::LoadBalancer',
u'properties': {
u'protocol_port': 101,
u'pool_id': {
u'get_resource': u'LoadBalancerPool'
"listener": {
"type": "OS::Neutron::LBaaS::Listener",
"properties": {
"loadbalancer": {
"get_resource": "loadbalancer"
},
"protocol": "HTTP",
"protocol_port": {
"get_param": "lb_port"
}
}
},
"loadbalancer": {
"type": "OS::Neutron::LBaaS::LoadBalancer",
"properties": {
"vip_subnet": {
"get_param": "Subnet"
},
"provider": "loadbalancerv2",
"vip_address": {
"get_param": "vip_ip"
},
"description": {
"get_param": "service_chain_metadata"
}
}
}
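Note: the v2 template above wires pool -> listener -> loadbalancer through get_resource references; a tiny standalone walker (illustrative only, not Heat's own resolution logic) showing how those references are read:

def references(resource):
    # Collect the names of the resources this one depends on.
    props = resource.get('properties', {})
    return [v['get_resource'] for v in props.values()
            if isinstance(v, dict) and 'get_resource' in v]

listener = {'properties': {'protocol': 'HTTP',
                           'loadbalancer': {'get_resource': 'loadbalancer'}}}
assert references(listener) == ['loadbalancer']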
@ -493,17 +503,6 @@ class DummyDictionaries(object):
'fw_policy_key': u'sc_firewall_policy'
}
pool_members = {
'type': 'OS::Neutron::PoolMember',
'properties': {
'protocol_port': 101,
'admin_state_up': True,
'pool_id': {'get_resource': u'LoadBalancerPool'},
'weight': 1,
'address': u'42.0.0.13'
}
}
fw_scn_config = "{\"heat_template_version\": \"2013-05-23\",\
\"description\": \"Template to deploy firewall\", \"resources\":\
{\"sc_firewall_rule3\": {\"type\": \"OS::Neutron::FirewallRule\",\
@ -531,28 +530,30 @@ class DummyDictionaries(object):
\'vm_management_ip\': u'192.168.20.138', \'provider_ptg_info\':\
[\'fa:16:3e:28:7d:b2\']}\", \"name\": \"serviceVM_infra_FW\"}}}}"
lb_scn_config = "{\"heat_template_version\": \"2013-05-23\",\
\"description\": \"Configuration for F5 Neutron Loadbalacer service\",\
lbv2_scn_config = "{\"heat_template_version\": \"2015-10-15\",\
\"description\": \"Configuration for Haproxy Neutron LB V2 service\",\
\"parameters\": {\"Subnet\": {\"type\": \"string\", \"description\":\
\"Pool Subnet CIDR, on which VIP port should be created\"},\
\"Subnet on which the load balancer will be located\"}, \
\"service_chain_metadata\": {\"type\": \"string\", \"description\":\
\"sc metadata\"}, \"vip_ip\": {\"type\": \"string\", \"description\":\
\"VIP IP Address\"}}, \"resources\": {\"LoadBalancerPool\": {\"type\":\
\"OS::Neutron::Pool\", \"properties\": {\"lb_method\":\
\"ROUND_ROBIN\", \"protocol\": \"TCP\", \"name\": \"F5 LB pool\",\
\"admin_state_up\": true, \"subnet_id\": {\"get_param\": \"Subnet\"},\
\"vip\": {\"subnet\": {\"get_param\": \"Subnet\"}, \"description\":\
{\"get_param\": \"service_chain_metadata\"}, \"admin_state_up\": true,\
\"connection_limit\": -1, \"address\": {\"get_param\": \"vip_ip\"},\
\"protocol_port\": 80, \"name\": \"LoadBalancerPool vip\"},\
\"provider\": \"F5\", \"monitors\": [{\"get_resource\":\
\"HealthMonitor\"}], \"description\": \"F5 LB pool from template\"}},\
\"HealthMonitor\": {\"type\": \"OS::Neutron::HealthMonitor\",\
\"properties\": {\"delay\": 20, \"max_retries\": 5, \"type\":\
\"PING\", \"timeout\": 10, \"admin_state_up\": true}},\
\"LoadBalancer\": {\"type\": \"OS::Neutron::LoadBalancer\",\
\"properties\": {\"protocol_port\": 80, \"pool_id\": {\"get_resource\"\
:\"LoadBalancerPool\"}}}}}"
\"VIP IP Address\"}, \"app_port\": {\"default\": 80, \"type\":\
\"number\", \"description\": \"Port used by the servers\"}, \
\"lb_port\": {\"default\": 80, \"type\": \"number\", \"description\":\
\"Port used by the load balancer\"}}, \"resources\": {\"listener\":\
{\"type\": \"OS::Neutron::LBaaS::Listener\", \"properties\":\
{\"protocol_port\": {\"get_param\": \"lb_port\"}, \"protocol\":\
\"HTTP\", \"loadbalancer\": {\"get_resource\": \"loadbalancer\"}}},\
\"loadbalancer\": {\"type\": \"OS::Neutron::LBaaS::LoadBalancer\",\
\"properties\": {\"vip_subnet\": {\"get_param\": \"Subnet\"},\
\"vip_address\": {\"get_param\": \"vip_ip\"}, \"description\":\
{\"get_param\": \"service_chain_metadata\"}, \"provider\":\
\"loadbalancerv2\"}}, \"monitor\": {\"type\":\
\"OS::Neutron::LBaaS::HealthMonitor\", \"properties\": {\"delay\": 3,\
\"max_retries\": 3, \"type\": \"HTTP\", \"pool\": {\"get_resource\":\
\"pool\"}, \"timeout\": 3}}, \"pool\": {\"type\": \
\"OS::Neutron::LBaaS::Pool\", \"properties\":\
{\"lb_algorithm\": \"ROUND_ROBIN\", \"listener\": {\"get_resource\":\
\"listener\"}, \"protocol\": \"HTTP\"}}}}"
vpn_scn_config = "{\"description\":\"Createsnewvpnservice-ike+ipsec+\
vpnservice+site-siteconnection(s)\", \"heat_template_version\
@ -594,9 +595,9 @@ class DummyDictionaries(object):
u'service_type': u'VPN'
}
lb_service_profile = {
lbv2_service_profile = {
u'service_flavor': u'haproxy',
u'service_type': u'LOADBALANCER'
u'service_type': u'LOADBALANCERV2'
}
fw_service_chain_node = {
@ -611,10 +612,10 @@ class DummyDictionaries(object):
u'config': vpn_scn_config
}
lb_service_chain_node = {
lbv2_service_chain_node = {
u'id': u'012345678919',
u'name': u'scn_lb',
u'config': lb_scn_config
u'config': lbv2_scn_config
}
service_chain_instance = {


@ -234,24 +234,6 @@ class TestHeatDriver(unittest2.TestCase):
auth_token, self.mock_dict.provider_ptg)
self.assertEqual(member_ips, expected_member_ips)
def test_generate_lb_member_template(self):
is_template_aws_version = False
member_ip = '11.0.0.4'
pool_res_name = 'HaproxyPool'
stack_template = self.mock_dict.DEFAULT_LB_CONFIG
expected_member_template = {
'type': 'OS::Neutron::PoolMember',
'properties': {
'protocol_port': 101, 'admin_state_up': True,
'pool_id': {'get_resource': 'HaproxyPool'},
'weight': 1, 'address': '11.0.0.4'
}
}
member_template = self.heat_driver_obj._generate_lb_member_template(
is_template_aws_version,
pool_res_name, member_ip, stack_template)
self.assertEqual(member_template, expected_member_template)
def test_modify_fw_resources_name(self):
is_template_aws_version = False
stack_template = copy.deepcopy(self.mock_dict.DEFAULT_FW_CONFIG)
@ -266,43 +248,24 @@ class TestHeatDriver(unittest2.TestCase):
def test_get_heat_resource_key(self):
is_template_aws_version = False
resource_name = 'OS::Neutron::Pool'
template_resource_dict = self.mock_dict.DEFAULT_LB_CONFIG['resources']
expected_heat_resource_key = 'LoadBalancerPool'
resource_name = 'OS::Neutron::Firewall'
template_resource_dict = self.mock_dict.DEFAULT_FW_CONFIG['resources']
expected_heat_resource_key = 'sc_firewall'
heat_resource_key = self.heat_driver_obj._get_heat_resource_key(
template_resource_dict, is_template_aws_version, resource_name)
self.assertEqual(heat_resource_key, expected_heat_resource_key)
def test_get_all_heat_resource_keys(self):
is_template_aws_version = False
resource_name = 'OS::Neutron::Pool'
template_resource_dict = self.mock_dict.DEFAULT_LB_CONFIG['resources']
expected_heat_resource_keys = ['LoadBalancerPool']
resource_name = 'OS::Neutron::Firewall'
template_resource_dict = self.mock_dict.DEFAULT_FW_CONFIG['resources']
expected_heat_resource_keys = ['sc_firewall']
all_heat_resource_keys = (
self.heat_driver_obj._get_all_heat_resource_keys(
template_resource_dict, is_template_aws_version,
resource_name))
self.assertEqual(all_heat_resource_keys, expected_heat_resource_keys)
@mock.patch.object(neutron_client.Client, "show_port")
@mock.patch.object(gbp_client.Client, "list_policy_targets")
def test_generate_pool_members(self, list_pt_mock_obj, show_port_mock_obj):
list_pt_mock_obj.return_value = self.mock_dict.policy_targets
show_port_mock_obj.return_value = self.mock_dict.port_info
is_template_aws_version = False
stack_template = self.mock_dict.DEFAULT_LB_CONFIG
auth_token = "81273djs138"
config_param_values = {}
expected_pool_members = self.mock_dict.pool_members
self.heat_driver_obj._generate_pool_members(
auth_token,
stack_template,
config_param_values,
self.mock_dict.provider_ptg,
is_template_aws_version)
generated_pool_members = stack_template['resources']['mem-42.0.0.13']
self.assertEqual(generated_pool_members, expected_pool_members)
def test_append_firewall_rule(self):
stack_template = copy.deepcopy(self.mock_dict.DEFAULT_FW_CONFIG)
provider_cidr = '192.169.0.0/29'
@ -429,44 +392,6 @@ class TestHeatDriver(unittest2.TestCase):
stack_template['resources']['sc_firewall_policy'],
copy.deepcopy(self.mock_dict.updated_template_sc_firewall_policy))
@mock.patch.object(gbp_client.Client, "create_policy_target")
@mock.patch.object(gbp_client.Client, "update_policy_target")
@mock.patch.object(neutron_client.Client, "list_subnets")
@mock.patch.object(neutron_client.Client, "list_pools")
@mock.patch.object(neutron_client.Client, "show_vip")
def test_create_policy_target_for_vip(self, vip, pools, subnets,
pt, pt_update):
pt.return_value = {
'policy_target': {
'name': 'service_target_provider_0132c_00b93'
}
}
subnets.return_value = self.mock_dict.subnets_info
pools.return_value = {
'pools': [{
'vip_id': '1234'
}]
}
vip.return_value = {
'vip': {
'port_id': '1234'
}
}
auth_token = 'adsdsdd'
provider_tenant_id = '8ae6701128994ab281dde6b92207bb19'
provider = self.mock_dict.provider_ptg
self.heat_driver_obj.gbp_client.get_policy_targets = (
mock.MagicMock(
return_value=self.mock_dict.policy_targets[
'policy_targets']))
self.heat_driver_obj.keystoneclient.get_admin_token = (
mock.MagicMock(return_value='token'))
self.heat_driver_obj._create_policy_target_for_vip(
auth_token, provider_tenant_id, provider,
'LOADBALANCER')
pools.assert_called_once_with(
subnet_id=[subnets.return_value['subnets'][0]['id']])
@mock.patch.object(neutron_client.Client, "list_networks")
def test_create_node_config_data_vpn(self, mock_list_networks):
self.mock_objects()
@ -621,9 +546,10 @@ class TestHeatDriver(unittest2.TestCase):
return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'})
service_details = {}
service_details['service_profile'] = self.mock_dict.lb_service_profile
service_details['service_profile'] = (
self.mock_dict.lbv2_service_profile)
service_details['servicechain_node'] = (
self.mock_dict.lb_service_chain_node)
self.mock_dict.lbv2_service_chain_node)
service_details['servicechain_instance'] = (
self.mock_dict.service_chain_instance)
service_details['policy_target_group'] = self.mock_dict.provider_ptg


@ -472,8 +472,8 @@ class TestServiceChainInstance(NFPNodeDriverTestCase):
'plumbing_type': 'endpoint'
}
node_id = self._nfp_create_profiled_servicechain_node(
service_type=constants.LOADBALANCER)['servicechain_node'][
'id']
service_type=constants.LOADBALANCERV2)[
'servicechain_node']['id']
spec = self.create_servicechain_spec(
nodes=[node_id],
expected_res_status=201)['servicechain_spec']
@ -730,7 +730,7 @@ class TestServiceChainInstance(NFPNodeDriverTestCase):
@mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
def test_policy_target_add_remove(self, plumbing_info):
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR,
insertion_mode='l3', service_flavor='haproxy')['service_profile']
node = self.create_servicechain_node(


@ -11,7 +11,7 @@ nfp_modules_path=gbpservice.nfp.orchestrator.modules
# REST - where fip access is available for OTC NFP controller
backend=rpc
# supported service vendors
supported_vendors=vyos,nfp,haproxy,haproxy_lbaasv2
supported_vendors=vyos,nfp,haproxy
monitoring_ptg_l3policy_id=l3policy-id
[PROXY_AGENT]


@ -11,7 +11,6 @@
# under the License.
FIREWALL = 'firewall'
LOADBALANCER = 'loadbalancer'
LOADBALANCERV2 = 'loadbalancerv2'
VPN = 'vpn'
GENERIC_CONFIG = 'generic_config'
@ -86,7 +85,6 @@ METADATA_SUPPORTED_ATTRIBUTES = [MAXIMUM_INTERFACES,
SUPPORTS_SHARING,
SUPPORTS_HOTPLUG]
LOADBALANCER_RPC_API_VERSION = "2.0"
LOADBALANCERV2_RPC_API_VERSION = "1.0"
HEALTHMONITOR_RESOURCE = 'healthmonitor'
@ -95,8 +93,7 @@ ROUTES_RESOURCE = 'routes'
MANAGEMENT_INTERFACE_NAME = 'mgmt_interface'
VYOS_VENDOR = 'vyos'
HAPROXY_VENDOR = 'haproxy'
HAPROXY_LBAASV2 = 'haproxy_lbaasv2'
HAPROXY_LBAASV2 = 'haproxy'
NFP_VENDOR = 'nfp'
L3_INSERTION_MODE = "l3"


@ -74,7 +74,7 @@ def _fill_service_specific_info(nfd, device_data, **kwargs):
management_network['type'] = const.MANAGEMENT
management_network['gw_ip'] = device_data.get('mgmt_gw_ip')
nfd['networks'].append(management_network)
elif resource_type in [const.LOADBALANCER, const.LOADBALANCERV2]:
elif resource_type == const.LOADBALANCERV2:
nfd['svc_mgmt_fixed_ip'] = device_data.get('floating_ip')
provider_port['mac'] = device_data.get('provider_interface_mac')
return nfd
@ -93,7 +93,7 @@ def get_network_function_info(device_data, resource_type):
'''
SERVICE_TYPES = [const.FIREWALL, const.VPN,
const.LOADBALANCER, const.LOADBALANCERV2]
const.LOADBALANCERV2]
config = copy.deepcopy(NFP_DATA_FORMAT)
mgmt_ip = device_data.get('mgmt_ip_address')
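Note: a condensed standalone sketch of the surviving v2 branch of _fill_service_specific_info (simplified from the hunk above):

def fill_lbv2_info(nfd, provider_port, device_data):
    # The device's floating IP becomes the service management IP and
    # the provider port carries the provider interface MAC.
    nfd['svc_mgmt_fixed_ip'] = device_data.get('floating_ip')
    provider_port['mac'] = device_data.get('provider_interface_mac')
    return nfd

port = {}
fill_lbv2_info({'networks': []}, port,
               {'floating_ip': '11.0.0.37',
                'provider_interface_mac': 'fa:16:3e:d9:4c:33'})
assert port['mac'] == 'fa:16:3e:d9:4c:33'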


@ -241,8 +241,7 @@ class HeatDriver(object):
db_session = db_api.get_session()
service_details = self.get_service_details(network_function_details)
service_profile = service_details['service_profile']
if service_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
if service_profile['service_type'] == pconst.LOADBALANCERV2:
network_function_instance = network_function_details.get(
'network_function_instance')
if network_function_instance:
@ -254,16 +253,7 @@ class HeatDriver(object):
return
def _post_stack_create(self, nfp_context):
service_details = self.get_service_details_from_nfp_context(
nfp_context)
service_type = service_details['service_details']['service_type']
if service_type in [pconst.LOADBALANCER]:
auth_token = nfp_context['log_context']['auth_token']
provider_tenant_id = nfp_context['tenant_id']
provider = service_details['provider_ptg']
self._create_policy_target_for_vip(
auth_token, provider_tenant_id, provider, service_type)
return
def _get_provider_ptg_info(self, auth_token, sci_id):
with nfp_ctx_mgr.GBPContextManager as gcm:
@ -287,7 +277,7 @@ class HeatDriver(object):
service_profile['service_flavor'])
base_mode_support = (True if service_details['device_type'] == 'None'
else False)
if (service_type in [pconst.LOADBALANCER, pconst.LOADBALANCERV2]) and (
if (service_type == pconst.LOADBALANCERV2) and (
not base_mode_support):
provider = self._get_provider_ptg_info(
auth_token,
@ -333,19 +323,7 @@ class HeatDriver(object):
"policy target group %(provider_ptg)s"),
{"provider_ptg": provider})
return lb_vip, lb_vip_name
if service_type == pconst.LOADBALANCER:
with nfp_ctx_mgr.NeutronContextManager as ncm:
lb_pool_ids = ncm.retry(
self.neutron_client.get_pools,
auth_token,
filters={'subnet_id': [provider_subnet['id']]})
if lb_pool_ids and lb_pool_ids[0]['vip_id']:
lb_vip = ncm.retry(
self.neutron_client.get_vip,
auth_token, lb_pool_ids[0]['vip_id'])['vip']
lb_vip_name = ("service_target_vip_pt" +
lb_pool_ids[0]['vip_id'])
elif service_type == pconst.LOADBALANCERV2:
if service_type == pconst.LOADBALANCERV2:
with nfp_ctx_mgr.NeutronContextManager as ncm:
loadbalancers = ncm.retry(
self.neutron_client.get_loadbalancers,
@ -377,54 +355,6 @@ class HeatDriver(object):
service_targets.append(policy_target)
return service_targets
def _create_policy_target_for_vip(self, auth_token,
provider_tenant_id,
provider, service_type):
with nfp_ctx_mgr.KeystoneContextManager as kcm:
admin_token = kcm.retry(
self.keystoneclient.get_admin_token, tries=3)
lb_vip, vip_name = self._get_lb_vip(auth_token, provider, service_type)
service_targets = self._get_lb_service_targets(admin_token, provider)
if not (lb_vip and service_targets):
return None
with nfp_ctx_mgr.GBPContextManager as gcm:
vip_pt = gcm.retry(self.gbp_client.create_policy_target,
auth_token, provider_tenant_id, provider['id'],
vip_name, lb_vip['port_id'])
# Set cluster_id as vip_pt
for service_target in service_targets:
service_target_id = service_target['id']
service_target_port_id = service_target['port_id']
policy_target_info = {'cluster_id': vip_pt['id']}
with nfp_ctx_mgr.GBPContextManager as gcm:
gcm.retry(self.gbp_client.update_policy_target,
admin_token,
service_target_id, policy_target_info)
with nfp_ctx_mgr.NeutronContextManager as ncm:
service_target_port = ncm.retry(self.neutron_client.get_port,
admin_token,
service_target_port_id)['port']
vip_ip = service_target_port[
'allowed_address_pairs'][0]['ip_address']
# Update allowed address pairs entry came through cluster_id
# updation with provider_port mac address.
updated_port = {
'allowed_address_pairs': [
{
'ip_address': vip_ip,
'mac_address': service_target_port['mac_address']}]
}
with nfp_ctx_mgr.NeutronContextManager as ncm:
ncm.retry(self.neutron_client.update_port,
admin_token, service_target_port_id,
**updated_port)
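Note: for context, the v1-only VIP plumbing deleted here condenses to the following standalone sketch (simplified; the gbp and neutron arguments are placeholders for the GBP and Neutron context managers above):

def plumb_vip(gbp, neutron, token, service_targets, vip_pt_id):
    for st in service_targets:
        # Tag each service target with the VIP's policy target.
        gbp.update_policy_target(token, st['id'],
                                 {'cluster_id': vip_pt_id})
        port = neutron.get_port(token, st['port_id'])['port']
        vip_ip = port['allowed_address_pairs'][0]['ip_address']
        # Re-bind the VIP IP to the port's own MAC address.
        neutron.update_port(
            token, st['port_id'],
            allowed_address_pairs=[{'ip_address': vip_ip,
                                    'mac_address': port['mac_address']}])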
def _update_policy_targets_for_vip(self, auth_token,
provider_tenant_id,
provider, service_type):
@ -490,53 +420,6 @@ class HeatDriver(object):
member_addresses.append(ip_address)
return member_addresses
def _generate_lb_member_template(self, is_template_aws_version,
pool_res_name, member_ip, stack_template):
type_key = 'Type' if is_template_aws_version else 'type'
properties_key = ('Properties' if is_template_aws_version
else 'properties')
resources_key = 'Resources' if is_template_aws_version else 'resources'
res_key = 'Ref' if is_template_aws_version else 'get_resource'
lbaas_pool_key = self._get_heat_resource_key(
stack_template[resources_key],
is_template_aws_version,
"OS::Neutron::Pool")
lbaas_vip_key = self._get_heat_resource_key(
stack_template[resources_key],
is_template_aws_version,
"OS::Neutron::LoadBalancer")
vip_port = stack_template[resources_key][lbaas_pool_key][
properties_key]['vip']['protocol_port']
member_port = stack_template[resources_key][lbaas_vip_key][
properties_key].get('protocol_port')
protocol_port = member_port if member_port else vip_port
return {type_key: "OS::Neutron::PoolMember",
properties_key: {
"address": member_ip,
"admin_state_up": True,
"pool_id": {res_key: pool_res_name},
"protocol_port": protocol_port,
"weight": 1}}
def _modify_lb_resources_name(self, stack_template, provider_ptg,
is_template_aws_version):
resources_key = 'Resources' if is_template_aws_version else 'resources'
type_key = 'Type' if is_template_aws_version else 'type'
properties_key = ('Properties' if is_template_aws_version
else 'properties')
for resource in stack_template[resources_key]:
if stack_template[resources_key][resource][type_key] == (
'OS::Neutron::Pool'):
# Include provider name in Pool, VIP name.
ptg_name = '-' + provider_ptg['name']
stack_template[resources_key][resource][
properties_key]['name'] += ptg_name
stack_template[resources_key][resource][
properties_key]['vip']['name'] += ptg_name
def _generate_lbv2_member_template(self, is_template_aws_version,
member_ip, stack_template,
pool_name="pool"):
@ -615,26 +498,6 @@ class HeatDriver(object):
stack_template[resources_key][member_name] = member_template
prev_member = member_name
def _generate_pool_members(self, auth_token, stack_template,
config_param_values, provider_ptg,
is_template_aws_version):
resources_key = 'Resources' if is_template_aws_version else 'resources'
self._modify_lb_resources_name(
stack_template, provider_ptg, is_template_aws_version)
member_ips = self._get_member_ips(auth_token, provider_ptg)
if not member_ips:
return
pool_res_name = self._get_heat_resource_key(
stack_template[resources_key],
is_template_aws_version,
"OS::Neutron::Pool")
for member_ip in member_ips:
member_name = 'mem-' + member_ip
stack_template[resources_key][member_name] = (
self._generate_lb_member_template(
is_template_aws_version, pool_res_name,
member_ip, stack_template))
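Note: the removed v1 generator keyed each member resource as 'mem-<ip>' in the stack template (cf. _generate_pool_members above); condensed into a standalone sketch:

member_ips = ['42.0.0.11', '42.0.0.13']
stack_template = {'resources': {}}
for member_ip in member_ips:
    # One OS::Neutron::PoolMember resource per member IP.
    stack_template['resources']['mem-' + member_ip] = {
        'type': 'OS::Neutron::PoolMember',
        'properties': {'address': member_ip, 'weight': 1,
                       'admin_state_up': True}}
assert sorted(stack_template['resources']) == ['mem-42.0.0.11',
                                               'mem-42.0.0.13']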
def _get_consumers_for_chain(self, auth_token, provider):
filters = {'id': provider['provided_policy_rule_sets']}
with nfp_ctx_mgr.GBPContextManager as gcm:
@ -932,13 +795,7 @@ class HeatDriver(object):
else:
return
if service_type == pconst.LOADBALANCER:
nf_desc = str((SC_METADATA % (service_chain_instance_id,
mgmt_ip,
provider_port_mac,
network_function_id,
service_vendor)))
elif service_type == pconst.LOADBALANCERV2:
if service_type == pconst.LOADBALANCERV2:
nf_desc = str((SC_METADATA % (service_chain_instance_id,
mgmt_ip,
provider_port_mac,
@ -1070,23 +927,7 @@ class HeatDriver(object):
if not base_mode_support:
provider_subnet = service_details['provider_subnet']
if service_type == pconst.LOADBALANCER:
self._generate_pool_members(
auth_token, stack_template, config_param_values,
provider, is_template_aws_version)
config_param_values['Subnet'] = provider_subnet['id']
config_param_values['service_chain_metadata'] = ""
if not base_mode_support:
config_param_values[
'service_chain_metadata'] = str(common_desc)
lb_pool_key = self._get_heat_resource_key(
stack_template[resources_key],
is_template_aws_version,
'OS::Neutron::Pool')
stack_template[resources_key][lb_pool_key][properties_key][
'description'] = str(common_desc)
elif service_type == pconst.LOADBALANCERV2:
if service_type == pconst.LOADBALANCERV2:
self._generate_lbaasv2_pool_members(
auth_token, stack_template, config_param_values,
provider, is_template_aws_version)
@ -1266,29 +1107,7 @@ class HeatDriver(object):
provider_cidr = ''
service_vendor = service_details['service_vendor']
if service_type == pconst.LOADBALANCER:
self._generate_pool_members(
auth_token, stack_template, config_param_values,
provider, is_template_aws_version)
config_param_values['Subnet'] = provider_subnet['id']
config_param_values['service_chain_metadata'] = ""
if not base_mode_support:
config_param_values[
'service_chain_metadata'] = str(common_desc)
nf_desc = str((SC_METADATA % (service_chain_instance['id'],
mgmt_ip,
provider_port_mac,
network_function['id'],
service_vendor)))
lb_pool_key = self._get_heat_resource_key(
stack_template[resources_key],
is_template_aws_version,
'OS::Neutron::Pool')
stack_template[resources_key][lb_pool_key][properties_key][
'description'] = str(common_desc)
elif service_type == pconst.LOADBALANCERV2:
if service_type == pconst.LOADBALANCERV2:
self._generate_lbaasv2_pool_members(
auth_token, stack_template, config_param_values,
provider, is_template_aws_version)
@ -2031,8 +1850,7 @@ class HeatDriver(object):
mgmt_ip = service_details['mgmt_ip']
stack_id = service_details['config_policy_id']
if service_profile['service_type'] in [pconst.LOADBALANCER,
pconst.LOADBALANCERV2]:
if service_profile['service_type'] == pconst.LOADBALANCERV2:
if self._is_service_target(policy_target):
return
auth_token, resource_owner_tenant_id = (


@ -48,7 +48,7 @@ oslo_config.CONF.register_opts(openstack_opts, "nfp_keystone_authtoken")
nfp_orchestrator_opts = [
oslo_config.ListOpt(
'supported_vendors',
default=[nfp_constants.VYOS_VENDOR, nfp_constants.HAPROXY_VENDOR,
default=[nfp_constants.VYOS_VENDOR,
nfp_constants.HAPROXY_LBAASV2, nfp_constants.NFP_VENDOR],
help="Supported service vendors for nfp"),
oslo_config.StrOpt('monitoring_ptg_l3policy_id',