LBaaS V2 resource data format change

Add code in the LBaaS V2 driver to support the resource data format
change: the driver now consumes the resource data sent by the
orchestration side, instead of parsing metadata out of the Neutron
description field.

Change-Id: Ib94bb125445a4dee597ca196ab74b8c5d6f7c848
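The substance of the change, condensed: the driver used to recover amphora connection details by literal_eval'ing the Neutron description field, and now asks a DataParser to pull them out of the resource data that orchestration attaches to the context. A minimal before/after sketch; the 'resource_data' key and its layout below are assumptions for illustration only, standing in for the real gbpservice data_parser.DataParser:

    import ast

    def amphora_info_old(description):
        # Old style: metadata serialized into the Neutron 'description' field.
        sc_metadata = ast.literal_eval(description)
        return (sc_metadata.get('floating_ip'),
                sc_metadata.get('network_function_id'))

    def amphora_info_new(context, description):
        # New style (sketch): management details come from the resource data
        # sent by orchestration. The real extraction in the driver is
        # data_parser.DataParser().parse_data(common_const.LOADBALANCERV2,
        # context); this nested dict is a hypothetical stand-in.
        rdata = context.get('resource_data', {})
        sc_metadata = ast.literal_eval(description)  # still holds the nf id
        return (rdata.get('mgmt_ip'),
                sc_metadata.get('network_function_id'))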
@@ -27,6 +27,7 @@ from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
 from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
     v2.haproxy.rest_api_driver import HaproxyAmphoraLoadBalancerDriver
 from gbpservice.contrib.nfp.configurator.lib import constants as common_const
+from gbpservice.contrib.nfp.configurator.lib import data_parser
 from gbpservice.contrib.nfp.configurator.lib import lb_constants
 from gbpservice.contrib.nfp.configurator.lib import lbv2_constants
 from gbpservice.nfp.common import exceptions
@@ -61,7 +62,8 @@ class LbGenericConfigDriver(object):
            send by neutron plugin
         Returns: SUCCESS/Failure message with reason.
         """
 
+        resource_data = self.parse.parse_data(
+            common_const.INTERFACES, resource_data)
         mgmt_ip = resource_data['mgmt_ip']
 
         try:
@@ -320,11 +322,12 @@ class OctaviaDataModelBuilder(object):
         return ret
 
 
 @base_driver.set_class_attr(
     SERVICE_TYPE=lbv2_constants.SERVICE_TYPE,
     SERVICE_VENDOR=haproxy_driver_constants.SERVICE_VENDOR)
 class HaproxyLoadBalancerDriver(LbGenericConfigDriver,
                                 base_driver.BaseDriver):
 
     service_type = lbv2_constants.SERVICE_TYPE
     service_vendor = haproxy_driver_constants.SERVICE_VENDOR
     # amphorae = {"loadbalancer_id": [o_data_models.Amphora(
     #     lb_network_ip, id, status)]}
     amphorae = {}
@@ -340,7 +343,7 @@ class HaproxyLoadBalancerDriver(LbGenericConfigDriver,
         super(HaproxyLoadBalancerDriver, self).__init__()
         self.conf = conf
         self.port = haproxy_driver_constants.CONFIGURATION_SERVER_PORT
-
+        self.parse = data_parser.DataParser()
         self.amphora_driver = HaproxyAmphoraLoadBalancerDriver()
         self.cert_manager = LocalCertManager()
 
@@ -359,17 +362,17 @@ class HaproxyLoadBalancerDriver(LbGenericConfigDriver,
     def get_amphora(self, loadbalancer_id):
         return self.amphorae.get(loadbalancer_id)
 
-    def add_amphora(self, loadbalancer_id, description,
+    def add_amphora(self, context, loadbalancer_id, description,
                     status=constants.ACTIVE):
         sc_metadata = ast.literal_eval(description)
-        if not (sc_metadata.get('floating_ip') and
-                sc_metadata.get('network_function_id')):
+        rdata = self.parse.parse_data(common_const.LOADBALANCERV2, context)
+        if not (rdata['mgmt_ip'] and sc_metadata.get('network_function_id')):
             raise exceptions.IncompleteData(
                 "Amphora information is missing")
         if not self.get_amphora(loadbalancer_id):
             # REVISIT(jiahao): use network_function_id as amphora id
             amp = o_data_models.Amphora(
-                lb_network_ip=sc_metadata['floating_ip'],
+                lb_network_ip=rdata['mgmt_ip'],
                 id=sc_metadata['network_function_id'],
                 status=status)
             self.amphorae[loadbalancer_id] = [amp]
@@ -379,8 +382,9 @@ class HaproxyCommonManager(object):
 
     def __init__(self, driver):
         self.driver = driver
+        self.parse = data_parser.DataParser()
 
-    def _deploy(self, obj):
+    def _deploy(self, context, obj):
         pass
 
     def create(self, context, obj):
@@ -456,10 +460,10 @@ class HaproxyLoadBalancerManager(HaproxyCommonManager):
             raise exceptions.IncompleteData(
                 "VIP subnet information is not found")
 
-        sc_metadata = ast.literal_eval(
-            loadbalancer_dict['description'])
+        sc_metadata = self.parse.parse_data(
+            common_const.LOADBALANCERV2, context)
         vrrp_port = n_data_models.Port(
-            mac_address=sc_metadata['provider_interface_mac'])
+            mac_address=sc_metadata['provider_mac'])
         if vrrp_port is None:
             raise exceptions.IncompleteData(
                 "VRRP port information is not found")
@@ -473,7 +477,7 @@ class HaproxyLoadBalancerManager(HaproxyCommonManager):
         return amphorae_network_config
 
     def create(self, context, loadbalancer):
-        self.driver.add_amphora(loadbalancer['id'],
+        self.driver.add_amphora(context, loadbalancer['id'],
                                 loadbalancer['description'])
         loadbalancer_o_obj = self.driver.o_models_builder.\
             get_loadbalancer_octavia_model(loadbalancer)
@@ -492,7 +496,7 @@ class HaproxyLoadBalancerManager(HaproxyCommonManager):
         LOG.info(msg)
 
     def update(self, context, old_loadbalancer, loadbalancer):
-        self.driver.add_amphora(loadbalancer['id'],
+        self.driver.add_amphora(context, loadbalancer['id'],
                                 loadbalancer['description'])
         loadbalancer_o_obj = self.driver.o_models_builder.\
             get_loadbalancer_octavia_model(loadbalancer)
@@ -546,8 +550,8 @@ class HaproxyLoadBalancerManager(HaproxyCommonManager):
 
 class HaproxyListenerManager(HaproxyCommonManager):
 
-    def _deploy(self, listener):
-        self.driver.add_amphora(listener['loadbalancer_id'],
+    def _deploy(self, context, listener):
+        self.driver.add_amphora(context, listener['loadbalancer_id'],
                                 listener['description'])
         listener_o_obj = self.driver.o_models_builder.\
             get_listener_octavia_model(listener)
@@ -557,17 +561,17 @@ class HaproxyListenerManager(HaproxyCommonManager):
         self.clean_certs(listener['tenant_id'], cert_ids)
 
     def create(self, context, listener):
-        self._deploy(listener)
+        self._deploy(context, listener)
         msg = ("LB %s, created %s" % (self.__class__.__name__, listener['id']))
         LOG.info(msg)
 
     def update(self, context, old_listener, listener):
-        self._deploy(listener)
+        self._deploy(context, listener)
         msg = ("LB %s, updated %s" % (self.__class__.__name__, listener['id']))
         LOG.info(msg)
 
     def delete(self, context, listener):
-        self.driver.add_amphora(listener['loadbalancer_id'],
+        self.driver.add_amphora(context, listener['loadbalancer_id'],
                                 listener['description'])
         listener_o_obj = self.driver.o_models_builder.\
             get_listener_octavia_model(listener)
@@ -586,8 +590,8 @@ class HaproxyPoolManager(HaproxyCommonManager):
             if default_pool['id'] == pool_id:
                 pool['listener']['default_pool'] = None
 
-    def _deploy(self, pool):
-        self.driver.add_amphora(pool['loadbalancer_id'],
+    def _deploy(self, context, pool):
+        self.driver.add_amphora(context, pool['loadbalancer_id'],
                                 pool['description'])
         pool_o_obj = self.driver.o_models_builder.\
             get_pool_octavia_model(pool)
@@ -601,26 +605,26 @@ class HaproxyPoolManager(HaproxyCommonManager):
         self.clean_certs(pool['tenant_id'], cert_ids)
 
     def create(self, context, pool):
-        self._deploy(pool)
+        self._deploy(context, pool)
         msg = ("LB %s, created %s" % (self.__class__.__name__, pool['id']))
         LOG.info(msg)
 
     def update(self, context, old_pool, pool):
-        self._deploy(pool)
+        self._deploy(context, pool)
         msg = ("LB %s, updated %s" % (self.__class__.__name__, pool['id']))
         LOG.info(msg)
 
     def delete(self, context, pool):
         self._remove_pool(pool)
-        self._deploy(pool)
+        self._deploy(context, pool)
         msg = ("LB %s, deleted %s" % (self.__class__.__name__, pool['id']))
         LOG.info(msg)
 
 
 class HaproxyMemberManager(HaproxyCommonManager):
 
-    def _deploy(self, member):
-        self.driver.add_amphora(member['pool']['loadbalancer_id'],
+    def _deploy(self, context, member):
+        self.driver.add_amphora(context, member['pool']['loadbalancer_id'],
                                 member['description'])
         member_o_obj = self.driver.o_models_builder.\
             get_member_octavia_model(member)
@@ -642,26 +646,26 @@ class HaproxyMemberManager(HaproxyCommonManager):
             break
 
     def create(self, context, member):
-        self._deploy(member)
+        self._deploy(context, member)
         msg = ("LB %s, created %s" % (self.__class__.__name__, member['id']))
         LOG.info(msg)
 
     def update(self, context, old_member, member):
-        self._deploy(member)
+        self._deploy(context, member)
         msg = ("LB %s, updated %s" % (self.__class__.__name__, member['id']))
         LOG.info(msg)
 
     def delete(self, context, member):
         self._remove_member(member)
-        self._deploy(member)
+        self._deploy(context, member)
         msg = ("LB %s, deleted %s" % (self.__class__.__name__, member['id']))
         LOG.info(msg)
 
 
 class HaproxyHealthMonitorManager(HaproxyCommonManager):
 
-    def _deploy(self, hm):
-        self.driver.add_amphora(hm['pool']['loadbalancer_id'],
+    def _deploy(self, context, hm):
+        self.driver.add_amphora(context, hm['pool']['loadbalancer_id'],
                                 hm['description'])
         hm_o_obj = self.driver.o_models_builder.\
             get_healthmonitor_octavia_model(hm)
@@ -680,17 +684,17 @@ class HaproxyHealthMonitorManager(HaproxyCommonManager):
         default_pool['healthmonitor'] = None
 
     def create(self, context, hm):
-        self._deploy(hm)
+        self._deploy(context, hm)
         msg = ("LB %s, created %s" % (self.__class__.__name__, hm['id']))
         LOG.info(msg)
 
     def update(self, context, old_hm, hm):
-        self._deploy(hm)
+        self._deploy(context, hm)
         msg = ("LB %s, updated %s" % (self.__class__.__name__, hm['id']))
         LOG.info(msg)
 
     def delete(self, context, hm):
         self._remove_healthmonitor(hm)
-        self._deploy(hm)
+        self._deploy(context, hm)
         msg = ("LB %s, deleted %s" % (self.__class__.__name__, hm['id']))
         LOG.info(msg)
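Every manager in this file follows the same mechanical pattern: _deploy (and the delete paths) gain a context parameter and thread it through to add_amphora, which in turn hands it to the parser. Condensed into one illustrative class, not the actual driver code (member and health-monitor managers reach the loadbalancer id via obj['pool']):

    class ManagerPatternSketch(object):
        # Illustrates the context-threading applied above to the
        # listener, pool, member, and health-monitor managers.

        def __init__(self, driver):
            self.driver = driver

        def _deploy(self, context, obj):
            # 'context' now rides along so add_amphora can feed it to the
            # DataParser instead of literal_eval'ing obj['description'].
            self.driver.add_amphora(context, obj['loadbalancer_id'],
                                    obj['description'])

        def create(self, context, obj):
            self._deploy(context, obj)

        def update(self, context, old_obj, obj):
            self._deploy(context, obj)

        def delete(self, context, obj):
            self._deploy(context, obj)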
@@ -468,7 +468,8 @@ class TestHeatDriver(unittest.TestCase):
         self.heat_driver_obj.keystoneclient.get_admin_token = (
             mock.MagicMock(return_value='token'))
         self.heat_driver_obj._create_policy_target_for_vip(
-            auth_token, provider_tenant_id, provider)
+            auth_token, provider_tenant_id, provider,
+            'LOADBALANCER')
         pools.assert_called_once_with(
             subnet_id=[subnets.return_value['subnets'][0]['id']])
@@ -252,13 +252,13 @@ class HeatDriver(object):
             nfp_context)
         service_type = service_details['service_details']['service_type']
 
-        if service_type in [pconst.LOADBALANCER]:
+        if service_type in [pconst.LOADBALANCER, pconst.LOADBALANCERV2]:
             logging_context = nfp_logging.get_logging_context()
             auth_token = logging_context['auth_token']
             provider_tenant_id = nfp_context['tenant_id']
             provider = service_details['provider_ptg']
             self._create_policy_target_for_vip(
-                auth_token, provider_tenant_id, provider)
+                auth_token, provider_tenant_id, provider, service_type)
 
     def _get_provider_ptg_info(self, auth_token, sci_id):
         servicechain_instance = self.gbp_client.get_servicechain_instance(
@@ -278,13 +278,13 @@ class HeatDriver(object):
             service_profile['service_flavor'])
         base_mode_support = (True if service_details['device_type'] == 'None'
                              else False)
-        if (service_type in [pconst.LOADBALANCER]) and (
+        if (service_type in [pconst.LOADBALANCER, pconst.LOADBALANCERV2]) and (
                 not base_mode_support):
             provider = self._get_provider_ptg_info(auth_token,
                 network_function['service_chain_id'])
             provider_tenant_id = provider['tenant_id']
             self._update_policy_targets_for_vip(
-                auth_token, provider_tenant_id, provider)
+                auth_token, provider_tenant_id, provider, service_type)
 
     def _post_stack_cleanup(self, network_function):
         #TODO(ashu): In post stack cleanup, need to delete vip pt, currently
@@ -303,7 +303,7 @@ class HeatDriver(object):
 
         return vip_pt
 
-    def _get_lb_vip(self, auth_token, provider):
+    def _get_lb_vip(self, auth_token, provider, service_type):
         provider_subnet = None
         lb_vip = None
         lb_vip_name = None
@@ -315,7 +315,12 @@ class HeatDriver(object):
             if not subnet['name'].startswith(APIC_OWNED_RES):
                 provider_subnet = subnet
                 break
-        if provider_subnet:
+        if not provider_subnet:
+            LOG.error(_LE("Unable to get provider subnet for provider "
+                          "policy target group %(provider_ptg)s") %
+                      {"provider_ptg": provider})
+            return lb_vip, lb_vip_name
+        if service_type == pconst.LOADBALANCER:
             lb_pool_ids = self.neutron_client.get_pools(
                 auth_token,
                 filters={'subnet_id': [provider_subnet['id']]})
@@ -324,16 +329,29 @@ class HeatDriver(object):
                 auth_token, lb_pool_ids[0]['vip_id'])['vip']
             lb_vip_name = ("service_target_vip_pt" +
                            lb_pool_ids[0]['vip_id'])
+        elif service_type == pconst.LOADBALANCERV2:
+            loadbalancers = self.neutron_client.get_loadbalancers(
+                auth_token,
+                filters={'vip_subnet_id': [provider_subnet['id']]})
+            if loadbalancers:
+                loadbalancer = loadbalancers[0]
+                lb_vip = {}
+                lb_vip['ip_address'] = loadbalancer['vip_address']
+                lb_vip['port_id'] = loadbalancer['vip_port_id']
+                # lbaasv2 dont have vip resource, so considering loadbalancer
+                # id as vip_name
+                lb_vip_name = 'vip-' + loadbalancer['id']
         return lb_vip, lb_vip_name
 
     def _create_policy_target_for_vip(self, auth_token,
-                                      provider_tenant_id, provider):
+                                      provider_tenant_id,
+                                      provider, service_type):
         provider_pt_id = ''
         admin_token = self.keystoneclient.get_admin_token()
-        lb_vip, vip_name = self._get_lb_vip(auth_token, provider)
+        lb_vip, vip_name = self._get_lb_vip(auth_token, provider, service_type)
         provider_pt = self._get_provider_pt(admin_token, provider)
-        if not provider_pt:
-            return
+        if not (lb_vip and provider_pt):
+            return None
         provider_pt_id = provider_pt['id']
         provider_port_id = provider_pt['port_id']
 
@@ -360,13 +378,15 @@ class HeatDriver(object):
             admin_token, provider_pt['port_id'], **updated_port)
 
     def _update_policy_targets_for_vip(self, auth_token,
-                                       provider_tenant_id, provider):
+                                       provider_tenant_id,
+                                       provider, service_type):
         provider_pt_id = ''
         admin_token = self.keystoneclient.get_admin_token()
-        lb_vip, vip_name = self._get_lb_vip(auth_token, provider)
+        lb_vip, vip_name = self._get_lb_vip(auth_token, provider, service_type)
         provider_pt = self._get_provider_pt(admin_token, provider)
-        if provider_pt:
-            provider_pt_id = provider_pt['id']
+        if not (lb_vip and provider_pt):
+            return None
+        provider_pt_id = provider_pt['id']
 
         policy_target_info = {'cluster_id': ''}
         vip_pt = self._get_vip_pt(auth_token, lb_vip.get('port_id'))
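With service_type threaded down from both call sites, _get_lb_vip dispatches per LBaaS generation: v1 resolves the VIP through the pool, while v2, which has no standalone VIP resource, synthesizes a vip-like dict from the loadbalancer itself. A standalone sketch of that dispatch; 'neutron' is a stand-in for any object exposing the three lookup calls used above, and the string constants stand in for the pconst values:

    LOADBALANCER = 'LOADBALANCER'
    LOADBALANCERV2 = 'LOADBALANCERV2'

    def get_lb_vip_sketch(neutron, token, subnet_id, service_type):
        # Mirrors the branching in _get_lb_vip above.
        lb_vip, lb_vip_name = None, None
        if service_type == LOADBALANCER:
            # v1: pool on the provider subnet -> vip resource.
            pools = neutron.get_pools(
                token, filters={'subnet_id': [subnet_id]})
            if pools:
                lb_vip = neutron.get_vip(token, pools[0]['vip_id'])['vip']
                lb_vip_name = 'service_target_vip_pt' + pools[0]['vip_id']
        elif service_type == LOADBALANCERV2:
            # v2: no vip resource; build one from the loadbalancer.
            lbs = neutron.get_loadbalancers(
                token, filters={'vip_subnet_id': [subnet_id]})
            if lbs:
                lb_vip = {'ip_address': lbs[0]['vip_address'],
                          'port_id': lbs[0]['vip_port_id']}
                lb_vip_name = 'vip-' + lbs[0]['id']
        return lb_vip, lb_vip_name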
@@ -837,6 +837,29 @@ class NeutronClient(OpenstackApi):
             LOG.error(err)
             raise Exception(err)
 
+    def get_loadbalancers(self, token, filters=None):
+        """ List Loadbalancers
+
+        :param token: A scoped_token
+        :param filters: Parameters for list filter
+        example for filter: ?tenant_id=%s&id=%s
+
+        :return: Loadbalancers List
+
+        """
+        try:
+            neutron = neutron_client.Client(token=token,
+                                            endpoint_url=self.network_service)
+            loadbalancers = neutron.list_loadbalancers(**filters).get(
+                'loadbalancers', [])
+            return loadbalancers
+        except Exception as ex:
+            err = ("Failed to read pool list from"
+                   " Openstack Neutron service's response"
+                   " KeyError :: %s" % (ex))
+            LOG.error(err)
+            raise Exception(err)
+
     def get_vip(self, token, vip_id):
         """ Get vip details
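For reference, the new get_loadbalancers helper is a thin wrapper over python-neutronclient's LBaaS v2 listing call. A hedged usage sketch: the token and endpoint are placeholders (in the driver they come from keystone and self.network_service), and the `filters or {}` guard is an extra safety this sketch adds, since the wrapper itself expands **filters directly:

    from neutronclient.v2_0 import client as neutron_client

    def list_lbs_on_subnet(token, endpoint_url, vip_subnet_id):
        # Placeholder credentials for illustration only.
        neutron = neutron_client.Client(token=token,
                                        endpoint_url=endpoint_url)
        filters = {'vip_subnet_id': [vip_subnet_id]}
        # list_loadbalancers returns {'loadbalancers': [...]} for LBaaS v2.
        return neutron.list_loadbalancers(**(filters or {})).get(
            'loadbalancers', [])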