Add support for multiple L4 protocols within the same LB

This change introduces a way of providing multiple listeners and pools
on the same Load Balancer, but with different L4 protocols.

When more than one unique protocol is defined across Octavia pools
or listeners, the OVN driver now adds a separate Load_Balancer row
per protocol, all sharing the same name. The OVN Load_Balancer row
to use is selected by its protocol key.
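
For illustration, a minimal Python sketch (hypothetical data, not
driver code) of how the per-protocol rows are keyed:

    listeners = [{'protocol': 'TCP', 'port': 80},
                 {'protocol': 'UDP', 'port': 53}]
    ovn_rows = {}
    for l in listeners:
        # One OVN Load_Balancer row per L4 protocol, all rows named
        # after the Octavia loadbalancer id.
        key = ('lb-id-1', l['protocol'].lower())
        ovn_rows.setdefault(key, []).append(l['port'])
    # ovn_rows == {('lb-id-1', 'tcp'): [80], ('lb-id-1', 'udp'): [53]}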

This change also makes it possible to expose two listeners on the
same port but with different L4 protocols. A common use case is a
DNS service that listens on both UDP and TCP sockets.
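
A rough sketch of that use case, again with hypothetical data:

    # Two listeners on the same port, differing only in L4 protocol.
    dns_listeners = [{'protocol': 'TCP', 'protocol_port': 53},
                     {'protocol': 'UDP', 'protocol_port': 53}]
    # Each one lands in a different per-protocol Load_Balancer row,
    # so the port-53 VIP key never collides between TCP and UDP.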

Partial-Bug: #1789157
Change-Id: I2ee13b75713e7538d1b4f70207a90a95315eb914
Maciej Józefczyk 2019-12-17 12:37:48 +00:00
parent ec629c2e0c
commit 15260b7439
4 changed files with 1125 additions and 430 deletions


@@ -237,6 +237,12 @@ class OvnProviderHelper(object):
REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip,
}
@staticmethod
def _is_lb_empty(external_ids):
"""Check if there is no pool or listener defined."""
return not any([k.startswith('listener') or k.startswith('pool')
for k in external_ids])
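# Example (hypothetical values): only listener_*/pool_* keys make an
# LB non-empty; plain VIP metadata does not.
#   _is_lb_empty({'neutron:vip': '10.0.0.4'})           -> True
#   _is_lb_empty({'listener_<uuid>': '80:pool_<uuid>'}) -> False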
def _check_and_set_ssl_files(self):
# TODO(reedip): Make ovsdb_monitor's _check_and_set_ssl_files() public
# This is a copy of ovsdb_monitor._check_and_set_ssl_files
@@ -304,6 +310,25 @@ class OvnProviderHelper(object):
ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY))
return router, network
def _clean_lb_if_empty(self, ovn_lb, lb_id, external_ids):
commands = []
if OvnProviderHelper._is_lb_empty(external_ids):
# Check if this is the only OVN LB defined for this LB id.
# If so - leave it with an undefined protocol. If there are
# others (for other protocols) - remove this one.
try:
defined_ovn_lbs = self._find_ovn_lbs(lb_id)
except idlutils.RowNotFound:
defined_ovn_lbs = []
if len(defined_ovn_lbs) == 1:
commands.append(
self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid, ('protocol', [])))
else:
# Delete the lb.
commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid))
return commands
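# Example outcome (hypothetical): with two rows named lb_id ('tcp' and
# 'udp'), emptying the 'tcp' one deletes that row, while emptying the
# last remaining row just clears its protocol so it can be reused.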
def lb_delete_lrp_assoc_handler(self, row):
try:
router, network = self._get_nw_router_info_on_interface_event(row)
@@ -382,25 +407,26 @@ class OvnProviderHelper(object):
port_name = vip_lp.external_ids.get(ovn_const.OVN_PORT_NAME_EXT_ID_KEY)
lb_id = port_name[len(ovn_const.LB_VIP_PORT_PREFIX):]
try:
ovn_lb = self._find_ovn_lb(lb_id)
ovn_lbs = self._find_ovn_lbs(lb_id)
except idlutils.RowNotFound:
LOG.debug("Loadbalancer %s not found!", lb_id)
return
fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)
lb_vip_fip = ovn_lb.external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
request_info = {'lb_id': lb_id,
'vip_fip': fip}
if fip and fip != lb_vip_fip:
request_info['action'] = REQ_INFO_ACTION_ASSOCIATE
elif fip is None and fip != lb_vip_fip:
request_info['action'] = REQ_INFO_ACTION_DISASSOCIATE
else:
return
self.add_request({'type': REQ_TYPE_HANDLE_VIP_FIP,
'info': request_info})
# Loop over all defined LBs with the given ID, because it is
# possible that there is more than one (one per L4 protocol).
for lb in ovn_lbs:
fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)
lb_vip_fip = lb.external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
request_info = {'ovn_lb': lb,
'vip_fip': fip}
if fip and fip != lb_vip_fip:
request_info['action'] = REQ_INFO_ACTION_ASSOCIATE
elif fip is None and fip != lb_vip_fip:
request_info['action'] = REQ_INFO_ACTION_DISASSOCIATE
else:
continue
self.add_request({'type': REQ_TYPE_HANDLE_VIP_FIP,
'info': request_info})
def _find_lb_in_ls(self, network):
"""Find LB associated to a Network using Network information
@@ -454,18 +480,94 @@ class OvnProviderHelper(object):
LOG.error(msg)
raise driver_exceptions.UpdateStatusError(msg)
def _find_ovn_lb(self, lb_id):
"""Find the Loadbalancer in OVN with the given lb_id as its name
def _find_ovn_lbs(self, lb_id, protocol=None):
"""Find the Loadbalancers in OVN with the given lb_id as its name
This function searches for the LoadBalancer whose Name has the pattern
This function searches for the LoadBalancers whose Name has the pattern
passed in lb_id.
Input: String format of LoadBalancer ID provided by Octavia in its API
@param lb_id: LoadBalancer ID provided by Octavia in its API
request. Note that OVN saves the above ID in the 'name' column.
Output: LoadBalancer row matching the lb_id
Exception: RowNotFound can be generated if the LoadBalancer is not
found.
@type lb_id: str
@param protocol: Loadbalancer protocol.
@type protocol: str or None if not defined.
:returns: LoadBalancer row if protocol specified
or list of rows matching the lb_id.
:raises: RowNotFound can be generated if the LoadBalancer is not
found.
"""
return self.ovn_nbdb_api.lookup('Load_Balancer', lb_id)
lbs = self.ovn_nbdb_api.db_find_rows(
'Load_Balancer', ('name', '=', lb_id)).execute()
if not protocol:
if lbs:
return lbs
raise idlutils.RowNotFound(table='Load_Balancer',
col='name', match=lb_id)
# If there is only one LB and it has no protocol defined,
# it is a 'clean' LB record without any listener.
if len(lbs) == 1 and not lbs[0].protocol:
return lbs[0]
# Search for other lbs.
for lb in lbs:
if lb.protocol[0].upper() == protocol.upper():
return lb
raise idlutils.RowNotFound(table='Load_Balancer',
col='name', match=lb_id)
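# Example usage (hypothetical id):
#   self._find_ovn_lbs(lb_id)                 -> list of rows, any protocol
#   self._find_ovn_lbs(lb_id, protocol='udp') -> the single 'udp' row
# Either call raises idlutils.RowNotFound when nothing matches.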
def _get_or_create_ovn_lb(self, lb_id, protocol, admin_state_up):
"""Find or create ovn lb with given protocol
Find the load balancer configured with the given protocol or
create one if not found.
"""
# Make sure it's lowercase - OVN NBDB stores lowercase values
# for this field.
protocol = protocol.lower()
ovn_lbs = self._find_ovn_lbs(lb_id)
lbs_with_required_protocol = [
ovn_lb for ovn_lb in ovn_lbs
if protocol in ovn_lb.protocol]
lbs_with_no_protocol = [ovn_lb for ovn_lb in ovn_lbs
if not ovn_lb.protocol]
if lbs_with_required_protocol:
# We found an existing LB with the required
# protocol, just return it.
return lbs_with_required_protocol[0]
elif lbs_with_no_protocol:
ovn_lb = lbs_with_no_protocol[0]
# Set required protocol here.
self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid,
('protocol', protocol)).execute(check_error=True)
else:
# NOTE(mjozefcz): Looks like a loadbalancer with the given
# protocol doesn't exist. Try to add it with the required
# protocol by copying the existing one's data.
lb_info = {
'id': lb_id,
'protocol': protocol,
'vip_address': ovn_lbs[0].external_ids.get(
LB_EXT_IDS_VIP_KEY),
'vip_port_id':
ovn_lbs[0].external_ids.get(
LB_EXT_IDS_VIP_PORT_ID_KEY),
LB_EXT_IDS_LR_REF_KEY:
ovn_lbs[0].external_ids.get(
LB_EXT_IDS_LR_REF_KEY),
LB_EXT_IDS_LS_REFS_KEY:
ovn_lbs[0].external_ids.get(
LB_EXT_IDS_LS_REFS_KEY),
'admin_state_up': admin_state_up}
# NOTE(mjozefcz): Handle vip_fip info if it exists.
vip_fip = ovn_lbs[0].external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
if vip_fip:
lb_info.update({LB_EXT_IDS_VIP_FIP_KEY: vip_fip})
self.lb_create(lb_info, protocol=protocol)
# Looks like we've just added a new LB
# or updated an existing, empty one.
return self._find_ovn_lbs(
lb_id,
protocol=protocol)
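# Example flow (hypothetical): creating a TCP listener on an LB that so
# far only has a 'udp' row finds neither a 'tcp' row nor a protocol-less
# one, so lb_create() is called to add a second row that copies the
# existing row's VIP and reference data and sets protocol='tcp'.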
def _find_ovn_lb_with_pool_key(self, pool_key):
lbs = self.ovn_nbdb_api.db_list_rows(
@@ -474,7 +576,7 @@ class OvnProviderHelper(object):
if pool_key in lb.external_ids:
return lb
def _find_ovn_lb_by_id(self, pool_id):
def _find_ovn_lb_by_pool_id(self, pool_id):
pool_key = self._get_pool_key(pool_id)
ovn_lb = self._find_ovn_lb_with_pool_key(pool_key)
if not ovn_lb:
@@ -591,13 +693,13 @@ class OvnProviderHelper(object):
commands.append(self.ovn_nbdb_api.ls_lb_add(
net, ovn_lb.uuid, may_exist=True))
# Multiple routers in lr_rf are separated with ','
lr_rf = {LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} if not lr_rf else {
LB_EXT_IDS_LR_REF_KEY: "%s,%s" % (lr_rf, ovn_lr.name)}
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', lr_rf))
)
if ovn_lr.name not in str(lr_rf):
# Multiple routers in lr_rf are separated with ','
lr_rf = {LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} if not lr_rf else {
LB_EXT_IDS_LR_REF_KEY: "%s,%s" % (lr_rf, ovn_lr.name)}
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', lr_rf)))
return commands
def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False):
@@ -738,7 +840,7 @@ class OvnProviderHelper(object):
return False
def check_lb_protocol(self, lb_id, listener_protocol):
ovn_lb = self._find_ovn_lb(lb_id)
ovn_lb = self._find_ovn_lbs(lb_id, protocol=listener_protocol)
if not ovn_lb:
return False
elif not self._is_listener_in_lb(ovn_lb):
@@ -746,13 +848,15 @@ class OvnProviderHelper(object):
else:
return str(listener_protocol).lower() in ovn_lb.protocol
def lb_create(self, loadbalancer):
def lb_create(self, loadbalancer, protocol=None):
port = None
try:
# Get the port id of the vip and store it in the external_ids.
# This is required to delete the port when the loadbalancer is
# deleted.
network_driver = get_network_driver()
network_driver = get_network_driver()
if loadbalancer.get('vip_port_id'):
# In case we don't have vip_network_id
port = network_driver.neutron_client.show_port(
loadbalancer['vip_port_id'])['port']
elif (loadbalancer.get('vip_network_id') and
loadbalancer.get('vip_address')):
ports = network_driver.neutron_client.list_ports(
network_id=loadbalancer['vip_network_id'])
for p in ports['ports']:
@@ -760,25 +864,63 @@ class OvnProviderHelper(object):
if ip['ip_address'] == loadbalancer['vip_address']:
port = p
break
# In case port is not found for the vip_address we will see an
# exception when port['id'] is accessed.
# If protocol is set, make sure it's lowercase
protocol = protocol.lower() if protocol else None
# In case port is not found for the vip_address we will see an
# exception when port['id'] is accessed.
external_ids = {
LB_EXT_IDS_VIP_KEY: loadbalancer['vip_address'],
LB_EXT_IDS_VIP_PORT_ID_KEY:
loadbalancer.get('vip_port_id') or port['id'],
'enabled': str(loadbalancer['admin_state_up'])}
# In case vip_fip was passed - use it.
vip_fip = loadbalancer.get(LB_EXT_IDS_VIP_FIP_KEY)
if vip_fip:
external_ids[LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
# In case lr_ref was passed - use it.
lr_ref = loadbalancer.get(LB_EXT_IDS_LR_REF_KEY)
if lr_ref:
external_ids[LB_EXT_IDS_LR_REF_KEY] = lr_ref
try:
self.ovn_nbdb_api.db_create(
'Load_Balancer', name=loadbalancer['id'], protocol='tcp',
external_ids={LB_EXT_IDS_VIP_KEY: loadbalancer['vip_address'],
LB_EXT_IDS_VIP_PORT_ID_KEY: port['id'],
'enabled': str(loadbalancer['admin_state_up'])}
).execute(check_error=True)
ovn_lb = self._find_ovn_lb(loadbalancer['id'])
'Load_Balancer', name=loadbalancer['id'],
protocol=protocol,
external_ids=external_ids).execute(check_error=True)
ovn_lb = self._find_ovn_lbs(
loadbalancer['id'],
protocol=protocol)
ovn_lb = ovn_lb if protocol else ovn_lb[0]
commands = self._update_lb_to_ls_association(
ovn_lb, network_id=loadbalancer['vip_network_id'],
ovn_lb, network_id=port['network_id'],
associate=True)
ls_name = utils.ovn_name(loadbalancer['vip_network_id'])
ls_name = utils.ovn_name(port['network_id'])
ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
check_error=True)
ovn_lr = self._find_lr_of_ls(ovn_ls)
if ovn_lr:
commands.extend(self._update_lb_to_lr_association(
ovn_lb, ovn_lr))
# NOTE(mjozefcz): In case LS references were passed -
# apply them to the new LB. That could happen when we
# need another loadbalancer for a different L4 protocol.
ls_refs = loadbalancer.get(LB_EXT_IDS_LS_REFS_KEY)
if ls_refs:
try:
ls_refs = jsonutils.loads(ls_refs)
except ValueError:
ls_refs = {}
for ls in ls_refs:
# Skip the previously added LS because we don't
# want duplicates.
if ls == ovn_ls.name:
continue
commands.extend(self._update_lb_to_ls_association(
ovn_lb, network_id=ls.replace('neutron-', ''),
associate=True))
self._execute_commands(commands)
operating_status = constants.ONLINE
# The issue is that since OVN doesn't support any HMs,
@@ -809,101 +951,104 @@ class OvnProviderHelper(object):
return status
def lb_delete(self, loadbalancer):
commands = []
port_id = None
status = {'loadbalancers': [{"id": loadbalancer['id'],
"provisioning_status": "DELETED",
"operating_status": "OFFLINE"}],
'listeners': [],
'pools': [],
'members': []}
ovn_lbs = None
try:
status = {'loadbalancers': [{"id": loadbalancer['id'],
"provisioning_status": "DELETED",
"operating_status": "OFFLINE"}]}
ovn_lb = None
try:
ovn_lb = self._find_ovn_lb(loadbalancer['id'])
except idlutils.RowNotFound:
LOG.warning("Loadbalancer %s not found in OVN Northbound DB."
"Setting the Loadbalancer status to DELETED "
"in Octavia", str(loadbalancer['id']))
return status
if not ovn_lb:
return status
ovn_lbs = self._find_ovn_lbs(loadbalancer['id'])
except idlutils.RowNotFound:
LOG.warning("Loadbalancer %s not found in OVN Northbound DB. "
"Setting the Loadbalancer status to DELETED "
"in Octavia", str(loadbalancer['id']))
return status
if loadbalancer['cascade']:
status['members'] = []
status['pools'] = []
status['listeners'] = []
# Delete all pools
for key, value in ovn_lb.external_ids.items():
if key.startswith(LB_EXT_IDS_POOL_PREFIX):
pool_id = key.split('_')[1]
# Delete all members in the pool
if value and len(value.split(',')) > 0:
for mem_info in value.split(','):
status['members'].append({
'id': mem_info.split('_')[1],
'provisioning_status': constants.DELETED})
status['pools'].append(
{"id": pool_id,
"provisioning_status": constants.DELETED})
if key.startswith(LB_EXT_IDS_LISTENER_PREFIX):
status['listeners'].append({
'id': key.split('_')[1],
'provisioning_status': constants.DELETED,
'operating_status': constants.OFFLINE})
# Clear the status dict of any key having [] value
# Python 3.6 doesn't allow deleting an element in a
# dict while iterating over it. So first get a list of keys.
# https://cito.github.io/blog/never-iterate-a-changing-dict/
status = {key: value for key, value in status.items() if value}
ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY, {})
if ls_refs:
try:
ls_refs = jsonutils.loads(ls_refs)
except ValueError:
ls_refs = {}
# Delete the VIP Port
self.delete_vip_port(ovn_lb.external_ids[
LB_EXT_IDS_VIP_PORT_ID_KEY])
for ls_name in ls_refs.keys():
ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
check_error=True)
if ovn_ls:
commands.append(
self.ovn_nbdb_api.ls_lb_del(ovn_ls.uuid, ovn_lb.uuid))
# Delete LB from all Networks the LB is indirectly associated
for ls in self._find_lb_in_table(ovn_lb, 'Logical_Switch'):
commands.append(
self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid,
if_exists=True))
lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY, {})
if lr_ref:
for lr in self.ovn_nbdb_api.tables[
'Logical_Router'].rows.values():
if lr.name == lr_ref:
commands.append(self.ovn_nbdb_api.lr_lb_del(
lr.uuid, ovn_lb.uuid))
break
# Delete LB from all Routers the LB is indirectly associated
for lr in self._find_lb_in_table(ovn_lb, 'Logical_Router'):
commands.append(
self.ovn_nbdb_api.lr_lb_del(lr.uuid, ovn_lb.uuid,
if_exists=True))
# Save the port ID before deleting the LoadBalancer
port_id = ovn_lb.external_ids[LB_EXT_IDS_VIP_PORT_ID_KEY]
commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid))
self._execute_commands(commands)
# We need to delete the vip port but not fail LB delete if port
# delete fails. Can happen when Port deleted manually by user.
network_driver = get_network_driver()
network_driver.neutron_client.delete_port(port_id)
except n_exc.PortNotFoundClient:
LOG.warning("Port %s could not be found. Please "
"check Neutron logs", port_id)
try:
port_id = ovn_lbs[0].external_ids[LB_EXT_IDS_VIP_PORT_ID_KEY]
for ovn_lb in ovn_lbs:
status = self._lb_delete(loadbalancer, ovn_lb, status)
# Clear the status dict of any key having [] value
# Python 3.6 doesn't allow deleting an element in a
# dict while iterating over it. So first get a list of keys.
# https://cito.github.io/blog/never-iterate-a-changing-dict/
status = {key: value for key, value in status.items() if value}
except Exception:
LOG.exception(EXCEPTION_MSG, "deletion of loadbalancer")
status = {
'loadbalancers': [{"id": loadbalancer['id'],
"provisioning_status": constants.ERROR,
"operating_status": constants.ERROR}]}
try:
# Delete VIP port from neutron.
self.delete_vip_port(port_id)
except n_exc.PortNotFoundClient:
LOG.warning("Port %s could not be found. Please "
"check Neutron logs", port_id)
return status
def _lb_delete(self, loadbalancer, ovn_lb, status):
commands = []
if loadbalancer['cascade']:
# Delete all pools
for key, value in ovn_lb.external_ids.items():
if key.startswith(LB_EXT_IDS_POOL_PREFIX):
pool_id = key.split('_')[1]
# Delete all members in the pool
if value and len(value.split(',')) > 0:
for mem_info in value.split(','):
status['members'].append({
'id': mem_info.split('_')[1],
'provisioning_status': constants.DELETED})
status['pools'].append(
{"id": pool_id,
"provisioning_status": constants.DELETED})
if key.startswith(LB_EXT_IDS_LISTENER_PREFIX):
status['listeners'].append({
'id': key.split('_')[1],
'provisioning_status': constants.DELETED,
'operating_status': constants.OFFLINE})
ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY, {})
if ls_refs:
try:
ls_refs = jsonutils.loads(ls_refs)
except ValueError:
ls_refs = {}
for ls_name in ls_refs.keys():
try:
ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
check_error=True)
commands.append(
self.ovn_nbdb_api.ls_lb_del(ovn_ls.uuid, ovn_lb.uuid)
)
except idlutils.RowNotFound:
LOG.warning("LogicalSwitch %s could not be found. Cannot "
"delete LB from it", ovn_ls.uuid)
# Delete LB from all Networks the LB is indirectly associated
for ls in self._find_lb_in_table(ovn_lb, 'Logical_Switch'):
commands.append(
self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid,
if_exists=True))
lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY, {})
if lr_ref:
for lr in self.ovn_nbdb_api.tables[
'Logical_Router'].rows.values():
if lr.name == lr_ref:
commands.append(self.ovn_nbdb_api.lr_lb_del(
lr.uuid, ovn_lb.uuid))
break
# Delete LB from all Routers the LB is indirectly associated
for lr in self._find_lb_in_table(ovn_lb, 'Logical_Router'):
commands.append(
self.ovn_nbdb_api.lr_lb_del(lr.uuid, ovn_lb.uuid,
if_exists=True))
commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid))
self._execute_commands(commands)
return status
def lb_failover(self, loadbalancer):
@@ -913,30 +1058,36 @@ class OvnProviderHelper(object):
return status
def lb_update(self, loadbalancer):
try:
lb_status = {'id': loadbalancer['id'],
'provisioning_status': constants.ACTIVE}
status = {'loadbalancers': [lb_status]}
if 'admin_state_up' not in loadbalancer:
return status
lb_enabled = loadbalancer['admin_state_up']
lb_status = {'id': loadbalancer['id'],
'provisioning_status': constants.ACTIVE}
status = {'loadbalancers': [lb_status]}
if 'admin_state_up' not in loadbalancer:
return status
lb_enabled = loadbalancer['admin_state_up']
ovn_lb = self._find_ovn_lb(loadbalancer['id'])
if ovn_lb.external_ids['enabled'] != str(lb_enabled):
commands = []
enable_info = {'enabled': str(lb_enabled)}
ovn_lb.external_ids['enabled'] = str(lb_enabled)
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', enable_info)))
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, ovn_lb.external_ids))
self._execute_commands(commands)
if lb_enabled:
operating_status = constants.ONLINE
else:
operating_status = constants.OFFLINE
lb_status['operating_status'] = operating_status
try:
ovn_lbs = self._find_ovn_lbs(loadbalancer['id'])
# The 'enabled' flag should be consistent across all the LBs
# for all protocols, so we can simply loop over all defined
# for the given Octavia LB.
for ovn_lb in ovn_lbs:
if str(ovn_lb.external_ids['enabled']) != str(lb_enabled):
commands = []
enable_info = {'enabled': str(lb_enabled)}
ovn_lb.external_ids['enabled'] = str(lb_enabled)
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', enable_info))
)
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid,
ovn_lb.external_ids))
self._execute_commands(commands)
if lb_enabled:
operating_status = constants.ONLINE
else:
operating_status = constants.OFFLINE
lb_status['operating_status'] = operating_status
except Exception:
LOG.exception(EXCEPTION_MSG, "update of loadbalancer")
lb_status['provisioning_status'] = constants.ERROR
@@ -944,21 +1095,24 @@ class OvnProviderHelper(object):
return status
def listener_create(self, listener):
ovn_lb = self._get_or_create_ovn_lb(
listener['loadbalancer_id'],
listener['protocol'],
listener['admin_state_up'])
external_ids = copy.deepcopy(ovn_lb.external_ids)
listener_key = self._get_listener_key(
listener['id'], is_enabled=listener['admin_state_up'])
if listener.get('default_pool_id'):
pool_key = self._get_pool_key(listener['default_pool_id'])
else:
pool_key = ''
external_ids[listener_key] = self._make_listener_key_value(
listener['protocol_port'], pool_key)
listener_info = {listener_key: external_ids[listener_key]}
try:
ovn_lb = self._find_ovn_lb(listener['loadbalancer_id'])
external_ids = copy.deepcopy(ovn_lb.external_ids)
listener_key = self._get_listener_key(
listener['id'], is_enabled=listener['admin_state_up'])
if listener.get('default_pool_id'):
pool_key = self._get_pool_key(listener['default_pool_id'])
else:
pool_key = ''
external_ids[listener_key] = self._make_listener_key_value(
listener['protocol_port'], pool_key)
listener_info = {listener_key: external_ids[listener_key]}
commands = []
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
@@ -995,73 +1149,95 @@ class OvnProviderHelper(object):
return status
def listener_delete(self, listener):
status = {
'listeners': [{"id": listener['id'],
"provisioning_status": constants.DELETED,
"operating_status": constants.OFFLINE}],
'loadbalancers': [{"id": listener['loadbalancer_id'],
"provisioning_status": constants.ACTIVE}]}
try:
ovn_lb = self._find_ovn_lb(listener['loadbalancer_id'])
external_ids = copy.deepcopy(ovn_lb.external_ids)
listener_key = self._get_listener_key(listener['id'])
ovn_lb = self._find_ovn_lbs(
listener['loadbalancer_id'],
protocol=listener['protocol'])
except idlutils.RowNotFound:
# Listener already deleted.
return status
if listener_key in external_ids:
external_ids = copy.deepcopy(ovn_lb.external_ids)
listener_key = self._get_listener_key(listener['id'])
if listener_key in external_ids:
try:
commands = []
commands.append(
self.ovn_nbdb_api.db_remove(
'Load_Balancer', ovn_lb.uuid, 'external_ids',
(listener_key))
)
(listener_key)))
# Drop current listener from LB.
del external_ids[listener_key]
# Set LB protocol to undefined only if there are no more
# listeners and pools defined in the LB.
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids)
)
self._clean_lb_if_empty(
ovn_lb, listener['loadbalancer_id'], external_ids))
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids))
self._execute_commands(commands)
status = {
'listeners': [{"id": listener['id'],
"provisioning_status": constants.DELETED,
"operating_status": constants.OFFLINE}],
'loadbalancers': [{"id": listener['loadbalancer_id'],
"provisioning_status": constants.ACTIVE}]}
except Exception:
LOG.exception(EXCEPTION_MSG, "deletion of listener")
status = {
'listeners': [{"id": listener['id'],
"provisioning_status": constants.ERROR,
"operating_status": constants.ERROR}],
'loadbalancers': [{"id": listener['loadbalancer_id'],
"provisioning_status": constants.ACTIVE}]}
except Exception:
LOG.exception(EXCEPTION_MSG, "deletion of listener")
status = {
'listeners': [{
"id": listener['id'],
"provisioning_status": constants.ERROR,
"operating_status": constants.ERROR}],
'loadbalancers': [{
"id": listener['loadbalancer_id'],
"provisioning_status": constants.ACTIVE}]}
return status
def listener_update(self, listener):
# NOTE(mjozefcz): Based on
# https://docs.openstack.org/api-ref/load-balancer/v2/?expanded=update-a-listener-detail
# there is no possibility to update listener protocol or port.
listener_status = {'id': listener['id'],
'provisioning_status': constants.ACTIVE}
pool_status = []
status = {
'listeners': [listener_status],
'loadbalancers': [{'id': listener['loadbalancer_id'],
'provisioning_status': constants.ACTIVE}],
'pools': pool_status}
try:
listener_status = {'id': listener['id'],
'provisioning_status': constants.ACTIVE}
pool_status = []
status = {
'listeners': [listener_status],
'loadbalancers': [{'id': listener['loadbalancer_id'],
'provisioning_status': constants.ACTIVE}],
'pools': pool_status}
ovn_lb = self._find_ovn_lbs(
listener['loadbalancer_id'],
protocol=listener['protocol'])
except idlutils.RowNotFound:
LOG.exception(EXCEPTION_MSG, "update of listener")
# LB row not found while updating a listener. That is a problem.
status['listeners'][0]['provisioning_status'] = constants.ERROR
status['loadbalancers'][0]['provisioning_status'] = constants.ERROR
return status
ovn_lb = self._find_ovn_lb(listener['loadbalancer_id'])
l_key_when_enabled = self._get_listener_key(listener['id'])
l_key_when_disabled = self._get_listener_key(
listener['id'], is_enabled=False)
l_key_when_enabled = self._get_listener_key(listener['id'])
l_key_when_disabled = self._get_listener_key(
listener['id'], is_enabled=False)
external_ids = copy.deepcopy(ovn_lb.external_ids)
external_ids = copy.deepcopy(ovn_lb.external_ids)
if 'admin_state_up' not in listener and (
'default_pool_id' not in listener):
return status
if 'admin_state_up' not in listener and (
'default_pool_id' not in listener):
return status
l_key_to_add = {}
if l_key_when_enabled in external_ids:
present_l_key = l_key_when_enabled
elif l_key_when_disabled in external_ids:
present_l_key = l_key_when_disabled
else:
# Something is terribly wrong. This cannot happen.
return status
l_key_to_add = {}
if l_key_when_enabled in external_ids:
present_l_key = l_key_when_enabled
elif l_key_when_disabled in external_ids:
present_l_key = l_key_when_disabled
else:
# Something is terribly wrong. This cannot happen.
return status
try:
commands = []
new_l_key = None
l_key_to_remove = None
@@ -1116,21 +1292,23 @@ class OvnProviderHelper(object):
'provisioning_status': constants.ERROR}],
'loadbalancers': [{'id': listener['loadbalancer_id'],
'provisioning_status': constants.ACTIVE}]}
return status
def pool_create(self, pool):
ovn_lb = self._get_or_create_ovn_lb(
pool['loadbalancer_id'],
pool['protocol'],
pool['admin_state_up'])
external_ids = copy.deepcopy(ovn_lb.external_ids)
pool_key = self._get_pool_key(pool['id'],
is_enabled=pool['admin_state_up'])
external_ids[pool_key] = ''
if pool['listener_id']:
listener_key = self._get_listener_key(pool['listener_id'])
if listener_key in ovn_lb.external_ids:
external_ids[listener_key] = str(
external_ids[listener_key]) + str(pool_key)
try:
ovn_lb = self._find_ovn_lb(pool['loadbalancer_id'])
external_ids = copy.deepcopy(ovn_lb.external_ids)
pool_key = self._get_pool_key(pool['id'],
is_enabled=pool['admin_state_up'])
external_ids[pool_key] = ''
if pool['listener_id']:
listener_key = self._get_listener_key(pool['listener_id'])
if listener_key in ovn_lb.external_ids:
external_ids[listener_key] = str(
external_ids[listener_key]) + str(pool_key)
self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid,
('external_ids', external_ids)).execute(check_error=True)
@@ -1162,11 +1340,23 @@ class OvnProviderHelper(object):
return status
def pool_delete(self, pool):
status = {
'pools': [{"id": pool['id'],
"provisioning_status": constants.DELETED}],
'loadbalancers': [{"id": pool['loadbalancer_id'],
"provisioning_status": constants.ACTIVE}]}
try:
ovn_lb = self._find_ovn_lbs(
pool['loadbalancer_id'],
pool['protocol'])
except idlutils.RowNotFound:
# LB row not found - that means the pool is already deleted.
return status
pool_key = self._get_pool_key(pool['id'])
commands = []
external_ids = copy.deepcopy(ovn_lb.external_ids)
try:
ovn_lb = self._find_ovn_lb(pool['loadbalancer_id'])
pool_key = self._get_pool_key(pool['id'])
commands = []
external_ids = copy.deepcopy(ovn_lb.external_ids)
if pool_key in ovn_lb.external_ids:
commands.append(
self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid,
@@ -1196,13 +1386,11 @@ class OvnProviderHelper(object):
'external_ids', (pool_key_when_disabled))
)
commands.extend(
self._clean_lb_if_empty(
ovn_lb, pool['loadbalancer_id'], external_ids))
self._execute_commands(commands)
status = {
'pools': [{"id": pool['id'],
"provisioning_status": constants.DELETED}],
'loadbalancers': [{"id": pool['loadbalancer_id'],
"provisioning_status": constants.ACTIVE}]}
if listener_id:
status['listeners'] = [{
'id': listener_id,
@@ -1218,24 +1406,32 @@ class OvnProviderHelper(object):
return status
def pool_update(self, pool):
pool_status = {'id': pool['id'],
'provisioning_status': constants.ACTIVE}
status = {
'pools': [pool_status],
'loadbalancers': [{'id': pool['loadbalancer_id'],
'provisioning_status': constants.ACTIVE}]}
if 'admin_state_up' not in pool:
return status
try:
pool_status = {'id': pool['id'],
'provisioning_status': constants.ACTIVE}
status = {
'pools': [pool_status],
'loadbalancers': [{'id': pool['loadbalancer_id'],
'provisioning_status': constants.ACTIVE}]}
if 'admin_state_up' not in pool:
return status
ovn_lb = self._find_ovn_lbs(
pool['loadbalancer_id'], protocol=pool['protocol'])
except idlutils.RowNotFound:
LOG.exception(EXCEPTION_MSG, "update of pool")
# LB row not found while updating a pool. That is a problem.
status['pools'][0]['provisioning_status'] = constants.ERROR
status['loadbalancers'][0]['provisioning_status'] = constants.ERROR
return status
ovn_lb = self._find_ovn_lb(pool['loadbalancer_id'])
pool_key = self._get_pool_key(pool['id'])
p_key_when_disabled = self._get_pool_key(pool['id'],
is_enabled=False)
pool_key = self._get_pool_key(pool['id'])
p_key_when_disabled = self._get_pool_key(pool['id'],
is_enabled=False)
external_ids = copy.deepcopy(ovn_lb.external_ids)
p_key_to_remove = None
p_key_to_add = {}
external_ids = copy.deepcopy(ovn_lb.external_ids)
p_key_to_remove = None
p_key_to_add = {}
try:
if pool['admin_state_up']:
if p_key_when_disabled in external_ids:
p_key_to_add[pool_key] = external_ids[p_key_when_disabled]
@@ -1320,7 +1516,7 @@ class OvnProviderHelper(object):
def member_create(self, member):
try:
pool_key, ovn_lb = self._find_ovn_lb_by_id(
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
member['pool_id'])
self._add_member(member, ovn_lb, pool_key)
pool = {"id": member['pool_id'],
@@ -1386,7 +1582,7 @@ class OvnProviderHelper(object):
def member_delete(self, member):
try:
pool_key, ovn_lb = self._find_ovn_lb_by_id(
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
member['pool_id'])
pool_status = self._remove_member(member, ovn_lb, pool_key)
pool = {"id": member['pool_id'],
@@ -1440,7 +1636,7 @@ class OvnProviderHelper(object):
def member_update(self, member):
try:
pool_key, ovn_lb = self._find_ovn_lb_by_id(
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
member['pool_id'])
status = {
'pools': [{'id': member['pool_id'],
@@ -1476,7 +1672,7 @@ class OvnProviderHelper(object):
return status
def _get_existing_pool_members(self, pool_id):
pool_key, ovn_lb = self._find_ovn_lb_by_id(pool_id)
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id)
if not ovn_lb:
msg = _("Loadbalancer with pool %s does not exist") % pool_key
raise driver_exceptions.DriverError(msg)
@@ -1535,11 +1731,7 @@ class OvnProviderHelper(object):
network_driver.neutron_client.delete_port(port_id)
def handle_vip_fip(self, fip_info):
try:
ovn_lb = self._find_ovn_lb(fip_info['lb_id'])
except idlutils.RowNotFound:
LOG.debug("Loadbalancer %s not found!", fip_info['lb_id'])
return
ovn_lb = fip_info['ovn_lb']
external_ids = copy.deepcopy(ovn_lb.external_ids)
commands = []
@@ -1627,6 +1819,7 @@ class OvnProviderDriver(driver_base.ProviderDriver):
admin_state_up = True
request_info = {'id': pool.pool_id,
'loadbalancer_id': pool.loadbalancer_id,
'protocol': pool.protocol,
'listener_id': pool.listener_id,
'admin_state_up': admin_state_up}
request = {'type': REQ_TYPE_POOL_CREATE,
@@ -1638,6 +1831,7 @@ class OvnProviderDriver(driver_base.ProviderDriver):
self.member_delete(member)
request_info = {'id': pool.pool_id,
'protocol': pool.protocol,
'loadbalancer_id': pool.loadbalancer_id}
request = {'type': REQ_TYPE_POOL_DELETE,
'info': request_info}
@@ -1648,7 +1842,8 @@ class OvnProviderDriver(driver_base.ProviderDriver):
self._check_for_supported_protocols(new_pool.protocol)
if not isinstance(new_pool.lb_algorithm, o_datamodels.UnsetType):
self._check_for_supported_algorithms(new_pool.lb_algorithm)
request_info = {'id': new_pool.pool_id,
request_info = {'id': old_pool.pool_id,
'protocol': old_pool.protocol,
'loadbalancer_id': old_pool.loadbalancer_id}
if not isinstance(new_pool.admin_state_up, o_datamodels.UnsetType):
@@ -1657,21 +1852,8 @@ class OvnProviderDriver(driver_base.ProviderDriver):
'info': request_info}
self._ovn_helper.add_request(request)
# Listener
def _check_listener_protocol(self, listener):
self._check_for_supported_protocols(listener.protocol)
if not self._ovn_helper.check_lb_protocol(
listener.loadbalancer_id, listener.protocol):
msg = (_('The loadbalancer %(lb)s does not support %(proto)s '
'protocol') % {
'lb': listener.loadbalancer_id,
'proto': listener.protocol})
raise driver_exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
def listener_create(self, listener):
self._check_listener_protocol(listener)
self._check_for_supported_protocols(listener.protocol)
admin_state_up = listener.admin_state_up
if isinstance(admin_state_up, o_datamodels.UnsetType):
admin_state_up = True
@@ -1688,16 +1870,16 @@ class OvnProviderDriver(driver_base.ProviderDriver):
def listener_delete(self, listener):
request_info = {'id': listener.listener_id,
'loadbalancer_id': listener.loadbalancer_id,
'protocol_port': listener.protocol_port}
'protocol_port': listener.protocol_port,
'protocol': listener.protocol}
request = {'type': REQ_TYPE_LISTENER_DELETE,
'info': request_info}
self._ovn_helper.add_request(request)
def listener_update(self, old_listener, new_listener):
if not isinstance(new_listener.protocol, o_datamodels.UnsetType):
self._check_listener_protocol(new_listener)
request_info = {'id': new_listener.listener_id,
'loadbalancer_id': old_listener.loadbalancer_id,
'protocol': old_listener.protocol,
'protocol_port': old_listener.protocol_port}
if not isinstance(new_listener.admin_state_up, o_datamodels.UnsetType):
@@ -1721,7 +1903,7 @@ class OvnProviderDriver(driver_base.ProviderDriver):
return False
def _ip_version_differs(self, member):
_, ovn_lb = self._ovn_helper._find_ovn_lb_by_id(member.pool_id)
_, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(member.pool_id)
lb_vip = ovn_lb.external_ids[LB_EXT_IDS_VIP_KEY]
return netaddr.IPNetwork(lb_vip).version != (
netaddr.IPNetwork(member.address).version)


@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
import copy
import mock
from neutron.common import utils as n_utils
from neutron_lib.plugins import directory
from octavia_lib.api.drivers import data_models as octavia_data_model
@@ -65,6 +66,8 @@ class TestOctaviaOvnProviderDriver(
self.fake_network_driver.get_subnet = self._mock_get_subnet
self.fake_network_driver.neutron_client.list_ports = (
self._mock_list_ports)
self.fake_network_driver.neutron_client.show_port = (
self._mock_show_port)
self.fake_network_driver.neutron_client.\
delete_port.return_value = True
self._local_net_cache = {}
@@ -80,6 +83,11 @@ class TestOctaviaOvnProviderDriver(
def _mock_list_ports(self, **kwargs):
return self._local_port_cache
def _mock_show_port(self, port_id):
for port in self._local_port_cache['ports']:
if port['id'] == port_id:
return {'port': port}
def _create_provider_network(self):
e1 = self._make_network(self.fmt, 'e1', True,
arg_list=('router:external',
@@ -129,7 +137,6 @@ class TestOctaviaOvnProviderDriver(
m_pool.loadbalancer_id = loadbalancer_id
m_pool.members = []
m_pool.admin_state_up = admin_state_up
m_pool.protocol = protocol
m_pool.lb_algorithm = lb_algorithm
if listener_id:
m_pool.listener_id = listener_id
@@ -189,7 +196,26 @@ class TestOctaviaOvnProviderDriver(
def _validate_loadbalancers(self, expected_lbs):
observed_lbs = self._get_loadbalancers()
self.assertItemsEqual(expected_lbs, observed_lbs)
# NOTE(mjozefcz): assertItemsEqual works only on the first level
# of comparison; if dicts inside dicts are in a different
# order it would fail.
self.assertEqual(len(expected_lbs),
len(observed_lbs))
for expected_lb in expected_lbs:
# search for LB with same name and protocol
found = False
for observed_lb in observed_lbs:
if (observed_lb.get('name') ==
expected_lb.get('name') and
observed_lb.get('protocol') ==
expected_lb.get('protocol')):
self.assertEqual(expected_lb, observed_lb)
found = True
if not found:
raise Exception("Expected LB %(name)s for protocol %(proto)s "
"not found in observed_lbs", {
'name': expected_lb.get('name'),
'proto': expected_lb.get('proto')})
def _is_lb_associated_to_ls(self, lb_name, ls_name):
return self._is_lb_associated_to_tab(
@@ -238,30 +264,54 @@ class TestOctaviaOvnProviderDriver(
net_id = LR_REF_KEY_HEADER + '%s' % net_id
if add_ref:
if net_id in lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY]:
ref_ct = lb_data[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] + 1
lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = ref_ct
else:
if net_id not in lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY]:
lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
else:
ref_ct = lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] - 1
if ref_ct > 0:
lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = ref_ct
else:
ref_ct = lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id]
if ref_ct <= 0:
del lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id]
def _wait_for_status(self, expected_status, check_call=True):
call_count = len(expected_status)
expected_calls = [mock.call(status) for status in expected_status]
def _wait_for_status(self, expected_statuses, check_call=True):
call_count = len(expected_statuses)
update_loadbalancer_status = (
self._o_driver_lib.update_loadbalancer_status)
n_utils.wait_until_true(
lambda: update_loadbalancer_status.call_count == call_count,
timeout=10)
if check_call:
self._o_driver_lib.update_loadbalancer_status.assert_has_calls(
expected_calls, any_order=True)
# NOTE(mjozefcz): The updates are sent in parallel and include
# dicts with unordered lists inside. So we can't simply use
# assert_has_calls here. Sample structure:
# {'listeners': [],
# 'loadbalancers': [{'id': 'a', 'provisioning_status': 'ACTIVE'}],
# 'members': [{'id': 'b', 'provisioning_status': 'DELETED'},
# {'id': 'c', 'provisioning_status': 'DELETED'}],
# 'pools': [{'id': 'd', 'operating_status': 'ONLINE',
# 'provisioning_status': 'ACTIVE'}]},
updated_statuses = []
for call in update_loadbalancer_status.mock_calls:
updated_statuses.append(call.args[0])
calls_found = []
for expected_status in expected_statuses:
for updated_status in updated_statuses:
# Find status update having equal keys
if updated_status.keys() == expected_status.keys():
val_check = []
# Within this status update check if all values of
# the expected keys match.
for k, v in expected_status.items():
val_check.append(
sorted(expected_status[k],
key=lambda x: x['id']) ==
sorted(updated_status[k],
key=lambda x: x['id']))
if False in val_check:
# At least one value doesn't match.
continue
calls_found.append(expected_status)
break
# Validate that we found all expected calls.
self.assertItemsEqual(expected_statuses, calls_found)
def _wait_for_status_and_validate(self, lb_data, expected_status,
check_call=True):
@@ -399,9 +449,55 @@ class TestOctaviaOvnProviderDriver(
'neutron:vip': lb_data['model'].vip_address,
'neutron:vip_port_id': vip_net_info[3],
'enabled': str(lb_data['model'].admin_state_up)}
# NOTE(mjozefcz): By default we don't set the protocol. We don't
# know if the listener/pool will be TCP or UDP, so we don't set it.
expected_protocols = set()
# Let's fetch the list of L4 protocols defined for this LB.
for p in lb_data['pools']:
expected_protocols.add(p.protocol.lower())
for l in lb_data['listeners']:
expected_protocols.add(l.protocol.lower())
# If there is no protocol let's add the default - an empty [].
expected_protocols = list(expected_protocols)
if len(expected_protocols) == 0:
expected_protocols.append(None)
expected_lbs = [{'name': lb_data['model'].loadbalancer_id,
'protocol': [protocol] if protocol else [],
'vips': {},
'external_ids': copy.deepcopy(external_ids)}
for protocol in expected_protocols]
def _get_lb_field_by_protocol(protocol, field='external_ids'):
"Get needed external_ids and pass by reference"
lb = [lb for lb in expected_lbs
if lb.get('protocol') == [protocol]]
return lb[0].get(field)
# For every subnet connected to the LB set the ref
# counter.
for net_id, ref_ct in lb_data[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY].items():
for lb in expected_lbs:
# If the given LB doesn't have its VIP configured on
# this network we shouldn't touch it here.
if net_id == 'neutron-%s' % lb_data['model'].vip_network_id:
lb.get('external_ids')[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
# For every connected router set it here.
if lb_data.get(ovn_driver.LB_EXT_IDS_LR_REF_KEY):
for lb in expected_lbs:
lb.get('external_ids')[
ovn_driver.LB_EXT_IDS_LR_REF_KEY] = lb_data[
ovn_driver.LB_EXT_IDS_LR_REF_KEY]
pool_info = {}
for p in lb_data.get('pools', []):
external_ids = _get_lb_field_by_protocol(
p.protocol.lower(),
field='external_ids')
p_members = ""
for m in p.members:
if not m.admin_state_up:
@@ -412,23 +508,31 @@ class TestOctaviaOvnProviderDriver(
p_members += "," + m_info
else:
p_members = m_info
# Bump up LS refs counter if needed.
if m.subnet_id:
# Need to get the network_id.
for port in self._local_port_cache['ports']:
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] == m.subnet_id:
ex = external_ids[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY]
act = ex.get(
'neutron-%s' % port['network_id'], 0)
ex['neutron-%s' % port['network_id']] = act + 1
break
pool_key = 'pool_' + p.pool_id
if not p.admin_state_up:
pool_key += ':D'
external_ids[pool_key] = p_members
pool_info[p.pool_id] = p_members
for net_id, ref_ct in lb_data[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY].items():
external_ids[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = ref_ct
if lb_data.get(ovn_driver.LB_EXT_IDS_LR_REF_KEY):
external_ids[
ovn_driver.LB_EXT_IDS_LR_REF_KEY] = lb_data[
ovn_driver.LB_EXT_IDS_LR_REF_KEY]
expected_vips = {}
expected_protocol = ['tcp']
for l in lb_data['listeners']:
expected_vips = _get_lb_field_by_protocol(
l.protocol.lower(),
field='vips')
external_ids = _get_lb_field_by_protocol(
l.protocol.lower(),
field='external_ids')
listener_k = 'listener_' + str(l.listener_id)
if lb_data['model'].admin_state_up and l.admin_state_up:
vip_k = lb_data['model'].vip_address + ":" + str(
@@ -447,11 +551,6 @@ class TestOctaviaOvnProviderDriver(
elif lb_data.get('pools', []):
external_ids[listener_k] += 'pool_' + lb_data[
'pools'][0].pool_id
expected_lbs = [{'name': lb_data['model'].loadbalancer_id,
'protocol': expected_protocol,
'vips': expected_vips,
'external_ids': external_ids}]
return expected_lbs
def _extract_member_info(self, member):
@@ -462,10 +561,12 @@ class TestOctaviaOvnProviderDriver(
return mem_info[:-1]
def _create_pool_and_validate(self, lb_data, pool_name,
protocol=None,
listener_id=None):
lb_pools = lb_data['pools']
m_pool = self._create_pool_model(lb_data['model'].loadbalancer_id,
pool_name,
protocol=protocol,
listener_id=listener_id)
lb_pools.append(m_pool)
self._o_driver_lib.update_loadbalancer_status.reset_mock()
@@ -567,9 +668,10 @@ class TestOctaviaOvnProviderDriver(
if pool_name and p.name == pool_name:
return p
def _get_listener_from_lb_data(self, lb_data, protocol_port):
def _get_listener_from_lb_data(self, lb_data, protocol, protocol_port):
for l in lb_data['listeners']:
if l.protocol_port == protocol_port:
if (l.protocol_port == protocol_port and
l.protocol == protocol):
return l
def _get_pool_listeners(self, lb_data, pool_id):
@@ -721,9 +823,10 @@ class TestOctaviaOvnProviderDriver(
self._wait_for_status_and_validate(lb_data, [expected_status])
def _update_listener_and_validate(self, lb_data, protocol_port,
def _update_listener_and_validate(self, lb_data, protocol_port=80,
admin_state_up=None, protocol='TCP'):
m_listener = self._get_listener_from_lb_data(lb_data, protocol_port)
m_listener = self._get_listener_from_lb_data(
lb_data, protocol, protocol_port)
self._o_driver_lib.update_loadbalancer_status.reset_mock()
old_admin_state_up = m_listener.admin_state_up
operating_status = 'ONLINE'
@@ -752,8 +855,10 @@ class TestOctaviaOvnProviderDriver(
self._wait_for_status_and_validate(lb_data, [expected_status])
def _delete_listener_and_validate(self, lb_data, protocol_port=80):
m_listener = self._get_listener_from_lb_data(lb_data, protocol_port)
def _delete_listener_and_validate(self, lb_data, protocol='TCP',
protocol_port=80):
m_listener = self._get_listener_from_lb_data(
lb_data, protocol, protocol_port)
lb_data['listeners'].remove(m_listener)
self._o_driver_lib.update_loadbalancer_status.reset_mock()
self.ovn_driver.listener_delete(m_listener)
@@ -789,7 +894,10 @@ class TestOctaviaOvnProviderDriver(
expected_status = {
'loadbalancers': [{"id": lb_data['model'].loadbalancer_id,
"provisioning_status": "DELETED",
"operating_status": "OFFLINE"}]
"operating_status": "OFFLINE"}],
'listeners': [],
'pools': [],
'members': [],
}
del lb_data['model']
self._wait_for_status_and_validate(lb_data, [expected_status])
@@ -798,12 +906,21 @@ class TestOctaviaOvnProviderDriver(
lb_data = self._create_load_balancer_and_validate(
{'vip_network': 'vip_network',
'cidr': '10.0.0.0/24'})
self._create_pool_and_validate(lb_data, "p1")
self._create_pool_and_validate(lb_data, "p1",
protocol='TCP')
self._update_pool_and_validate(lb_data, "p1")
self._update_pool_and_validate(lb_data, "p1", admin_state_up=True)
self._update_pool_and_validate(lb_data, "p1", admin_state_up=False)
self._update_pool_and_validate(lb_data, "p1", admin_state_up=True)
self._create_pool_and_validate(lb_data, "p2")
self._create_pool_and_validate(lb_data, "p2",
protocol='UDP')
self._create_pool_and_validate(lb_data, "p3",
protocol='TCP')
self._update_pool_and_validate(lb_data, "p3",
admin_state_up=False)
self._update_pool_and_validate(lb_data, "p3",
admin_state_up=True)
self._update_pool_and_validate(lb_data, "p3",
admin_state_up=False)
self._create_pool_and_validate(lb_data, "p4",
protocol='UDP')
self._delete_pool_and_validate(lb_data, "p2")
self._delete_pool_and_validate(lb_data, "p1")
self._delete_load_balancer_and_validate(lb_data)
@@ -812,50 +929,72 @@ class TestOctaviaOvnProviderDriver(
lb_data = self._create_load_balancer_and_validate(
{'vip_network': 'vip_network',
'cidr': '10.0.0.0/24'})
self._create_pool_and_validate(lb_data, "p1")
pool_id = lb_data['pools'][0].pool_id
self._create_member_and_validate(
lb_data, pool_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
self._update_member_and_validate(lb_data, pool_id, "10.0.0.10")
# TCP Pool
self._create_pool_and_validate(lb_data, "p1",
protocol='TCP')
# UDP Pool
self._create_pool_and_validate(lb_data, "p2",
protocol='UDP')
pool_1_id = lb_data['pools'][0].pool_id
pool_2_id = lb_data['pools'][1].pool_id
# Members for TCP Pool
self._create_member_and_validate(
lb_data, pool_id, lb_data['vip_net_info'][1],
lb_data, pool_1_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
self._update_member_and_validate(lb_data, pool_1_id, "10.0.0.10")
self._create_member_and_validate(
lb_data, pool_1_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.11')
# Members for UDP Pool
self._create_member_and_validate(
lb_data, pool_2_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
self._update_member_and_validate(lb_data, pool_1_id, "10.0.0.10")
self._create_member_and_validate(
lb_data, pool_2_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.11')
# Disable loadbalancer
self._update_load_balancer_and_validate(lb_data,
admin_state_up=False)
# Enable loadbalancer back
self._update_load_balancer_and_validate(lb_data,
admin_state_up=True)
self._delete_member_and_validate(lb_data, pool_id,
# Delete members from TCP Pool
self._delete_member_and_validate(lb_data, pool_1_id,
lb_data['vip_net_info'][0],
'10.0.0.10')
self._delete_member_and_validate(lb_data, pool_id,
self._delete_member_and_validate(lb_data, pool_1_id,
lb_data['vip_net_info'][0],
'10.0.0.11')
# Add a member to the TCP Pool again
self._create_member_and_validate(
lb_data, pool_id, lb_data['vip_net_info'][1],
lb_data, pool_1_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
# Create new networks and add members from them to the TCP pool.
net20_info = self._create_net('net20', '20.0.0.0/24')
net20 = net20_info[0]
subnet20 = net20_info[1]
self._create_member_and_validate(lb_data, pool_id, subnet20, net20,
self._create_member_and_validate(lb_data, pool_1_id, subnet20, net20,
'20.0.0.4')
self._create_member_and_validate(lb_data, pool_id, subnet20, net20,
self._create_member_and_validate(lb_data, pool_1_id, subnet20, net20,
'20.0.0.6')
net30_info = self._create_net('net30', '30.0.0.0/24')
net30 = net30_info[0]
subnet30 = net30_info[1]
self._create_member_and_validate(lb_data, pool_id, subnet30, net30,
self._create_member_and_validate(lb_data, pool_1_id, subnet30, net30,
'30.0.0.6')
self._delete_member_and_validate(lb_data, pool_id, net20, '20.0.0.6')
self._delete_member_and_validate(lb_data, pool_1_id, net20, '20.0.0.6')
# Test creating Member without subnet
m_member = self._create_member_model(pool_id,
m_member = self._create_member_model(pool_1_id,
None,
'30.0.0.7', 80)
self.assertRaises(o_exceptions.UnsupportedOptionError,
@@ -863,36 +1002,76 @@ class TestOctaviaOvnProviderDriver(
# Deleting the pool should also delete the members.
self._delete_pool_and_validate(lb_data, "p1")
# Delete the whole LB.
self._delete_load_balancer_and_validate(lb_data)
def test_listener(self):
lb_data = self._create_load_balancer_and_validate(
{'vip_network': 'vip_network',
'cidr': '10.0.0.0/24'})
self._create_pool_and_validate(lb_data, "p1")
pool_id = lb_data['pools'][0].pool_id
self._create_member_and_validate(
lb_data, pool_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.4')
self._create_pool_and_validate(lb_data, "p1",
protocol='TCP')
self._create_pool_and_validate(lb_data, "p2",
protocol='UDP')
pool_1_id = lb_data['pools'][0].pool_id
pool_2_id = lb_data['pools'][1].pool_id
net_info = self._create_net('net1', '20.0.0.0/24')
self._create_member_and_validate(lb_data, pool_id,
net_info[1], net_info[0], '20.0.0.4')
self._create_listener_and_validate(lb_data, pool_id, 80)
self._update_listener_and_validate(lb_data, 80)
self._update_listener_and_validate(lb_data, 80, admin_state_up=True)
self._update_listener_and_validate(lb_data, 80, admin_state_up=False)
self._update_listener_and_validate(lb_data, 80, admin_state_up=True)
self._create_listener_and_validate(lb_data, pool_id, 82)
self._delete_listener_and_validate(lb_data, 82)
self._delete_listener_and_validate(lb_data, 80)
self._delete_member_and_validate(lb_data, pool_id,
# Create member in first pool
self._create_member_and_validate(
lb_data, pool_1_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.4')
self._create_member_and_validate(lb_data, pool_1_id,
net_info[1], net_info[0], '20.0.0.4')
# Create member in second pool
self._create_member_and_validate(
lb_data, pool_2_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.4')
self._create_member_and_validate(lb_data, pool_2_id,
net_info[1], net_info[0], '20.0.0.4')
# Play around with the first listener, linked to the first pool.
self._create_listener_and_validate(
lb_data, pool_1_id, 80, protocol='TCP')
self._update_listener_and_validate(lb_data, protocol_port=80)
self._update_listener_and_validate(
lb_data, protocol_port=80, admin_state_up=True)
self._update_listener_and_validate(
lb_data, protocol_port=80, admin_state_up=False)
self._update_listener_and_validate(
lb_data, protocol_port=80, admin_state_up=True)
self._create_listener_and_validate(
lb_data, pool_1_id, protocol_port=82, protocol='TCP')
# Play around with the second listener, linked to the second pool.
self._create_listener_and_validate(
lb_data, pool_2_id, 53, protocol='UDP')
self._update_listener_and_validate(lb_data, 53, protocol='UDP')
self._update_listener_and_validate(
lb_data, protocol_port=53, protocol='UDP', admin_state_up=True)
self._update_listener_and_validate(
lb_data, protocol_port=53, protocol='UDP', admin_state_up=False)
self._update_listener_and_validate(
lb_data, protocol_port=53, protocol='UDP', admin_state_up=True)
self._create_listener_and_validate(
lb_data, pool_2_id, protocol_port=21, protocol='UDP')
# Delete listeners linked to first pool.
self._delete_listener_and_validate(
lb_data, protocol_port=82, protocol='TCP')
self._delete_listener_and_validate(
lb_data, protocol_port=80, protocol='TCP')
# Delete first pool members.
self._delete_member_and_validate(lb_data, pool_1_id,
net_info[0], '20.0.0.4')
self._delete_member_and_validate(lb_data, pool_id,
self._delete_member_and_validate(lb_data, pool_1_id,
lb_data['vip_net_info'][0],
'10.0.0.4')
# Delete the now-empty first pool
self._delete_pool_and_validate(lb_data, "p1")
# Delete the rest
self._delete_load_balancer_and_validate(lb_data)
def _test_cascade_delete(self, pool=True, listener=True, member=True):
@@ -900,14 +1079,22 @@ class TestOctaviaOvnProviderDriver(
{'vip_network': 'vip_network',
'cidr': '10.0.0.0/24'})
if pool:
self._create_pool_and_validate(lb_data, "p1")
pool_id = lb_data['pools'][0].pool_id
self._create_pool_and_validate(lb_data, "p1", protocol='TCP')
self._create_pool_and_validate(lb_data, "p2", protocol='UDP')
pool_1_id = lb_data['pools'][0].pool_id
pool_2_id = lb_data['pools'][1].pool_id
if member:
self._create_member_and_validate(
lb_data, pool_id, lb_data['vip_net_info'][1],
lb_data, pool_1_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
self._create_member_and_validate(
lb_data, pool_2_id, lb_data['vip_net_info'][1],
lb_data['vip_net_info'][0], '10.0.0.10')
if listener:
self._create_listener_and_validate(lb_data, pool_id, 80)
self._create_listener_and_validate(
lb_data, pool_1_id, protocol_port=80, protocol='TCP')
self._create_listener_and_validate(
lb_data, pool_2_id, protocol_port=53, protocol='UDP')
self._delete_load_balancer_and_validate(lb_data, cascade=True)
@@ -937,12 +1124,6 @@ class TestOctaviaOvnProviderDriver(
self.assertRaises(o_exceptions.UnsupportedOptionError,
self.ovn_driver.listener_create, m_listener)
self._create_listener_and_validate(lb_data)
self.assertRaises(o_exceptions.UnsupportedOptionError,
self._create_listener_and_validate,
lb_data, protocol_port=80, protocol='UDP')
self.assertRaises(o_exceptions.UnsupportedOptionError,
self._update_listener_and_validate,
lb_data, protocol_port=80, protocol='UDP')
self._delete_load_balancer_and_validate(lb_data)
def _test_lrp_event_handler(self, cascade=False):
@@ -1065,10 +1246,10 @@ class TestOctaviaOvnProviderDriver(
{'vip_network': 'vip_network',
'cidr': '10.0.0.0/24'})
self._create_listener_and_validate(lb_data)
self._create_pool_and_validate(lb_data, "p1",
lb_data['listeners'][0].listener_id)
self._delete_pool_and_validate(lb_data, "p1",
lb_data['listeners'][0].listener_id)
self._create_pool_and_validate(
lb_data, "p1", listener_id=lb_data['listeners'][0].listener_id)
self._delete_pool_and_validate(
lb_data, "p1", listener_id=lb_data['listeners'][0].listener_id)
self._delete_listener_and_validate(lb_data)
self._delete_load_balancer_and_validate(lb_data)


@@ -11,6 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import os
import mock
@@ -134,6 +135,7 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
add_req_thread = mock.patch.object(ovn_driver.OvnProviderHelper,
'add_request')
self.ovn_lb = mock.MagicMock()
self.ovn_lb.name = 'foo_ovn_lb'
self.ovn_lb.external_ids = {
ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4'}
self.mock_add_request = add_req_thread.start()
@@ -262,8 +264,10 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
project_id=self.project_id,
vip_address=self.vip_address,
vip_network_id=self.vip_network_id)
mock.patch.object(ovn_driver.OvnProviderHelper, '_find_ovn_lb',
return_value=self.ovn_lb).start()
mock.patch.object(
ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
side_effect=lambda x, protocol=None:
self.ovn_lb if protocol else [self.ovn_lb]).start()
mock.patch.object(
ovn_driver.OvnProviderHelper, 'get_member_info',
return_value=[
@@ -438,9 +442,32 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
self.driver.listener_create(self.ref_listener)
self.mock_add_request.assert_called_once_with(expected_dict)
def test_listener_create_unsupported_protocol(self):
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.listener_create, self.fail_listener)
def test_listener_create_multiple_protocols(self):
self.ovn_lb.protocol = ['tcp']
info = {'id': self.ref_listener.listener_id,
'protocol': self.ref_listener.protocol,
'protocol_port': self.ref_listener.protocol_port,
'default_pool_id': self.ref_listener.default_pool_id,
'admin_state_up': self.ref_listener.admin_state_up,
'loadbalancer_id': self.ref_listener.loadbalancer_id}
expected_dict = {'type': ovn_driver.REQ_TYPE_LISTENER_CREATE,
'info': info}
self.driver.listener_create(self.ref_listener)
self.mock_add_request.assert_called_once_with(expected_dict)
self.ovn_lb.protocol = ['UDP']
info['protocol'] = 'UDP'
expected_dict = {'type': ovn_driver.REQ_TYPE_LISTENER_CREATE,
'info': info}
self.driver.listener_create(self.ref_listener)
self.mock_add_request.assert_called_with(expected_dict)
def test_listener_update(self):
info = {'id': self.ref_listener.listener_id,
'protocol_port': self.ref_listener.protocol_port,
'protocol': self.ref_pool.protocol,
'admin_state_up': self.ref_listener.admin_state_up,
'loadbalancer_id': self.ref_listener.loadbalancer_id}
if self.ref_listener.default_pool_id:
@ -450,22 +477,10 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
self.driver.listener_update(self.ref_listener, self.ref_listener)
self.mock_add_request.assert_called_once_with(expected_dict)
@mock.patch.object(ovn_driver.OvnProviderHelper, '_is_listener_in_lb',
return_value=True)
def test_listener_failure(self, mock_listener):
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.listener_create, self.fail_listener)
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.listener_update, self.ref_listener,
self.fail_listener)
self.ovn_lb.protocol = ['TCP']
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.listener_create,
self.ref_listener_udp)
def test_listener_delete(self):
info = {'id': self.ref_listener.listener_id,
'protocol_port': self.ref_listener.protocol_port,
'protocol': self.ref_pool.protocol,
'loadbalancer_id': self.ref_listener.loadbalancer_id}
expected_dict = {'type': ovn_driver.REQ_TYPE_LISTENER_DELETE,
'info': info}
@ -516,7 +531,7 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
self.driver.loadbalancer_failover(info['id'])
self.mock_add_request.assert_called_once_with(expected_dict)
def test_pool_create_unsupported_protocol(self):
self.ref_pool.protocol = 'HTTP'
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.pool_create, self.ref_pool)
@ -530,6 +545,7 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
info = {'id': self.ref_pool.pool_id,
'loadbalancer_id': self.ref_pool.loadbalancer_id,
'listener_id': self.ref_pool.listener_id,
'protocol': self.ref_pool.protocol,
'admin_state_up': self.ref_pool.admin_state_up}
expected_dict = {'type': ovn_driver.REQ_TYPE_POOL_CREATE,
'info': info}
@ -540,6 +556,7 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
self.ref_pool.admin_state_up = data_models.UnsetType()
info = {'id': self.ref_pool.pool_id,
'loadbalancer_id': self.ref_pool.loadbalancer_id,
'protocol': self.ref_pool.protocol,
'listener_id': self.ref_pool.listener_id,
'admin_state_up': True}
expected_dict = {'type': ovn_driver.REQ_TYPE_POOL_CREATE,
@ -551,6 +568,7 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
# Pretend we don't have members
self.ref_pool.members = []
info = {'id': self.ref_pool.pool_id,
'protocol': self.ref_pool.protocol,
'loadbalancer_id': self.ref_pool.loadbalancer_id}
expected = {'type': ovn_driver.REQ_TYPE_POOL_DELETE,
'info': info}
@ -559,6 +577,7 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
def test_pool_delete_with_members(self):
info = {'id': self.ref_pool.pool_id,
'protocol': self.ref_pool.protocol,
'loadbalancer_id': self.ref_pool.loadbalancer_id}
expected = {'type': ovn_driver.REQ_TYPE_POOL_DELETE,
'info': info}
@ -578,6 +597,7 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
def test_pool_update(self):
info = {'id': self.ref_update_pool.pool_id,
'loadbalancer_id': self.ref_update_pool.loadbalancer_id,
'protocol': self.ref_pool.protocol,
'admin_state_up': self.ref_update_pool.admin_state_up}
expected_dict = {'type': ovn_driver.REQ_TYPE_POOL_UPDATE,
'info': info}
@ -629,10 +649,12 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'admin_state_up': False}
self.ports = {'ports': [{
'fixed_ips': [{'ip_address': self.vip_address}],
'network_id': self.vip_network_id,
'id': self.port1_id}]}
self.pool = {'id': self.pool_id,
'loadbalancer_id': self.loadbalancer_id,
'listener_id': self.listener_id,
'protocol': "TCP",
'admin_state_up': False}
self.member = {'id': self.member_id,
'address': self.member_address,
@ -646,6 +668,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'add_request')
self.mock_add_request = add_req_thread.start()
self.ovn_lb = mock.MagicMock()
self.ovn_lb.protocol = ['tcp']
self.ovn_lb.uuid = uuidutils.generate_uuid()
self.member_line = (
'member_%s_%s:%s' %
@ -664,8 +687,13 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
mock.patch.object(self.helper,
'_find_ovn_lb_with_pool_key',
return_value=self.ovn_lb).start()
self.mock_find_ovn_lbs = mock.patch.object(
ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
side_effect=lambda x, protocol=None:
self.ovn_lb if protocol else [self.ovn_lb])
self.mock_find_ovn_lbs.start()
mock.patch.object(self.helper,
'_get_pool_listeners',
return_value=[]).start()
@ -727,6 +755,114 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
(self.helper.ovn_nbdb_api.ls_get.return_value.
execute.return_value) = self.network
def test__is_lb_empty(self):
f = self.helper._is_lb_empty
self.assertFalse(f(self.ovn_lb.external_ids))
self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id)
self.assertFalse(f(self.ovn_lb.external_ids))
self.ovn_lb.external_ids.pop('pool_%s' % self.pool_id)
self.assertTrue(f(self.ovn_lb.external_ids))
def test__find_ovn_lbs(self):
self.mock_find_ovn_lbs.stop()
f = self.helper._find_ovn_lbs
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [self.ovn_lb]
# Without protocol specified return a list
found = f(self.ovn_lb.id)
self.assertListEqual(found, [self.ovn_lb])
self.helper.ovn_nbdb_api.db_find_rows.assert_called_once_with(
'Load_Balancer', ('name', '=', self.ovn_lb.id))
self.helper.ovn_nbdb_api.db_find_rows.reset_mock()
# With protocol specified return an instance
found = f(self.ovn_lb.id, protocol='tcp')
self.assertEqual(found, self.ovn_lb)
self.helper.ovn_nbdb_api.db_find_rows.reset_mock()
# LB with given protocol not found
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = []
self.assertRaises(
idlutils.RowNotFound,
f,
self.ovn_lb.id,
protocol='UDP')
# Multiple protocols
udp_lb = copy.copy(self.ovn_lb)
udp_lb.protocol = ['udp']
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [self.ovn_lb, udp_lb]
found = f(self.ovn_lb.id)
self.assertListEqual(found, [self.ovn_lb, udp_lb])
def test__get_or_create_ovn_lb_no_lb_found(self):
self.mock_find_ovn_lbs.stop()
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = []
self.assertRaises(
idlutils.RowNotFound,
self.helper._get_or_create_ovn_lb,
self.ovn_lb.name,
protocol='TCP',
admin_state_up='True')
@mock.patch.object(ovn_driver.OvnProviderHelper, 'lb_create')
def test__get_or_create_ovn_lb_required_proto_not_found(self, lbc):
udp_lb = copy.copy(self.ovn_lb)
udp_lb.protocol = ['udp']
self.mock_find_ovn_lbs.stop()
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.side_effect = [[udp_lb], [self.ovn_lb]]
self.helper._get_or_create_ovn_lb(
self.ovn_lb.name,
protocol='TCP',
admin_state_up='True')
expected_lb_info = {
'id': self.ovn_lb.name,
'protocol': 'tcp',
'vip_address': udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_VIP_KEY),
'vip_port_id':
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY),
ovn_driver.LB_EXT_IDS_LR_REF_KEY:
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_LR_REF_KEY),
ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_LS_REFS_KEY),
'admin_state_up': 'True',
ovn_driver.LB_EXT_IDS_VIP_FIP_KEY:
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_VIP_FIP_KEY)}
lbc.assert_called_once_with(expected_lb_info, protocol='tcp')
def test__get_or_create_ovn_lb_found(self):
self.mock_find_ovn_lbs.stop()
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [self.ovn_lb]
found = self.helper._get_or_create_ovn_lb(
self.ovn_lb.name,
protocol='TCP',
admin_state_up='True')
self.assertEqual(found, self.ovn_lb)
def test__get_or_create_ovn_lb_lb_without_protocol(self):
self.mock_find_ovn_lbs.stop()
self.ovn_lb.protocol = []
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [self.ovn_lb]
found = self.helper._get_or_create_ovn_lb(
self.ovn_lb.name,
protocol='TCP',
admin_state_up='True')
self.assertEqual(found, self.ovn_lb)
self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid, ('protocol', 'tcp'))
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
def test_lb_create_disabled(self, net_dr):
self.lb['admin_state_up'] = False
@ -743,7 +879,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
'enabled': 'False'},
name=mock.ANY,
protocol=None)
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
def test_lb_create_enabled(self, net_dr):
@ -761,12 +897,43 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
'enabled': 'True'},
name=mock.ANY,
protocol=None)
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
def test_lb_create_on_multi_protocol(self, net_dr):
"""This test situation when new protocol is added
to the same loadbalancer and we need to add
additional OVN lb with the same name.
"""
self.lb['admin_state_up'] = True
self.lb['protocol'] = 'UDP'
self.lb[ovn_driver.LB_EXT_IDS_LR_REF_KEY] = 'foo'
self.lb[ovn_driver.LB_EXT_IDS_LS_REFS_KEY] = "{\"neutron-foo\": 1}"
net_dr.return_value.neutron_client.list_ports.return_value = (
self.ports)
status = self.helper.lb_create(self.lb, protocol='UDP')
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.ONLINE)
self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
'Load_Balancer', external_ids={
ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY,
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
ovn_driver.LB_EXT_IDS_LR_REF_KEY: 'foo',
'enabled': 'True'},
name=mock.ANY,
protocol='udp')
self.helper._update_lb_to_ls_association.assert_has_calls([
mock.call(self.ovn_lb, associate=True,
network_id=self.lb['vip_network_id']),
mock.call(self.ovn_lb, associate=True, network_id='foo')])
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
@mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
def test_lb_create_exception(self, del_port, net_dr):
self.helper._find_ovn_lbs.side_effect = [RuntimeError]
net_dr.return_value.neutron_client.list_ports.return_value = (
self.ports)
status = self.helper.lb_create(self.lb)
@ -791,7 +958,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
@mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
def test_lb_delete_row_not_found(self, del_port):
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
status = self.helper.lb_delete(self.lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.DELETED)
@ -803,13 +970,14 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
@mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
def test_lb_delete_exception(self, del_port, net_dr):
self.helper.ovn_nbdb_api.lb_del.side_effect = [RuntimeError]
status = self.helper.lb_delete(self.lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ERROR)
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.ERROR)
self.helper.ovn_nbdb_api.lb_del.assert_called_once_with(
self.ovn_lb.uuid)
del_port.assert_called_once_with('foo_port')
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
@ -817,12 +985,13 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
def test_lb_delete_port_not_found(self, del_port, net_dr):
net_dr.return_value.neutron_client.delete_port.return_value = None
del_port.side_effect = [n_exc.PortNotFoundClient]
status = self.helper.lb_delete(self.ovn_lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.DELETED)
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.OFFLINE)
self.helper.ovn_nbdb_api.lb_del.assert_called_once_with(
self.ovn_lb.uuid)
del_port.assert_called_once_with('foo_port')
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
@ -860,6 +1029,20 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with(
self.router.uuid, self.ovn_lb.uuid)
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
def test_lb_delete_multiple_protocols(self, net_dr):
net_dr.return_value.neutron_client.delete_port.return_value = None
self.mock_find_ovn_lbs.stop()
udp_lb = copy.copy(self.ovn_lb)
udp_lb.protocol = ['udp']
udp_lb.uuid = 'foo_uuid'
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [self.ovn_lb, udp_lb]
self.helper.lb_delete(self.lb)
self.helper.ovn_nbdb_api.lb_del.assert_has_calls([
mock.call(self.ovn_lb.uuid),
mock.call(udp_lb.uuid)])
def test_lb_failover(self):
status = self.helper.lb_failover(self.lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@ -867,6 +1050,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
@mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
def test_lb_update_disabled(self, refresh_vips):
self.lb['admin_state_up'] = False
status = self.helper.lb_update(self.lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
@ -880,6 +1064,8 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
@mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
def test_lb_update_enabled(self, refresh_vips):
# Change the mock, it's enabled by default.
self.ovn_lb.external_ids.update({'enabled': False})
self.lb['admin_state_up'] = True
status = self.helper.lb_update(self.lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@ -892,8 +1078,35 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'Load_Balancer', self.ovn_lb.uuid,
('external_ids', {'enabled': 'True'}))
@mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
def test_lb_update_enabled_multiple_protocols(self, refresh_vips):
self.mock_find_ovn_lbs.stop()
self.ovn_lb.external_ids.update({'enabled': 'False'})
udp_lb = copy.deepcopy(self.ovn_lb)
udp_lb.protocol = ['udp']
udp_lb.uuid = 'foo_uuid'
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [self.ovn_lb, udp_lb]
self.lb['admin_state_up'] = True
status = self.helper.lb_update(self.lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['loadbalancers'][0]['operating_status'],
constants.ONLINE)
refresh_vips.assert_has_calls([
mock.call(self.ovn_lb.uuid, self.ovn_lb.external_ids),
mock.ANY,
mock.ANY,
mock.call(udp_lb.uuid, udp_lb.external_ids)],
any_order=False)
self.helper.ovn_nbdb_api.db_set.assert_has_calls([
mock.call('Load_Balancer', self.ovn_lb.uuid,
('external_ids', {'enabled': 'True'})),
mock.call('Load_Balancer', udp_lb.uuid,
('external_ids', {'enabled': 'True'}))])
def test_lb_update_exception(self):
self.helper._find_ovn_lbs.side_effect = [RuntimeError]
status = self.helper.lb_update(self.lb)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ERROR)
@ -966,7 +1179,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.helper.ovn_nbdb_api.db_set.call_count)
def test_listener_create_exception(self):
self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError]
status = self.helper.listener_create(self.listener)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
@ -996,14 +1209,23 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)
def test_listener_update_row_not_found(self):
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
status = self.helper.listener_update(self.listener)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ERROR)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.ERROR)
self.helper.ovn_nbdb_api.db_set.assert_not_called()
@mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
def test_listener_update_exception(self, refresh_vips):
refresh_vips.side_effect = [RuntimeError]
status = self.helper.listener_update(self.listener)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.ERROR)
self.helper.ovn_nbdb_api.db_set.assert_not_called()
@mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
def test_listener_update_listener_enabled(self, refresh_vips):
@ -1064,8 +1286,18 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
constants.OFFLINE)
self.helper.ovn_nbdb_api.db_remove.assert_not_called()
def test_listener_delete_row_not_found(self):
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
status = self.helper.listener_delete(self.listener)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['listeners'][0]['provisioning_status'],
constants.DELETED)
self.assertEqual(status['listeners'][0]['operating_status'],
constants.OFFLINE)
def test_listener_delete_exception(self):
self.helper.ovn_nbdb_api.db_remove.side_effect = [RuntimeError]
status = self.helper.listener_delete(self.listener)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
@ -1090,6 +1322,41 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
refresh_vips.assert_called_once_with(
self.ovn_lb.uuid, self.ovn_lb.external_ids)
@mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
def test_listener_delete_ovn_lb_not_empty(self, lb_empty):
lb_empty.return_value = False
self.helper.listener_delete(self.listener)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
'external_ids', 'listener_%s' % self.listener_id)
self.helper.ovn_nbdb_api.lb_del.assert_not_called()
@mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
def test_listener_delete_ovn_lb_empty_lb_empty(self, lb_empty):
lb_empty.return_value = True
self.helper.listener_delete(self.listener)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
'external_ids', 'listener_%s' % self.listener_id)
self.helper.ovn_nbdb_api.lb_del.assert_not_called()
# Assert that protocol has been set to [].
self.helper.ovn_nbdb_api.db_set.assert_has_calls([
mock.call('Load_Balancer', self.ovn_lb.uuid,
('protocol', []))])
@mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
def test_listener_delete_ovn_lb_empty_lb_not_empty(self, lb_empty):
self.mock_find_ovn_lbs.stop()
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.side_effect = [[self.ovn_lb], []]
lb_empty.return_value = True
self.helper.listener_delete(self.listener)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
'external_ids', 'listener_%s' % self.listener_id)
self.helper.ovn_nbdb_api.lb_del.assert_called_once_with(
self.ovn_lb.uuid)
def test_pool_create(self):
status = self.helper.pool_create(self.pool)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@ -1111,7 +1378,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
constants.OFFLINE)
def test_pool_create_exception(self):
self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError]
status = self.helper.pool_update(self.pool)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ERROR)
@ -1223,15 +1490,23 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.helper.ovn_nbdb_api.db_set.assert_has_calls(
expected_calls)
def test_pool_delete_row_not_found(self):
self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
status = self.helper.pool_delete(self.pool)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.DELETED)
self.helper.ovn_nbdb_api.db_remove.assert_not_called()
self.helper.ovn_nbdb_api.db_set.assert_not_called()
def test_pool_delete_exception(self):
self.helper.ovn_nbdb_api.db_set.side_effect = [RuntimeError]
status = self.helper.pool_delete(self.pool)
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
constants.ACTIVE)
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ERROR)
self.helper.ovn_nbdb_api.db_remove.assert_not_called()
self.helper.ovn_nbdb_api.db_set.assert_not_called()
def test_pool_delete_associated_listeners(self):
self.helper._get_pool_listeners.return_value = ['listener1']
@ -1263,6 +1538,41 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'Load_Balancer', self.ovn_lb.uuid,
'external_ids', 'pool_%s:D' % self.pool_id)
@mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
def test_pool_delete_ovn_lb_not_empty(self, lb_empty):
lb_empty.return_value = False
self.helper.pool_delete(self.pool)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
'external_ids', 'pool_%s' % self.pool_id)
self.helper.ovn_nbdb_api.lb_del.assert_not_called()
@mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
def test_pool_delete_ovn_lb_empty_lb_empty(self, lb_empty):
lb_empty.return_value = True
self.helper.pool_delete(self.pool)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
'external_ids', 'pool_%s' % self.pool_id)
self.helper.ovn_nbdb_api.lb_del.assert_not_called()
# Assert that protocol has been set to [].
self.helper.ovn_nbdb_api.db_set.assert_called_with(
'Load_Balancer', self.ovn_lb.uuid,
('protocol', []))
@mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
def test_pool_delete_ovn_lb_empty_lb_not_empty(self, lb_empty):
self.mock_find_ovn_lbs.stop()
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.side_effect = [[self.ovn_lb], []]
lb_empty.return_value = True
self.helper.pool_delete(self.pool)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ovn_lb.uuid,
'external_ids', 'pool_%s' % self.pool_id)
self.helper.ovn_nbdb_api.lb_del.assert_called_once_with(
self.ovn_lb.uuid)
def test_member_create(self):
self.ovn_lb.external_ids = mock.MagicMock()
status = self.helper.member_create(self.member)
@ -1959,7 +2269,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'info':
{'action': 'associate',
'vip_fip': '10.0.0.1',
'ovn_lb': self.ovn_lb},
'type': 'handle_vip_fip'}
self.mock_add_request.assert_called_once_with(expected_call)
@ -1985,7 +2295,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'info':
{'action': 'disassociate',
'vip_fip': None,
'ovn_lb': self.ovn_lb},
'type': 'handle_vip_fip'}
self.mock_add_request.assert_called_once_with(expected_call)
@ -2000,7 +2310,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.mock_add_request.assert_not_called()
@mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
'_find_ovn_lbs')
def test_vip_port_update_handler_lb_not_found(self, lb):
lb.side_effect = [idlutils.RowNotFound]
self.switch_port_event = ovn_driver.LogicalSwitchPortUpdateEvent(
@ -2014,13 +2324,39 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.mock_add_request.assert_not_called()
@mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
'_find_ovn_lbs')
def test_vip_port_update_handler_multiple_lbs(self, lb):
lb1 = mock.MagicMock()
lb2 = mock.MagicMock()
lb.return_value = [lb1, lb2]
self.switch_port_event = ovn_driver.LogicalSwitchPortUpdateEvent(
self.helper)
port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
attrs = {'external_ids':
{ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}}
row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs=attrs)
self.switch_port_event.run(mock.ANY, row, mock.ANY)
def expected_call(lb):
return {'type': 'handle_vip_fip',
'info':
{'action': mock.ANY,
'vip_fip': None,
'ovn_lb': lb}}
self.mock_add_request.assert_has_calls([
mock.call(expected_call(lb1)),
mock.call(expected_call(lb2))])
@mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
'_find_ovn_lbs')
def test_handle_vip_fip_disassociate(self, flb):
lb = mock.MagicMock()
fip_info = {
'action': 'disassociate',
'vip_fip': None,
'ovn_lb': lb}
flb.return_value = lb
self.helper.handle_vip_fip(fip_info)
calls = [
@ -2031,13 +2367,14 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.helper.ovn_nbdb_api.assert_has_calls(calls)
@mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
'_find_ovn_lbs')
@mock.patch('ovn_octavia_provider.driver.get_network_driver')
def test_handle_vip_fip_associate(self, net_dr, fb):
lb = mock.MagicMock()
fip_info = {
'action': 'associate',
'vip_fip': '10.0.0.123',
'ovn_lb': lb}
members = 'member_%s_%s:%s' % (self.member_id,
self.member_address,
self.member_port)
@ -2046,7 +2383,6 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'pool_%s' % self.pool_id: members,
'neutron:vip': '172.26.21.20'}
lb.external_ids = external_ids
fb.return_value = lb
@ -2062,15 +2398,6 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'172.26.21.20:80': '192.168.2.149:1010'}))]
self.helper.ovn_nbdb_api.assert_has_calls(calls)
@mock.patch.object(ovn_driver, 'atexit')
def test_ovsdb_connections(self, mock_atexit):
ovn_driver.OvnProviderHelper.ovn_nbdb_api = None


@ -0,0 +1,5 @@
---
fixes:
- |
The OVN Octavia provider driver now supports both TCP and UDP
pool/listener protocols configured in the same Octavia load balancer.