
Centralize traffic when LB and member have FIPs

When a Load Balancer and its member both have a FIP assigned
and the environment is configured to use DVR, the member
FIP needs to be centralized. This is a current core OVN
limitation that should be solved in [1].

This patch adds this mechanism to the OVN Client and the
OVN Octavia provider driver (a rough sketch of the NAT
manipulation follows the case list below).

It covers the following cases:

1) FIP association on a port that is a member of
   some LB - centralize it.
2) FIP association on an LB VIP - find the members'
   FIPs and centralize them.
3) Adding a member to an LB that already has a FIP
   configured - check whether the member has a FIP
   and centralize it.
4) The reverse of each of the above cases.
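
The core of the mechanism is manipulating the member FIP's
dnat_and_snat NAT row in the OVN Northbound DB: clearing
logical_port/external_mac centralizes the FIP, restoring them lets it
be distributed again. A rough sketch of that idea (the function names
and the nb_idl handle are illustrative; the real logic lives in
OVNClient._handle_lb_fip_cmds() and OvnProviderHelper.handle_member_dvr()):

    # Minimal sketch, assuming an ovsdbapp-style NB API handle (nb_idl)
    # and the NAT row of the member FIP.
    def centralize_member_fip(nb_idl, nat):
        # Dropping logical_port and external_mac from the dnat_and_snat
        # entry forces the FIP traffic through the gateway chassis.
        nb_idl.db_clear('NAT', nat.uuid, 'external_mac').execute(
            check_error=True)
        nb_idl.db_clear('NAT', nat.uuid, 'logical_port').execute(
            check_error=True)

    def decentralize_member_fip(nb_idl, nat, member_port):
        # Restoring both columns allows the FIP to be distributed (DVR)
        # again; external_mac is only set while the port is ACTIVE.
        nb_idl.db_set('NAT', nat.uuid,
                      ('logical_port', member_port['id'])).execute(
            check_error=True)
        if member_port['status'] == 'ACTIVE':
            nb_idl.db_set('NAT', nat.uuid,
                          ('external_mac', member_port['mac_address'])
                          ).execute(check_error=True)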

In addition, I needed to extend the OVN LB member external_ids
entry with information about the member subnet_id in order
to easily track the member port from the OVN mechanism
driver.
That means I also needed to support both the old and the new
convention. This patch adds that code as well.

Old convention:
member_`member_id`_`ip_address`:`port`

New convention:
member_`member_id`_`ip_address`:`port`_`subnet_id`
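
Roughly, building and parsing these keys works like the sketch below
(simplified and illustrative only; the real _get_member_key() in
ovn_driver.py also accepts Octavia data-model objects, and the old
convention is still matched for backward compatibility):

    # Assumes a dict-like member with 'id', 'address', 'protocol_port'
    # and an optional 'subnet_id'.
    MEMBER_PREFIX = 'member_'

    def build_member_key(member, old_convention=False):
        key = '%s%s_%s:%s' % (MEMBER_PREFIX, member['id'],
                              member['address'], member['protocol_port'])
        if not old_convention and member.get('subnet_id'):
            key += '_' + member['subnet_id']
        return key

    def parse_member_key(key):
        # member_<id>_<ip>:<port>             (old convention)
        # member_<id>_<ip>:<port>_<subnet_id> (new convention)
        parts = key.split('_')
        info = {'id': parts[1],
                'address': parts[2].split(':')[0],
                'protocol_port': parts[2].split(':')[1]}
        if len(parts) == 4:
            info['subnet_id'] = parts[3]
        return info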

[1] https://bugzilla.redhat.com/show_bug.cgi?id=1793897

Related-Bug: #1860662

Change-Id: I254f0ac28f7585b699a8238e01ffb37dd70282ef
tags/7.1.0
Maciej Józefczyk
parent commit 57ac38921e
7 changed files with 842 additions and 187 deletions
  1. networking_ovn/common/constants.py (+12 / -0)
  2. networking_ovn/common/ovn_client.py (+167 / -2)
  3. networking_ovn/octavia/ovn_driver.py (+243 / -97)
  4. networking_ovn/tests/functional/octavia/test_ovn_driver.py (+24 / -21)
  5. networking_ovn/tests/unit/fakes.py (+1 / -0)
  6. networking_ovn/tests/unit/l3/test_l3_ovn.py (+140 / -2)
  7. networking_ovn/tests/unit/octavia/test_ovn_driver.py (+255 / -65)

networking_ovn/common/constants.py (+12 / -0)

@@ -157,9 +157,21 @@ MAINTENANCE_DELETE_TYPE_ORDER = {
# peer router port (connecting to the logical router).
DEFAULT_ADDR_FOR_LSP_WITH_PEER = 'router'

# FIP ACTIONS
FIP_ACTION_ASSOCIATE = 'fip_associate'
FIP_ACTION_DISASSOCIATE = 'fip_disassociate'

# Loadbalancer constants
LRP_PREFIX = "lrp-"
LB_VIP_PORT_PREFIX = "ovn-lb-vip-"
LB_EXT_IDS_LS_REFS_KEY = 'ls_refs'
LB_EXT_IDS_LR_REF_KEY = 'lr_ref'
LB_EXT_IDS_POOL_PREFIX = 'pool_'
LB_EXT_IDS_LISTENER_PREFIX = 'listener_'
LB_EXT_IDS_MEMBER_PREFIX = 'member_'
LB_EXT_IDS_VIP_KEY = 'neutron:vip'
LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip'
LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id'

# Hash Ring constants
HASH_RING_NODES_TIMEOUT = 60


networking_ovn/common/ovn_client.py (+167 / -2)

@@ -745,6 +745,21 @@ class OVNClient(object):
if self._nb_idl.is_col_present('NAT', 'external_ids'):
columns['external_ids'] = ext_ids

# TODO(mjozefcz): Remove this workaround when OVN LB
# will support both decentralized FIPs on LB and member.
lb_member_fip = self._is_lb_member_fip(context, floatingip)
if (config.is_ovn_distributed_floating_ip() and
lb_member_fip):
LOG.warning("Port %s is configured as a member "
"of one of OVN Load_Balancers and "
"Load_Balancer has FIP assigned. "
"In order to make traffic work member "
"FIP needs to be centralized, even if "
"this environment is configured as DVR. "
"Removing logical_port and external_mac from "
"NAT entry.", floatingip['port_id'])
columns.pop('logical_port', None)
columns.pop('external_mac', None)
commands.append(self._nb_idl.add_nat_rule_in_lrouter(gw_lrouter_name,
**columns))

@@ -761,12 +776,158 @@ class OVNClient(object):
self._nb_idl.db_set('Logical_Switch_Port', private_lsp.uuid,
('external_ids', port_fip))
)
if not lb_member_fip:
commands.extend(
self._handle_lb_fip_cmds(
context, private_lsp,
action=ovn_const.FIP_ACTION_ASSOCIATE))
else:
LOG.warning("LSP for floatingip %s, has not been found! "
"Cannot set FIP on VIP.",
floatingip['id'])
self._transaction(commands, txn=txn)

def _is_lb_member_fip(self, context, fip):
port = self._plugin.get_port(
context, fip['port_id'])
member_subnet = [ip['subnet_id'] for ip in port['fixed_ips']
if ip['ip_address'] == fip['fixed_ip_address']]
if not member_subnet:
return False
member_subnet = member_subnet[0]

ls = self._nb_idl.lookup(
'Logical_Switch', utils.ovn_name(port['network_id']))
for lb in ls.load_balancer:
for ext_id in lb.external_ids.keys():
if ext_id.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
members = lb.external_ids[ext_id]
if not members:
continue
for member in members.split(','):
if ('%s:' % fip['fixed_ip_address'] in member and
'_%s' % member_subnet in member):
return True
return False

def _handle_lb_fip_cmds(self, context, lb_lsp,
action=ovn_const.FIP_ACTION_ASSOCIATE):
commands = []
if not config.is_ovn_distributed_floating_ip():
return commands

lb_lsp_fip_port = lb_lsp.external_ids.get(
ovn_const.OVN_PORT_NAME_EXT_ID_KEY, '')

if not lb_lsp_fip_port.startswith(ovn_const.LB_VIP_PORT_PREFIX):
return commands

# This is a FIP on LB VIP.
# Loop over members and delete FIP external_mac/logical_port entries.
# Find all LBs with this LSP as VIP.
lbs = self._nb_idl.db_find_rows(
'Load_Balancer',
('external_ids', '=', {
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: lb_lsp.name})
).execute(check_error=True)
for lb in lbs:
# GET all LS where given LB is linked.
ls_linked = [
item
for item in self._nb_idl.db_find_rows(
'Logical_Switch').execute(check_error=True)
if lb in item.load_balancer]

if not ls_linked:
return

# Find out IP addresses and subnets of configured members.
members_to_verify = []
for ext_id in lb.external_ids.keys():
if ext_id.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
members = lb.external_ids[ext_id]
if not members:
continue
for member in members.split(','):
# NOTE(mjozefcz): Remove this workaround in W release.
# Last argument of member info is the subnet_id
# the member comes from.
# member_`id`_`ip`:`port`_`subnet_id`
member_info = member.split('_')
if len(member_info) >= 4:
m = {}
m['id'] = member_info[1]
m['ip'] = member_info[2].split(':')[0]
m['subnet_id'] = member_info[3]
try:
subnet = self._plugin.get_subnet(
context, m['subnet_id'])
m['network_id'] = subnet['network_id']
members_to_verify.append(m)
except n_exc.SubnetNotFound:
LOG.debug("Cannot find subnet details "
"for OVN LB member "
"%s.", m['id'])

# Find a member LSPs from all linked LS to this LB.
for member in members_to_verify:
ls = self._nb_idl.lookup(
'Logical_Switch', utils.ovn_name(member['network_id']))
for lsp in ls.ports:
if not lsp.addresses:
continue
if member['ip'] in utils.remove_macs_from_lsp_addresses(
lsp.addresses):
member['lsp'] = lsp
nats = self._nb_idl.db_find_rows(
'NAT',
('external_ids', '=', {
ovn_const.OVN_FIP_PORT_EXT_ID_KEY: lsp.name})
).execute(check_error=True)

for nat in nats:
if action == ovn_const.FIP_ACTION_ASSOCIATE:
# NOTE(mjozefcz): We should delete logical_port
# and external_mac entries from member NAT in
# order to make traffic work.
LOG.warning(
"Port %s is configured as a member "
"of one of OVN Load_Balancers and "
"Load_Balancer has FIP assigned. "
"In order to make traffic work member "
"FIP needs to be centralized, even if "
"this environment is configured as "
"DVR. Removing logical_port and "
"external_mac from NAT entry.",
lsp.name)
commands.extend([
self._nb_idl.db_clear(
'NAT', nat.uuid, 'external_mac'),
self._nb_idl.db_clear(
'NAT', nat.uuid, 'logical_port')])
else:
# NOTE(mjozefcz): The FIP from LB VIP is
# disassociated now. We can decentralize
# member FIPs now.
LOG.warning(
"Port %s is configured as a member "
"of one of OVN Load_Balancers and "
"Load_Balancer has FIP disassociated. "
"DVR for this port can be enabled back.",
lsp.name)
commands.append(self._nb_idl.db_set(
'NAT', nat.uuid,
('logical_port', lsp.name)))
port = self._plugin.get_port(context, lsp.name)
if port['status'] == const.PORT_STATUS_ACTIVE:
commands.append(
self._nb_idl.db_set(
'NAT', nat.uuid,
('external_mac',
port['mac_address'])))

return commands

def _delete_floatingip(self, fip, lrouter, txn=None):
commands = [self._nb_idl.delete_nat_rule_in_lrouter(
lrouter, type='dnat_and_snat',
@@ -782,8 +943,12 @@ class OVNClient(object):
self._nb_idl.db_remove(
'Logical_Switch_Port', private_lsp.uuid,
'external_ids',
(ovn_const.OVN_PORT_FIP_EXT_ID_KEY))
)
(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)))
commands.extend(
self._handle_lb_fip_cmds(
n_context.get_admin_context(),
private_lsp,
action=ovn_const.FIP_ACTION_DISASSOCIATE))
except KeyError:
LOG.debug("FIP %s doesn't have external_ids.", fip)
self._transaction(commands, txn=txn)


networking_ovn/octavia/ovn_driver.py (+243 / -97)

@@ -62,23 +62,17 @@ REQ_TYPE_MEMBER_UPDATE = 'member_update'
REQ_TYPE_LB_CREATE_LRP_ASSOC = 'lb_create_lrp_assoc'
REQ_TYPE_LB_DELETE_LRP_ASSOC = 'lb_delete_lrp_assoc'
REQ_TYPE_HANDLE_VIP_FIP = 'handle_vip_fip'
REQ_TYPE_HANDLE_MEMBER_DVR = 'handle_member_dvr'

REQ_TYPE_EXIT = 'exit'

REQ_INFO_ACTION_ASSOCIATE = 'associate'
REQ_INFO_ACTION_DISASSOCIATE = 'disassociate'
REQ_INFO_MEMBER_ADDED = 'member_added'
REQ_INFO_MEMBER_DELETED = 'member_deleted'

DISABLED_RESOURCE_SUFFIX = 'D'

LB_EXT_IDS_LS_REFS_KEY = 'ls_refs'
LB_EXT_IDS_LR_REF_KEY = 'lr_ref'
LB_EXT_IDS_POOL_PREFIX = 'pool_'
LB_EXT_IDS_LISTENER_PREFIX = 'listener_'
LB_EXT_IDS_MEMBER_PREFIX = 'member_'
LB_EXT_IDS_VIP_KEY = 'neutron:vip'
LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip'
LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id'

OVN_NATIVE_LB_PROTOCOLS = [constants.PROTOCOL_TCP,
constants.PROTOCOL_UDP, ]
OVN_NATIVE_LB_ALGORITHMS = [constants.LB_ALGORITHM_SOURCE_IP_PORT, ]
@@ -160,7 +154,7 @@ class OvnNbIdlForLb(ovsdb_monitor.OvnIdl):
SCHEMA = "OVN_Northbound"
TABLES = ('Logical_Switch', 'Load_Balancer', 'Logical_Router',
'Logical_Switch_Port', 'Logical_Router_Port',
'Gateway_Chassis')
'Gateway_Chassis', 'NAT')

def __init__(self, event_lock_name=None):
self.conn_string = ovn_cfg.get_ovn_nb_connection()
@@ -234,6 +228,7 @@ class OvnProviderHelper(object):
REQ_TYPE_LB_CREATE_LRP_ASSOC: self.lb_create_lrp_assoc,
REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc,
REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip,
REQ_TYPE_HANDLE_MEMBER_DVR: self.handle_member_dvr,
}

@staticmethod
@@ -423,7 +418,7 @@ class OvnProviderHelper(object):
# than there is more than one (for more than 1 L4 protocol).
for lb in ovn_lbs:
fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)
lb_vip_fip = lb.external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
lb_vip_fip = lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
request_info = {'ovn_lb': lb,
'vip_fip': fip}
if fip and fip != lb_vip_fip:
@@ -449,8 +444,9 @@ class OvnProviderHelper(object):
Output: set of rows of type Load_Balancer or empty set
"""
return {lb for lb in network.load_balancer
if network.name in lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY,
[])}
if network.name in lb.external_ids.get(
ovn_const.LB_EXT_IDS_LS_REFS_KEY,
[])}

def _find_lb_in_table(self, lb, table):
return [item for item in self.ovn_nbdb_api.tables[table].rows.values()
@@ -556,21 +552,22 @@ class OvnProviderHelper(object):
'id': lb_id,
'protocol': protocol,
'vip_address': ovn_lbs[0].external_ids.get(
LB_EXT_IDS_VIP_KEY),
ovn_const.LB_EXT_IDS_VIP_KEY),
'vip_port_id':
ovn_lbs[0].external_ids.get(
LB_EXT_IDS_VIP_PORT_ID_KEY),
LB_EXT_IDS_LR_REF_KEY:
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY),
ovn_const.LB_EXT_IDS_LR_REF_KEY:
ovn_lbs[0].external_ids.get(
LB_EXT_IDS_LR_REF_KEY),
LB_EXT_IDS_LS_REFS_KEY:
ovn_const.LB_EXT_IDS_LR_REF_KEY),
ovn_const.LB_EXT_IDS_LS_REFS_KEY:
ovn_lbs[0].external_ids.get(
LB_EXT_IDS_LS_REFS_KEY),
ovn_const.LB_EXT_IDS_LS_REFS_KEY),
'admin_state_up': admin_state_up}
# NOTE(mjozefcz): Handle vip_fip info if exists.
vip_fip = ovn_lbs[0].external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
vip_fip = ovn_lbs[0].external_ids.get(
ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
if vip_fip:
lb_info.update({LB_EXT_IDS_VIP_FIP_KEY: vip_fip})
lb_info.update({ovn_const.LB_EXT_IDS_VIP_FIP_KEY: vip_fip})
self.lb_create(lb_info, protocol=protocol)
# Looks like we've just added a new LB
# or updated an existing, empty one.
@@ -637,7 +634,7 @@ class OvnProviderHelper(object):
return commands
ovn_ls = None

ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY)
ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
if ls_refs:
try:
ls_refs = jsonutils.loads(ls_refs)
@@ -669,7 +666,7 @@ class OvnProviderHelper(object):
else:
ls_refs[ls_name] = ref_ct - 1

ls_refs = {LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)}
ls_refs = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)}
commands.append(self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid,
('external_ids', ls_refs)))
@@ -694,12 +691,12 @@ class OvnProviderHelper(object):
self.ovn_nbdb_api.db_set(
'Load_Balancer', ovn_lb.uuid,
('external_ids',
{LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)})))
{ovn_const.LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)})))
else:
commands.append(
self.ovn_nbdb_api.db_remove(
'Load_Balancer', ovn_lb.uuid, 'external_ids',
(LB_EXT_IDS_LR_REF_KEY))
(ovn_const.LB_EXT_IDS_LR_REF_KEY))
)
commands.append(
self.ovn_nbdb_api.lr_lb_del(ovn_lr.uuid, ovn_lb.uuid,
@@ -722,15 +719,18 @@ class OvnProviderHelper(object):

if ovn_lr.name not in str(lr_rf):
# Multiple routers in lr_rf are separated with ','
lr_rf = {LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} if not lr_rf else {
LB_EXT_IDS_LR_REF_KEY: "%s,%s" % (lr_rf, ovn_lr.name)}
if lr_rf:
lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY:
"%s,%s" % (lr_rf, ovn_lr.name)}
else:
lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lr.name}
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', lr_rf)))
return commands

def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False):
lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY)
lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY)
if delete:
return self._del_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref)
else:
@@ -776,13 +776,13 @@ class OvnProviderHelper(object):
return lr

def _get_listener_key(self, listener_id, is_enabled=True):
listener_key = LB_EXT_IDS_LISTENER_PREFIX + str(listener_id)
listener_key = ovn_const.LB_EXT_IDS_LISTENER_PREFIX + str(listener_id)
if not is_enabled:
listener_key += ':' + DISABLED_RESOURCE_SUFFIX
return listener_key

def _get_pool_key(self, pool_id, is_enabled=True):
pool_key = LB_EXT_IDS_POOL_PREFIX + str(pool_id)
pool_key = ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id)
if not is_enabled:
pool_key += ':' + DISABLED_RESOURCE_SUFFIX
return pool_key
@@ -794,9 +794,24 @@ class OvnProviderHelper(object):
mem_info += str(mem.split('_')[2]) + ','
return mem_info[:-1] # Remove the last ','

def _get_member_key(self, member):
member_info = LB_EXT_IDS_MEMBER_PREFIX + member['id'] + "_"
member_info += member['address'] + ":" + str(member['protocol_port'])
def _get_member_key(self, member, old_convention=False):
member_info = ''
if isinstance(member, dict):
member_info = '%s%s_%s:%s' % (
ovn_const.LB_EXT_IDS_MEMBER_PREFIX,
member['id'],
member['address'],
member['protocol_port'])
if not old_convention and member.get('subnet_id'):
member_info += "_" + member['subnet_id']
elif isinstance(member, o_datamodels.Member):
member_info = '%s%s_%s:%s' % (
ovn_const.LB_EXT_IDS_MEMBER_PREFIX,
member.member_id,
member.address,
member.protocol_port)
if not old_convention and member.subnet_id:
member_info += "_" + member.subnet_id
return member_info

def _make_listener_key_value(self, listener_port, pool_id):
@@ -819,12 +834,12 @@ class OvnProviderHelper(object):
def _get_pool_listeners(self, ovn_lb, pool_key):
pool_listeners = []
for k, v in ovn_lb.external_ids.items():
if LB_EXT_IDS_LISTENER_PREFIX not in k:
if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k:
continue
vip_port, p_key = self._extract_listener_key_value(v)
if pool_key == p_key:
pool_listeners.append(k[len(LB_EXT_IDS_LISTENER_PREFIX):])
pool_listeners.append(
k[len(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):])
return pool_listeners

def _frame_vip_ips(self, lb_external_ids):
@@ -833,11 +848,11 @@ class OvnProviderHelper(object):
if lb_external_ids.get('enabled') == 'False':
return vip_ips

lb_vip = lb_external_ids[LB_EXT_IDS_VIP_KEY]
vip_fip = lb_external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
lb_vip = lb_external_ids[ovn_const.LB_EXT_IDS_VIP_KEY]
vip_fip = lb_external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)

for k, v in lb_external_ids.items():
if (LB_EXT_IDS_LISTENER_PREFIX not in k or
if (ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k or
self._is_listener_disabled(k)):
continue

@@ -865,7 +880,7 @@ class OvnProviderHelper(object):

def _is_listener_in_lb(self, lb):
for key in list(lb.external_ids):
if key.startswith(LB_EXT_IDS_LISTENER_PREFIX):
if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
return True
return False

@@ -900,18 +915,18 @@ class OvnProviderHelper(object):
# In case port is not found for the vip_address we will see an
# exception when port['id'] is accessed.
external_ids = {
LB_EXT_IDS_VIP_KEY: loadbalancer['vip_address'],
LB_EXT_IDS_VIP_PORT_ID_KEY:
ovn_const.LB_EXT_IDS_VIP_KEY: loadbalancer['vip_address'],
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY:
loadbalancer.get('vip_port_id') or port['id'],
'enabled': str(loadbalancer['admin_state_up'])}
# In case vip_fip was passed - use it.
vip_fip = loadbalancer.get(LB_EXT_IDS_VIP_FIP_KEY)
vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
if vip_fip:
external_ids[LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
# In case of lr_ref passed - use it.
lr_ref = loadbalancer.get(LB_EXT_IDS_LR_REF_KEY)
lr_ref = loadbalancer.get(ovn_const.LB_EXT_IDS_LR_REF_KEY)
if lr_ref:
external_ids[LB_EXT_IDS_LR_REF_KEY] = lr_ref
external_ids[ovn_const.LB_EXT_IDS_LR_REF_KEY] = lr_ref

try:
self.ovn_nbdb_api.db_create(
@@ -937,7 +952,7 @@ class OvnProviderHelper(object):
# NOTE(mjozefcz): In case LS references were passed -
# apply LS to the new LB. That could happen in case we
# need another loadbalancer for other L4 protocol.
ls_refs = loadbalancer.get(LB_EXT_IDS_LS_REFS_KEY)
ls_refs = loadbalancer.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
if ls_refs:
try:
ls_refs = jsonutils.loads(ls_refs)
@@ -1000,7 +1015,8 @@ class OvnProviderHelper(object):
return status

try:
port_id = ovn_lbs[0].external_ids[LB_EXT_IDS_VIP_PORT_ID_KEY]
port_id = ovn_lbs[0].external_ids[
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY]
for ovn_lb in ovn_lbs:
status = self._lb_delete(loadbalancer, ovn_lb, status)
# Clear the status dict of any key having [] value
@@ -1023,7 +1039,7 @@ class OvnProviderHelper(object):
if loadbalancer['cascade']:
# Delete all pools
for key, value in ovn_lb.external_ids.items():
if key.startswith(LB_EXT_IDS_POOL_PREFIX):
if key.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
pool_id = key.split('_')[1]
# Delete all members in the pool
if value and len(value.split(',')) > 0:
@@ -1035,12 +1051,12 @@ class OvnProviderHelper(object):
{"id": pool_id,
"provisioning_status": constants.DELETED})

if key.startswith(LB_EXT_IDS_LISTENER_PREFIX):
if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
status['listeners'].append({
'id': key.split('_')[1],
'provisioning_status': constants.DELETED,
'operating_status': constants.OFFLINE})
ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY, {})
ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY, {})
if ls_refs:
try:
ls_refs = jsonutils.loads(ls_refs)
@@ -1061,7 +1077,7 @@ class OvnProviderHelper(object):
commands.append(
self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid,
if_exists=True))
lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY, {})
lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY, {})
if lr_ref:
for lr in self.ovn_nbdb_api.tables[
'Logical_Router'].rows.values():
@@ -1395,7 +1411,7 @@ class OvnProviderHelper(object):
# Remove Pool from Listener if it is associated
listener_id = None
for key, value in ovn_lb.external_ids.items():
if (key.startswith(LB_EXT_IDS_LISTENER_PREFIX) and
if (key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX) and
pool_key in value):
external_ids[key] = value.split(':')[0] + ':'
commands.append(
@@ -1516,12 +1532,19 @@ class OvnProviderHelper(object):
def _add_member(self, member, ovn_lb, pool_key):
external_ids = copy.deepcopy(ovn_lb.external_ids)
existing_members = external_ids[pool_key]
if existing_members:
existing_members = existing_members.split(",")
member_info = self._get_member_key(member)
if member_info in existing_members:
# TODO(mjozefcz): Remove this workaround in W release.
member_info_old = self._get_member_key(member, old_convention=True)
member_found = [x for x in existing_members
if re.match(member_info_old, x)]
if member_found:
# Member already present
return
if existing_members:
pool_data = {pool_key: existing_members + "," + member_info}
existing_members.append(member_info)
pool_data = {pool_key: ",".join(existing_members)}
else:
pool_data = {pool_key: member_info}

@@ -1577,10 +1600,18 @@ class OvnProviderHelper(object):
def _remove_member(self, member, ovn_lb, pool_key):
external_ids = copy.deepcopy(ovn_lb.external_ids)
existing_members = external_ids[pool_key].split(",")
member_info = self._get_member_key(member)
if member_info in existing_members:
# TODO(mjozefcz): Delete this workaround in W release.
# To support backward compatibility member
# could be defined as `member`_`id`_`ip`:`port`_`subnet_id`
# or defined as `member`_`id`_`ip`:`port`
member_info_old = self._get_member_key(member, old_convention=True)

member_found = [x for x in existing_members
if re.match(member_info_old, x)]
if member_found:
commands = []
existing_members.remove(member_info)
existing_members.remove(member_found[0])

if not existing_members:
pool_status = constants.OFFLINE
else:
@@ -1588,17 +1619,14 @@ class OvnProviderHelper(object):
pool_data = {pool_key: ",".join(existing_members)}
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', pool_data))
)
('external_ids', pool_data)))
external_ids[pool_key] = ",".join(existing_members)
commands.extend(
self._refresh_lb_vips(ovn_lb.uuid, external_ids)
)

self._refresh_lb_vips(ovn_lb.uuid, external_ids))
commands.extend(
self._update_lb_to_ls_association(
ovn_lb, subnet_id=member['subnet_id'], associate=False)
)
ovn_lb, subnet_id=member.get('subnet_id'),
associate=False))
self._execute_commands(commands)
return pool_status
else:
@@ -1724,22 +1752,6 @@ class OvnProviderHelper(object):
if mem_addr_port == meminf.split('_')[2]:
return meminf.split('_')[1]

def get_member_info(self, pool_id):
'''Gets Member information

:param pool_id: ID of the Pool whose member information is reqd.
:param mem_addr_port: Combination of Member Address+Port. Default=None
:returns: List -- List of Member Address+Pool of all members in pool.
:returns:[None] -- if no member exists in the pool.
:raises: Exception if Loadbalancer is not found for a Pool ID
'''
existing_members = self._get_existing_pool_members(pool_id)
# Members are saved in OVN in the form of
# member1_UUID_IP:Port, member2_UUID_IP:Port
# Return the list of (UUID,IP:Port) for all members.
return [(meminf.split('_')[1], meminf.split(
'_')[2]) for meminf in existing_members.split(',')]

def create_vip_port(self, project_id, lb_id, vip_d):
port = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id),
'network_id': vip_d['vip_network_id'],
@@ -1788,18 +1800,20 @@ class OvnProviderHelper(object):
commands = []

if fip_info['action'] == REQ_INFO_ACTION_ASSOCIATE:
external_ids[LB_EXT_IDS_VIP_FIP_KEY] = fip_info['vip_fip']
vip_fip_info = {LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']}
external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = (
fip_info['vip_fip'])
vip_fip_info = {
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']}
commands.append(
self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
('external_ids', vip_fip_info))
)
else:
external_ids.pop(LB_EXT_IDS_VIP_FIP_KEY)
external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
commands.append(
self.ovn_nbdb_api.db_remove(
'Load_Balancer', ovn_lb.uuid, 'external_ids',
(LB_EXT_IDS_VIP_FIP_KEY))
(ovn_const.LB_EXT_IDS_VIP_FIP_KEY))
)

commands.extend(
@@ -1807,6 +1821,89 @@ class OvnProviderHelper(object):
)
self._execute_commands(commands)

def handle_member_dvr(self, info):
pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(info['pool_id'])
if not ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY):
LOG.debug("LB %(lb)s has no FIP on VIP configured. "
"There is no need to centralize member %(member)s "
"traffic.",
{'lb': ovn_lb.uuid, 'member': info['id']})
return

# Find out if member has FIP assigned.
network_driver = get_network_driver()
try:
subnet = network_driver.get_subnet(info['subnet_id'])
ls_name = ovn_utils.ovn_name(subnet.network_id)
except n_exc.NotFound:
LOG.exception('Subnet %s not found while trying to '
'fetch its data.', info['subnet_id'])
return

try:
ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name)
except idlutils.RowNotFound:
LOG.warning("Logical Switch %s not found. "
"Can't verify member FIP configuration.",
ls_name)
return

fip = None
f = ovn_utils.remove_macs_from_lsp_addresses
for port in ls.ports:
if info['address'] in f(port.addresses):
# We found particular port
fip = self.ovn_nbdb_api.db_find_rows(
'NAT', ('external_ids', '=', {
ovn_const.OVN_FIP_PORT_EXT_ID_KEY: port.name})
).execute(check_error=True)
fip = fip[0] if fip else fip
break

if not fip:
LOG.debug('Member %s has no FIP assigned. '
'There is no need to modify its NAT.',
info['id'])
return

if info['action'] == REQ_INFO_MEMBER_ADDED:
LOG.info('Member %(member)s is added to Load Balancer %(lb)s '
'and both have FIP assigned. Member FIP %(fip)s '
'needs to be centralized in those conditions. '
'Deleting external_mac/logical_port from it.',
{'member': info['id'],
'lb': ovn_lb.uuid,
'fip': fip.external_ip})
self.ovn_nbdb_api.db_clear(
'NAT', fip.uuid, 'external_mac').execute(check_error=True)
self.ovn_nbdb_api.db_clear(
'NAT', fip.uuid, 'logical_port').execute(check_error=True)
else:
LOG.info('Member %(member)s is deleted from Load Balancer '
'%(lb)s and both have FIP assigned. Member FIP %(fip)s '
'can be decentralized now if environment has DVR enabled. '
'Updating FIP object for recomputation.',
{'member': info['id'],
'lb': ovn_lb.uuid,
'fip': fip.external_ip})
# NOTE(mjozefcz): We don't know if this env is DVR or not.
# We should call neutron API to do 'empty' update of the FIP.
# It will bump revision number and do recomputation of the FIP.
try:
fip_info = network_driver.neutron_client.show_floatingip(
fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY])
empty_update = {
"floatingip": {
'description': fip_info['floatingip']['description']}}
network_driver.neutron_client.update_floatingip(
fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY],
empty_update)
except n_exc.NotFound:
LOG.warning('Member %(member)s FIP %(fip)s not found in '
'Neutron. Cannot update it.',
{'member': info['id'],
'fip': fip.external_ip})


class OvnProviderDriver(driver_base.ProviderDriver):
_ovn_helper = None
@@ -1959,7 +2056,7 @@ class OvnProviderDriver(driver_base.ProviderDriver):

def _ip_version_differs(self, member):
_, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(member.pool_id)
lb_vip = ovn_lb.external_ids[LB_EXT_IDS_VIP_KEY]
lb_vip = ovn_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY]
return netaddr.IPNetwork(lb_vip).version != (
netaddr.IPNetwork(member.address).version)

@@ -1972,7 +2069,8 @@ class OvnProviderDriver(driver_base.ProviderDriver):
if self._ip_version_differs(member):
raise IPVersionsMixingNotSupportedError()
admin_state_up = member.admin_state_up
if isinstance(member.subnet_id, o_datamodels.UnsetType):
if (isinstance(member.subnet_id, o_datamodels.UnsetType) or
not member.subnet_id):
msg = _('Subnet is required for Member creation'
' with OVN Provider Driver')
raise driver_exceptions.UnsupportedOptionError(
@@ -1991,6 +2089,18 @@ class OvnProviderDriver(driver_base.ProviderDriver):
'info': request_info}
self._ovn_helper.add_request(request)

# NOTE(mjozefcz): If LB has FIP on VIP
# and member has FIP we need to centralize
# traffic for member.
request_info = {'id': member.member_id,
'address': member.address,
'pool_id': member.pool_id,
'subnet_id': member.subnet_id,
'action': REQ_INFO_MEMBER_ADDED}
request = {'type': REQ_TYPE_HANDLE_MEMBER_DVR,
'info': request_info}
self._ovn_helper.add_request(request)

def member_delete(self, member):
request_info = {'id': member.member_id,
'address': member.address,
@@ -2000,6 +2110,17 @@ class OvnProviderDriver(driver_base.ProviderDriver):
request = {'type': REQ_TYPE_MEMBER_DELETE,
'info': request_info}
self._ovn_helper.add_request(request)
# NOTE(mjozefcz): If LB has FIP on VIP
# and member had FIP we can decentralize
# the traffic now.
request_info = {'id': member.member_id,
'address': member.address,
'pool_id': member.pool_id,
'subnet_id': member.subnet_id,
'action': REQ_INFO_MEMBER_DELETED}
request = {'type': REQ_TYPE_HANDLE_MEMBER_DVR,
'info': request_info}
self._ovn_helper.add_request(request)

def member_update(self, old_member, new_member):
if self._check_monitor_options(new_member):
@@ -2037,25 +2158,45 @@ class OvnProviderDriver(driver_base.ProviderDriver):
raise driver_exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
current_members = self._ovn_helper.get_member_info(pool_id)
# current_members gets a list of tuples (ID, IP:Port) for pool members
pool_key, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(pool_id)
external_ids = copy.deepcopy(ovn_lb.external_ids)
existing_members = external_ids[pool_key].split(',')
members_to_delete = copy.copy(existing_members)
for member in members:
if (self._check_monitor_options(member) or
member.address and self._ip_version_differs(member)):
skipped_members.append(member.member_id)
continue
# NOTE(mjozefcz): We need to have subnet_id information.
if (isinstance(member.subnet_id, o_datamodels.UnsetType) or
not member.subnet_id):
msg = _('Subnet is required for Member creation'
' with OVN Provider Driver')
raise driver_exceptions.UnsupportedOptionError(
user_fault_string=msg,
operator_fault_string=msg)
admin_state_up = member.admin_state_up
if isinstance(admin_state_up, o_datamodels.UnsetType):
admin_state_up = True
mem_addr_port = str(member.address) + ':' + str(
member.protocol_port)
if (member.member_id, mem_addr_port) not in current_members:

member_info = self._ovn_helper._get_member_key(member)
# TODO(mjozefcz): Remove this workaround in W release.
member_info_old = self._ovn_helper._get_member_key(
member, old_convention=True)
member_found = [x for x in existing_members
if re.match(member_info_old, x)]
if not member_found:
req_type = REQ_TYPE_MEMBER_CREATE
else:
# If member exists in pool, then Update
req_type = REQ_TYPE_MEMBER_UPDATE
current_members.remove((member.member_id, mem_addr_port))
# Remove all updating members so only deleted ones are left
# TODO(mjozefcz): Remove this workaround in W release.
try:
members_to_delete.remove(member_info_old)
except ValueError:
members_to_delete.remove(member_info)

request_info = {'id': member.member_id,
'address': member.address,
'protocol_port': member.protocol_port,
@@ -2065,14 +2206,19 @@ class OvnProviderDriver(driver_base.ProviderDriver):
request = {'type': req_type,
'info': request_info}
request_list.append(request)
for cmember in current_members:
request_info = {'id': cmember[0],
'address': cmember[1].split(':')[0],
'protocol_port': cmember[1].split(':')[1],

for member in members_to_delete:
member_info = member.split('_')
request_info = {'id': member_info[1],
'address': member_info[2].split(':')[0],
'protocol_port': member_info[2].split(':')[1],
'pool_id': pool_id}
if len(member_info) == 4:
request_info['subnet_id'] = member_info[3]
request = {'type': REQ_TYPE_MEMBER_DELETE,
'info': request_info}
request_list.append(request)

for request in request_list:
self._ovn_helper.add_request(request)
if skipped_members:


networking_ovn/tests/functional/octavia/test_ovn_driver.py (+24 / -21)

@@ -176,10 +176,10 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
lbs = []
for lb in self.nb_api.tables['Load_Balancer'].rows.values():
external_ids = dict(lb.external_ids)
ls_refs = external_ids.get(ovn_driver.LB_EXT_IDS_LS_REFS_KEY)
ls_refs = external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
if ls_refs:
external_ids[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY] = jsonutils.loads(
ovn_const.LB_EXT_IDS_LS_REFS_KEY] = jsonutils.loads(
ls_refs)
lbs.append({'name': lb.name, 'protocol': lb.protocol,
'vips': lb.vips, 'external_ids': external_ids})
@@ -261,12 +261,12 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
net_id = LR_REF_KEY_HEADER + '%s' % net_id

if add_ref:
if net_id not in lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY]:
lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
if net_id not in lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY]:
lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
else:
ref_ct = lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id]
ref_ct = lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id]
if ref_ct <= 0:
del lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id]
del lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id]

def _wait_for_status(self, expected_statuses, check_call=True):
call_count = len(expected_statuses)
@@ -327,7 +327,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
r_id = self._create_router("r1") if create_router else None
if r_id:
lb_data[
ovn_driver.LB_EXT_IDS_LR_REF_KEY] = LR_REF_KEY_HEADER + r_id
ovn_const.LB_EXT_IDS_LR_REF_KEY] = LR_REF_KEY_HEADER + r_id
net_info = self._create_net(lb_info['vip_network'], lb_info['cidr'],
router_id=r_id)
lb_data['vip_net_info'] = net_info
@@ -335,7 +335,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
vip_network_id=net_info[0],
vip_port_id=net_info[3],
admin_state_up=admin_state_up)
lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY] = {}
lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = {}
lb_data['listeners'] = []
lb_data['pools'] = []
self._update_ls_refs(lb_data, net_info[0])
@@ -443,7 +443,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
return []

vip_net_info = lb_data['vip_net_info']
external_ids = {ovn_driver.LB_EXT_IDS_LS_REFS_KEY: {},
external_ids = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: {},
'neutron:vip': lb_data['model'].vip_address,
'neutron:vip_port_id': vip_net_info[3],
'enabled': str(lb_data['model'].admin_state_up)}
@@ -476,20 +476,20 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
# For every connected subnet to the LB set the ref
# counter.
for net_id, ref_ct in lb_data[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY].items():
ovn_const.LB_EXT_IDS_LS_REFS_KEY].items():
for lb in expected_lbs:
# If given LB hasn't VIP configured from
# this network we shouldn't touch it here.
if net_id == 'neutron-%s' % lb_data['model'].vip_network_id:
lb.get('external_ids')[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1

# For every connected router set it here.
if lb_data.get(ovn_driver.LB_EXT_IDS_LR_REF_KEY):
if lb_data.get(ovn_const.LB_EXT_IDS_LR_REF_KEY):
for lb in expected_lbs:
lb.get('external_ids')[
ovn_driver.LB_EXT_IDS_LR_REF_KEY] = lb_data[
ovn_driver.LB_EXT_IDS_LR_REF_KEY]
ovn_const.LB_EXT_IDS_LR_REF_KEY] = lb_data[
ovn_const.LB_EXT_IDS_LR_REF_KEY]

pool_info = {}
for p in lb_data.get('pools', []):
@@ -502,6 +502,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
continue
m_info = 'member_' + m.member_id + '_' + m.address
m_info += ":" + str(m.protocol_port)
m_info += "_" + str(m.subnet_id)
if p_members:
p_members += "," + m_info
else:
@@ -513,7 +514,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] == m.subnet_id:
ex = external_ids[
ovn_driver.LB_EXT_IDS_LS_REFS_KEY]
ovn_const.LB_EXT_IDS_LS_REFS_KEY]
act = ex.get(
'neutron-%s' % port['network_id'], 0)
ex['neutron-%s' % port['network_id']] = act + 1
@@ -767,6 +768,8 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
'loadbalancers': [{'id': pool.loadbalancer_id,
'provisioning_status': 'ACTIVE'}],
'listeners': []})
# Delete member from lb_data
pool.members.remove(m)
self._wait_for_status_and_validate(lb_data, expected_status,
check_call=False)

@@ -1129,7 +1132,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
lba_data = self._create_load_balancer_and_validate(
{'vip_network': 'N1',
'cidr': '10.0.0.0/24'})
router_id = lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY][
router_id = lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
len(LR_REF_KEY_HEADER):]
# Create Network N2, connect it to R1
nw_info = self._create_net("N2", "10.0.1.0/24", router_id)
@@ -1148,14 +1151,14 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
# Add N3 to R1
self.l3_plugin.add_router_interface(
self.context, lba_data[
ovn_driver.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
ovn_const.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
{'subnet_id': lbb_data['vip_net_info'][1]})

# Check LBB exists on R1
n_utils.wait_until_true(
lambda: self._is_lb_associated_to_lr(
lbb_data['model'].loadbalancer_id,
lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY]),
lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]),
timeout=10)
# Check LBA connected to N3
n_utils.wait_until_true(
@@ -1181,7 +1184,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
# N3 removed from R1
self.l3_plugin.remove_router_interface(
self.context, lba_data[
ovn_driver.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
ovn_const.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
{'subnet_id': lbb_data['vip_net_info'][1]})
else:
# Delete LBB Cascade
@@ -1191,7 +1194,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
# Check LBB doesn't exists on R1
n_utils.wait_until_true(
lambda: not self._is_lb_associated_to_lr(
lbb_id, lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY]),
lbb_id, lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]),
timeout=10)
# Check LBB not connected to N1
n_utils.wait_until_true(
@@ -1215,7 +1218,7 @@ class TestOctaviaOvnProviderDriver(base.TestOVNFunctionalBase):
lba_data = self._create_load_balancer_and_validate(
{'vip_network': 'N1',
'cidr': '10.0.0.0/24'})
router_id = lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY][
router_id = lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
len(LR_REF_KEY_HEADER):]

# Create provider network N2, connect it to R1


networking_ovn/tests/unit/fakes.py (+1 / -0)

@@ -138,6 +138,7 @@ class FakeOvsdbNbOvnIdl(object):
self.db_list_rows = mock.Mock()
self.lsp_list = mock.MagicMock()
self.db_find = mock.Mock()
self.db_find_rows = mock.Mock()
self.db_set = mock.Mock()
self.db_clear = mock.Mock()
self.db_remove = mock.Mock()


networking_ovn/tests/unit/l3/test_l3_ovn.py (+140 / -2)

@@ -30,6 +30,7 @@ from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import uuidutils

from networking_ovn.common import config
from networking_ovn.common import constants as ovn_const
@@ -59,6 +60,7 @@ class OVNL3RouterPlugin(test_mech_driver.OVNMechanismDriverTestCase):
'network_id': self.fake_network['id'],
'device_owner': 'network:router_interface',
'mac_address': 'aa:aa:aa:aa:aa:aa',
'status': constants.PORT_STATUS_ACTIVE,
'fixed_ips': [{'ip_address': '10.0.0.100',
'subnet_id': 'subnet-id'}],
'id': 'router-port-id'}
@@ -143,7 +145,7 @@ class OVNL3RouterPlugin(test_mech_driver.OVNMechanismDriverTestCase):
'port_id': 'new-port_id'}
self.fake_floating_ip_new = fakes.FakeFloatingIp.create_one_fip(
attrs=self.fake_floating_ip_new_attrs)
self.fake_ovn_nat_rule = {
self.fake_ovn_nat_rule = fakes.FakeOvsdbRow.create_one_ovsdb_row({
'logical_ip': self.fake_floating_ip['fixed_ip_address'],
'external_ip': self.fake_floating_ip['floating_ip_address'],
'type': 'dnat_and_snat',
@@ -152,8 +154,55 @@ class OVNL3RouterPlugin(test_mech_driver.OVNMechanismDriverTestCase):
ovn_const.OVN_FIP_PORT_EXT_ID_KEY:
self.fake_floating_ip['port_id'],
ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name(
self.fake_floating_ip['router_id'])}}
self.fake_floating_ip['router_id'])}})
self.l3_inst = directory.get_plugin(plugin_constants.L3)
self.lb_id = uuidutils.generate_uuid()
self.member_subnet = {'id': 'subnet-id',
'ip_version': 4,
'cidr': '10.0.0.0/24',
'network_id': self.fake_network['id']}
self.member_id = uuidutils.generate_uuid()
self.member_port_id = uuidutils.generate_uuid()
self.member_address = '10.0.0.10'
self.member_l4_port = '80'
self.member_port = {
'network_id': self.fake_network['id'],
'mac_address': 'aa:aa:aa:aa:aa:aa',
'fixed_ips': [{'ip_address': self.member_address,
'subnet_id': self.member_subnet['id']}],
'id': 'fake-port-id'}
self.member_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
'addresses': ['10.0.0.10 ff:ff:ff:ff:ff:ff'],
'uuid': self.member_port['id']})
self.listener_id = uuidutils.generate_uuid()
self.pool_id = uuidutils.generate_uuid()
self.ovn_lb = mock.MagicMock()
self.ovn_lb.protocol = ['tcp']
self.ovn_lb.uuid = uuidutils.generate_uuid()
self.member_line = (
'member_%s_%s:%s_%s' %
(self.member_id, self.member_address,
self.member_l4_port, self.member_subnet['id']))
self.ovn_lb.external_ids = {
ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
'enabled': True,
'pool_%s' % self.pool_id: self.member_line,
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
self.lb_vip_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY:
'%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
self.ovn_lb.uuid)},
'name': uuidutils.generate_uuid(),
'addresses': ['10.0.0.100 ff:ff:ff:ff:ff:ee'],
'uuid': uuidutils.generate_uuid()})
self.lb_network = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={'load_balancer': [self.ovn_lb],
'name': 'neutron-%s' % self.fake_network['id'],
'ports': [self.lb_vip_lsp, self.member_lsp],
'uuid': self.fake_network['id']})
self.nb_idl = self._start_mock(
'networking_ovn.l3.l3_ovn.OVNL3RouterPlugin._ovn',
new_callable=mock.PropertyMock,
@@ -215,6 +264,10 @@ class OVNL3RouterPlugin(test_mech_driver.OVNMechanismDriverTestCase):
'networking_ovn.db.revision.bump_revision', return_value=None)
self.del_rev_p = self._start_mock(
'networking_ovn.db.revision.delete_revision', return_value=None)
self.mock_is_lb_member_fip = mock.patch(
'networking_ovn.common.ovn_client.OVNClient._is_lb_member_fip',
return_value=False)
self.mock_is_lb_member_fip.start()

@mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.add_router_interface')
def test_add_router_interface(self, func):
@@ -957,6 +1010,55 @@ class OVNL3RouterPlugin(test_mech_driver.OVNMechanismDriverTestCase):
'192.168.0.10'}))]
self.l3_inst._ovn.db_set.assert_has_calls(calls)

@mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin._get_floatingip')
@mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_port')
def test_create_floatingip_lb_member_fip(self, gp, gf):
config.cfg.CONF.set_override(
'enable_distributed_floating_ip', True, group='ovn')
# Stop this mock.
self.mock_is_lb_member_fip.stop()
gp.return_value = self.member_port
gf.return_value = self.fake_floating_ip
self.l3_inst._ovn.lookup.return_value = self.lb_network
self.l3_inst._ovn.get_lswitch_port.return_value = self.member_lsp
self.l3_inst.create_floatingip(self.context, 'floatingip')
# Validate that there is no external_mac and logical_port while
# setting the NAT entry.
self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with(
'neutron-router-id',
external_ip='192.168.0.10',
logical_ip='10.0.0.10',
type='dnat_and_snat')

@mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_subnet')
def test_create_floatingip_lb_vip_fip(self, gs):
config.cfg.CONF.set_override(
'enable_distributed_floating_ip', True, group='ovn')
gs.return_value = self.member_subnet
self.l3_inst._ovn.get_lswitch_port.return_value = self.lb_vip_lsp
self.l3_inst._ovn.db_find_rows.return_value.execute.side_effect = [
[self.ovn_lb],
[self.lb_network],
[self.fake_ovn_nat_rule],
]
self.l3_inst._ovn.lookup.return_value = self.lb_network

self.l3_inst.create_floatingip(self.context, 'floatingip')
self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with(
'neutron-router-id',
external_ip='192.168.0.10',
external_mac='aa:aa:aa:aa:aa:aa',
logical_ip='10.0.0.10',
logical_port='port_id',
type='dnat_and_snat')
self.l3_inst._ovn.db_find_rows.assert_called_with(
'NAT', ('external_ids', '=', {ovn_const.OVN_FIP_PORT_EXT_ID_KEY:
self.member_lsp.name}))
# Validate that it clears external_mac/logical_port for member NAT.
self.l3_inst._ovn.db_clear.assert_has_calls([
mock.call('NAT', self.fake_ovn_nat_rule.uuid, 'external_mac'),
mock.call('NAT', self.fake_ovn_nat_rule.uuid, 'logical_port')])

@mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_floatingip')
def test_delete_floatingip(self, df):
self.l3_inst._ovn.get_floatingip.return_value = (
@@ -968,6 +1070,42 @@ class OVNL3RouterPlugin(test_mech_driver.OVNMechanismDriverTestCase):
logical_ip='10.0.0.10',
external_ip='192.168.0.10')

@mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_subnet')
@mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin._get_floatingip')
@mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_floatingip')
def test_delete_floatingip_lb_vip_fip(self, df, gf, gs):
config.cfg.CONF.set_override(
'enable_distributed_floating_ip', True, group='ovn')
gs.return_value = self.member_subnet
gf.return_value = self.fake_floating_ip
self.l3_inst._ovn.get_floatingip.return_value = (
self.fake_ovn_nat_rule)
self.l3_inst._ovn.get_lswitch_port.return_value = self.lb_vip_lsp
self.l3_inst._ovn.db_find_rows.return_value.execute.side_effect = [
[self.ovn_lb],
[self.lb_network],
[self.fake_ovn_nat_rule],
]
self.l3_inst._ovn.lookup.return_value = self.lb_network

self.l3_inst.delete_floatingip(self.context, 'floatingip-id')
self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with(
'neutron-router-id',
type='dnat_and_snat',
logical_ip='10.0.0.10',
external_ip='192.168.0.10')
self.l3_inst._ovn.db_find_rows.assert_called_with(
'NAT', ('external_ids', '=',
{ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.member_lsp.name}))
self.l3_inst._plugin.get_port.assert_called_once_with(
mock.ANY, self.member_lsp.name)
# Validate that it adds external_mac/logical_port back.
self.l3_inst._ovn.db_set.assert_has_calls([
mock.call('NAT', self.fake_ovn_nat_rule.uuid,
('logical_port', self.member_lsp.name)),
mock.call('NAT', self.fake_ovn_nat_rule.uuid,
('external_mac', 'aa:aa:aa:aa:aa:aa'))])

@mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin._get_floatingip')
@mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_floatingip')
def test_delete_floatingip_lsp_external_id(self, df, gf):


networking_ovn/tests/unit/octavia/test_ovn_driver.py (+255 / -65)

@@ -135,10 +135,16 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
self.driver = ovn_driver.OvnProviderDriver()
add_req_thread = mock.patch.object(ovn_driver.OvnProviderHelper,
'add_request')
self.member_line = (
'member_%s_%s:%s_%s' %
(self.member_id, self.member_address,
self.member_port, self.member_subnet_id))
self.ovn_lb = mock.MagicMock()
self.ovn_lb.name = 'foo_ovn_lb'
self.ovn_lb.external_ids = {
ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4'}
ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
'pool_%s' % self.pool_id: self.member_line,
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
self.mock_add_request = add_req_thread.start()
self.project_id = uuidutils.generate_uuid()

@@ -269,11 +275,6 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
side_effect=lambda x, protocol=None:
self.ovn_lb if protocol else [self.ovn_lb]).start()
mock.patch.object(
ovn_driver.OvnProviderHelper, 'get_member_info',
return_value=[
(self.ref_member.member_id, "198.52.100.4:99"),
(self.fail_member.member_id, "198.51.100.4:99")]).start()
self.mock_find_lb_pool_key = mock.patch.object(
ovn_driver.OvnProviderHelper,
'_find_ovn_lb_with_pool_key',
@@ -300,8 +301,20 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
'admin_state_up': self.ref_member.admin_state_up}
expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_CREATE,
'info': info}
info_dvr = {
'id': self.ref_member.member_id,
'address': self.ref_member.address,
'pool_id': self.ref_member.pool_id,
'subnet_id': self.ref_member.subnet_id,
'action': ovn_driver.REQ_INFO_MEMBER_ADDED}
expected_dict_dvr = {
'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
'info': info_dvr}
self.driver.member_create(self.ref_member)
self.mock_add_request.assert_called_once_with(expected_dict)
expected = [
mock.call(expected_dict),
mock.call(expected_dict_dvr)]
self.mock_add_request.assert_has_calls(expected)

def test_member_create_failure(self):
self.assertRaises(exceptions.UnsupportedOptionError,
@@ -326,6 +339,9 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
self.ref_member.subnet_id = data_models.UnsetType()
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.member_create, self.ref_member)
self.ref_member.subnet_id = None
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.member_create, self.ref_member)

def test_member_create_monitor_opts(self):
self.ref_member.monitor_address = '172.20.20.1'
@@ -345,8 +361,13 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
'admin_state_up': True}
expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_CREATE,
'info': info}
expected_dict_dvr = {'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
'info': mock.ANY}
expected = [
mock.call(expected_dict),
mock.call(expected_dict_dvr)]
self.driver.member_create(self.ref_member)
self.mock_add_request.assert_called_once_with(expected_dict)
self.mock_add_request.assert_has_calls(expected)

def test_member_update(self):
info = {'id': self.update_member.member_id,
@@ -396,6 +417,11 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
self.driver.member_batch_update([self.ref_member])
self.assertEqual(self.mock_add_request.call_count, 2)

def test_member_batch_update_missing_subnet_id(self):
self.ref_member.subnet_id = None
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.member_batch_update, [self.ref_member])

def test_member_update_failure(self):
self.assertRaises(exceptions.UnsupportedOptionError,
self.driver.member_update, self.ref_member,
@@ -415,8 +441,20 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
'subnet_id': self.ref_member.subnet_id}
expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_DELETE,
'info': info}
info_dvr = {
'id': self.ref_member.member_id,
'address': self.ref_member.address,
'pool_id': self.ref_member.pool_id,
'subnet_id': self.ref_member.subnet_id,
'action': ovn_driver.REQ_INFO_MEMBER_DELETED}
expected_dict_dvr = {
'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
'info': info_dvr}
self.driver.member_delete(self.ref_member)
self.mock_add_request.assert_called_once_with(expected_dict)
expected = [
mock.call(expected_dict),
mock.call(expected_dict_dvr)]
self.mock_add_request.assert_has_calls(expected)

def test_listener_create(self):
info = {'id': self.ref_listener.listener_id,
@@ -590,7 +628,11 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
expected_members = {
'type': ovn_driver.REQ_TYPE_MEMBER_DELETE,
'info': info_member}
expected_members_dvr = {
'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
'info': mock.ANY}
calls = [mock.call(expected_members),
mock.call(expected_members_dvr),
mock.call(expected)]
self.driver.pool_delete(self.ref_pool)
self.mock_add_request.assert_has_calls(calls)
@@ -672,12 +714,13 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.ovn_lb.protocol = ['tcp']
self.ovn_lb.uuid = uuidutils.generate_uuid()
self.member_line = (
'member_%s_%s:%s' %
(self.member_id, self.member_address, self.member_port))
'member_%s_%s:%s_%s' %
(self.member_id, self.member_address,
self.member_port, self.member_subnet_id))
self.ovn_lb.external_ids = {
ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
ovn_driver.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
'enabled': True,
'pool_%s' % self.pool_id: self.member_line,
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
@@ -685,9 +728,10 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
execute.return_value = [self.ovn_lb]
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
execute.return_value = [self.ovn_lb]
mock.patch.object(self.helper,
'_find_ovn_lb_with_pool_key',
return_value=self.ovn_lb).start()
self.mock_find_lb_pool_key = mock.patch.object(
self.helper,
'_find_ovn_lb_with_pool_key',
return_value=self.ovn_lb).start()

self.mock_find_ovn_lbs = mock.patch.object(
ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
@@ -722,8 +766,8 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
vip_address=self.vip_address,
vip_network_id=self.vip_network_id,
ext_ids={
ovn_driver.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
ovn_const.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
ovn_const.LB_EXT_IDS_LS_REFS_KEY:
'{\"neutron-%s\": 1}' % net_id})
self.ref_lb2 = MockedLB(
uuid=uuidutils.generate_uuid(),
@@ -735,8 +779,8 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
vip_address=self.vip_address,
vip_network_id=self.vip_network_id,
ext_ids={
ovn_driver.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
ovn_const.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
ovn_const.LB_EXT_IDS_LS_REFS_KEY:
'{\"neutron-%s\": 1}' % net_id})
# TODO(mjozefcz): Consider using FakeOVNRouter.
self.router = fakes.FakeOvsdbRow.create_one_ovsdb_row(
@@ -846,20 +890,20 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'id': self.ovn_lb.name,
'protocol': 'tcp',
'vip_address': udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_VIP_KEY),
ovn_const.LB_EXT_IDS_VIP_KEY),
'vip_port_id':
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY),
ovn_driver.LB_EXT_IDS_LR_REF_KEY:
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY),
ovn_const.LB_EXT_IDS_LR_REF_KEY:
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_LR_REF_KEY),
ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
ovn_const.LB_EXT_IDS_LR_REF_KEY),
ovn_const.LB_EXT_IDS_LS_REFS_KEY:
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_LS_REFS_KEY),
ovn_const.LB_EXT_IDS_LS_REFS_KEY),
'admin_state_up': 'True',
ovn_driver.LB_EXT_IDS_VIP_FIP_KEY:
ovn_const.LB_EXT_IDS_VIP_FIP_KEY:
udp_lb.external_ids.get(
ovn_driver.LB_EXT_IDS_VIP_FIP_KEY)}
ovn_const.LB_EXT_IDS_VIP_FIP_KEY)}
lbc.assert_called_once_with(expected_lb_info, protocol='tcp')

def test__get_or_create_ovn_lb_found(self):
@@ -897,8 +941,8 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
constants.OFFLINE)
self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
'Load_Balancer', external_ids={
ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY,
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY,
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
'enabled': 'False'},
name=mock.ANY,
protocol=None)
@@ -915,8 +959,8 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
constants.ONLINE)
self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
'Load_Balancer', external_ids={
ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY,
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY,
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
'enabled': 'True'},
name=mock.ANY,
protocol=None)
@@ -930,8 +974,8 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
"""
self.lb['admin_state_up'] = True
self.lb['protocol'] = 'UDP'
self.lb[ovn_driver.LB_EXT_IDS_LR_REF_KEY] = 'foo'
self.lb[ovn_driver.LB_EXT_IDS_LS_REFS_KEY] = "{\"neutron-foo\": 1}"
self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo'
self.lb[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = "{\"neutron-foo\": 1}"
net_dr.return_value.neutron_client.list_ports.return_value = (
self.ports)
status = self.helper.lb_create(self.lb, protocol='UDP')
@@ -941,9 +985,9 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
constants.ONLINE)
self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
'Load_Balancer', external_ids={
ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY,
ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
ovn_driver.LB_EXT_IDS_LR_REF_KEY: 'foo',
ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY,
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
ovn_const.LB_EXT_IDS_LR_REF_KEY: 'foo',
'enabled': 'True'},
name=mock.ANY,
protocol='udp')
@@ -1037,8 +1081,8 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
@mock.patch('networking_ovn.octavia.ovn_driver.get_network_driver')
def test_lb_delete_ls_lr(self, net_dr):
self.ovn_lb.external_ids.update({
ovn_driver.LB_EXT_IDS_LR_REF_KEY: self.router.name,
ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
ovn_const.LB_EXT_IDS_LR_REF_KEY: self.router.name,
ovn_const.LB_EXT_IDS_LS_REFS_KEY:
'{\"neutron-%s\": 1}' % self.network.uuid})
net_dr.return_value.neutron_client.delete_port.return_value = None
(self.helper.ovn_nbdb_api.ls_get.return_value.execute.
@@ -1502,9 +1546,9 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
mock.call(
'Load_Balancer', self.ovn_lb.uuid,
('external_ids', {
-ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
-ovn_driver.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
-ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
+ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
+ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
+ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
'enabled': True,
'listener_%s' % self.listener_id: '80:'}))]
self.assertEqual(self.helper.ovn_nbdb_api.db_set.call_count,
@@ -1540,9 +1584,9 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
('external_ids', {
'enabled': True,
'listener_%s' % self.listener_id: '80:',
-ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
-ovn_driver.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
-ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port'}))
+ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
+ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
+ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port'}))

def test_pool_delete_pool_disabled(self):
disabled_p_key = self.helper._get_pool_key(self.pool_id,
@@ -1641,6 +1685,15 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.helper.member_create(self.member)
self.helper.ovn_nbdb_api.db_set.assert_not_called()

def test_member_create_already_exists_backward_compat(self):
old_member_line = ('member_%s_%s:%s' %
(self.member_id, self.member_address,
self.member_port))
self.ovn_lb.external_ids.update(
{'pool_%s' % self.pool_id: old_member_line})
self.helper.member_create(self.member)
self.helper.ovn_nbdb_api.db_set.assert_not_called()

def test_member_create_first_member_in_pool(self):
self.ovn_lb.external_ids.update({
'pool_' + self.pool_id: ''})
@@ -1658,17 +1711,20 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):

def test_member_create_second_member_in_pool(self):
member2_id = uuidutils.generate_uuid()
member2_subnet_id = uuidutils.generate_uuid()
member2_port = "1010"
member2_address = "192.168.2.150"
-member2_line = ('member_%s_%s:%s' %
-(member2_id, member2_address, member2_port))
+member2_line = ('member_%s_%s:%s_%s' %
+(member2_id, member2_address,
+member2_port, member2_subnet_id))
self.ovn_lb.external_ids.update(
{'pool_%s' % self.pool_id: member2_line})
self.helper.member_create(self.member)
all_member_line = (
-'%s,member_%s_%s:%s' %
+'%s,member_%s_%s:%s_%s' %
(member2_line, self.member_id,
-self.member_address, self.member_port))
+self.member_address, self.member_port,
+self.member_subnet_id))
# We have two members now.
expected_calls = [
mock.call('Load_Balancer', self.ovn_lb.uuid,
@@ -1730,16 +1786,37 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)

def test_member_update_new_member_line(self):
old_member_line = (
'member_%s_%s:%s' %
(self.member_id, self.member_address,
self.member_port))
new_member_line = (
'member_%s_%s:%s_%s' %
(self.member_id, self.member_address,
self.member_port, self.member_subnet_id))
self.ovn_lb.external_ids.update(
{'pool_%s' % self.pool_id: old_member_line})
self.helper.member_update(self.member)
expected_calls = [
mock.call('Load_Balancer', self.ovn_lb.uuid,
('external_ids', {
'pool_%s' % self.pool_id: new_member_line}))]
self.helper.ovn_nbdb_api.db_set.assert_has_calls(
expected_calls)

def test_member_update_new_port(self):
new_port = 11
-member_line = ('member_%s_%s:%s' %
-(self.member_id, self.member_address, new_port))
+member_line = ('member_%s_%s:%s_%s' %
+(self.member_id, self.member_address,
+new_port, self.member_subnet_id))
self.ovn_lb.external_ids.update(
{'pool_%s' % self.pool_id: member_line})
self.helper.member_update(self.member)
new_member_line = (
-'member_%s_%s:%s' %
-(self.member_id, self.member_address, self.member_port))
+'member_%s_%s:%s_%s' %
+(self.member_id, self.member_address,
+self.member_port, self.member_subnet_id))
expected_calls = [
mock.call('Load_Balancer', self.ovn_lb.uuid,
('external_ids', {
@@ -1777,6 +1854,20 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.assertEqual(status['pools'][0]['provisioning_status'],
constants.ACTIVE)

def test_member_delete_backward_compat(self):
old_member_line = ('member_%s_%s:%s' %
(self.member_id, self.member_address,
self.member_port))
self.ovn_lb.external_ids.update(
{'pool_%s' % self.pool_id: old_member_line})
self.helper.member_delete(self.member)
expected_calls = [
mock.call('Load_Balancer', self.ovn_lb.uuid,
('external_ids', {'pool_%s' % self.pool_id: ''})),
mock.call('Load_Balancer', self.ovn_lb.uuid,
('vips', {}))]
self.helper.ovn_nbdb_api.db_set.has_calls(expected_calls)

@mock.patch.object(ovn_driver.OvnProviderHelper, '_remove_member')
def test_member_delete_exception(self, mock_remove_member):
mock_remove_member.side_effect = [RuntimeError]
@@ -2013,7 +2104,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
def test__find_lb_in_ls_wrong_ref(self):
# lets break external_ids refs
self.network.load_balancer[0].external_ids.update({
-ovn_driver.LB_EXT_IDS_LS_REFS_KEY: 'foo'})
+ovn_const.LB_EXT_IDS_LS_REFS_KEY: 'foo'})
net_lb = self.helper._find_lb_in_ls(self.network)
for lb in self.network.load_balancer:
self.assertNotIn(lb, net_lb)
@@ -2073,7 +2164,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self._update_lb_to_lr_association.stop()
self.helper._update_lb_to_lr_association(self.ref_lb1, self.router)
lr_ref = self.ref_lb1.external_ids.get(
-ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+ovn_const.LB_EXT_IDS_LR_REF_KEY)
add.assert_called_once_with(self.ref_lb1, self.router, lr_ref)
delete.assert_not_called()

@@ -2086,14 +2177,14 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.helper._update_lb_to_lr_association(
self.ref_lb1, self.router, delete=True)
lr_ref = self.ref_lb1.external_ids.get(
-ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+ovn_const.LB_EXT_IDS_LR_REF_KEY)
add.assert_not_called()
delete.assert_called_once_with(self.ref_lb1, self.router, lr_ref)

@mock.patch('networking_ovn.octavia.ovn_driver.get_network_driver')
def test__del_lb_to_lr_association(self, net_dr):
lr_ref = self.ref_lb1.external_ids.get(
-ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+ovn_const.LB_EXT_IDS_LR_REF_KEY)
upd_lr_ref = '%s,%s' % (lr_ref, self.router.name)
self.helper._del_lb_to_lr_association(
self.ref_lb1, self.router, upd_lr_ref)
@@ -2101,7 +2192,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
mock.call.db_set(
'Load_Balancer', self.ref_lb1.uuid,
(('external_ids',
-{ovn_driver.LB_EXT_IDS_LR_REF_KEY: lr_ref}))),
+{ovn_const.LB_EXT_IDS_LR_REF_KEY: lr_ref}))),
mock.call.lr_lb_del(
self.router.uuid, self.ref_lb1.uuid,
if_exists=True)]
@@ -2125,7 +2216,7 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
self.ref_lb1, self.router, lr_ref)
self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
'Load_Balancer', self.ref_lb1.uuid, 'external_ids',
-ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+ovn_const.LB_EXT_IDS_LR_REF_KEY)
self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with(
self.router.uuid, self.ref_lb1.uuid, if_exists=True)
self.helper.ovn_nbdb_api.db_set.assert_not_called()
@@ -2465,6 +2556,110 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'172.26.21.20:80': '192.168.2.149:1010'}))]
self.helper.ovn_nbdb_api.assert_has_calls(calls)

@mock.patch('networking_ovn.octavia.ovn_driver.get_network_driver')
def test_handle_member_dvr_lb_has_no_fip(self, net_dr):
lb = mock.MagicMock()
info = {
'id': self.member_id,
'pool_id': self.pool_id,
'action': ovn_driver.REQ_INFO_MEMBER_ADDED}
external_ids = {
'neutron:vip_fip': ''}
lb.external_ids = external_ids
self.mock_find_lb_pool_key.return_value = lb
self.helper.handle_member_dvr(info)
net_dr.assert_not_called()
self.helper.ovn_nbdb_api.db_clear.assert_not_called()

@mock.patch('networking_ovn.octavia.ovn_driver.get_network_driver')
def test_handle_member_dvr_lb_fip_no_ls_ports(self, net_dr):
lb = mock.MagicMock()
info = {
'id': self.member_id,
'subnet_id': self.member_subnet_id,
'pool_id': self.pool_id,
'action': ovn_driver.REQ_INFO_MEMBER_ADDED}
external_ids = {
'neutron:vip_fip': '11.11.11.11'}
lb.external_ids = external_ids
self.mock_find_lb_pool_key.return_value = lb
fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
'external_ids': {},
'ports': {}})
self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
self.helper.handle_member_dvr(info)
self.helper.ovn_nbdb_api.db_clear.assert_not_called()

def _test_handle_member_dvr_lb_fip(
self, net_dr, action=ovn_driver.REQ_INFO_MEMBER_ADDED):
lb = mock.MagicMock()
fake_port = fakes.FakePort.create_one_port(
attrs={'allowed_address_pairs': ''})
info = {
'id': self.member_id,
'address': fake_port['fixed_ips'][0]['ip_address'],
'pool_id': self.pool_id,
'subnet_id': fake_port['fixed_ips'][0]['subnet_id'],
'action': action}
member_subnet = fakes.FakeSubnet.create_one_subnet()
member_subnet.id = self.member_subnet_id
member_subnet.network_id = 'foo'
net_dr.return_value.get_subnet.return_value = member_subnet
fake_lsp = fakes.FakeOVNPort.from_neutron_port(
fake_port)
fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
'external_ids': {},
'name': 'foo',
'ports': [fake_lsp]})
self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
fake_nat = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={
'external_ip': '22.22.22.22',
'external_ids': {
ovn_const.OVN_FIP_EXT_ID_KEY: 'fip_id'}})
fip_info = {
'floatingip': {
'description': 'bar'}}
net_dr.return_value.neutron_client.show_floatingip.return_value = (
fip_info)
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
execute.return_value = [fake_nat]
external_ids = {
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '11.11.11.11'}
lb.external_ids = external_ids
self.mock_find_lb_pool_key.return_value = lb
self.helper.handle_member_dvr(info)

if action == ovn_driver.REQ_INFO_MEMBER_ADDED:
calls = [
mock.call.lookup('Logical_Switch', 'neutron-foo'),
mock.call.db_find_rows('NAT', ('external_ids', '=', {
ovn_const.OVN_FIP_PORT_EXT_ID_KEY: fake_lsp.name})),
mock.ANY,
mock.call.db_clear('NAT', fake_nat.uuid, 'external_mac'),
mock.ANY,
mock.call.db_clear('NAT', fake_nat.uuid, 'logical_port'),
mock.ANY]
self.helper.ovn_nbdb_api.assert_has_calls(calls)
else:
(net_dr.return_value.neutron_client.show_floatingip.
assert_called_once_with('fip_id'))
(net_dr.return_value.neutron_client.update_floatingip.
assert_called_once_with('fip_id', {
'floatingip': {'description': 'bar'}}))
self.helper.ovn_nbdb_api.db_clear.assert_not_called()

@mock.patch('networking_ovn.octavia.ovn_driver.get_network_driver')
def test_handle_member_dvr_lb_fip_member_added(self, net_dr):
self._test_handle_member_dvr_lb_fip(net_dr)

@mock.patch('networking_ovn.octavia.ovn_driver.get_network_driver')
def test_handle_member_dvr_lb_fip_member_deleted(self, net_dr):
self._test_handle_member_dvr_lb_fip(
net_dr, action=ovn_driver.REQ_INFO_MEMBER_DELETED)

@mock.patch.object(ovn_driver, 'atexit')
def test_ovsdb_connections(self, mock_atexit):
ovn_driver.OvnProviderHelper.ovn_nbdb_api = None
@@ -2570,11 +2765,6 @@ class TestOvnProviderHelper(TestOvnOctaviaBase):
'provisioning_status': 'ERROR',
'operating_status': 'ERROR'}]})

-def test_get_member_info(self):
-ret = self.helper.get_member_info(self.pool_id)
-self.assertEqual([(self.member_id, '%s:%s' % (self.member_address,
-self.member_port))], ret)

def test_get_pool_member_id(self):
ret = self.helper.get_pool_member_id(
self.pool_id, mem_addr_port='192.168.2.149:1010')

