Centralize traffic when LB and member have FIPs

When a Load Balancer and one of its members both have a FIP assigned
and the environment is configured to use DVR, the member FIP needs to
be centralized. This is a current core OVN limitation that should be
solved in [1].

This patch adds the mechanism to the OVN client and the OVN Octavia
provider driver. It covers these cases:
1) FIP association on a port that is a member of some LB - centralize it.
2) FIP association on an LB VIP - find the members' FIPs and centralize
   them.
3) Adding a member to an LB that already has a FIP configured - check
   whether the member has a FIP and centralize it.
4) The reverse of each of the above cases.

In addition, the OVN LB member external_ids entry needed to be extended
with the member subnet_id so that the member port can easily be tracked
from the OVN mechanism driver. That means both the old and the new
conventions need to be supported; this patch adds that code as well.

Old convention: member_`member_id`_`ip_address`:`port`
New convention: member_`member_id`_`ip_address`:`port`_`subnet_id`

[1] https://bugzilla.redhat.com/show_bug.cgi?id=1793897

Related-Bug: #1860662
Change-Id: I254f0ac28f7585b699a8238e01ffb37dd70282ef
(cherry picked from networking-ovn commit 57ac38921efa6bbf0bc4a22950355256cc3ebe6d)
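To make the convention change concrete, here is a minimal, self-contained sketch (illustrative names, not the driver's actual helpers) of how a member entry looks under each convention and how matching on the old-style prefix finds entries written in either form:

    import re

    MEMBER_PREFIX = 'member_'  # mirrors LB_EXT_IDS_MEMBER_PREFIX

    def member_key(member_id, address, port, subnet_id=None):
        # Old convention: member_<id>_<ip>:<port>
        # New convention: member_<id>_<ip>:<port>_<subnet_id>
        key = '%s%s_%s:%s' % (MEMBER_PREFIX, member_id, address, port)
        if subnet_id:
            key += '_' + subnet_id
        return key

    def find_member(existing_members, member_id, address, port):
        # The new form only appends "_<subnet_id>", so a prefix match on
        # the old-style key finds entries written with either convention.
        old_key = member_key(member_id, address, port)
        return [m for m in existing_members if re.match(old_key, m)]

    members = [member_key('uuid-1', '10.0.0.4', 80, 'subnet-a'),  # new style
               member_key('uuid-2', '10.0.0.5', 80)]              # old style
    assert find_member(members, 'uuid-1', '10.0.0.4', 80)
    assert find_member(members, 'uuid-2', '10.0.0.5', 80)

This is the same prefix-matching idea the patch applies with re.match() in _add_member(), _remove_member() and member_batch_update().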
parent b7503fcb18
commit 17b1d9d573
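Background for the diff below: in ML2/OVN a floating IP is made distributed (DVR) by setting the external_mac and logical_port columns on its dnat_and_snat NAT row, so clearing those two columns forces the FIP traffic back through the centralized gateway. A hedged sketch of that action, assuming an ovsdbapp-style NB API handle called nb_api and an already looked-up NAT row (this mirrors what handle_member_dvr does in the patch; it is not the driver code itself):

    def centralize_member_fip(nb_api, nat_row):
        # Clearing these columns turns a distributed (DVR) FIP back into
        # a centralized one; a later recomputation can restore them.
        nb_api.db_clear(
            'NAT', nat_row.uuid, 'external_mac').execute(check_error=True)
        nb_api.db_clear(
            'NAT', nat_row.uuid, 'logical_port').execute(check_error=True)

The reverse direction needs no OVN write at all: the patch simply issues an "empty" floating IP update through the Neutron API so that Neutron recomputes (and, on DVR environments, re-distributes) the FIP.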
@@ -20,3 +20,16 @@ OVN_PORT_FIP_EXT_ID_KEY = 'neutron:port_fip'
 OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id'
 OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids'
 OVN_NETWORK_NAME_EXT_ID_KEY = 'neutron:network_name'
+OVN_SG_IDS_EXT_ID_KEY = 'neutron:security_group_ids'
+OVN_DEVICE_OWNER_EXT_ID_KEY = 'neutron:device_owner'
+OVN_FIP_EXT_ID_KEY = 'neutron:fip_id'
+OVN_FIP_PORT_EXT_ID_KEY = 'neutron:fip_port_id'
+
+LB_EXT_IDS_LS_REFS_KEY = 'ls_refs'
+LB_EXT_IDS_LR_REF_KEY = 'lr_ref'
+LB_EXT_IDS_POOL_PREFIX = 'pool_'
+LB_EXT_IDS_LISTENER_PREFIX = 'listener_'
+LB_EXT_IDS_MEMBER_PREFIX = 'member_'
+LB_EXT_IDS_VIP_KEY = 'neutron:vip'
+LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip'
+LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id'
@@ -10,6 +10,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo_utils import netutils
+
 from ovn_octavia_provider.common import constants
 
 
@@ -31,3 +33,19 @@ def ovn_lrouter_port_name(id):
     # - patch-<UUID>-to-lrp-<UUID>
     # lrp stands for Logical Router Port
     return constants.LRP_PREFIX + '%s' % id
+
+
+def remove_macs_from_lsp_addresses(addresses):
+    """Remove the mac addresses from the Logical_Switch_Port addresses column.
+
+    :param addresses: The list of addresses from the Logical_Switch_Port.
+                      Example: ["80:fa:5b:06:72:b7 158.36.44.22",
+                                "ff:ff:ff:ff:ff:ff 10.0.0.2"]
+    :returns: A list of IP addresses (v4 and v6)
+    """
+    ip_list = []
+    for addr in addresses:
+        ip_list.extend([x for x in addr.split() if
+                        (netutils.is_valid_ipv4(x) or
+                         netutils.is_valid_ipv6(x))])
+    return ip_list
@@ -64,23 +64,17 @@ REQ_TYPE_MEMBER_UPDATE = 'member_update'
 REQ_TYPE_LB_CREATE_LRP_ASSOC = 'lb_create_lrp_assoc'
 REQ_TYPE_LB_DELETE_LRP_ASSOC = 'lb_delete_lrp_assoc'
 REQ_TYPE_HANDLE_VIP_FIP = 'handle_vip_fip'
+REQ_TYPE_HANDLE_MEMBER_DVR = 'handle_member_dvr'
 
 REQ_TYPE_EXIT = 'exit'
 
 REQ_INFO_ACTION_ASSOCIATE = 'associate'
 REQ_INFO_ACTION_DISASSOCIATE = 'disassociate'
+REQ_INFO_MEMBER_ADDED = 'member_added'
+REQ_INFO_MEMBER_DELETED = 'member_deleted'
 
 DISABLED_RESOURCE_SUFFIX = 'D'
 
-LB_EXT_IDS_LS_REFS_KEY = 'ls_refs'
-LB_EXT_IDS_LR_REF_KEY = 'lr_ref'
-LB_EXT_IDS_POOL_PREFIX = 'pool_'
-LB_EXT_IDS_LISTENER_PREFIX = 'listener_'
-LB_EXT_IDS_MEMBER_PREFIX = 'member_'
-LB_EXT_IDS_VIP_KEY = 'neutron:vip'
-LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip'
-LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id'
-
 OVN_NATIVE_LB_PROTOCOLS = [constants.PROTOCOL_TCP,
                            constants.PROTOCOL_UDP, ]
 OVN_NATIVE_LB_ALGORITHMS = [constants.LB_ALGORITHM_SOURCE_IP_PORT, ]
@@ -162,7 +156,7 @@ class OvnNbIdlForLb(ovsdb_monitor.OvnIdl):
     SCHEMA = "OVN_Northbound"
     TABLES = ('Logical_Switch', 'Load_Balancer', 'Logical_Router',
               'Logical_Switch_Port', 'Logical_Router_Port',
-              'Gateway_Chassis')
+              'Gateway_Chassis', 'NAT')
 
     def __init__(self, event_lock_name=None):
         self.conn_string = ovn_conf.get_ovn_nb_connection()
@@ -236,6 +230,7 @@ class OvnProviderHelper(object):
             REQ_TYPE_LB_CREATE_LRP_ASSOC: self.lb_create_lrp_assoc,
             REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc,
             REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip,
+            REQ_TYPE_HANDLE_MEMBER_DVR: self.handle_member_dvr,
         }
 
     @staticmethod
@@ -425,7 +420,7 @@ class OvnProviderHelper(object):
         # then there is more than one (for more than 1 L4 protocol).
         for lb in ovn_lbs:
             fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)
-            lb_vip_fip = lb.external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
+            lb_vip_fip = lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
             request_info = {'ovn_lb': lb,
                             'vip_fip': fip}
             if fip and fip != lb_vip_fip:
@@ -451,8 +446,9 @@
         Output: set of rows of type Load_Balancer or empty set
         """
         return {lb for lb in network.load_balancer
-                if network.name in lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY,
-                                                       [])}
+                if network.name in lb.external_ids.get(
+                    ovn_const.LB_EXT_IDS_LS_REFS_KEY,
+                    [])}
 
     def _find_lb_in_table(self, lb, table):
         return [item for item in self.ovn_nbdb_api.tables[table].rows.values()
@@ -558,21 +554,22 @@
                 'id': lb_id,
                 'protocol': protocol,
                 'vip_address': ovn_lbs[0].external_ids.get(
-                    LB_EXT_IDS_VIP_KEY),
+                    ovn_const.LB_EXT_IDS_VIP_KEY),
                 'vip_port_id':
                     ovn_lbs[0].external_ids.get(
-                        LB_EXT_IDS_VIP_PORT_ID_KEY),
-                LB_EXT_IDS_LR_REF_KEY:
+                        ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY),
+                ovn_const.LB_EXT_IDS_LR_REF_KEY:
                     ovn_lbs[0].external_ids.get(
-                        LB_EXT_IDS_LR_REF_KEY),
-                LB_EXT_IDS_LS_REFS_KEY:
+                        ovn_const.LB_EXT_IDS_LR_REF_KEY),
+                ovn_const.LB_EXT_IDS_LS_REFS_KEY:
                     ovn_lbs[0].external_ids.get(
-                        LB_EXT_IDS_LS_REFS_KEY),
+                        ovn_const.LB_EXT_IDS_LS_REFS_KEY),
                 'admin_state_up': admin_state_up}
         # NOTE(mjozefcz): Handle vip_fip info if exists.
-        vip_fip = ovn_lbs[0].external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
+        vip_fip = ovn_lbs[0].external_ids.get(
+            ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
         if vip_fip:
-            lb_info.update({LB_EXT_IDS_VIP_FIP_KEY: vip_fip})
+            lb_info.update({ovn_const.LB_EXT_IDS_VIP_FIP_KEY: vip_fip})
         self.lb_create(lb_info, protocol=protocol)
         # Looks like we've just added a new LB
         # or updated an existing, empty one.
@@ -639,7 +636,7 @@
             return commands
         ovn_ls = None
 
-        ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY)
+        ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
         if ls_refs:
             try:
                 ls_refs = jsonutils.loads(ls_refs)
@@ -671,7 +668,7 @@
         else:
             ls_refs[ls_name] = ref_ct - 1
 
-        ls_refs = {LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)}
+        ls_refs = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)}
         commands.append(self.ovn_nbdb_api.db_set(
             'Load_Balancer', ovn_lb.uuid,
             ('external_ids', ls_refs)))
@@ -696,12 +693,12 @@
                 self.ovn_nbdb_api.db_set(
                     'Load_Balancer', ovn_lb.uuid,
                     ('external_ids',
-                     {LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)})))
+                     {ovn_const.LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)})))
         else:
             commands.append(
                 self.ovn_nbdb_api.db_remove(
                     'Load_Balancer', ovn_lb.uuid, 'external_ids',
-                    (LB_EXT_IDS_LR_REF_KEY))
+                    (ovn_const.LB_EXT_IDS_LR_REF_KEY))
             )
         commands.append(
             self.ovn_nbdb_api.lr_lb_del(ovn_lr.uuid, ovn_lb.uuid,
@@ -724,15 +721,18 @@
 
         if ovn_lr.name not in str(lr_rf):
             # Multiple routers in lr_rf are separated with ','
-            lr_rf = {LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} if not lr_rf else {
-                LB_EXT_IDS_LR_REF_KEY: "%s,%s" % (lr_rf, ovn_lr.name)}
+            if lr_rf:
+                lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY:
+                         "%s,%s" % (lr_rf, ovn_lr.name)}
+            else:
+                lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lr.name}
             commands.append(
                 self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
                                          ('external_ids', lr_rf)))
         return commands
 
     def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False):
-        lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY)
+        lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY)
         if delete:
             return self._del_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref)
         else:
@@ -778,13 +778,13 @@
         return lr
 
     def _get_listener_key(self, listener_id, is_enabled=True):
-        listener_key = LB_EXT_IDS_LISTENER_PREFIX + str(listener_id)
+        listener_key = ovn_const.LB_EXT_IDS_LISTENER_PREFIX + str(listener_id)
         if not is_enabled:
            listener_key += ':' + DISABLED_RESOURCE_SUFFIX
         return listener_key
 
     def _get_pool_key(self, pool_id, is_enabled=True):
-        pool_key = LB_EXT_IDS_POOL_PREFIX + str(pool_id)
+        pool_key = ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id)
         if not is_enabled:
             pool_key += ':' + DISABLED_RESOURCE_SUFFIX
         return pool_key
@@ -796,9 +796,24 @@
             mem_info += str(mem.split('_')[2]) + ','
         return mem_info[:-1]  # Remove the last ','
 
-    def _get_member_key(self, member):
-        member_info = LB_EXT_IDS_MEMBER_PREFIX + member['id'] + "_"
-        member_info += member['address'] + ":" + str(member['protocol_port'])
+    def _get_member_key(self, member, old_convention=False):
+        member_info = ''
+        if isinstance(member, dict):
+            member_info = '%s%s_%s:%s' % (
+                ovn_const.LB_EXT_IDS_MEMBER_PREFIX,
+                member['id'],
+                member['address'],
+                member['protocol_port'])
+            if not old_convention and member.get('subnet_id'):
+                member_info += "_" + member['subnet_id']
+        elif isinstance(member, o_datamodels.Member):
+            member_info = '%s%s_%s:%s' % (
+                ovn_const.LB_EXT_IDS_MEMBER_PREFIX,
+                member.member_id,
+                member.address,
+                member.protocol_port)
+            if not old_convention and member.subnet_id:
+                member_info += "_" + member.subnet_id
         return member_info
 
     def _make_listener_key_value(self, listener_port, pool_id):
@@ -821,12 +836,12 @@
     def _get_pool_listeners(self, ovn_lb, pool_key):
         pool_listeners = []
         for k, v in ovn_lb.external_ids.items():
-            if LB_EXT_IDS_LISTENER_PREFIX not in k:
+            if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k:
                 continue
             vip_port, p_key = self._extract_listener_key_value(v)
             if pool_key == p_key:
-                pool_listeners.append(k[len(LB_EXT_IDS_LISTENER_PREFIX):])
-
+                pool_listeners.append(
+                    k[len(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):])
         return pool_listeners
 
     def _frame_vip_ips(self, lb_external_ids):
@@ -835,11 +850,11 @@
         if lb_external_ids.get('enabled') == 'False':
             return vip_ips
 
-        lb_vip = lb_external_ids[LB_EXT_IDS_VIP_KEY]
-        vip_fip = lb_external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
+        lb_vip = lb_external_ids[ovn_const.LB_EXT_IDS_VIP_KEY]
+        vip_fip = lb_external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
 
         for k, v in lb_external_ids.items():
-            if (LB_EXT_IDS_LISTENER_PREFIX not in k or
+            if (ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k or
                     self._is_listener_disabled(k)):
                 continue
 
@@ -867,7 +882,7 @@
 
     def _is_listener_in_lb(self, lb):
         for key in list(lb.external_ids):
-            if key.startswith(LB_EXT_IDS_LISTENER_PREFIX):
+            if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
                 return True
         return False
 
@@ -902,18 +917,18 @@
         # In case port is not found for the vip_address we will see an
         # exception when port['id'] is accessed.
         external_ids = {
-            LB_EXT_IDS_VIP_KEY: loadbalancer['vip_address'],
-            LB_EXT_IDS_VIP_PORT_ID_KEY:
+            ovn_const.LB_EXT_IDS_VIP_KEY: loadbalancer['vip_address'],
+            ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY:
                 loadbalancer.get('vip_port_id') or port['id'],
             'enabled': str(loadbalancer['admin_state_up'])}
         # In case vip_fip was passed - use it.
-        vip_fip = loadbalancer.get(LB_EXT_IDS_VIP_FIP_KEY)
+        vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
         if vip_fip:
-            external_ids[LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
+            external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
         # In case of lr_ref passed - use it.
-        lr_ref = loadbalancer.get(LB_EXT_IDS_LR_REF_KEY)
+        lr_ref = loadbalancer.get(ovn_const.LB_EXT_IDS_LR_REF_KEY)
         if lr_ref:
-            external_ids[LB_EXT_IDS_LR_REF_KEY] = lr_ref
+            external_ids[ovn_const.LB_EXT_IDS_LR_REF_KEY] = lr_ref
 
         try:
             self.ovn_nbdb_api.db_create(
@@ -938,7 +953,7 @@
             # NOTE(mjozefcz): In case LS references were passed -
             # apply LS to the new LB. That could happen in case we
             # need another loadbalancer for other L4 protocol.
-            ls_refs = loadbalancer.get(LB_EXT_IDS_LS_REFS_KEY)
+            ls_refs = loadbalancer.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
             if ls_refs:
                 try:
                     ls_refs = jsonutils.loads(ls_refs)
@@ -1001,7 +1016,8 @@
             return status
 
         try:
-            port_id = ovn_lbs[0].external_ids[LB_EXT_IDS_VIP_PORT_ID_KEY]
+            port_id = ovn_lbs[0].external_ids[
+                ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY]
             for ovn_lb in ovn_lbs:
                 status = self._lb_delete(loadbalancer, ovn_lb, status)
             # Clear the status dict of any key having [] value
@@ -1024,7 +1040,7 @@
         if loadbalancer['cascade']:
             # Delete all pools
             for key, value in ovn_lb.external_ids.items():
-                if key.startswith(LB_EXT_IDS_POOL_PREFIX):
+                if key.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
                     pool_id = key.split('_')[1]
                     # Delete all members in the pool
                     if value and len(value.split(',')) > 0:
@@ -1036,12 +1052,12 @@
                         {"id": pool_id,
                          "provisioning_status": constants.DELETED})
 
-                if key.startswith(LB_EXT_IDS_LISTENER_PREFIX):
+                if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
                     status['listeners'].append({
                         'id': key.split('_')[1],
                         'provisioning_status': constants.DELETED,
                         'operating_status': constants.OFFLINE})
-        ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY, {})
+        ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY, {})
         if ls_refs:
             try:
                 ls_refs = jsonutils.loads(ls_refs)
@@ -1062,7 +1078,7 @@
             commands.append(
                 self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid,
                                             if_exists=True))
-        lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY, {})
+        lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY, {})
         if lr_ref:
             for lr in self.ovn_nbdb_api.tables[
                     'Logical_Router'].rows.values():
@@ -1396,7 +1412,7 @@
         # Remove Pool from Listener if it is associated
         listener_id = None
         for key, value in ovn_lb.external_ids.items():
-            if (key.startswith(LB_EXT_IDS_LISTENER_PREFIX) and
+            if (key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX) and
                     pool_key in value):
                 external_ids[key] = value.split(':')[0] + ':'
                 commands.append(
@@ -1517,12 +1533,19 @@
     def _add_member(self, member, ovn_lb, pool_key):
         external_ids = copy.deepcopy(ovn_lb.external_ids)
         existing_members = external_ids[pool_key]
         if existing_members:
             existing_members = existing_members.split(",")
         member_info = self._get_member_key(member)
-        if member_info in existing_members:
+        # TODO(mjozefcz): Remove this workaround in W release.
+        member_info_old = self._get_member_key(member, old_convention=True)
+        member_found = [x for x in existing_members
+                        if re.match(member_info_old, x)]
+        if member_found:
             # Member already present
             return
         if existing_members:
-            pool_data = {pool_key: existing_members + "," + member_info}
+            existing_members.append(member_info)
+            pool_data = {pool_key: ",".join(existing_members)}
         else:
             pool_data = {pool_key: member_info}
 
@@ -1578,10 +1601,18 @@
     def _remove_member(self, member, ovn_lb, pool_key):
         external_ids = copy.deepcopy(ovn_lb.external_ids)
         existing_members = external_ids[pool_key].split(",")
-        member_info = self._get_member_key(member)
-        if member_info in existing_members:
+        # TODO(mjozefcz): Delete this workaround in W release.
+        # To support backward compatibility member
+        # could be defined as `member`_`id`_`ip`:`port`_`subnet_id`
+        # or defined as `member`_`id`_`ip`:`port`
+        member_info_old = self._get_member_key(member, old_convention=True)
+
+        member_found = [x for x in existing_members
+                        if re.match(member_info_old, x)]
+        if member_found:
             commands = []
-            existing_members.remove(member_info)
+            existing_members.remove(member_found[0])
 
             if not existing_members:
                 pool_status = constants.OFFLINE
             else:
@@ -1589,17 +1620,14 @@
                 pool_data = {pool_key: ",".join(existing_members)}
             commands.append(
                 self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
-                                         ('external_ids', pool_data))
-            )
+                                         ('external_ids', pool_data)))
+            external_ids[pool_key] = ",".join(existing_members)
             commands.extend(
-                self._refresh_lb_vips(ovn_lb.uuid, external_ids)
-            )
-
+                self._refresh_lb_vips(ovn_lb.uuid, external_ids))
             commands.extend(
                 self._update_lb_to_ls_association(
-                    ovn_lb, subnet_id=member['subnet_id'], associate=False)
-            )
+                    ovn_lb, subnet_id=member.get('subnet_id'),
+                    associate=False))
             self._execute_commands(commands)
             return pool_status
         else:
@@ -1725,22 +1753,6 @@
             if mem_addr_port == meminf.split('_')[2]:
                 return meminf.split('_')[1]
 
-    def get_member_info(self, pool_id):
-        '''Gets Member information
-
-        :param pool_id: ID of the Pool whose member information is reqd.
-        :param mem_addr_port: Combination of Member Address+Port. Default=None
-        :returns: List -- List of Member Address+Pool of all members in pool.
-        :returns: [None] -- if no member exists in the pool.
-        :raises: Exception if Loadbalancer is not found for a Pool ID
-        '''
-        existing_members = self._get_existing_pool_members(pool_id)
-        # Members are saved in OVN in the form of
-        # member1_UUID_IP:Port, member2_UUID_IP:Port
-        # Return the list of (UUID,IP:Port) for all members.
-        return [(meminf.split('_')[1], meminf.split(
-            '_')[2]) for meminf in existing_members.split(',')]
-
     def create_vip_port(self, project_id, lb_id, vip_d):
         port = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id),
                          'network_id': vip_d['vip_network_id'],
@@ -1789,18 +1801,20 @@
         commands = []
 
         if fip_info['action'] == REQ_INFO_ACTION_ASSOCIATE:
-            external_ids[LB_EXT_IDS_VIP_FIP_KEY] = fip_info['vip_fip']
-            vip_fip_info = {LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']}
+            external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = (
+                fip_info['vip_fip'])
+            vip_fip_info = {
+                ovn_const.LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']}
             commands.append(
                 self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
                                          ('external_ids', vip_fip_info))
             )
         else:
-            external_ids.pop(LB_EXT_IDS_VIP_FIP_KEY)
+            external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
             commands.append(
                 self.ovn_nbdb_api.db_remove(
                     'Load_Balancer', ovn_lb.uuid, 'external_ids',
-                    (LB_EXT_IDS_VIP_FIP_KEY))
+                    (ovn_const.LB_EXT_IDS_VIP_FIP_KEY))
             )
 
         commands.extend(
@@ -1808,6 +1822,89 @@
         )
         self._execute_commands(commands)
 
+    def handle_member_dvr(self, info):
+        pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(info['pool_id'])
+        if not ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY):
+            LOG.debug("LB %(lb)s has no FIP on VIP configured. "
+                      "There is no need to centralize member %(member)s "
+                      "traffic.",
+                      {'lb': ovn_lb.uuid, 'member': info['id']})
+            return
+
+        # Find out if member has FIP assigned.
+        network_driver = get_network_driver()
+        try:
+            subnet = network_driver.get_subnet(info['subnet_id'])
+            ls_name = utils.ovn_name(subnet.network_id)
+        except n_exc.NotFound:
+            LOG.exception('Subnet %s not found while trying to '
+                          'fetch its data.', info['subnet_id'])
+            return
+
+        try:
+            ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name)
+        except idlutils.RowNotFound:
+            LOG.warning("Logical Switch %s not found. "
+                        "Cannot verify member FIP configuration.",
+                        ls_name)
+            return
+
+        fip = None
+        f = utils.remove_macs_from_lsp_addresses
+        for port in ls.ports:
+            if info['address'] in f(port.addresses):
+                # We found the particular port
+                fip = self.ovn_nbdb_api.db_find_rows(
+                    'NAT', ('external_ids', '=', {
+                        ovn_const.OVN_FIP_PORT_EXT_ID_KEY: port.name})
+                ).execute(check_error=True)
+                fip = fip[0] if fip else fip
+                break
+
+        if not fip:
+            LOG.debug('Member %s has no FIP assigned. '
+                      'There is no need to modify its NAT.',
+                      info['id'])
+            return
+
+        if info['action'] == REQ_INFO_MEMBER_ADDED:
+            LOG.info('Member %(member)s is added to Load Balancer %(lb)s '
+                     'and both have FIP assigned. Member FIP %(fip)s '
+                     'needs to be centralized in those conditions. '
+                     'Deleting external_mac/logical_port from it.',
+                     {'member': info['id'],
+                      'lb': ovn_lb.uuid,
+                      'fip': fip.external_ip})
+            self.ovn_nbdb_api.db_clear(
+                'NAT', fip.uuid, 'external_mac').execute(check_error=True)
+            self.ovn_nbdb_api.db_clear(
+                'NAT', fip.uuid, 'logical_port').execute(check_error=True)
+        else:
+            LOG.info('Member %(member)s is deleted from Load Balancer '
+                     '%(lb)s and both have FIP assigned. Member FIP %(fip)s '
+                     'can be decentralized now if environment has DVR '
+                     'enabled. Updating FIP object for recomputation.',
+                     {'member': info['id'],
+                      'lb': ovn_lb.uuid,
+                      'fip': fip.external_ip})
+            # NOTE(mjozefcz): We don't know if this env is DVR or not.
+            # We should call neutron API to do 'empty' update of the FIP.
+            # It will bump revision number and do recomputation of the FIP.
+            try:
+                fip_info = network_driver.neutron_client.show_floatingip(
+                    fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY])
+                empty_update = {
+                    "floatingip": {
+                        'description': fip_info['floatingip']['description']}}
+                network_driver.neutron_client.update_floatingip(
+                    fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY],
+                    empty_update)
+            except n_exc.NotFound:
+                LOG.warning('Member %(member)s FIP %(fip)s not found in '
+                            'Neutron. Cannot update it.',
+                            {'member': info['id'],
+                             'fip': fip.external_ip})
+
 
 class OvnProviderDriver(driver_base.ProviderDriver):
     _ovn_helper = None
@@ -1960,7 +2057,7 @@
 
     def _ip_version_differs(self, member):
         _, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(member.pool_id)
-        lb_vip = ovn_lb.external_ids[LB_EXT_IDS_VIP_KEY]
+        lb_vip = ovn_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY]
         return netaddr.IPNetwork(lb_vip).version != (
             netaddr.IPNetwork(member.address).version)
 
@@ -1973,7 +2070,8 @@
         if self._ip_version_differs(member):
             raise IPVersionsMixingNotSupportedError()
         admin_state_up = member.admin_state_up
-        if isinstance(member.subnet_id, o_datamodels.UnsetType):
+        if (isinstance(member.subnet_id, o_datamodels.UnsetType) or
+                not member.subnet_id):
             msg = _('Subnet is required for Member creation'
                     ' with OVN Provider Driver')
             raise driver_exceptions.UnsupportedOptionError(
@@ -1992,6 +2090,18 @@
                    'info': request_info}
         self._ovn_helper.add_request(request)
 
+        # NOTE(mjozefcz): If LB has FIP on VIP
+        # and member has FIP we need to centralize
+        # traffic for member.
+        request_info = {'id': member.member_id,
+                        'address': member.address,
+                        'pool_id': member.pool_id,
+                        'subnet_id': member.subnet_id,
+                        'action': REQ_INFO_MEMBER_ADDED}
+        request = {'type': REQ_TYPE_HANDLE_MEMBER_DVR,
+                   'info': request_info}
+        self._ovn_helper.add_request(request)
+
     def member_delete(self, member):
         request_info = {'id': member.member_id,
                         'address': member.address,
@@ -2001,6 +2111,17 @@
         request = {'type': REQ_TYPE_MEMBER_DELETE,
                    'info': request_info}
         self._ovn_helper.add_request(request)
+        # NOTE(mjozefcz): If LB has FIP on VIP
+        # and member had FIP we can decentralize
+        # the traffic now.
+        request_info = {'id': member.member_id,
+                        'address': member.address,
+                        'pool_id': member.pool_id,
+                        'subnet_id': member.subnet_id,
+                        'action': REQ_INFO_MEMBER_DELETED}
+        request = {'type': REQ_TYPE_HANDLE_MEMBER_DVR,
+                   'info': request_info}
+        self._ovn_helper.add_request(request)
 
     def member_update(self, old_member, new_member):
         if self._check_monitor_options(new_member):
@@ -2038,25 +2159,45 @@
             raise driver_exceptions.UnsupportedOptionError(
                 user_fault_string=msg,
                 operator_fault_string=msg)
-        current_members = self._ovn_helper.get_member_info(pool_id)
-        # current_members gets a list of tuples (ID, IP:Port) for pool members
+        pool_key, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(pool_id)
+        external_ids = copy.deepcopy(ovn_lb.external_ids)
+        existing_members = external_ids[pool_key].split(',')
+        members_to_delete = copy.copy(existing_members)
         for member in members:
             if (self._check_monitor_options(member) or
                     member.address and self._ip_version_differs(member)):
                 skipped_members.append(member.member_id)
                 continue
+            # NOTE(mjozefcz): We need to have subnet_id information.
+            if (isinstance(member.subnet_id, o_datamodels.UnsetType) or
+                    not member.subnet_id):
+                msg = _('Subnet is required for Member creation'
+                        ' with OVN Provider Driver')
+                raise driver_exceptions.UnsupportedOptionError(
+                    user_fault_string=msg,
+                    operator_fault_string=msg)
             admin_state_up = member.admin_state_up
             if isinstance(admin_state_up, o_datamodels.UnsetType):
                 admin_state_up = True
-            mem_addr_port = str(member.address) + ':' + str(
-                member.protocol_port)
-            if (member.member_id, mem_addr_port) not in current_members:
+
+            member_info = self._ovn_helper._get_member_key(member)
+            # TODO(mjozefcz): Remove this workaround in W release.
+            member_info_old = self._ovn_helper._get_member_key(
+                member, old_convention=True)
+            member_found = [x for x in existing_members
+                            if re.match(member_info_old, x)]
+            if not member_found:
                 req_type = REQ_TYPE_MEMBER_CREATE
             else:
                 # If member exists in pool, then Update
                 req_type = REQ_TYPE_MEMBER_UPDATE
-                current_members.remove((member.member_id, mem_addr_port))
                 # Remove all updating members so only deleted ones are left
+                # TODO(mjozefcz): Remove this workaround in W release.
+                try:
+                    members_to_delete.remove(member_info_old)
+                except ValueError:
+                    members_to_delete.remove(member_info)
+
             request_info = {'id': member.member_id,
                             'address': member.address,
                             'protocol_port': member.protocol_port,
@@ -2066,14 +2207,19 @@
             request = {'type': req_type,
                        'info': request_info}
             request_list.append(request)
-        for cmember in current_members:
-            request_info = {'id': cmember[0],
-                            'address': cmember[1].split(':')[0],
-                            'protocol_port': cmember[1].split(':')[1],
+
+        for member in members_to_delete:
+            member_info = member.split('_')
+            request_info = {'id': member_info[1],
+                            'address': member_info[2].split(':')[0],
+                            'protocol_port': member_info[2].split(':')[1],
                             'pool_id': pool_id}
+            if len(member_info) == 4:
+                request_info['subnet_id'] = member_info[3]
             request = {'type': REQ_TYPE_MEMBER_DELETE,
                        'info': request_info}
             request_list.append(request)
 
         for request in request_list:
             self._ovn_helper.add_request(request)
         if skipped_members:
@@ -180,10 +180,10 @@ class TestOctaviaOvnProviderDriver(
         lbs = []
         for lb in self.nb_api.tables['Load_Balancer'].rows.values():
             external_ids = dict(lb.external_ids)
-            ls_refs = external_ids.get(ovn_driver.LB_EXT_IDS_LS_REFS_KEY)
+            ls_refs = external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
             if ls_refs:
                 external_ids[
-                    ovn_driver.LB_EXT_IDS_LS_REFS_KEY] = jsonutils.loads(
+                    ovn_const.LB_EXT_IDS_LS_REFS_KEY] = jsonutils.loads(
                         ls_refs)
             lbs.append({'name': lb.name, 'protocol': lb.protocol,
                         'vips': lb.vips, 'external_ids': external_ids})
@@ -265,12 +265,12 @@
         net_id = LR_REF_KEY_HEADER + '%s' % net_id
 
         if add_ref:
-            if net_id not in lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY]:
-                lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
+            if net_id not in lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY]:
+                lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
         else:
-            ref_ct = lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id]
+            ref_ct = lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id]
             if ref_ct <= 0:
-                del lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id]
+                del lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id]
 
     def _wait_for_status(self, expected_statuses, check_call=True):
         call_count = len(expected_statuses)
@@ -331,7 +331,7 @@
         r_id = self._create_router("r1") if create_router else None
         if r_id:
             lb_data[
-                ovn_driver.LB_EXT_IDS_LR_REF_KEY] = LR_REF_KEY_HEADER + r_id
+                ovn_const.LB_EXT_IDS_LR_REF_KEY] = LR_REF_KEY_HEADER + r_id
         net_info = self._create_net(lb_info['vip_network'], lb_info['cidr'],
                                     router_id=r_id)
         lb_data['vip_net_info'] = net_info
@@ -339,7 +339,7 @@
             vip_network_id=net_info[0],
             vip_port_id=net_info[3],
             admin_state_up=admin_state_up)
-        lb_data[ovn_driver.LB_EXT_IDS_LS_REFS_KEY] = {}
+        lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = {}
         lb_data['listeners'] = []
         lb_data['pools'] = []
         self._update_ls_refs(lb_data, net_info[0])
@@ -447,7 +447,7 @@
             return []
 
         vip_net_info = lb_data['vip_net_info']
-        external_ids = {ovn_driver.LB_EXT_IDS_LS_REFS_KEY: {},
+        external_ids = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: {},
                         'neutron:vip': lb_data['model'].vip_address,
                         'neutron:vip_port_id': vip_net_info[3],
                         'enabled': str(lb_data['model'].admin_state_up)}
@@ -480,20 +480,20 @@
         # For every connected subnet to the LB set the ref
         # counter.
         for net_id, ref_ct in lb_data[
-                ovn_driver.LB_EXT_IDS_LS_REFS_KEY].items():
+                ovn_const.LB_EXT_IDS_LS_REFS_KEY].items():
             for lb in expected_lbs:
                 # If given LB hasn't VIP configured from
                 # this network we shouldn't touch it here.
                 if net_id == 'neutron-%s' % lb_data['model'].vip_network_id:
                     lb.get('external_ids')[
-                        ovn_driver.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
+                        ovn_const.LB_EXT_IDS_LS_REFS_KEY][net_id] = 1
 
         # For every connected router set it here.
-        if lb_data.get(ovn_driver.LB_EXT_IDS_LR_REF_KEY):
+        if lb_data.get(ovn_const.LB_EXT_IDS_LR_REF_KEY):
             for lb in expected_lbs:
                 lb.get('external_ids')[
-                    ovn_driver.LB_EXT_IDS_LR_REF_KEY] = lb_data[
-                        ovn_driver.LB_EXT_IDS_LR_REF_KEY]
+                    ovn_const.LB_EXT_IDS_LR_REF_KEY] = lb_data[
+                        ovn_const.LB_EXT_IDS_LR_REF_KEY]
 
         pool_info = {}
         for p in lb_data.get('pools', []):
@@ -506,6 +506,7 @@
                     continue
                 m_info = 'member_' + m.member_id + '_' + m.address
                 m_info += ":" + str(m.protocol_port)
+                m_info += "_" + str(m.subnet_id)
                 if p_members:
                     p_members += "," + m_info
                 else:
@@ -517,7 +518,7 @@
                 for fixed_ip in port['fixed_ips']:
                     if fixed_ip['subnet_id'] == m.subnet_id:
                         ex = external_ids[
-                            ovn_driver.LB_EXT_IDS_LS_REFS_KEY]
+                            ovn_const.LB_EXT_IDS_LS_REFS_KEY]
                         act = ex.get(
                             'neutron-%s' % port['network_id'], 0)
                         ex['neutron-%s' % port['network_id']] = act + 1
@@ -771,6 +772,8 @@
             'loadbalancers': [{'id': pool.loadbalancer_id,
                                'provisioning_status': 'ACTIVE'}],
             'listeners': []})
+        # Delete member from lb_data
+        pool.members.remove(m)
         self._wait_for_status_and_validate(lb_data, expected_status,
                                            check_call=False)
 
@@ -1133,7 +1136,7 @@
         lba_data = self._create_load_balancer_and_validate(
             {'vip_network': 'N1',
              'cidr': '10.0.0.0/24'})
-        router_id = lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY][
+        router_id = lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
             len(LR_REF_KEY_HEADER):]
         # Create Network N2, connect it to R1
         nw_info = self._create_net("N2", "10.0.1.0/24", router_id)
@@ -1152,14 +1155,14 @@
         # Add N3 to R1
         self.l3_plugin.add_router_interface(
             self.context, lba_data[
-                ovn_driver.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
+                ovn_const.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
             {'subnet_id': lbb_data['vip_net_info'][1]})
 
         # Check LBB exists on R1
         n_utils.wait_until_true(
             lambda: self._is_lb_associated_to_lr(
                 lbb_data['model'].loadbalancer_id,
-                lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY]),
+                lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]),
             timeout=10)
         # Check LBA connected to N3
         n_utils.wait_until_true(
@@ -1185,7 +1188,7 @@
             # N3 removed from R1
             self.l3_plugin.remove_router_interface(
                 self.context, lba_data[
-                    ovn_driver.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
+                    ovn_const.LB_EXT_IDS_LR_REF_KEY][len(LR_REF_KEY_HEADER):],
                 {'subnet_id': lbb_data['vip_net_info'][1]})
         else:
             # Delete LBB Cascade
@@ -1195,7 +1198,7 @@
         # Check LBB doesn't exist on R1
         n_utils.wait_until_true(
             lambda: not self._is_lb_associated_to_lr(
-                lbb_id, lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY]),
+                lbb_id, lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY]),
             timeout=10)
         # Check LBB not connected to N1
         n_utils.wait_until_true(
@@ -1219,7 +1222,7 @@
         lba_data = self._create_load_balancer_and_validate(
             {'vip_network': 'N1',
              'cidr': '10.0.0.0/24'})
-        router_id = lba_data[ovn_driver.LB_EXT_IDS_LR_REF_KEY][
+        router_id = lba_data[ovn_const.LB_EXT_IDS_LR_REF_KEY][
            len(LR_REF_KEY_HEADER):]
 
         # Create provider network N2, connect it to R1
@@ -17,6 +17,9 @@ import copy
 import mock
 from oslo_utils import uuidutils
 
+from ovn_octavia_provider.common import constants
+from ovn_octavia_provider.common import utils
+
 
 class FakeResource(dict):
 
@@ -201,6 +204,25 @@ class FakeOVNPort(object):
         port_attrs.update(attrs)
         return type('Logical_Switch_Port', (object, ), port_attrs)
 
+    @staticmethod
+    def from_neutron_port(port):
+        """Create a fake ovn port based on a neutron port."""
+        external_ids = {
+            constants.OVN_NETWORK_NAME_EXT_ID_KEY:
+                utils.ovn_name(port['network_id']),
+            constants.OVN_SG_IDS_EXT_ID_KEY:
+                ' '.join(port['security_groups']),
+            constants.OVN_DEVICE_OWNER_EXT_ID_KEY:
+                port.get('device_owner', '')}
+        addresses = [port['mac_address'], ]
+        addresses += [x['ip_address'] for x in port.get('fixed_ips', [])]
+        port_security = (
+            addresses + [x['ip_address'] for x in
+                         port.get('allowed_address_pairs', [])])
+        return FakeOVNPort.create_one_port(
+            {'external_ids': external_ids, 'addresses': addresses,
+             'port_security': port_security})
+
 
 class FakeOVNRouter(object):
 
@@ -220,3 +242,51 @@ class FakeOVNRouter(object):
         # Overwrite default attributes.
         router_attrs.update(attrs)
         return type('Logical_Router', (object, ), router_attrs)
+
+
+class FakePort(object):
+    """Fake one or more ports."""
+
+    @staticmethod
+    def create_one_port(attrs=None):
+        """Create a fake port.
+
+        :param Dictionary attrs:
+            A dictionary with all attributes
+        :return:
+            A FakeResource object faking the port
+        """
+        attrs = attrs or {}
+
+        # Set default attributes.
+        fake_uuid = uuidutils.generate_uuid()
+        port_attrs = {
+            'admin_state_up': True,
+            'allowed_address_pairs': [{}],
+            'binding:host_id': 'binding-host-id-' + fake_uuid,
+            'binding:profile': {},
+            'binding:vif_details': {},
+            'binding:vif_type': 'ovs',
+            'binding:vnic_type': 'normal',
+            'device_id': 'device-id-' + fake_uuid,
+            'device_owner': 'compute:nova',
+            'dns_assignment': [{}],
+            'dns_name': 'dns-name-' + fake_uuid,
+            'extra_dhcp_opts': [{}],
+            'fixed_ips': [{'subnet_id': 'subnet-id-' + fake_uuid,
+                           'ip_address': '10.10.10.20'}],
+            'id': 'port-id-' + fake_uuid,
+            'mac_address': 'fa:16:3e:a9:4e:72',
+            'name': 'port-name-' + fake_uuid,
+            'network_id': 'network-id-' + fake_uuid,
+            'port_security_enabled': True,
+            'security_groups': [],
+            'status': 'ACTIVE',
+            'tenant_id': 'project-id-' + fake_uuid,
+        }
+
+        # Overwrite default attributes.
+        port_attrs.update(attrs)
+
+        return FakeResource(info=copy.deepcopy(port_attrs),
                             loaded=True)
@@ -134,10 +134,16 @@ class TestOvnProviderDriver(TestOvnOctaviaBase):
         self.driver = ovn_driver.OvnProviderDriver()
         add_req_thread = mock.patch.object(ovn_driver.OvnProviderHelper,
                                            'add_request')
+        self.member_line = (
+            'member_%s_%s:%s_%s' %
+            (self.member_id, self.member_address,
+             self.member_port, self.member_subnet_id))
         self.ovn_lb = mock.MagicMock()
         self.ovn_lb.name = 'foo_ovn_lb'
         self.ovn_lb.external_ids = {
-            ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4'}
+            ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
+            'pool_%s' % self.pool_id: self.member_line,
+            'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
         self.mock_add_request = add_req_thread.start()
         self.project_id = uuidutils.generate_uuid()
 
@@ -268,11 +274,6 @@
             ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
             side_effect=lambda x, protocol=None:
                 self.ovn_lb if protocol else [self.ovn_lb]).start()
-        mock.patch.object(
-            ovn_driver.OvnProviderHelper, 'get_member_info',
-            return_value=[
-                (self.ref_member.member_id, "198.52.100.4:99"),
-                (self.fail_member.member_id, "198.51.100.4:99")]).start()
         self.mock_find_lb_pool_key = mock.patch.object(
             ovn_driver.OvnProviderHelper,
             '_find_ovn_lb_with_pool_key',
@@ -299,8 +300,20 @@
                 'admin_state_up': self.ref_member.admin_state_up}
         expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_CREATE,
                          'info': info}
+        info_dvr = {
+            'id': self.ref_member.member_id,
+            'address': self.ref_member.address,
+            'pool_id': self.ref_member.pool_id,
+            'subnet_id': self.ref_member.subnet_id,
+            'action': ovn_driver.REQ_INFO_MEMBER_ADDED}
+        expected_dict_dvr = {
+            'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
+            'info': info_dvr}
         self.driver.member_create(self.ref_member)
-        self.mock_add_request.assert_called_once_with(expected_dict)
+        expected = [
+            mock.call(expected_dict),
+            mock.call(expected_dict_dvr)]
+        self.mock_add_request.assert_has_calls(expected)
 
     def test_member_create_failure(self):
         self.assertRaises(exceptions.UnsupportedOptionError,
@@ -325,6 +338,9 @@
         self.ref_member.subnet_id = data_models.UnsetType()
         self.assertRaises(exceptions.UnsupportedOptionError,
                           self.driver.member_create, self.ref_member)
+        self.ref_member.subnet_id = None
+        self.assertRaises(exceptions.UnsupportedOptionError,
+                          self.driver.member_create, self.ref_member)
 
     def test_member_create_monitor_opts(self):
         self.ref_member.monitor_address = '172.20.20.1'
@@ -344,8 +360,13 @@
                 'admin_state_up': True}
         expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_CREATE,
                          'info': info}
+        expected_dict_dvr = {'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
+                             'info': mock.ANY}
+        expected = [
+            mock.call(expected_dict),
+            mock.call(expected_dict_dvr)]
         self.driver.member_create(self.ref_member)
-        self.mock_add_request.assert_called_once_with(expected_dict)
+        self.mock_add_request.assert_has_calls(expected)
 
     def test_member_update(self):
         info = {'id': self.update_member.member_id,
@@ -395,6 +416,11 @@
         self.driver.member_batch_update([self.ref_member])
         self.assertEqual(self.mock_add_request.call_count, 2)
 
+    def test_member_batch_update_missing_subnet_id(self):
+        self.ref_member.subnet_id = None
+        self.assertRaises(exceptions.UnsupportedOptionError,
+                          self.driver.member_batch_update, [self.ref_member])
+
     def test_member_update_failure(self):
         self.assertRaises(exceptions.UnsupportedOptionError,
                           self.driver.member_update, self.ref_member,
@@ -414,8 +440,20 @@
                 'subnet_id': self.ref_member.subnet_id}
         expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_DELETE,
                          'info': info}
+        info_dvr = {
+            'id': self.ref_member.member_id,
+            'address': self.ref_member.address,
+            'pool_id': self.ref_member.pool_id,
+            'subnet_id': self.ref_member.subnet_id,
+            'action': ovn_driver.REQ_INFO_MEMBER_DELETED}
+        expected_dict_dvr = {
+            'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
+            'info': info_dvr}
         self.driver.member_delete(self.ref_member)
-        self.mock_add_request.assert_called_once_with(expected_dict)
+        expected = [
+            mock.call(expected_dict),
+            mock.call(expected_dict_dvr)]
+        self.mock_add_request.assert_has_calls(expected)
 
     def test_listener_create(self):
         info = {'id': self.ref_listener.listener_id,
@@ -589,7 +627,11 @@
         expected_members = {
             'type': ovn_driver.REQ_TYPE_MEMBER_DELETE,
             'info': info_member}
+        expected_members_dvr = {
+            'type': ovn_driver.REQ_TYPE_HANDLE_MEMBER_DVR,
+            'info': mock.ANY}
         calls = [mock.call(expected_members),
+                 mock.call(expected_members_dvr),
                  mock.call(expected)]
         self.driver.pool_delete(self.ref_pool)
         self.mock_add_request.assert_has_calls(calls)
@@ -671,12 +713,13 @@
         self.ovn_lb.protocol = ['tcp']
         self.ovn_lb.uuid = uuidutils.generate_uuid()
         self.member_line = (
-            'member_%s_%s:%s' %
-            (self.member_id, self.member_address, self.member_port))
+            'member_%s_%s:%s_%s' %
+            (self.member_id, self.member_address,
+             self.member_port, self.member_subnet_id))
         self.ovn_lb.external_ids = {
-            ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
-            ovn_driver.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
-            ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
+            ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
+            ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
+            ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
             'enabled': True,
             'pool_%s' % self.pool_id: self.member_line,
             'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
@@ -684,9 +727,10 @@
             execute.return_value = [self.ovn_lb]
         self.helper.ovn_nbdb_api.db_list_rows.return_value.\
             execute.return_value = [self.ovn_lb]
-        mock.patch.object(self.helper,
-                          '_find_ovn_lb_with_pool_key',
-                          return_value=self.ovn_lb).start()
+        self.mock_find_lb_pool_key = mock.patch.object(
+            self.helper,
+            '_find_ovn_lb_with_pool_key',
+            return_value=self.ovn_lb).start()
 
         self.mock_find_ovn_lbs = mock.patch.object(
             ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
@@ -721,8 +765,8 @@
             vip_address=self.vip_address,
             vip_network_id=self.vip_network_id,
             ext_ids={
-                ovn_driver.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
-                ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
+                ovn_const.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
+                ovn_const.LB_EXT_IDS_LS_REFS_KEY:
                     '{\"neutron-%s\": 1}' % net_id})
         self.ref_lb2 = MockedLB(
             uuid=uuidutils.generate_uuid(),
@@ -734,8 +778,8 @@
             vip_address=self.vip_address,
             vip_network_id=self.vip_network_id,
             ext_ids={
-                ovn_driver.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
-                ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
+                ovn_const.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id,
+                ovn_const.LB_EXT_IDS_LS_REFS_KEY:
                     '{\"neutron-%s\": 1}' % net_id})
         # TODO(mjozefcz): Consider using FakeOVNRouter.
         self.router = fakes.FakeOvsdbRow.create_one_ovsdb_row(
@@ -845,20 +889,20 @@
             'id': self.ovn_lb.name,
             'protocol': 'tcp',
             'vip_address': udp_lb.external_ids.get(
-                ovn_driver.LB_EXT_IDS_VIP_KEY),
+                ovn_const.LB_EXT_IDS_VIP_KEY),
             'vip_port_id':
                 udp_lb.external_ids.get(
-                    ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY),
-            ovn_driver.LB_EXT_IDS_LR_REF_KEY:
+                    ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY),
+            ovn_const.LB_EXT_IDS_LR_REF_KEY:
                 udp_lb.external_ids.get(
-                    ovn_driver.LB_EXT_IDS_LR_REF_KEY),
-            ovn_driver.LB_EXT_IDS_LS_REFS_KEY:
+                    ovn_const.LB_EXT_IDS_LR_REF_KEY),
+            ovn_const.LB_EXT_IDS_LS_REFS_KEY:
                 udp_lb.external_ids.get(
-                    ovn_driver.LB_EXT_IDS_LS_REFS_KEY),
+                    ovn_const.LB_EXT_IDS_LS_REFS_KEY),
             'admin_state_up': 'True',
-            ovn_driver.LB_EXT_IDS_VIP_FIP_KEY:
+            ovn_const.LB_EXT_IDS_VIP_FIP_KEY:
                 udp_lb.external_ids.get(
-                    ovn_driver.LB_EXT_IDS_VIP_FIP_KEY)}
+                    ovn_const.LB_EXT_IDS_VIP_FIP_KEY)}
         lbc.assert_called_once_with(expected_lb_info, protocol='tcp')
 
     def test__get_or_create_ovn_lb_found(self):
@@ -896,8 +940,8 @@
                          constants.OFFLINE)
         self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
             'Load_Balancer', external_ids={
-                ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY,
-                ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
+                ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY,
+                ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
                 'enabled': 'False'},
             name=mock.ANY,
             protocol=None)
@@ -914,8 +958,8 @@
                          constants.ONLINE)
         self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
             'Load_Balancer', external_ids={
-                ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY,
-                ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
+                ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY,
+                ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
                 'enabled': 'True'},
             name=mock.ANY,
             protocol=None)
@@ -929,8 +973,8 @@
         """
         self.lb['admin_state_up'] = True
         self.lb['protocol'] = 'UDP'
-        self.lb[ovn_driver.LB_EXT_IDS_LR_REF_KEY] = 'foo'
-        self.lb[ovn_driver.LB_EXT_IDS_LS_REFS_KEY] = "{\"neutron-foo\": 1}"
+        self.lb[ovn_const.LB_EXT_IDS_LR_REF_KEY] = 'foo'
+        self.lb[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = "{\"neutron-foo\": 1}"
         net_dr.return_value.neutron_client.list_ports.return_value = (
             self.ports)
         status = self.helper.lb_create(self.lb, protocol='UDP')
@@ -940,9 +984,9 @@
                          constants.ONLINE)
         self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
             'Load_Balancer', external_ids={
-                ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY,
-                ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
-                ovn_driver.LB_EXT_IDS_LR_REF_KEY: 'foo',
+                ovn_const.LB_EXT_IDS_VIP_KEY: mock.ANY,
+                ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY,
+                ovn_const.LB_EXT_IDS_LR_REF_KEY: 'foo',
                 'enabled': 'True'},
             name=mock.ANY,
             protocol='udp')
||||