Merge "Add Health Monitor support"
commit b25bc57734
@@ -31,6 +31,7 @@ def OvnProviderAgent(exit_event):
    helper = ovn_helper.OvnProviderHelper()
    events = [ovn_event.LogicalRouterPortEvent(helper),
              ovn_event.LogicalSwitchPortUpdateEvent(helper)]
    sb_events = [ovn_event.ServiceMonitorUpdateEvent(helper)]

    # NOTE(mjozefcz): This API is only for handling OVSDB events!
    ovn_nb_idl_for_events = impl_idl_ovn.OvnNbIdlForLb(
@@ -38,8 +39,15 @@ def OvnProviderAgent(exit_event):
    ovn_nb_idl_for_events.notify_handler.watch_events(events)
    ovn_nb_idl_for_events.start()

    ovn_sb_idl_for_events = impl_idl_ovn.OvnSbIdlForLb(
        event_lock_name=OVN_EVENT_LOCK_NAME)
    ovn_sb_idl_for_events.notify_handler.watch_events(sb_events)
    ovn_sb_idl_for_events.start()

    LOG.info('OVN provider agent has started.')
    exit_event.wait()
    LOG.info('OVN provider agent is exiting.')
    ovn_nb_idl_for_events.notify_handler.unwatch_events(events)
    ovn_nb_idl_for_events.stop()
    ovn_sb_idl_for_events.notify_handler.unwatch_events(sb_events)
    ovn_sb_idl_for_events.stop()
@@ -40,6 +40,26 @@ ovn_opts = [
               default='',
               help=_('The PEM file with CA certificate that OVN should use to'
                      ' verify certificates presented to it by SSL peers')),
    cfg.StrOpt('ovn_sb_connection',
               default='tcp:127.0.0.1:6642',
               help=_('The connection string for the OVN_Southbound OVSDB.\n'
                      'Use tcp:IP:PORT for TCP connection.\n'
                      'Use ssl:IP:PORT for SSL connection. The '
                      'ovn_sb_private_key, ovn_sb_certificate and '
                      'ovn_sb_ca_cert are mandatory.\n'
                      'Use unix:FILE for unix domain socket connection.')),
    cfg.StrOpt('ovn_sb_private_key',
               default='',
               help=_('The PEM file with private key for SSL connection to '
                      'OVN-SB-DB')),
    cfg.StrOpt('ovn_sb_certificate',
               default='',
               help=_('The PEM file with certificate that certifies the '
                      'private key specified in ovn_sb_private_key')),
    cfg.StrOpt('ovn_sb_ca_cert',
               default='',
               help=_('The PEM file with CA certificate that OVN should use to'
                      ' verify certificates presented to it by SSL peers')),
    cfg.IntOpt('ovsdb_connection_timeout',
               default=180,
               help=_('Timeout in seconds for the OVSDB '
@@ -107,6 +127,22 @@ def get_ovn_nb_ca_cert():
    return cfg.CONF.ovn.ovn_nb_ca_cert


def get_ovn_sb_connection():
    return cfg.CONF.ovn.ovn_sb_connection


def get_ovn_sb_private_key():
    return cfg.CONF.ovn.ovn_sb_private_key


def get_ovn_sb_certificate():
    return cfg.CONF.ovn.ovn_sb_certificate


def get_ovn_sb_ca_cert():
    return cfg.CONF.ovn.ovn_sb_ca_cert


def get_ovn_ovsdb_timeout():
    return cfg.CONF.ovn.ovsdb_connection_timeout
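The new ovn_sb_* options mirror the existing Northbound ones and are read from the [ovn] section of the Octavia configuration. A hypothetical example follows (host, port and file paths are placeholders, not values taken from this change):

    # [ovn]
    # ovn_sb_connection = ssl:192.0.2.20:6642
    # ovn_sb_private_key = /etc/octavia/ovn-sb-privkey.pem
    # ovn_sb_certificate = /etc/octavia/ovn-sb-cert.pem
    # ovn_sb_ca_cert = /etc/octavia/ovn-sb-cacert.pem
    #
    # When ovn_sb_connection uses ssl:, the three PEM options above are
    # mandatory, as the help text states.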
@@ -34,6 +34,7 @@ LB_EXT_IDS_LR_REF_KEY = 'lr_ref'
LB_EXT_IDS_POOL_PREFIX = 'pool_'
LB_EXT_IDS_LISTENER_PREFIX = 'listener_'
LB_EXT_IDS_MEMBER_PREFIX = 'member_'
LB_EXT_IDS_HM_KEY = 'octavia:healthmonitor'
LB_EXT_IDS_VIP_KEY = 'neutron:vip'
LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip'
LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id'
@@ -60,6 +61,10 @@ REQ_TYPE_LB_CREATE_LRP_ASSOC = 'lb_create_lrp_assoc'
REQ_TYPE_LB_DELETE_LRP_ASSOC = 'lb_delete_lrp_assoc'
REQ_TYPE_HANDLE_VIP_FIP = 'handle_vip_fip'
REQ_TYPE_HANDLE_MEMBER_DVR = 'handle_member_dvr'
REQ_TYPE_HM_CREATE = 'hm_create'
REQ_TYPE_HM_UPDATE = 'hm_update'
REQ_TYPE_HM_DELETE = 'hm_delete'
REQ_TYPE_HM_UPDATE_EVENT = 'hm_update_event'

REQ_TYPE_EXIT = 'exit'

@@ -78,6 +83,10 @@ OVN_NATIVE_LB_PROTOCOLS = [constants.PROTOCOL_TCP,
                           constants.PROTOCOL_SCTP, ]
OVN_NATIVE_LB_ALGORITHMS = [constants.LB_ALGORITHM_SOURCE_IP_PORT, ]

# This driver only supports UDP Connect and TCP health monitors
SUPPORTED_HEALTH_MONITOR_TYPES = [constants.HEALTH_MONITOR_UDP_CONNECT,
                                  constants.HEALTH_MONITOR_TCP]

# Prepended to exception log messages
EXCEPTION_MSG = "Exception occurred during %s"

@@ -13,7 +13,6 @@
# under the License.

import copy
import re

import netaddr
from octavia_lib.api.drivers import data_models as o_datamodels
@@ -44,6 +43,10 @@ class OvnProviderDriver(driver_base.ProviderDriver):
    def __del__(self):
        self._ovn_helper.shutdown()

    def _is_health_check_supported(self):
        return self._ovn_helper.ovn_nbdb_api.is_col_present(
            'Load_Balancer', 'health_check')

    def _check_for_supported_protocols(self, protocol):
        if protocol not in ovn_const.OVN_NATIVE_LB_PROTOCOLS:
            msg = _('OVN provider does not support %s protocol') % protocol
@@ -200,6 +203,14 @@ class OvnProviderDriver(driver_base.ProviderDriver):
            return True
        return False

    def _check_member_monitor_options(self, member):
        if self._check_monitor_options(member):
            msg = _('OVN Load Balancer does not support different member '
                    'monitor address or port.')
            raise driver_exceptions.UnsupportedOptionError(
                user_fault_string=msg,
                operator_fault_string=msg)

    def _ip_version_differs(self, member):
        _, ovn_lb = self._ovn_helper._find_ovn_lb_by_pool_id(member.pool_id)
        lb_vip = ovn_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY]
@@ -207,11 +218,8 @@ class OvnProviderDriver(driver_base.ProviderDriver):
                netaddr.IPNetwork(member.address).version)

    def member_create(self, member):
        if self._check_monitor_options(member):
            msg = _('OVN provider does not support monitor options')
            raise driver_exceptions.UnsupportedOptionError(
                user_fault_string=msg,
                operator_fault_string=msg)
        # Validate monitoring options if present
        self._check_member_monitor_options(member)
        if self._ip_version_differs(member):
            raise ovn_exc.IPVersionsMixingNotSupportedError()
        admin_state_up = member.admin_state_up
@@ -269,11 +277,8 @@ class OvnProviderDriver(driver_base.ProviderDriver):
        self._ovn_helper.add_request(request)

    def member_update(self, old_member, new_member):
        if self._check_monitor_options(new_member):
            msg = _('OVN provider does not support monitor options')
            raise driver_exceptions.UnsupportedOptionError(
                user_fault_string=msg,
                operator_fault_string=msg)
        # Validate monitoring options if present
        self._check_member_monitor_options(new_member)
        if new_member.address and self._ip_version_differs(new_member):
            raise ovn_exc.IPVersionsMixingNotSupportedError()
        request_info = {'id': new_member.member_id,
@@ -313,23 +318,14 @@ class OvnProviderDriver(driver_base.ProviderDriver):
            if isinstance(admin_state_up, o_datamodels.UnsetType):
                admin_state_up = True

            member_info = self._ovn_helper._get_member_key(member)
            # TODO(mjozefcz): Remove this workaround in W release.
            member_info_old = self._ovn_helper._get_member_key(
                member, old_convention=True)
            member_found = [x for x in existing_members
                            if re.match(member_info_old, x)]
            if not member_found:
            member_info = self._ovn_helper._get_member_info(member)
            if member_info not in existing_members:
                req_type = ovn_const.REQ_TYPE_MEMBER_CREATE
            else:
                # If member exists in pool, then Update
                req_type = ovn_const.REQ_TYPE_MEMBER_UPDATE
                # Remove all updating members so only deleted ones are left
                # TODO(mjozefcz): Remove this workaround in W release.
                try:
                    members_to_delete.remove(member_info_old)
                except ValueError:
                    members_to_delete.remove(member_info)
                members_to_delete.remove(member_info)

            request_info = {'id': member.member_id,
                            'address': member.address,
@@ -377,3 +373,66 @@ class OvnProviderDriver(driver_base.ProviderDriver):
            raise driver_exceptions.DriverError(
                **kwargs)
        return vip_dict

    def _validate_hm_support(self, hm, action='create'):
        if not self._is_health_check_supported():
            msg = _('OVN Load Balancer supports Health Check provider '
                    'from version 2.12. Upgrade OVN in order to use it.')
            raise driver_exceptions.UnsupportedOptionError(
                user_fault_string=msg,
                operator_fault_string=msg)
        # type is only required for create
        if action == 'create':
            if isinstance(hm.type, o_datamodels.UnsetType):
                msg = _('OVN provider health monitor type not specified.')
                # seems this should be other than "unsupported"?
                raise driver_exceptions.UnsupportedOptionError(
                    user_fault_string=msg,
                    operator_fault_string=msg)
            if hm.type not in ovn_const.SUPPORTED_HEALTH_MONITOR_TYPES:
                msg = (_('OVN provider does not support %s '
                         'health monitor type. Supported types: %s') %
                       (hm.type,
                        ', '.join(ovn_const.SUPPORTED_HEALTH_MONITOR_TYPES)))
                raise driver_exceptions.UnsupportedOptionError(
                    user_fault_string=msg,
                    operator_fault_string=msg)

    def health_monitor_create(self, healthmonitor):
        self._validate_hm_support(healthmonitor)
        admin_state_up = healthmonitor.admin_state_up
        if isinstance(admin_state_up, o_datamodels.UnsetType):
            admin_state_up = True
        request_info = {'id': healthmonitor.healthmonitor_id,
                        'pool_id': healthmonitor.pool_id,
                        'type': healthmonitor.type,
                        'interval': healthmonitor.delay,
                        'timeout': healthmonitor.timeout,
                        'failure_count': healthmonitor.max_retries_down,
                        'success_count': healthmonitor.max_retries,
                        'admin_state_up': admin_state_up}
        request = {'type': ovn_const.REQ_TYPE_HM_CREATE,
                   'info': request_info}
        self._ovn_helper.add_request(request)

    def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
        self._validate_hm_support(new_healthmonitor, action='update')
        admin_state_up = new_healthmonitor.admin_state_up
        if isinstance(admin_state_up, o_datamodels.UnsetType):
            admin_state_up = True
        request_info = {'id': new_healthmonitor.healthmonitor_id,
                        'pool_id': old_healthmonitor.pool_id,
                        'interval': new_healthmonitor.delay,
                        'timeout': new_healthmonitor.timeout,
                        'failure_count': new_healthmonitor.max_retries_down,
                        'success_count': new_healthmonitor.max_retries,
                        'admin_state_up': admin_state_up}
        request = {'type': ovn_const.REQ_TYPE_HM_UPDATE,
                   'info': request_info}
        self._ovn_helper.add_request(request)

    def health_monitor_delete(self, healthmonitor):
        request_info = {'id': healthmonitor.healthmonitor_id}
        request = {'type': ovn_const.REQ_TYPE_HM_DELETE,
                   'info': request_info}
        self._ovn_helper.add_request(request)
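To illustrate the new driver entry points (illustration only; the UUIDs are placeholders and Octavia normally constructs this object itself), a health monitor arrives as an octavia_lib data model and is translated into the request dictionary shown above:

    from octavia_lib.api.drivers import data_models as o_datamodels
    from octavia_lib.common import constants

    hm = o_datamodels.HealthMonitor(
        healthmonitor_id='2b2b2b2b-0000-0000-0000-000000000001',
        pool_id='3c3c3c3c-0000-0000-0000-000000000002',
        type=constants.HEALTH_MONITOR_TCP,  # must be a supported type
        delay=6,                # becomes the OVN 'interval' option
        timeout=2,
        max_retries=1,          # becomes 'success_count'
        max_retries_down=3,     # becomes 'failure_count'
        admin_state_up=True)
    # Octavia passes this object to OvnProviderDriver.health_monitor_create(),
    # which validates it and queues a REQ_TYPE_HM_CREATE request for the helper.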
@@ -66,3 +66,20 @@ class LogicalSwitchPortUpdateEvent(row_event.RowEvent):
            # Handle port update only for vip ports created by
            # this driver.
            self.driver.vip_port_update_handler(row)


class ServiceMonitorUpdateEvent(row_event.RowEvent):

    def __init__(self, driver):
        table = 'Service_Monitor'
        events = (self.ROW_UPDATE,)
        super().__init__(events, table, None)
        self.event_name = 'ServiceMonitorUpdateEvent'
        self.driver = driver

    def run(self, event, row, old):
        LOG.debug('ServiceMonitorUpdateEvent logged, '
                  '%(event)s, %(row)s',
                  {'event': event,
                   'row': row})
        self.driver.hm_update_event_handler(row)
@@ -18,6 +18,7 @@ import re
import threading

import netaddr
from neutron_lib import constants as n_const
from neutronclient.common import exceptions as n_exc
from octavia_lib.api.drivers import data_models as o_datamodels
from octavia_lib.api.drivers import driver_lib as o_driver_lib
@@ -79,6 +80,10 @@ class OvnProviderHelper():
            ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc,
            ovn_const.REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip,
            ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR: self.handle_member_dvr,
            ovn_const.REQ_TYPE_HM_CREATE: self.hm_create,
            ovn_const.REQ_TYPE_HM_UPDATE: self.hm_update,
            ovn_const.REQ_TYPE_HM_DELETE: self.hm_delete,
            ovn_const.REQ_TYPE_HM_UPDATE_EVENT: self.hm_update_event,
        }

    @staticmethod
@@ -127,6 +132,17 @@ class OvnProviderHelper():
            raise idlutils.RowNotFound(table=row._table.name,
                                       col=col, match=key) from e

    def _ensure_hm_ovn_port(self, network_id):
        # We need to have a metadata or dhcp port, OVN should have created
        # one when the network was created

        neutron_client = clients.get_neutron_client()
        meta_dhcp_port = neutron_client.list_ports(
            network_id=network_id,
            device_owner=n_const.DEVICE_OWNER_DISTRIBUTED)
        if meta_dhcp_port['ports']:
            return meta_dhcp_port['ports'][0]

    def _get_nw_router_info_on_interface_event(self, lrp):
        """Get the Router and Network information on an interface event

@@ -646,28 +662,31 @@ class OvnProviderHelper():
        mem_info = []
        if member:
            for mem in member.split(','):
                mem_ip_port = mem.split('_')[2]
                mem_info.append(tuple(mem_ip_port.rsplit(':', 1)))
                mem_split = mem.split('_')
                mem_ip_port = mem_split[2]
                mem_ip, mem_port = mem_ip_port.rsplit(':', 1)
                mem_subnet = mem_split[3]
                mem_info.append((mem_ip, mem_port, mem_subnet))
        return mem_info

    def _get_member_key(self, member, old_convention=False):
    def _get_member_info(self, member):
        member_info = ''
        if isinstance(member, dict):
            member_info = '%s%s_%s:%s' % (
            subnet_id = member.get(constants.SUBNET_ID, '')
            member_info = '%s%s_%s:%s_%s' % (
                ovn_const.LB_EXT_IDS_MEMBER_PREFIX,
                member[constants.ID],
                member[constants.ADDRESS],
                member[constants.PROTOCOL_PORT])
            if not old_convention and member.get(constants.SUBNET_ID):
                member_info += "_" + member[constants.SUBNET_ID]
                member[constants.PROTOCOL_PORT],
                subnet_id)
        elif isinstance(member, o_datamodels.Member):
            member_info = '%s%s_%s:%s' % (
            subnet_id = member.subnet_id or ''
            member_info = '%s%s_%s:%s_%s' % (
                ovn_const.LB_EXT_IDS_MEMBER_PREFIX,
                member.member_id,
                member.address,
                member.protocol_port)
            if not old_convention and member.subnet_id:
                member_info += "_" + member.subnet_id
                member.protocol_port,
                subnet_id)
        return member_info

    def _make_listener_key_value(self, listener_port, pool_id):
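For clarity, the member key convention changed by this hunk is shown below with placeholder UUIDs; every entry now ends with the subnet ID, and _extract_member_info() returns (ip, port, subnet) tuples:

    # external_ids['pool_<pool_id>'] entry format:
    #   member_<member_id>_<address>:<protocol_port>_<subnet_id>
    entry = ('member_fc5e7c5a-1c1b-4e05-9b8b-1a2b3c4d5e6f_'
             '10.0.0.10:80_2f6c8e1a-9a0d-4f1e-8c1d-0a1b2c3d4e5f')
    member_id, ip_port, subnet_id = entry.split('_')[1:4]
    ip, port = ip_port.rsplit(':', 1)
    assert (ip, port) == ('10.0.0.10', '80')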
@@ -698,6 +717,15 @@ class OvnProviderHelper():
                    k[len(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):])
        return pool_listeners

    def _get_pool_listener_port(self, ovn_lb, pool_key):
        for k, v in ovn_lb.external_ids.items():
            if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k:
                continue
            vip_port, p_key = self._extract_listener_key_value(v)
            if pool_key == p_key:
                return vip_port
        return None

    def _frame_vip_ips(self, lb_external_ids):
        vip_ips = {}
        # If load balancer is disabled, return
@@ -720,7 +748,7 @@ class OvnProviderHelper():
                continue

            ips = []
            for member_ip, member_port in self._extract_member_info(
            for member_ip, member_port, subnet in self._extract_member_info(
                    lb_external_ids[pool_id]):
                if netaddr.IPNetwork(member_ip).version == 6:
                    ips.append('[%s]:%s' % (member_ip, member_port))
@@ -1047,18 +1075,6 @@ class OvnProviderHelper():
                    str(listener[constants.PROTOCOL]).lower())))
            commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids))
            self._execute_commands(commands)

            operating_status = constants.ONLINE
            if not listener.get(constants.ADMIN_STATE_UP, True):
                operating_status = constants.OFFLINE
            status = {
                constants.LISTENERS: [
                    {constants.ID: listener[constants.ID],
                     constants.PROVISIONING_STATUS: constants.ACTIVE,
                     constants.OPERATING_STATUS: operating_status}],
                constants.LOADBALANCERS: [
                    {constants.ID: listener[constants.LOADBALANCER_ID],
                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
        except Exception:
            LOG.exception(ovn_const.EXCEPTION_MSG, "creation of listener")
            status = {
@@ -1069,6 +1085,25 @@ class OvnProviderHelper():
                constants.LOADBALANCERS: [
                    {constants.ID: listener[constants.LOADBALANCER_ID],
                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
            return status

        operating_status = constants.ONLINE
        if not listener.get(constants.ADMIN_STATE_UP, True):
            operating_status = constants.OFFLINE

        if (ovn_lb.health_check and
                not self._update_hm_vip(ovn_lb,
                                        listener[constants.PROTOCOL_PORT])):
            operating_status = constants.ERROR

        status = {
            constants.LISTENERS: [
                {constants.ID: listener[constants.ID],
                 constants.PROVISIONING_STATUS: constants.ACTIVE,
                 constants.OPERATING_STATUS: operating_status}],
            constants.LOADBALANCERS: [
                {constants.ID: listener[constants.LOADBALANCER_ID],
                 constants.PROVISIONING_STATUS: constants.ACTIVE}]}
        return status

    def listener_delete(self, listener):
@@ -1440,14 +1475,10 @@ class OvnProviderHelper():
        existing_members = external_ids[pool_key]
        if existing_members:
            existing_members = existing_members.split(",")
        member_info = self._get_member_key(member)
        # TODO(mjozefcz): Remove this workaround in W release.
        member_info_old = self._get_member_key(member, old_convention=True)
        member_found = [x for x in existing_members
                        if re.match(member_info_old, x)]
        if member_found:
        member_info = self._get_member_info(member)
        if member_info in existing_members:
            # Member already present
            return
            return None
        if existing_members:
            existing_members.append(member_info)
            pool_data = {pool_key: ",".join(existing_members)}
@@ -1487,12 +1518,14 @@ class OvnProviderHelper():
            pass

        self._execute_commands(commands)
        return member_info

    def member_create(self, member):
        new_member = None
        try:
            pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
                member[constants.POOL_ID])
            self._add_member(member, ovn_lb, pool_key)
            new_member = self._add_member(member, ovn_lb, pool_key)
            pool = {constants.ID: member[constants.POOL_ID],
                    constants.PROVISIONING_STATUS: constants.ACTIVE,
                    constants.OPERATING_STATUS: constants.ONLINE}
@@ -1526,22 +1559,20 @@ class OvnProviderHelper():
                {constants.ID: ovn_lb.name,
                 constants.PROVISIONING_STATUS: constants.ACTIVE}]}

        if new_member and ovn_lb.health_check:
            operating_status = constants.ONLINE
            if not self._update_hm_members(ovn_lb, pool_key):
                operating_status = constants.ERROR
            member_status[constants.OPERATING_STATUS] = operating_status
        return status

    def _remove_member(self, member, ovn_lb, pool_key):
        external_ids = copy.deepcopy(ovn_lb.external_ids)
        existing_members = external_ids[pool_key].split(",")
        # TODO(mjozefcz): Delete this workaround in W release.
        # To support backward compatibility member
        # could be defined as `member`_`id`_`ip`:`port`_`subnet_id`
        # or defined as `member`_`id`_`ip`:`port`
        member_info_old = self._get_member_key(member, old_convention=True)

        member_found = [x for x in existing_members
                        if re.match(member_info_old, x)]
        if member_found:
        member_info = self._get_member_info(member)
        if member_info in existing_members:
            commands = []
            existing_members.remove(member_found[0])
            existing_members.remove(member_info)

            if not existing_members:
                pool_status = constants.OFFLINE
@@ -1574,6 +1605,8 @@ class OvnProviderHelper():
            pool = {constants.ID: member[constants.POOL_ID],
                    constants.PROVISIONING_STATUS: constants.ACTIVE,
                    constants.OPERATING_STATUS: pool_status}
            if pool_status == constants.ONLINE and ovn_lb.health_check:
                self._update_hm_members(ovn_lb, pool_key)
            status = {
                constants.POOLS: [pool],
                constants.MEMBERS: [
@@ -1608,7 +1641,7 @@ class OvnProviderHelper():
        commands = []
        external_ids = copy.deepcopy(ovn_lb.external_ids)
        existing_members = external_ids[pool_key].split(",")
        member_info = self._get_member_key(member)
        member_info = self._get_member_info(member)
        for mem in existing_members:
            if (member_info.split('_')[1] == mem.split('_')[1] and
                    mem != member_info):
@@ -1843,7 +1876,540 @@ class OvnProviderHelper():
                    fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY],
                    empty_update)
            except n_exc.NotFound:
                LOG.warning('Members %(member)s FIP %(fip)s not found in '
                LOG.warning('Member %(member)s FIP %(fip)s not found in '
                            'Neutron. Cannot update it.',
                            {'member': info['id'],
                             'fip': fip.external_ip})

    def _get_member_lsp(self, member_ip, member_subnet_id):
        neutron_client = clients.get_neutron_client()
        try:
            member_subnet = neutron_client.show_subnet(member_subnet_id)
        except n_exc.NotFound:
            LOG.exception('Subnet %s not found while trying to '
                          'fetch its data.', member_subnet_id)
            return
        ls_name = utils.ovn_name(member_subnet['subnet']['network_id'])
        try:
            ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name)
        except idlutils.RowNotFound:
            LOG.warning("Logical Switch %s not found.", ls_name)
            return
        f = utils.remove_macs_from_lsp_addresses
        for port in ls.ports:
            if member_ip in f(port.addresses):
                # We found particular port
                return port

    def _add_hm(self, ovn_lb, pool_key, info):
        hm_id = info[constants.ID]
        status = {constants.ID: hm_id,
                  constants.PROVISIONING_STATUS: constants.ERROR,
                  constants.OPERATING_STATUS: constants.ERROR}
        # Example
        # MONITOR_PRT = 80
        # ID=$(ovn-nbctl --bare --column _uuid find
        #      Load_Balancer_Health_Check vip="${LB_VIP_ADDR}\:${MONITOR_PRT}")
        # In our case the monitor port will be the members protocol port
        vip = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)
        if not vip:
            LOG.error("Could not find VIP for HM %s, LB external_ids: %s",
                      hm_id, ovn_lb.external_ids)
            return status
        vip_port = self._get_pool_listener_port(ovn_lb, pool_key)
        if not vip_port:
            # This is not fatal as we can add it when a listener is created
            vip = []
        else:
            vip = vip + ':' + vip_port

        # ovn-nbctl --wait=sb --
        #  set Load_Balancer_Health_Check ${ID} options:\"interval\"=6 --
        #  set Load_Balancer_Health_Check ${ID} options:\"timeout\"=2 --
        #  set Load_Balancer_Health_Check ${ID} options:\"success_count\"=1 --
        #  set Load_Balancer_Health_Check ${ID} options:\"failure_count\"=3
        options = {
            'interval': str(info['interval']),
            'timeout': str(info['timeout']),
            'success_count': str(info['success_count']),
            'failure_count': str(info['failure_count'])}

        # This is to enable lookups by Octavia DB ID value
        external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: hm_id}

        # Just seems like this needs ovsdbapp support, see:
        #  ovsdbapp/schema/ovn_northbound/impl_idl.py - lb_add()
        #  ovsdbapp/schema/ovn_northbound/commands.py - LbAddCommand()
        # then this could just be self.ovn_nbdb_api.lb_hm_add()
        kwargs = {
            'vip': vip,
            'options': options,
            'external_ids': external_ids}
        operating_status = constants.ONLINE
        if not info['admin_state_up']:
            operating_status = constants.OFFLINE
        try:
            with self.ovn_nbdb_api.transaction(check_error=True) as txn:
                health_check = txn.add(
                    self.ovn_nbdb_api.db_create(
                        'Load_Balancer_Health_Check',
                        **kwargs))
                txn.add(self.ovn_nbdb_api.db_add(
                    'Load_Balancer', ovn_lb.uuid,
                    'health_check', health_check))
            status = {constants.ID: hm_id,
                      constants.PROVISIONING_STATUS: constants.ACTIVE,
                      constants.OPERATING_STATUS: operating_status}
        except Exception:
            # Any Exception will return ERROR status
            LOG.exception(ovn_const.EXCEPTION_MSG, "set of health check")
        return status

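Assuming the example values from the comments above (VIP 172.24.4.246, listener port 80 and the default Octavia timings), the kwargs handed to db_create() would look roughly like this; the UUID is a placeholder:

    # kwargs = {
    #     'vip': '172.24.4.246:80',
    #     'options': {'interval': '6', 'timeout': '2',
    #                 'success_count': '1', 'failure_count': '3'},
    #     'external_ids': {'octavia:healthmonitor':
    #                      '2b2b2b2b-0000-0000-0000-000000000001'}}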
    def _update_hm_vip(self, ovn_lb, vip_port):
        hm = self._lookup_hm_by_id(ovn_lb.health_check)
        if not hm:
            LOG.error("Could not find HM with key: %s", ovn_lb.health_check)
            return False

        vip = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_KEY)
        if not vip:
            LOG.error("Could not find VIP for HM %s, LB external_ids: %s",
                      hm.uuid, ovn_lb.external_ids)
            return False

        vip = vip + ':' + str(vip_port)
        commands = []
        commands.append(
            self.ovn_nbdb_api.db_set(
                'Load_Balancer_Health_Check', hm.uuid,
                ('vip', vip)))
        self._execute_commands(commands)
        return True

    def _update_hm_members(self, ovn_lb, pool_key):
        mappings = {}
        # For each member, set its HM
        for member_ip, member_port, member_subnet in self._extract_member_info(
                ovn_lb.external_ids[pool_key]):
            member_lsp = self._get_member_lsp(member_ip, member_subnet)
            if not member_lsp:
                LOG.error("Member %(member)s Logical_Switch_Port not found. "
                          "Cannot create a Health Monitor for pool %(pool)s.",
                          {'member': member_ip, 'pool': pool_key})
                return False

            network_id = member_lsp.external_ids.get(
                ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY).split('neutron-')[1]
            hm_port = self._ensure_hm_ovn_port(network_id)
            if not hm_port:
                LOG.error("No port on network %(network)s available for "
                          "health monitoring. Cannot create a Health Monitor "
                          "for pool %(pool)s.",
                          {'network': network_id,
                           'pool': pool_key})
                return False
            hm_source_ip = None
            for fixed_ip in hm_port['fixed_ips']:
                if fixed_ip['subnet_id'] == member_subnet:
                    hm_source_ip = fixed_ip['ip_address']
                    break
            if not hm_source_ip:
                LOG.error("No port on subnet %(subnet)s available for "
                          "health monitoring member IP %(member)s. Cannot "
                          "create a Health Monitor for pool %(pool)s.",
                          {'subnet': member_subnet,
                           'member': member_ip,
                           'pool': pool_key})
                return False
            # ovn-nbctl set load_balancer ${OVN_LB_ID}
            #   ip_port_mappings:${MEMBER_IP}=${LSP_NAME_MEMBER}:${HEALTH_SRC}
            # where:
            #  OVN_LB_ID: id of LB
            #  MEMBER_IP: IP of member_lsp
            #  HEALTH_SRC: source IP of hm_port

            # need output like this
            # vips: {"172.24.4.246:80"="10.0.0.10:80"}
            # ip_port_mappings: {"10.0.0.10"="ID:10.0.0.2"}
            # ip_port_mappings: {"MEMBER_IP"="LSP_NAME_MEMBER:HEALTH_SRC"}
            # OVN does not support IPv6 Health Checks, but we check anyways
            member_src = '%s:' % member_lsp.name
            if netaddr.IPNetwork(hm_source_ip).version == 6:
                member_src += '[%s]' % hm_source_ip
            else:
                member_src += '%s' % hm_source_ip

            if netaddr.IPNetwork(member_ip).version == 6:
                member_ip = '[%s]' % member_ip
            mappings[member_ip] = member_src

        commands = []
        commands.append(
            self.ovn_nbdb_api.db_set(
                'Load_Balancer', ovn_lb.uuid,
                ('ip_port_mappings', mappings)))
        self._execute_commands(commands)
        return True

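Continuing the same hypothetical example, a single member 10.0.0.10 whose logical switch port name is a placeholder and whose health-source address is 10.0.0.2 would produce the following ip_port_mappings value on the Load_Balancer row:

    # mappings = {'10.0.0.10': '<member-lsp-name-placeholder>:10.0.0.2'}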
    def _lookup_hm_by_id(self, hm_id):
        hms = self.ovn_nbdb_api.db_list_rows(
            'Load_Balancer_Health_Check').execute(check_error=True)
        for hm in hms:
            if (ovn_const.LB_EXT_IDS_HM_KEY in hm.external_ids and
                    hm.external_ids[ovn_const.LB_EXT_IDS_HM_KEY] == hm_id):
                return hm
        raise idlutils.RowNotFound(table='Load_Balancer_Health_Check',
                                   col='external_ids', match=hm_id)

    def _lookup_lb_by_hm_id(self, hm_id):
        lbs = self.ovn_nbdb_api.db_find_rows(
            'Load_Balancer', ('health_check', '=', [hm_id])).execute()
        return lbs[0] if lbs else None

    def _find_ovn_lb_from_hm_id(self, hm_id):
        try:
            hm = self._lookup_hm_by_id(hm_id)
        except idlutils.RowNotFound:
            LOG.debug("Loadbalancer health monitor %s not found!", hm_id)
            return None, None

        try:
            ovn_lb = self._lookup_lb_by_hm_id(hm.uuid)
        except idlutils.RowNotFound:
            LOG.debug("Loadbalancer not found with health_check %s !", hm.uuid)
            return hm, None

        return hm, ovn_lb

    def hm_create(self, info):
        status = {
            constants.HEALTHMONITORS: [
                {constants.ID: info[constants.ID],
                 constants.OPERATING_STATUS: constants.NO_MONITOR,
                 constants.PROVISIONING_STATUS: constants.ERROR}]}

        pool_id = info[constants.POOL_ID]
        pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id)
        if not ovn_lb:
            LOG.debug("Could not find LB with pool id %s", pool_id)
            return status
        status[constants.LOADBALANCERS] = [
            {constants.ID: ovn_lb.name,
             constants.PROVISIONING_STATUS: constants.ACTIVE}]
        if pool_key not in ovn_lb.external_ids:
            # Returning early here will cause the pool to go into
            # PENDING_UPDATE state, which is not good
            LOG.error("Could not find pool with key %s, LB external_ids: %s",
                      pool_key, ovn_lb.external_ids)
            status[constants.POOLS] = [
                {constants.ID: pool_id,
                 constants.OPERATING_STATUS: constants.OFFLINE}]
            return status
        status[constants.POOLS] = [
            {constants.ID: pool_id,
             constants.PROVISIONING_STATUS: constants.ACTIVE,
             constants.OPERATING_STATUS: constants.ONLINE}]

        # Update status for all members in the pool
        member_status = []
        existing_members = ovn_lb.external_ids[pool_key]
        if len(existing_members) > 0:
            for mem_info in existing_members.split(','):
                member_status.append({
                    constants.ID: mem_info.split('_')[1],
                    constants.PROVISIONING_STATUS: constants.ACTIVE,
                    constants.OPERATING_STATUS: constants.ONLINE})
        status[constants.MEMBERS] = member_status

        # MONITOR_PRT = 80
        # ovn-nbctl --wait=sb -- --id=@hc create Load_Balancer_Health_Check
        #   vip="${LB_VIP_ADDR}\:${MONITOR_PRT}" -- add Load_Balancer
        #   ${OVN_LB_ID} health_check @hc
        # options here are interval, timeout, failure_count and success_count
        # from info object passed-in
        hm_status = self._add_hm(ovn_lb, pool_key, info)
        if hm_status[constants.PROVISIONING_STATUS] == constants.ACTIVE:
            if not self._update_hm_members(ovn_lb, pool_key):
                hm_status[constants.PROVISIONING_STATUS] = constants.ERROR
                hm_status[constants.OPERATING_STATUS] = constants.ERROR
        status[constants.HEALTHMONITORS] = [hm_status]
        return status

    def hm_update(self, info):
        status = {
            constants.HEALTHMONITORS: [
                {constants.ID: info[constants.ID],
                 constants.OPERATING_STATUS: constants.ERROR,
                 constants.PROVISIONING_STATUS: constants.ERROR}]}

        hm_id = info[constants.ID]
        pool_id = info[constants.POOL_ID]

        hm, ovn_lb = self._find_ovn_lb_from_hm_id(hm_id)
        if not hm:
            LOG.debug("Loadbalancer health monitor %s not found!", hm_id)
            return status
        if not ovn_lb:
            LOG.debug("Could not find LB with health monitor id %s", hm_id)
            # Do we really need to try this hard?
            pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id)
            if not ovn_lb:
                LOG.debug("Could not find LB with pool id %s", pool_id)
                return status

        options = {
            'interval': str(info['interval']),
            'timeout': str(info['timeout']),
            'success_count': str(info['success_count']),
            'failure_count': str(info['failure_count'])}

        commands = []
        commands.append(
            self.ovn_nbdb_api.db_set(
                'Load_Balancer_Health_Check', hm.uuid,
                ('options', options)))
        self._execute_commands(commands)

        operating_status = constants.ONLINE
        if not info['admin_state_up']:
            operating_status = constants.OFFLINE
        status = {
            constants.LOADBALANCERS: [
                {constants.ID: ovn_lb.name,
                 constants.PROVISIONING_STATUS: constants.ACTIVE}],
            constants.POOLS: [
                {constants.ID: pool_id,
                 constants.PROVISIONING_STATUS: constants.ACTIVE}],
            constants.HEALTHMONITORS: [
                {constants.ID: info[constants.ID],
                 constants.OPERATING_STATUS: operating_status,
                 constants.PROVISIONING_STATUS: constants.ACTIVE}]}
        return status

    def hm_delete(self, info):
        hm_id = info[constants.ID]
        status = {
            constants.HEALTHMONITORS: [
                {constants.ID: hm_id,
                 constants.OPERATING_STATUS: constants.NO_MONITOR,
                 constants.PROVISIONING_STATUS: constants.DELETED}]}

        hm, ovn_lb = self._find_ovn_lb_from_hm_id(hm_id)
        if not hm or not ovn_lb:
            LOG.debug("Loadbalancer Health Check %s not found in OVN "
                      "Northbound DB. Setting the Loadbalancer Health "
                      "Monitor status to DELETED in Octavia", hm_id)
            return status

        # Need to send pool info in status update to avoid immutable objects,
        # the LB should have this info
        pool_id = None
        for k, v in ovn_lb.external_ids.items():
            if ovn_const.LB_EXT_IDS_POOL_PREFIX in k:
                pool_id = k.split('_')[1]
                break

        # ovn-nbctl clear load_balancer ${OVN_LB_ID} ip_port_mappings
        # ovn-nbctl clear load_balancer ${OVN_LB_ID} health_check
        # TODO(haleyb) remove just the ip_port_mappings for this hm
        commands = []
        commands.append(
            self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb.uuid,
                                       'ip_port_mappings'))
        commands.append(
            self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid,
                                        'health_check', hm.uuid))
        commands.append(
            self.ovn_nbdb_api.db_destroy('Load_Balancer_Health_Check',
                                         hm.uuid))
        self._execute_commands(commands)
        status = {
            constants.LOADBALANCERS: [
                {constants.ID: ovn_lb.name,
                 constants.PROVISIONING_STATUS: constants.ACTIVE}],
            constants.HEALTHMONITORS: [
                {constants.ID: info[constants.ID],
                 constants.OPERATING_STATUS: constants.NO_MONITOR,
                 constants.PROVISIONING_STATUS: constants.DELETED}]}
        if pool_id:
            status[constants.POOLS] = [
                {constants.ID: pool_id,
                 constants.PROVISIONING_STATUS: constants.ACTIVE}]
        else:
            LOG.warning('Pool not found for load balancer %s, status '
                        'update will have incomplete data', ovn_lb.name)
        return status

    def _get_lb_on_hm_event(self, row):
        """Get the Load Balancer information on a health_monitor event

        This function is called when the status of a member has
        been updated.
        Input: Service Monitor row which is coming from
               ServiceMonitorUpdateEvent.
        Output: A row from the load_balancer table matching the member
                for which the event was generated.
        Exception: RowNotFound exception can be generated.
        """
        # ip_port_mappings: {"MEMBER_IP"="LSP_NAME_MEMBER:HEALTH_SRC"}
        # There could be more than one entry in ip_port_mappings!
        mappings = {}
        hm_source_ip = str(row.src_ip)
        member_ip = str(row.ip)
        member_src = '%s:' % row.logical_port
        if netaddr.IPNetwork(hm_source_ip).version == 6:
            member_src += '[%s]' % hm_source_ip
        else:
            member_src += '%s' % hm_source_ip
        if netaddr.IPNetwork(member_ip).version == 6:
            member_ip = '[%s]' % member_ip
        mappings[member_ip] = member_src
        lbs = self.ovn_nbdb_api.db_find_rows(
            'Load_Balancer', (('ip_port_mappings', '=', mappings),
                              ('protocol', '=', row.protocol))).execute()
        return lbs[0] if lbs else None

    def hm_update_event_handler(self, row):
        try:
            ovn_lb = self._get_lb_on_hm_event(row)
        except idlutils.RowNotFound:
            LOG.debug("Load balancer information not found")
            return

        if not ovn_lb:
            LOG.debug("Load balancer not found")
            return

        if row.protocol != ovn_lb.protocol:
            LOG.debug('Row protocol (%s) does not match LB protocol (%s)',
                      row.protocol, ovn_lb.protocol)
            return

        request_info = {'ovn_lb': ovn_lb,
                        'ip': row.ip,
                        'port': str(row.port),
                        'status': row.status}
        self.add_request({'type': ovn_const.REQ_TYPE_HM_UPDATE_EVENT,
                          'info': request_info})

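The Service_Monitor columns consumed by the handler above are sketched below with made-up values; OVN flips the status column as its probes succeed or fail:

    # Hypothetical Service_Monitor row as seen in hm_update_event_handler():
    #   logical_port = '<member-lsp-name-placeholder>'
    #   ip           = '10.0.0.10'     # member address
    #   port         = 80              # member protocol port
    #   protocol     = 'tcp'
    #   src_ip       = '10.0.0.2'      # health-source IP from ip_port_mappings
    #   status       = ['online']      # ['offline'] once probes fail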
    def _get_new_operating_statuses(self, ovn_lb, pool_id, member_id,
                                    member_status):
        # When a member's operating status changes, we have to determine
        # the correct operating_status to report back to Octavia.
        # For example:
        #
        # LB with Pool and 2 members
        #
        # member-1 goes offline
        #   member-1 operating_status is ERROR
        #   if Pool operating_status is ONLINE
        #     Pool operating_status is DEGRADED
        #   if LB operating_status is ONLINE
        #     LB operating_status is DEGRADED
        #
        # member-2 then goes offline
        #   member-2 operating_status is ERROR
        #   Pool operating_status is ERROR
        #   LB operating_status is ERROR
        #
        # The opposite would also have to happen.
        #
        # If there is only one member, the Pool and LB will reflect
        # the same status
        operating_status = member_status

        # Assume the best
        pool_status = constants.ONLINE
        lb_status = constants.ONLINE

        pool = self._octavia_driver_lib.get_pool(pool_id)
        if pool:
            pool_status = pool.operating_status

        lb = self._octavia_driver_lib.get_loadbalancer(ovn_lb.name)
        if lb:
            lb_status = lb.operating_status

        for k, v in ovn_lb.external_ids.items():
            if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k:
                continue
            lb_pool_id = k.split('_')[1]
            if lb_pool_id != pool_id:
                continue
            existing_members = v.split(",")
            for mem in existing_members:
                # Ignore the passed member ID, we already know its status
                mem_id = mem.split('_')[1]
                if mem_id != member_id:
                    member = self._octavia_driver_lib.get_member(mem_id)
                    # If the statuses are different it is degraded
                    if member and member.operating_status != member_status:
                        operating_status = constants.DEGRADED
                        break

        # operating_status will either be ONLINE, ERROR or DEGRADED
        if operating_status == constants.ONLINE:
            if pool_status != constants.ONLINE:
                pool_status = constants.ONLINE
            if lb_status != constants.ONLINE:
                lb_status = constants.ONLINE
        elif operating_status == constants.ERROR:
            if pool_status == constants.ONLINE:
                pool_status = constants.ERROR
            if lb_status == constants.ONLINE:
                lb_status = constants.ERROR
        else:
            if pool_status == constants.ONLINE:
                pool_status = constants.DEGRADED
            if lb_status == constants.ONLINE:
                lb_status = constants.DEGRADED

        return lb_status, pool_status

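A minimal, self-contained sketch of the aggregation rule described in the comment block above (it is not the driver code; it ignores the Octavia driver-library lookups and simply folds member statuses):

    def aggregate_status(member_statuses):
        # ONLINE if every member is ONLINE, ERROR if every member is ERROR,
        # DEGRADED for any mix of the two.
        if all(s == 'ONLINE' for s in member_statuses):
            return 'ONLINE'
        if all(s == 'ERROR' for s in member_statuses):
            return 'ERROR'
        return 'DEGRADED'

    assert aggregate_status(['ONLINE', 'ONLINE']) == 'ONLINE'
    assert aggregate_status(['ERROR', 'ONLINE']) == 'DEGRADED'
    assert aggregate_status(['ERROR', 'ERROR']) == 'ERROR'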
    def hm_update_event(self, info):
        ovn_lb = info['ovn_lb']

        # Lookup pool and member
        pool_id = None
        member_id = None
        for k, v in ovn_lb.external_ids.items():
            if ovn_const.LB_EXT_IDS_POOL_PREFIX not in k:
                continue
            for member_ip, member_port, subnet in self._extract_member_info(v):
                if info['ip'] != member_ip:
                    continue
                if info['port'] != member_port:
                    continue
                # match
                pool_id = k.split('_')[1]
                member_id = v.split('_')[1]
                break

            # found it in inner loop
            if member_id:
                break

        if not member_id:
            LOG.warning('Member for event not found, info: %s', info)
            return

        member_status = constants.ONLINE
        if info['status'] == ['offline']:
            member_status = constants.ERROR
        lb_status, pool_status = self._get_new_operating_statuses(
            ovn_lb, pool_id, member_id, member_status)

        status = {
            constants.POOLS: [
                {constants.ID: pool_id,
                 constants.PROVISIONING_STATUS: constants.ACTIVE,
                 constants.OPERATING_STATUS: pool_status}],
            constants.MEMBERS: [
                {constants.ID: member_id,
                 constants.PROVISIONING_STATUS: constants.ACTIVE,
                 constants.OPERATING_STATUS: member_status}],
            constants.LOADBALANCERS: [
                {constants.ID: ovn_lb.name,
                 constants.PROVISIONING_STATUS: constants.ACTIVE,
                 constants.OPERATING_STATUS: lb_status}]}
        return status

@@ -20,6 +20,7 @@ from ovsdbapp.backend.ovs_idl import connection
from ovsdbapp.backend.ovs_idl import idlutils
from ovsdbapp.backend.ovs_idl import transaction as idl_trans
from ovsdbapp.schema.ovn_northbound import impl_idl as nb_impl_idl
from ovsdbapp.schema.ovn_southbound import impl_idl as sb_impl_idl
import tenacity

from ovn_octavia_provider.common import config
@@ -144,10 +145,17 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
            LOG.info('Transaction aborted. Reason: %s', e)


class OvsdbSbOvnIdl(sb_impl_idl.OvnSbApiIdlImpl, Backend):
    def __init__(self, connection):
        super().__init__(connection)
        self.idl._session.reconnect.set_probe_interval(
            config.get_ovn_ovsdb_probe_interval())


class OvnNbIdlForLb(ovsdb_monitor.OvnIdl):
    SCHEMA = "OVN_Northbound"
    TABLES = ('Logical_Switch', 'Load_Balancer', 'Logical_Router',
              'Logical_Switch_Port', 'Logical_Router_Port',
    TABLES = ('Logical_Switch', 'Load_Balancer', 'Load_Balancer_Health_Check',
              'Logical_Router', 'Logical_Switch_Port', 'Logical_Router_Port',
              'Gateway_Chassis', 'NAT')

    def __init__(self, event_lock_name=None):
@@ -186,3 +194,45 @@ class OvnNbIdlForLb(ovsdb_monitor.OvnIdl):
        self.notify_handler.shutdown()
        # Close the idl session
        self.close()


class OvnSbIdlForLb(ovsdb_monitor.OvnIdl):
    SCHEMA = "OVN_Southbound"
    TABLES = ('Load_Balancer', 'Service_Monitor')

    def __init__(self, event_lock_name=None):
        self.conn_string = config.get_ovn_sb_connection()
        ovsdb_monitor._check_and_set_ssl_files(self.SCHEMA)
        helper = self._get_ovsdb_helper(self.conn_string)
        for table in OvnSbIdlForLb.TABLES:
            helper.register_table(table)
        super().__init__(
            driver=None, remote=self.conn_string, schema=helper)
        self.event_lock_name = event_lock_name
        if self.event_lock_name:
            self.set_lock(self.event_lock_name)
        atexit.register(self.stop)

    @tenacity.retry(
        wait=tenacity.wait_exponential(
            max=config.get_ovn_ovsdb_retry_max_interval()),
        reraise=True)
    def _get_ovsdb_helper(self, connection_string):
        return idlutils.get_schema_helper(connection_string, self.SCHEMA)

    def start(self):
        self.conn = connection.Connection(
            self, timeout=config.get_ovn_ovsdb_timeout())
        return impl_idl_ovn.OvsdbSbOvnIdl(self.conn)

    def stop(self):
        # Close the running connection if it has been initialized
        if hasattr(self, 'conn'):
            if not self.conn.stop(timeout=config.get_ovn_ovsdb_timeout()):
                LOG.debug("Connection terminated to OvnSb "
                          "but a thread is still alive")
            del self.conn
        # complete the shutdown for the event handler
        self.notify_handler.shutdown()
        # Close the idl session
        self.close()
@@ -88,3 +88,12 @@ def _check_and_set_ssl_files(schema_name):
        Stream.ssl_set_private_key_file(priv_key_file)
        Stream.ssl_set_certificate_file(cert_file)
        Stream.ssl_set_ca_cert_file(ca_cert_file)

    if schema_name == 'OVN_Southbound':
        priv_key_file = ovn_config.get_ovn_sb_private_key()
        cert_file = ovn_config.get_ovn_sb_certificate()
        ca_cert_file = ovn_config.get_ovn_sb_ca_cert()

        Stream.ssl_set_private_key_file(priv_key_file)
        Stream.ssl_set_certificate_file(cert_file)
        Stream.ssl_set_ca_cert_file(ca_cert_file)
@@ -23,7 +23,8 @@ from octavia_lib.api.drivers import driver_lib
from octavia_lib.common import constants as o_constants
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from ovsdbapp.schema.ovn_northbound import impl_idl as idl_ovn
from ovsdbapp.schema.ovn_northbound import impl_idl as nb_idl_ovn
from ovsdbapp.schema.ovn_southbound import impl_idl as sb_idl_ovn

# NOTE(mjozefcz): We need base neutron functionals because we need
# mechanism driver and l3 plugin.
@@ -38,7 +39,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,

    def setUp(self):
        super().setUp()
        idl_ovn.OvnNbApiIdlImpl.ovsdb_connection = None
        nb_idl_ovn.OvnNbApiIdlImpl.ovsdb_connection = None
        sb_idl_ovn.OvnSbApiIdlImpl.ovsdb_connection = None
        # TODO(mjozefcz): Use octavia listeners to provide needed
        # sockets and modify tests in order to verify if fake
        # listener (status) has received valid value.
@@ -93,7 +95,7 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
        return e1, e1_s1

    def _create_lb_model(self, vip=None, vip_network_id=None,
                         vip_port_id=None,
                         vip_subnet_id=None, vip_port_id=None,
                         admin_state_up=True):
        lb = octavia_data_model.LoadBalancer()
        lb.loadbalancer_id = uuidutils.generate_uuid()
@@ -105,6 +107,8 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,

        if vip_network_id:
            lb.vip_network_id = vip_network_id
        if vip_subnet_id:
            lb.vip_subnet_id = vip_subnet_id
        if vip_port_id:
            lb.vip_port_id = vip_port_id
        lb.admin_state_up = admin_state_up
@@ -331,6 +335,7 @@ class TestOvnOctaviaBase(base.TestOVNFunctionalBase,
        lb_data['vip_net_info'] = net_info
        lb_data['model'] = self._create_lb_model(vip=net_info[2],
                                                 vip_network_id=net_info[0],
                                                 vip_subnet_id=net_info[1],
                                                 vip_port_id=net_info[3],
                                                 admin_state_up=admin_state_up)
        lb_data[ovn_const.LB_EXT_IDS_LS_REFS_KEY] = {}
@@ -36,9 +36,13 @@ class TestOvnOctaviaBase(base.BaseTestCase):
        self.vip_network_id = uuidutils.generate_uuid()
        self.vip_port_id = uuidutils.generate_uuid()
        self.vip_subnet_id = uuidutils.generate_uuid()
        self.healthmonitor_id = uuidutils.generate_uuid()
        ovn_nb_idl = mock.patch(
            'ovn_octavia_provider.ovsdb.impl_idl_ovn.OvnNbIdlForLb')
        self.mock_ovn_nb_idl = ovn_nb_idl.start()
        ovn_sb_idl = mock.patch(
            'ovn_octavia_provider.ovsdb.impl_idl_ovn.OvnSbIdlForLb')
        self.mock_ovn_sb_idl = ovn_sb_idl.start()
        self.member_address = '192.168.2.149'
        self.vip_address = '192.148.210.109'
        self.vip_dict = {'vip_network_id': uuidutils.generate_uuid(),
@@ -302,3 +302,23 @@ class FakeLB(data_models.LoadBalancer):
    def __hash__(self):
        # Required for Python3, not for Python2
        return self.__sizeof__()


class FakePool(data_models.Pool):
    def __init__(self, *args, **kwargs):
        self.uuid = kwargs.pop('uuid')
        super().__init__(*args, **kwargs)

    def __hash__(self):
        # Required for Python3, not for Python2
        return self.__sizeof__()


class FakeMember(data_models.Member):
    def __init__(self, *args, **kwargs):
        self.uuid = kwargs.pop('uuid')
        super().__init__(*args, **kwargs)

    def __hash__(self):
        # Required for Python3, not for Python2
        return self.__sizeof__()
@@ -25,7 +25,9 @@ from ovn_octavia_provider.ovsdb import impl_idl_ovn
basedir = os.path.dirname(os.path.abspath(__file__))
schema_files = {
    'OVN_Northbound': os.path.join(basedir,
                                   '..', 'schemas', 'ovn-nb.ovsschema')}
                                   '..', 'schemas', 'ovn-nb.ovsschema'),
    'OVN_Southbound': os.path.join(basedir,
                                   '..', 'schemas', 'ovn-sb.ovsschema')}


class TestOvnNbIdlForLb(base.BaseTestCase):
@@ -78,3 +80,50 @@ class TestOvnNbIdlForLb(base.BaseTestCase):
                               'set_lock') as set_lock:
            self.idl = impl_idl_ovn.OvnNbIdlForLb(event_lock_name='foo')
            set_lock.assert_called_once_with('foo')


class TestOvnSbIdlForLb(base.BaseTestCase):

    def setUp(self):
        super().setUp()
        # TODO(haleyb) - figure out why every test in this class generates
        # this warning, think it's in relation to reading this schema file:
        # sys:1: ResourceWarning: unclosed file <_io.FileIO name=1 mode='wb'
        # closefd=True> ResourceWarning: Enable tracemalloc to get the object
        # allocation traceback
        self.mock_gsh = mock.patch.object(
            idlutils, 'get_schema_helper',
            side_effect=lambda x, y: ovs_idl.SchemaHelper(
                location=schema_files['OVN_Southbound'])).start()
        self.idl = impl_idl_ovn.OvnSbIdlForLb()

    @mock.patch.object(real_ovs_idl.Backend, 'autocreate_indices', mock.Mock(),
                       create=True)
    def test_start(self):
        with mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection',
                        side_effect=lambda x, timeout: mock.Mock()):
            idl1 = impl_idl_ovn.OvnSbIdlForLb()
            ret1 = idl1.start()
            id1 = id(ret1.ovsdb_connection)
            idl2 = impl_idl_ovn.OvnSbIdlForLb()
            ret2 = idl2.start()
            id2 = id(ret2.ovsdb_connection)
            self.assertNotEqual(id1, id2)

    @mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection')
    def test_stop(self, mock_conn):
        mock_conn.stop.return_value = False
        with (
            mock.patch.object(
                self.idl.notify_handler, 'shutdown')) as mock_notify, (
                mock.patch.object(self.idl, 'close')) as mock_close:
            self.idl.start()
            self.idl.stop()
            mock_notify.assert_called_once_with()
            mock_close.assert_called_once_with()

    def test_setlock(self):
        with mock.patch.object(impl_idl_ovn.OvnSbIdlForLb,
                               'set_lock') as set_lock:
            self.idl = impl_idl_ovn.OvnSbIdlForLb(event_lock_name='foo')
            set_lock.assert_called_once_with('foo')
ovn_octavia_provider/tests/unit/schemas/ovn-sb.ovsschema (new file, 531 lines)
@@ -0,0 +1,531 @@
|
||||
{
|
||||
"name": "OVN_Southbound",
|
||||
"version": "20.17.0",
|
||||
"cksum": "669123379 26536",
|
||||
"tables": {
|
||||
"SB_Global": {
|
||||
"columns": {
|
||||
"nb_cfg": {"type": {"key": "integer"}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"connections": {
|
||||
"type": {"key": {"type": "uuid",
|
||||
"refTable": "Connection"},
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"ssl": {
|
||||
"type": {"key": {"type": "uuid",
|
||||
"refTable": "SSL"},
|
||||
"min": 0, "max": 1}},
|
||||
"options": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"ipsec": {"type": "boolean"}},
|
||||
"maxRows": 1,
|
||||
"isRoot": true},
|
||||
"Chassis": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"hostname": {"type": "string"},
|
||||
"encaps": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Encap"},
|
||||
"min": 1, "max": "unlimited"}},
|
||||
"vtep_logical_switches" : {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"nb_cfg": {"type": {"key": "integer"}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"other_config": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"transport_zones" : {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}}},
|
||||
"isRoot": true,
|
||||
"indexes": [["name"]]},
|
||||
"Chassis_Private": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"chassis": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Chassis",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": 1}},
|
||||
"nb_cfg": {"type": {"key": "integer"}},
|
||||
"nb_cfg_timestamp": {"type": {"key": "integer"}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"isRoot": true,
|
||||
"indexes": [["name"]]},
|
||||
"Encap": {
|
||||
"columns": {
|
||||
"type": {"type": {"key": {
|
||||
"type": "string",
|
||||
"enum": ["set", ["geneve", "stt", "vxlan"]]}}},
|
||||
"options": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"ip": {"type": "string"},
|
||||
"chassis_name": {"type": "string"}},
|
||||
"indexes": [["type", "ip"]]},
|
||||
"Address_Set": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"addresses": {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}}},
|
||||
"indexes": [["name"]],
|
||||
"isRoot": true},
|
||||
"Port_Group": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"ports": {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}}},
|
||||
"indexes": [["name"]],
|
||||
"isRoot": true},
|
||||
"Logical_Flow": {
|
||||
"columns": {
|
||||
"logical_datapath":
|
||||
{"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding"},
|
||||
"min": 0, "max": 1}},
|
||||
"logical_dp_group":
|
||||
{"type": {"key": {"type": "uuid",
|
||||
"refTable": "Logical_DP_Group"},
|
||||
"min": 0, "max": 1}},
|
||||
"pipeline": {"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["ingress",
|
||||
"egress"]]}}},
|
||||
"table_id": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 0,
|
||||
"maxInteger": 32}}},
|
||||
"priority": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 0,
|
||||
"maxInteger": 65535}}},
|
||||
"match": {"type": "string"},
|
||||
"actions": {"type": "string"},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"isRoot": true},
|
||||
"Logical_DP_Group": {
|
||||
"columns": {
|
||||
"datapaths":
|
||||
{"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"isRoot": false},
|
||||
"Multicast_Group": {
|
||||
"columns": {
|
||||
"datapath": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding"}}},
|
||||
"name": {"type": "string"},
|
||||
"tunnel_key": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 32768,
|
||||
"maxInteger": 65535}}},
|
||||
"ports": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Port_Binding",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"indexes": [["datapath", "tunnel_key"],
|
||||
["datapath", "name"]],
|
||||
"isRoot": true},
|
||||
"Meter": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"unit": {"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["kbps", "pktps"]]}}},
|
||||
"bands": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Meter_Band",
|
||||
"refType": "strong"},
|
||||
"min": 1,
|
||||
"max": "unlimited"}}},
|
||||
"indexes": [["name"]],
|
||||
"isRoot": true},
|
||||
"Meter_Band": {
|
||||
"columns": {
|
||||
"action": {"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["drop"]]}}},
|
||||
"rate": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 1,
|
||||
"maxInteger": 4294967295}}},
|
||||
"burst_size": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 0,
|
||||
"maxInteger": 4294967295}}}},
|
||||
"isRoot": false},
|
||||
"Datapath_Binding": {
|
||||
"columns": {
|
||||
"tunnel_key": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 1,
|
||||
"maxInteger": 16777215}}},
|
||||
"load_balancers": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Load_Balancer",
|
||||
"refType": "weak"},
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"indexes": [["tunnel_key"]],
|
||||
"isRoot": true},
|
||||
"Port_Binding": {
|
||||
"columns": {
|
||||
"logical_port": {"type": "string"},
|
||||
"type": {"type": "string"},
|
||||
"gateway_chassis": {
|
||||
"type": {"key": {"type": "uuid",
|
||||
"refTable": "Gateway_Chassis",
|
||||
"refType": "strong"},
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"ha_chassis_group": {
|
||||
"type": {"key": {"type": "uuid",
|
||||
"refTable": "HA_Chassis_Group",
|
||||
"refType": "strong"},
|
||||
"min": 0,
|
||||
"max": 1}},
|
||||
"options": {
|
||||
"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"datapath": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding"}}},
|
||||
"tunnel_key": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 1,
|
||||
"maxInteger": 32767}}},
|
||||
"parent_port": {"type": {"key": "string", "min": 0, "max": 1}},
|
||||
"tag": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 1,
|
||||
"maxInteger": 4095},
|
||||
"min": 0, "max": 1}},
|
||||
"virtual_parent": {"type": {"key": "string", "min": 0,
|
||||
"max": 1}},
|
||||
"chassis": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Chassis",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": 1}},
|
||||
"encap": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Encap",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": 1}},
|
||||
"mac": {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"nat_addresses": {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"up": {"type": {"key": "boolean", "min": 0, "max": 1}},
|
||||
"external_ids": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}}},
|
||||
"indexes": [["datapath", "tunnel_key"], ["logical_port"]],
|
||||
"isRoot": true},
|
||||
"MAC_Binding": {
|
||||
"columns": {
|
||||
"logical_port": {"type": "string"},
|
||||
"ip": {"type": "string"},
|
||||
"mac": {"type": "string"},
|
||||
"datapath": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding"}}}},
|
||||
"indexes": [["logical_port", "ip"]],
|
||||
"isRoot": true},
|
||||
"DHCP_Options": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"code": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 0, "maxInteger": 254}}},
|
||||
"type": {
|
||||
"type": {"key": {
|
||||
"type": "string",
|
||||
"enum": ["set", ["bool", "uint8", "uint16", "uint32",
|
||||
"ipv4", "static_routes", "str",
|
||||
"host_id", "domains"]]}}}},
|
||||
"isRoot": true},
|
||||
"DHCPv6_Options": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"code": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 0, "maxInteger": 254}}},
|
||||
"type": {
|
||||
"type": {"key": {
|
||||
"type": "string",
|
||||
"enum": ["set", ["ipv6", "str", "mac"]]}}}},
|
||||
"isRoot": true},
|
||||
"Connection": {
|
||||
"columns": {
|
||||
"target": {"type": "string"},
|
||||
"max_backoff": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 1000},
|
||||
"min": 0,
|
||||
"max": 1}},
|
||||
"inactivity_probe": {"type": {"key": "integer",
|
||||
"min": 0,
|
||||
"max": 1}},
|
||||
"read_only": {"type": "boolean"},
|
||||
"role": {"type": "string"},
|
||||
"other_config": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"external_ids": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"is_connected": {"type": "boolean", "ephemeral": true},
|
||||
"status": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"},
|
||||
"ephemeral": true}},
|
||||
"indexes": [["target"]]},
|
||||
"SSL": {
|
||||
"columns": {
|
||||
"private_key": {"type": "string"},
|
||||
"certificate": {"type": "string"},
|
||||
"ca_cert": {"type": "string"},
|
||||
"bootstrap_ca_cert": {"type": "boolean"},
|
||||
"ssl_protocols": {"type": "string"},
|
||||
"ssl_ciphers": {"type": "string"},
|
||||
"external_ids": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}}},
|
||||
"maxRows": 1},
|
||||
"DNS": {
|
||||
"columns": {
|
||||
"records": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"datapaths": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding"},
|
||||
"min": 1,
|
||||
"max": "unlimited"}},
|
||||
"external_ids": {"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}}},
|
||||
"isRoot": true},
|
||||
"RBAC_Role": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"permissions": {
|
||||
"type": {"key": {"type": "string"},
|
||||
"value": {"type": "uuid",
|
||||
"refTable": "RBAC_Permission",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"isRoot": true},
|
||||
"RBAC_Permission": {
|
||||
"columns": {
|
||||
"table": {"type": "string"},
|
||||
"authorization": {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"insert_delete": {"type": "boolean"},
|
||||
"update" : {"type": {"key": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}}},
|
||||
"isRoot": true},
|
||||
"Gateway_Chassis": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"chassis": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Chassis",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": 1}},
|
||||
"priority": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 0,
|
||||
"maxInteger": 32767}}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"options": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"indexes": [["name"]],
|
||||
"isRoot": false},
|
||||
"HA_Chassis": {
|
||||
"columns": {
|
||||
"chassis": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Chassis",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": 1}},
|
||||
"priority": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 0,
|
||||
"maxInteger": 32767}}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"isRoot": false},
|
||||
"HA_Chassis_Group": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"ha_chassis": {
|
||||
"type": {"key": {"type": "uuid",
|
||||
"refTable": "HA_Chassis",
|
||||
"refType": "strong"},
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"ref_chassis": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Chassis",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"indexes": [["name"]],
|
||||
"isRoot": true},
|
||||
"Controller_Event": {
|
||||
"columns": {
|
||||
"event_type": {"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["empty_lb_backends"]]}}},
|
||||
"event_info": {"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"chassis": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Chassis",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": 1}},
|
||||
"seq_num": {"type": {"key": "integer"}}
|
||||
},
|
||||
"isRoot": true},
|
||||
"IP_Multicast": {
|
||||
"columns": {
|
||||
"datapath": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding",
|
||||
"refType": "weak"}}},
|
||||
"enabled": {"type": {"key": "boolean", "min": 0, "max": 1}},
|
||||
"querier": {"type": {"key": "boolean", "min": 0, "max": 1}},
|
||||
"eth_src": {"type": "string"},
|
||||
"ip4_src": {"type": "string"},
|
||||
"ip6_src": {"type": "string"},
|
||||
"table_size": {"type": {"key": "integer",
|
||||
"min": 0, "max": 1}},
|
||||
"idle_timeout": {"type": {"key": "integer",
|
||||
"min": 0, "max": 1}},
|
||||
"query_interval": {"type": {"key": "integer",
|
||||
"min": 0, "max": 1}},
|
||||
"query_max_resp": {"type": {"key": "integer",
|
||||
"min": 0, "max": 1}},
|
||||
"seq_no": {"type": "integer"}},
|
||||
"indexes": [["datapath"]],
|
||||
"isRoot": true},
|
||||
"IGMP_Group": {
|
||||
"columns": {
|
||||
"address": {"type": "string"},
|
||||
"datapath": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding",
|
||||
"refType": "weak"},
|
||||
"min": 0,
|
||||
"max": 1}},
|
||||
"chassis": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Chassis",
|
||||
"refType": "weak"},
|
||||
"min": 0,
|
||||
"max": 1}},
|
||||
"ports": {"type": {"key": {"type": "uuid",
|
||||
"refTable": "Port_Binding",
|
||||
"refType": "weak"},
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"indexes": [["address", "datapath", "chassis"]],
|
||||
"isRoot": true},
|
||||
"Service_Monitor": {
|
||||
"columns": {
|
||||
"ip": {"type": "string"},
|
||||
"protocol": {
|
||||
"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["tcp", "udp"]]},
|
||||
"min": 0, "max": 1}},
|
||||
"port": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 0,
|
||||
"maxInteger": 32767}}},
|
||||
"logical_port": {"type": "string"},
|
||||
"src_mac": {"type": "string"},
|
||||
"src_ip": {"type": "string"},
|
||||
"status": {
|
||||
"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["online", "offline", "error"]]},
|
||||
"min": 0, "max": 1}},
|
||||
"options": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"indexes": [["logical_port", "ip", "port", "protocol"]],
|
||||
"isRoot": true},
|
||||
"Load_Balancer": {
|
||||
"columns": {
|
||||
"name": {"type": "string"},
|
||||
"vips": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"protocol": {
|
||||
"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["tcp", "udp", "sctp"]]},
|
||||
"min": 0, "max": 1}},
|
||||
"datapaths": {
|
||||
"type": {"key": {"type": "uuid",
|
||||
"refTable": "Datapath_Binding"},
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"options": {
|
||||
"type": {"key": "string",
|
||||
"value": "string",
|
||||
"min": 0,
|
||||
"max": "unlimited"}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"isRoot": true},
|
||||
"BFD": {
|
||||
"columns": {
|
||||
"src_port": {"type": {"key": {"type": "integer",
|
||||
"minInteger": 49152,
|
||||
"maxInteger": 65535}}},
|
||||
"disc": {"type": {"key": {"type": "integer"}}},
|
||||
"logical_port": {"type": "string"},
|
||||
"dst_ip": {"type": "string"},
|
||||
"min_tx": {"type": {"key": {"type": "integer"}}},
|
||||
"min_rx": {"type": {"key": {"type": "integer"}}},
|
||||
"detect_mult": {"type": {"key": {"type": "integer"}}},
|
||||
"status": {
|
||||
"type": {"key": {"type": "string",
|
||||
"enum": ["set", ["down", "init", "up",
|
||||
"admin_down"]]}}},
|
||||
"external_ids": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}},
|
||||
"options": {
|
||||
"type": {"key": "string", "value": "string",
|
||||
"min": 0, "max": "unlimited"}}},
|
||||
"indexes": [["logical_port", "dst_ip", "src_port", "disc"]],
|
||||
"isRoot": true},
|
||||
"FDB": {
|
||||
"columns": {
|
||||
"mac": {"type": "string"},
|
||||
"dp_key": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 1,
|
||||
"maxInteger": 16777215}}},
|
||||
"port_key": {
|
||||
"type": {"key": {"type": "integer",
|
||||
"minInteger": 1,
|
||||
"maxInteger": 16777215}}}},
|
||||
"indexes": [["mac", "dp_key"]],
|
||||
"isRoot": true}
|
||||
}
|
||||
}
|
@ -25,3 +25,4 @@ class TestOvnProviderAgent(ovn_base.TestOvnOctaviaBase):
        ovn_agent.OvnProviderAgent(mock_exit_event)
        self.assertEqual(1, mock_exit_event.wait.call_count)
        self.assertEqual(2, self.mock_ovn_nb_idl.call_count)
        self.assertEqual(1, self.mock_ovn_sb_idl.call_count)
@ -179,6 +179,34 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
            project_id=self.project_id,
            vip_address=self.vip_address,
            vip_network_id=self.vip_network_id)
        self.fail_health_monitor = data_models.HealthMonitor(
            admin_state_up=True,
            name='UnHealthy',
            pool_id=self.pool_id,
            healthmonitor_id=self.healthmonitor_id,
            type="not_valid",
            delay=1,
            timeout=2,
            max_retries_down=3,
            max_retries=4)
        self.ref_health_monitor = data_models.HealthMonitor(
            admin_state_up=True,
            name='Healthy',
            pool_id=self.pool_id,
            healthmonitor_id=self.healthmonitor_id,
            type=constants.HEALTH_MONITOR_TCP,
            delay=6,
            timeout=7,
            max_retries_down=5,
            max_retries=3)
        self.ref_update_health_monitor = data_models.HealthMonitor(
            admin_state_up=True,
            name='ReHealthy',
            healthmonitor_id=self.healthmonitor_id,
            delay=16,
            timeout=17,
            max_retries_down=15,
            max_retries=13)
        mock.patch.object(
            ovn_helper.OvnProviderHelper, '_find_ovn_lbs',
            side_effect=lambda x, protocol=None:
@ -589,3 +617,92 @@ class TestOvnProviderDriver(ovn_base.TestOvnOctaviaBase):
|
||||
self.loadbalancer_id,
|
||||
self.project_id,
|
||||
self.vip_dict)
|
||||
|
||||
def test_health_monitor_create(self):
|
||||
info = {'id': self.ref_health_monitor.healthmonitor_id,
|
||||
'pool_id': self.ref_health_monitor.pool_id,
|
||||
'type': self.ref_health_monitor.type,
|
||||
'interval': self.ref_health_monitor.delay,
|
||||
'timeout': self.ref_health_monitor.timeout,
|
||||
'failure_count': self.ref_health_monitor.max_retries_down,
|
||||
'success_count': self.ref_health_monitor.max_retries,
|
||||
'admin_state_up': self.ref_health_monitor.admin_state_up}
|
||||
expected_dict = {'type': ovn_const.REQ_TYPE_HM_CREATE,
|
||||
'info': info}
|
||||
self.driver.health_monitor_create(self.ref_health_monitor)
|
||||
self.mock_add_request.assert_called_once_with(expected_dict)
|
||||
|
||||
@mock.patch.object(ovn_driver.OvnProviderDriver,
|
||||
'_is_health_check_supported')
|
||||
def test_health_monitor_create_not_supported(self, ihcs):
|
||||
ihcs.return_value = False
|
||||
self.assertRaises(exceptions.UnsupportedOptionError,
|
||||
self.driver.health_monitor_create,
|
||||
self.ref_health_monitor)
|
||||
|
||||
def test_health_monitor_create_failure(self):
|
||||
self.assertRaises(exceptions.UnsupportedOptionError,
|
||||
self.driver.health_monitor_create,
|
||||
self.fail_health_monitor)
|
||||
|
||||
def test_health_monitor_create_failure_unset_type(self):
|
||||
self.fail_health_monitor.type = data_models.UnsetType()
|
||||
self.assertRaises(exceptions.UnsupportedOptionError,
|
||||
self.driver.health_monitor_create,
|
||||
self.fail_health_monitor)
|
||||
|
||||
def test_health_monitor_create_unset_admin_state_up(self):
|
||||
self.ref_health_monitor.admin_state_up = data_models.UnsetType()
|
||||
info = {'id': self.ref_health_monitor.healthmonitor_id,
|
||||
'pool_id': self.ref_health_monitor.pool_id,
|
||||
'type': self.ref_health_monitor.type,
|
||||
'interval': self.ref_health_monitor.delay,
|
||||
'timeout': self.ref_health_monitor.timeout,
|
||||
'failure_count': self.ref_health_monitor.max_retries_down,
|
||||
'success_count': self.ref_health_monitor.max_retries,
|
||||
'admin_state_up': True}
|
||||
expected_dict = {'type': ovn_const.REQ_TYPE_HM_CREATE,
|
||||
'info': info}
|
||||
self.driver.health_monitor_create(self.ref_health_monitor)
|
||||
self.mock_add_request.assert_called_once_with(expected_dict)
|
||||
|
||||
def test_health_monitor_update(self):
|
||||
info = {'id': self.ref_update_health_monitor.healthmonitor_id,
|
||||
'pool_id': self.ref_health_monitor.pool_id,
|
||||
'interval': self.ref_update_health_monitor.delay,
|
||||
'timeout': self.ref_update_health_monitor.timeout,
|
||||
'failure_count':
|
||||
self.ref_update_health_monitor.max_retries_down,
|
||||
'success_count':
|
||||
self.ref_update_health_monitor.max_retries,
|
||||
'admin_state_up':
|
||||
self.ref_update_health_monitor.admin_state_up}
|
||||
expected_dict = {'type': ovn_const.REQ_TYPE_HM_UPDATE,
|
||||
'info': info}
|
||||
self.driver.health_monitor_update(self.ref_health_monitor,
|
||||
self.ref_update_health_monitor)
|
||||
self.mock_add_request.assert_called_once_with(expected_dict)
|
||||
|
||||
def test_health_monitor_update_unset_admin_state_up(self):
|
||||
self.ref_update_health_monitor.admin_state_up = data_models.UnsetType()
|
||||
info = {'id': self.ref_update_health_monitor.healthmonitor_id,
|
||||
'pool_id': self.ref_health_monitor.pool_id,
|
||||
'interval': self.ref_update_health_monitor.delay,
|
||||
'timeout': self.ref_update_health_monitor.timeout,
|
||||
'failure_count':
|
||||
self.ref_update_health_monitor.max_retries_down,
|
||||
'success_count':
|
||||
self.ref_update_health_monitor.max_retries,
|
||||
'admin_state_up': True}
|
||||
expected_dict = {'type': ovn_const.REQ_TYPE_HM_UPDATE,
|
||||
'info': info}
|
||||
self.driver.health_monitor_update(self.ref_health_monitor,
|
||||
self.ref_update_health_monitor)
|
||||
self.mock_add_request.assert_called_once_with(expected_dict)
|
||||
|
||||
def test_health_monitor_delete(self):
|
||||
info = {'id': self.ref_health_monitor.healthmonitor_id}
|
||||
expected_dict = {'type': ovn_const.REQ_TYPE_HM_DELETE,
|
||||
'info': info}
|
||||
self.driver.health_monitor_delete(self.ref_health_monitor)
|
||||
self.mock_add_request.assert_called_once_with(expected_dict)
|
||||
|
@ -36,6 +36,8 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.real_helper_find_ovn_lb_with_pool_key = (
|
||||
self.helper._find_ovn_lb_with_pool_key)
|
||||
mock.patch.object(self.helper, '_update_status_to_octavia').start()
|
||||
self.octavia_driver_lib = mock.patch.object(
|
||||
self.helper, '_octavia_driver_lib').start()
|
||||
self.listener = {'id': self.listener_id,
|
||||
'loadbalancer_id': self.loadbalancer_id,
|
||||
'protocol': 'TCP',
|
||||
@ -64,6 +66,22 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
'pool_id': self.member_pool_id,
|
||||
'admin_state_up': True,
|
||||
'old_admin_state_up': True}
|
||||
self.health_monitor = {'id': self.healthmonitor_id,
|
||||
'pool_id': self.pool_id,
|
||||
'type': constants.HEALTH_MONITOR_TCP,
|
||||
'interval': 6,
|
||||
'timeout': 7,
|
||||
'failure_count': 5,
|
||||
'success_count': 3,
|
||||
'admin_state_up': True}
|
||||
self.health_mon_udp = {'id': self.healthmonitor_id,
|
||||
'pool_id': self.pool_id,
|
||||
'type': constants.HEALTH_MONITOR_UDP_CONNECT,
|
||||
'interval': 6,
|
||||
'timeout': 7,
|
||||
'failure_count': 5,
|
||||
'success_count': 3,
|
||||
'admin_state_up': True}
|
||||
self.ovn_nbdb_api = mock.patch.object(self.helper, 'ovn_nbdb_api')
|
||||
self.ovn_nbdb_api.start()
|
||||
add_req_thread = mock.patch.object(ovn_helper.OvnProviderHelper,
|
||||
@ -72,6 +90,15 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.ovn_lb = mock.MagicMock()
|
||||
self.ovn_lb.protocol = ['tcp']
|
||||
self.ovn_lb.uuid = uuidutils.generate_uuid()
|
||||
self.ovn_lb.health_check = []
|
||||
self.ovn_hm_lb = mock.MagicMock()
|
||||
self.ovn_hm_lb.protocol = ['tcp']
|
||||
self.ovn_hm_lb.uuid = uuidutils.generate_uuid()
|
||||
self.ovn_hm_lb.health_check = []
|
||||
self.ovn_hm = mock.MagicMock()
|
||||
self.ovn_hm.uuid = self.healthmonitor_id
|
||||
self.ovn_hm.external_ids = {
|
||||
ovn_const.LB_EXT_IDS_HM_KEY: self.ovn_hm.uuid}
|
||||
self.member_line = (
|
||||
'member_%s_%s:%s_%s' %
|
||||
(self.member_id, self.member_address,
|
||||
@ -83,6 +110,13 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
'enabled': True,
|
||||
'pool_%s' % self.pool_id: self.member_line,
|
||||
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
|
||||
self.ovn_hm_lb.external_ids = {
|
||||
ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.99',
|
||||
ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.99',
|
||||
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_hm_port',
|
||||
'enabled': True,
|
||||
'pool_%s' % self.pool_id: [],
|
||||
'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
|
||||
self.helper.ovn_nbdb_api.db_find.return_value.\
|
||||
execute.return_value = [self.ovn_lb]
|
||||
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
|
||||
@ -1214,15 +1248,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.helper.member_create(self.member)
|
||||
self.helper.ovn_nbdb_api.db_set.assert_not_called()
|
||||
|
||||
def test_member_create_already_exists_backward_compat(self):
|
||||
old_member_line = ('member_%s_%s:%s' %
|
||||
(self.member_id, self.member_address,
|
||||
self.member_port))
|
||||
self.ovn_lb.external_ids.update(
|
||||
{'pool_%s' % self.pool_id: old_member_line})
|
||||
self.helper.member_create(self.member)
|
||||
self.helper.ovn_nbdb_api.db_set.assert_not_called()
|
||||
|
||||
def test_member_create_first_member_in_pool(self):
|
||||
self.ovn_lb.external_ids.update({
|
||||
'pool_' + self.pool_id: ''})
|
||||
@ -1382,10 +1407,12 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
member2_id = uuidutils.generate_uuid()
|
||||
member2_port = '1010'
|
||||
member2_address = '192.168.2.150'
|
||||
member2_subnet_id = uuidutils.generate_uuid()
|
||||
member_line = (
|
||||
'member_%s_%s:%s,member_%s_%s:%s' %
|
||||
'member_%s_%s:%s_%s,member_%s_%s:%s_%s' %
|
||||
(self.member_id, self.member_address, self.member_port,
|
||||
member2_id, member2_address, member2_port))
|
||||
self.member_subnet_id,
|
||||
member2_id, member2_address, member2_port, member2_subnet_id))
|
||||
self.ovn_lb.external_ids.update({
|
||||
'pool_' + self.pool_id: member_line})
|
||||
status = self.helper.member_delete(self.member)
|
||||
@ -1394,20 +1421,6 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
|
||||
def test_member_delete_backward_compat(self):
|
||||
old_member_line = ('member_%s_%s:%s' %
|
||||
(self.member_id, self.member_address,
|
||||
self.member_port))
|
||||
self.ovn_lb.external_ids.update(
|
||||
{'pool_%s' % self.pool_id: old_member_line})
|
||||
self.helper.member_delete(self.member)
|
||||
expected_calls = [
|
||||
mock.call('Load_Balancer', self.ovn_lb.uuid,
|
||||
('external_ids', {'pool_%s' % self.pool_id: ''})),
|
||||
mock.call('Load_Balancer', self.ovn_lb.uuid,
|
||||
('vips', {}))]
|
||||
self.helper.ovn_nbdb_api.db_set.has_calls(expected_calls)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_remove_member')
|
||||
def test_member_delete_exception(self, mock_remove_member):
|
||||
mock_remove_member.side_effect = [RuntimeError]
|
||||
@ -1424,9 +1437,12 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
mock.call('pool_%s%s' % (self.pool_id, ':D'))])
|
||||
|
||||
def test_member_delete_pool_listeners(self):
|
||||
member_line = (
|
||||
'member_%s_%s:%s_%s' %
|
||||
(self.member_id, self.member_address, self.member_port,
|
||||
self.member_subnet_id))
|
||||
self.ovn_lb.external_ids.update({
|
||||
'pool_' + self.pool_id: 'member_' + self.member_id + '_' +
|
||||
self.member_address + ':' + self.member_port})
|
||||
'pool_' + self.pool_id: member_line})
|
||||
self.helper._get_pool_listeners.return_value = ['listener1']
|
||||
status = self.helper.member_delete(self.member)
|
||||
self.assertEqual(status['listeners'][0]['provisioning_status'],
|
||||
@ -2104,9 +2120,10 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
'action': 'associate',
|
||||
'vip_fip': '10.0.0.123',
|
||||
'ovn_lb': lb}
|
||||
members = 'member_%s_%s:%s' % (self.member_id,
|
||||
self.member_address,
|
||||
self.member_port)
|
||||
members = 'member_%s_%s:%s_%s' % (self.member_id,
|
||||
self.member_address,
|
||||
self.member_port,
|
||||
self.member_subnet_id)
|
||||
external_ids = {
|
||||
'listener_foo': '80:pool_%s' % self.pool_id,
|
||||
'pool_%s' % self.pool_id: members,
|
||||
@ -2433,3 +2450,693 @@ class TestOvnProviderHelper(ovn_base.TestOvnOctaviaBase):
|
||||
fol.return_value = None
|
||||
ret = self.helper.check_lb_protocol(self.listener_id, 'TCP')
|
||||
self.assertFalse(ret)
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_update_hm_members')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def _test_hm_create(self, protocol, members, folbpi, uhm, net_cli):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
self.ovn_hm_lb.protocol = [protocol]
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
uhm.return_value = True
|
||||
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
if members:
|
||||
self.assertEqual(status['members'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] +
|
||||
':' + str(self.listener['protocol_port']))
|
||||
options = {'interval': '6',
|
||||
'timeout': '7',
|
||||
'failure_count': '5',
|
||||
'success_count': '3'}
|
||||
external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id}
|
||||
kwargs = {'vip': vip,
|
||||
'options': options,
|
||||
'external_ids': external_ids}
|
||||
self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
|
||||
'Load_Balancer_Health_Check', **kwargs)
|
||||
self.helper.ovn_nbdb_api.db_add.assert_called_once_with(
|
||||
'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', mock.ANY)
|
||||
|
||||
def test_hm_create_tcp(self):
|
||||
self._test_hm_create('tcp', False)
|
||||
|
||||
def test_hm_create_udp(self):
|
||||
self._test_hm_create('udp', False)
|
||||
|
||||
def test_hm_create_tcp_pool_members(self):
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
self.ovn_hm_lb.external_ids[pool_key] = self.member_line
|
||||
self._test_hm_create('tcp', True)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_no_vip_port(self, folbpi):
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
listener_key = 'listener_%s' % self.listener_id
|
||||
self.ovn_hm_lb.external_ids.pop(listener_key)
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
vip = []
|
||||
options = {'interval': '6',
|
||||
'timeout': '7',
|
||||
'failure_count': '5',
|
||||
'success_count': '3'}
|
||||
self.ovn_hm.external_ids.pop(ovn_const.LB_EXT_IDS_HM_KEY)
|
||||
external_ids = {ovn_const.LB_EXT_IDS_HM_KEY: self.healthmonitor_id}
|
||||
kwargs = {'vip': vip,
|
||||
'options': options,
|
||||
'external_ids': external_ids}
|
||||
self.helper.ovn_nbdb_api.db_create.assert_called_once_with(
|
||||
'Load_Balancer_Health_Check', **kwargs)
|
||||
self.helper.ovn_nbdb_api.db_add.assert_called_once_with(
|
||||
'Load_Balancer', self.ovn_hm_lb.uuid, 'health_check', mock.ANY)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_offline(self, folbpi):
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
self.health_monitor['admin_state_up'] = False
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.OFFLINE)
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_lb_not_found(self, folbpi):
|
||||
folbpi.return_value = (None, None)
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.NO_MONITOR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_pool_not_found(self, folbpi):
|
||||
folbpi.return_value = ('pool_closed', self.ovn_hm_lb)
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.NO_MONITOR)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.OFFLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_vip_not_found(self, folbpi):
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
self.ovn_hm_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_KEY)
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_lsp_not_found(self, folbpi, net_cli):
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
self.ovn_hm_lb.external_ids[pool_key] = self.member_line
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
net_cli.return_value.show_subnet.side_effect = [n_exc.NotFound]
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_hm_port_not_found(self, folbpi, net_cli):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
fake_port = fakes.FakePort.create_one_port(
|
||||
attrs={'allowed_address_pairs': ''})
|
||||
member = {'id': uuidutils.generate_uuid(),
|
||||
'address': fake_port['fixed_ips'][0]['ip_address'],
|
||||
'protocol_port': '9999',
|
||||
'subnet_id': fake_subnet['id'],
|
||||
'pool_id': self.pool_id,
|
||||
'admin_state_up': True,
|
||||
'old_admin_state_up': True}
|
||||
member_line = (
|
||||
'member_%s_%s:%s_%s' %
|
||||
(member['id'], member['address'],
|
||||
member['protocol_port'], member['subnet_id']))
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
self.ovn_hm_lb.external_ids[pool_key] = member_line
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
|
||||
net_cli.return_value.list_ports.return_value = {'ports': []}
|
||||
fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port)
|
||||
fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
|
||||
attrs={
|
||||
'external_ids': {},
|
||||
'ports': [fake_lsp]})
|
||||
self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_hm_source_ip_not_found(self, folbpi, net_cli):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
fake_port = fakes.FakePort.create_one_port(
|
||||
attrs={'allowed_address_pairs': ''})
|
||||
member = {'id': uuidutils.generate_uuid(),
|
||||
'address': fake_port['fixed_ips'][0]['ip_address'],
|
||||
'protocol_port': '9999',
|
||||
'subnet_id': fake_subnet['id'],
|
||||
'pool_id': self.pool_id,
|
||||
'admin_state_up': True,
|
||||
'old_admin_state_up': True}
|
||||
member_line = (
|
||||
'member_%s_%s:%s_%s' %
|
||||
(member['id'], member['address'],
|
||||
member['protocol_port'], member['subnet_id']))
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
self.ovn_hm_lb.external_ids[pool_key] = member_line
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
net_cli.return_value.show_subnet.return_value = {'subnet': fake_subnet}
|
||||
fake_lsp = fakes.FakeOVNPort.from_neutron_port(fake_port)
|
||||
fake_ls = fakes.FakeOvsdbRow.create_one_ovsdb_row(
|
||||
attrs={
|
||||
'external_ids': {},
|
||||
'ports': [fake_lsp]})
|
||||
self.helper.ovn_nbdb_api.lookup.return_value = fake_ls
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_create_db_exception(self, folbpi):
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
folbpi.return_value = (pool_key, self.ovn_hm_lb)
|
||||
self.helper.ovn_nbdb_api.db_create.side_effect = [RuntimeError]
|
||||
status = self.helper.hm_create(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_hm_by_id')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
|
||||
def test_hm_create_then_listener_create(self, get_ovn_lb, lookup_hm):
|
||||
get_ovn_lb.return_value = self.ovn_hm_lb
|
||||
lookup_hm.return_value = self.ovn_hm
|
||||
self.ovn_hm_lb.health_check = self.ovn_hm
|
||||
self.listener['admin_state_up'] = True
|
||||
status = self.helper.listener_create(self.listener)
|
||||
vip = (self.ovn_hm_lb.external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] +
|
||||
':' + str(self.listener['protocol_port']))
|
||||
self.helper.ovn_nbdb_api.db_set.assert_called_with(
|
||||
'Load_Balancer_Health_Check', self.ovn_hm.uuid, ('vip', vip))
|
||||
self.assertEqual(status['listeners'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['listeners'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_hm_by_id')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
|
||||
def test_hm_create_then_listener_create_no_hm(self, get_ovn_lb, lookup_hm):
|
||||
get_ovn_lb.return_value = self.ovn_hm_lb
|
||||
lookup_hm.return_value = None
|
||||
self.ovn_hm_lb.health_check = self.ovn_hm
|
||||
self.listener['admin_state_up'] = True
|
||||
status = self.helper.listener_create(self.listener)
|
||||
self.assertEqual(status['listeners'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['listeners'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_lookup_hm_by_id')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_get_or_create_ovn_lb')
|
||||
def test_hm_create_then_listener_create_no_vip(self, get_ovn_lb,
|
||||
lookup_hm, refresh_vips):
|
||||
get_ovn_lb.return_value = self.ovn_hm_lb
|
||||
lookup_hm.return_value = self.ovn_hm
|
||||
self.ovn_hm_lb.health_check = self.ovn_hm
|
||||
self.ovn_hm_lb.external_ids.pop(ovn_const.LB_EXT_IDS_VIP_KEY)
|
||||
self.listener['admin_state_up'] = True
|
||||
status = self.helper.listener_create(self.listener)
|
||||
self.assertEqual(status['listeners'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['listeners'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id')
|
||||
def test_hm_update(self, folbfhi):
|
||||
folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb)
|
||||
status = self.helper.hm_update(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id')
|
||||
def test_hm_update_no_admin_state_up(self, folbfhi):
|
||||
folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb)
|
||||
self.ovn_hm_lb.pop('admin_state_up')
|
||||
status = self.helper.hm_update(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id')
|
||||
def test_hm_update_offline(self, folbfhi):
|
||||
folbfhi.return_value = (self.ovn_hm, self.ovn_hm_lb)
|
||||
self.health_monitor['admin_state_up'] = False
|
||||
status = self.helper.hm_update(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.OFFLINE)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id')
|
||||
def test_hm_update_hm_not_found(self, folbfhi):
|
||||
folbfhi.return_value = (None, None)
|
||||
status = self.helper.hm_update(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_from_hm_id')
|
||||
@mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ovn_lb_by_pool_id')
|
||||
def test_hm_update_lb_not_found(self, folbpi, folbfhi):
|
||||
folbfhi.return_value = (self.ovn_hm, None)
|
||||
folbpi.return_value = (None, None)
|
||||
status = self.helper.hm_update(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
def test_hm_delete(self):
|
||||
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
|
||||
execute.return_value = [self.ovn_hm]
|
||||
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
|
||||
execute.return_value = [self.ovn_hm_lb]
|
||||
status = self.helper.hm_delete(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.DELETED)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.NO_MONITOR)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
expected_clear_calls = [
|
||||
mock.call('Load_Balancer', self.ovn_hm_lb.uuid,
|
||||
'ip_port_mappings')]
|
||||
expected_remove_calls = [
|
||||
mock.call('Load_Balancer', self.ovn_hm_lb.uuid, 'health_check',
|
||||
self.ovn_hm.uuid)]
|
||||
expected_destroy_calls = [
|
||||
mock.call('Load_Balancer_Health_Check', self.ovn_hm.uuid)]
|
||||
self.helper.ovn_nbdb_api.db_clear.assert_has_calls(
|
||||
expected_clear_calls)
|
||||
self.helper.ovn_nbdb_api.db_remove.assert_has_calls(
|
||||
expected_remove_calls)
|
||||
self.helper.ovn_nbdb_api.db_destroy.assert_has_calls(
|
||||
expected_destroy_calls)
|
||||
|
||||
def test_hm_delete_row_not_found(self):
|
||||
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
|
||||
execute.return_value = [self.ovn_hm]
|
||||
self.helper.ovn_nbdb_api.db_find_rows.side_effect = (
|
||||
[idlutils.RowNotFound])
|
||||
status = self.helper.hm_delete(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.DELETED)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.NO_MONITOR)
|
||||
self.helper.ovn_nbdb_api.db_clear.assert_not_called()
|
||||
|
||||
def test_hm_delete_hm_not_found(self):
|
||||
self.helper.ovn_nbdb_api.db_list_rows.return_value.\
|
||||
execute.return_value = [self.ovn_hm]
|
||||
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
|
||||
execute.return_value = [self.ovn_hm_lb]
|
||||
self.health_monitor['id'] = 'id_not_found'
|
||||
status = self.helper.hm_delete(self.health_monitor)
|
||||
self.assertEqual(status['healthmonitors'][0]['provisioning_status'],
|
||||
constants.DELETED)
|
||||
self.assertEqual(status['healthmonitors'][0]['operating_status'],
|
||||
constants.NO_MONITOR)
|
||||
self.helper.ovn_nbdb_api.db_clear.assert_not_called()
|
||||
|
||||
def test_hm_update_event_offline(self):
|
||||
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
|
||||
execute.return_value = [self.ovn_hm_lb]
|
||||
self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
|
||||
self.helper)
|
||||
src_ip = '10.22.33.4'
|
||||
row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
|
||||
attrs={'ip': self.member_address,
|
||||
'logical_port': 'a-logical-port',
|
||||
'src_ip': src_ip,
|
||||
'port': self.member_port,
|
||||
'protocol': self.ovn_hm_lb.protocol,
|
||||
'status': ['offline']})
|
||||
self.hm_update_event.run('update', row, mock.ANY)
|
||||
expected = {
|
||||
'info':
|
||||
{'ovn_lb': self.ovn_hm_lb,
|
||||
'ip': self.member_address,
|
||||
'port': self.member_port,
|
||||
'status': ['offline']},
|
||||
'type': 'hm_update_event'}
|
||||
self.mock_add_request.assert_called_once_with(expected)
|
||||
self.helper.ovn_nbdb_api.db_find_rows.assert_called_once_with(
|
||||
'Load_Balancer',
|
||||
(('ip_port_mappings', '=',
|
||||
{self.member_address: 'a-logical-port:' + src_ip}),
|
||||
('protocol', '=', self.ovn_hm_lb.protocol)))
|
||||
|
||||
def test_hm_update_event_lb_not_found(self):
|
||||
self.helper.ovn_nbdb_api.db_find_rows.return_value.\
|
||||
execute.return_value = []
|
||||
self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
|
||||
self.helper)
|
||||
row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
|
||||
attrs={'ip': self.member_address,
|
||||
'logical_port': 'a-logical-port',
|
||||
'src_ip': '10.22.33.4',
|
||||
'port': self.member_port,
|
||||
'protocol': self.ovn_hm_lb.protocol,
|
||||
'status': ['offline']})
|
||||
self.hm_update_event.run('update', row, mock.ANY)
|
||||
self.mock_add_request.assert_not_called()
|
||||
|
||||
def test_hm_update_event_lb_row_not_found(self):
|
||||
self.helper.ovn_nbdb_api.db_find_rows.\
|
||||
side_effect = [idlutils.RowNotFound]
|
||||
self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
|
||||
self.helper)
|
||||
row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
|
||||
attrs={'ip': self.member_address,
|
||||
'logical_port': 'a-logical-port',
|
||||
'src_ip': '10.22.33.4',
|
||||
'port': self.member_port,
|
||||
'protocol': self.ovn_hm_lb.protocol,
|
||||
'status': ['offline']})
|
||||
self.hm_update_event.run('update', row, mock.ANY)
|
||||
self.mock_add_request.assert_not_called()
|
||||
|
||||
def test_hm_update_event_lb_protocol_not_found(self):
|
||||
self.helper.ovn_nbdb_api.db_find_rows.\
|
||||
side_effect = [self.ovn_hm_lb, idlutils.RowNotFound]
|
||||
self.hm_update_event = ovn_event.ServiceMonitorUpdateEvent(
|
||||
self.helper)
|
||||
row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
|
||||
attrs={'ip': self.member_address,
|
||||
'logical_port': 'a-logical-port',
|
||||
'src_ip': '10.22.33.4',
|
||||
'port': self.member_port,
|
||||
'protocol': 'unknown',
|
||||
'status': ['offline']})
|
||||
self.hm_update_event.run('update', row, mock.ANY)
|
||||
self.mock_add_request.assert_not_called()
|
||||
|
||||
def _test_hm_update_no_member(self, bad_ip, bad_port):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
fake_port = fakes.FakePort.create_one_port(
|
||||
attrs={'allowed_address_pairs': ''})
|
||||
ip = fake_port['fixed_ips'][0]['ip_address']
|
||||
member = {'id': uuidutils.generate_uuid(),
|
||||
'address': ip,
|
||||
'protocol_port': self.member_port,
|
||||
'subnet_id': fake_subnet['id'],
|
||||
'pool_id': self.pool_id,
|
||||
'admin_state_up': True,
|
||||
'old_admin_state_up': True}
|
||||
member_line = (
|
||||
'member_%s_%s:%s_%s' %
|
||||
(member['id'], member['address'],
|
||||
member['protocol_port'], member['subnet_id']))
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
self.ovn_hm_lb.external_ids[pool_key] = member_line
|
||||
|
||||
if bad_ip:
|
||||
ip = 'bad-ip'
|
||||
port = self.member_port
|
||||
if bad_port:
|
||||
port = 'bad-port'
|
||||
info = {
|
||||
'ovn_lb': self.ovn_hm_lb,
|
||||
'ip': ip,
|
||||
'logical_port': 'a-logical-port',
|
||||
'src_ip': '10.22.33.4',
|
||||
'port': port,
|
||||
'protocol': self.ovn_hm_lb.protocol,
|
||||
'status': ['offline']}
|
||||
|
||||
status = self.helper.hm_update_event(info)
|
||||
self.assertIsNone(status)
|
||||
|
||||
def test_hm_update_event_member_ip_not_found(self):
|
||||
self._test_hm_update_no_member(True, False)
|
||||
|
||||
def test_hm_update_event_member_port_not_found(self):
|
||||
self._test_hm_update_no_member(False, True)
|
||||
|
||||
def _test_hm_update_status(self, ip, port, member_status,
|
||||
lb_status=constants.ONLINE,
|
||||
pool_status=constants.ONLINE):
|
||||
fake_lb = fakes.FakeLB(
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
admin_state_up=True,
|
||||
name='fake_lb',
|
||||
ext_ids={})
|
||||
fake_pool = fakes.FakePool(
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
admin_state_up=True,
|
||||
name='fake_pool')
|
||||
info = {
|
||||
'ovn_lb': self.ovn_hm_lb,
|
||||
'ip': ip,
|
||||
'logical_port': 'a-logical-port',
|
||||
'src_ip': '10.22.33.4',
|
||||
'port': port,
|
||||
'protocol': self.ovn_hm_lb.protocol,
|
||||
'status': [member_status]}
|
||||
|
||||
fake_lb.operating_status = lb_status
|
||||
fake_pool.operating_status = pool_status
|
||||
self.octavia_driver_lib.get_pool.return_value = fake_pool
|
||||
self.octavia_driver_lib.get_loadbalancer.return_value = fake_lb
|
||||
status = self.helper.hm_update_event(info)
|
||||
return status
|
||||
|
||||
def _add_member(self, subnet, port):
|
||||
fake_port = fakes.FakePort.create_one_port(
|
||||
attrs={'allowed_address_pairs': ''})
|
||||
ip = fake_port['fixed_ips'][0]['ip_address']
|
||||
member = {'id': uuidutils.generate_uuid(),
|
||||
'address': ip,
|
||||
'protocol_port': port,
|
||||
'subnet_id': subnet['id'],
|
||||
'pool_id': self.pool_id,
|
||||
'admin_state_up': True,
|
||||
'old_admin_state_up': True}
|
||||
member_line = (
|
||||
'member_%s_%s:%s_%s' %
|
||||
(member['id'], member['address'],
|
||||
member['protocol_port'], member['subnet_id']))
|
||||
pool_key = 'pool_%s' % self.pool_id
|
||||
|
||||
existing_members = self.ovn_hm_lb.external_ids[pool_key]
|
||||
if existing_members:
|
||||
existing_members = ','.join([existing_members, member_line])
|
||||
self.ovn_hm_lb.external_ids[pool_key] = existing_members
|
||||
else:
|
||||
self.ovn_hm_lb.external_ids[pool_key] = member_line
|
||||
return member
|
||||
|
||||
def test_hm_update_status_offline(self):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
member = self._add_member(fake_subnet, 8080)
|
||||
status = self._test_hm_update_status(member['address'], '8080',
|
||||
'offline')
|
||||
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['members'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
def test_hm_update_status_offline_lb_pool_offline(self):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
member = self._add_member(fake_subnet, 8080)
|
||||
status = self._test_hm_update_status(member['address'], '8080',
|
||||
'offline',
|
||||
lb_status=constants.OFFLINE,
|
||||
pool_status=constants.OFFLINE)
|
||||
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.OFFLINE)
|
||||
self.assertEqual(status['members'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.OFFLINE)
|
||||
|
||||
def test_hm_update_status_online(self):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
member = self._add_member(fake_subnet, 8080)
|
||||
status = self._test_hm_update_status(member['address'], '8080',
|
||||
'online')
|
||||
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['members'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
||||
def test_hm_update_status_online_lb_pool_offline(self):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
member = self._add_member(fake_subnet, 8080)
|
||||
status = self._test_hm_update_status(member['address'], '8080',
|
||||
'online',
|
||||
lb_status=constants.OFFLINE,
|
||||
pool_status=constants.OFFLINE)
|
||||
|
||||
self.assertEqual(status['pools'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['members'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
|
||||
constants.ACTIVE)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
||||
def test_hm_update_status_offline_two_members(self):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
member_1 = self._add_member(fake_subnet, 8080)
|
||||
ip_1 = member_1['address']
|
||||
member_2 = self._add_member(fake_subnet, 8081)
|
||||
ip_2 = member_2['address']
|
||||
# This is the Octavia API version
|
||||
fake_member = fakes.FakeMember(
|
||||
uuid=member_2['id'],
|
||||
admin_state_up=True,
|
||||
name='member_2',
|
||||
project_id=self.project_id,
|
||||
address=ip_2,
|
||||
protocol_port=8081)
|
||||
|
||||
# Second member ONLINE, operating_status should be DEGRADED
|
||||
# for Pool and Loadbalancer
|
||||
fake_member.operating_status = constants.ONLINE
|
||||
self.octavia_driver_lib.get_member.return_value = fake_member
|
||||
|
||||
status = self._test_hm_update_status(ip_1, '8081', 'offline')
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.DEGRADED)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.DEGRADED)
|
||||
|
||||
# Second member ERROR, operating_status should be ERROR
|
||||
# for Pool and Loadbalancer
|
||||
fake_member.operating_status = constants.ERROR
|
||||
self.octavia_driver_lib.get_member.return_value = fake_member
|
||||
status = self._test_hm_update_status(ip_1, '8081', 'offline')
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.ERROR)
|
||||
|
||||
def test_hm_update_status_online_two_members(self):
|
||||
fake_subnet = fakes.FakeSubnet.create_one_subnet()
|
||||
member_1 = self._add_member(fake_subnet, 8080)
|
||||
ip_1 = member_1['address']
|
||||
member_2 = self._add_member(fake_subnet, 8081)
|
||||
ip_2 = member_2['address']
|
||||
# This is the Octavia API version
|
||||
fake_member = fakes.FakeMember(
|
||||
uuid=member_2['id'],
|
||||
admin_state_up=True,
|
||||
name='member_2',
|
||||
project_id=self.project_id,
|
||||
address=ip_2,
|
||||
protocol_port=8081)
|
||||
|
||||
# Second member ERROR, operating_status should be DEGRADED
|
||||
# for Pool and Loadbalancer
|
||||
fake_member.operating_status = constants.ERROR
|
||||
self.octavia_driver_lib.get_member.return_value = fake_member
|
||||
|
||||
status = self._test_hm_update_status(ip_1, '8081', 'online')
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.DEGRADED)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.DEGRADED)
|
||||
|
||||
# Second member ONLINE, operating_status should be ONLINE
|
||||
# for Pool and Loadbalancer
|
||||
fake_member.operating_status = constants.ONLINE
|
||||
self.octavia_driver_lib.get_member.return_value = fake_member
|
||||
status = self._test_hm_update_status(ip_1, '8081', 'online')
|
||||
self.assertEqual(status['members'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['pools'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
self.assertEqual(status['loadbalancers'][0]['operating_status'],
|
||||
constants.ONLINE)
|
||||
|
10 releasenotes/notes/add-hm-support-2c6729d8816125a5.yaml Normal file
@ -0,0 +1,10 @@
---
features:
  - |
    The OVN Octavia provider driver now supports health monitoring.
    TCP and UDP Connect health monitors are supported when the
    underlying OVN version supports them. The health monitor uses the
    OVN distributed DHCP port as the source IP for probe messages by
    default; if one does not exist, a port will be created on each
    given subnet. The list of member ports to monitor is updated
    whenever a member is created or deleted.
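For context, a minimal usage sketch of how the new capability is exercised from the Octavia API side (not part of this change). It assumes an openstacksdk Connection; the cloud name and pool UUID are placeholders. The delay/timeout/max_retries/max_retries_down values map to the interval, timeout, success_count and failure_count options the driver writes into the OVN Load_Balancer_Health_Check row, as exercised by the unit tests above.

    # Illustrative sketch only: create a TCP health monitor on a pool that
    # belongs to an OVN provider load balancer, via openstacksdk.
    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed clouds.yaml entry

    hm = conn.load_balancer.create_health_monitor(
        pool_id='<POOL_UUID>',   # placeholder: pool on an 'ovn' provider LB
        type='TCP',              # 'UDP-CONNECT' is the other type this driver accepts
        delay=6,                 # seconds between probes -> OVN 'interval'
        timeout=7,               # per-probe timeout -> OVN 'timeout'
        max_retries=3,           # successes before ONLINE -> OVN 'success_count'
        max_retries_down=5,      # failures before ERROR -> OVN 'failure_count'
    )
    print(hm.id, hm.provisioning_status)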
@ -95,7 +95,7 @@
        enabled_provider_drivers: 'ovn:OVN provider driver'
        test_sctp_protocol: True
      loadbalancer-feature-enabled:
        health_monitor_enabled: False
        health_monitor_enabled: True
        pool_algorithms_enabled: False
        l7_protocol_enabled: False
        l4_protocol: "TCP"