diff --git a/ovn_octavia_provider/common/config.py b/ovn_octavia_provider/common/config.py new file mode 100644 index 00000000..d9e42fef --- /dev/null +++ b/ovn_octavia_provider/common/config.py @@ -0,0 +1,94 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging + +from ovn_octavia_provider.i18n import _ + +LOG = logging.getLogger(__name__) + + +ovn_opts = [ + cfg.StrOpt('ovn_nb_connection', + default='tcp:127.0.0.1:6641', + help=_('The connection string for the OVN_Northbound OVSDB.\n' + 'Use tcp:IP:PORT for TCP connection.\n' + 'Use ssl:IP:PORT for SSL connection. The ' + 'ovn_nb_private_key, ovn_nb_certificate and ' + 'ovn_nb_ca_cert are mandatory.\n' + 'Use unix:FILE for unix domain socket connection.')), + cfg.StrOpt('ovn_nb_private_key', + default='', + help=_('The PEM file with private key for SSL connection to ' + 'OVN-NB-DB')), + cfg.StrOpt('ovn_nb_certificate', + default='', + help=_('The PEM file with certificate that certifies the ' + 'private key specified in ovn_nb_private_key')), + cfg.StrOpt('ovn_nb_ca_cert', + default='', + help=_('The PEM file with CA certificate that OVN should use to' + ' verify certificates presented to it by SSL peers')), + cfg.IntOpt('ovsdb_connection_timeout', + default=180, + help=_('Timeout in seconds for the OVSDB ' + 'connection transaction')), + cfg.IntOpt('ovsdb_retry_max_interval', + default=180, + help=_('Max interval in seconds between ' + 'each retry to get the OVN NB and SB IDLs')), + cfg.IntOpt('ovsdb_probe_interval', + min=0, + default=60000, + help=_('The probe interval in for the OVSDB session in ' + 'milliseconds. If this is zero, it disables the ' + 'connection keepalive feature. If non-zero the value ' + 'will be forced to at least 1000 milliseconds. Defaults ' + 'to 60 seconds.')), +] + +cfg.CONF.register_opts(ovn_opts, group='ovn') + + +def list_opts(): + return [ + ('ovn', ovn_opts), + ] + + +def get_ovn_nb_connection(): + return cfg.CONF.ovn.ovn_nb_connection + + +def get_ovn_nb_private_key(): + return cfg.CONF.ovn.ovn_nb_private_key + + +def get_ovn_nb_certificate(): + return cfg.CONF.ovn.ovn_nb_certificate + + +def get_ovn_nb_ca_cert(): + return cfg.CONF.ovn.ovn_nb_ca_cert + + +def get_ovn_ovsdb_timeout(): + return cfg.CONF.ovn.ovsdb_connection_timeout + + +def get_ovn_ovsdb_retry_max_interval(): + return cfg.CONF.ovn.ovsdb_retry_max_interval + + +def get_ovn_ovsdb_probe_interval(): + return cfg.CONF.ovn.ovsdb_probe_interval diff --git a/ovn_octavia_provider/common/constants.py b/ovn_octavia_provider/common/constants.py new file mode 100644 index 00000000..72b609fb --- /dev/null +++ b/ovn_octavia_provider/common/constants.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# TODO(mjozefcz): Use those variables from neutron-lib once released.
+LRP_PREFIX = "lrp-"
+LB_VIP_PORT_PREFIX = "ovn-lb-vip-"
+OVN_PORT_NAME_EXT_ID_KEY = 'neutron:port_name'
+OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name'
+OVN_PORT_FIP_EXT_ID_KEY = 'neutron:port_fip'
+OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id'
+OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids'
+OVN_NETWORK_NAME_EXT_ID_KEY = 'neutron:network_name'
diff --git a/ovn_octavia_provider/common/exceptions.py b/ovn_octavia_provider/common/exceptions.py
new file mode 100644
index 00000000..0e3c95af
--- /dev/null
+++ b/ovn_octavia_provider/common/exceptions.py
@@ -0,0 +1,24 @@
+# Copyright 2019 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib import exceptions as n_exc
+
+from ovn_octavia_provider.i18n import _
+
+
+class RevisionConflict(n_exc.NeutronException):
+    message = _('OVN revision number for %(resource_id)s (type: '
+                '%(resource_type)s) is equal or higher than the given '
+                'resource. Skipping update')
diff --git a/ovn_octavia_provider/common/utils.py b/ovn_octavia_provider/common/utils.py
new file mode 100644
index 00000000..e2f32091
--- /dev/null
+++ b/ovn_octavia_provider/common/utils.py
@@ -0,0 +1,33 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ovn_octavia_provider.common import constants
+
+
+def ovn_name(id):
+    # The name of the OVN entry will be neutron-<id>.
+    # This is due to the fact that the OVN application checks if the name
+    # is a UUID. If so then there will be no matches.
+    # We prefix the UUID to enable us to use the Neutron UUID when
+    # updating, deleting etc.
+    return 'neutron-%s' % id
+
+
+def ovn_lrouter_port_name(id):
+    # The name of the OVN lrouter port entry will be lrp-<id>.
+    # This is to distinguish it from the name of the connected lswitch patch
+    # port, which is named with the neutron port uuid, so that OVS patch
+    # ports are generated properly.
+    # The pairing patch port names will be:
+    #   - patch-lrp-<id>-to-<id>
+    #   - patch-<id>-to-lrp-<id>
+    # lrp stands for Logical Router Port
+    return constants.LRP_PREFIX + '%s' % id
diff --git a/ovn_octavia_provider/driver.py b/ovn_octavia_provider/driver.py
new file mode 100644
index 00000000..edeb706e
--- /dev/null
+++ b/ovn_octavia_provider/driver.py
@@ -0,0 +1,1856 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import atexit
+import copy
+import threading
+
+import netaddr
+from neutronclient.common import exceptions as n_exc
+from octavia_lib.api.drivers import data_models as o_datamodels
+from octavia_lib.api.drivers import driver_lib as o_driver_lib
+from octavia_lib.api.drivers import exceptions as driver_exceptions
+from octavia_lib.api.drivers import provider_base as driver_base
+from octavia_lib.common import constants
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+from ovs.stream import Stream
+from ovsdbapp.backend.ovs_idl import connection
+from ovsdbapp.backend.ovs_idl import event as row_event
+from ovsdbapp.backend.ovs_idl import idlutils
+from six.moves import queue as Queue
+from stevedore import driver
+import tenacity
+
+from ovn_octavia_provider.common import config as ovn_conf
+# TODO(mjozefcz): Start consuming const and utils
+# from neutron-lib once released.
+from ovn_octavia_provider.common import constants as ovn_const +from ovn_octavia_provider.common import utils +from ovn_octavia_provider.i18n import _ +from ovn_octavia_provider.ovsdb import impl_idl_ovn +from ovn_octavia_provider.ovsdb import ovsdb_monitor + +CONF = cfg.CONF # Gets Octavia Conf as it runs under o-api domain + +LOG = logging.getLogger(__name__) + +REQ_TYPE_LB_CREATE = 'lb_create' +REQ_TYPE_LB_DELETE = 'lb_delete' +REQ_TYPE_LB_FAILOVER = 'lb_failover' +REQ_TYPE_LB_UPDATE = 'lb_update' +REQ_TYPE_LISTENER_CREATE = 'listener_create' +REQ_TYPE_LISTENER_DELETE = 'listener_delete' +REQ_TYPE_LISTENER_UPDATE = 'listener_update' +REQ_TYPE_POOL_CREATE = 'pool_create' +REQ_TYPE_POOL_DELETE = 'pool_delete' +REQ_TYPE_POOL_UPDATE = 'pool_update' +REQ_TYPE_MEMBER_CREATE = 'member_create' +REQ_TYPE_MEMBER_DELETE = 'member_delete' +REQ_TYPE_MEMBER_UPDATE = 'member_update' +REQ_TYPE_LB_CREATE_LRP_ASSOC = 'lb_create_lrp_assoc' +REQ_TYPE_LB_DELETE_LRP_ASSOC = 'lb_delete_lrp_assoc' +REQ_TYPE_HANDLE_VIP_FIP = 'handle_vip_fip' + +REQ_TYPE_EXIT = 'exit' + +REQ_INFO_ACTION_ASSOCIATE = 'associate' +REQ_INFO_ACTION_DISASSOCIATE = 'disassociate' + +DISABLED_RESOURCE_SUFFIX = 'D' + +LB_EXT_IDS_LS_REFS_KEY = 'ls_refs' +LB_EXT_IDS_LR_REF_KEY = 'lr_ref' +LB_EXT_IDS_POOL_PREFIX = 'pool_' +LB_EXT_IDS_LISTENER_PREFIX = 'listener_' +LB_EXT_IDS_MEMBER_PREFIX = 'member_' +LB_EXT_IDS_VIP_KEY = 'neutron:vip' +LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip' +LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id' + +OVN_NATIVE_LB_PROTOCOLS = [constants.PROTOCOL_TCP, + constants.PROTOCOL_UDP, ] +OVN_NATIVE_LB_ALGORITHMS = [constants.LB_ALGORITHM_SOURCE_IP_PORT, ] +EXCEPTION_MSG = "Exception occurred during %s" +OVN_EVENT_LOCK_NAME = "neutron_ovn_octavia_event_lock" + + +class IPVersionsMixingNotSupportedError( + driver_exceptions.UnsupportedOptionError): + user_fault_string = _('OVN provider does not support mixing IPv4/IPv6 ' + 'configuration within the same Load Balancer.') + operator_fault_string = user_fault_string + + +def get_network_driver(): + try: + CONF.import_group('controller_worker', 'octavia.common.config') + name = CONF.controller_worker.network_driver + except ImportError: + # TODO(mjozefcz): Remove this when the config option will + # land in octavia-lib. + name = 'network_noop_driver' + return driver.DriverManager( + namespace='octavia.network.drivers', + name=name, + invoke_on_load=True + ).driver + + +class LogicalRouterPortEvent(row_event.RowEvent): + + driver = None + + def __init__(self, driver): + table = 'Logical_Router_Port' + events = (self.ROW_CREATE, self.ROW_DELETE) + super(LogicalRouterPortEvent, self).__init__( + events, table, None) + self.event_name = 'LogicalRouterPortEvent' + self.driver = driver + + def run(self, event, row, old): + LOG.debug('LogicalRouterPortEvent logged, ' + '%(event)s, %(row)s', + {'event': event, + 'row': row}) + if not self.driver or row.gateway_chassis: + return + if event == self.ROW_CREATE: + self.driver.lb_create_lrp_assoc_handler(row) + elif event == self.ROW_DELETE: + self.driver.lb_delete_lrp_assoc_handler(row) + + +class LogicalSwitchPortUpdateEvent(row_event.RowEvent): + + driver = None + + def __init__(self, driver): + table = 'Logical_Switch_Port' + events = (self.ROW_UPDATE,) + super(LogicalSwitchPortUpdateEvent, self).__init__( + events, table, None) + self.event_name = 'LogicalSwitchPortUpdateEvent' + self.driver = driver + + def run(self, event, row, old): + # Get the neutron:port_name from external_ids and check if + # it's a vip port or not. 
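+        # Illustrative example only (hypothetical values): for a VIP port
+        # created by this driver, the row's external_ids would look roughly
+        # like:
+        #   {'neutron:port_name': 'ovn-lb-vip-<loadbalancer_id>',
+        #    'neutron:port_fip': '172.24.4.10'}
+        # Rows whose port name lacks the 'ovn-lb-vip-' prefix are ignored.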
+ port_name = row.external_ids.get( + ovn_const.OVN_PORT_NAME_EXT_ID_KEY, '') + if self.driver and port_name.startswith(ovn_const.LB_VIP_PORT_PREFIX): + # Handle port update only for vip ports created by + # this driver. + self.driver.vip_port_update_handler(row) + + +class OvnNbIdlForLb(ovsdb_monitor.OvnIdl): + SCHEMA = "OVN_Northbound" + TABLES = ('Logical_Switch', 'Load_Balancer', 'Logical_Router', + 'Logical_Switch_Port', 'Logical_Router_Port', + 'Gateway_Chassis') + + def __init__(self, event_lock_name=None): + self.conn_string = ovn_conf.get_ovn_nb_connection() + ovsdb_monitor._check_and_set_ssl_files(self.SCHEMA) + helper = self._get_ovsdb_helper(self.conn_string) + for table in OvnNbIdlForLb.TABLES: + helper.register_table(table) + super(OvnNbIdlForLb, self).__init__( + driver=None, remote=self.conn_string, schema=helper) + self.event_lock_name = event_lock_name + if self.event_lock_name: + self.set_lock(self.event_lock_name) + atexit.register(self.stop) + + @tenacity.retry( + wait=tenacity.wait_exponential(max=180), + reraise=True) + def _get_ovsdb_helper(self, connection_string): + return idlutils.get_schema_helper(connection_string, self.SCHEMA) + + def start(self): + self.conn = connection.Connection( + self, timeout=ovn_conf.get_ovn_ovsdb_timeout()) + return impl_idl_ovn.OvsdbNbOvnIdl(self.conn) + + def stop(self): + # Close the running connection if it has been initalized + if ((hasattr(self, 'conn') and not + self.conn.stop(timeout=ovn_conf.get_ovn_ovsdb_timeout()))): + LOG.debug("Connection terminated to OvnNb " + "but a thread is still alive") + # complete the shutdown for the event handler + self.notify_handler.shutdown() + # Close the idl session + self.close() + + +class OvnProviderHelper(object): + + ovn_nbdb_api_for_events = None + ovn_nb_idl_for_events = None + ovn_nbdb_api = None + + def __init__(self): + self.requests = Queue.Queue() + self.helper_thread = threading.Thread(target=self.request_handler) + self.helper_thread.daemon = True + atexit.register(self.shutdown) + self._octavia_driver_lib = o_driver_lib.DriverLibrary() + self._check_and_set_ssl_files() + self._init_lb_actions() + self.events = [LogicalRouterPortEvent(self), + LogicalSwitchPortUpdateEvent(self)] + self.start() + + def _init_lb_actions(self): + self._lb_request_func_maps = { + REQ_TYPE_LB_CREATE: self.lb_create, + REQ_TYPE_LB_DELETE: self.lb_delete, + REQ_TYPE_LB_UPDATE: self.lb_update, + REQ_TYPE_LB_FAILOVER: self.lb_failover, + REQ_TYPE_LISTENER_CREATE: self.listener_create, + REQ_TYPE_LISTENER_DELETE: self.listener_delete, + REQ_TYPE_LISTENER_UPDATE: self.listener_update, + REQ_TYPE_POOL_CREATE: self.pool_create, + REQ_TYPE_POOL_DELETE: self.pool_delete, + REQ_TYPE_POOL_UPDATE: self.pool_update, + REQ_TYPE_MEMBER_CREATE: self.member_create, + REQ_TYPE_MEMBER_DELETE: self.member_delete, + REQ_TYPE_MEMBER_UPDATE: self.member_update, + REQ_TYPE_LB_CREATE_LRP_ASSOC: self.lb_create_lrp_assoc, + REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc, + REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip, + } + + def _check_and_set_ssl_files(self): + # TODO(reedip): Make ovsdb_monitor's _check_and_set_ssl_files() public + # This is a copy of ovsdb_monitor._check_and_set_ssl_files + if OvnProviderHelper.ovn_nbdb_api: + return + priv_key_file = ovn_conf.get_ovn_nb_private_key() + cert_file = ovn_conf.get_ovn_nb_certificate() + ca_cert_file = ovn_conf.get_ovn_nb_ca_cert() + if priv_key_file: + Stream.ssl_set_private_key_file(priv_key_file) + + if cert_file: + Stream.ssl_set_certificate_file(cert_file) + + if 
ca_cert_file: + Stream.ssl_set_ca_cert_file(ca_cert_file) + + def start(self): + # NOTE(mjozefcz): This API is only for handling octavia API requests. + if not OvnProviderHelper.ovn_nbdb_api: + OvnProviderHelper.ovn_nbdb_api = OvnNbIdlForLb().start() + + # NOTE(mjozefcz): This API is only for handling OVSDB events! + if not OvnProviderHelper.ovn_nbdb_api_for_events: + OvnProviderHelper.ovn_nb_idl_for_events = OvnNbIdlForLb( + event_lock_name=OVN_EVENT_LOCK_NAME) + (OvnProviderHelper.ovn_nb_idl_for_events.notify_handler. + watch_events(self.events)) + OvnProviderHelper.ovn_nbdb_api_for_events = ( + OvnProviderHelper.ovn_nb_idl_for_events.start()) + self.helper_thread.start() + + def shutdown(self): + self.requests.put({'type': REQ_TYPE_EXIT}) + self.helper_thread.join() + self.ovn_nb_idl_for_events.notify_handler.unwatch_events(self.events) + + @staticmethod + def _map_val(row, col, key): + # If the row doesnt exist, RowNotFound is raised by the _map_val + # and is expected to be caught by the caller. + try: + return getattr(row, col)[key] + except KeyError: + raise idlutils.RowNotFound(table=row._table.name, + col=col, match=key) + + def _get_nw_router_info_on_interface_event(self, lrp): + """Get the Router and Network information on an interface event + + This function is called when a new interface between a router and + a network is added or deleted. + Input: Logical Router Port row which is coming from + LogicalRouterPortEvent. + Output: A row from router table and network table matching the router + and network for which the event was generated. + Exception: RowNotFound exception can be generated. + """ + router = self.ovn_nbdb_api.lookup( + 'Logical_Router', utils.ovn_name(self._map_val( + lrp, 'external_ids', ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY))) + network = self.ovn_nbdb_api.lookup( + 'Logical_Switch', + self._map_val(lrp, 'external_ids', + ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY)) + return router, network + + def lb_delete_lrp_assoc_handler(self, row): + try: + router, network = self._get_nw_router_info_on_interface_event(row) + except idlutils.RowNotFound: + LOG.debug("Router or network information not found") + return + request_info = {'network': network, + 'router': router} + self.add_request({'type': REQ_TYPE_LB_DELETE_LRP_ASSOC, + 'info': request_info}) + + def lb_delete_lrp_assoc(self, info): + # TODO(reedip): When OVS>=2.12, LB can be deleted without removing + # Network and Router references as pushed in the patch + # https://github.com/openvswitch/ovs/commit + # /612f80fa8ebf88dad2e204364c6c02b451dca36c + commands = [] + network = info['network'] + router = info['router'] + + # Find all loadbalancers which have a reference with the network + nw_lb = self._find_lb_in_ls(network=network) + # Find all loadbalancers which have a reference with the router + r_lb = set(router.load_balancer) - nw_lb + # Delete all LB on N/W from Router + for nlb in nw_lb: + commands.extend(self._update_lb_to_lr_association(nlb, router, + delete=True)) + # Delete all LB on Router from N/W + for rlb in r_lb: + commands.append(self.ovn_nbdb_api.ls_lb_del( + network.uuid, rlb.uuid)) + if commands: + self._execute_commands(commands) + + def lb_create_lrp_assoc_handler(self, row): + try: + router, network = self._get_nw_router_info_on_interface_event(row) + except idlutils.RowNotFound: + LOG.debug("Router or network information not found") + return + request_info = {'network': network, + 'router': router} + self.add_request({'type': REQ_TYPE_LB_CREATE_LRP_ASSOC, + 'info': request_info}) + + def 
lb_create_lrp_assoc(self, info):
+        commands = []
+
+        router_lb = set(info['router'].load_balancer)
+        network_lb = set(info['network'].load_balancer)
+        # Add only those lb to routers which are unique to the network
+        for lb in (network_lb - router_lb):
+            commands.extend(self._update_lb_to_lr_association(
+                lb, info['router']))
+
+        # Add those lb to the network which are unique to the router
+        for lb in (router_lb - network_lb):
+            commands.append(self.ovn_nbdb_api.ls_lb_add(
+                info['network'].uuid, lb.uuid, may_exist=True))
+        if commands:
+            self._execute_commands(commands)
+
+    def vip_port_update_handler(self, vip_lp):
+        """Handler for VirtualIP port updates.
+
+        If a floating ip is associated with a vip port, then networking-ovn
+        sets the fip in the external_ids column of the logical port as:
+        Logical_Switch_Port.external_ids:port_fip = <FIP>.
+        Then, in the Load_Balancer table for the vip, networking-ovn creates
+        another vip entry for the FIP.
+        If a floating ip is disassociated from the vip, then it deletes
+        the vip entry for the FIP.
+        """
+
+        port_name = vip_lp.external_ids.get(
+            ovn_const.OVN_PORT_NAME_EXT_ID_KEY)
+        lb_id = port_name[len(ovn_const.LB_VIP_PORT_PREFIX):]
+        try:
+            ovn_lb = self._find_ovn_lb(lb_id)
+        except idlutils.RowNotFound:
+            LOG.debug("Loadbalancer %s not found!", lb_id)
+            return
+
+        fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)
+        lb_vip_fip = ovn_lb.external_ids.get(LB_EXT_IDS_VIP_FIP_KEY)
+        request_info = {'lb_id': lb_id,
+                        'vip_fip': fip}
+
+        if fip and fip != lb_vip_fip:
+            request_info['action'] = REQ_INFO_ACTION_ASSOCIATE
+        elif fip is None and fip != lb_vip_fip:
+            request_info['action'] = REQ_INFO_ACTION_DISASSOCIATE
+        else:
+            return
+
+        self.add_request({'type': REQ_TYPE_HANDLE_VIP_FIP,
+                          'info': request_info})
+
+    def _find_lb_in_ls(self, network):
+        """Find LB associated with a Network using Network information
+
+        This function retrieves those loadbalancers whose ls_refs entry
+        in the external_ids of the OVN northbound Load_Balancer table
+        contains the network's name. Although different networks can be
+        associated with a loadbalancer, the ls_refs entry of a loadbalancer
+        points to the network where it was actually created, and this
+        function tries to retrieve all those loadbalancers created on this
+        network.
+        Input : row of type Logical_Switch
+        Output: set of rows of type Load_Balancer or empty set
+        """
+        return {lb for lb in network.load_balancer
+                if network.name in lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY,
+                                                       [])}
+
+    def _find_lb_in_table(self, lb, table):
+        return [item for item in self.ovn_nbdb_api.tables[table].rows.values()
+                if lb in item.load_balancer]
+
+    def request_handler(self):
+        while True:
+            try:
+                request = self.requests.get()
+                request_type = request['type']
+                if request_type == REQ_TYPE_EXIT:
+                    break
+
+                request_handler = self._lb_request_func_maps.get(request_type)
+                if request_handler:
+                    status = request_handler(request['info'])
+                    if status:
+                        self._update_status_to_octavia(status)
+                self.requests.task_done()
+            except Exception:
+                # If any unexpected exception happens we don't want the
+                # notify_loop to exit.
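+                # Illustrative note: every queued item has the shape
+                # {'type': REQ_TYPE_*, 'info': {...}} (see the add_request
+                # callers); a single failed request is only logged so that
+                # the handler thread keeps consuming the queue.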
+ LOG.exception('Unexpected exception in request_handler') + + def add_request(self, req): + self.requests.put(req) + + def _update_status_to_octavia(self, status): + try: + self._octavia_driver_lib.update_loadbalancer_status(status) + except driver_exceptions.UpdateStatusError as e: + msg = ("Error while updating the load balancer " + "status: %s") % e.fault_string + LOG.error(msg) + raise driver_exceptions.UpdateStatusError(msg) + + def _find_ovn_lb(self, lb_id): + """Find the Loadbalancer in OVN with the given lb_id as its name + + This function searches for the LoadBalancer whose Name has the pattern + passed in lb_id. + Input: String format of LoadBalancer ID provided by Octavia in its API + request. Note that OVN saves the above ID in the 'name' column. + Output: LoadBalancer row matching the lb_id + Exception: RowNotFound can be generated if the LoadBalancer is not + found. + """ + return self.ovn_nbdb_api.lookup('Load_Balancer', lb_id) + + def _find_ovn_lb_with_pool_key(self, pool_key): + lbs = self.ovn_nbdb_api.db_list_rows( + 'Load_Balancer').execute(check_error=True) + for lb in lbs: + if pool_key in lb.external_ids: + return lb + + def _find_ovn_lb_by_id(self, pool_id): + pool_key = self._get_pool_key(pool_id) + ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) + if not ovn_lb: + pool_key = self._get_pool_key(pool_id, is_enabled=False) + ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) + return pool_key, ovn_lb + + def _execute_commands(self, commands): + with self.ovn_nbdb_api.transaction(check_error=True) as txn: + for command in commands: + txn.add(command) + + def _update_lb_to_ls_association(self, ovn_lb, network_id=None, + subnet_id=None, associate=True): + """Update LB association with Logical Switch + + This function deals with updating the References of Logical Switch + in LB and addition of LB to LS. + """ + commands = [] + if not network_id and not subnet_id: + return commands + + if network_id: + ls_name = utils.ovn_name(network_id) + else: + network_driver = get_network_driver() + subnet = network_driver.get_subnet(subnet_id) + ls_name = utils.ovn_name(subnet.network_id) + + ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( + check_error=True) + if not ovn_ls: + return commands + + ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY) + if ls_refs: + try: + ls_refs = jsonutils.loads(ls_refs) + except ValueError: + ls_refs = {} + else: + ls_refs = {} + + if associate: + if ls_name in ls_refs: + ref_ct = ls_refs[ls_name] + ls_refs[ls_name] = ref_ct + 1 + else: + ls_refs[ls_name] = 1 + commands.append(self.ovn_nbdb_api.ls_lb_add( + ovn_ls.uuid, ovn_lb.uuid, may_exist=True)) + else: + if ls_name not in ls_refs: + # Nothing to be done. 
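+                # Illustrative note: ls_refs is kept as a JSON map of
+                # logical switch name to reference count in external_ids,
+                # e.g. {"neutron-<network_id>": 1} (hypothetical value), so
+                # an unknown switch name means there is no reference to drop.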
+ return commands + + ref_ct = ls_refs[ls_name] + if ref_ct == 1: + del ls_refs[ls_name] + commands.append(self.ovn_nbdb_api.ls_lb_del( + ovn_ls.uuid, ovn_lb.uuid, if_exists=True)) + else: + ls_refs[ls_name] = ref_ct - 1 + + ls_refs = {LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)} + commands.append(self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', ls_refs))) + + return commands + + def _del_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_ref): + commands = [] + if lr_ref: + try: + lr_ref = [r for r in + [lr.strip() for lr in lr_ref.split(',')] + if r != ovn_lr.name] + except ValueError: + msg = ('The loadbalancer %(lb)s is not associated with ' + 'the router %(router)s' % + {'lb': ovn_lb.name, + 'router': ovn_lr.name}) + LOG.warning(msg) + if lr_ref: + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', + {LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)}))) + else: + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (LB_EXT_IDS_LR_REF_KEY)) + ) + commands.append( + self.ovn_nbdb_api.lr_lb_del(ovn_lr.uuid, ovn_lb.uuid, + if_exists=True) + ) + for net in self._find_ls_for_lr(ovn_lr): + commands.append(self.ovn_nbdb_api.ls_lb_del( + net, ovn_lb.uuid, if_exists=True)) + return commands + + def _add_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_rf): + commands = [] + commands.append( + self.ovn_nbdb_api.lr_lb_add(ovn_lr.uuid, ovn_lb.uuid, + may_exist=True) + ) + for net in self._find_ls_for_lr(ovn_lr): + commands.append(self.ovn_nbdb_api.ls_lb_add( + net, ovn_lb.uuid, may_exist=True)) + + # Multiple routers in lr_rf are separated with ',' + lr_rf = {LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} if not lr_rf else { + LB_EXT_IDS_LR_REF_KEY: "%s,%s" % (lr_rf, ovn_lr.name)} + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', lr_rf)) + ) + return commands + + def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False): + lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY) + if delete: + return self._del_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) + else: + return self._add_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) + + def _find_ls_for_lr(self, router): + # NOTE(mjozefcz): We skip here ports connected to + # provider networks (with gateway_chassis set). 
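+        # Illustrative note: 'neutron:subnet_ids' in a router port's
+        # external_ids is assumed to hold a space-separated list of subnet
+        # UUIDs, which is why the lookup below splits the value on ' '.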
+ netdriver = get_network_driver() + try: + return [utils.ovn_name(netdriver.get_subnet(sid).network_id) + for port in router.ports + for sid in port.external_ids.get( + ovn_const.OVN_SUBNET_EXT_IDS_KEY, '').split(' ') + if not port.gateway_chassis] + except Exception: + LOG.exception('Unknown exception occurred') + return [] + + def _find_lr_of_ls(self, ovn_ls): + lsp_router_port = None + for port in ovn_ls.ports or []: + if port.type == 'router': + lsp_router_port = port + break + else: + return + + lrp_name = lsp_router_port.options.get('router-port') + if not lrp_name: + return + + for lr in self.ovn_nbdb_api.tables['Logical_Router'].rows.values(): + for lrp in lr.ports: + if lrp.name == lrp_name: + return lr + # Handles networks with only gateway port in the router + if utils.ovn_lrouter_port_name( + lr.external_ids.get("neutron:gw_port_id")) == lrp_name: + return lr + + def _get_listener_key(self, listener_id, is_enabled=True): + listener_key = LB_EXT_IDS_LISTENER_PREFIX + str(listener_id) + if not is_enabled: + listener_key += ':' + DISABLED_RESOURCE_SUFFIX + return listener_key + + def _get_pool_key(self, pool_id, is_enabled=True): + pool_key = LB_EXT_IDS_POOL_PREFIX + str(pool_id) + if not is_enabled: + pool_key += ':' + DISABLED_RESOURCE_SUFFIX + return pool_key + + def _extract_member_info(self, member): + mem_info = '' + if member: + for mem in member.split(','): + mem_info += str(mem.split('_')[2]) + ',' + return mem_info[:-1] # Remove the last ',' + + def _get_member_key(self, member): + member_info = LB_EXT_IDS_MEMBER_PREFIX + member['id'] + "_" + member_info += member['address'] + ":" + str(member['protocol_port']) + return member_info + + def _make_listener_key_value(self, listener_port, pool_id): + return str(listener_port) + ':' + pool_id + + def _extract_listener_key_value(self, listener_value): + v = listener_value.split(':') + if len(v) == 2: + return (v[0], v[1]) + else: + return (None, None) + + def _is_listener_disabled(self, listener_key): + v = listener_key.split(':') + if len(v) == 2 and v[1] == DISABLED_RESOURCE_SUFFIX: + return True + + return False + + def _get_pool_listeners(self, ovn_lb, pool_key): + pool_listeners = [] + for k, v in ovn_lb.external_ids.items(): + if LB_EXT_IDS_LISTENER_PREFIX not in k: + continue + vip_port, p_key = self._extract_listener_key_value(v) + if pool_key == p_key: + pool_listeners.append(k[len(LB_EXT_IDS_LISTENER_PREFIX):]) + + return pool_listeners + + def _frame_vip_ips(self, lb_external_ids): + vip_ips = {} + # If load balancer is disabled, return + if lb_external_ids.get('enabled') == 'False': + return vip_ips + + lb_vip = lb_external_ids[LB_EXT_IDS_VIP_KEY] + vip_fip = lb_external_ids.get(LB_EXT_IDS_VIP_FIP_KEY) + + for k, v in lb_external_ids.items(): + if (LB_EXT_IDS_LISTENER_PREFIX not in k or + self._is_listener_disabled(k)): + continue + + vip_port, pool_id = self._extract_listener_key_value(v) + if not vip_port or not pool_id: + continue + + if pool_id not in lb_external_ids or not lb_external_ids[pool_id]: + continue + + ips = self._extract_member_info(lb_external_ids[pool_id]) + vip_ips[lb_vip + ':' + vip_port] = ips + + if vip_fip: + vip_ips[vip_fip + ':' + vip_port] = ips + + return vip_ips + + def _refresh_lb_vips(self, ovn_lb_uuid, lb_external_ids): + vip_ips = self._frame_vip_ips(lb_external_ids) + return [self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb_uuid, + 'vips'), + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb_uuid, + ('vips', vip_ips))] + + def _is_listener_in_lb(self, lb): + for key in 
list(lb.external_ids): + if key.startswith(LB_EXT_IDS_LISTENER_PREFIX): + return True + return False + + def check_lb_protocol(self, lb_id, listener_protocol): + ovn_lb = self._find_ovn_lb(lb_id) + if not ovn_lb: + return False + elif not self._is_listener_in_lb(ovn_lb): + return True + else: + return str(listener_protocol).lower() in ovn_lb.protocol + + def lb_create(self, loadbalancer): + port = None + try: + # Get the port id of the vip and store it in the external_ids. + # This is required to delete the port when the loadbalancer is + # deleted. + network_driver = get_network_driver() + ports = network_driver.neutron_client.list_ports( + network_id=loadbalancer['vip_network_id']) + for p in ports['ports']: + for ip in p['fixed_ips']: + if ip['ip_address'] == loadbalancer['vip_address']: + port = p + break + # In case port is not found for the vip_address we will see an + # exception when port['id'] is accessed. + self.ovn_nbdb_api.db_create( + 'Load_Balancer', name=loadbalancer['id'], protocol='tcp', + external_ids={LB_EXT_IDS_VIP_KEY: loadbalancer['vip_address'], + LB_EXT_IDS_VIP_PORT_ID_KEY: port['id'], + 'enabled': str(loadbalancer['admin_state_up'])} + ).execute(check_error=True) + ovn_lb = self._find_ovn_lb(loadbalancer['id']) + commands = self._update_lb_to_ls_association( + ovn_lb, network_id=loadbalancer['vip_network_id'], + associate=True) + ls_name = utils.ovn_name(loadbalancer['vip_network_id']) + ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( + check_error=True) + ovn_lr = self._find_lr_of_ls(ovn_ls) + if ovn_lr: + commands.extend(self._update_lb_to_lr_association( + ovn_lb, ovn_lr)) + self._execute_commands(commands) + operating_status = constants.ONLINE + # The issue is that since OVN doesnt support any HMs, + # we ideally should never put the status as 'ONLINE' + if not loadbalancer.get('admin_state_up', True): + operating_status = constants.OFFLINE + status = { + 'loadbalancers': [{"id": loadbalancer['id'], + "provisioning_status": constants.ACTIVE, + "operating_status": operating_status}]} + # If the connection with the OVN NB db server is broken, then + # ovsdbapp will throw either TimeOutException or RunTimeError. + # May be we can catch these specific exceptions. + # It is important to report the status to octavia. We can report + # immediately or reschedule the lb_create request later. + # For now lets report immediately. + except Exception: + LOG.exception(EXCEPTION_MSG, "creation of loadbalancer") + # Any Exception set the status to ERROR + if isinstance(port, dict): + self.delete_vip_port(port.get('id')) + LOG.warning("Deleting the VIP port %s since LB went into " + "ERROR state", str(port.get('id'))) + status = { + 'loadbalancers': [{"id": loadbalancer['id'], + "provisioning_status": constants.ERROR, + "operating_status": constants.ERROR}]} + return status + + def lb_delete(self, loadbalancer): + commands = [] + port_id = None + try: + status = {'loadbalancers': [{"id": loadbalancer['id'], + "provisioning_status": "DELETED", + "operating_status": "OFFLINE"}]} + ovn_lb = None + try: + ovn_lb = self._find_ovn_lb(loadbalancer['id']) + except idlutils.RowNotFound: + LOG.warning("Loadbalancer %s not found in OVN Northbound DB." 
+ "Setting the Loadbalancer status to DELETED " + "in Octavia", str(loadbalancer['id'])) + return status + if not ovn_lb: + return status + + if loadbalancer['cascade']: + status['members'] = [] + status['pools'] = [] + status['listeners'] = [] + # Delete all pools + for key, value in ovn_lb.external_ids.items(): + if key.startswith(LB_EXT_IDS_POOL_PREFIX): + pool_id = key.split('_')[1] + # Delete all members in the pool + if value and len(value.split(',')) > 0: + for mem_info in value.split(','): + status['members'].append({ + 'id': mem_info.split('_')[1], + 'provisioning_status': constants.DELETED}) + status['pools'].append( + {"id": pool_id, + "provisioning_status": constants.DELETED}) + + if key.startswith(LB_EXT_IDS_LISTENER_PREFIX): + status['listeners'].append({ + 'id': key.split('_')[1], + 'provisioning_status': constants.DELETED, + 'operating_status': constants.OFFLINE}) + # Clear the status dict of any key having [] value + # Python 3.6 doesnt allow deleting an element in a + # dict while iterating over it. So first get a list of keys. + # https://cito.github.io/blog/never-iterate-a-changing-dict/ + status = {key: value for key, value in status.items() if value} + ls_refs = ovn_lb.external_ids.get(LB_EXT_IDS_LS_REFS_KEY, {}) + if ls_refs: + try: + ls_refs = jsonutils.loads(ls_refs) + except ValueError: + ls_refs = {} + # Delete the VIP Port + self.delete_vip_port(ovn_lb.external_ids[ + LB_EXT_IDS_VIP_PORT_ID_KEY]) + for ls_name in ls_refs.keys(): + ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( + check_error=True) + if ovn_ls: + commands.append( + self.ovn_nbdb_api.ls_lb_del(ovn_ls.uuid, ovn_lb.uuid)) + # Delete LB from all Networks the LB is indirectly associated + for ls in self._find_lb_in_table(ovn_lb, 'Logical_Switch'): + commands.append( + self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid, + if_exists=True)) + lr_ref = ovn_lb.external_ids.get(LB_EXT_IDS_LR_REF_KEY, {}) + if lr_ref: + for lr in self.ovn_nbdb_api.tables[ + 'Logical_Router'].rows.values(): + if lr.name == lr_ref: + commands.append(self.ovn_nbdb_api.lr_lb_del( + lr.uuid, ovn_lb.uuid)) + break + # Delete LB from all Routers the LB is indirectly associated + for lr in self._find_lb_in_table(ovn_lb, 'Logical_Router'): + commands.append( + self.ovn_nbdb_api.lr_lb_del(lr.uuid, ovn_lb.uuid, + if_exists=True)) + # Save the port ID before deleting the LoadBalancer + port_id = ovn_lb.external_ids[LB_EXT_IDS_VIP_PORT_ID_KEY] + commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid)) + self._execute_commands(commands) + # We need to delete the vip port but not fail LB delete if port + # delete fails. Can happen when Port deleted manually by user. + network_driver = get_network_driver() + network_driver.neutron_client.delete_port(port_id) + except n_exc.PortNotFoundClient: + LOG.warning("Port %s could not be found. 
Please " + "check Neutron logs", port_id) + except Exception: + LOG.exception(EXCEPTION_MSG, "deletion of loadbalancer") + status = { + 'loadbalancers': [{"id": loadbalancer['id'], + "provisioning_status": constants.ERROR, + "operating_status": constants.ERROR}]} + return status + + def lb_failover(self, loadbalancer): + status = { + 'loadbalancers': [{'id': loadbalancer['id'], + 'provisioning_status': constants.ACTIVE}]} + return status + + def lb_update(self, loadbalancer): + try: + lb_status = {'id': loadbalancer['id'], + 'provisioning_status': constants.ACTIVE} + status = {'loadbalancers': [lb_status]} + if 'admin_state_up' not in loadbalancer: + return status + lb_enabled = loadbalancer['admin_state_up'] + + ovn_lb = self._find_ovn_lb(loadbalancer['id']) + if ovn_lb.external_ids['enabled'] != str(lb_enabled): + commands = [] + enable_info = {'enabled': str(lb_enabled)} + ovn_lb.external_ids['enabled'] = str(lb_enabled) + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', enable_info))) + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, ovn_lb.external_ids)) + self._execute_commands(commands) + if lb_enabled: + operating_status = constants.ONLINE + else: + operating_status = constants.OFFLINE + lb_status['operating_status'] = operating_status + except Exception: + LOG.exception(EXCEPTION_MSG, "update of loadbalancer") + lb_status['provisioning_status'] = constants.ERROR + lb_status['operating_status'] = constants.ERROR + return status + + def listener_create(self, listener): + try: + ovn_lb = self._find_ovn_lb(listener['loadbalancer_id']) + + external_ids = copy.deepcopy(ovn_lb.external_ids) + listener_key = self._get_listener_key( + listener['id'], is_enabled=listener['admin_state_up']) + + if listener.get('default_pool_id'): + pool_key = self._get_pool_key(listener['default_pool_id']) + else: + pool_key = '' + external_ids[listener_key] = self._make_listener_key_value( + listener['protocol_port'], pool_key) + + listener_info = {listener_key: external_ids[listener_key]} + commands = [] + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', listener_info)) + ) + if not self._is_listener_in_lb(ovn_lb): + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('protocol', str(listener['protocol']).lower())) + ) + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids) + ) + self._execute_commands(commands) + + operating_status = constants.ONLINE + if not listener.get('admin_state_up', True): + operating_status = constants.OFFLINE + status = { + 'listeners': [{"id": listener['id'], + "provisioning_status": constants.ACTIVE, + "operating_status": operating_status}], + 'loadbalancers': [{"id": listener['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + except Exception: + LOG.exception(EXCEPTION_MSG, "creation of listener") + status = { + 'listeners': [{"id": listener['id'], + "provisioning_status": constants.ERROR, + "operating_status": constants.ERROR}], + 'loadbalancers': [{"id": listener['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + return status + + def listener_delete(self, listener): + try: + ovn_lb = self._find_ovn_lb(listener['loadbalancer_id']) + external_ids = copy.deepcopy(ovn_lb.external_ids) + listener_key = self._get_listener_key(listener['id']) + + if listener_key in external_ids: + commands = [] + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (listener_key)) + ) + + 
del external_ids[listener_key] + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids) + ) + + self._execute_commands(commands) + + status = { + 'listeners': [{"id": listener['id'], + "provisioning_status": constants.DELETED, + "operating_status": constants.OFFLINE}], + 'loadbalancers': [{"id": listener['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + except Exception: + LOG.exception(EXCEPTION_MSG, "deletion of listener") + status = { + 'listeners': [{"id": listener['id'], + "provisioning_status": constants.ERROR, + "operating_status": constants.ERROR}], + 'loadbalancers': [{"id": listener['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + return status + + def listener_update(self, listener): + try: + listener_status = {'id': listener['id'], + 'provisioning_status': constants.ACTIVE} + pool_status = [] + status = { + 'listeners': [listener_status], + 'loadbalancers': [{'id': listener['loadbalancer_id'], + 'provisioning_status': constants.ACTIVE}], + 'pools': pool_status} + + ovn_lb = self._find_ovn_lb(listener['loadbalancer_id']) + l_key_when_enabled = self._get_listener_key(listener['id']) + l_key_when_disabled = self._get_listener_key( + listener['id'], is_enabled=False) + + external_ids = copy.deepcopy(ovn_lb.external_ids) + + if 'admin_state_up' not in listener and ( + 'default_pool_id' not in listener): + return status + + l_key_to_add = {} + if l_key_when_enabled in external_ids: + present_l_key = l_key_when_enabled + elif l_key_when_disabled in external_ids: + present_l_key = l_key_when_disabled + else: + # Something is terribly wrong. This cannot happen. + return status + + commands = [] + new_l_key = None + l_key_to_remove = None + if 'admin_state_up' in listener: + if listener['admin_state_up']: + # We need to enable the listener + new_l_key = l_key_when_enabled + listener_status['operating_status'] = constants.ONLINE + else: + # We need to disable the listener + new_l_key = l_key_when_disabled + listener_status['operating_status'] = constants.OFFLINE + + if present_l_key != new_l_key: + external_ids[new_l_key] = external_ids[present_l_key] + l_key_to_add[new_l_key] = external_ids[present_l_key] + del external_ids[present_l_key] + l_key_to_remove = present_l_key + + if l_key_to_remove: + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (l_key_to_remove)) + ) + else: + new_l_key = present_l_key + + if 'default_pool_id' in listener: + pool_key = self._get_pool_key(listener['default_pool_id']) + l_key_value = self._make_listener_key_value( + listener['protocol_port'], pool_key) + l_key_to_add[new_l_key] = l_key_value + external_ids[new_l_key] = l_key_value + pool_status.append({'id': listener['default_pool_id'], + 'provisioning_status': constants.ACTIVE}) + + if l_key_to_add: + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', l_key_to_add)) + ) + + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + self._execute_commands(commands) + except Exception: + LOG.exception(EXCEPTION_MSG, "update of listener") + status = { + 'listeners': [{'id': listener['id'], + 'provisioning_status': constants.ERROR}], + 'loadbalancers': [{'id': listener['loadbalancer_id'], + 'provisioning_status': constants.ACTIVE}]} + + return status + + def pool_create(self, pool): + try: + ovn_lb = self._find_ovn_lb(pool['loadbalancer_id']) + external_ids = copy.deepcopy(ovn_lb.external_ids) + pool_key = self._get_pool_key(pool['id'], + 
is_enabled=pool['admin_state_up']) + external_ids[pool_key] = '' + if pool['listener_id']: + listener_key = self._get_listener_key(pool['listener_id']) + if listener_key in ovn_lb.external_ids: + external_ids[listener_key] = str( + external_ids[listener_key]) + str(pool_key) + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', external_ids)).execute(check_error=True) + # Pool status will be set to Online after a member is added to it. + operating_status = constants.OFFLINE + + status = { + 'pools': [{'id': pool['id'], + 'provisioning_status': constants.ACTIVE, + 'operating_status': operating_status}], + 'loadbalancers': [{"id": pool['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + if pool['listener_id']: + listener_status = [{'id': pool['listener_id'], + 'provisioning_status': constants.ACTIVE}] + status['listeners'] = listener_status + except Exception: + LOG.exception(EXCEPTION_MSG, "creation of pool") + status = { + 'pools': [{"id": pool['id'], + "provisioning_status": constants.ERROR}], + 'loadbalancers': [{"id": pool['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + if pool['listener_id']: + listener_status = [{'id': pool['listener_id'], + 'provisioning_status': constants.ACTIVE}] + status['listeners'] = listener_status + + return status + + def pool_delete(self, pool): + try: + ovn_lb = self._find_ovn_lb(pool['loadbalancer_id']) + pool_key = self._get_pool_key(pool['id']) + commands = [] + external_ids = copy.deepcopy(ovn_lb.external_ids) + if pool_key in ovn_lb.external_ids: + commands.append( + self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, + 'external_ids', (pool_key)) + ) + del external_ids[pool_key] + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + # Remove Pool from Listener if it is associated + listener_id = None + for key, value in ovn_lb.external_ids.items(): + if (key.startswith(LB_EXT_IDS_LISTENER_PREFIX) and + pool_key in value): + external_ids[key] = value.split(':')[0] + ':' + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', external_ids))) + listener_id = key.split('_')[1] + + pool_key_when_disabled = self._get_pool_key(pool['id'], + is_enabled=False) + if pool_key_when_disabled in ovn_lb.external_ids: + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, + 'external_ids', (pool_key_when_disabled)) + ) + + self._execute_commands(commands) + + status = { + 'pools': [{"id": pool['id'], + "provisioning_status": constants.DELETED}], + 'loadbalancers': [{"id": pool['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + if listener_id: + status['listeners'] = [{ + 'id': listener_id, + 'provisioning_status': constants.ACTIVE}] + except Exception: + LOG.exception(EXCEPTION_MSG, "deletion of pool") + status = { + 'pools': [{"id": pool['id'], + "provisioning_status": constants.ERROR}], + 'loadbalancers': [{"id": pool['loadbalancer_id'], + "provisioning_status": constants.ACTIVE}]} + + return status + + def pool_update(self, pool): + try: + pool_status = {'id': pool['id'], + 'provisioning_status': constants.ACTIVE} + status = { + 'pools': [pool_status], + 'loadbalancers': [{'id': pool['loadbalancer_id'], + 'provisioning_status': constants.ACTIVE}]} + if 'admin_state_up' not in pool: + return status + + ovn_lb = self._find_ovn_lb(pool['loadbalancer_id']) + pool_key = self._get_pool_key(pool['id']) + p_key_when_disabled = self._get_pool_key(pool['id'], + is_enabled=False) + + external_ids = 
copy.deepcopy(ovn_lb.external_ids) + p_key_to_remove = None + p_key_to_add = {} + if pool['admin_state_up']: + if p_key_when_disabled in external_ids: + p_key_to_add[pool_key] = external_ids[p_key_when_disabled] + external_ids[pool_key] = external_ids[p_key_when_disabled] + del external_ids[p_key_when_disabled] + p_key_to_remove = p_key_when_disabled + else: + if pool_key in external_ids: + p_key_to_add[p_key_when_disabled] = external_ids[pool_key] + external_ids[p_key_when_disabled] = external_ids[pool_key] + del external_ids[pool_key] + p_key_to_remove = pool_key + + if p_key_to_remove: + commands = [] + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (p_key_to_remove)) + ) + + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', p_key_to_add)) + ) + + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + self._execute_commands(commands) + if pool['admin_state_up']: + operating_status = constants.ONLINE + else: + operating_status = constants.OFFLINE + pool_status['operating_status'] = operating_status + + pool_listeners = self._get_pool_listeners(ovn_lb, + pool_key) + listener_status = [] + for l in pool_listeners: + listener_status.append( + {'id': l, + 'provisioning_status': constants.ACTIVE}) + status['listeners'] = listener_status + except Exception: + LOG.exception(EXCEPTION_MSG, "update of pool") + status = { + 'pools': [{"id": pool['id'], + 'provisioning_status': constants.ERROR}], + 'loadbalancers': [{"id": pool['loadbalancer_id'], + 'provisioning_status': constants.ACTIVE}]} + + return status + + def _add_member(self, member, ovn_lb, pool_key): + external_ids = copy.deepcopy(ovn_lb.external_ids) + existing_members = external_ids[pool_key] + member_info = self._get_member_key(member) + if member_info in existing_members: + # Member already present + return + if existing_members: + pool_data = {pool_key: existing_members + "," + member_info} + else: + pool_data = {pool_key: member_info} + + commands = [] + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', pool_data)) + ) + + external_ids[pool_key] = pool_data[pool_key] + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids) + ) + commands.extend( + self._update_lb_to_ls_association( + ovn_lb, subnet_id=member['subnet_id'], associate=True) + ) + self._execute_commands(commands) + + def member_create(self, member): + try: + pool_key, ovn_lb = self._find_ovn_lb_by_id( + member['pool_id']) + self._add_member(member, ovn_lb, pool_key) + pool = {"id": member['pool_id'], + "provisioning_status": constants.ACTIVE, + "operating_status": constants.ONLINE} + status = { + 'pools': [pool], + 'members': [{"id": member['id'], + "provisioning_status": constants.ACTIVE}], + 'loadbalancers': [{"id": ovn_lb.name, + "provisioning_status": constants.ACTIVE}]} + pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) + listener_status = [] + for l in pool_listeners: + listener_status.append( + {'id': l, + 'provisioning_status': constants.ACTIVE}) + status['listeners'] = listener_status + except Exception: + LOG.exception(EXCEPTION_MSG, "creation of member") + status = { + 'pools': [{"id": member['pool_id'], + "provisioning_status": constants.ERROR}], + 'members': [{"id": member['id'], + "provisioning_status": constants.ACTIVE}], + 'loadbalancers': [{"id": ovn_lb.name, + "provisioning_status": constants.ACTIVE}]} + + return status + + def _remove_member(self, member, ovn_lb, pool_key): 
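+        # Illustrative note: external_ids[pool_key] is a comma-separated
+        # string of entries shaped like 'member_<uuid>_<address>:<port>'
+        # (see _get_member_key), so removing a member is a rebuild of that
+        # list followed by a refresh of the LB vips.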
+ external_ids = copy.deepcopy(ovn_lb.external_ids) + existing_members = external_ids[pool_key].split(",") + member_info = self._get_member_key(member) + if member_info in existing_members: + commands = [] + existing_members.remove(member_info) + if not existing_members: + pool_status = constants.OFFLINE + else: + pool_status = constants.ONLINE + pool_data = {pool_key: ",".join(existing_members)} + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', pool_data)) + ) + external_ids[pool_key] = ",".join(existing_members) + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids) + ) + + commands.extend( + self._update_lb_to_ls_association( + ovn_lb, subnet_id=member['subnet_id'], associate=False) + ) + self._execute_commands(commands) + return pool_status + else: + msg = "Member %s not found in the pool" % member['id'] + raise driver_exceptions.DriverError( + user_fault_string=msg, + operator_fault_string=msg) + + def member_delete(self, member): + try: + pool_key, ovn_lb = self._find_ovn_lb_by_id( + member['pool_id']) + pool_status = self._remove_member(member, ovn_lb, pool_key) + pool = {"id": member['pool_id'], + "provisioning_status": constants.ACTIVE, + "operating_status": pool_status} + status = { + 'pools': [pool], + 'members': [{"id": member['id'], + "provisioning_status": constants.DELETED}], + 'loadbalancers': [{"id": ovn_lb.name, + "provisioning_status": constants.ACTIVE}]} + pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) + listener_status = [] + for l in pool_listeners: + listener_status.append( + {'id': l, + 'provisioning_status': constants.ACTIVE}) + status['listeners'] = listener_status + except Exception: + LOG.exception(EXCEPTION_MSG, "deletion of member") + status = { + 'pools': [{"id": member['pool_id'], + "provisioning_status": constants.ACTIVE}], + 'members': [{"id": member['id'], + "provisioning_status": constants.ERROR}], + 'loadbalancers': [{"id": ovn_lb.name, + "provisioning_status": constants.ACTIVE}]} + + return status + + def _update_member(self, member, ovn_lb, pool_key): + commands = [] + external_ids = copy.deepcopy(ovn_lb.external_ids) + existing_members = external_ids[pool_key].split(",") + member_info = self._get_member_key(member) + for mem in existing_members: + if (member_info.split('_')[1] == mem.split('_')[1] and + mem != member_info): + existing_members.remove(mem) + existing_members.append(member_info) + pool_data = {pool_key: ",".join(existing_members)} + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', pool_data)) + ) + external_ids[pool_key] = ",".join(existing_members) + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids) + ) + self._execute_commands(commands) + + def member_update(self, member): + try: + pool_key, ovn_lb = self._find_ovn_lb_by_id( + member['pool_id']) + status = { + 'pools': [{'id': member['pool_id'], + 'provisioning_status': constants.ACTIVE}], + 'members': [{'id': member['id'], + 'provisioning_status': constants.ACTIVE}], + 'loadbalancers': [{'id': ovn_lb.name, + 'provisioning_status': constants.ACTIVE}]} + self._update_member(member, ovn_lb, pool_key) + if 'admin_state_up' in member: + if member['admin_state_up']: + status['members'][0]['operating_status'] = constants.ONLINE + else: + status['members'][0][ + 'operating_status'] = constants.OFFLINE + + pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) + listener_status = [] + for l in pool_listeners: + listener_status.append( + {'id': l, + 
'provisioning_status': constants.ACTIVE})
+            status['listeners'] = listener_status
+        except Exception:
+            LOG.exception(EXCEPTION_MSG, "update of member")
+            status = {
+                'pools': [{'id': member['pool_id'],
+                           'provisioning_status': constants.ACTIVE}],
+                'members': [{'id': member['id'],
+                             'provisioning_status': constants.ERROR}],
+                'loadbalancers': [{'id': ovn_lb.name,
+                                   'provisioning_status': constants.ACTIVE}]}
+        return status
+
+    def _get_existing_pool_members(self, pool_id):
+        pool_key, ovn_lb = self._find_ovn_lb_by_id(pool_id)
+        if not ovn_lb:
+            msg = _("Loadbalancer with pool %s does not exist") % pool_key
+            raise driver_exceptions.DriverError(msg)
+        external_ids = dict(ovn_lb.external_ids)
+        return external_ids[pool_key]
+
+    def get_pool_member_id(self, pool_id, mem_addr_port=None):
+        '''Gets Member information
+
+        :param pool_id: ID of the Pool whose member information is required.
+        :param mem_addr_port: Combination of Member Address+Port. Default=None
+        :returns: UUID -- ID of the Member if member exists in pool.
+        :returns: None -- if no member exists in the pool
+        :raises: Exception if Loadbalancer is not found for a Pool ID
+        '''
+        existing_members = self._get_existing_pool_members(pool_id)
+        # Members are saved in OVN in the form of
+        # member1_UUID_IP:Port, member2_UUID_IP:Port
+        # Match the IP:Port for all members with the mem_addr_port
+        # information and return the UUID.
+        for meminf in existing_members.split(','):
+            if mem_addr_port == meminf.split('_')[2]:
+                return meminf.split('_')[1]
+
+    def get_member_info(self, pool_id):
+        '''Gets Member information
+
+        :param pool_id: ID of the Pool whose member information is required.
+        :returns: List -- List of (Member ID, Address:Port) tuples for all
+                  members in the pool.
+        :returns: [None] -- if no member exists in the pool.
+        :raises: Exception if Loadbalancer is not found for a Pool ID
+        '''
+        existing_members = self._get_existing_pool_members(pool_id)
+        # Members are saved in OVN in the form of
+        # member1_UUID_IP:Port, member2_UUID_IP:Port
+        # Return the list of (UUID, IP:Port) for all members.
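+        # Illustrative example (hypothetical values): for a stored string
+        # 'member_<uuid1>_10.0.0.4:80,member_<uuid2>_10.0.0.5:80' this
+        # returns [('<uuid1>', '10.0.0.4:80'), ('<uuid2>', '10.0.0.5:80')].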
+ return [(meminf.split('_')[1], meminf.split( + '_')[2]) for meminf in existing_members.split(',')] + + def create_vip_port(self, project_id, lb_id, vip_d): + port = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id), + 'network_id': vip_d['vip_network_id'], + 'fixed_ips': [{'subnet_id': vip_d['vip_subnet_id']}], + 'admin_state_up': True, + 'project_id': project_id}} + try: + port['port']['fixed_ips'][0]['ip_address'] = vip_d['vip_address'] + except KeyError: + pass + network_driver = get_network_driver() + return network_driver.neutron_client.create_port(port) + + def delete_vip_port(self, port_id): + network_driver = get_network_driver() + network_driver.neutron_client.delete_port(port_id) + + def handle_vip_fip(self, fip_info): + try: + ovn_lb = self._find_ovn_lb(fip_info['lb_id']) + except idlutils.RowNotFound: + LOG.debug("Loadbalancer %s not found!", fip_info['lb_id']) + return + external_ids = copy.deepcopy(ovn_lb.external_ids) + commands = [] + + if fip_info['action'] == REQ_INFO_ACTION_ASSOCIATE: + external_ids[LB_EXT_IDS_VIP_FIP_KEY] = fip_info['vip_fip'] + vip_fip_info = {LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']} + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', vip_fip_info)) + ) + else: + external_ids.pop(LB_EXT_IDS_VIP_FIP_KEY) + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (LB_EXT_IDS_VIP_FIP_KEY)) + ) + + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids) + ) + self._execute_commands(commands) + + +class OvnProviderDriver(driver_base.ProviderDriver): + def __init__(self): + super(OvnProviderDriver, self).__init__() + self._ovn_helper = OvnProviderHelper() + + def _check_for_supported_protocols(self, protocol): + if protocol not in OVN_NATIVE_LB_PROTOCOLS: + msg = _('OVN provider does not support %s protocol') % protocol + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + def _check_for_supported_algorithms(self, algorithm): + if algorithm not in OVN_NATIVE_LB_ALGORITHMS: + msg = _('OVN provider does not support %s algorithm') % algorithm + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + def loadbalancer_create(self, loadbalancer): + admin_state_up = loadbalancer.admin_state_up + if isinstance(admin_state_up, o_datamodels.UnsetType): + admin_state_up = True + request_info = {'id': loadbalancer.loadbalancer_id, + 'vip_address': loadbalancer.vip_address, + 'vip_network_id': loadbalancer.vip_network_id, + 'admin_state_up': admin_state_up} + + request = {'type': REQ_TYPE_LB_CREATE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def loadbalancer_delete(self, loadbalancer, cascade=False): + request_info = {'id': loadbalancer.loadbalancer_id, + 'cascade': cascade} + request = {'type': REQ_TYPE_LB_DELETE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def loadbalancer_failover(self, loadbalancer_id): + request_info = {'id': loadbalancer_id} + request = {'type': REQ_TYPE_LB_FAILOVER, + 'info': request_info} + self._ovn_helper.add_request(request) + + def loadbalancer_update(self, old_loadbalancer, new_loadbalncer): + request_info = {'id': new_loadbalncer.loadbalancer_id} + if not isinstance( + new_loadbalncer.admin_state_up, o_datamodels.UnsetType): + request_info['admin_state_up'] = new_loadbalncer.admin_state_up + request = {'type': REQ_TYPE_LB_UPDATE, + 'info': request_info} + 
self._ovn_helper.add_request(request) + + # Pool + def pool_create(self, pool): + self._check_for_supported_protocols(pool.protocol) + self._check_for_supported_algorithms(pool.lb_algorithm) + admin_state_up = pool.admin_state_up + if isinstance(admin_state_up, o_datamodels.UnsetType): + admin_state_up = True + request_info = {'id': pool.pool_id, + 'loadbalancer_id': pool.loadbalancer_id, + 'listener_id': pool.listener_id, + 'admin_state_up': admin_state_up} + request = {'type': REQ_TYPE_POOL_CREATE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def pool_delete(self, pool): + for member in pool.members: + self.member_delete(member) + + request_info = {'id': pool.pool_id, + 'loadbalancer_id': pool.loadbalancer_id} + request = {'type': REQ_TYPE_POOL_DELETE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def pool_update(self, old_pool, new_pool): + if not isinstance(new_pool.protocol, o_datamodels.UnsetType): + self._check_for_supported_protocols(new_pool.protocol) + if not isinstance(new_pool.lb_algorithm, o_datamodels.UnsetType): + self._check_for_supported_algorithms(new_pool.lb_algorithm) + request_info = {'id': new_pool.pool_id, + 'loadbalancer_id': old_pool.loadbalancer_id} + + if not isinstance(new_pool.admin_state_up, o_datamodels.UnsetType): + request_info['admin_state_up'] = new_pool.admin_state_up + request = {'type': REQ_TYPE_POOL_UPDATE, + 'info': request_info} + self._ovn_helper.add_request(request) + + # Listener + def _check_listener_protocol(self, listener): + self._check_for_supported_protocols(listener.protocol) + if not self._ovn_helper.check_lb_protocol( + listener.loadbalancer_id, listener.protocol): + msg = (_('The loadbalancer %(lb)s does not support %(proto)s ' + 'protocol') % { + 'lb': listener.loadbalancer_id, + 'proto': listener.protocol}) + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + def listener_create(self, listener): + self._check_listener_protocol(listener) + admin_state_up = listener.admin_state_up + if isinstance(admin_state_up, o_datamodels.UnsetType): + admin_state_up = True + request_info = {'id': listener.listener_id, + 'protocol': listener.protocol, + 'loadbalancer_id': listener.loadbalancer_id, + 'protocol_port': listener.protocol_port, + 'default_pool_id': listener.default_pool_id, + 'admin_state_up': admin_state_up} + request = {'type': REQ_TYPE_LISTENER_CREATE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def listener_delete(self, listener): + request_info = {'id': listener.listener_id, + 'loadbalancer_id': listener.loadbalancer_id, + 'protocol_port': listener.protocol_port} + request = {'type': REQ_TYPE_LISTENER_DELETE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def listener_update(self, old_listener, new_listener): + if not isinstance(new_listener.protocol, o_datamodels.UnsetType): + self._check_listener_protocol(new_listener) + request_info = {'id': new_listener.listener_id, + 'loadbalancer_id': old_listener.loadbalancer_id, + 'protocol_port': old_listener.protocol_port} + + if not isinstance(new_listener.admin_state_up, o_datamodels.UnsetType): + request_info['admin_state_up'] = new_listener.admin_state_up + + if not isinstance(new_listener.default_pool_id, + o_datamodels.UnsetType): + request_info['default_pool_id'] = new_listener.default_pool_id + + request = {'type': REQ_TYPE_LISTENER_UPDATE, + 'info': request_info} + self._ovn_helper.add_request(request) + + # Member + def 
_check_monitor_options(self, member): + if (isinstance(member.monitor_address, o_datamodels.UnsetType) and + isinstance(member.monitor_port, o_datamodels.UnsetType)): + return False + if member.monitor_address or member.monitor_port: + return True + return False + + def _ip_version_differs(self, member): + _, ovn_lb = self._ovn_helper._find_ovn_lb_by_id(member.pool_id) + lb_vip = ovn_lb.external_ids[LB_EXT_IDS_VIP_KEY] + return netaddr.IPNetwork(lb_vip).version != ( + netaddr.IPNetwork(member.address).version) + + def member_create(self, member): + if self._check_monitor_options(member): + msg = _('OVN provider does not support monitor options') + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + if self._ip_version_differs(member): + raise IPVersionsMixingNotSupportedError() + admin_state_up = member.admin_state_up + if isinstance(member.subnet_id, o_datamodels.UnsetType): + msg = _('Subnet is required for Member creation' + ' with OVN Provider Driver') + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + if isinstance(admin_state_up, o_datamodels.UnsetType): + admin_state_up = True + request_info = {'id': member.member_id, + 'address': member.address, + 'protocol_port': member.protocol_port, + 'pool_id': member.pool_id, + 'subnet_id': member.subnet_id, + 'admin_state_up': admin_state_up} + request = {'type': REQ_TYPE_MEMBER_CREATE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def member_delete(self, member): + request_info = {'id': member.member_id, + 'address': member.address, + 'protocol_port': member.protocol_port, + 'pool_id': member.pool_id, + 'subnet_id': member.subnet_id} + request = {'type': REQ_TYPE_MEMBER_DELETE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def member_update(self, old_member, new_member): + if self._check_monitor_options(new_member): + msg = _('OVN provider does not support monitor options') + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + if new_member.address and self._ip_version_differs(new_member): + raise IPVersionsMixingNotSupportedError() + request_info = {'id': new_member.member_id, + 'address': old_member.address, + 'protocol_port': old_member.protocol_port, + 'pool_id': old_member.pool_id, + 'subnet_id': old_member.subnet_id} + if not isinstance(new_member.admin_state_up, o_datamodels.UnsetType): + request_info['admin_state_up'] = new_member.admin_state_up + request = {'type': REQ_TYPE_MEMBER_UPDATE, + 'info': request_info} + self._ovn_helper.add_request(request) + + def member_batch_update(self, members): + # Note(rbanerje): all members belong to the same pool. 
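+        # The batch is reconciled against the members currently stored in the
+        # OVN Load_Balancer row: members not present yet are queued as
+        # creations, members already present are queued as updates, and any
+        # entry left over in current_members afterwards is queued for
+        # deletion.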
+ request_list = [] + skipped_members = [] + pool_id = None + try: + pool_id = members[0].pool_id + except IndexError: + msg = (_('No member information has been passed')) + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + except AttributeError: + msg = (_('Member does not have proper pool information')) + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + current_members = self._ovn_helper.get_member_info(pool_id) + # current_members gets a list of tuples (ID, IP:Port) for pool members + for member in members: + if (self._check_monitor_options(member) or + member.address and self._ip_version_differs(member)): + skipped_members.append(member.member_id) + continue + admin_state_up = member.admin_state_up + if isinstance(admin_state_up, o_datamodels.UnsetType): + admin_state_up = True + mem_addr_port = str(member.address) + ':' + str( + member.protocol_port) + if (member.member_id, mem_addr_port) not in current_members: + req_type = REQ_TYPE_MEMBER_CREATE + else: + # If member exists in pool, then Update + req_type = REQ_TYPE_MEMBER_UPDATE + current_members.remove((member.member_id, mem_addr_port)) + # Remove all updating members so only deleted ones are left + request_info = {'id': member.member_id, + 'address': member.address, + 'protocol_port': member.protocol_port, + 'pool_id': member.pool_id, + 'subnet_id': member.subnet_id, + 'admin_state_up': admin_state_up} + request = {'type': req_type, + 'info': request_info} + request_list.append(request) + for cmember in current_members: + request_info = {'id': cmember[0], + 'address': cmember[1].split(':')[0], + 'protocol_port': cmember[1].split(':')[1], + 'pool_id': pool_id} + request = {'type': REQ_TYPE_MEMBER_DELETE, + 'info': request_info} + request_list.append(request) + for request in request_list: + self._ovn_helper.add_request(request) + if skipped_members: + msg = (_('OVN provider does not support monitor options, ' + 'so following members skipped: %s') % skipped_members) + raise driver_exceptions.UnsupportedOptionError( + user_fault_string=msg, + operator_fault_string=msg) + + def create_vip_port(self, lb_id, project_id, vip_dict): + try: + port = self._ovn_helper.create_vip_port( + project_id, lb_id, vip_dict)['port'] + vip_dict['vip_port_id'] = port['id'] + vip_dict['vip_address'] = port['fixed_ips'][0]['ip_address'] + except Exception as e: + raise driver_exceptions.DriverError(e) + return vip_dict diff --git a/ovn_octavia_provider/i18n.py b/ovn_octavia_provider/i18n.py new file mode 100644 index 00000000..54e80c33 --- /dev/null +++ b/ovn_octavia_provider/i18n.py @@ -0,0 +1,20 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import oslo_i18n as i18n + +_translators = i18n.TranslatorFactory(domain='octavia') + +# The primary translation function using the well-known name "_" +_ = _translators.primary diff --git a/ovn_octavia_provider/ovsdb/impl_idl_ovn.py b/ovn_octavia_provider/ovsdb/impl_idl_ovn.py new file mode 100644 index 00000000..785288ee --- /dev/null +++ b/ovn_octavia_provider/ovsdb/impl_idl_ovn.py @@ -0,0 +1,138 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +from neutron_lib import exceptions as n_exc +from oslo_log import log +from ovsdbapp.backend import ovs_idl +from ovsdbapp.backend.ovs_idl import idlutils +from ovsdbapp.backend.ovs_idl import transaction as idl_trans +from ovsdbapp.schema.ovn_northbound import impl_idl as nb_impl_idl +import tenacity + +from ovn_octavia_provider.common import config +from ovn_octavia_provider.common import exceptions as ovn_exc +from ovn_octavia_provider.i18n import _ + + +LOG = log.getLogger(__name__) + + +class OvnNbTransaction(idl_trans.Transaction): + + def __init__(self, *args, **kwargs): + # NOTE(lucasagomes): The bump_nb_cfg parameter is only used by + # the agents health status check + self.bump_nb_cfg = kwargs.pop('bump_nb_cfg', False) + super(OvnNbTransaction, self).__init__(*args, **kwargs) + + def pre_commit(self, txn): + if not self.bump_nb_cfg: + return + self.api.nb_global.increment('nb_cfg') + + +# This version of Backend doesn't use a class variable for ovsdb_connection +# and therefor allows networking-ovn to manage connection scope on its own +class Backend(ovs_idl.Backend): + lookup_table = {} + + def __init__(self, connection): + self.ovsdb_connection = connection + super(Backend, self).__init__(connection) + + def start_connection(self, connection): + try: + self.ovsdb_connection.start() + except Exception as e: + connection_exception = OvsdbConnectionUnavailable( + db_schema=self.schema, error=e) + LOG.exception(connection_exception) + raise connection_exception + + @property + def idl(self): + return self.ovsdb_connection.idl + + @property + def tables(self): + return self.idl.tables + + _tables = tables + + def is_table_present(self, table_name): + return table_name in self._tables + + def is_col_present(self, table_name, col_name): + return self.is_table_present(table_name) and ( + col_name in self._tables[table_name].columns) + + def create_transaction(self, check_error=False, log_errors=True): + return idl_trans.Transaction( + self, self.ovsdb_connection, self.ovsdb_connection.timeout, + check_error, log_errors) + + # Check for a column match in the table. If not found do a retry with + # a stop delay of 10 secs. This function would be useful if the caller + # wants to verify for the presence of a particular row in the table + # with the column match before doing any transaction. + # Eg. We can check if Logical_Switch row is present before adding a + # logical switch port to it. 
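+    # An illustrative call for that example (ls_name is hypothetical):
+    #   self.check_for_row_by_value_and_retry('Logical_Switch', 'name', ls_name)
+    # With reraise=True, the RuntimeError propagates once the 10 second stop
+    # delay is exhausted.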
+ @tenacity.retry(retry=tenacity.retry_if_exception_type(RuntimeError), + wait=tenacity.wait_exponential(), + stop=tenacity.stop_after_delay(10), + reraise=True) + def check_for_row_by_value_and_retry(self, table, column, match): + try: + idlutils.row_by_value(self.idl, table, column, match) + except idlutils.RowNotFound: + msg = (_("%(match)s does not exist in %(column)s of %(table)s") + % {'match': match, 'column': column, 'table': table}) + raise RuntimeError(msg) + + +class OvsdbConnectionUnavailable(n_exc.ServiceUnavailable): + message = _("OVS database connection to %(db_schema)s failed with error: " + "'%(error)s'. Verify that the OVS and OVN services are " + "available and that the 'ovn_nb_connection' and " + "'ovn_sb_connection' configuration options are correct.") + + +class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): + def __init__(self, connection): + super(OvsdbNbOvnIdl, self).__init__(connection) + self.idl._session.reconnect.set_probe_interval( + config.get_ovn_ovsdb_probe_interval()) + + @property + def nb_global(self): + return next(iter(self.tables['NB_Global'].rows.values())) + + def create_transaction(self, check_error=False, log_errors=True, + bump_nb_cfg=False): + return OvnNbTransaction( + self, self.ovsdb_connection, self.ovsdb_connection.timeout, + check_error, log_errors, bump_nb_cfg=bump_nb_cfg) + + @contextlib.contextmanager + def transaction(self, *args, **kwargs): + """A wrapper on the ovsdbapp transaction to work with revisions. + + This method is just a wrapper around the ovsdbapp transaction + to handle revision conflicts correctly. + """ + try: + with super(OvsdbNbOvnIdl, self).transaction(*args, **kwargs) as t: + yield t + except ovn_exc.RevisionConflict as e: + LOG.info('Transaction aborted. Reason: %s', e) diff --git a/ovn_octavia_provider/ovsdb/ovsdb_monitor.py b/ovn_octavia_provider/ovsdb/ovsdb_monitor.py new file mode 100644 index 00000000..f72f120d --- /dev/null +++ b/ovn_octavia_provider/ovsdb/ovsdb_monitor.py @@ -0,0 +1,90 @@ +# Copyright 2020 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from oslo_config import cfg +from oslo_log import log +from ovs.stream import Stream +from ovsdbapp.backend.ovs_idl import connection +from ovsdbapp.backend.ovs_idl import idlutils +from ovsdbapp import event + +from ovn_octavia_provider.common import config as ovn_config + +CONF = cfg.CONF +LOG = log.getLogger(__name__) + + +class BaseOvnIdl(connection.OvsdbIdl): + @classmethod + def from_server(cls, connection_string, schema_name): + _check_and_set_ssl_files(schema_name) + helper = idlutils.get_schema_helper(connection_string, schema_name) + helper.register_all() + return cls(connection_string, helper) + + +class OvnIdl(BaseOvnIdl): + + def __init__(self, driver, remote, schema): + super(OvnIdl, self).__init__(remote, schema) + self.driver = driver + self.notify_handler = OvnDbNotifyHandler(driver) + # ovsdb lock name to acquire. 
+ # This event lock is used to handle the notify events sent by idl.Idl + # idl.Idl will call notify function for the "update" rpc method it + # receives from the ovsdb-server. + # This event lock is required for the following reasons + # - If there are multiple neutron servers running, OvnWorkers of + # these neutron servers would receive the notify events from + # idl.Idl + # + # - we do not want all the neutron servers to handle these events + # + # - only the neutron server which has the lock will handle the + # notify events. + # + # - In case the neutron server which owns this lock goes down, + # ovsdb server would assign the lock to one of the other neutron + # servers. + self.event_lock_name = "ovn_provider_driver_event_lock" + + def notify(self, event, row, updates=None): + # Do not handle the notification if the event lock is requested, + # but not granted by the ovsdb-server. + if self.is_lock_contended: + return + self.notify_handler.notify(event, row, updates) + + @abc.abstractmethod + def post_connect(self): + """Should be called after the idl has been initialized""" + + +class OvnDbNotifyHandler(event.RowEventHandler): + def __init__(self, driver): + super(OvnDbNotifyHandler, self).__init__() + self.driver = driver + + +def _check_and_set_ssl_files(schema_name): + if schema_name == 'OVN_Northbound': + priv_key_file = ovn_config.get_ovn_nb_private_key() + cert_file = ovn_config.get_ovn_nb_certificate() + ca_cert_file = ovn_config.get_ovn_nb_ca_cert() + + Stream.ssl_set_private_key_file(priv_key_file) + Stream.ssl_set_certificate_file(cert_file) + Stream.ssl_set_ca_cert_file(ca_cert_file) diff --git a/ovn_octavia_provider/tests/unit/fakes.py b/ovn_octavia_provider/tests/unit/fakes.py new file mode 100644 index 00000000..a7e210c0 --- /dev/null +++ b/ovn_octavia_provider/tests/unit/fakes.py @@ -0,0 +1,222 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import copy + +import mock +from oslo_utils import uuidutils + + +class FakeResource(dict): + + def __init__(self, manager=None, info=None, loaded=False, methods=None): + """Set attributes and methods for a resource. + + :param manager: + The resource manager + :param Dictionary info: + A dictionary with all attributes + :param bool loaded: + True if the resource is loaded in memory + :param Dictionary methods: + A dictionary with all methods + """ + info = info or {} + super(FakeResource, self).__init__(info) + methods = methods or {} + + self.__name__ = type(self).__name__ + self.manager = manager + self._info = info + self._add_details(info) + self._add_methods(methods) + self._loaded = loaded + # Add a revision number by default + setattr(self, 'revision_number', 1) + + @property + def db_obj(self): + return self + + def _add_details(self, info): + for (k, v) in info.items(): + setattr(self, k, v) + + def _add_methods(self, methods): + """Fake methods with MagicMock objects. 
+ + For each <@key, @value> pairs in methods, add an callable MagicMock + object named @key as an attribute, and set the mock's return_value to + @value. When users access the attribute with (), @value will be + returned, which looks like a function call. + """ + for (name, ret) in methods.items(): + method = mock.MagicMock(return_value=ret) + setattr(self, name, method) + + def __repr__(self): + reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and + k != 'manager') + info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys) + return "<%s %s>" % (self.__class__.__name__, info) + + def keys(self): + return self._info.keys() + + def info(self): + return self._info + + def update(self, info): + super(FakeResource, self).update(info) + self._add_details(info) + + +class FakeOvsdbRow(FakeResource): + """Fake one or more OVSDB rows.""" + + @staticmethod + def create_one_ovsdb_row(attrs=None, methods=None): + """Create a fake OVSDB row. + + :param Dictionary attrs: + A dictionary with all attributes + :param Dictionary methods: + A dictionary with all methods + :return: + A FakeResource object faking the OVSDB row + """ + attrs = attrs or {} + methods = methods or {} + + # Set default attributes. + fake_uuid = uuidutils.generate_uuid() + ovsdb_row_attrs = { + 'uuid': fake_uuid, + 'name': 'name-' + fake_uuid, + 'external_ids': {}, + } + + # Set default methods. + ovsdb_row_methods = { + 'addvalue': None, + 'delete': None, + 'delvalue': None, + 'verify': None, + 'setkey': None, + } + + # Overwrite default attributes and methods. + ovsdb_row_attrs.update(attrs) + ovsdb_row_methods.update(methods) + + return FakeResource(info=copy.deepcopy(ovsdb_row_attrs), + loaded=True, + methods=copy.deepcopy(ovsdb_row_methods)) + + +class FakeSubnet(object): + """Fake one or more subnets.""" + + @staticmethod + def create_one_subnet(attrs=None): + """Create a fake subnet. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A FakeResource object faking the subnet + """ + attrs = attrs or {} + + # Set default attributes. + fake_uuid = uuidutils.generate_uuid() + subnet_attrs = { + 'id': 'subnet-id-' + fake_uuid, + 'name': 'subnet-name-' + fake_uuid, + 'network_id': 'network-id-' + fake_uuid, + 'cidr': '10.10.10.0/24', + 'tenant_id': 'project-id-' + fake_uuid, + 'enable_dhcp': True, + 'dns_nameservers': [], + 'allocation_pools': [], + 'host_routes': [], + 'ip_version': 4, + 'gateway_ip': '10.10.10.1', + 'ipv6_address_mode': 'None', + 'ipv6_ra_mode': 'None', + 'subnetpool_id': None, + } + + # Overwrite default attributes. + subnet_attrs.update(attrs) + + return FakeResource(info=copy.deepcopy(subnet_attrs), + loaded=True) + + +class FakeOVNPort(object): + """Fake one or more ports.""" + + @staticmethod + def create_one_port(attrs=None): + """Create a fake ovn port. + + :param Dictionary attrs: + A dictionary with all attributes + :return: + A FakeResource object faking the port + """ + attrs = attrs or {} + + # Set default attributes. + fake_uuid = uuidutils.generate_uuid() + port_attrs = { + 'addresses': [], + 'dhcpv4_options': '', + 'dhcpv6_options': [], + 'enabled': True, + 'external_ids': {}, + 'name': fake_uuid, + 'options': {}, + 'parent_name': [], + 'port_security': [], + 'tag': [], + 'tag_request': [], + 'type': '', + 'up': False, + } + + # Overwrite default attributes. 
+ port_attrs.update(attrs) + return type('Logical_Switch_Port', (object, ), port_attrs) + + +class FakeOVNRouter(object): + + @staticmethod + def create_one_router(attrs=None): + router_attrs = { + 'enabled': False, + 'external_ids': {}, + 'load_balancer': [], + 'name': '', + 'nat': [], + 'options': {}, + 'ports': [], + 'static_routes': [], + } + + # Overwrite default attributes. + router_attrs.update(attrs) + return type('Logical_Router', (object, ), router_attrs) diff --git a/ovn_octavia_provider/tests/unit/schemas/ovn-nb.ovsschema b/ovn_octavia_provider/tests/unit/schemas/ovn-nb.ovsschema new file mode 100644 index 00000000..2c87cbba --- /dev/null +++ b/ovn_octavia_provider/tests/unit/schemas/ovn-nb.ovsschema @@ -0,0 +1,449 @@ +{ + "name": "OVN_Northbound", + "version": "5.16.0", + "cksum": "923459061 23095", + "tables": { + "NB_Global": { + "columns": { + "nb_cfg": {"type": {"key": "integer"}}, + "sb_cfg": {"type": {"key": "integer"}}, + "hv_cfg": {"type": {"key": "integer"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "connections": { + "type": {"key": {"type": "uuid", + "refTable": "Connection"}, + "min": 0, + "max": "unlimited"}}, + "ssl": { + "type": {"key": {"type": "uuid", + "refTable": "SSL"}, + "min": 0, "max": 1}}, + "options": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "ipsec": {"type": "boolean"}}, + "maxRows": 1, + "isRoot": true}, + "Logical_Switch": { + "columns": { + "name": {"type": "string"}, + "ports": {"type": {"key": {"type": "uuid", + "refTable": "Logical_Switch_Port", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "acls": {"type": {"key": {"type": "uuid", + "refTable": "ACL", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "qos_rules": {"type": {"key": {"type": "uuid", + "refTable": "QoS", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "load_balancer": {"type": {"key": {"type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "dns_records": {"type": {"key": {"type": "uuid", + "refTable": "DNS", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Logical_Switch_Port": { + "columns": { + "name": {"type": "string"}, + "type": {"type": "string"}, + "options": { + "type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "parent_name": {"type": {"key": "string", "min": 0, "max": 1}}, + "tag_request": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "tag": { + "type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "addresses": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "dynamic_addresses": {"type": {"key": "string", + "min": 0, + "max": 1}}, + "port_security": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "dhcpv4_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 1}}, + "dhcpv6_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 
1}}, + "ha_chassis_group": { + "type": {"key": {"type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong"}, + "min": 0, + "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": false}, + "Address_Set": { + "columns": { + "name": {"type": "string"}, + "addresses": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": true}, + "Port_Group": { + "columns": { + "name": {"type": "string"}, + "ports": {"type": {"key": {"type": "uuid", + "refTable": "Logical_Switch_Port", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "acls": {"type": {"key": {"type": "uuid", + "refTable": "ACL", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": true}, + "Load_Balancer": { + "columns": { + "name": {"type": "string"}, + "vips": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "protocol": { + "type": {"key": {"type": "string", + "enum": ["set", ["tcp", "udp"]]}, + "min": 0, "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "ACL": { + "columns": { + "name": {"type": {"key": {"type": "string", + "maxLength": 63}, + "min": 0, "max": 1}}, + "priority": {"type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 32767}}}, + "direction": {"type": {"key": {"type": "string", + "enum": ["set", ["from-lport", "to-lport"]]}}}, + "match": {"type": "string"}, + "action": {"type": {"key": {"type": "string", + "enum": ["set", ["allow", "allow-related", "drop", "reject"]]}}}, + "log": {"type": "boolean"}, + "severity": {"type": {"key": {"type": "string", + "enum": ["set", + ["alert", "warning", + "notice", "info", + "debug"]]}, + "min": 0, "max": 1}}, + "meter": {"type": {"key": "string", "min": 0, "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": false}, + "QoS": { + "columns": { + "priority": {"type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 32767}}}, + "direction": {"type": {"key": {"type": "string", + "enum": ["set", ["from-lport", "to-lport"]]}}}, + "match": {"type": "string"}, + "action": {"type": {"key": {"type": "string", + "enum": ["set", ["dscp"]]}, + "value": {"type": "integer", + "minInteger": 0, + "maxInteger": 63}, + "min": 0, "max": "unlimited"}}, + "bandwidth": {"type": {"key": {"type": "string", + "enum": ["set", ["rate", + "burst"]]}, + "value": {"type": "integer", + "minInteger": 1, + "maxInteger": 4294967295}, + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": false}, + "Meter": { + "columns": { + "name": {"type": "string"}, + "unit": {"type": {"key": {"type": "string", + "enum": ["set", ["kbps", "pktps"]]}}}, + "bands": {"type": {"key": {"type": "uuid", + "refTable": "Meter_Band", + "refType": "strong"}, + "min": 1, + "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": true}, + "Meter_Band": { + "columns": { + "action": {"type": {"key": {"type": "string", + "enum": 
["set", ["drop"]]}}}, + "rate": {"type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 4294967295}}}, + "burst_size": {"type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4294967295}}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": false}, + "Logical_Router": { + "columns": { + "name": {"type": "string"}, + "ports": {"type": {"key": {"type": "uuid", + "refTable": "Logical_Router_Port", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "static_routes": {"type": {"key": {"type": "uuid", + "refTable": "Logical_Router_Static_Route", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "policies": { + "type": {"key": {"type": "uuid", + "refTable": "Logical_Router_Policy", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "nat": {"type": {"key": {"type": "uuid", + "refTable": "NAT", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "load_balancer": {"type": {"key": {"type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "options": { + "type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Logical_Router_Port": { + "columns": { + "name": {"type": "string"}, + "gateway_chassis": { + "type": {"key": {"type": "uuid", + "refTable": "Gateway_Chassis", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "ha_chassis_group": { + "type": {"key": {"type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong"}, + "min": 0, + "max": 1}}, + "options": { + "type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "networks": {"type": {"key": "string", + "min": 1, + "max": "unlimited"}}, + "mac": {"type": "string"}, + "peer": {"type": {"key": "string", "min": 0, "max": 1}}, + "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "ipv6_ra_configs": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": false}, + "Logical_Router_Static_Route": { + "columns": { + "ip_prefix": {"type": "string"}, + "policy": {"type": {"key": {"type": "string", + "enum": ["set", ["src-ip", + "dst-ip"]]}, + "min": 0, "max": 1}}, + "nexthop": {"type": "string"}, + "output_port": {"type": {"key": "string", "min": 0, "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": false}, + "Logical_Router_Policy": { + "columns": { + "priority": {"type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 32767}}}, + "match": {"type": "string"}, + "action": {"type": { + "key": {"type": "string", + "enum": ["set", ["allow", "drop", "reroute"]]}}}, + "nexthop": {"type": {"key": "string", "min": 0, "max": 1}}}, + "isRoot": false}, + "NAT": { + "columns": { + "external_ip": {"type": "string"}, + "external_mac": {"type": {"key": "string", + "min": 0, "max": 1}}, + "logical_ip": {"type": "string"}, + "logical_port": {"type": {"key": "string", + "min": 0, "max": 1}}, + "type": {"type": {"key": {"type": "string", + "enum": ["set", ["dnat", + "snat", + "dnat_and_snat" + ]]}}}, + "external_ids": { + "type": {"key": "string", "value": "string", + 
"min": 0, "max": "unlimited"}}}, + "isRoot": false}, + "DHCP_Options": { + "columns": { + "cidr": {"type": "string"}, + "options": {"type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Connection": { + "columns": { + "target": {"type": "string"}, + "max_backoff": {"type": {"key": {"type": "integer", + "minInteger": 1000}, + "min": 0, + "max": 1}}, + "inactivity_probe": {"type": {"key": "integer", + "min": 0, + "max": 1}}, + "other_config": {"type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "external_ids": {"type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "is_connected": {"type": "boolean", "ephemeral": true}, + "status": {"type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}, + "ephemeral": true}}, + "indexes": [["target"]]}, + "DNS": { + "columns": { + "records": {"type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "external_ids": {"type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}}, + "isRoot": true}, + "SSL": { + "columns": { + "private_key": {"type": "string"}, + "certificate": {"type": "string"}, + "ca_cert": {"type": "string"}, + "bootstrap_ca_cert": {"type": "boolean"}, + "ssl_protocols": {"type": "string"}, + "ssl_ciphers": {"type": "string"}, + "external_ids": {"type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}}, + "maxRows": 1}, + "Gateway_Chassis": { + "columns": { + "name": {"type": "string"}, + "chassis_name": {"type": "string"}, + "priority": {"type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 32767}}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "options": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": false}, + "HA_Chassis": { + "columns": { + "chassis_name": {"type": "string"}, + "priority": {"type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 32767}}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "isRoot": false}, + "HA_Chassis_Group": { + "columns": { + "name": {"type": "string"}, + "ha_chassis": { + "type": {"key": {"type": "uuid", + "refTable": "HA_Chassis", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": true}} + } diff --git a/ovn_octavia_provider/tests/unit/test_driver.py b/ovn_octavia_provider/tests/unit/test_driver.py new file mode 100644 index 00000000..80395917 --- /dev/null +++ b/ovn_octavia_provider/tests/unit/test_driver.py @@ -0,0 +1,2141 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +import os + +import mock +from neutron.tests import base +from neutronclient.common import exceptions as n_exc +from octavia_lib.api.drivers import data_models +from octavia_lib.api.drivers import driver_lib +from octavia_lib.api.drivers import exceptions +from octavia_lib.common import constants +from oslo_utils import uuidutils +from ovs.db import idl as ovs_idl +from ovsdbapp.backend.ovs_idl import idlutils + +from ovn_octavia_provider.common import constants as ovn_const +from ovn_octavia_provider import driver as ovn_driver +from ovn_octavia_provider.tests.unit import fakes + +basedir = os.path.dirname(os.path.abspath(__file__)) +schema_files = { + 'OVN_Northbound': os.path.join(basedir, + 'schemas', 'ovn-nb.ovsschema')} + + +# TODO(mjozefcz): Move it to unittest fakes. +class MockedLB(data_models.LoadBalancer): + def __init__(self, *args, **kwargs): + self.external_ids = kwargs.pop('ext_ids') + self.uuid = kwargs.pop('uuid') + super(MockedLB, self).__init__(*args, **kwargs) + + def __hash__(self): + # Required for Python3, not for Python2 + return self.__sizeof__() + + +class TestOvnNbIdlForLb(base.BaseTestCase): + def setUp(self): + super(TestOvnNbIdlForLb, self).setUp() + self.mock_gsh = mock.patch.object( + idlutils, 'get_schema_helper', + side_effect=lambda x, y: ovs_idl.SchemaHelper( + location=schema_files['OVN_Northbound'])).start() + self.idl = ovn_driver.OvnNbIdlForLb() + + def test__get_ovsdb_helper(self): + self.mock_gsh.reset_mock() + self.idl._get_ovsdb_helper('foo') + self.mock_gsh.assert_called_once_with('foo', 'OVN_Northbound') + + def test_start(self): + with mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection', + side_effect=lambda x, timeout: mock.Mock()): + idl1 = ovn_driver.OvnNbIdlForLb() + ret1 = idl1.start() + id1 = id(ret1.ovsdb_connection) + idl2 = ovn_driver.OvnNbIdlForLb() + ret2 = idl2.start() + id2 = id(ret2.ovsdb_connection) + self.assertNotEqual(id1, id2) + + @mock.patch('ovsdbapp.backend.ovs_idl.connection.Connection') + def test_stop(self, mock_conn): + mock_conn.stop.return_value = False + with ( + mock.patch.object( + self.idl.notify_handler, 'shutdown')) as mock_notify, ( + mock.patch.object(self.idl, 'close')) as mock_close: + self.idl.start() + self.idl.stop() + mock_notify.assert_called_once_with() + mock_close.assert_called_once_with() + + def test_setlock(self): + with mock.patch.object(ovn_driver.OvnNbIdlForLb, + 'set_lock') as set_lock: + self.idl = ovn_driver.OvnNbIdlForLb(event_lock_name='foo') + set_lock.assert_called_once_with('foo') + + +class TestOvnOctaviaBase(base.BaseTestCase): + + def setUp(self): + super(TestOvnOctaviaBase, self).setUp() + self.listener_id = uuidutils.generate_uuid() + self.loadbalancer_id = uuidutils.generate_uuid() + self.pool_id = uuidutils.generate_uuid() + self.member_id = uuidutils.generate_uuid() + self.member_subnet_id = uuidutils.generate_uuid() + self.member_port = "1010" + self.member_pool_id = self.pool_id + self.member_address = "192.168.2.149" + self.port1_id = uuidutils.generate_uuid() + self.port2_id = uuidutils.generate_uuid() + self.project_id = uuidutils.generate_uuid() + self.vip_network_id = uuidutils.generate_uuid() + self.vip_port_id = uuidutils.generate_uuid() + self.vip_subnet_id = uuidutils.generate_uuid() + mock.patch( + "ovn_octavia_provider.driver.OvnNbIdlForLb").start() + self.member_address = "192.168.2.149" + self.vip_address = '192.148.210.109' + self.vip_dict = {'vip_network_id': uuidutils.generate_uuid(), + 'vip_subnet_id': uuidutils.generate_uuid()} + self.vip_output 
= {'vip_network_id': self.vip_dict['vip_network_id'], + 'vip_subnet_id': self.vip_dict['vip_subnet_id']} + mock.patch( + 'ovsdbapp.backend.ovs_idl.idlutils.get_schema_helper').start() + try: + mock.patch.object( + driver_lib.DriverLibrary, '_check_for_socket_ready').start() + except AttributeError: + # Backward compatiblity with octavia-lib < 1.3.1 + pass + + +class TestOvnProviderDriver(TestOvnOctaviaBase): + + def setUp(self): + super(TestOvnProviderDriver, self).setUp() + self.driver = ovn_driver.OvnProviderDriver() + add_req_thread = mock.patch.object(ovn_driver.OvnProviderHelper, + 'add_request') + self.ovn_lb = mock.MagicMock() + self.ovn_lb.external_ids = { + ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4'} + self.mock_add_request = add_req_thread.start() + self.project_id = uuidutils.generate_uuid() + + self.fail_member = data_models.Member( + address='198.51.100.4', + admin_state_up=True, + member_id=self.member_id, + monitor_address="100.200.200.100", + monitor_port=66, + name='Amazin', + pool_id=self.pool_id, + protocol_port=99, + subnet_id=self.member_subnet_id, + weight=55) + self.ref_member = data_models.Member( + address='198.52.100.4', + admin_state_up=True, + member_id=self.member_id, + monitor_address=data_models.Unset, + monitor_port=data_models.Unset, + name='Amazing', + pool_id=self.pool_id, + protocol_port=99, + subnet_id=self.member_subnet_id, + weight=55) + self.update_member = data_models.Member( + address='198.53.100.4', + admin_state_up=False, + member_id=self.member_id, + monitor_address=data_models.Unset, + monitor_port=data_models.Unset, + name='Amazin', + pool_id=self.pool_id, + protocol_port=99, + subnet_id=self.member_subnet_id, + weight=55) + self.ref_update_pool = data_models.Pool( + admin_state_up=False, + description='pool', + name='Peter', + lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, + loadbalancer_id=self.loadbalancer_id, + listener_id=self.listener_id, + members=[self.ref_member], + pool_id=self.pool_id, + protocol='TCP', + session_persistence={'type': 'fix'}) + self.ref_pool = data_models.Pool( + admin_state_up=True, + description='pool', + name='Peter', + lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, + loadbalancer_id=self.loadbalancer_id, + listener_id=self.listener_id, + members=[self.ref_member], + pool_id=self.pool_id, + protocol='TCP', + session_persistence={'type': 'fix'}) + self.ref_http_pool = data_models.Pool( + admin_state_up=True, + description='pool', + lb_algorithm=constants.LB_ALGORITHM_SOURCE_IP_PORT, + loadbalancer_id=self.loadbalancer_id, + listener_id=self.listener_id, + members=[self.ref_member], + name='Groot', + pool_id=self.pool_id, + protocol='HTTP', + session_persistence={'type': 'fix'}) + self.ref_lc_pool = data_models.Pool( + admin_state_up=True, + description='pool', + lb_algorithm=constants.LB_ALGORITHM_LEAST_CONNECTIONS, + loadbalancer_id=self.loadbalancer_id, + listener_id=self.listener_id, + members=[self.ref_member], + name='Groot', + pool_id=self.pool_id, + protocol='HTTP', + session_persistence={'type': 'fix'}) + self.ref_listener = data_models.Listener( + admin_state_up=False, + connection_limit=5, + default_pool=self.ref_pool, + default_pool_id=self.pool_id, + listener_id=self.listener_id, + loadbalancer_id=self.loadbalancer_id, + name='listener', + protocol='TCP', + protocol_port=42) + self.ref_listener_udp = data_models.Listener( + admin_state_up=False, + connection_limit=5, + default_pool=self.ref_pool, + default_pool_id=self.pool_id, + listener_id=self.listener_id, + 
loadbalancer_id=self.loadbalancer_id, + name='listener', + protocol='UDP', + protocol_port=42) + self.fail_listener = data_models.Listener( + admin_state_up=False, + connection_limit=5, + default_pool=self.ref_pool, + default_pool_id=self.pool_id, + listener_id=self.listener_id, + loadbalancer_id=self.loadbalancer_id, + name='listener', + protocol='http', + protocol_port=42) + self.ref_lb0 = data_models.LoadBalancer( + admin_state_up=False, + listeners=[self.ref_listener], + loadbalancer_id=self.loadbalancer_id, + name='favorite_lb0', + project_id=self.project_id, + vip_address=self.vip_address, + vip_network_id=self.vip_network_id) + self.ref_lb1 = data_models.LoadBalancer( + admin_state_up=True, + listeners=[self.ref_listener], + loadbalancer_id=self.loadbalancer_id, + name='favorite_lb1', + project_id=self.project_id, + vip_address=self.vip_address, + vip_network_id=self.vip_network_id) + mock.patch.object(ovn_driver.OvnProviderHelper, '_find_ovn_lb', + return_value=self.ovn_lb).start() + mock.patch.object( + ovn_driver.OvnProviderHelper, 'get_member_info', + return_value=[ + (self.ref_member.member_id, "198.52.100.4:99"), + (self.fail_member.member_id, "198.51.100.4:99")]).start() + self.mock_find_lb_pool_key = mock.patch.object( + ovn_driver.OvnProviderHelper, + '_find_ovn_lb_with_pool_key', + return_value=self.ovn_lb).start() + + def test__ip_version_differs(self): + self.assertFalse(self.driver._ip_version_differs(self.ref_member)) + self.ref_member.address = 'fc00::1' + self.assertTrue(self.driver._ip_version_differs(self.ref_member)) + + def test__ip_version_differs_pool_disabled(self): + self.mock_find_lb_pool_key.side_effect = [None, self.ovn_lb] + self.driver._ip_version_differs(self.ref_member) + self.mock_find_lb_pool_key.assert_has_calls([ + mock.call('pool_%s' % self.pool_id), + mock.call('pool_%s:D' % self.pool_id)]) + + def test_member_create(self): + info = {'id': self.ref_member.member_id, + 'address': self.ref_member.address, + 'protocol_port': self.ref_member.protocol_port, + 'pool_id': self.ref_member.pool_id, + 'subnet_id': self.ref_member.subnet_id, + 'admin_state_up': self.ref_member.admin_state_up} + expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_CREATE, + 'info': info} + self.driver.member_create(self.ref_member) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_member_create_failure(self): + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_create, self.fail_member) + + def test_member_create_different_ip_version(self): + self.ref_member.address = 'fc00::1' + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_create, self.ref_member) + + def test_member_create_different_ip_version_lb_disable(self): + self.driver._ovn_helper._find_ovn_lb_with_pool_key.side_effect = [ + None, self.ovn_lb] + self.ref_member.address = 'fc00::1' + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_create, self.ref_member) + self.driver._ovn_helper._find_ovn_lb_with_pool_key.assert_has_calls( + [mock.call('pool_%s' % self.pool_id), + mock.call('pool_%s%s' % (self.pool_id, ':D'))]) + + def test_member_create_no_subnet_provided(self): + self.ref_member.subnet_id = data_models.UnsetType() + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_create, self.ref_member) + + def test_member_create_monitor_opts(self): + self.ref_member.monitor_address = '172.20.20.1' + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_create, self.ref_member) + 
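+        # Setting a monitor port must be rejected as well.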
self.ref_member.monitor_port = '80' + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_create, self.ref_member) + + def test_member_create_no_set_admin_state_up(self): + self.ref_member.admin_state_up = data_models.UnsetType() + info = {'id': self.ref_member.member_id, + 'address': self.ref_member.address, + 'protocol_port': self.ref_member.protocol_port, + 'pool_id': self.ref_member.pool_id, + 'subnet_id': self.ref_member.subnet_id, + 'admin_state_up': True} + expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_CREATE, + 'info': info} + self.driver.member_create(self.ref_member) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_member_update(self): + info = {'id': self.update_member.member_id, + 'address': self.ref_member.address, + 'protocol_port': self.ref_member.protocol_port, + 'pool_id': self.ref_member.pool_id, + 'admin_state_up': self.update_member.admin_state_up, + 'subnet_id': self.ref_member.subnet_id} + expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_UPDATE, + 'info': info} + self.driver.member_update(self.ref_member, self.update_member) + self.mock_add_request.assert_called_once_with(expected_dict) + + @mock.patch.object(ovn_driver.OvnProviderDriver, '_ip_version_differs') + def test_member_update_no_ip_addr(self, mock_ip_differs): + self.update_member.address = None + self.driver.member_update(self.ref_member, self.update_member) + mock_ip_differs.assert_not_called() + + def test_member_batch_update(self): + self.driver.member_batch_update([self.ref_member, self.update_member]) + self.assertEqual(self.mock_add_request.call_count, 3) + + def test_member_batch_update_no_members(self): + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_batch_update, []) + + def test_member_batch_update_missing_pool(self): + delattr(self.ref_member, 'pool_id') + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_batch_update, [self.ref_member]) + + def test_member_batch_update_skipped_monitor(self): + self.ref_member.monitor_address = '10.11.1.1' + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_batch_update, + [self.ref_member]) + + def test_member_batch_update_skipped_mixed_ip(self): + self.ref_member.address = 'fc00::1' + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_batch_update, + [self.ref_member]) + + def test_member_batch_update_unset_admin_state_up(self): + self.ref_member.admin_state_up = data_models.UnsetType() + self.driver.member_batch_update([self.ref_member]) + self.assertEqual(self.mock_add_request.call_count, 2) + + def test_member_update_failure(self): + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_update, self.ref_member, + self.fail_member) + + def test_member_update_different_ip_version(self): + self.ref_member.address = 'fc00::1' + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.member_update, self.ref_member, + self.ref_member) + + def test_member_delete(self): + info = {'id': self.ref_member.member_id, + 'address': self.ref_member.address, + 'protocol_port': self.ref_member.protocol_port, + 'pool_id': self.ref_member.pool_id, + 'subnet_id': self.ref_member.subnet_id} + expected_dict = {'type': ovn_driver.REQ_TYPE_MEMBER_DELETE, + 'info': info} + self.driver.member_delete(self.ref_member) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_listener_create(self): + info = {'id': self.ref_listener.listener_id, + 'protocol': self.ref_listener.protocol, 
+ 'protocol_port': self.ref_listener.protocol_port, + 'default_pool_id': self.ref_listener.default_pool_id, + 'admin_state_up': self.ref_listener.admin_state_up, + 'loadbalancer_id': self.ref_listener.loadbalancer_id} + expected_dict = {'type': ovn_driver.REQ_TYPE_LISTENER_CREATE, + 'info': info} + self.driver.listener_create(self.ref_listener) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_listener_create_unset_admin_state_up(self): + self.ref_listener.admin_state_up = data_models.UnsetType() + info = {'id': self.ref_listener.listener_id, + 'protocol': self.ref_listener.protocol, + 'protocol_port': self.ref_listener.protocol_port, + 'default_pool_id': self.ref_listener.default_pool_id, + 'admin_state_up': True, + 'loadbalancer_id': self.ref_listener.loadbalancer_id} + expected_dict = {'type': ovn_driver.REQ_TYPE_LISTENER_CREATE, + 'info': info} + self.driver.listener_create(self.ref_listener) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_listener_update(self): + info = {'id': self.ref_listener.listener_id, + 'protocol_port': self.ref_listener.protocol_port, + 'admin_state_up': self.ref_listener.admin_state_up, + 'loadbalancer_id': self.ref_listener.loadbalancer_id} + if self.ref_listener.default_pool_id: + info['default_pool_id'] = self.ref_listener.default_pool_id + expected_dict = {'type': ovn_driver.REQ_TYPE_LISTENER_UPDATE, + 'info': info} + self.driver.listener_update(self.ref_listener, self.ref_listener) + self.mock_add_request.assert_called_once_with(expected_dict) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_is_listener_in_lb', + return_value=True) + def test_listener_failure(self, mock_listener): + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.listener_create, self.fail_listener) + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.listener_update, self.ref_listener, + self.fail_listener) + self.ovn_lb.protocol = ['TCP'] + self.assertRaises(exceptions.UnsupportedOptionError, + self.driver.listener_create, + self.ref_listener_udp) + + def test_listener_delete(self): + info = {'id': self.ref_listener.listener_id, + 'protocol_port': self.ref_listener.protocol_port, + 'loadbalancer_id': self.ref_listener.loadbalancer_id} + expected_dict = {'type': ovn_driver.REQ_TYPE_LISTENER_DELETE, + 'info': info} + self.driver.listener_delete(self.ref_listener) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_loadbalancer_create(self): + info = {'id': self.ref_lb0.loadbalancer_id, + 'vip_address': self.ref_lb0.vip_address, + 'vip_network_id': self.ref_lb0.vip_network_id, + 'admin_state_up': self.ref_lb0.admin_state_up} + expected_dict = {'type': ovn_driver.REQ_TYPE_LB_CREATE, + 'info': info} + self.driver.loadbalancer_create(self.ref_lb0) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_loadbalancer_create_unset_admin_state_up(self): + self.ref_lb0.admin_state_up = data_models.UnsetType() + info = {'id': self.ref_lb0.loadbalancer_id, + 'vip_address': self.ref_lb0.vip_address, + 'vip_network_id': self.ref_lb0.vip_network_id, + 'admin_state_up': True} + expected_dict = {'type': ovn_driver.REQ_TYPE_LB_CREATE, + 'info': info} + self.driver.loadbalancer_create(self.ref_lb0) + self.mock_add_request.assert_called_once_with(expected_dict) + + def test_loadbalancer_update(self): + info = {'id': self.ref_lb1.loadbalancer_id, + 'admin_state_up': self.ref_lb1.admin_state_up} + expected_dict = {'type': ovn_driver.REQ_TYPE_LB_UPDATE, + 'info': info} + 
        self.driver.loadbalancer_update(self.ref_lb0, self.ref_lb1)
+        self.mock_add_request.assert_called_once_with(expected_dict)
+
+    def test_loadbalancer_delete(self):
+        info = {'id': self.ref_lb0.loadbalancer_id,
+                'cascade': False}
+        expected_dict = {'type': ovn_driver.REQ_TYPE_LB_DELETE,
+                         'info': info}
+        self.driver.loadbalancer_delete(self.ref_lb1)
+        self.mock_add_request.assert_called_once_with(expected_dict)
+
+    def test_loadbalancer_failover(self):
+        info = {'id': self.ref_lb0.loadbalancer_id}
+        expected_dict = {'type': ovn_driver.REQ_TYPE_LB_FAILOVER,
+                         'info': info}
+        self.driver.loadbalancer_failover(info['id'])
+        self.mock_add_request.assert_called_once_with(expected_dict)
+
+    def test_pool_create_http(self):
+        self.ref_pool.protocol = 'HTTP'
+        self.assertRaises(exceptions.UnsupportedOptionError,
+                          self.driver.pool_create, self.ref_pool)
+
+    def test_pool_create_leastcount_algo(self):
+        self.ref_pool.lb_algorithm = constants.LB_ALGORITHM_LEAST_CONNECTIONS
+        self.assertRaises(exceptions.UnsupportedOptionError,
+                          self.driver.pool_create, self.ref_pool)
+
+    def test_pool_create(self):
+        info = {'id': self.ref_pool.pool_id,
+                'loadbalancer_id': self.ref_pool.loadbalancer_id,
+                'listener_id': self.ref_pool.listener_id,
+                'admin_state_up': self.ref_pool.admin_state_up}
+        expected_dict = {'type': ovn_driver.REQ_TYPE_POOL_CREATE,
+                         'info': info}
+        self.driver.pool_create(self.ref_pool)
+        self.mock_add_request.assert_called_once_with(expected_dict)
+
+    def test_pool_create_unset_admin_state_up(self):
+        self.ref_pool.admin_state_up = data_models.UnsetType()
+        info = {'id': self.ref_pool.pool_id,
+                'loadbalancer_id': self.ref_pool.loadbalancer_id,
+                'listener_id': self.ref_pool.listener_id,
+                'admin_state_up': True}
+        expected_dict = {'type': ovn_driver.REQ_TYPE_POOL_CREATE,
+                         'info': info}
+        self.driver.pool_create(self.ref_pool)
+        self.mock_add_request.assert_called_once_with(expected_dict)
+
+    def test_pool_delete(self):
+        # Pretend we don't have members
+        self.ref_pool.members = []
+        info = {'id': self.ref_pool.pool_id,
+                'loadbalancer_id': self.ref_pool.loadbalancer_id}
+        expected = {'type': ovn_driver.REQ_TYPE_POOL_DELETE,
+                    'info': info}
+        self.driver.pool_delete(self.ref_pool)
+        self.mock_add_request.assert_called_once_with(expected)
+
+    def test_pool_delete_with_members(self):
+        info = {'id': self.ref_pool.pool_id,
+                'loadbalancer_id': self.ref_pool.loadbalancer_id}
+        expected = {'type': ovn_driver.REQ_TYPE_POOL_DELETE,
+                    'info': info}
+        info_member = {'id': self.ref_member.member_id,
+                       'pool_id': self.ref_member.pool_id,
+                       'subnet_id': self.ref_member.subnet_id,
+                       'protocol_port': self.ref_member.protocol_port,
+                       'address': self.ref_member.address}
+        expected_members = {
+            'type': ovn_driver.REQ_TYPE_MEMBER_DELETE,
+            'info': info_member}
+        calls = [mock.call(expected_members),
+                 mock.call(expected)]
+        self.driver.pool_delete(self.ref_pool)
+        self.mock_add_request.assert_has_calls(calls)
+
+    def test_pool_update(self):
+        info = {'id': self.ref_update_pool.pool_id,
+                'loadbalancer_id': self.ref_update_pool.loadbalancer_id,
+                'admin_state_up': self.ref_update_pool.admin_state_up}
+        expected_dict = {'type': ovn_driver.REQ_TYPE_POOL_UPDATE,
+                         'info': info}
+        self.driver.pool_update(self.ref_pool, self.ref_update_pool)
+        self.mock_add_request.assert_called_once_with(expected_dict)
+
+    def test_create_vip_port(self):
+        with mock.patch.object(ovn_driver, 'get_network_driver'):
+            port_dict = self.driver.create_vip_port(self.loadbalancer_id,
+                                                    self.project_id,
+                                                    self.vip_dict)
+        self.assertIsNotNone(port_dict.pop('vip_address', None))
+        self.assertIsNotNone(port_dict.pop('vip_port_id', None))
+        # The network_driver function is mocked, therefore the
+        # created port vip_address and vip_port_id are also mocked.
+        # Check that they exist and move on.
+        # The final output includes vip_address, vip_port_id,
+        # vip_network_id and vip_subnet_id.
+        for key, value in port_dict.items():
+            self.assertEqual(value, self.vip_output[key])
+
+    def test_create_vip_port_exception(self):
+        with mock.patch.object(ovn_driver, 'get_network_driver',
+                               side_effect=[RuntimeError]):
+            self.assertRaises(
+                exceptions.DriverError,
+                self.driver.create_vip_port,
+                self.loadbalancer_id,
+                self.project_id,
+                self.vip_dict)
+
+
+class TestOvnProviderHelper(TestOvnOctaviaBase):
+
+    def setUp(self):
+        super(TestOvnProviderHelper, self).setUp()
+        self.helper = ovn_driver.OvnProviderHelper()
+        mock.patch.object(self.helper, '_update_status_to_octavia').start()
+        self.listener = {'id': self.listener_id,
+                         'loadbalancer_id': self.loadbalancer_id,
+                         'protocol': "TCP",
+                         'protocol_port': 80,
+                         'default_pool_id': self.pool_id,
+                         'admin_state_up': False}
+        self.lb = {'id': self.loadbalancer_id,
+                   'vip_address': self.vip_address,
+                   'cascade': False,
+                   'vip_network_id': self.vip_network_id,
+                   'admin_state_up': False}
+        self.ports = {'ports': [{
+            'fixed_ips': [{'ip_address': self.vip_address}],
+            'id': self.port1_id}]}
+        self.pool = {'id': self.pool_id,
+                     'loadbalancer_id': self.loadbalancer_id,
+                     'listener_id': self.listener_id,
+                     'admin_state_up': False}
+        self.member = {'id': self.member_id,
+                       'address': self.member_address,
+                       'protocol_port': self.member_port,
+                       'subnet_id': self.member_subnet_id,
+                       'pool_id': self.member_pool_id,
+                       'admin_state_up': True}
+        self.ovn_nbdb_api = mock.patch.object(self.helper, 'ovn_nbdb_api')
+        self.ovn_nbdb_api.start()
+        add_req_thread = mock.patch.object(ovn_driver.OvnProviderHelper,
+                                           'add_request')
+        self.mock_add_request = add_req_thread.start()
+        self.ovn_lb = mock.MagicMock()
+        self.ovn_lb.uuid = uuidutils.generate_uuid()
+        self.member_line = (
+            'member_%s_%s:%s' %
+            (self.member_id, self.member_address, self.member_port))
+        self.ovn_lb.external_ids = {
+            ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4',
+            ovn_driver.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123',
+            ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port',
+            'enabled': True,
+            'pool_%s' % self.pool_id: self.member_line,
+            'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id}
+        self.helper.ovn_nbdb_api.db_find.return_value.\
+            execute.return_value = [self.ovn_lb]
+        self.helper.ovn_nbdb_api.db_list_rows.return_value.\
+            execute.return_value = [self.ovn_lb]
+        mock.patch.object(self.helper,
+                          '_find_ovn_lb_with_pool_key',
+                          return_value=self.ovn_lb).start()
+        mock.patch.object(ovn_driver.OvnProviderHelper, '_find_ovn_lb',
+                          return_value=self.ovn_lb).start()
+        mock.patch.object(self.helper,
+                          '_get_pool_listeners',
+                          return_value=[]).start()
+        self._update_lb_to_ls_association = mock.patch.object(
+            self.helper,
+            '_update_lb_to_ls_association',
+            return_value=[])
+        self._update_lb_to_ls_association.start()
+        self._update_lb_to_lr_association = mock.patch.object(
+            self.helper,
+            '_update_lb_to_lr_association',
+            return_value=[])
+        self._update_lb_to_lr_association.start()
+
+        # NOTE(mjozefcz): Create foo router and network.
+ net_id = uuidutils.generate_uuid() + router_id = uuidutils.generate_uuid() + self.ref_lb1 = MockedLB( + uuid=uuidutils.generate_uuid(), + admin_state_up=True, + listeners=[], + loadbalancer_id=self.loadbalancer_id, + name='favorite_lb1', + project_id=self.project_id, + vip_address=self.vip_address, + vip_network_id=self.vip_network_id, + ext_ids={ + ovn_driver.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id, + ovn_driver.LB_EXT_IDS_LS_REFS_KEY: + '{\"neutron-%s\": 1}' % net_id}) + self.ref_lb2 = MockedLB( + uuid=uuidutils.generate_uuid(), + admin_state_up=True, + listeners=[], + loadbalancer_id=self.loadbalancer_id, + name='favorite_lb2', + project_id=self.project_id, + vip_address=self.vip_address, + vip_network_id=self.vip_network_id, + ext_ids={ + ovn_driver.LB_EXT_IDS_LR_REF_KEY: "neutron-%s" % net_id, + ovn_driver.LB_EXT_IDS_LS_REFS_KEY: + '{\"neutron-%s\": 1}' % net_id}) + # TODO(mjozefcz): Consider using FakeOVNRouter. + self.router = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'load_balancer': [self.ref_lb1], + 'name': 'neutron-%s' % router_id, + 'ports': []}) + # TODO(mjozefcz): Consider using FakeOVNSwitch. + self.network = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'load_balancer': [self.ref_lb2], + 'name': 'neutron-%s' % net_id, + 'ports': [], + 'uuid': net_id}) + self.mock_get_nw = mock.patch.object( + self.helper, "_get_nw_router_info_on_interface_event", + return_value=(self.router, self.network)) + self.mock_get_nw.start() + (self.helper.ovn_nbdb_api.ls_get.return_value. + execute.return_value) = self.network + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test_lb_create_disabled(self, net_dr): + self.lb['admin_state_up'] = False + net_dr.return_value.neutron_client.list_ports.return_value = ( + self.ports) + status = self.helper.lb_create(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.OFFLINE) + self.helper.ovn_nbdb_api.db_create.assert_called_once_with( + 'Load_Balancer', external_ids={ + ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY, + ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, + 'enabled': 'False'}, + name=mock.ANY, + protocol='tcp') + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test_lb_create_enabled(self, net_dr): + self.lb['admin_state_up'] = True + net_dr.return_value.neutron_client.list_ports.return_value = ( + self.ports) + status = self.helper.lb_create(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.ONLINE) + self.helper.ovn_nbdb_api.db_create.assert_called_once_with( + 'Load_Balancer', external_ids={ + ovn_driver.LB_EXT_IDS_VIP_KEY: mock.ANY, + ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: mock.ANY, + 'enabled': 'True'}, + name=mock.ANY, + protocol='tcp') + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port') + def test_lb_create_exception(self, del_port, net_dr): + self.helper._find_ovn_lb.side_effect = [RuntimeError] + net_dr.return_value.neutron_client.list_ports.return_value = ( + self.ports) + status = self.helper.lb_create(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ERROR) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.ERROR) + 
del_port.assert_called_once_with(self.ports.get('ports')[0]['id']) + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port') + def test_lb_delete(self, del_port, net_dr): + net_dr.return_value.neutron_client.delete_port.return_value = None + status = self.helper.lb_delete(self.ovn_lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.OFFLINE) + self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( + self.ovn_lb.uuid) + del_port.assert_called_once_with('foo_port') + + @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port') + def test_lb_delete_row_not_found(self, del_port): + self.helper._find_ovn_lb.side_effect = [idlutils.RowNotFound] + status = self.helper.lb_delete(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.OFFLINE) + self.helper.ovn_nbdb_api.lb_del.assert_not_called() + del_port.assert_not_called() + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port') + def test_lb_delete_exception(self, del_port, net_dr): + del_port.side_effect = [RuntimeError] + status = self.helper.lb_delete(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ERROR) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.ERROR) + self.helper.ovn_nbdb_api.lb_del.assert_not_called() + del_port.assert_called_once_with('foo_port') + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port') + def test_lb_delete_port_not_found(self, del_port, net_dr): + net_dr.return_value.neutron_client.delete_port.return_value = None + del_port.side_effect = [n_exc.PortNotFoundClient] + status = self.helper.lb_delete(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.OFFLINE) + self.helper.ovn_nbdb_api.lb_del.assert_not_called() + del_port.assert_called_once_with('foo_port') + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test_lb_delete_cascade(self, net_dr): + net_dr.return_value.neutron_client.delete_port.return_value = None + self.lb['cascade'] = True + status = self.helper.lb_delete(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.OFFLINE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['members'][0]['provisioning_status'], + constants.DELETED) + self.helper.ovn_nbdb_api.lb_del.assert_called_once_with( + self.ovn_lb.uuid) + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test_lb_delete_ls_lr(self, net_dr): + self.ovn_lb.external_ids.update({ + ovn_driver.LB_EXT_IDS_LR_REF_KEY: self.router.name, + ovn_driver.LB_EXT_IDS_LS_REFS_KEY: + '{\"neutron-%s\": 1}' % self.network.uuid}) + net_dr.return_value.neutron_client.delete_port.return_value = None + (self.helper.ovn_nbdb_api.ls_get.return_value.execute. 
+ return_value) = self.network + (self.helper.ovn_nbdb_api.tables['Logical_Router'].rows. + values.return_value) = [self.router] + self.helper.lb_delete(self.ovn_lb) + self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with( + self.network.uuid, self.ovn_lb.uuid) + self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with( + self.router.uuid, self.ovn_lb.uuid) + + def test_lb_failover(self): + status = self.helper.lb_failover(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips') + def test_lb_update_disabled(self, refresh_vips): + status = self.helper.lb_update(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.OFFLINE) + refresh_vips.assert_called_once_with( + self.ovn_lb.uuid, self.ovn_lb.external_ids) + self.helper.ovn_nbdb_api.db_set.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, + ('external_ids', {'enabled': 'False'})) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips') + def test_lb_update_enabled(self, refresh_vips): + self.lb['admin_state_up'] = True + status = self.helper.lb_update(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.ONLINE) + refresh_vips.assert_called_once_with( + self.ovn_lb.uuid, self.ovn_lb.external_ids) + self.helper.ovn_nbdb_api.db_set.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, + ('external_ids', {'enabled': 'True'})) + + def test_lb_update_exception(self): + self.helper._find_ovn_lb.side_effect = [RuntimeError] + status = self.helper.lb_update(self.lb) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ERROR) + self.assertEqual(status['loadbalancers'][0]['operating_status'], + constants.ERROR) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips') + def test_listener_create_disabled(self, refresh_vips): + self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) + status = self.helper.listener_create(self.listener) + # Set expected as disabled + self.ovn_lb.external_ids.update({ + 'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id}) + refresh_vips.assert_called_once_with( + self.ovn_lb.uuid, self.ovn_lb.external_ids) + expected_calls = [ + mock.call( + 'Load_Balancer', self.ovn_lb.uuid, + ('external_ids', { + 'listener_%s:D' % self.listener_id: + '80:pool_%s' % self.pool_id})), + mock.call('Load_Balancer', self.ovn_lb.uuid, ('protocol', 'tcp'))] + self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_calls) + self.assertEqual( + len(expected_calls), + self.helper.ovn_nbdb_api.db_set.call_count) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.OFFLINE) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips') + def test_listener_create_enabled(self, refresh_vips): + self.listener['admin_state_up'] = True + status = self.helper.listener_create(self.listener) + refresh_vips.assert_called_once_with( + self.ovn_lb.uuid, self.ovn_lb.external_ids) + expected_calls = [ + mock.call( + 'Load_Balancer', self.ovn_lb.uuid, + ('external_ids', { + 'listener_%s' % 
self.listener_id: + '80:pool_%s' % self.pool_id}))] + self.helper.ovn_nbdb_api.db_set.assert_has_calls(expected_calls) + self.assertEqual( + len(expected_calls), + self.helper.ovn_nbdb_api.db_set.call_count) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.ONLINE) + + def test_listener_create_no_default_pool(self): + self.listener['admin_state_up'] = True + self.listener.pop('default_pool_id') + self.helper.listener_create(self.listener) + expected_calls = [ + mock.call('Load_Balancer', self.ovn_lb.uuid, ('external_ids', { + 'listener_%s' % self.listener_id: '80:'})), + mock.call('Load_Balancer', self.ovn_lb.uuid, + ('vips', {}))] + self.helper.ovn_nbdb_api.db_set.assert_has_calls( + expected_calls) + self.assertEqual( + len(expected_calls), + self.helper.ovn_nbdb_api.db_set.call_count) + + def test_listener_create_exception(self): + self.helper._find_ovn_lb.side_effect = [RuntimeError] + status = self.helper.listener_create(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ERROR) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.ERROR) + + def test_listener_update(self): + status = self.helper.listener_update(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.OFFLINE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.listener['admin_state_up'] = True + status = self.helper.listener_update(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.ONLINE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + + def test_listener_update_exception(self): + self.helper._find_ovn_lb.side_effect = [RuntimeError] + status = self.helper.listener_update(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ERROR) + self.helper.ovn_nbdb_api.db_set.assert_not_called() + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips') + def test_listener_update_listener_enabled(self, refresh_vips): + self.listener['admin_state_up'] = True + # Update the listener port. 
+ self.listener.update({'protocol_port': 123}) + status = self.helper.listener_update(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.ONLINE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.helper.ovn_nbdb_api.db_set.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, + ('external_ids', { + 'listener_%s' % self.listener_id: + '123:pool_%s' % self.pool_id})) + # Update expected listener, because it was updated. + self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) + self.ovn_lb.external_ids.update( + {'listener_%s' % self.listener_id: '123:pool_%s' % self.pool_id}) + refresh_vips.assert_called_once_with( + self.ovn_lb.uuid, self.ovn_lb.external_ids) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips') + def test_listener_update_listener_disabled(self, refresh_vips): + self.listener['admin_state_up'] = False + status = self.helper.listener_update(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.OFFLINE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, 'external_ids', + 'listener_%s' % self.listener_id) + # It gets disabled, so update the key + self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) + self.ovn_lb.external_ids.update( + {'listener_%s:D' % self.listener_id: '80:pool_%s' % self.pool_id}) + refresh_vips.assert_called_once_with( + self.ovn_lb.uuid, self.ovn_lb.external_ids) + + def test_listener_delete_no_external_id(self): + self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) + status = self.helper.listener_delete(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.OFFLINE) + self.helper.ovn_nbdb_api.db_remove.assert_not_called() + + def test_listener_delete_exception(self): + self.helper._find_ovn_lb.side_effect = [RuntimeError] + status = self.helper.listener_delete(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ERROR) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.ERROR) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips') + def test_listener_delete_external_id(self, refresh_vips): + status = self.helper.listener_delete(self.listener) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['listeners'][0]['operating_status'], + constants.OFFLINE) + self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, + 'external_ids', 'listener_%s' % self.listener_id) + self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id) + 
        refresh_vips.assert_called_once_with(
+            self.ovn_lb.uuid, self.ovn_lb.external_ids)
+
+    def test_pool_create(self):
+        status = self.helper.pool_create(self.pool)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['listeners'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['operating_status'],
+                         constants.OFFLINE)
+        self.pool['admin_state_up'] = True
+        # Pool operating status shouldn't change if no member is present.
+        status = self.helper.pool_create(self.pool)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['operating_status'],
+                         constants.OFFLINE)
+
+    def test_pool_create_exception(self):
+        self.helper._find_ovn_lb.side_effect = [RuntimeError]
+        status = self.helper.pool_update(self.pool)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ERROR)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+
+    def test_pool_update(self):
+        status = self.helper.pool_update(self.pool)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['operating_status'],
+                         constants.OFFLINE)
+        self.pool['admin_state_up'] = True
+        status = self.helper.pool_update(self.pool)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['operating_status'],
+                         constants.ONLINE)
+
+    def test_pool_update_exception(self):
+        self.helper._get_pool_listeners.side_effect = [RuntimeError]
+        status = self.helper.pool_update(self.pool)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ERROR)
+
+    def test_pool_update_unset_admin_state_up(self):
+        self.pool.pop('admin_state_up')
+        status = self.helper.pool_update(self.pool)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ACTIVE)
+
+    def test_pool_update_pool_disabled_change_to_up(self):
+        self.pool.update({'admin_state_up': True})
+        disabled_p_key = self.helper._get_pool_key(self.pool_id,
+                                                   is_enabled=False)
+        p_key = self.helper._get_pool_key(self.pool_id)
+        self.ovn_lb.external_ids.update({
+            disabled_p_key: self.member_line})
+        self.ovn_lb.external_ids.pop(p_key)
+        status = self.helper.pool_update(self.pool)
+        self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['provisioning_status'],
+                         constants.ACTIVE)
+        self.assertEqual(status['pools'][0]['operating_status'],
+                         constants.ONLINE)
+        expected_calls = [
+            mock.call('Load_Balancer', self.ovn_lb.uuid,
+                      ('external_ids',
+                       {'pool_%s' % self.pool_id: self.member_line})),
+            mock.call('Load_Balancer', self.ovn_lb.uuid,
+                      ('vips', {'10.22.33.4:80': '192.168.2.149:1010',
+                                '123.123.123.123:80': '192.168.2.149:1010'}))]
+        self.helper.ovn_nbdb_api.db_set.assert_has_calls(
+            expected_calls)
+
+    def test_pool_update_pool_up_change_to_disabled(self):
+        self.pool.update({'admin_state_up': False})
+        status =
self.helper.pool_update(self.pool) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['operating_status'], + constants.OFFLINE) + expected_calls = [ + mock.call('Load_Balancer', self.ovn_lb.uuid, + ('external_ids', + {'pool_%s:D' % self.pool_id: self.member_line})), + mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {}))] + self.helper.ovn_nbdb_api.db_set.assert_has_calls( + expected_calls) + + def test_pool_update_listeners(self): + self.helper._get_pool_listeners.return_value = ['listener1'] + status = self.helper.pool_update(self.pool) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + + def test_pool_delete(self): + status = self.helper.pool_delete(self.pool) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.DELETED) + self.helper.ovn_nbdb_api.db_clear.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, 'vips') + self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, + 'external_ids', 'pool_%s' % self.pool_id) + expected_calls = [ + mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', {})), + mock.call( + 'Load_Balancer', self.ovn_lb.uuid, + ('external_ids', { + ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4', + ovn_driver.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', + ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port', + 'enabled': True, + 'listener_%s' % self.listener_id: '80:'}))] + self.assertEqual(self.helper.ovn_nbdb_api.db_set.call_count, + len(expected_calls)) + self.helper.ovn_nbdb_api.db_set.assert_has_calls( + expected_calls) + + def test_pool_delete_exception(self): + self.helper._find_ovn_lb.side_effect = [RuntimeError] + status = self.helper.pool_delete(self.pool) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ERROR) + self.helper.ovn_nbdb_api.db_remove.assert_not_called() + self.helper.ovn_nbdb_api.db_set.assert_not_called() + + def test_pool_delete_associated_listeners(self): + self.helper._get_pool_listeners.return_value = ['listener1'] + status = self.helper.pool_delete(self.pool) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.helper.ovn_nbdb_api.db_set.assert_called_with( + 'Load_Balancer', self.ovn_lb.uuid, + ('external_ids', { + 'enabled': True, + 'listener_%s' % self.listener_id: '80:', + ovn_driver.LB_EXT_IDS_VIP_KEY: '10.22.33.4', + ovn_driver.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', + ovn_driver.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port'})) + + def test_pool_delete_pool_disabled(self): + disabled_p_key = self.helper._get_pool_key(self.pool_id, + is_enabled=False) + p_key = self.helper._get_pool_key(self.pool_id) + self.ovn_lb.external_ids.update({ + disabled_p_key: self.member_line}) + self.ovn_lb.external_ids.pop(p_key) + status = self.helper.pool_delete(self.pool) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.DELETED) + self.helper.ovn_nbdb_api.db_remove.assert_called_once_with( + 'Load_Balancer', self.ovn_lb.uuid, + 'external_ids', 'pool_%s:D' % self.pool_id) + + def test_member_create(self): + self.ovn_lb.external_ids = 
mock.MagicMock() + status = self.helper.member_create(self.member) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['members'][0]['provisioning_status'], + constants.ACTIVE) + self.member['admin_state_up'] = False + status = self.helper.member_create(self.member) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['members'][0]['provisioning_status'], + constants.ACTIVE) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_add_member') + def test_member_create_exception(self, mock_add_member): + mock_add_member.side_effect = [RuntimeError] + status = self.helper.member_create(self.member) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ERROR) + + def test_member_create_lb_disabled(self): + self.helper._find_ovn_lb_with_pool_key.side_effect = [ + None, self.ovn_lb] + self.helper.member_create(self.member) + self.helper._find_ovn_lb_with_pool_key.assert_has_calls( + [mock.call('pool_%s' % self.pool_id), + mock.call('pool_%s%s' % (self.pool_id, ':D'))]) + + def test_member_create_listener(self): + self.ovn_lb.external_ids = mock.MagicMock() + self.helper._get_pool_listeners.return_value = ['listener1'] + status = self.helper.member_create(self.member) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['id'], + 'listener1') + + def test_member_create_already_exists(self): + self.helper.member_create(self.member) + self.helper.ovn_nbdb_api.db_set.assert_not_called() + + def test_member_create_first_member_in_pool(self): + self.ovn_lb.external_ids.update({ + 'pool_' + self.pool_id: ''}) + self.helper.member_create(self.member) + expected_calls = [ + mock.call('Load_Balancer', self.ovn_lb.uuid, + ('external_ids', + {'pool_%s' % self.pool_id: self.member_line})), + mock.call('Load_Balancer', self.ovn_lb.uuid, + ('vips', { + '10.22.33.4:80': '192.168.2.149:1010', + '123.123.123.123:80': '192.168.2.149:1010'}))] + self.helper.ovn_nbdb_api.db_set.assert_has_calls( + expected_calls) + + def test_member_create_second_member_in_pool(self): + member2_id = uuidutils.generate_uuid() + member2_port = "1010" + member2_address = "192.168.2.150" + member2_line = ('member_%s_%s:%s' % + (member2_id, member2_address, member2_port)) + self.ovn_lb.external_ids.update( + {'pool_%s' % self.pool_id: member2_line}) + self.helper.member_create(self.member) + all_member_line = ( + '%s,member_%s_%s:%s' % + (member2_line, self.member_id, + self.member_address, self.member_port)) + # We have two members now. 
+ expected_calls = [ + mock.call('Load_Balancer', self.ovn_lb.uuid, + ('external_ids', { + 'pool_%s' % self.pool_id: all_member_line})), + mock.call( + 'Load_Balancer', self.ovn_lb.uuid, + ('vips', { + '10.22.33.4:80': + '192.168.2.150:1010,192.168.2.149:1010', + '123.123.123.123:80': + '192.168.2.150:1010,192.168.2.149:1010'}))] + self.helper.ovn_nbdb_api.db_set.assert_has_calls( + expected_calls) + + def test_member_update(self): + self.ovn_lb.external_ids = mock.MagicMock() + status = self.helper.member_update(self.member) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['members'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['members'][0]['operating_status'], + constants.ONLINE) + self.member['admin_state_up'] = False + status = self.helper.member_update(self.member) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['members'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['members'][0]['operating_status'], + constants.OFFLINE) + + def test_member_update_disabled_lb(self): + self.helper._find_ovn_lb_with_pool_key.side_effect = [ + None, self.ovn_lb] + self.helper.member_update(self.member) + self.helper._find_ovn_lb_with_pool_key.assert_has_calls( + [mock.call('pool_%s' % self.pool_id), + mock.call('pool_%s%s' % (self.pool_id, ':D'))]) + + def test_member_update_pool_listeners(self): + self.ovn_lb.external_ids = mock.MagicMock() + self.helper._get_pool_listeners.return_value = ['listener1'] + status = self.helper.member_update(self.member) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['id'], + 'listener1') + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_update_member') + def test_member_update_exception(self, mock_update_member): + mock_update_member.side_effect = [RuntimeError] + status = self.helper.member_update(self.member) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + + def test_member_update_new_port(self): + new_port = 11 + member_line = ('member_%s_%s:%s' % + (self.member_id, self.member_address, new_port)) + self.ovn_lb.external_ids.update( + {'pool_%s' % self.pool_id: member_line}) + self.helper.member_update(self.member) + new_member_line = ( + 'member_%s_%s:%s' % + (self.member_id, self.member_address, self.member_port)) + expected_calls = [ + mock.call('Load_Balancer', self.ovn_lb.uuid, + ('external_ids', { + 'pool_%s' % self.pool_id: new_member_line})), + mock.call('Load_Balancer', self.ovn_lb.uuid, ('vips', { + '10.22.33.4:80': '192.168.2.149:1010', + '123.123.123.123:80': '192.168.2.149:1010'}))] + self.helper.ovn_nbdb_api.db_set.assert_has_calls( + expected_calls) + + @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.' 
+ '_refresh_lb_vips') + def test_member_delete(self, mock_vip_command): + status = self.helper.member_delete(self.member) + self.assertEqual(status['loadbalancers'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['members'][0]['provisioning_status'], + constants.DELETED) + + def test_member_delete_one_left(self): + member2_id = uuidutils.generate_uuid() + member2_port = "1010" + member2_address = "192.168.2.150" + member_line = ( + 'member_%s_%s:%s,member_%s_%s:%s' % + (self.member_id, self.member_address, self.member_port, + member2_id, member2_address, member2_port)) + self.ovn_lb.external_ids.update({ + 'pool_' + self.pool_id: member_line}) + status = self.helper.member_delete(self.member) + self.assertEqual(status['members'][0]['provisioning_status'], + constants.DELETED) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_remove_member') + def test_member_delete_exception(self, mock_remove_member): + mock_remove_member.side_effect = [RuntimeError] + status = self.helper.member_delete(self.member) + self.assertEqual(status['pools'][0]['provisioning_status'], + constants.ACTIVE) + + def test_member_delete_disabled_lb(self): + self.helper._find_ovn_lb_with_pool_key.side_effect = [ + None, self.ovn_lb] + self.helper.member_delete(self.member) + self.helper._find_ovn_lb_with_pool_key.assert_has_calls( + [mock.call('pool_%s' % self.pool_id), + mock.call('pool_%s%s' % (self.pool_id, ':D'))]) + + def test_member_delete_pool_listeners(self): + self.ovn_lb.external_ids.update({ + 'pool_' + self.pool_id: 'member_' + self.member_id + '_' + + self.member_address + ':' + self.member_port}) + self.helper._get_pool_listeners.return_value = ['listener1'] + status = self.helper.member_delete(self.member) + self.assertEqual(status['listeners'][0]['provisioning_status'], + constants.ACTIVE) + self.assertEqual(status['listeners'][0]['id'], + 'listener1') + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test_logical_router_port_event_create(self, net_dr): + self.router_port_event = ovn_driver.LogicalRouterPortEvent( + self.helper) + row = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'gateway_chassis': []}) + self.router_port_event.run('create', row, mock.ANY) + expected = { + 'info': + {'router': self.router, + 'network': self.network}, + 'type': 'lb_create_lrp_assoc'} + self.mock_add_request.assert_called_once_with(expected) + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test_logical_router_port_event_delete(self, net_dr): + self.router_port_event = ovn_driver.LogicalRouterPortEvent( + self.helper) + row = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'gateway_chassis': []}) + self.router_port_event.run('delete', row, mock.ANY) + expected = { + 'info': + {'router': self.router, + 'network': self.network}, + 'type': 'lb_delete_lrp_assoc'} + self.mock_add_request.assert_called_once_with(expected) + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test_logical_router_port_event_gw_port(self, net_dr): + self.router_port_event = ovn_driver.LogicalRouterPortEvent( + self.helper) + row = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'gateway_chassis': ['temp-gateway-chassis']}) + self.router_port_event.run(mock.ANY, row, mock.ANY) + self.mock_add_request.assert_not_called() + + def test__get_nw_router_info_on_interface_event(self): + 
self.mock_get_nw.stop() + lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={ + 'external_ids': { + ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1', + ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'network1'} + }) + self.helper._get_nw_router_info_on_interface_event(lrp) + expected_calls = [ + mock.call.lookup('Logical_Router', 'neutron-router1'), + mock.call.lookup('Logical_Switch', 'network1')] + self.helper.ovn_nbdb_api.assert_has_calls(expected_calls) + + def test__get_nw_router_info_on_interface_event_not_found(self): + self.mock_get_nw.stop() + self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] + lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={ + 'external_ids': { + ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'} + }) + self.assertRaises( + idlutils.RowNotFound, + self.helper._get_nw_router_info_on_interface_event, + lrp) + + def test_lb_delete_lrp_assoc_handler(self): + lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row() + self.helper.lb_delete_lrp_assoc_handler(lrp) + expected = { + 'info': + {'router': self.router, + 'network': self.network}, + 'type': 'lb_delete_lrp_assoc'} + self.mock_add_request.assert_called_once_with(expected) + + def test_lb_delete_lrp_assoc_handler_info_not_found(self): + self.mock_get_nw.stop() + self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] + lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={ + 'external_ids': { + ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'} + }) + self.helper.lb_delete_lrp_assoc_handler(lrp) + self.mock_add_request.assert_not_called() + + @mock.patch.object(ovn_driver.OvnProviderHelper, + '_execute_commands') + def test_lb_delete_lrp_assoc_no_net_lb_no_r_lb(self, mock_execute): + info = { + 'network': self.network, + 'router': self.router, + } + self.network.load_balancer = [] + self.router.load_balancer = [] + self.helper.lb_delete_lrp_assoc(info) + self.helper._update_lb_to_lr_association.assert_not_called() + mock_execute.assert_not_called() + + @mock.patch.object(ovn_driver.OvnProviderHelper, + '_execute_commands') + def test_lb_delete_lrp_assoc_no_net_lb_r_lb(self, mock_execute): + info = { + 'network': self.network, + 'router': self.router, + } + self.network.load_balancer = [] + self.helper.lb_delete_lrp_assoc(info) + expected = [ + self.helper.ovn_nbdb_api.ls_lb_del( + self.network.uuid, + self.router.load_balancer[0].uuid + ), + ] + self.helper._update_lb_to_lr_association.assert_not_called() + mock_execute.assert_called_once_with(expected) + + @mock.patch.object(ovn_driver.OvnProviderHelper, + '_execute_commands') + def test_lb_delete_lrp_assoc_net_lb_no_r_lb(self, mock_execute): + info = { + 'network': self.network, + 'router': self.router, + } + self.router.load_balancer = [] + self.helper.lb_delete_lrp_assoc(info) + mock_execute.assert_not_called() + self.helper._update_lb_to_lr_association.assert_called_once_with( + self.network.load_balancer[0], self.router, delete=True + ) + + @mock.patch.object(ovn_driver.OvnProviderHelper, + '_execute_commands') + def test_lb_delete_lrp_assoc(self, mock_execute): + info = { + 'network': self.network, + 'router': self.router, + } + self.helper.lb_delete_lrp_assoc(info) + self.helper._update_lb_to_lr_association.assert_called_once_with( + self.network.load_balancer[0], self.router, delete=True + ) + expected = [ + self.helper.ovn_nbdb_api.ls_lb_del( + self.network.uuid, + self.router.load_balancer[0].uuid + ), + ] + mock_execute.assert_called_once_with(expected) + + def test_lb_create_lrp_assoc_handler(self): + lrp = 
fakes.FakeOvsdbRow.create_one_ovsdb_row() + self.helper.lb_create_lrp_assoc_handler(lrp) + expected = { + 'info': + {'router': self.router, + 'network': self.network}, + 'type': 'lb_create_lrp_assoc'} + self.mock_add_request.assert_called_once_with(expected) + + def test_lb_create_lrp_assoc_handler_row_not_found(self): + self.mock_get_nw.stop() + self.helper.ovn_nbdb_api.lookup.side_effect = [idlutils.RowNotFound] + lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={ + 'external_ids': { + ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'} + }) + self.helper.lb_create_lrp_assoc_handler(lrp) + self.mock_add_request.assert_not_called() + + @mock.patch.object(ovn_driver.OvnProviderHelper, + '_execute_commands') + def test_lb_create_lrp_assoc(self, mock_execute): + info = { + 'network': self.network, + 'router': self.router, + } + self.helper.lb_create_lrp_assoc(info) + self.helper._update_lb_to_lr_association.assert_called_once_with( + self.network.load_balancer[0], self.router + ) + expected = [ + self.helper.ovn_nbdb_api.ls_lb_add( + self.network.uuid, + self.router.load_balancer[0].uuid + ), + ] + mock_execute.assert_called_once_with(expected) + + @mock.patch.object(ovn_driver.OvnProviderHelper, + '_execute_commands') + def test_lb_create_lrp_assoc_uniq_lb(self, mock_execute): + info = { + 'network': self.network, + 'router': self.router, + } + # Make it already uniq. + self.network.load_balancer = self.router.load_balancer + self.helper.lb_create_lrp_assoc(info) + self.helper._update_lb_to_lr_association.assert_not_called() + mock_execute.assert_not_called() + + def test__find_lb_in_ls(self): + net_lb = self.helper._find_lb_in_ls(self.network) + for lb in self.network.load_balancer: + self.assertIn(lb, net_lb) + + def test__find_lb_in_ls_wrong_ref(self): + # lets break external_ids refs + self.network.load_balancer[0].external_ids.update({ + ovn_driver.LB_EXT_IDS_LS_REFS_KEY: 'foo'}) + net_lb = self.helper._find_lb_in_ls(self.network) + for lb in self.network.load_balancer: + self.assertNotIn(lb, net_lb) + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test__find_ls_for_lr(self, net_dr): + fake_subnet1 = fakes.FakeSubnet.create_one_subnet() + fake_subnet1.network_id = 'foo1' + fake_subnet2 = fakes.FakeSubnet.create_one_subnet() + fake_subnet2.network_id = 'foo2' + net_dr.return_value.get_subnet.side_effect = [ + fake_subnet1, fake_subnet2] + p1 = fakes.FakeOVNPort.create_one_port(attrs={ + 'gateway_chassis': [], + 'external_ids': { + ovn_const.OVN_SUBNET_EXT_IDS_KEY: + '%s %s' % (fake_subnet1.id, + fake_subnet2.id)}}) + self.router.ports.append(p1) + res = self.helper._find_ls_for_lr(self.router) + self.assertListEqual(['neutron-foo1', 'neutron-foo2'], + res) + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test__find_ls_for_lr_gw_port(self, net_dr): + p1 = fakes.FakeOVNPort.create_one_port(attrs={ + 'gateway_chassis': ['foo-gw-chassis'], + 'external_ids': { + ovn_const.OVN_SUBNET_EXT_IDS_KEY: self.member_subnet_id}}) + self.router.ports.append(p1) + result = self.helper._find_ls_for_lr(self.router) + self.assertListEqual([], result) + + @mock.patch.object( + ovn_driver.OvnProviderHelper, '_del_lb_to_lr_association') + @mock.patch.object( + ovn_driver.OvnProviderHelper, '_add_lb_to_lr_association') + def test__update_lb_to_lr_association(self, add, delete): + self._update_lb_to_lr_association.stop() + self.helper._update_lb_to_lr_association(self.ref_lb1, self.router) + lr_ref = self.ref_lb1.external_ids.get( + 
            ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+        add.assert_called_once_with(self.ref_lb1, self.router, lr_ref)
+        delete.assert_not_called()
+
+    @mock.patch.object(
+        ovn_driver.OvnProviderHelper, '_del_lb_to_lr_association')
+    @mock.patch.object(
+        ovn_driver.OvnProviderHelper, '_add_lb_to_lr_association')
+    def test__update_lb_to_lr_association_delete(self, add, delete):
+        self._update_lb_to_lr_association.stop()
+        self.helper._update_lb_to_lr_association(
+            self.ref_lb1, self.router, delete=True)
+        lr_ref = self.ref_lb1.external_ids.get(
+            ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+        add.assert_not_called()
+        delete.assert_called_once_with(self.ref_lb1, self.router, lr_ref)
+
+    @mock.patch('ovn_octavia_provider.driver.get_network_driver')
+    def test__del_lb_to_lr_association(self, net_dr):
+        lr_ref = self.ref_lb1.external_ids.get(
+            ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+        upd_lr_ref = '%s,%s' % (lr_ref, self.router.name)
+        self.helper._del_lb_to_lr_association(
+            self.ref_lb1, self.router, upd_lr_ref)
+        expected_calls = [
+            mock.call.db_set(
+                'Load_Balancer', self.ref_lb1.uuid,
+                (('external_ids',
+                  {ovn_driver.LB_EXT_IDS_LR_REF_KEY: lr_ref}))),
+            mock.call.lr_lb_del(
+                self.router.uuid, self.ref_lb1.uuid,
+                if_exists=True)]
+        self.helper.ovn_nbdb_api.assert_has_calls(
+            expected_calls)
+        self.helper.ovn_nbdb_api.db_remove.assert_not_called()
+
+    @mock.patch('ovn_octavia_provider.driver.get_network_driver')
+    def test__del_lb_to_lr_association_no_lr_ref(self, net_dr):
+        lr_ref = ''
+        self.helper._del_lb_to_lr_association(
+            self.ref_lb1, self.router, lr_ref)
+        self.helper.ovn_nbdb_api.db_set.assert_not_called()
+        self.helper.ovn_nbdb_api.db_remove.assert_not_called()
+        self.helper.ovn_nbdb_api.lr_lb_del.assert_not_called()
+
+    @mock.patch('ovn_octavia_provider.driver.get_network_driver')
+    def test__del_lb_to_lr_association_lr_ref_empty_after(self, net_dr):
+        lr_ref = self.router.name
+        self.helper._del_lb_to_lr_association(
+            self.ref_lb1, self.router, lr_ref)
+        self.helper.ovn_nbdb_api.db_remove.assert_called_once_with(
+            'Load_Balancer', self.ref_lb1.uuid, 'external_ids',
+            ovn_driver.LB_EXT_IDS_LR_REF_KEY)
+        self.helper.ovn_nbdb_api.lr_lb_del.assert_called_once_with(
+            self.router.uuid, self.ref_lb1.uuid, if_exists=True)
+        self.helper.ovn_nbdb_api.db_set.assert_not_called()
+
+    @mock.patch.object(ovn_driver.OvnProviderHelper, '_find_ls_for_lr')
+    def test__del_lb_to_lr_association_from_ls(self, f_ls):
+        # This tests that the LB is also deleted from the Logical_Switches
+        # connected to the Logical_Router_Ports.
+ f_ls.return_value = ['neutron-xyz', 'neutron-qwr'] + self.helper._del_lb_to_lr_association(self.ref_lb1, self.router, '') + self.helper.ovn_nbdb_api.ls_lb_del.assert_has_calls([ + (mock.call('neutron-xyz', self.ref_lb1.uuid, if_exists=True)), + (mock.call('neutron-qwr', self.ref_lb1.uuid, if_exists=True))]) + + @mock.patch.object(ovn_driver.OvnProviderHelper, '_find_ls_for_lr') + def test__add_lb_to_lr_association(self, f_ls): + lr_ref = 'foo' + f_ls.return_value = ['neutron-xyz', 'neutron-qwr'] + self.helper._add_lb_to_lr_association( + self.ref_lb1, self.router, lr_ref) + self.helper.ovn_nbdb_api.lr_lb_add.assert_called_once_with( + self.router.uuid, self.ref_lb1.uuid, may_exist=True) + self.helper.ovn_nbdb_api.ls_lb_add.assert_has_calls([ + (mock.call('neutron-xyz', self.ref_lb1.uuid, may_exist=True)), + (mock.call('neutron-qwr', self.ref_lb1.uuid, may_exist=True))]) + self.helper.ovn_nbdb_api.db_set.assert_called_once_with( + 'Load_Balancer', self.ref_lb1.uuid, + ('external_ids', {'lr_ref': 'foo,%s' % self.router.name})) + + def test__find_lr_of_ls(self): + lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={ + 'external_ids': { + ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router1'}, + 'type': 'router', + 'options': { + 'router-port': 'lrp-foo-name'} + }) + lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={ + 'name': 'lrp-foo-name' + }) + lr = fakes.FakeOVNRouter.create_one_router( + attrs={ + 'name': 'router1', + 'ports': [lrp]}) + ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'ports': [lsp]}) + + (self.helper.ovn_nbdb_api.tables['Logical_Router'].rows. + values.return_value) = [lr] + returned_lr = self.helper._find_lr_of_ls(ls) + self.assertEqual(lr, returned_lr) + + def test__find_lr_of_ls_no_lrp(self): + ls = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'ports': []}) + returned_lr = self.helper._find_lr_of_ls(ls) + (self.helper.ovn_nbdb_api.tables['Logical_Router'].rows. + values.assert_not_called()) + self.assertIsNone(returned_lr) + + def test__update_lb_to_ls_association_empty_network_and_subnet(self): + self._update_lb_to_ls_association.stop() + returned_commands = self.helper._update_lb_to_ls_association( + self.ref_lb1, associate=True) + self.assertListEqual(returned_commands, []) + + def test__update_lb_to_ls_association_network(self): + self._update_lb_to_ls_association.stop() + + self.helper._update_lb_to_ls_association( + self.ref_lb1, network_id=self.network.uuid, associate=True) + + self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( + self.network.name) + ls_refs = {'ls_refs': '{"%s": 2}' % self.network.name} + self.helper.ovn_nbdb_api.db_set.assert_called_once_with( + 'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs)) + + @mock.patch('ovn_octavia_provider.driver.get_network_driver') + def test__update_lb_to_ls_association_subnet(self, net_dr): + self._update_lb_to_ls_association.stop() + subnet = fakes.FakeSubnet.create_one_subnet( + attrs={'id': 'foo_subnet_id', + 'name': 'foo_subnet_name', + 'network_id': 'foo_network_id'}) + net_dr.return_value.get_subnet.return_value = subnet + + self.helper._update_lb_to_ls_association( + self.ref_lb1, subnet_id=subnet.id, associate=True) + + self.helper.ovn_nbdb_api.ls_get.assert_called_once_with( + 'neutron-foo_network_id') + + def test__update_lb_to_ls_association_empty_ls_refs(self): + self._update_lb_to_ls_association.stop() + (self.helper.ovn_nbdb_api.ls_get.return_value.execute. 
+            return_value) = self.network
+        self.ref_lb1.external_ids.pop('ls_refs')
+
+        self.helper._update_lb_to_ls_association(
+            self.ref_lb1, network_id=self.network.uuid)
+
+        self.helper.ovn_nbdb_api.ls_lb_add.assert_called_once_with(
+            self.network.uuid, self.ref_lb1.uuid, may_exist=True)
+        ls_refs = {'ls_refs': '{"%s": 1}' % self.network.name}
+        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
+            'Load_Balancer', self.ref_lb1.uuid, ('external_ids', ls_refs))
+
+    @mock.patch('ovn_octavia_provider.driver.get_network_driver')
+    def test__update_lb_to_ls_association_no_ls(self, net_dr):
+        self._update_lb_to_ls_association.stop()
+        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
+            return_value) = None
+
+        returned_commands = self.helper._update_lb_to_ls_association(
+            self.ref_lb1, network_id=self.network.uuid)
+
+        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
+            self.network.name)
+        self.assertListEqual([], returned_commands)
+
+    def test__update_lb_to_ls_association_network_disassociate(self):
+        self._update_lb_to_ls_association.stop()
+        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
+            return_value) = self.network
+
+        self.helper._update_lb_to_ls_association(
+            self.ref_lb1, network_id=self.network.uuid, associate=False)
+
+        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
+            self.network.name)
+        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
+            'Load_Balancer', self.ref_lb1.uuid,
+            ('external_ids', {'ls_refs': '{}'}))
+        self.helper.ovn_nbdb_api.ls_lb_del.assert_called_once_with(
+            self.network.uuid, self.ref_lb1.uuid, if_exists=True)
+
+    def test__update_lb_to_ls_association_disassoc_ls_not_in_ls_refs(self):
+        self._update_lb_to_ls_association.stop()
+        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
+            return_value) = self.network
+        self.ref_lb1.external_ids.pop('ls_refs')
+
+        self.helper._update_lb_to_ls_association(
+            self.ref_lb1, network_id=self.network.uuid, associate=False)
+
+        self.helper.ovn_nbdb_api.ls_lb_del.assert_not_called()
+        self.helper.ovn_nbdb_api.db_set.assert_not_called()
+
+    def test__update_lb_to_ls_association_disassoc_multiple_refs(self):
+        self._update_lb_to_ls_association.stop()
+        (self.helper.ovn_nbdb_api.ls_get.return_value.execute.
+            return_value) = self.network
+        # multiple refs
+        ls_refs = {'ls_refs': '{"%s": 2}' % self.network.name}
+        self.ref_lb1.external_ids.update(ls_refs)
+
+        self.helper._update_lb_to_ls_association(
+            self.ref_lb1, network_id=self.network.uuid, associate=False)
+
+        self.helper.ovn_nbdb_api.ls_get.assert_called_once_with(
+            self.network.name)
+        exp_ls_refs = {'ls_refs': '{"%s": 1}' % self.network.name}
+        self.helper.ovn_nbdb_api.db_set.assert_called_once_with(
+            'Load_Balancer', self.ref_lb1.uuid, ('external_ids', exp_ls_refs))
+
+    def test_logical_switch_port_update_event_vip_port(self):
+        self.switch_port_event = ovn_driver.LogicalSwitchPortUpdateEvent(
+            self.helper)
+        port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
+        attrs = {
+            'external_ids':
+            {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name,
+             ovn_const.OVN_PORT_FIP_EXT_ID_KEY: '10.0.0.1'}}
+        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+            attrs=attrs)
+        self.switch_port_event.run(mock.ANY, row, mock.ANY)
+        expected_call = {
+            'info':
+            {'action': 'associate',
+             'vip_fip': '10.0.0.1',
+             'lb_id': 'foo'},
+            'type': 'handle_vip_fip'}
+        self.mock_add_request.assert_called_once_with(expected_call)
+
+    def test_logical_switch_port_update_event_missing_port_name(self):
+        self.switch_port_event = ovn_driver.LogicalSwitchPortUpdateEvent(
+            self.helper)
+        attrs = {'external_ids': {}}
+        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+            attrs=attrs)
+        self.switch_port_event.run(mock.ANY, row, mock.ANY)
+        self.mock_add_request.assert_not_called()
+
+    def test_logical_switch_port_update_event_empty_fip(self):
+        self.switch_port_event = ovn_driver.LogicalSwitchPortUpdateEvent(
+            self.helper)
+        port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
+        attrs = {'external_ids':
+                 {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}}
+        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+            attrs=attrs)
+        self.switch_port_event.run(mock.ANY, row, mock.ANY)
+        expected_call = {
+            'info':
+            {'action': 'disassociate',
+             'vip_fip': None,
+             'lb_id': 'foo'},
+            'type': 'handle_vip_fip'}
+        self.mock_add_request.assert_called_once_with(expected_call)
+
+    def test_logical_switch_port_update_event_not_vip_port(self):
+        self.switch_port_event = ovn_driver.LogicalSwitchPortUpdateEvent(
+            self.helper)
+        port_name = 'foo'
+        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+            attrs={'external_ids':
+                   {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}})
+        self.switch_port_event.run(mock.ANY, row, mock.ANY)
+        self.mock_add_request.assert_not_called()
+
+    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+                '_find_ovn_lb')
+    def test_vip_port_update_handler_lb_not_found(self, lb):
+        lb.side_effect = [idlutils.RowNotFound]
+        self.switch_port_event = ovn_driver.LogicalSwitchPortUpdateEvent(
+            self.helper)
+        port_name = '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, 'foo')
+        attrs = {'external_ids':
+                 {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port_name}}
+        row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+            attrs=attrs)
+        self.switch_port_event.run(mock.ANY, row, mock.ANY)
+        self.mock_add_request.assert_not_called()
+
+    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+                '_find_ovn_lb')
+    def test_handle_vip_fip_disassociate(self, flb):
+        fip_info = {
+            'action': 'disassociate',
+            'vip_fip': None,
+            'lb_id': 'foo'}
+        lb = mock.MagicMock()
+        flb.return_value = lb
+        self.helper.handle_vip_fip(fip_info)
+        calls = [
+            mock.call.db_remove(
+                'Load_Balancer', lb.uuid, 'external_ids', 'neutron:vip_fip'),
+            mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'),
+            mock.call.db_set('Load_Balancer', lb.uuid, ('vips', {}))]
+        self.helper.ovn_nbdb_api.assert_has_calls(calls)
+
+    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+                '_find_ovn_lb')
+    @mock.patch('ovn_octavia_provider.driver.get_network_driver')
+    def test_handle_vip_fip_associate(self, net_dr, fb):
+        fip_info = {
+            'action': 'associate',
+            'vip_fip': '10.0.0.123',
+            'lb_id': 'foo'}
+        members = 'member_%s_%s:%s' % (self.member_id,
+                                       self.member_address,
+                                       self.member_port)
+        external_ids = {
+            'listener_foo': '80:pool_%s' % self.pool_id,
+            'pool_%s' % self.pool_id: members,
+            'neutron:vip': '172.26.21.20'}
+
+        lb = mock.MagicMock()
+        lb.external_ids = external_ids
+        fb.return_value = lb
+
+        self.helper.handle_vip_fip(fip_info)
+        calls = [
+            mock.call.db_set(
+                'Load_Balancer', lb.uuid,
+                ('external_ids', {'neutron:vip_fip': '10.0.0.123'})),
+            mock.call.db_clear('Load_Balancer', lb.uuid, 'vips'),
+            mock.call.db_set(
+                'Load_Balancer', lb.uuid,
+                ('vips', {'10.0.0.123:80': '192.168.2.149:1010',
+                          '172.26.21.20:80': '192.168.2.149:1010'}))]
+        self.helper.ovn_nbdb_api.assert_has_calls(calls)
+
+    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+                '_find_ovn_lb')
+    @mock.patch('ovn_octavia_provider.driver.get_network_driver')
+    def test_handle_vip_fip_lb_not_found(self, net_dr, fb):
+        fip_info = {'lb_id': 'foo'}
+        fb.side_effect = [idlutils.RowNotFound]
+        self.helper.handle_vip_fip(fip_info)
+        self.helper.ovn_nbdb_api.assert_not_called()
+
+    @mock.patch.object(ovn_driver, 'atexit')
+    def test_ovsdb_connections(self, mock_atexit):
+        ovn_driver.OvnProviderHelper.ovn_nbdb_api = None
+        ovn_driver.OvnProviderHelper.ovn_nbdb_api_for_events = None
+        prov_helper1 = ovn_driver.OvnProviderHelper()
+        prov_helper2 = ovn_driver.OvnProviderHelper()
+        # One connection for API requests
+        self.assertIs(prov_helper1.ovn_nbdb_api,
+                      prov_helper2.ovn_nbdb_api)
+        # One connection to handle events
+        self.assertIs(prov_helper1.ovn_nbdb_api_for_events,
+                      prov_helper2.ovn_nbdb_api_for_events)
+        prov_helper2.shutdown()
+        prov_helper1.shutdown()
+        # Assert at_exit calls
+        mock_atexit.assert_has_calls([
+            mock.call.register(prov_helper1.shutdown),
+            mock.call.register(prov_helper2.shutdown)])
+
+    def test_create_vip_port_vip_selected(self):
+        expected_dict = {
+            'port': {'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
+                                       self.loadbalancer_id),
+                     'fixed_ips': [{'subnet_id':
+                                    self.vip_dict['vip_subnet_id'],
+                                    'ip_address': '10.1.10.1'}],
+                     'network_id': self.vip_dict['vip_network_id'],
+                     'admin_state_up': True,
+                     'project_id': self.project_id}}
+        with mock.patch.object(ovn_driver, 'get_network_driver') as gn:
+            self.vip_dict['vip_address'] = '10.1.10.1'
+            self.helper.create_vip_port(self.project_id,
+                                        self.loadbalancer_id,
+                                        self.vip_dict)
+            expected_call = [
+                mock.call().neutron_client.create_port(expected_dict)]
+            gn.assert_has_calls(expected_call)
+
+    def test_create_vip_port_vip_not_selected(self):
+        expected_dict = {
+            'port': {'name': '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX,
+                                       self.loadbalancer_id),
+                     'fixed_ips': [{'subnet_id':
+                                    self.vip_dict['vip_subnet_id']}],
+                     'network_id': self.vip_dict['vip_network_id'],
+                     'admin_state_up': True,
+                     'project_id': self.project_id}}
+        with mock.patch.object(ovn_driver, 'get_network_driver') as gn:
+            self.helper.create_vip_port(self.project_id,
+                                        self.loadbalancer_id,
+                                        self.vip_dict)
+            expected_call = [
+                mock.call().neutron_client.create_port(expected_dict)]
+            gn.assert_has_calls(expected_call)
+
+    def test_get_member_info(self):
+        ret = self.helper.get_member_info(self.pool_id)
+        self.assertEqual([(self.member_id, '%s:%s' % (self.member_address,
+                          self.member_port))], ret)
+
+    def test_get_pool_member_id(self):
+        ret = self.helper.get_pool_member_id(
+            self.pool_id, mem_addr_port='192.168.2.149:1010')
+        self.assertEqual(self.member_id, ret)
+
+    def test__get_existing_pool_members(self):
+        ret = self.helper._get_existing_pool_members(self.pool_id)
+        self.assertEqual(ret, self.member_line)
diff --git a/requirements.txt b/requirements.txt
index 385319fa..ed86eb00 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,6 +10,5 @@ ovsdbapp>=0.17.0 # Apache-2.0
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 tenacity>=4.4.0 # Apache-2.0
 Babel!=2.4.0,>=2.3.4 # BSD
-neutron>=13.0.0.0b2 # Apache-2.0
 octavia-lib>=1.3.1 # Apache-2.0
 python-neutronclient>=6.7.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 962fa28b..5abfadf8 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -25,3 +25,8 @@ packages =
 [global]
 setup-hooks =
     pbr.hooks.setup_hook
+
+
+[entry_points]
+octavia.api.drivers =
+    ovn = ovn_octavia_provider.driver:OvnProviderDriver
diff --git a/test-requirements.txt b/test-requirements.txt
index 04342318..e2c5e6f0 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -17,3 +17,4 @@ testresources>=2.0.0 # Apache-2.0/BSD
 testscenarios>=0.4 # Apache-2.0/BSD
 WebTest>=2.0.27 # MIT
 testtools>=2.2.0 # MIT
+neutron>=15.0.0 # Apache-2.0
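
Note on the `_update_lb_to_ls_association` tests above: they all exercise the JSON reference counter kept under the `ls_refs` key of a load balancer's `external_ids`. The following is a minimal standalone sketch of that bookkeeping, matching only the counts the tests assert; `bump_ls_refs` is a hypothetical illustration, not the driver's actual API.

import json


def bump_ls_refs(external_ids, ls_name, associate=True):
    """Return the updated 'ls_refs' JSON string for a logical switch."""
    refs = json.loads(external_ids.get('ls_refs', '{}'))
    if associate:
        # First association records the switch with a count of 1.
        refs[ls_name] = refs.get(ls_name, 0) + 1
    elif ls_name in refs:
        refs[ls_name] -= 1
        if refs[ls_name] <= 0:
            # Last reference gone: the LB no longer needs this switch.
            del refs[ls_name]
    return json.dumps(refs)


# The same counts the tests assert:
assert bump_ls_refs({}, 'net1') == '{"net1": 1}'
assert bump_ls_refs({'ls_refs': '{"net1": 2}'}, 'net1',
                    associate=False) == '{"net1": 1}'
assert bump_ls_refs({'ls_refs': '{"net1": 1}'}, 'net1',
                    associate=False) == '{}'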